code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # HW-8
# ## Problem 1 :
# In this problem , we have to find the following integrals :
# $ a) e^x \\ b) sin(1/x) \\c) x^3 $ using simpson, adaptive trapezoid, trapezoid algorithms. \
# a ) To integrate the function : $e^x$ . To use Simpson algorithm we use the codes used practiced in class (CP1_CalculusUtilityFunctions/integrals.py) and do the necessary imports:
from integrals import simpson as ss
import matplotlib.pyplot as plt
import numpy as np
import math
# %load_ext pycodestyle_magic
# %pycodestyle_on
# First We check the function $e^x$ with boundary [0,10] with subintervals 10. Here I am definig the function rather than using "Lambda" because its recommended by linting tool.
# +
# NOTE(review): throughout this section the accumulator was renamed from
# ``sum`` (which shadows the builtin) to ``result``, and results are printed
# with %f -- the original %d format silently truncated the float integrals.
def f(x):
    return np.exp(x)


result = ss(f, 0, 10, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 10, 0.1)
y = np.exp(x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Second: We check with the boundary [$0,2\pi$]
# +
def f(x):
    return np.exp(x)


result = ss(f, 0, 2 * math.pi, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 2 * math.pi, 0.1)
y = np.exp(x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Finally we check with boundary [-1,1]
# +
def f(x):
    return np.exp(x)


result = ss(f, -1, 1, 10)
print('The sum is: %f ' % result)
x = np.arange(-1, 1, 0.1)
y = np.exp(x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# ### For the function $x^3$, we check with boundary [0,10]
# +
def f(x):
    return x ** 3


result = ss(f, 0, 10, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 10, 0.1)
y = x**3
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Second: We check with the boundary [$0,2\pi$]
# +
def f(x):
    return x ** 3


result = ss(f, 0, 2 * math.pi, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 2 * math.pi, 0.1)
y = x**3
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Finally we check with boundary [-1,1]
# +
def f(x):
    return x ** 3


result = ss(f, -1, 1, 10)
print('The sum is: %f ' % result)
x = np.arange(-1, 1, 0.1)
y = x**3
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# ### For the 3rd function sin(1/x), we check with interval [0,10]: the
# integrand is undefined at x = 0, so the lower limit starts at 0.01.
# +
def f(x):
    return np.sin(1 / x)


result = ss(f, 0.01, 10, 10)
print('The sum is: %f ' % result)
x = np.arange(0.01, 10, 0.1)  # start at 0.01 to skip the singularity at x = 0
y = np.sin(1 / x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Second: We check with the boundary [$0,2\pi$]
# +
def f(x):
    return np.sin(1 / x)


result = ss(f, 0.01, 2 * np.pi, 10)
print('The sum is: %f ' % result)
x = np.arange(0.01, 2 * np.pi, 0.1)
y = np.sin(1 / x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Finally we check with boundary [-1,1]
# +
def f(x):
    return np.sin(1 / x)


# BUGFIX: the original call was ss(f, -1, 1, .1) -- 0.1 is not a valid
# subinterval count, and the interval contains the singularity at x = 0.
# Integrate the two halves, excluding a small neighborhood of the origin.
result = ss(f, -1, -0.01, 10) + ss(f, 0.01, 1, 10)
print('The sum is: %f ' % result)
x = np.arange(-0.95, 1, 0.1)  # sample points chosen so x never hits 0
y = np.sin(1 / x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# ## Method 2 (Adaptive Trapezoid)
from integrals import adaptive_trapezoid as at
# For the function $e^x$, we check with boundary [0,10].
# NOTE(review): accumulators renamed from ``sum`` (shadows the builtin) to
# ``result``; printed with %f since %d truncated the float integrals.
# +
def f(x):
    return np.exp(x)


result = at(f, 0, 10, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 10, 0.1)
y = np.exp(x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Second: We check with the boundary [$0,2\pi$]
# +
def f(x):
    return np.exp(x)


result = at(f, 0, 2 * math.pi, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 2 * math.pi, 0.1)
y = np.exp(x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Finally we check with boundary [-1,1]
# +
def f(x):
    return np.exp(x)


result = at(f, -1, 1, 10)
print('The sum is: %f ' % result)
x = np.arange(-1, 1, 0.1)
y = np.exp(x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# ### For the 2nd function $x^3$, we check with interval [0,10]:
# +
def f(x):
    return x ** 3


result = at(f, 0, 10, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 10, 0.1)
y = x**3
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Second: We check with the boundary [$0,2\pi$]
# +
def f(x):
    return x ** 3


result = at(f, 0, 2 * math.pi, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 2 * math.pi, 0.1)
y = x**3
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Finally we check with boundary [-1,1]
# +
# BUGFIX: this cell is in the x^3 section and plots x**3, but the original
# redefined f as exp(x) here, so the integral did not match the plot.
def f(x):
    return x ** 3


result = at(f, -1, 1, 10)
print('The sum is: %f ' % result)
x = np.arange(-1, 1, 0.1)
y = x**3
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# ### For the 3rd function $sin(1/x)$, first , we check with interval [0,10]:
# +
def f(x):
    return np.sin(1 / x)


result = at(f, 0.01, 10, 10)
print('The sum is: %f ' % result)
x = np.arange(0.01, 10, 0.1)  # start at 0.01: the integrand is undefined at x = 0
y = np.sin(1 / x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Second: We check with the boundary [$0,2\pi$]
# +
def f(x):
    return np.sin(1 / x)


result = at(f, 0.01, 2 * math.pi, 10)
print('The sum is: %f ' % result)
x = np.arange(0.01, 2 * math.pi, 0.1)
y = np.sin(1 / x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Finally we check with boundary [-1,1].
# +
def f(x):
    return np.sin(1 / x)


# BUGFIX: the original integrated only [0.01, 1] and plotted [-1, 10] while
# the section is about [-1, 1]; the adaptive routine also divided by zero at
# x = 0. Integrate the two halves, excluding a small neighborhood of 0.
result = at(f, -1, -0.01, 1e-5) + at(f, 0.01, 1, 1e-5)
print('The sum is: %f ' % result)
x = np.arange(-0.95, 1, 0.1)  # sample points chosen so x never hits 0
y = np.sin(1 / x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# ## Method3 : Trapezoid
# ### For function : $e^x$ with boundary [0,10] with subintervals 10.
from integrals import trapezoid as tp
# NOTE(review): accumulators renamed from ``sum`` (shadows the builtin) to
# ``result``; printed with %f since %d truncated the float integrals.
# +
def f(x):
    return np.exp(x)


result = tp(f, 0, 10, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 10, .01)
y = np.exp(x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Second: We check with the boundary [$0,2\pi$]
# +
def f(x):
    return np.exp(x)


result = tp(f, 0, 2 * math.pi, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 2 * math.pi, .01)
y = np.exp(x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Finally we check with boundary [-1,1]
# +
def f(x):
    return np.exp(x)


result = tp(f, -1, 1, 10)
print('The sum is: %f ' % result)
x = np.arange(-1, 1, .01)
y = np.exp(x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# ### For the 2nd function $x^3$, we check with interval [0,10]:
# +
def f(x):
    return x**3


result = tp(f, 0, 10, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 10, .01)
y = x**3
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Second: We check with the boundary [$0,2\pi$]
# +
def f(x):
    return x**3


result = tp(f, 0, 2 * math.pi, 10)
print('The sum is: %f ' % result)
x = np.arange(0, 2 * math.pi, .01)
y = x**3
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Finally we check with boundary [-1,1]
# +
def f(x):
    return x**3


result = tp(f, -1, 1, 10)
print('The sum is: %f ' % result)
x = np.arange(-1, 1, .01)
y = x**3
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# ### For the 3rd function $sin(1/x)$, first , we check with interval [0,10]:
# +
def f(x):
    return np.sin(1 / x)


# BUGFIX: the original passed .1 as the subinterval count; the other cells in
# this section use 10 subintervals, so use 10 here as well.
result = tp(f, 0.01, 10, 10)
print('The sum is: %f ' % result)
x = np.arange(0.01, 10, 0.1)  # start at 0.01: the integrand is undefined at x = 0
y = np.sin(1 / x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Second: We check with the boundary [$0,2\pi$]
# +
def f(x):
    return np.sin(1 / x)


result = tp(f, 0.01, 2 * math.pi, 10)
print('The sum is: %f ' % result)
x = np.arange(0.01, 2 * math.pi, 0.1)
y = np.sin(1 / x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
# -
# Finally we check with boundary [-1,1]
# +
def f(x):
    return np.sin(1 / x)


# BUGFIX: tp(f, -1, 1, 0.1) passed an invalid subinterval count and would
# evaluate the integrand at the singular point x = 0. Integrate the two
# halves, excluding a small neighborhood of the origin.
result = tp(f, -1, -0.01, 10) + tp(f, 0.01, 1, 10)
print('The sum is: %f ' % result)
x = np.arange(-0.95, 1, 0.1)  # sample points chosen so x never hits 0
y = np.sin(1 / x)
plt.plot(x, y, 'k--')
plt.fill_between(x, y, color='#539ecd')
plt.grid()
plt.show()
| UtilityFunctions/hw_8_part_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda]
# language: python
# name: conda-env-anaconda-py
# ---
# ### Nov 2018
# ### MJP
# # Want to experiment with use of scipy's optimization functions:
# - scipy.optimize.least_squares / scipy.optimize.minimize / scipy.optimize.curve_fit
# ### Purpose is to understand how to use them as a (temporary?) part of developing a robust fitting routine for use in the MPC's NBody code
# %matplotlib inline
import numpy as np
from scipy.optimize import least_squares
import matplotlib.pyplot as plt
# # Least-squares seems to be the "lowest-level" function
# - https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares
# - "Given the residuals f(x) (an m-dimensional real function of n real variables) and the loss function rho(s) (a scalar function), least_squares finds a local minimum of the cost function F(x): ..."
#
# +
# Follow ingedients in https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html#scipy.optimize.least_squares
# Basic model form
def modelFunc(x, t):
    """Exponential model y = x[0] + x[1] * exp(x[2] * t).

    x : parameter triple (offset, amplitude, rate); t : scalar or ndarray.
    Returns the model evaluated elementwise over t.
    """
    return x[0] + x[1] * np.exp(x[2] * t)
# Make data according to the model, then corrupt it with Gaussian noise and a
# few amplified "outlier" samples.
def synthesizeData(x, t, noise=0.1, n_outliers=3, random_state=0):
    """Return modelFunc(x, t) plus noise; n_outliers points get 10x noise.

    random_state seeds a private RandomState so the data are reproducible.
    """
    y = modelFunc(x, t)
    rnd = np.random.RandomState(random_state)
    error = noise * rnd.randn(t.size)
    outliers = rnd.randint(0, t.size, n_outliers)
    error[outliers] *= 10
    return y + error
x_True = np.array([0.5, 2.0, -1.0])   # ground-truth parameters
t_Data = np.arange(0, 21, 1)
y_Data = synthesizeData(x_True, t_Data)


# Residuals for least_squares: model prediction minus observation.
def resFunc(x, t, y):
    return modelFunc(x, t) - y


# initial estimate of parameters.
x0 = np.array([1.0, 1.0, 0.0])
# Plain (linear-loss) least-squares fit.
res_lsq = least_squares(resFunc, x0, args=(t_Data, y_Data))
# Fits with two robust loss functions. f_scale=0.1 means inlier residuals
# should not significantly exceed 0.1 (the noise level used above).
res_soft_l1 = least_squares(resFunc, x0, loss='soft_l1', f_scale=0.1, args=(t_Data, y_Data))
res_log = least_squares(resFunc, x0, loss='cauchy', f_scale=0.1, args=(t_Data, y_Data))
# Plot the data, the true curve, and the three fitted curves.
y_true = modelFunc(x_True, t_Data)
y_lsq = modelFunc(res_lsq.x, t_Data)
y_soft_l1 = modelFunc(res_soft_l1.x, t_Data)
y_log = modelFunc(res_log.x, t_Data)
plt.plot(t_Data, y_Data, 'o')
plt.plot(t_Data, y_true, 'k', linewidth=2, label='true')
plt.plot(t_Data, y_lsq, label='linear loss')
plt.plot(t_Data, y_soft_l1, label='soft_l1 loss ')
plt.plot(t_Data, y_log, label='log (cauchy) loss')
plt.xlabel("t")
plt.ylabel("y")
plt.legend()
plt.show()
# -
# ### Here I experiment with an analytic function & its jacobian
# +
# https://www.reddit.com/r/learnpython/comments/6tcy6m/how_to_input_jacobian_function_in/
def func(t, K, zeta, omega_n, omega_d, phi):
    """Damped oscillation: K * exp(-zeta*omega_n*t) * cos(omega_d*t - phi)."""
    return K*np.exp(-zeta*omega_n*t)*np.cos(omega_d*t - phi)


def jacobian(t, K, zeta, omega_n, omega_d, phi):
    """Analytic Jacobian of ``func`` w.r.t. (K, zeta, omega_n, omega_d, phi).

    Returns an array of shape (len(t), 5), as curve_fit's ``jac`` expects.
    """
    dK = np.exp(-omega_n*t*zeta)*np.cos(omega_d*t - phi)
    dzeta = -K*omega_n*t*np.exp(-omega_n*t*zeta)*np.cos(omega_d*t - phi)
    domegan = -K*t*zeta*np.exp(-omega_n*t*zeta)*np.cos(omega_d*t - phi)
    domegad = -K*t*np.exp(-omega_n*t*zeta)*np.sin(omega_d*t - phi)  # matches mathcad
    dphi = K*np.exp(-omega_n*t*zeta)*np.sin(omega_d*t - phi)
    return np.transpose([dK, dzeta, domegan, domegad, dphi])


# BUGFIX: the original line called ``opt.curve_fit`` but no ``opt`` module was
# ever imported, and ``Time``/``Data1`` are defined nowhere in this notebook,
# so the cell raised NameError. Import curve_fit explicitly and only run the
# fit when the data actually exist.
from scipy.optimize import curve_fit

try:
    curve_fit(func, Time, Data1, jac=jacobian)
except NameError:
    print('Time/Data1 are not defined in this notebook; skipping curve_fit demo.')
| mpcfit/experimentation_with_leastsquares.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cv] *
# language: python
# name: conda-env-cv-py
# ---
# +
import sys, os
import glob
import os
import sys
import pdb
import os.path as osp
sys.path.append(os.getcwd())
import importlib, time
import numpy as np
import torch
from torch.utils.data import DataLoader
from humor.utils.config import TestConfig
from humor.utils.logging import Logger, class_name_to_file_name, mkdir, cp_files
from humor.utils.torch import get_device, save_state, load_state
from humor.utils.stats import StatTracker
from humor.utils.transforms import rotation_matrix_to_angle_axis
from humor.body_model.utils import SMPL_JOINTS
from humor.datasets.amass_utils import NUM_KEYPT_VERTS, CONTACT_INDS
from humor.losses.humor_loss import CONTACT_THRESH
NUM_WORKERS = 0
def parse_args(argv):
    """Parse command-line/config arguments through TestConfig.

    argv : list of argument strings (supports '@file' config references).
    Returns the recognized-arguments namespace; unrecognized args are only
    printed as a warning.
    """
    # create config and parse args
    config = TestConfig(argv)
    known_args, unknown_args = config.parse()
    print('Unrecognized args: ' + str(unknown_args))
    return known_args
def test(args_obj, config_file):
    """Run HuMoR test-time evaluation.

    args_obj : parsed config bundle with .base/.model/.dataset/.loss namespaces.
    config_file : path of the config file used (copied next to the results).

    NOTE(review): indentation was reconstructed from the flattened source;
    confirm the nesting against the original humor test script.
    """
    # set up output
    args = args_obj.base
    mkdir(args.out)
    # create logging system
    test_log_path = os.path.join(args.out, 'test.log')
    Logger.init(test_log_path)
    # save arguments used
    Logger.log('Base args: ' + str(args))
    Logger.log('Model args: ' + str(args_obj.model))
    Logger.log('Dataset args: ' + str(args_obj.dataset))
    Logger.log('Loss args: ' + str(args_obj.loss))
    # save training script/model/dataset/config used
    test_scripts_path = os.path.join(args.out, 'test_scripts')
    mkdir(test_scripts_path)
    # BUGFIX: ``cur_file_path`` was referenced but never defined in this
    # notebook (it exists only in the original script); derive it here.
    cur_file_path = os.path.dirname(os.path.realpath(__file__)) if '__file__' in globals() else os.getcwd()
    pkg_root = os.path.join(cur_file_path, '..')
    dataset_file = class_name_to_file_name(args.dataset)
    dataset_file_path = os.path.join(pkg_root, 'datasets/' + dataset_file + '.py')
    model_file = class_name_to_file_name(args.model)
    loss_file = class_name_to_file_name(args.loss)
    model_file_path = os.path.join(pkg_root, 'models/' + model_file + '.py')
    train_file_path = os.path.join(pkg_root, 'test/test_humor.py')
    cp_files(test_scripts_path, [train_file_path, model_file_path, dataset_file_path, config_file])
    # load model class and instantiate
    model_class = importlib.import_module('models.' + model_file)
    Model = getattr(model_class, args.model)
    model = Model(**args_obj.model_dict,
                  model_smpl_batch_size=args.batch_size)  # assumes model is HumorModel
    # load loss class and instantiate
    loss_class = importlib.import_module('losses.' + loss_file)
    Loss = getattr(loss_class, args.loss)
    loss_func = Loss(**args_obj.loss_dict,
                     smpl_batch_size=args.batch_size*args_obj.dataset.sample_num_frames)  # assumes loss is HumorLoss
    device = get_device(args.gpu)
    model.to(device)
    loss_func.to(device)
    print(model)
    # count params
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    Logger.log('Num model params: ' + str(params))
    # freeze params in loss
    for param in loss_func.parameters():
        param.requires_grad = False
    # load in pretrained weights if given
    if args.ckpt is not None:
        start_epoch, min_val_loss, min_train_loss = load_state(args.ckpt, model, optimizer=None, map_location=device, ignore_keys=model.ignore_keys)
        Logger.log('Successfully loaded saved weights...')
        Logger.log('Saved checkpoint is from epoch idx %d with min val loss %.6f...' % (start_epoch, min_val_loss))
    else:
        Logger.log('ERROR: No weight specified to load!!')
        # return
    # load dataset class and instantiate training and validation set
    if args.test_on_train:
        Logger.log('WARNING: running evaluation on TRAINING data as requested...should only be used for debugging!')
    elif args.test_on_val:
        Logger.log('WARNING: running evaluation on VALIDATION data as requested...should only be used for debugging!')
    Dataset = getattr(importlib.import_module('datasets.' + dataset_file), args.dataset)
    split = 'test'
    if args.test_on_train:
        split = 'train'
    elif args.test_on_val:
        split = 'val'
    test_dataset = Dataset(split=split, **args_obj.dataset_dict)
    # create loaders
    test_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=args.shuffle_test,
                             num_workers=NUM_WORKERS,
                             pin_memory=True,
                             drop_last=False,
                             worker_init_fn=lambda _: np.random.seed())
    test_dataset.return_global = True
    model.dataset = test_dataset
    if args.eval_full_test:
        Logger.log('Running full test set evaluation...')
        # stats tracker
        tensorboard_path = os.path.join(args.out, 'test_tensorboard')
        mkdir(tensorboard_path)
        stat_tracker = StatTracker(tensorboard_path)
        # testing with same stats as training
        test_start_t = time.time()
        test_dataset.pre_batch()
        model.eval()
        for i, data in enumerate(test_loader):
            batch_start_t = time.time()
            # run model
            # note we're always using ground truth input so this is only
            # measuring single-step error, just like in training
            loss, stats_dict = model_class.step(model, loss_func, data, test_dataset, device, 0, mode='test', use_gt_p=1.0)
            # collect stats
            batch_elapsed_t = time.time() - batch_start_t
            total_elapsed_t = time.time() - test_start_t
            stats_dict['loss'] = loss
            stats_dict['time_per_batch'] = torch.Tensor([batch_elapsed_t])[0]
            stat_tracker.update(stats_dict, tag='test')
            if i % args.print_every == 0:
                stat_tracker.print(i, len(test_loader),
                                   0, 1,
                                   total_elapsed_time=total_elapsed_t,
                                   tag='test')
            test_dataset.pre_batch()
    if args.eval_sampling or args.eval_sampling_debug:
        eval_sampling(model, test_dataset, test_loader, device,
                      out_dir=args.out if args.eval_sampling else None,
                      num_samples=args.eval_num_samples,
                      samp_len=args.eval_sampling_len,
                      viz_contacts=args.viz_contacts,
                      viz_pred_joints=args.viz_pred_joints,
                      viz_smpl_joints=args.viz_smpl_joints)
    Logger.log('Finished!')
def eval_sampling(model, test_dataset, test_loader, device,
                  out_dir=None,
                  num_samples=1,
                  samp_len=10.0,
                  viz_contacts=False,
                  viz_pred_joints=False,
                  viz_smpl_joints=False):
    """Qualitatively evaluate sampling: roll out motions and visualize them.

    out_dir : if given, results are rendered under out_dir/eval_sampling.
    num_samples : rollouts drawn per input sequence.
    samp_len : rollout length in seconds (converted to frames at 30 Hz).

    NOTE(review): indentation reconstructed from the flattened source;
    confirm nesting against the original script.
    """
    Logger.log('Evaluating sampling qualitatively...')
    # body model is only needed for this qualitative evaluation
    from body_model.body_model import BodyModel
    from body_model.utils import SMPLH_PATH
    eval_qual_samp_len = int(samp_len * 30.0)  # at 30 Hz
    res_out_dir = None
    if out_dir is not None:
        res_out_dir = os.path.join(out_dir, 'eval_sampling')
        if not os.path.exists(res_out_dir):
            os.mkdir(res_out_dir)
    J = len(SMPL_JOINTS)
    V = NUM_KEYPT_VERTS
    male_bm_path = os.path.join(SMPLH_PATH, 'male/model.npz')
    female_bm_path = os.path.join(SMPLH_PATH, 'female/model.npz')
    male_bm = BodyModel(bm_path=male_bm_path, num_betas=16, batch_size=eval_qual_samp_len).to(device)
    female_bm = BodyModel(bm_path=female_bm_path, num_betas=16, batch_size=eval_qual_samp_len).to(device)
    with torch.no_grad():
        test_dataset.pre_batch()
        model.eval()
        for i, data in enumerate(test_loader):
            # get inputs
            batch_in, batch_out, meta = data
            print(meta['path'])
            seq_name_list = [spath[:-4] for spath in meta['path']]  # drop file extension
            if res_out_dir is None:
                batch_res_out_list = [None]*len(seq_name_list)
            else:
                batch_res_out_list = [os.path.join(res_out_dir, seq_name.replace('/', '_') + '_b' + str(i) + 'seq' + str(sidx)) for sidx, seq_name in enumerate(seq_name_list)]
            print(batch_res_out_list)
            # continue
            x_past, _, gt_dict, input_dict, global_gt_dict = model.prepare_input(batch_in, device,
                                                                                 data_out=batch_out,
                                                                                 return_input_dict=True,
                                                                                 return_global_dict=True)
            # roll out predicted motion
            B, T, _, _ = x_past.size()
            x_past = x_past[:, 0, :, :]  # only need input for first step
            rollout_input_dict = dict()
            for k in input_dict.keys():
                rollout_input_dict[k] = input_dict[k][:, 0, :, :]  # only need first step
            # sample same trajectory multiple times and save the joints/contacts output
            for samp_idx in range(num_samples):
                x_pred_dict = model.roll_out(x_past, rollout_input_dict, eval_qual_samp_len, gender=meta['gender'], betas=meta['betas'].to(device))
                # visualize and save
                print('Visualizing sample %d/%d!' % (samp_idx+1, num_samples))
                imsize = (1080, 1080)
                cur_res_out_list = batch_res_out_list
                if res_out_dir is not None:
                    cur_res_out_list = [out_path + '_samp%d' % (samp_idx) for out_path in batch_res_out_list]
                    imsize = (720, 720)
                viz_eval_samp(global_gt_dict, x_pred_dict, meta, male_bm, female_bm, cur_res_out_list,
                              imw=imsize[0],
                              imh=imsize[1],
                              show_smpl_joints=viz_smpl_joints,
                              show_pred_joints=viz_pred_joints,
                              show_contacts=viz_contacts
                              )
def viz_eval_samp(global_gt_dict, x_pred_dict, meta, male_bm, female_bm, out_path_list,
                  imw=720,
                  imh=720,
                  show_pred_joints=False,
                  show_smpl_joints=False,
                  show_contacts=False):
    '''
    Given x_pred_dict from the model rollout and the ground truth dict, runs
    through the SMPL model to visualize each sequence in the batch.

    NOTE(review): indentation reconstructed from the flattened source;
    confirm nesting against the original script.
    '''
    J = len(SMPL_JOINTS)
    V = NUM_KEYPT_VERTS
    # convert predicted rotation matrices back to axis-angle for the body model
    pred_world_root_orient = x_pred_dict['root_orient']
    B, T, _ = pred_world_root_orient.size()
    pred_world_root_orient = rotation_matrix_to_angle_axis(pred_world_root_orient.reshape((B*T, 3, 3))).reshape((B, T, 3))
    pred_world_pose_body = x_pred_dict['pose_body']
    pred_world_pose_body = rotation_matrix_to_angle_axis(pred_world_pose_body.reshape((B*T*(J-1), 3, 3))).reshape((B, T, (J-1)*3))
    pred_world_trans = x_pred_dict['trans']
    pred_world_joints = x_pred_dict['joints'].reshape((B, T, J, 3))
    viz_contacts = [None]*B
    if show_contacts and 'contacts' in x_pred_dict.keys():
        # threshold predicted contact probabilities, then scatter them back to
        # the full joint set for visualization
        pred_contacts = torch.sigmoid(x_pred_dict['contacts'])
        pred_contacts = (pred_contacts > CONTACT_THRESH).to(torch.float)
        viz_contacts = torch.zeros((B, T, len(SMPL_JOINTS))).to(pred_contacts)
        viz_contacts[:, :, CONTACT_INDS] = pred_contacts
        pred_contacts = viz_contacts
    betas = meta['betas'].to(global_gt_dict[list(global_gt_dict.keys())[0]].device)
    for b in range(B):
        bm_world = male_bm if meta['gender'][b] == 'male' else female_bm
        # pred
        body_pred = bm_world(pose_body=pred_world_pose_body[b],
                             pose_hand=None,
                             betas=betas[b, 0].reshape((1, -1)).expand((T, 16)),
                             root_orient=pred_world_root_orient[b],
                             trans=pred_world_trans[b])
        pred_smpl_joints = body_pred.Jtr[:, :J]
        viz_joints = None
        if show_smpl_joints:
            viz_joints = pred_smpl_joints
        elif show_pred_joints:
            viz_joints = pred_world_joints[b]
        cur_offscreen = out_path_list[b] is not None
        from viz.utils import viz_smpl_seq, create_video
        body_alpha = 0.5 if viz_joints is not None and cur_offscreen else 1.0
        viz_smpl_seq(body_pred,
                     imw=imw, imh=imh, fps=30,
                     render_body=True,
                     render_joints=viz_joints is not None,
                     render_skeleton=viz_joints is not None and cur_offscreen,
                     render_ground=True,
                     contacts=viz_contacts[b],
                     joints_seq=viz_joints,
                     body_alpha=body_alpha,
                     use_offscreen=cur_offscreen,
                     out_path=out_path_list[b],
                     wireframe=False,
                     RGBA=False,
                     follow_camera=True,
                     cam_offset=[0.0, 2.2, 0.9],
                     joint_color=[0.0, 1.0, 0.0],
                     point_color=[0.0, 0.0, 1.0],
                     skel_color=[0.5, 0.5, 0.5],
                     joint_rad=0.015,
                     point_rad=0.015
                     )
        if cur_offscreen:
            create_video(out_path_list[b] + '/frame_%08d.' + '%s' % ('png'), out_path_list[b] + '.mp4', 30)
def main(args, config_file):
    """Entry point: run the full test-time evaluation with the parsed args."""
    test(args, config_file)
# -
# Parse the sampling-evaluation config and pull out the base arguments.
config_file = './configs/test_humor_sampling.cfg'
args_obj = parse_args(['@' + config_file])
args = args_obj.base
# +
import humor.models.humor_model as HumorModel
from humor.losses.humor_loss import HumorLoss
from humor.datasets.amass_discrete_dataset import AmassDiscreteDataset

# pkg_root = os.path.join('..')
# model_file_path = os.path.join(pkg_root, 'models/' + "humor_model" + '.py')
# train_file_path = os.path.join(pkg_root, 'test/test_humor.py')
# cp_files(test_scripts_path, [train_file_path, model_file_path, dataset_file_path, config_file])
# load model class and instantiate (inline version of test() above)
model_class = HumorModel
model = HumorModel.HumorModel(**args_obj.model_dict,
                              model_smpl_batch_size=args.batch_size)  # assumes model is HumorModel
# load loss class and instantiate
loss_func = HumorLoss(**args_obj.loss_dict,
                      smpl_batch_size=args.batch_size*args_obj.dataset.sample_num_frames)  # assumes loss is HumorLoss
device = get_device(args.gpu)
model.to(device)
loss_func.to(device)
print(model)
# count params
model_parameters = filter(lambda p: p.requires_grad, model.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print('Num model params: ' + str(params))
# freeze params in loss
for param in loss_func.parameters():
    param.requires_grad = False
# load in pretrained weights if given
if args.ckpt is not None:
    start_epoch, min_val_loss, min_train_loss = load_state(args.ckpt, model, optimizer=None, map_location=device, ignore_keys=model.ignore_keys)
    print('Successfully loaded saved weights...')
    print('Saved checkpoint is from epoch idx %d with min val loss %.6f...' % (start_epoch, min_val_loss))
else:
    print('ERROR: No weight specified to load!!')
    # return
# load dataset class and instantiate training and validation set
if args.test_on_train:
    print('WARNING: running evaluation on TRAINING data as requested...should only be used for debugging!')
elif args.test_on_val:
    print('WARNING: running evaluation on VALIDATION data as requested...should only be used for debugging!')
split = 'val'
# if args.test_on_train:
#     split = 'train'
# elif args.test_on_val:
#     split = 'val'
test_dataset = AmassDiscreteDataset(split=split, **args_obj.dataset_dict)
# create loaders
test_loader = DataLoader(test_dataset,
                         batch_size=args.batch_size,
                         shuffle=args.shuffle_test,
                         num_workers=NUM_WORKERS,
                         pin_memory=True,
                         drop_last=False,
                         worker_init_fn=lambda _: np.random.seed())
test_dataset.return_global = True
model.dataset = test_dataset
# -
# ## One Step Error
# +
# Measure single-step prediction error on a single batch (ground-truth inputs,
# exactly as during training).
test_dataset.pre_batch()
model.eval()
for i, data in enumerate(test_loader):
    batch_start_t = time.time()
    # run model
    # note we're always using ground truth input so this is only measuring
    # single-step error, just like in training
    loss, stats_dict = model_class.step(model, loss_func, data, test_dataset, device, 0, mode='test', use_gt_p=1.0)
    print(loss)
    break
# -
# Inspect the structure of the last batch.
data[0].keys()
data[0]['pose_body'].shape
# # Sampling for sequence
# Grab a single batch from the loader, then prepare model inputs for it.
for i, data in enumerate(test_loader):
    batch_in, batch_out, meta = data
    break
data_names = ['trans', 'trans_vel', 'root_orient', 'root_orient_vel', 'pose_body', 'joints', 'joints_vel', 'contacts']
x_past, _, gt_dict, input_dict, global_gt_dict = model.prepare_input(
    batch_in,
    device,
    data_out=batch_out,
    return_input_dict=True,
    return_global_dict=True,
)
# +
# rollout_input_dict = dict()
# for k in input_dict.keys():
# rollout_input_dict[k] = input_dict[k][
# :, 0, :, :
# ] # only need first step
# eval_qual_samp_len = 1
# x_pred_dict = model.roll_out(
# x_past[0, 0:1],
# rollout_input_dict,
# eval_qual_samp_len,
# gender=meta["gender"],
# betas=meta["betas"].to(device),
# )
# Draw a single next-step sample from the first frame of the first sequence,
# then split the raw decoder output into named pose components (rotations
# converted back to matrices by split_output).
sample_out = model.sample_step(x_past[0, 0])
decoder_out = sample_out['decoder_out']
x_pred_dict = model.split_output(decoder_out, convert_rots=True)
# -
# +
# Utilities from the copycat project: rotation-format conversions and
# SMPL/SMPL-H parsers used to turn model predictions into meshes.
from copycat.utils.transform_utils import (
convert_aa_to_orth6d, convert_orth_6d_to_aa, vertizalize_smpl_root,
rotation_matrix_to_angle_axis, rot6d_to_rotmat, convert_orth_6d_to_mat, angle_axis_to_rotation_matrix,
angle_axis_to_quaternion
)
from copycat.smpllib.smpl_parser import SMPL_Parser, SMPL_BONE_ORDER_NAMES, SMPLH_Parser
device_cpu = torch.device("cpu")
# smpl_p = SMPL_Parser("/hdd/zen/dev/copycat/Copycat/data/smpl", gender = "male")
# smpl_p.to(device_cpu)
# NOTE(review): hard-coded absolute path -- this cell only runs on the
# original author's machine.
smplh_p = SMPLH_Parser("/hdd/zen/dev/copycat/Copycat/data/smpl", gender = "male", use_pca = False, create_transl = False)
smplh_p.to(device_cpu)
# -
import pyvista as pv
# Convert the sampled body pose (21 joint rotation matrices) to axis-angle and
# append 30 zero hand joints for the SMPL-H parser.
pose_aa_body = rotation_matrix_to_angle_axis(x_pred_dict['pose_body'].reshape(21, 3, 3))
pose_aa = torch.cat([rotation_matrix_to_angle_axis(x_pred_dict['root_orient'].reshape(1, 3, 3)), pose_aa_body, torch.zeros((30, 3)).to(device)])
# NOTE(review): ``pose_aa_prev`` is only defined in the commented-out lines
# below, so the next cell's use of it raises NameError unless they are restored.
# spose_aa_body = rotation_matrix_to_angle_axis(input_dict['pose_body'][0,0].reshape(21, 3, 3))
# pose_aa_prev = torch.cat([rotation_matrix_to_angle_axis(input_dict['root_orient'][0,0].reshape(1, 3, 3)), pose_aa_body, torch.zeros((30, 3)).to(device)])
# +
# Run the sampled pose (yellow) and the previous input pose (red) through
# SMPL-H and render both meshes over a ground plane.
# BUGFIX: ``pose_aa_prev`` is only defined in commented-out lines above, so
# this cell raised NameError; fall back to the current pose when it's missing.
try:
    pose_aa_prev
except NameError:
    pose_aa_prev = pose_aa
with torch.no_grad():
    pose = pose_aa
    # pose[:, :3] = 0
    betas = torch.zeros((1, 16))
    verts, jts = smplh_p.get_joints_verts(pose.cpu(), betas.cpu())
    vertices = verts[0].numpy()
    verts_prev, jts = smplh_p.get_joints_verts(pose_aa_prev.cpu(), betas.cpu())
    vertices_prev = verts_prev[0].numpy()
# mesh faces: pyvista wants a flat [n, i0, i1, i2, ...] face array
faces = smplh_p.faces
faces = np.hstack([np.concatenate([[3], f]) for f in faces])
mesh = pv.PolyData(vertices, faces=faces)
mesh_prev = pv.PolyData(vertices_prev, faces=faces)
# mesh.plot( jupyter_backend='pythreejs')
# pv.plot([mesh, mesh], jupyter_backend='pythreejs')
pl = pv.Plotter()
plane = pv.Plane(i_size=5, j_size=5, i_resolution=10, j_resolution=10)
pl.add_mesh(mesh, show_edges=True, color='yellow')
pl.add_mesh(mesh_prev, show_edges=True, color='red')
pl.add_mesh(plane, show_edges=True, color='white')
pl.show(jupyter_backend='pythreejs', cpos=[-1, 1, 0.5])
# -
# Quick sanity check on the sampled contacts tensor.
x_pred_dict['contacts'].shape
# # Testing
# +
import joblib

# Reload a previously dumped (accumulated_steps, prediction_dict) pair.
test_data = joblib.load('test.pkl')
acc_data, x_pred_dict = test_data[0], test_data[1]
i = 0


def dict_to_data(x_pred_dict):
    """Convert a HuMoR prediction dict (rotation matrices) to SMPL-H inputs.

    Returns (pose_aa, trans): axis-angle pose of shape (B, 52, 3) -- root +
    21 body joints + 30 zero hand joints -- and the root translations.
    """
    B = x_pred_dict['pose_body'].squeeze().shape[0]
    pose_aa_body = rotation_matrix_to_angle_axis(x_pred_dict['pose_body'].squeeze().reshape(B * 21, 3, 3)).reshape(B, 21, 3)
    root_pose = rotation_matrix_to_angle_axis(x_pred_dict['root_orient'].squeeze().reshape(B, 3, 3)).reshape(B, 1, 3)
    pose_aa = torch.cat([root_pose, pose_aa_body, torch.zeros((B, 30, 3)).to(device)], dim=1)
    trans = x_pred_dict['trans'].squeeze()
    return pose_aa, trans
# +
from collections import defaultdict

# Render the single-shot prediction (yellow) against the accumulated per-step
# predictions (red) in one scene.
faces = smplh_p.faces
faces = np.hstack([np.concatenate([[3], f]) for f in faces])
pl = pv.Plotter()
plane = pv.Plane(i_size=5, j_size=5, i_resolution=10, j_resolution=10)
pose_aa, trans = dict_to_data(x_pred_dict)
B = pose_aa.shape[0]
with torch.no_grad():
    pose = pose_aa
    betas = torch.zeros((B, 16))
    verts, jts = smplh_p.get_joints_verts(pose.cpu(), betas.cpu(), trans.cpu())
    for i in range(verts.shape[0]):
        vertices = verts[i].numpy()
        mesh = pv.PolyData(vertices, faces=faces)
        pl.add_mesh(mesh, show_edges=True, color='yellow')
# stack the per-step prediction dicts into batched tensors
x_pred_dict_acc = defaultdict(list)
for data_entry in acc_data:
    [x_pred_dict_acc[k].append(v.cpu().numpy()) for k, v in data_entry.items()]
x_pred_dict_acc = {k: torch.from_numpy(np.array(v)).to(device) for k, v in x_pred_dict_acc.items()}
pose_aa, trans = dict_to_data(x_pred_dict_acc)
with torch.no_grad():
    pose = pose_aa
    betas = torch.zeros((B, 16))
    verts, jts = smplh_p.get_joints_verts(pose.cpu(), betas.cpu(), trans.cpu())
    for i in range(verts.shape[0]):
        vertices = verts[i].numpy()
        mesh = pv.PolyData(vertices, faces=faces)
        pl.add_mesh(mesh, show_edges=True, color='red')
pl.add_mesh(plane, show_edges=True, color='white')
pl.show(jupyter_backend='pythreejs', cpos=[-1, 1, 0.5])
# -
# Compare SMPL vs SMPL-H reconstructions of the same predicted pose.
from copycat.smpllib.smpl_parser import SMPL_Parser, SMPLH_Parser
smpl_p = SMPL_Parser("/hdd/zen/dev/copycat/Copycat/data/smpl", gender="neutral")
smpl_hp = SMPLH_Parser("/hdd/zen/dev/copycat/Copycat/data/smpl", gender="neutral")
# NOTE(review): the frame count 50 is hard-coded here -- presumably this
# notebook's sequence length; confirm against x_pred_dict['pose_body'].
pose_aa_body = rotation_matrix_to_angle_axis(x_pred_dict['pose_body'].reshape(-1, 3, 3)).reshape(50, 21, 3)
with torch.no_grad():
    verts, jts = smplh_p.get_joints_verts(pose.cpu(), betas.cpu(), trans.cpu())
root_pose = rotation_matrix_to_angle_axis(x_pred_dict['root_orient'].squeeze().reshape(B, 3, 3)).reshape(B, 1, 3)
# Only 2 extra joints padded here (vs 30 above) -- presumably the two SMPL
# hand joints; verify against the SMPL parser's expected joint count.
pose_aa = torch.cat([root_pose, pose_aa_body, torch.zeros((B, 2, 3)).to(device)], dim = 1)
verts_h, jts_h = smpl_p.get_joints_verts(pose_aa.cpu(), betas.cpu(), trans.cpu())
# +
pl = pv.Plotter()
plane = pv.Plane( i_size=5, j_size=5, i_resolution=10, j_resolution=10)
# First frame of the SMPL-H reconstruction in red.
vertices = verts[0].numpy()
mesh = pv.PolyData(vertices, faces = faces)
pl.add_mesh(mesh, show_edges=True, color='red')
# First frame of the SMPL reconstruction in yellow.
vertices = verts_h[0].numpy()
mesh = pv.PolyData(vertices, faces = faces)
pl.add_mesh(mesh, show_edges=True, color='yellow')
pl.add_mesh(plane, show_edges=True, color='white')
pl.show(jupyter_backend='pythreejs', cpos=[-1, 1, 0.5])
# -
| Humor-Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Shellyga/Adversarial-Domain-Adaptation-with-Keras/blob/master/Shelly_Adversarial_Domain_Adaptation_with_Keras.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="eL528F1bqeYW" colab_type="code" outputId="ef77ab10-5d40-4a7b-f4e7-2cd7768207ba" colab={"base_uri": "https://localhost:8080/", "height": 34}
# %tensorflow_version 1.x
# + id="rPpk2rxrreIC" colab_type="code" outputId="c1ea01f6-d9e7-4be8-dd9f-dbf4f3399778" colab={"base_uri": "https://localhost:8080/", "height": 34}
import tensorflow
print(tensorflow.__version__)
# + [markdown] id="2GQgQF0CqiOf" colab_type="text"
# # Driver
# + id="_egWQHVLqkNe" colab_type="code" outputId="f4ab3ee0-3c16-4f85-aa37-ae30fa3169af" colab={"base_uri": "https://localhost:8080/", "height": 1000}
SEED = 7
import os
import sys
import argparse
import random
import numpy as np
# import tensorflow.python.keras as tf
from tensorflow.compat.v1 import set_random_seed
# import tensorflow.python.keras as tf
os.environ['PYTHONHASHSEED']=str(SEED)
np.random.seed(SEED)
set_random_seed(SEED)
random.seed(SEED)
from PIL import Image
from keras.utils import to_categorical
from keras.layers import Input
from keras.optimizers import Adam
from keras.utils import multi_gpu_model
from sklearn.metrics import accuracy_score
# import model
# import optimizer
def pil_loader(path):
    """Load the image file at `path` and return it as an RGB PIL image."""
    with open(path, 'rb') as handle:
        with Image.open(handle) as image:
            # Convert so downstream code always sees 3 channels.
            return image.convert('RGB')
def one_hot_encoding(param):
    """One-hot encode the source and target label arrays held in `param`.

    Returns the encoded (source_label, target_label) pair.
    """
    src_labels = param["source_label"]
    tgt_labels = param["target_label"]
    combined = np.concatenate((src_labels, tgt_labels), axis=0)
    # NOTE(review): this assumes labels are contiguous and start at 0 or 1;
    # a non-contiguous label set would make num_classes too small. Confirm.
    num_classes = np.max(combined)
    if 0 in combined:
        num_classes = num_classes + 1
    encoded_src = to_categorical(src_labels, num_classes=num_classes)
    encoded_tgt = to_categorical(tgt_labels, num_classes=num_classes)
    return encoded_src, encoded_tgt
def data_loader(filepath, inp_dims):
    """Load images and labels listed in a text file into numpy arrays.

    Each line of the file is "<relative image path> <integer label>".
    Images are resized to (inp_dims[0], inp_dims[1]).
    """
    if not os.path.isfile(filepath):
        # NOTE(review): execution deliberately continues here (the original
        # sys.exit() call was commented out), so open() below will raise.
        print("File path {} does not exist. Exiting...".format(filepath))
    images, labels = [], []
    with open(filepath, 'r', encoding='utf-8-sig') as fp:
        for line in fp:
            parts = line.split()
            image_path = "drive/My Drive/project_data/" + parts[0]
            picture = pil_loader(image_path)
            # NOTE(review): Image.ANTIALIAS was removed in Pillow 10;
            # newer Pillow needs Image.LANCZOS instead.
            picture = picture.resize((inp_dims[0], inp_dims[1]), Image.ANTIALIAS)
            images.append(np.array(picture))
            labels.append(int(parts[1]))
    return np.array(images), np.array(labels)
def batch_generator(data, batch_size):
    """Endlessly yield random mini-batches drawn from `data`.

    `data` is a list of equally long arrays; each yielded batch is that
    list restricted to one random index set, sampled without replacement
    within the batch.
    """
    num_examples = len(data[0])
    while True:
        batch_idx = np.random.choice(num_examples, size=batch_size,
                                     replace=False)
        yield [array[batch_idx] for array in data]
def train(param):
    """Build and adversarially train the classifier/discriminator pair.

    Each iteration alternates between (a) a combined step that updates the
    feature extractor + classifier while the discriminator weights are
    preserved (saved before, restored after) and (b) a discriminator-only
    step with the non-discriminator weights restored afterwards.
    Periodically evaluates on the full source/target sets and snapshots
    outputs into param["output_path"].
    """
    models = {}
    inp = Input(shape = (param["inp_dims"]))
    embedding = build_embedding(param, inp)
    classifier = build_classifier(param, embedding)
    # classifier = build_classifier(param)
    discriminator = build_discriminator(param, embedding)
    # Wrap the heads into trainable models (optionally replicated over GPUs).
    if param["number_of_gpus"] > 1:
        models["combined_classifier"] = multi_gpu_model(build_combined_classifier(inp, classifier), gpus = param["number_of_gpus"])
        models["combined_discriminator"] = multi_gpu_model(build_combined_discriminator(inp, discriminator), gpus = param["number_of_gpus"])
        models["combined_model"] = multi_gpu_model(build_combined_model(inp, [classifier, discriminator]), gpus = param["number_of_gpus"])
    else:
        models["combined_classifier"] = build_combined_classifier(inp, classifier)
        models["combined_discriminator"] = build_combined_discriminator(inp, discriminator)
        models["combined_model"] = build_combined_model(inp, [classifier, discriminator])
    models["combined_classifier"].compile(optimizer = opt_classifier(param), loss = 'categorical_crossentropy', metrics = ['accuracy'])
    models["combined_discriminator"].compile(optimizer = opt_discriminator(param), loss = 'binary_crossentropy', metrics = ['accuracy'])
    models["combined_model"].compile(optimizer = opt_combined(param), loss = {'class_act_last': 'categorical_crossentropy', 'dis_act_last': \
        'binary_crossentropy'}, loss_weights = {'class_act_last': param["class_loss_weight"], 'dis_act_last': param["dis_loss_weight"]}, metrics = ['accuracy'])
    Xs, ys = param["source_data"], param["source_label"]
    Xt, yt = param["target_data"], param["target_label"]
    # Xs_train, Xs_test, Ys_train, Ys_test = train_test_split(Xs, ys, test_size=0.25, random_state=0)
    # Xt_train, Xt_test, Yt_train, Yt_test = train_test_split(Xt, yt, test_size=0.25, random_state=0)
    # Source domain is represented by label 0 and Target by 1
    ys_adv = np.array(([0.] * ys.shape[0]))
    yt_adv = np.array(([1.] * yt.shape[0]))
    y_advb_1 = np.array(([1] * param["batch_size"] + [0] * param["batch_size"])) # For gradient reversal
    y_advb_2 = np.array(([0] * param["batch_size"] + [1] * param["batch_size"]))
    # Classification loss only applies to the labelled (source) half of a batch.
    weight_class = np.array(([1] * param["batch_size"] + [0] * param["batch_size"]))
    weight_adv = np.ones((param["batch_size"] * 2,))
    S_batches = batch_generator([Xs, ys], param["batch_size"])
    T_batches = batch_generator([Xt, np.zeros(shape = (len(Xt),))], param["batch_size"])
    param["target_accuracy"] = 0
    # Book-keeping for the best iteration seen so far.
    # NOTE(review): optim["labels"] is a 0-d array holding only the example
    # count, and the line that would store predictions is commented out
    # below -- so the saved yPred_* file never holds labels. Confirm intent.
    optim = {}
    optim["iter"] = 0
    optim["acc"] = ""
    optim["labels"] = np.array(Xt.shape[0],)
    gap_last_snap = 0
    for i in range(param["num_iterations"]):
        Xsb, ysb = next(S_batches)
        Xtb, ytb = next(T_batches)
        X_adv = np.concatenate([Xsb, Xtb])
        y_class = np.concatenate([ysb, np.zeros_like(ysb)])
        # Save discriminator weights so the combined step cannot change them.
        adv_weights = []
        for layer in models["combined_model"].layers:
            if (layer.name.startswith("dis_")):
                adv_weights.append(layer.get_weights())
        stats1 = models["combined_model"].train_on_batch(X_adv, [y_class, y_advb_1],\
            sample_weight=[weight_class, weight_adv])
        # Restore discriminator weights after the combined update.
        k = 0
        for layer in models["combined_model"].layers:
            if (layer.name.startswith("dis_")):
                layer.set_weights(adv_weights[k])
                k += 1
        # Save non-discriminator weights, train the discriminator alone, restore.
        class_weights = []
        for layer in models["combined_model"].layers:
            if (not layer.name.startswith("dis_")):
                class_weights.append(layer.get_weights())
        stats2 = models["combined_discriminator"].train_on_batch(X_adv, [y_advb_2])
        k = 0
        for layer in models["combined_model"].layers:
            if (not layer.name.startswith("dis_")):
                layer.set_weights(class_weights[k])
                k += 1
        # Periodic evaluation on the full source/target sets.
        if ((i + 1) % param["test_interval"] == 0):
            ys_pred = models["combined_classifier"].predict(Xs)
            yt_pred = models["combined_classifier"].predict(Xt)
            ys_adv_pred = models["combined_discriminator"].predict(Xs)
            yt_adv_pred = models["combined_discriminator"].predict(Xt)
            source_accuracy = accuracy_score(ys.argmax(1), ys_pred.argmax(1))
            target_accuracy = accuracy_score(yt.argmax(1), yt_pred.argmax(1))
            source_domain_accuracy = accuracy_score(ys_adv, np.round(ys_adv_pred))
            target_domain_accuracy = accuracy_score(yt_adv, np.round(yt_adv_pred))
            log_str = "iter: {:05d}: \nLABEL CLASSIFICATION: source_accuracy: {:.5f}, target_accuracy: {:.5f}\
                \nDOMAIN DISCRIMINATION: source_domain_accuracy: {:.5f}, target_domain_accuracy: {:.5f} \n"\
                .format(i, source_accuracy*100, target_accuracy*100,
                        source_domain_accuracy*100, target_domain_accuracy*100)
            print(log_str)
            if param["target_accuracy"] < target_accuracy:
                optim["iter"] = i
                optim["acc"] = log_str
                # optim["labels"] = ys_pred.argmax(1)
        # Snapshot outputs at most once per snapshot_interval iterations.
        if (gap_last_snap >= param["snapshot_interval"]):
            gap_last_snap = 0
            np.save(os.path.join(param["output_path"],"yPred_{}".format(optim["iter"])), optim["labels"])
            open(os.path.join(param["output_path"], "acc_{}.txt".format(optim["iter"])), "w").write(optim["acc"])
            models["combined_classifier"].save(os.path.join(param["output_path"],"iter_{:05d}_model.h5".format(i)))
        gap_last_snap = gap_last_snap + 1;
if __name__ == "__main__":
    # Read parameter values from the console
    parser = argparse.ArgumentParser(description = 'Domain Adaptation')
    parser.add_argument('--number_of_gpus', type = int, nargs = '?', default = '1', help = "Number of gpus to run")
    parser.add_argument('--network_name', type = str, default = 'ResNet50', help = "Name of the feature extractor network")
    parser.add_argument('--dataset_name', type = str, default = 'Office', help = "Name of the source dataset")
    parser.add_argument('--dropout_classifier', type = float, default = 0.25, help = "Dropout ratio for classifier")
    parser.add_argument('--dropout_discriminator', type = float, default = 0.25, help = "Dropout ratio for discriminator")
    parser.add_argument('--source_path', type = str, default = 'amazon_10_list.txt', help = "Path to source dataset")
    parser.add_argument('--target_path', type = str, default = 'webcam_10_list.txt', help = "Path to target dataset")
    parser.add_argument('--lr_classifier', type = float, default = 0.0001, help = "Learning rate for classifier model")
    parser.add_argument('--b1_classifier', type = float, default = 0.9, help = "Exponential decay rate of first moment \
                         for classifier model optimizer")
    parser.add_argument('--b2_classifier', type = float, default = 0.999, help = "Exponential decay rate of second moment \
                         for classifier model optimizer")
    parser.add_argument('--lr_discriminator', type = float, default = 0.00001, help = "Learning rate for discriminator model")
    parser.add_argument('--b1_discriminator', type = float, default = 0.9, help = "Exponential decay rate of first moment \
                         for discriminator model optimizer")
    parser.add_argument('--b2_discriminator', type = float, default = 0.999, help = "Exponential decay rate of second moment \
                         for discriminator model optimizer")
    parser.add_argument('--lr_combined', type = float, default = 0.00001, help = "Learning rate for combined model")
    parser.add_argument('--b1_combined', type = float, default = 0.9, help = "Exponential decay rate of first moment \
                         for combined model optimizer")
    parser.add_argument('--b2_combined', type = float, default = 0.999, help = "Exponential decay rate of second moment \
                         for combined model optimizer")
    parser.add_argument('--classifier_loss_weight', type = float, default = 1, help = "Classifier loss weight")
    parser.add_argument('--discriminator_loss_weight', type = float, default = 4, help = "Discriminator loss weight")
    parser.add_argument('--batch_size', type = int, default = 32, help = "Batch size for training")
    parser.add_argument('--test_interval', type = int, default = 3, help = "Gap between two successive test phases")
    parser.add_argument('--num_iterations', type = int, default = 12000, help = "Number of iterations")
    parser.add_argument('--snapshot_interval', type = int, default = 500, help = "Minimum gap between saving outputs")
    parser.add_argument('--output_dir', type = str, default = 'Models', help = "Directory for saving outputs")
    # args = parser.parse_args()
    # Set GPU device
    # os.environ["CUDA_VISIBLE_DEVICES"] = str(list(np.arange(args.number_of_gpus))).strip('[]')
    # Initialize parameters
    # NOTE(review): parse_args() is commented out, so every CLI flag above is
    # currently ignored -- the param dict below hard-codes all values.
    param = {}
    param["number_of_gpus"] = 1
    param["network_name"] = 'ResNet50'
    param["inp_dims"] = [224, 224, 3]
    # param["num_iterations"] = 12000
    param["num_iterations"] = 500
    param["lr_classifier"] = 0.0001
    param["b1_classifier"] = 0.9
    param["b2_classifier"] = 0.999
    param["lr_discriminator"] = 0.00001
    param["b1_discriminator"] = 0.9
    param["b2_discriminator"] = 0.999
    param["lr_combined"] = 0.00001
    param["b1_combined"] = 0.9
    param["b2_combined"] = 0.999
    param["batch_size"] = int(32/2)
    param["class_loss_weight"] = 1
    param["dis_loss_weight"] = 4
    param["drop_classifier"] = 0.25
    param["drop_discriminator"] = 0.25
    param["test_interval"] = 3
    param["source_path"] = 'drive/My Drive/project_data/your_file.txt'
    param["target_path"] = 'drive/My Drive/project_data/your_file_shelly.txt'
    # param["snapshot_interval"] = 500
    param["snapshot_interval"] = 5
    param["output_path"] = 'drive/My Drive/project_data/result2'
    # Create directory for saving models and log files
    if not os.path.exists(param["output_path"]):
        os.mkdir(param["output_path"])
    # Load source and target data
    param["source_data"], param["source_label"] = data_loader(param["source_path"], param["inp_dims"])
    param["target_data"], param["target_label"] = data_loader(param["target_path"], param["inp_dims"])
    # Encode labels into one-hot format
    param["source_label"], param["target_label"] = one_hot_encoding(param)
    # Train data
    train(param)
# + [markdown] id="C8pb9VyIqh_q" colab_type="text"
# # Train
# + id="KBL2_iTDqmX2" colab_type="code" colab={}
# def train(param):
# models = {}
# inp = Input(shape = (param["inp_dims"]))
# embedding = build_embedding(param, inp)
# discriminator = build_discriminator(param, embedding)
# models["combined_discriminator"] = build_combined_discriminator(inp, discriminator)
# models["combined_discriminator"].compile(optimizer = opt_discriminator(param), loss = 'binary_crossentropy', metrics = ['accuracy'])
# Xs, ys = param["source_data"], param["source_label"]
# Xt, yt = param["target_data"], param["target_label"]
# # Source domain is represented by label 0 and Target by 1
# ys_adv = np.array(([0.] * ys.shape[0]))
# yt_adv = np.array(([1.] * yt.shape[0]))
# y_advb_1 = np.array(([1] * param["batch_size"] + [0] * param["batch_size"])) # For gradient reversal
# y_advb_2 = np.array(([0] * param["batch_size"] + [1] * param["batch_size"]))
# weight_class = np.array(([1] * param["batch_size"] + [0] * param["batch_size"]))
# weight_adv = np.ones((param["batch_size"] * 2,))
# S_batches = batch_generator([Xs, ys], param["batch_size"])
# T_batches = batch_generator([Xt, np.zeros(shape = (len(Xt),))], param["batch_size"])
# param["target_accuracy"] = 0
# optim = {}
# optim["iter"] = 0
# optim["acc"] = ""
# optim["labels"] = np.array(Xt.shape[0],)
# gap_last_snap = 0
# for i in range(param["num_iterations"]):
# Xsb, ysb = next(S_batches)
# Xtb, ytb = next(T_batches)
# X_adv = np.concatenate([Xsb, Xtb])
# y_class = np.concatenate([ysb, np.zeros_like(ysb)])
# adv_weights = []
# stats2 = models["combined_discriminator"].train_on_batch(X_adv, [y_advb_2])
# if ((i + 1) % param["test_interval"] == 0):
# ys_adv_pred = models["combined_discriminator"].predict(Xs)
# yt_adv_pred = models["combined_discriminator"].predict(Xt)
# source_domain_accuracy = accuracy_score(ys_adv, np.round(ys_adv_pred))
# target_domain_accuracy = accuracy_score(yt_adv, np.round(yt_adv_pred))
# print(source_domain_accuracy)
# print(target_domain_accuracy)
# + id="W2tAkuZEvWDO" colab_type="code" colab={}
# Sanity check: iterate the target list file and confirm every listed
# image can be opened and converted to RGB (only the last one is kept).
with open('drive/My Drive/project_data/your_file_shelly.txt','r',encoding='utf-8-sig') as fp:
    for line in fp:
        token = line.split()
        print(token[0])
        image_path = "drive/My Drive/project_data/"+token[0]
        # print(image_path)
        with open(image_path, 'rb') as f:
            with Image.open(f) as img:
                image= img.convert('RGB')
# + [markdown] id="iqYXgaotqk_g" colab_type="text"
# # Model
# + id="Bxcy4fLWqpgf" colab_type="code" colab={}
import random
import numpy as np
from keras.models import Model
from keras.applications.resnet50 import ResNet50
from keras.layers import Input, Conv2D, MaxPool2D, Flatten, Dense
from keras.layers import BatchNormalization, Activation, Dropout
# from keras_vggface.vggface import VGGFace
# from sklearn.preprocessing import LabelEncoder
# from sklearn.model_selection import train_test_split
def build_embedding(param, inp):
    """Feature extractor: pretrained backbone named in param, flattened.

    The backbone class is resolved from param["network_name"].
    """
    # NOTE(review): eval() on a config string executes arbitrary code if the
    # value is ever untrusted -- a dict mapping names to classes is safer.
    backbone_cls = eval(param["network_name"])
    backbone = backbone_cls(weights = 'imagenet', include_top = False)
    features = backbone(inp)
    return Flatten()(features)
def build_classifier(param, embedding):
    """Label-prediction head: two Dense+BN+ReLU+Dropout stages, softmax out.

    The output layer width matches the number of one-hot label classes.
    """
    h = Dense(400, name = 'class_dense1')(embedding)
    h = BatchNormalization(name = 'class_bn1')(h)
    h = Activation('relu', name = 'class_act1')(h)
    h = Dropout(param["drop_classifier"], name = 'class_drop1')(h)
    h = Dense(100, name = 'class_dense2')(h)
    h = BatchNormalization(name = 'class_bn2')(h)
    h = Activation('relu', name = 'class_act2')(h)
    h = Dropout(param["drop_classifier"], name = 'class_drop2')(h)
    h = Dense(param["source_label"].shape[1], name = 'class_dense_last')(h)
    h = BatchNormalization(name = 'class_bn_last')(h)
    return Activation('softmax', name = 'class_act_last')(h)
def build_discriminator(param, embedding):
    """Domain-discrimination head: two Dense+BN+ReLU+Dropout stages, sigmoid out.

    All layer names carry the 'dis_' prefix so train() can freeze/restore
    them by name.
    """
    h = Dense(400, name = 'dis_dense1')(embedding)
    h = BatchNormalization(name='dis_bn1')(h)
    h = Activation('relu', name = 'dis_act1')(h)
    h = Dropout(param["drop_discriminator"], name = 'dis_drop1')(h)
    h = Dense(100, name = 'dis_dense2')(h)
    h = BatchNormalization(name='dis_bn2')(h)
    h = Activation('relu', name = 'dis_act2')(h)
    h = Dropout(param["drop_discriminator"], name = 'dis_drop2')(h)
    h = Dense(1, name = 'dis_dense_last')(h)
    h = BatchNormalization(name = 'dis_bn_last')(h)
    return Activation('sigmoid', name = 'dis_act_last')(h)
def build_combined_classifier(inp, classifier):
    """Model mapping the input tensor to the class prediction only."""
    return Model(inputs = inp, outputs = [classifier])


def build_combined_discriminator(inp, discriminator):
    """Model mapping the input tensor to the domain prediction only."""
    return Model(inputs = inp, outputs = [discriminator])


def build_combined_model(inp, comb):
    """Model mapping the input tensor to both heads jointly."""
    return Model(inputs = inp, outputs = comb)
# + id="57CQpjm7OklS" colab_type="code" colab={}
pip install keras_vggface
# + [markdown] id="1DrTIu1SqtIe" colab_type="text"
# # Optimizer
# + id="ve8K7kUmqu5W" colab_type="code" outputId="8f248405-603d-4a8f-ffa0-8a1eaf6c98a8" colab={"base_uri": "https://localhost:8080/", "height": 34}
import numpy as np
from keras.optimizers import Adam
def opt_classifier(param):
    """Adam optimizer configured for the classifier path."""
    return Adam(lr=param["lr_classifier"],
                beta_1=param["b1_classifier"],
                beta_2=param["b2_classifier"])


def opt_discriminator(param):
    """Adam optimizer configured for the discriminator path."""
    return Adam(lr=param["lr_discriminator"],
                beta_1=param["b1_discriminator"],
                beta_2=param["b2_discriminator"])


def opt_combined(param):
    """Adam optimizer configured for the combined model."""
    return Adam(lr=param["lr_combined"],
                beta_1=param["b1_combined"],
                beta_2=param["b2_combined"])
# + [markdown] id="YHRxvLjYqwvG" colab_type="text"
# # Drive
# + id="cxlgU5w6qyJO" colab_type="code" outputId="d12c6def-3a07-4448-e855-bad61dcb339c" colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/drive')
| Shelly_Adversarial_Domain_Adaptation_with_Keras.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# # EP 15: Recursion
# EP 15.3: integer permutation
A = [3,7]
res = []
def permutate(x):
    """Return a list of all permutations of the list `x`.

    Fixes the original implementation, which swapped elements in place,
    recursed on a slice (a copy, so the recursion's work was discarded)
    and appended the same mutated list object repeatedly -- yielding
    duplicated, incorrect results (e.g. [[7, 3], [7, 3]] for [3, 7]).
    """
    N = len(x)
    if N <= 1:
        # Copy so callers never share/mutate the input list.
        return [x[:]]
    perms = []
    for i in range(N):
        # Fix x[i] as the head, permute the remaining elements.
        rest = x[:i] + x[i + 1:]
        for tail in permutate(rest):
            perms.append([x[i]] + tail)
    return perms
res = permutate(A)
res
# EP 15.4: integer subsets
# EP 15.3:
# EP 15.3: integer permutation
| recursion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <b>Calcule a integral dada</b>
# $21. \int \frac{3u - 3}{(u^2 - 2u + 6)^2}du$
# $v = u^2 - 2u + 6$
# $dv = (2u - 2)\,du$
# <b>Simplificando as equações</b>
# $3u - 3 \rightarrow 3(u - 1)$
# $2u - 2 \rightarrow 2(u - 1)$
# <b>Aplicando as substituições</b>
# $\int \frac{3u - 3}{(u^2 - 2u + 6)^2}du \rightarrow \frac{3}{2} \cdot \int \frac{1}{v^2} dv$
# <b>Integrando $\frac{3}{2} \cdot \int v^{-2} dv$</b>
# $\frac{3}{2} \cdot \int v^{-2}dv = -\frac{3}{2}v^{-1} + C$
# $\frac{3}{2} \cdot \int v^{-2}dv = -\frac{3}{2} \cdot \frac{1}{v} + C$
# <b>Desfazendo as substituições</b>
# $\frac{3}{2} \cdot \int v^{-2}dv = -\frac{3}{2(u^2 - 2u + 6)} + C$
| Problemas 5.2/21.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
def sigmoid(x):
    """
    Logistic sigmoid: squashes any real input into the open interval (0, 1).
    """
    neg_exp = np.exp(-x)
    return 1 / (1 + neg_exp)
# Single gradient-descent step for a one-neuron sigmoid network.
learnrate = 0.5
x = np.array([1, 2])          # input features
y = np.array(0.5)             # target value
w = np.array([0.5, -0.5])     # initial weights

# Forward pass: weighted sum through the sigmoid activation.
nn_output = sigmoid(np.dot(x, w))

# Error term (target minus prediction).
error = y - nn_output

# Weight update: learnrate * error * sigmoid'(h) * x,
# where sigmoid'(h) = output * (1 - output).
del_w = learnrate * error * nn_output * (1 - nn_output) * x

print('Neural Network output:')
print(nn_output)
print('Amount of Error:')
print(error)
print('Change in Weights:')
print(del_w)
| test_code/05-GradientDescent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # 1-1.5 Intro Python
# ## Getting started with Python in Jupyter Notebooks
# - Python 3 in Jupyter notebooks
# - `print()`
# - comments
# - data types basics
# - variables
# - addition with Strings and Integers
# - Errors
# - **character art**
#
# -----
#
#
# ><font size="5" color="#00A0B2" face="verdana"> <B>Student will be able to</B></font>
# - use Python 3 in Jupyter notebooks
# - write working code using `print()`, `type()`, comments and variables
# - combine Strings using string addition (+)
# - add numbers in code (+)
# - troubleshoot errors
# - **create character art**
# -
# #
# <font size="6" color="#00A0B2" face="verdana"> <B>Extra Activity</B></font>
# ## print() character art
# []( http://edxinteractivepage.blob.core.windows.net/edxpages/f7cff1a7-5601-48a1-95a6-fd1fdfabd20e.html?details=[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d7a3d1b4-8d8d-4e9e-a984-a6920bcd7ca1/Unit1_Section1.5-ASCII_Art.ism/manifest","type":"application/vnd.ms-sstr+xml"}],[{"src":"http://jupyternootbookwams.streaming.mediaservices.windows.net/d7a3d1b4-8d8d-4e9e-a984-a6920bcd7ca1/Unit1_Section1.5-ASCII_Art.vtt","srclang":"en","kind":"subtitles","label":"english"}])
# print() output to the console can create character art in the forms of pictures and stylized text. Below we show how to create a stylized letter "A"
# the letter 'A' drawn line by line with asterisks
print(" *")
print(" * *")
print(" *****")
print(" * *")
print("* *")
print()
# <font size="6" color="#B24C00" face="verdana"> <B>Extra Task</B></font>
# - create the flying bird in character art in the Code cell below
#
# ```
#
# _ _
# \ /
# \ . . /
# V
#
# ```
#
# [ ] create flying bird character art
# (fixed: the stray `create` token raised a NameError at run time, and the
# "\ " / "\." sequences are invalid escapes that warn on Python >= 3.12;
# raw strings print the same characters without the warning)
print(r"_ _")
print(r" \ / ")
print(r" \ . . /")
print(r" V ")
print()
# <font size="4" color="#B24C00" face="verdana"> <B>Extra Task cont...</B></font>
# - create the capital letter "E" in character art in the Code cell below
# [ ] capital letter "E" character art, drawn with the letter 'e'
print("eeeeeee")
print("e")
print("eeee")
print("e")
print("e")
print("eeeeeee")
print()
# [Terms of use](http://go.microsoft.com/fwlink/?LinkID=206977) [Privacy & cookies](https://go.microsoft.com/fwlink/?LinkId=521839) © 2017 Microsoft
| Python Absolute Beginner/Module_1_1.5_Absolute_Beginner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Finger Detection and Counting
# ## Imports
# +
import cv2
import numpy as np
# Used for distance calculation later on
from sklearn.metrics import pairwise
# -
# ### Global Variables
#
# We will use these as we go along.
# +
# This background will be a global variable that we update through a few functions
background = None
# Start with a halfway point between 0 and 1 of accumulated weight
accumulated_weight = 0.5
# Manually set up our ROI for grabbing the hand.
# Feel free to change these. I just chose the top right corner for filming.
roi_top = 20
roi_bottom = 300
roi_right = 300
roi_left = 600
# -
# ## Finding Average Background Value
#
# The function calculates the weighted sum of the input image src and the accumulator dst so that dst becomes a running average of a frame sequence:
def calc_accum_avg(frame, accumulated_weight):
    '''
    Given a frame and an accumulation weight, fold the frame into the
    global running-average background model.
    '''
    # Grab the background (module-level running average, starts as None)
    global background
    # For first time, create the background from a copy of the frame.
    if background is None:
        background = frame.copy().astype("float")
        return None
    # Compute weighted average, accumulate it and update the background in place
    cv2.accumulateWeighted(frame, background, accumulated_weight)
# ## Segment the Hand Region in Frame
def segment(frame, threshold=25):
    """Segment the hand from `frame` via background subtraction.

    Returns (thresholded, hand_segment) or None when no contour is found.
    """
    global background
    # Calculates the absolute difference between the background and the passed in frame
    diff = cv2.absdiff(background.astype("uint8"), frame)
    # Apply a threshold to the image so we can grab the foreground
    # We only need the threshold, so we will throw away the first item in the tuple with an underscore _
    _ , thresholded = cv2.threshold(diff, threshold, 255, cv2.THRESH_BINARY)
    # Grab the external contours from the image
    # Again, only grabbing what we need here and throwing away the rest
    # NOTE(review): the 3-value unpacking matches OpenCV 3.x; OpenCV 4.x
    # returns only (contours, hierarchy) -- confirm the installed version.
    image, contours, hierarchy = cv2.findContours(thresholded.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # If length of contours list is 0, then we didn't grab any contours!
    if len(contours) == 0:
        return None
    else:
        # Given the way we are using the program, the largest external contour should be the hand (largest by area)
        # This will be our segment
        hand_segment = max(contours, key=cv2.contourArea)
        # Return both the hand segment and the thresholded hand image
        return (thresholded, hand_segment)
# ## Counting Fingers with a Convex Hull
#
# We just calculated the external contour of the hand. Now using that segmented hand, let's see how to calculate fingers. Then we can count how many are up!
#
# Example of ConvexHulls:
#
# <img src="images/hand_convex.png">
def count_fingers(thresholded, hand_segment):
    """Count raised fingers in a thresholded hand image via its convex hull."""
    # Calculate the convex hull of the hand segment
    conv_hull = cv2.convexHull(hand_segment)
    # Now the convex hull will have at least 4 most outward points, on the top, bottom, left, and right.
    # Let's grab those points by using argmin and argmax. Keep in mind, this would require reading the documentation
    # And understanding the general array shape returned by the conv hull.
    # Find the top, bottom, left , and right.
    # Then make sure they are in tuple format
    top = tuple(conv_hull[conv_hull[:, :, 1].argmin()][0])
    bottom = tuple(conv_hull[conv_hull[:, :, 1].argmax()][0])
    left = tuple(conv_hull[conv_hull[:, :, 0].argmin()][0])
    right = tuple(conv_hull[conv_hull[:, :, 0].argmax()][0])
    # In theory, the center of the hand is half way between the top and bottom and halfway between left and right
    cX = (left[0] + right[0]) // 2
    cY = (top[1] + bottom[1]) // 2
    # find the maximum euclidean distance between the center of the palm
    # and the most extreme points of the convex hull
    # Calculate the Euclidean Distance between the center of the hand and the left, right, top, and bottom.
    distance = pairwise.euclidean_distances([(cX, cY)], Y=[left, right, top, bottom])[0]
    # Grab the largest distance
    max_distance = distance.max()
    # Create a circle with 80% (0.8) of the max euclidean distance as radius
    radius = int(0.8 * max_distance)
    circumference = (2 * np.pi * radius)
    # Now grab an ROI of only that circle
    circular_roi = np.zeros(thresholded.shape[:2], dtype="uint8")
    # draw the circular ROI
    cv2.circle(circular_roi, (cX, cY), radius, 255, 10)
    # Using bit-wise AND with the circle ROI as a mask.
    # This then returns the cut out obtained using the mask on the thresholded hand image.
    circular_roi = cv2.bitwise_and(thresholded, thresholded, mask=circular_roi)
    # Grab contours in circle ROI
    # NOTE(review): 3-value findContours return is OpenCV 3.x -- see segment().
    image, contours, hierarchy = cv2.findContours(circular_roi.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # Finger count starts at 0
    count = 0
    # loop through the contours to see if we count any more fingers.
    for cnt in contours:
        # Bounding box of countour
        (x, y, w, h) = cv2.boundingRect(cnt)
        # Increment count of fingers based on two conditions:
        # 1. Contour region is not the very bottom of hand area (the wrist)
        out_of_wrist = ((cY + (cY * 0.25)) > (y + h))
        # 2. Number of points along the contour does not exceed 25% of the circumference of the circular ROI (otherwise we're counting points off the hand)
        limit_points = ((circumference * 0.25) > cnt.shape[0])
        if out_of_wrist and limit_points:
            count += 1
    return count
# ## Run Program
# +
cam = cv2.VideoCapture(0)
# Intialize a frame count
num_frames = 0
# keep looping, until interrupted
while True:
# get the current frame
ret, frame = cam.read()
# flip the frame so that it is not the mirror view
frame = cv2.flip(frame, 1)
# clone the frame
frame_copy = frame.copy()
# Grab the ROI from the frame
roi = frame[roi_top:roi_bottom, roi_right:roi_left]
# Apply grayscale and blur to ROI
gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)
# For the first 30 frames we will calculate the average of the background.
# We will tell the user while this is happening
if num_frames < 60:
calc_accum_avg(gray, accumulated_weight)
if num_frames <= 59:
cv2.putText(frame_copy, "WAIT! GETTING BACKGROUND AVG.", (200, 400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
cv2.imshow("Finger Count",frame_copy)
else:
# now that we have the background, we can segment the hand.
# segment the hand region
hand = segment(gray)
# First check if we were able to actually detect a hand
if hand is not None:
# unpack
thresholded, hand_segment = hand
# Draw contours around hand segment
cv2.drawContours(frame_copy, [hand_segment + (roi_right, roi_top)], -1, (255, 0, 0),1)
# Count the fingers
fingers = count_fingers(thresholded, hand_segment)
# Display count
cv2.putText(frame_copy, str(fingers), (70, 45), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 2)
# Also display the thresholded image
cv2.imshow("Thesholded", thresholded)
# Draw ROI Rectangle on frame copy
cv2.rectangle(frame_copy, (roi_left, roi_top), (roi_right, roi_bottom), (0,0,255), 5)
# increment the number of frames for tracking
num_frames += 1
# Display the frame with segmented hand
cv2.imshow("Finger Count", frame_copy)
# Close windows with Esc
k = cv2.waitKey(1) & 0xFF
if k == 27:
break
# Release the camera and destroy all the windows
cam.release()
cv2.destroyAllWindows()
# -
| .ipynb_checkpoints/Finger_Count-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Problem 010
# # Summation of primes
#
# The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
#
# Find the sum of all the primes below two million.
#
# +
# Time the prime sum at n = 2e3, then repeat below at 10x steps up to 2e6
# to observe how prime_below scales.
from scripts.myfunc import prime_below
import time
start_time = time.time()
n = 2e3
primes = prime_below(n)
print(sum(primes))
print(time.time()-start_time, 'sec')
# +
from scripts.myfunc import prime_below
import time
start_time = time.time()
n = 2e4
primes = prime_below(n)
print(sum(primes))
print(time.time()-start_time, 'sec')
# +
from scripts.myfunc import prime_below
import time
start_time = time.time()
n = 2e5
primes = prime_below(n)
print(sum(primes))
print(time.time()-start_time, 'sec')
# +
# Final run at the problem's actual bound (two million).
from scripts.myfunc import prime_below
import time
start_time = time.time()
n = 2e6
primes = prime_below(n)
print(sum(primes))
print(time.time()-start_time, 'sec')
# -
| notebooks/problem_solved/problem_010_Summation_of_primes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: virtualenv
# language: python
# name: virtualenv
# ---
# # Critical Assesment of Baselines
# +
# %matplotlib inline
import pylab
pylab.rcParams['figure.figsize'] = (10.0, 8.0)
import os
import numpy as np
from src.python.baselines import *
from tempfile import gettempdir
tmp_dir = gettempdir()
# -
# Connect to the local MongoDB instance holding the prot2vec database.
# NOTE(review): MongoClient is assumed to be re-exported by the star import
# of src.python.baselines above — confirm.
client = MongoClient("mongodb://127.0.0.1:27017")
db = client['prot2vec']
# ### Molecular Function
# Evaluate the listed baseline methods per GO aspect: F = molecular function.
evaluate_performance(db, ["naive", "blast", "seq2go-proba"], 'F')
# ### Cellular Component
evaluate_performance(db, ["naive", "seq2go"], 'C')
# ### Biological Process
evaluate_performance(db, ["naive", "blast"], 'P')
| src/python/baselines-drilldwon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import osmium as osm
import pandas as pd
import numpy as np
class TimelineHandler(osm.SimpleHandler):
    """Collect every node/way/relation revision from an OSM history file.

    Each visited element revision is stored as one flat record so the
    result can later be turned into a pandas DataFrame.
    """
    def __init__(self):
        super().__init__()
        # One row per revision: [type, id, version, visible, ts, uid, changeset]
        self.elements = []
    def add_elements(self, e, elem_type):
        """Append one revision record for element *e* of kind *elem_type*."""
        record = [
            elem_type,
            e.id,
            e.version,
            e.visible,
            pd.Timestamp(e.timestamp),
            e.uid,
            e.changeset,
        ]
        self.elements.append(record)
    def node(self, n):
        """osmium callback: one call per node revision."""
        self.add_elements(n, 'node')
    def way(self, w):
        """osmium callback: one call per way revision."""
        self.add_elements(w, 'way')
    def relation(self, r):
        """osmium callback: one call per relation revision."""
        self.add_elements(r, 'relation')
# Parse the OSM history extract; every element revision becomes one row.
tlhandler = TimelineHandler()
tlhandler.apply_file("data/ottgat.osh.pbf")
colnames = ['type', 'id', 'version', 'visible', 'ts', 'uid', 'chgset']
elements = pd.DataFrame(tlhandler.elements, columns=colnames)
elements = elements.sort_values(by=['type', 'id', 'ts'])
# Per-user metadata: first and last contribution timestamps.
user_md = (elements.groupby('uid')['ts']
           .agg(["min", "max"])
           .reset_index())
user_md.columns = ['uid', 'first_at', 'last_at']
# Contribution lifespan, in fractional days.
user_md['lifespan'] = ((user_md.last_at - user_md.first_at)
                       / pd.Timedelta('1d'))
# Days between a user's first edit and the newest timestamp in the extract.
extraction_date = elements.ts.max()
user_md['n_inscription_days'] = ((extraction_date - user_md.first_at)
                                 / pd.Timedelta('1d'))
# Number of distinct calendar days on which each user was active.
elements['ts_round'] = elements.ts.apply(lambda x: x.round('d'))
user_md['n_activity_days'] = (elements
                              .groupby('uid')['ts_round']
                              .nunique()
                              .reset_index())['ts_round']
# NOTE(review): sort_values returns a copy — without assignment this line
# has no effect on user_md.
user_md.sort_values(by=['first_at'])
# Total modification count per user, merged in (users without rows get 0).
newfeature = (elements.groupby(['uid'])['id']
              .count()
              .reset_index()
              .fillna(0))
newfeature.columns = ['uid', "n_total_modif"]
user_md = pd.merge(user_md, newfeature, on='uid', how="outer").fillna(0)
# Same count, restricted to node modifications.
newfeature = (elements.query('type == "node"').groupby(['uid'])['id']
              .count()
              .reset_index()
              .fillna(0))
newfeature.columns = ['uid', "n_total_modif_node"]
user_md = pd.merge(user_md, newfeature, on='uid', how="outer").fillna(0)
# Same count, restricted to way modifications.
newfeature = (elements.query('type == "way"').groupby(['uid'])['id']
              .count()
              .reset_index()
              .fillna(0))
newfeature.columns = ['uid', "n_total_modif_way"]
user_md = pd.merge(user_md, newfeature, on='uid', how="outer").fillna(0)
# Same count, restricted to relation modifications.
newfeature = (elements.query('type == "relation"').groupby(['uid'])['id']
              .count()
              .reset_index()
              .fillna(0))
newfeature.columns = ['uid', "n_total_modif_relation"]
user_md = pd.merge(user_md, newfeature, on='uid', how="outer").fillna(0)
# First and latest version number of each element, so every revision can be
# flagged as the creating one (init) or the current one (up_to_date).
osmelem_versioning = (elements.groupby(['type', 'id'])['version']
                      .agg(["first", "last"])
                      .reset_index())
osmelem_versioning.columns = ['type', 'id', 'vmin', 'vmax']
elements = pd.merge(elements, osmelem_versioning, on=['type', 'id'])
elements['init'] = elements.version == elements.vmin
elements['up_to_date'] = elements.version == elements.vmax
# note that 'elements' is sorted by type, id and ts
# A revision "will be corrected" if the next row belongs to the same element
# but a different user, and "auto-corrected" if it is the same user again.
elements['willbe_corr'] = np.logical_and(elements.id.diff(-1)==0,
                                         elements.uid.diff(-1)!=0)
elements['willbe_autocorr'] = np.logical_and(elements.id.diff(-1)==0,
                                             elements.uid
                                             .diff(-1)==0)
# +
def create_count_features(metadata, element_type, data, grp_feat, res_feat, feature_suffix):
    """Count rows of *data* per *grp_feat* and merge the counts into *metadata*.

    The new column is named ``n_<element_type>_modif<feature_suffix>``; keys
    present in only one side survive the outer merge and missing counts
    become 0.
    """
    feature_name = f'n_{element_type}_modif{feature_suffix}'
    counts = data.groupby([grp_feat])[res_feat].count().reset_index().fillna(0)
    counts.columns = [grp_feat, feature_name]
    return pd.merge(metadata, counts, on=grp_feat, how="outer").fillna(0)
def extract_modif_features(metadata, data, element_type):
    """Add per-user modification-count features for one OSM element type.

    Counts are computed over several subsets of *data* restricted to
    *element_type*: all modifications, creations, improvements, deletions,
    up-to-date versions, corrections by others, and self-corrections.
    """
    typed_data = data.query('type==@element_type')
    # (row filter, column-name suffix) for each derived feature; None keeps
    # every row of typed_data.
    subsets = [
        (None, ''),
        ("init", "_cr"),
        ("not init and visible", "_imp"),
        ("not init and not visible", "_del"),
        ("up_to_date", "_utd"),
        ("willbe_corr", "_cor"),
        ("willbe_autocorr", "_autocor"),
    ]
    for condition, suffix in subsets:
        subset = typed_data if condition is None else typed_data.query(condition)
        metadata = create_count_features(metadata, element_type, subset,
                                         'uid', 'id', suffix)
    return metadata
# -
# Add the per-type modification features for all three element types, then
# persist the per-user metadata table.
user_md = extract_modif_features(user_md, elements, 'node')
user_md = extract_modif_features(user_md, elements, 'way')
user_md = extract_modif_features(user_md, elements, 'relation')
user_md = user_md.set_index('uid')
user_md.to_csv("usermd.csv")
| notebooks/version.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#from audioModel import predict, train, accuracy, getCorrectAndIncorrect
#from audioInput import getLaughTracks
import tensorflow as tf
from audioModel import predict, accuracy, getCorrectAndIncorrect
from audioInput import readFolderRecursive
from audioDisplay import getModel, displayAudio
from audioInput_v2 import gatherTestingData, getOneHot, concatSamples
from audioDisplay import printResults
import numpy as np
# +
# Test-set definitions: sample directories, class names (directory order is
# assumed to match class order) and the saved model to evaluate.
defs = {
    'orchestra': {
        'dirs': ['data/samples/test-samples/orchestra-test','data/samples/test-samples/wolves-test'],
        'classes': ['orchestra', 'wolves'],
        'model_name': 'orchestra-wolves',
    },
    'guitar': {
        'dirs': ['data/samples/test-samples/guitar','data/samples/test-samples/piano'],
        'classes': ['guitar', 'piano'],
        'model_name': 'guitar-piano',
    },
    'laugh': {
        'dirs': ['data/samples/laughter-test','data/samples/notlaughter-test'],
        'classes': ['laughter', 'notlaughter'],
        'model_name': 'laffbox',
    }
}
# Choose which classifier to evaluate below.
definition = defs['guitar']
# +
# Collect all audio files, chunk them into model inputs, and run inference.
files = []
for d in definition['dirs']:
    files += readFolderRecursive(d)
x, chunks = gatherTestingData(files)
model_name = getModel(definition['model_name'])
preds = predict(model_name, len(definition['classes']), x)
# +
def getLabelsFromChunks(chunks):
    """Build one-hot labels for each chunk from its parent directory name.

    The directory containing a chunk's file is its class label
    (e.g. '.../guitar/clip.wav' -> 'guitar'); labels are numbered in order
    of first appearance, so the ordering matches the directory scan order.
    """
    # Map each label to a stable class index: 0, 1, ... in first-seen order.
    # (The previous version started at -1 and re-assigned the index on every
    # chunk, which only worked by accident when files were grouped by dir.)
    labels = {}
    for chunk in chunks:
        label = chunk['file'].split('/')[-2]
        if label not in labels:
            labels[label] = len(labels)
    classes = len(labels)
    one_hot_labels = None
    for chunk in chunks:
        # Same parent-directory extraction as above; the previous version
        # indexed with [2:3] here, which broke for paths deeper than 3 levels
        # (e.g. data/samples/test-samples/guitar/...).
        label = chunk['file'].split('/')[-2]
        one_hots = getOneHot(classes, labels[label])
        if one_hot_labels is None:
            one_hot_labels = [one_hots]
        else:
            one_hot_labels = np.concatenate((one_hot_labels, [one_hots]), axis=0)
    return one_hot_labels
def displayCorrectIncorrectResults(preds, labels, chunks):
    """Print overall accuracy and show up to five misclassified chunks.

    *preds* and *labels* are parallel arrays of one-hot rows; *chunks*
    carries the source metadata printed for each wrong prediction.
    """
    # printResults may have side effects, so both calls are kept even though
    # their return values are not used here.
    pretty = printResults(preds)
    prettyLabels = printResults(labels)
    print(preds, labels)
    print('accuracy', accuracy(preds, labels))
    correct, incorrect = getCorrectAndIncorrect(preds, labels)
    # Show a handful of misclassified examples with their chunk info.
    for result in incorrect[:5]:
        idx = result['i']
        print(idx, chunks[idx])
# Evaluate the predictions against labels derived from each chunk's directory.
displayCorrectIncorrectResults(preds, getLabelsFromChunks(chunks), chunks)
#printResults2(preds, ])
# -
# Inspect the raw audio of one sample chunk.
chunks[123]['audio']
| src/Testing prediction in browser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3-azureml
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# + gather={"logged": 1616913470803}
import numpy as np  # conventional alias (was "import numpy as numpy"; unused below)
import pandas as pd
import matplotlib.pyplot as plt
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616913483258}
# Load the IBM HR employee-attrition dataset.
employee = pd.read_csv('WA_Fn-UseC_-HR-Employee-Attrition.csv')
employee
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616913853367}
employee.tail()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616913748231}
employee.head()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616913620557}
employee.info()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616913646999}
employee.describe()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616915847761}
employee.dtypes
# + [markdown] nteract={"transient": {"deleting": false}}
# Socio-Demographic Effect on Attrition
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616914643106}
# Select the column before aggregating: calling .mean() on the whole grouped
# frame raises on the non-numeric columns in pandas >= 2.0, and this form
# also avoids aggregating columns we discard anyway.
age_effect = employee.groupby('Attrition')['Age'].mean()
age_effect
#The average age of employees who are lost by attrition 33.61
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616919690060}
dist_effect = employee.groupby('Attrition')['DistanceFromHome'].mean()
dist_effect
# The average distance from home for those who exited the company through attrition was higher (10.63)
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# + [markdown] nteract={"transient": {"deleting": false}}
# Effect of renumeration and other benefits on attrition
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616916235791}
# Column-normalised crosstab: travel-frequency distribution within each
# attrition group.
BusinessTravel = pd.crosstab(employee['BusinessTravel'],employee['Attrition'], margins = False, normalize='columns')
BusinessTravel
# a higher proportion of those who travel for business left the company through attrition.
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616919495617}
MonthlyIncome_effect = employee.groupby('Attrition')['MonthlyIncome'].mean()
MonthlyIncome_effect
#Employees who exited the company through attrition had lower average monthly Income.
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} gather={"logged": 1616919901908}
TrainingTimesLastYear = pd.crosstab(employee['TrainingTimesLastYear'],employee['Attrition'], margins = False, normalize='columns')
TrainingTimesLastYear
'''
The proportion of employees who exited with no training per year was higher than those who are still with the company with no training per year
Number of training per year tend to retain employees
'''
| .ipynb_aml_checkpoints/Assignment-2-3-checkpoint2021-2-28-10-30-10.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Looking at EMNIST data
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from importlib.util import find_spec
if find_spec("text_recognizer") is None:
import sys
sys.path.append('..')
from text_recognizer.data.emnist import EMNIST
# -
# Download/prepare EMNIST and set up the train/val/test splits.
data = EMNIST()
data.prepare_data()
data.setup()
print(data)
# test_dataloader method is defined in the BaseDataModule class which is inherited by the EMNIST data class
x, y = next(iter(data.test_dataloader()))
# Inspect one test batch: pixel statistics for x, label range for y.
print(x.shape, x.dtype, x.min(), x.mean(), x.std(), x.max())
print(y.shape, y.dtype, y.min(), y.max())
# Show 9 random test images with their character labels.
fig = plt.figure(figsize=(9, 9))
for i in range(9):
    ax = fig.add_subplot(3, 3, i + 1)
    rand_i = np.random.randint(len(data.data_test))
    image, label = data.data_test[rand_i]
    ax.imshow(image.reshape(28, 28), cmap='gray')
    ax.set_title(data.mapping[label])
# ## Train a CNN model
# +
import pytorch_lightning as pl
from text_recognizer.models import CNN
from text_recognizer.lit_models import BaseLitModel
# Train a small CNN on EMNIST for 5 epochs on a single GPU.
model = CNN(data_config=data.config())
lit_model = BaseLitModel(model=model)
trainer = pl.Trainer(gpus=1, max_epochs=5)
trainer.fit(lit_model, datamodule=data)
# +
# Run the trained CNN on one test batch and compare argmax predictions
# against the true labels.
x, y = next(iter(data.test_dataloader()))
logits = model(x)  # (B, C)
print(logits.shape)
preds = logits.argmax(-1)
print(y, preds)
# -
# Show 9 random test images with the true label and the model's prediction.
fig = plt.figure(figsize=(9, 9))
for i in range(9):
    ax = fig.add_subplot(3, 3, i + 1)
    rand_i = np.random.randint(len(data.data_test))
    image, label = data.data_test[rand_i]
    image_for_model = image.unsqueeze(0)  # (1, 1, 28, 28)
    logits = model(image_for_model)  # (1, C)
    pred_ind = logits.argmax(-1)  # (1, )
    # NOTE(review): pred_ind is a length-1 tensor; this relies on
    # data.mapping accepting tensor indices — confirm, else use .item().
    pred_label = data.mapping[pred_ind]
    ax.imshow(image.reshape(28, 28), cmap='gray')
    ax.set_title(f'Correct: {data.mapping[label]}, Pred: {pred_label}')
| lab2/notebooks/01-look-at-emnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Decision Tree Model
#
# In this notebook we will experiment with a decision tree to get an optimal predicted repair time.
# ## Imports
# +
from pandas import read_csv as pd_read_csv, DataFrame
from numpy import array as np_array, sqrt as np_sqrt
import matplotlib.pyplot as plt
import pickle
from sklearn.tree import DecisionTreeClassifier, plot_tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, accuracy_score
# -
# ## Fetching
# Load the cleaned malfunction dataset (semicolon-separated).
df = pd_read_csv("../Data/CSV/cleaned.csv", sep=';', low_memory=False)
print(f'No less than {df.shape[0]} rows and {df.shape[1]} columns!')
# ## Model
# +
model = DecisionTreeClassifier()
# Predict the 15-minute-bucketed repair time from location, priority and
# cause code.
feature_names = ['geocode', 'prioriteit', 'oorzaakcode']
features = df[feature_names]
target_names = ['hersteltijd_15']
targets = df[target_names]
#: Split data into train set (80%) and test set (20%).
# NOTE(review): train_test_split defaults to a 75/25 split; pass
# test_size=0.2 if 80/20 is really intended.
features_train, features_test, targets_train, targets_test = train_test_split(features, targets, random_state=10)
model.fit(features_train, targets_train)
print(f'{len(features_train)} elementen in de trainset en {len(features_test)} elementen in de testset')
# -
# We choose a decision tree classifier in the hope that the model sees a correlation that isn't directly noticeable by comparing the numbers as individuals but as groups.
# +
#: We calculate the module score by using the test set,
#: this is why splitting your data into a training and test set is important.
score_1 = model.score(features_test, targets_test)
# An alternative way to calculate the predicted value is by predicting the model's targets by using the features test as input
# and scoring it against the actual targets of the test set.
targets_model = model.predict(features_test)
score_2 = accuracy_score(targets_test, targets_model)
# Both routes compute plain accuracy, so the two scores must be identical.
print(f'Scores (methode 1 = {score_1}) and (methode 2 = {score_2}) so the methods are equal is {score_1 == score_2}.')
# NOTE(review): RMSE over class labels is only meaningful because the
# repair-time buckets are ordered.
print(f'With a mean error of: {np_sqrt(mean_squared_error(targets_test, targets_model))}')
# -
# The scores are not optimal but reasonable, the unreasonable part is the mean squared error. We have tried multiple hyperparameters and transforming the data, but it seems to be as bad or even worse than this result. Later in this notebook we will test it on our subset.
# ## Displaying
plt.figure(figsize=(20, 10))
plot_tree(model, feature_names=feature_names, filled=True, max_depth=2, fontsize=10)
# We can see how the top part of the decision tree works, displaying it all would be far too much to take in that is why we let computers process it. At the top we see a decision the tree makes, is the oorzaakcode lower or equal to 296 proceed to the left and do that check, if not proceed to the right and to the right check. This will happen until we hit the bottom and get classified into a group.
# ## Predicting
# Score the trained model on the hand-picked subset used by the GUI.
subset_df = pd_read_csv("../Data/CSV/subset.csv", sep=';')
features = subset_df[feature_names]
targets = subset_df['hersteltijd_15']
subset_df.head()
targets_model = model.predict(features)
score = accuracy_score(targets, targets_model)
print(f'Score = {score}, predictions = {targets_model}, ground truth = {targets.values}.')
# With a score of 0.7 we would think that it does quite well but we see that it uses its default answer (0) quite a lot which just happens to match up with the subset data.
model.predict_proba(features)
# This shows the probability of each leaf (class) the repair time will fall into.
# ## Saving
subset_df['hersteltijd_dt'] = targets_model
subset_df.to_csv('../Data/CSV/subset.csv', sep=';', index=False)
# Lastly we make a new column for the predicted results and save it so we can use it in the GUI later on.
with open('../Data/Model/dt.pkl', 'wb') as file:
    pickle.dump(model, file)
# Here we save the model so we can load the trained model in the GUI.
# ## Conclusion
#
# With a score of ~0.32 the model scores suboptimal, we would like to improve this and will look into other possible models like logistic regression.
| Notebooks/3_decision_tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="_zWm890luPgj"
# **Witness** **Chiminya** **R178497R**
#
# **Talent** **Maritinyu** **R178504Y**..
#
# **Assignment** **2** **KBS**
# + id="t-tJ03YScKWO"
import pandas as pd
import numpy as np
# + id="I3PA77pUaRwq"
import cv2 # for capturing videos
import math # for mathematical operations
import matplotlib.pyplot as plt # for plotting the images
# %matplotlib inline
import pandas as pd
from keras.preprocessing import image # for preprocessing the images
import numpy as np # for mathematical operations
from keras.utils import np_utils
from skimage.transform import resize # for resizing images
# + colab={"base_uri": "https://localhost:8080/"} id="E0KfDDRGglgC" outputId="4c77a5b2-9a25-4ba4-d38c-5b814c800d7f"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="mVxN3C1-OhlZ"
# # Importing video from google drive for training
# + id="w2E8hUEFaR1f" colab={"base_uri": "https://localhost:8080/"} outputId="638b5799-bbba-41ea-c310-6430637cadde"
# Extract roughly one frame per second: a frame is written out whenever the
# current frame index is a multiple of the (floored) frame rate.
count = 0
#videoFile = "Tom and jerry.mp4"
cap = cv2.VideoCapture('/content/drive/My Drive/Colab Notebooks/Tom and jerry.mp4') # capturing the video from the given path
frameRate = cap.get(5) #frame rate (property 5 == CAP_PROP_FPS)
x=1
while(cap.isOpened()):
    frameId = cap.get(1) #current frame number (property 1 == CAP_PROP_POS_FRAMES)
    ret, frame = cap.read()
    if (ret != True):
        break
    if (frameId % math.floor(frameRate) == 0):
        filename ="frame%d.jpg" % count;count+=1
        cv2.imwrite(filename, frame)
cap.release()
print ("Done!")
# + colab={"base_uri": "https://localhost:8080/", "height": 258} id="akKlWQ94fV_g" outputId="ca1226a5-157b-4715-a13a-14fe7ccd0937"
img = plt.imread('frame0.jpg') # reading image using its name
plt.imshow(img)
# + colab={"base_uri": "https://localhost:8080/", "height": 205} id="oMhNssV7fdKB" outputId="eeed4703-c8dc-44c5-b690-97b4ba45843a"
data = pd.read_csv('/content/drive/My Drive/Colab Notebooks/mapping.csv') # reading the csv file
data.head() # printing first five rows of the file
# + id="RjoL4uF3mEw6"
# Load every extracted frame listed in the mapping file into memory.
X = [ ] # creating an empty array
for img_name in data.Image_ID:
    img = plt.imread('' + img_name)
    X.append(img) # storing each image in array X
X = np.array(X) # converting list to array
# + id="kNN8j22YmQGp"
y = data.Class
dummy_y = np_utils.to_categorical(y) # one hot encoding Classes
# + id="EeI_nUpUmQKA"
# Resize every frame to the 224x224x3 input shape expected by VGG16.
image = []
for i in range(0,X.shape[0]):
    a = resize(X[i], preserve_range=True, output_shape=(224,224)).astype(int) # reshaping to 224*224*3
    image.append(a)
X = np.array(image)
# + id="SFBkwgTemYHy"
from keras.applications.vgg16 import preprocess_input
#X = preprocess_input(X, mode='tf') # preprocessing the input data
# + id="DeuHTGyBmcJt"
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(X, dummy_y, test_size=0.3, random_state=42) # preparing the validation set
# + id="uLMbwvEyno-Z"
from keras.models import Sequential
from keras.applications.vgg16 import VGG16
from keras.layers import Dense, InputLayer, Dropout
# + [markdown] id="t4np7Wz7PJ4x"
# # Importing the pretrained VGG16 model, which uses the ImageNet dataset
# + id="WkLS3e00npCa"
#Importing thr pretrained VGG16 model whuch uses imagenet dataset
base_model = VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3)) # include_top=False to remove the top layer
# + colab={"base_uri": "https://localhost:8080/"} id="JQXu3fPTQotT" outputId="01081679-b11e-40d4-aff9-c294115ac9b6"
from keras.applications.vgg16 import VGG16
model = VGG16()
print(model.summary())
# + colab={"base_uri": "https://localhost:8080/"} id="c-ztpzHBn5-w" outputId="cbe71716-4a6c-4f26-854b-6d3401903ff9"
X_train = base_model.predict(X_train)
X_valid = base_model.predict(X_valid)
X_train.shape, X_valid.shape
# + id="BXLsYvSBoC60"
X_train = X_train.reshape(208, 7*7*512) # converting to 1-D
X_valid = X_valid.reshape(90, 7*7*512)
# + id="uICZu9mboGBn"
train = X_train/X_train.max() # centering the data
X_valid = X_valid/X_train.max()
# + [markdown] id="pDvrUQo4PP5M"
# # **Building New mdel **
# + id="z33yc8xkoNYT"
# i. Building the model
# A small classifier head on top of the frozen VGG16 features (7*7*512 inputs,
# 3 output classes).
model = Sequential()
model.add(InputLayer((7*7*512,))) # input layer
model.add(Dense(units=1024, activation='sigmoid')) # hidden layer
model.add(Dense(3, activation='softmax')) # output layer
# + colab={"base_uri": "https://localhost:8080/"} id="FdqVQOSUoOh8" outputId="0c24bd8a-cfdb-4d11-ab4e-3d231fda5078"
model.summary()
# + id="iiIyGRCSpRHu"
# ii. Compiling the model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="wf2whgQDpRNL" outputId="341e9e99-13a3-428b-9e7f-31ddb6dae96d"
# iii. Training the model
model.fit(train, y_train, epochs=100, validation_data=(X_valid, y_valid))
# + colab={"base_uri": "https://localhost:8080/"} id="JeTnvG_WxlUC" outputId="36cbaa65-cef3-414e-dcdb-9f01ff91a7c8"
# Evaluate the model
loss, acc = model.evaluate(train,y_train , verbose=2)
print("Untrained model, accuracy: {:5.2f}%".format(100 * acc))
# + [markdown] id="7ykbjwN6PYyF"
# # **Saving Model for use in the web app**
# + id="t-bsYynt_Ba_"
#Saving Model
model.save('/content/drive/My Drive/Colab Notebooks/my_model.h5')
# + colab={"base_uri": "https://localhost:8080/"} id="ro9K2nQfUqpj" outputId="4cd2135a-9c8a-43f9-9a76-617b5c371efc"
import tensorflow as tf
from tensorflow import keras
# Recreate the exact same model, including its weights and the optimizer
new_model = tf.keras.models.load_model('/content/drive/My Drive/Colab Notebooks/my_model.h5')
# Show the model architecture
new_model.summary()
# + id="CPWjM4yaYC_r"
# + [markdown] id="1ouVzmDJwD0O"
# # Predicting
# + id="BuetHYNtpWsC"
count = 0
videoFile = "<NAME> 3.mp4"
cap = cv2.VideoCapture(videoFile)
frameRate = cap.get(5) #frame rate
x=1
while(cap.isOpened()):
frameId = cap.get(1) #current frame number
ret, frame = cap.read()
if (ret != True):
break
if (frameId % math.floor(frameRate) == 0):
filename ="test%d.jpg" % count;count+=1
cv2.imwrite(filename, frame)
cap.release()
print ("Done!")
# + id="SOL_rNWIwIiT"
test = pd.read_csv('test.csv')
# + id="eipp5BOkwIk0"
test_image = []
for img_name in test.Image_ID:
img = plt.imread('' + img_name)
test_image.append(img)
test_img = np.array(test_image)
# + id="XY8SkWfxwPOK"
# Resize each test frame to the 224x224 input expected by VGG16.
test_image = []
for i in range(0,test_img.shape[0]):
    a = resize(test_img[i], preserve_range=True, output_shape=(224,224)).astype(int)
    test_image.append(a)
test_image = np.array(test_image)
# + id="RocrHbnjwSpc"
# preprocessing the images
test_image = preprocess_input(test_image, mode='tf')
# extracting features from the images using pretrained model
test_image = base_model.predict(test_image)
# converting the images to 1-D form
# NOTE(review): 186 is hard-coded to this particular video's frame count;
# test_image.reshape(len(test_image), 7*7*512) would generalize.
test_image = test_image.reshape(186, 7*7*512)
# zero centered images
test_image = test_image/test_image.max()
# + id="30ZHddQ7wZfq"
# NOTE(review): Sequential.predict_classes was removed in TF 2.6+; on newer
# versions use np.argmax(model.predict(test_image), axis=-1).
predictions = model.predict_classes(test_image)
# + id="013bFIhYwdNw"
# Each extracted frame is ~1 second of video, so label counts approximate
# on-screen seconds.
print("The screen time of JERRY is", predictions[predictions==1].shape[0], "seconds")
print("The screen time of TOM is", predictions[predictions==2].shape[0], "seconds")
# + id="MNITHYw6wjWv"
# + id="jfppwhztwjZU"
| FeatureDetectorVideo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import yaml

# Load the API key, GCS bucket name and city list from the lab config file.
# A context manager closes the file handle after parsing — the previous
# version left it open for the lifetime of the process.
with open("labClass6.yaml") as config_file:
    config = yaml.safe_load(config_file)
weather_key = config['weather_key']
bucket = config['bucket']
cities = config['cities']
# +
import requests
from datetime import date, timedelta
# Build ISO date strings for the last 7 days (today excluded).
end = date.today()
start = end - timedelta(7)
dates = [str(start+timedelta(days=x)) for x in range((end-start).days)]
# weatherapi.com history endpoint plus the key query-string fragment.
base_url = 'http://api.weatherapi.com/v1/'
history_api = 'history.json?'
auth = f'key={weather_key}'
# +
data = []
# One API call per (city, day) pair; each payload is tagged with its own
# date and city so the records stay identifiable once uploaded.
for city in cities:
    # Loop variable renamed from `date`, which shadowed datetime.date
    # imported in the setup cell above.
    for day in dates:
        request_body = base_url + history_api + auth + f"&q={city}&dt={day}"
        payload = requests.get(request_body).json()
        payload['dt'] = day
        payload['city'] = city
        data.append(payload)
# -
from google.cloud import storage
# Upload each day/city payload as its own JSON object in the bucket,
# partitioned as weather/<date>/<city>/forecast.json.
client = storage.Client()
gcs_bucket = client.get_bucket(bucket)
import json
for row in data:
    path = f"weather/{row['dt']}/{row['city']}/forecast.json"
    blob = gcs_bucket.blob(path)
    with blob.open(mode = 'w') as file:
        json.dump(row, file)
| course_labs/class_6/labClass6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pretrained GAN
# +
import os
os.environ['CUDA_VISIBLE_DEVICES']='3'
import fastai
from fastai import *
from fastai.vision import *
from fastai.callbacks import *
from torchvision.models import vgg16_bn
# -
from PIL import Image, ImageDraw, ImageFont
path = untar_data(URLs.PETS)
path_hr = path/'images'
path_lr = path/'crappy'
# +
#torch.cuda.set_device(1)
# -
# ## Crappified data
# Prepare the input data by crappifying images.
def crappify(fn,i):
    """Write a degraded copy of image *fn* under `path_lr`.

    The image is downscaled to ~96px on its shorter side, stamped with a
    random number, and re-encoded as a low-quality JPEG — producing the
    'crappy' inputs the generator learns to restore.
    """
    dest = path_lr/fn.relative_to(path_hr)
    dest.parent.mkdir(parents=True, exist_ok=True)
    img = PIL.Image.open(fn)
    targ_sz = resize_to(img, 96, use_min=True)
    img = img.resize(targ_sz, resample=PIL.Image.BILINEAR).convert('RGB')
    w, h = img.size
    # Random JPEG quality; the same number is stamped onto the image at a
    # random position in the top-left quadrant.
    quality = random.randint(10,70)
    stamp_pos = (random.randint(0,w//2), random.randint(0,h//2))
    ImageDraw.Draw(img).text(stamp_pos, str(quality), fill=(255,255,255))
    img.save(dest, quality=quality)
# Uncomment the first time you run this notebook.
# +
#il = ImageItemList.from_folder(path_hr)
#parallel(crappify, il.items)
# -
# For gradual resizing we can change the commented line here.
bs,size = 24,160
# bs,size = 8,256
arch = models.resnet34
# ## Pre-train generator
# Now let's pretrain the generator.
arch = models.resnet34
src = ImageImageList.from_folder(path_lr).random_split_by_pct(0.1, seed=42)
def get_data(bs,size):
    """Build an image-to-image DataBunch at batch size *bs* / image *size*:
    crappy inputs labelled with the high-res original of the same filename."""
    data = (src.label_from_func(lambda x: path_hr/x.name)
           .transform(get_transforms(max_zoom=2.), size=size, tfm_y=True)
           .databunch(bs=bs).normalize(imagenet_stats, do_y=True))
    data.c = 3  # 3 output channels (RGB)
    return data
data_gen = get_data(bs,size)
wd = 1e-3
def create_gen_learner():
    """U-Net generator on `data_gen` (resnet34 encoder) with self-attention,
    weight norm, blur upsampling and a flattened-MSE loss."""
    return unet_learner(data_gen, arch, wd=wd, blur=True, norm_type=NormType.Weight,
                        self_attention=True, y_range=(-3.,3.), loss_func=MSELossFlat())
learn_gen = create_gen_learner()
learn_gen.fit_one_cycle(2, pct_start=0.8)
learn_gen.unfreeze()
learn_gen.fit_one_cycle(3, slice(1e-6,1e-3))
learn_gen.show_results(rows=4)
learn_gen.save('gen-pre2')
# ## Save generated images
learn_gen.load('gen-pre2');
name_gen = 'image_gen'
path_gen = path/name_gen
path_gen.mkdir(exist_ok=True)
def save_preds(dl):
    """Run the pretrained generator over every batch of *dl* and save each
    prediction under `path_gen`, reusing the source image's filename."""
    names = dl.dataset.items
    idx = 0
    for batch in dl:
        preds = learn_gen.pred_batch(batch=batch, reconstruct=True)
        for pred in preds:
            pred.save(path_gen/names[idx].name)
            idx += 1
save_preds(data_gen.fix_dl)
PIL.Image.open(path_gen.ls()[0])
# ## Train critic
classes = [name_gen, 'images']
# +
src = ImageItemList.from_folder(path, include=classes).random_split_by_pct(0.1, seed=42)
ll = src.label_from_folder(classes=classes)
data_crit = (ll.transform(get_transforms(max_zoom=2.), size=size)
.databunch(bs=bs).normalize(imagenet_stats))
data_crit.c = 3
# -
data_crit.show_batch(rows=3, ds_type=DatasetType.Train, imgsize=3)
# +
conv_args = dict(leaky=0.2, norm_type=NormType.Spectral)
def conv(ni:int, nf:int, ks:int=3, stride:int=1, **kwargs):
    """Conv layer with the critic's defaults from `conv_args`
    (leaky ReLU, spectral norm)."""
    return conv_layer(ni, nf, ks=ks, stride=stride, **conv_args, **kwargs)
def critic(n_channels:int=3, nf:int=128, n_blocks:int=3, p:int=0.15):
    """Convolutional critic: a stride-2 stem plus dense res-block, then
    `n_blocks` stride-2 down-sampling blocks (self-attention on the first),
    ending in a 1-channel conv flattened to one score per image.
    *p* is the dropout probability."""
    layers = [
        conv(n_channels, nf, ks=4, stride=2),
        nn.Dropout2d(p/2),
        res_block(nf, dense=True,**conv_args)]
    nf *= 2 # after dense block
    for i in range(n_blocks):
        layers += [
            nn.Dropout2d(p),
            conv(nf, nf*2, ks=4, stride=2, self_attention=(i==0))]
        nf *= 2
    layers += [
        conv(nf, 1, ks=4, bias=False, padding=0, use_activ=False),
        Flatten()]
    return nn.Sequential(*layers)
# -
# Module to apply the loss function to every element of the last features before taking the mean.
class AdaptiveLoss(nn.Module):
    """Wrap a criterion so a scalar per-item target matches the output shape.

    The single target value per item is broadcast to every element of
    *output* before the wrapped criterion is applied.
    """
    def __init__(self, crit):
        super().__init__()
        self.crit = crit
    def forward(self, output, target):
        expanded = target[:, None].expand_as(output).float()
        return self.crit(output, expanded)
# Specific accuracy metric.
def accuracy_thresh_expand(y_pred:Tensor, y_true:Tensor, thresh:float=0.5, sigmoid:bool=True)->Rank0Tensor:
    "Compute accuracy when `y_pred` and `y_true` are the same size."
    if sigmoid:
        y_pred = y_pred.sigmoid()
    # Broadcast the per-item target across every prediction element before
    # comparing against the threshold.
    expanded_true = y_true[:, None].expand_as(y_pred).byte()
    hits = (y_pred > thresh) == expanded_true
    return hits.float().mean()
# Pretrain the critic on crappy vs not crappy.
def create_critic_learner(loss_func, metrics):
    """Learner wrapping a fresh critic on the real-vs-generated image data."""
    return Learner(data_crit, critic(), metrics=metrics, loss_func=loss_func, wd=wd)
learn_critic = create_critic_learner(metrics=accuracy_thresh_expand, loss_func=AdaptiveLoss(nn.BCEWithLogitsLoss()))
learn_critic.fit_one_cycle(6, 1e-3)
learn_critic.fit_one_cycle(6, 1e-3)
learn_critic.save('critic-pre2')
# ## GAN
# Now we'll combine those pretrained model in a GAN.
from fastai.vision.gan import *
# Those are the losses from before.
loss_critic = AdaptiveLoss(nn.BCEWithLogitsLoss())
loss_gen = MSELossFlat()
learn_crit=None
learn_gen=None
gc.collect()
learn_crit = create_critic_learner(metrics=None, loss_func=AdaptiveLoss(nn.BCEWithLogitsLoss())).load('critic-pre2')
learn_gen = create_gen_learner().load('gen-pre2')
# To define a GAN Learner, we just have to specify the learner objects for the generator and the critic. The switcher is a callback that decides when to switch from discriminator to generator and vice versa. Here we do as many iterations of the discriminator as needed to get its loss back < 0.5 then one iteration of the generator.
#
# The loss of the critic is given by `learn_crit.loss_func`. We take the average of this loss function on the batch of real predictions (target 1) and the batch of fake predictions (target 0).
#
# The loss of the generator is a weighted sum (weights in `weights_gen`) of `learn_crit.loss_func` on the batch of fake (passed through the critic to become predictions) with a target of 1, and the `learn_gen.loss_func` applied to the output (batch of fake) and the target (corresponding batch of superres images).
@dataclass
class GANDiscriminativeLR(LearnerCallback):
    "`Callback` that handles multiplying the learning rate by `mult_lr` for the critic."
    # Factor applied to the learning rate during critic (discriminator) phases.
    mult_lr:float = 5.
    def on_batch_begin(self, train, **kwargs):
        "Multiply the current lr if necessary."
        # Boost the LR only while training the critic (not in generator mode).
        if not self.learn.gan_trainer.gen_mode and train:
            self.learn.opt.lr *= self.mult_lr
    def on_step_end(self, **kwargs):
        "Put the LR back to its value if necessary."
        # NOTE(review): the multiply above also checks `train`, but this
        # divide runs whenever we are in critic mode -- confirm it cannot
        # fire without a matching multiplication (e.g. during validation).
        if not self.learn.gan_trainer.gen_mode:
            self.learn.opt.lr /= self.mult_lr
switcher = partial(AdaptiveGANSwitcher, critic_thresh=0.65)
#switcher = partial(FixedGANSwitcher, n_crit=1, n_gen=1)
learn = GANLearner.from_learners(learn_gen, learn_crit, weights_gen=(1.,250.), show_img=True, switcher=switcher,
opt_func=partial(optim.Adam, betas=(0.,0.99)), wd=wd)
learn.callback_fns.append(partial(GANDiscriminativeLR, mult_lr=5.))
learn.fit(1,4e-5)
learn.fit(20,4e-5)
learn.show_results()
learn.fit(40,4e-5)
learn.show_results()
learn.fit(20,4e-6)
learn.show_results(rows=24)
# ## fin
| deep-learning/fastai-docs/fastai_docs-master/dev_nb/discriminator-digit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Boosting Tutorial
# ### Author: <NAME>
# ### Version: 1.0
#
# * DataSet 1 Intro:
# * https://scikit-learn.org/stable/datasets/toy_dataset.html#breast-cancer-wisconsin-diagnostic-dataset
# * DataSet 2 Intro:
# * https://scikit-learn.org/stable/datasets/toy_dataset.html#boston-house-prices-dataset
#
# * Referenzen
# * https://scikit-learn.org/
# * https://towardsdatascience.com/ensemble-learning-bagging-boosting-3098079e5422 --> mehr Hintergrundwissen
#
# +
# Basis Bibliotheken die wir zur Ausführung benötigen
# loader klasse für den Datensatz
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_iris
# Splitter für die Daten
from sklearn.model_selection import train_test_split
# Booster verfahren und Klassifizierer
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
# -
# ## Load Data Iris
# +
# Laden des Datensatzes in x als Features und y als Labels
x, y = load_iris(return_X_y=True)
# aufteilen in Train und Test
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
# -
# ## Modelling
# +
# Boosting classifier.
# Base classifier: a shallow tree (depth 2) serves as the weak learner.
dtree = DecisionTreeClassifier(max_depth=2, random_state=42)
# Hand the base classifier to the boosting procedure.
# NOTE(review): `base_estimator` was renamed to `estimator` in
# scikit-learn 1.2 and removed in 1.4 -- confirm the pinned version.
adaboosting = AdaBoostClassifier(base_estimator=dtree, n_estimators=10, learning_rate=0.1, random_state=42)
# Train!
adaboosting.fit(x_train, y_train)
# -
# Performance
print(f"Train score: {adaboosting.score(x_train, y_train)}")
print(f"Test score: {adaboosting.score(x_test, y_test)}")
# ## Load Data BreastCancer
x, y = load_breast_cancer(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
# ## Modelling
# +
# Boosting classifier (same setup as the Iris section, on breast-cancer data).
# Base classifier: a shallow tree (depth 2) serves as the weak learner.
dtree = DecisionTreeClassifier(max_depth=2, random_state=42)
# Hand the base classifier to the boosting procedure.
# NOTE(review): `base_estimator` was renamed to `estimator` in
# scikit-learn 1.2 and removed in 1.4 -- confirm the pinned version.
adaboosting = AdaBoostClassifier(base_estimator=dtree, n_estimators=10, learning_rate=0.1, random_state=42)
# Train!
adaboosting.fit(x_train, y_train)
# -
# Performance
print(f"Train score: {adaboosting.score(x_train, y_train)}")
print(f"Test score: {adaboosting.score(x_test, y_test)}")
| BoostingClassifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Example: Model Titanic Survival
# This example is provided to demonstrate some of the typical programming activities for working with Python in a SAS Viya environment to run actions in SAS Cloud Analytic Services. The actions that are used in the example require SAS Visual Data Mining and Machine Learning.
#
# For more information, see http://support.sas.com/documentation/prod-p/vdmml/index.html.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# `IPython.core.display` is a deprecated import location; `IPython.display`
# is the supported home of display()/HTML (the old path emits a
# DeprecationWarning and has been removed in newer IPython releases).
from IPython.display import display, HTML
# %matplotlib inline
import swat
s = swat.CAS('cloud.example.com', 5570)
# ## Get the Data
# +
ulresult = s.upload('http://biostat.mc.vanderbilt.edu/wiki/pub/Main/DataSets/titanic3.csv')
titanic3 = ulresult.casTable
type(titanic3)
# -
titanic3.table.columnInfo()
# ## Add a Computed Column
# Some of the columns in the data are problematic for modeling:
# * The name column should not have a bearing on the analysis.
# * The boat and body columns are proxies for the response variable, survived.
# * The cabin column is similar to name in that it is too specific to be generalized. A computed column, deck, is created because it is slightly more general than cabin.
# +
# Create a computed variable.
titanic3.computedVars = ['deck'] # 1
# Server-side DATA step code: the first letter of `cabin` becomes `deck`.
titanic3.computedVarsProgram = \
"if cabin ne '' then deck = ksubstr(cabin,1,1); else deck = '';"
numeric=['pclass', 'survived', 'age', 'sibsp', 'parch', 'fare']
# Remove boat and body because they are proxies for survived.
# Remove ticket and cabin. Use the computed column, deck, instead.
char = ['sex', 'deck', 'embarked', 'home.dest']
# NOTE(review): `all` shadows the builtin all(); later cells rely on this
# name, so it is kept -- consider a file-wide rename to e.g. all_vars.
all = numeric + char
# -
# ## Group By Analysis: Descriptive Statistics
# The simple.summary action is used to provide some descriptive statistics. The groupBy parameter is set on the Titanic3 object so that the statistics are shown for survivors and those that did not survive.
# +
# The numeric variable was defined earlier.
results = titanic3[numeric].groupby("survived").simple.summary()
resultColumns = ['Column', 'Min', 'Max', 'N', 'NMiss', 'Mean', 'Sum', 'Std', 'StdErr'];
display(HTML('<h3>Perished</h3>'))
display(results['ByGroup1.Summary'][resultColumns]) # 1
display(HTML('<h3>Survived</h3>'))
display(results['ByGroup2.Summary'][resultColumns])
# -
# ## Sample the Data
# +
s.builtins.loadActionSet('sampling')
# The sampling.stratified action does not accept the vars parameter.
# Instead, copyVars is used to select the columns to copy to the output table.
if 'vars' in titanic3.params:
del titanic3.vars
# Temporarily set a groupBy parameter.
with titanic3:
titanic3.groupBy={'survived'}
titanic3.sampling.stratified(
partInd=True, # 1
samppct=40, # 2
seed=1234,
output={
'casout':{'name':'titanic3part', 'replace':True},
'copyVars':all
}
)
titanic3.table.dropTable() # 3
titanic3part = s.CASTable('titanic3part') # 4
ci = titanic3part.columnInfo()
display(ci)
# -
# ## Check that Sampling is Even
# As long as each partition has approximately .38 for the mean, then survivor rows are distributed evenly in the partitions.
# +
survSummary = titanic3part['survived'].groupby('_partind_').simple.summary()
resultColumns = ['Column', 'N', 'NMiss', 'Mean', 'Sum', 'Std', 'StdErr']
display(survSummary['ByGroupInfo'])
display(survSummary['ByGroup1.Summary'][resultColumns])
display(survSummary['ByGroup2.Summary'][resultColumns])
# -
# ## Train a Model
# The casOut parameter that is shown in the example is used to store the model as an in-memory table. The next step of this example is to show how to score data with the model.
# +
s.builtins.loadActionSet('decisionTree') # 1
training = titanic3part.query('0 = _partind_') # 2
trainingResults = training.forestTrain(
target='survived',
inputs=all,
nominals=char + ['pclass', 'survived'],
casOut={'name':'forestModel', 'replace':True},
seed=1234,
binOrder=True,
varImp=True
)
display(trainingResults)
# -
# ## Use the Model for Scoring
# In this example, both the training data and the validation data are scored. This is done so that we can assess the effectiveness of the model for predicting whether someone survives on the Titanic.
#
# The in-memory table, forestModel, is used as the model. The scoring output is stored in an in-memory table that is named forestScored.
# +
forestModel = s.CASTable('forestModel')
titanic3part.forestScore(
modelTable=forestModel,
copyVars=['survived', '_partind_'],
casOut={'name':'forestScored', 'replace':True},
vote='prob'
)
# -
# ## Assess the Model
# The assess action is part of the percentile action set. You can run the loadActionSet action to ensure that the action is available to your session.
# +
s.builtins.loadActionSet('percentile')
forestScored = s.CASTable('forestScored') # 1
forestScored.groupBy='_PartInd_' # 2
forestScored.computedVars=['P1', 'P0'] # 3
forestScored.computedVarsProgram='''
if '1' eq strip(_RF_PredName_) then do;
P1 = _RF_PredP_;
P0 = 1 - _RF_PredP_;
end;
else do;
P1 = 1 - _RF_PredP_;
P0 = _RF_PredP_;
end;
''' # 4
forestScored.percentile.assess(
casOut={'name':'forestAssess', 'replace':True},
nbins=10,
cutStep = 0.01,
inputs=['P1'],
response='survived',
event='1',
pVar=['P0'],
pEvent='0'
)
# -
# ## Plot ROC
# +
forestAssess_ROC = \
s.CASTable('forestAssess_ROC', where='1 = _partind_') # 1
out2 = forestAssess_ROC.to_frame()
plt.figure(figsize=(8,8))
plt.plot(out2._FPR_,out2._Sensitivity_,'bo-',linewidth=2)
plt.plot(pd.Series(range(0,11,1))/10,pd.Series(range(0,11,1))/10,'k--',linewidth=1)
plt.xlabel('False Positive Rate')
plt.ylabel('Correct Classification Rate')
plt.grid(True)
plt.title('ROC Curve')
plt.show()
# -
# ## Plot Lift
# +
forestAssess = \
s.CASTable('forestAssess', where='1 = _partind_') # 1
lift = forestAssess.to_frame()
plt.figure(figsize=(8,8))
plt.plot(lift._Depth_, lift._Lift_,'bo-',linewidth=2)
plt.xlabel('Percentile')
plt.ylabel('Lift')
plt.grid(True)
plt.title('Lift Chart')
plt.show()
# -
s.close()
# Copyright SAS Institute
#
# Disclaimer: SAS may reference other websites or content or resources for use at Customer's sole discretion. SAS has no control over any websites or resources that are provided by companies or persons other than SAS. Customer acknowledges and agrees that SAS is not responsible for the availability or use of any such external sites or resources, and does not endorse any advertising, products, or other materials on or available from such websites or resources. Customer acknowledges and agrees that SAS is not liable for any loss or damage that may be incurred by Customer or its end users as a result of the availability or use of those external sites or resources, or as a result of any reliance placed by Customer or its end users on the completeness, accuracy, or existence of any advertising, products, or other materials on, or available from, such websites or resources.
| python/titanic/TitanicExample.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to Ibis
#
# Ibis is a Python framework to access data and perform analytical computations from different sources, in a standard way.
#
# In a way, you can think of Ibis as writing SQL in Python, with a focus on analytics, more than simply accessing data. And aside from SQL databases, you can use it with other backends, including big data systems.
#
# Why not simply use SQL instead? SQL is great and widely used. However, SQL has different flavors for different database engines, and SQL is very difficult to maintain when your queries are very complex. Ibis solves both problems by standardizing your code across backends and making it maintainable. Since Ibis is Python, you can structure your code in different files, functions, name variables, write tests, etc.
#
# This tutorial will guide you through Ibis features and provide practical examples. Some knowledge of Python is assumed and knowledge of SQL will be helpful but not required.
#
# Ibis is open source - if anything can be improved in this tutorial, or in Ibis itself, please open an issue in the [Ibis GitHub repository](https://github.com/ibis-project/ibis/) or open a pull request with the fix.
# ### Getting started
#
# To start using Ibis, you need a Python environment with Ibis installed. If you don't know how to create an environment, we recommend following the [setup instructions](https://ibis-project.org/getting_started.html) in the Ibis website.
#
# Once you have your environment ready, to start using Ibis simply import the `ibis` module:
import ibis
# To make things easier in this tutorial, we will be using _Ibis interactive mode_. For production code, that will rarely be the case. More details on Ibis non-interactive (aka lazy) mode are covered in the third tutorial, _Expressions, lazy mode and logging queries_.
#
# To set the interactive mode, use:
ibis.options.interactive = True
# Next thing we need is to create a connection object. The connection defines where the data is stored and where the computations will be performed.
#
# For a comparison to pandas, this is not the same as where the data is imported from (e.g. `pandas.read_sql`). pandas loads data into memory and performs the computations itself. Ibis won't load the data and perform any computation, but instead will leave the data in the backend defined in the connection, and will _ask_ the backend to perform the computations.
#
# In this tutorial we will be using a SQLite connection for its simplicity (no installation is needed). But Ibis can work with many different backends, including big data systems, or GPU-accelerated analytical databases. As well as most common relational databases (PostgreSQL, MySQL,...).
#
# To create a SQL connection to our example SQLite database `data/geography.db`, use:
# +
import os
database_file_path = os.path.join('data', 'geography.db')
connection = ibis.sqlite.connect(database_file_path)
# -
# Note that if you installed Ibis with `pip` instead of `conda`, you may need to install the SQLite backend separately with `pip install ibis-framework[sqlite]`.
#
# ### Exploring the data
#
# To list the tables in the `connection` object, we can use the `.list_tables()` method. If you are using Jupyter, you can see all the methods and attributes of the `connection` object by writing `connection.` and pressing the `<TAB>` key.
connection.list_tables()
# These two tables include data about countries, and about GDP by country and year.
#
# The data from countries has been obtained from [GeoNames](https://www.geonames.org/countries/).
# The GDP table will be used in the next tutorial, and the data has been obtained from the
# [World Bank website](https://data.worldbank.org/indicator/NY.GDP.MKTP.CD).
#
# Next, we want to access a specific table in the database. We can create a handler to the `countries` table with:
countries = connection.table('countries')
# To list the columns of the `countries` table, we can use the `columns` attribute.
#
# Again, Jupyter users can see all the methods and attributes of the `countries` object by typing `countries.` and pressing `<TAB>`.
countries.columns
# We can now access a sample of the data. Let's focus on the `name`, `continent` and `population` columns to start with. We can visualize the values of the columns with:
countries['name', 'continent', 'population']
# The table is too big for all the results to be displayed, and we probably don't want to see all of them at once anyway. For this reason, just the beginning and the end of the results is displayed. Often, the number of rows will be so large that this operation could take a long time.
#
# To check how many rows a table has, we can use the `.count()` method:
countries.count()
# To fetch just a subset of the rows, we can use the `.limit(n)` method, where `n` is the number of samples we want. In this case we will fetch the first `3` countries from the table:
countries['name', 'continent', 'population'].limit(3)
# ### Filters and order
#
# Now that we've got an intuition of the data available in the table `countries`, we will extract some information from it by applying filters and sorting the data.
#
# Let's focus on a single continent. We can see a list of unique continents in the table using the `.distinct()` method:
countries['continent'].distinct()
# We will focus on Asia (`AS` in the table). We can identify which rows belong to Asian countries using the standard Python `==` operator:
countries['continent'] == 'AS'
# The result has a value `True` for rows where the condition is true, and the value `False` when it's not.
#
# We can provide this expression to the method `.filter()`, and save the result in the variable `asian_countries` for future use.
asian_countries = countries['name', 'continent', 'population'].filter(countries['continent'] == 'AS')
asian_countries
# We can check how many countries exist in Asia (based on the information in the database) by using the `.count()` method we've already seen:
asian_countries.count()
# Next, we want to find the most populated countries in Asia. To obtain them, we are going to sort the countries by the column `population`, and just fetch the first 10. To sort by a column in Ibis, we can use the `.sort_by()` method:
asian_countries.sort_by('population').limit(10)
# This will return the least populated countries, since `.sort_by` will by default order in ascending order (ascending order like in `1, 2, 3, 4`). This behavior is consistent with SQL `ORDER BY`.
#
# To order in descending order we can use `ibis.desc()`:
asian_countries.sort_by(ibis.desc('population')).limit(10)
# This is the list of the 10 most populated countries based on the data from [GeoNames](https://www.geonames.org/).
#
# To learn more about Ibis, continue to the next tutorial.
| docs/source/tutorial/01-Introduction-to-Ibis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # A Recurrent Network Mechanism of Time Integration in Perceptual Decisions
# Wong, Wang
# Journal of Neuroscience, January 2006, 26(4):1212-1328
# ## Material and Methods
# ### The two-variable network model
# ### Calling Library Fuctions
# +
# LIBRARY
import numpy as np # vector manipulation
import math # math functions
import sys
# THIS IS FOR PLOTTING
# %matplotlib inline
import matplotlib.pyplot as plt # side-stepping mpl backend
import warnings
warnings.filterwarnings("ignore")
# -
# ### The Reduced Network Model
def H(x):
    """Firing-rate transfer function (f-I curve), Abbott-Chance form.

    Maps total synaptic input current x (nA) to a population firing rate
    (Hz): H(x) = (a*x - b) / (1 - exp(-d*(a*x - b))).

    Accepts scalars or numpy arrays; the result matches the input shape
    (a 0-d array for scalar input).
    """
    a = 270   # Hz/nA, input-output gain
    b = 108   # Hz, threshold term
    d = .154  # seconds, curvature of the soft rectification
    u = a * x - b
    # The closed form is 0/0 at u == 0 (the original returned NaN there);
    # the analytic limit is 1/d, so patch (near-)singular points explicitly.
    with np.errstate(divide='ignore', invalid='ignore'):
        f = np.where(np.isclose(u, 0.0), 1.0 / d, u / (1 - np.exp(-d * u)))
    return f
x=np.arange(-1,1,0.01)
# ### Neural Circuit
# $$ x_{1}=J_{11}S_1-J_{12}S_2+I_{0}+I_{1}+I_{noise,1}$$
# $$ x_{2}=J_{22}S_2-J_{21}S_1+I_{0}+I_{2}+I_{noise,1}$$
#
# where the synaptic couplings are $J_{11}=0.2609$, $J_{22}=0.2609$, $J_{12}=0.0497$ and $J_{21}=0.0497$.
# $I_{0}=0.3255 nA$ represents external input
#
def total_synaptic_current(S_1,S_2,I_1,I_2,I_noise_1,I_noise_2):
    """Recurrent input currents (x_1, x_2) to the two selective populations.

    x_i = J_ii*S_i - J_ij*S_j + I_0 + I_i + I_noise_i, combining
    self-excitation, cross-inhibition, unspecific background drive I_0,
    stimulus input and noise.
    """
    # Synaptic couplings (nA): identical self- and cross-terms for the
    # two populations, plus the constant external background current.
    self_coupling = 0.2609    # J_11 == J_22
    cross_coupling = 0.0497   # J_12 == J_21
    background = 0.3255       # I_0
    x_1 = self_coupling*S_1 - cross_coupling*S_2 + background + I_1 + I_noise_1
    x_2 = self_coupling*S_2 - cross_coupling*S_1 + background + I_2 + I_noise_2
    return x_1, x_2
# ### Background activity
# $$ \tau_{AMPA}\frac{d I_{noise,i}(t)}{dt} =-I_{noise,i}(t)+\eta_i(t)\sqrt{\tau_{AMPA}}\sigma_{noise}$$
def Background_Activity(I_noise):
    """One Euler-Maruyama step of the Ornstein-Uhlenbeck background noise.

    Integrates tau_AMPA * dI/dt = -I + eta * sqrt(tau_AMPA) * sigma_noise
    over one step h, where eta is a fresh standard-normal draw.

    Returns a length-1 numpy array (eta is drawn with size 1), so the
    result broadcasts when assigned into a float array slot.
    """
    h=0.1                # ms, integration step
    sigma_noise=0.02     # nA, noise amplitude
    tau_AMPA=2           # ms, AMPA receptor time constant
    eta_noise=np.random.normal(0,1,1)
    # NOTE(review): the midpoint predictor k is deliberately zeroed out
    # (original expression kept in the comment), so the update below
    # reduces to a plain Euler step -- confirm this was intentional.
    k=0#(-(I_noise)+eta_noise*np.sqrt(tau_AMPA)*sigma_noise)
    I_noise_new=I_noise+h/tau_AMPA*(-(I_noise+h/2*k)+eta_noise
                                    *np.sqrt(tau_AMPA)*sigma_noise)
    return I_noise_new
# ### Network Dynamics
# $$ \frac{d S_{i}}{dt} =-\frac{S_{i}}{\tau_S}+(1-S_{i})\gamma H_{i}$$
# +
def Network_Dynamics_VIS(S,x):
    """One integration step of the visual population's NMDA gating variable.

    Implements dS/dt = -S/tau_S + (1 - S) * gamma * H(x) with a fixed
    synaptic time constant tau_S = 100 ms.
    """
    h=0.1/1000 #ms        # NOTE(review): value is 0.1 ms expressed in seconds
    gamma=0.641           # saturation factor of the NMDA channels
    tau_S=.100 #s         # NMDA decay time constant
    # Midpoint predictor of the derivative at the current state.
    k=(-S/tau_S+(1-S)*gamma*H(x)/1)
    # NOTE(review): `(1-S+h/2*k)` parses as `(1-S) + h/2*k`; a midpoint
    # scheme would normally use `1-(S+h/2*k)` -- confirm intended.
    S_new=S+h*(-(S+h/2*k)/tau_S+(1-S+h/2*k)*gamma*H(x))
    return S_new
def Network_Dynamics_VEST(S,x,tau_VEST):
    """One integration step of the vestibular population's gating variable.

    Same dynamics as the visual update, but the synaptic time constant
    tau_VEST is a parameter (it is swept elsewhere in this notebook).
    """
    h=0.1/1000 #ms        # NOTE(review): value is 0.1 ms expressed in seconds
    gamma=0.641           # saturation factor of the NMDA channels
    #tau_S=.10 #s
    k=(-S/tau_VEST+(1-S)*gamma*H(x)/1)
    # NOTE(review): `(1-S+h/2*k)` parses as `(1-S) + h/2*k`; a midpoint
    # scheme would normally use `1-(S+h/2*k)` -- confirm intended.
    S_new=S+h*(-(S+h/2*k)/tau_VEST+(1-S+h/2*k)*gamma*H(x))
    return S_new
# -
# ### Input Current Target
#
# $$ I_i=J_{A,ext}\mu_0\left(1+ \frac{c'}{100} \right) $$
#
def I_input_1(c_dash):
    """Stimulus current (nA) into selective population 1.

    I_1 = J_A_ext * mu_0 * (1 + c'/100), so a positive signed coherence
    c' (in percent) strengthens the drive to population 1.
    """
    J_A_ext = 5.2/10000  # nA/Hz, coupling of the external AMPA input
    mu_0 = 30            # Hz, baseline stimulus firing rate
    return J_A_ext*mu_0*(1+(c_dash)/100)
# $$ I_2=J_{A,ext}\mu_0\left(1- \frac{c'}{100} \right) $$
# +
def I_input_2(c_dash):
    """Stimulus current (nA) into selective population 2.

    I_2 = J_A_ext * mu_0 * (1 - c'/100): the mirror image of I_input_1,
    so a positive signed coherence c' weakens the drive to population 2.
    """
    J_A_ext = 0.00052  # nA/Hz, coupling of the external AMPA input
    mu_0 = 30          # Hz, baseline stimulus firing rate
    return J_A_ext*mu_0*(1-(c_dash)/100)
def Reaction_Time(Firing_Rate_1,Firing_Rate_2,Threshold,time):
    """Decision readout at a single time point.

    Returns (choice, reaction_time, decided): choice is 1 when rate 1 has
    reached Threshold (checked first), 0 when only rate 2 has; the
    reaction time is `time` at the crossing; decided is 1 once either
    rate crossed. With no crossing, (0, 0, 0) is returned.
    """
    # Guard-clause form: rate 1 wins ties because it is tested first,
    # matching the original if/elif ordering.
    if Firing_Rate_1 >= Threshold:
        return 1, time, 1
    if Firing_Rate_2 >= Threshold:
        return 0, time, 1
    return 0, 0, 0
# +
# Simulation constants and pre-allocated traces for the tau sweep below.
h=0.1
time=np.arange(-100,1500,h)   # time grid in ms, 0.1 ms resolution
J_A_ext=0.00052 # nA/Hz
mu_0=30 # Hz
Vector_coherence=[1.0,3.0,5.0]#,7.5,10.0,15.0]
# The line above is immediately overridden: only c' = 10% is simulated.
Vector_coherence=[10.0]#,7.5,10.0,15.0]
TAU_VEST=np.arange(0.09,0.11,0.002)#Vector_coherence=[1.0,3.0,5.0]
# BUG FIX: the original re-bound TAU_VEST to the scalar 0.1 here, which
# breaks the sweep below (`len(TAU_VEST)` and `TAU_VEST[k]` fail on a
# float) and the later plots over TAU_VEST. Keep the array.
Threshold=15   # decision threshold (Hz)
N=1000         # trials per tau value (NOTE: the full sweep is slow)
RT_VEST_coh_hit=np.zeros(len(Vector_coherence))
RT_VEST_coh_miss=np.zeros(len(Vector_coherence))
Prob_VEST=np.zeros(len(Vector_coherence))
RT_VIS_coh_hit=np.zeros(len(Vector_coherence))
RT_VIS_coh_miss=np.zeros(len(Vector_coherence))
Prob_VIS=np.zeros(len(Vector_coherence))
#fig = plt.figure(figsize=(12,8))
ALL_F_1=0.2*np.ones((N,len(time)))
ALL_F_2=0.2*np.ones((N,len(time)))
I_1=0.0*np.ones(len(time)) # np.zeros((1,len(time)))
I_2=0.0*np.ones(len(time)) # np.zeros((1,len(time)))
Firing_target_1=0*time # np.zeros((1,len(time)))
Firing_target_2=0*time # np.zeros((1,len(time)))
Firing_target_3=0*time # np.zeros((1,len(time)))
Firing_target_4=0*time # np.zeros((1,len(time)))
# +
# Sweep the vestibular time constant TAU_VEST; for each value run N
# independent trials of the visual (fixed tau) and vestibular (swept tau)
# two-population decision circuits side by side on the same stimulus.
RT_VEST_coh_hit=[]
RT_VEST_coh_miss=[]#np.zeros(len(Vector_coherence))
Prob_VEST=[]#np.zeros(len(Vector_coherence))
RT_VIS_coh_hit=[]#np.zeros(len(Vector_coherence))
RT_VIS_coh_miss=[]#np.zeros(len(Vector_coherence))
Prob_VIS=[]#np.zeros(len(Vector_coherence))
# Coherence is held fixed at Vector_coherence[0]; j is not iterated.
j=0
#for j in range(0,len(Vector_coherence)):
for k in range(0,len(TAU_VEST)):
    ANSWER_VIS=np.zeros(N)
    RT_VIS=np.zeros(N)
    ANSWER_VEST=np.zeros(N)
    RT_VEST=np.zeros(N)
    for n in range(0,N):
        # Progress indicator every 250 trials.
        if n%250==0:
            print('Tau %0.4f : n: %d' %(TAU_VEST[k],n))
        # Fresh noise traces and randomized initial conditions per trial.
        I_noise_1=0.001*np.random.normal(0,1,len(time))
        I_noise_2=0.001*np.random.normal(0,1,len(time))
        x_1=J_A_ext*mu_0*np.random.uniform(0,1,len(time))
        x_2=J_A_ext*mu_0*np.random.uniform(0,1,len(time))
        x_3=J_A_ext*mu_0*np.random.uniform(0,1,len(time))
        x_4=J_A_ext*mu_0*np.random.uniform(0,1,len(time))
        S_1=0.2*np.ones(len(time))+0.01*np.random.normal(0,1,len(time))#0.1*np.random.uniform(0,1,len(time))# np.random.normal(0,1,len(time))*0.0004
        S_2=0.2*np.ones(len(time))+0.01*np.random.normal(0,1,len(time)) #0.1*np.random.uniform(0,1,len(time)) #np.random.normal(0,1,len(time))*0.0004
        S_3=0.2*np.ones(len(time))+0.01*np.random.normal(0,1,len(time))#0.1*np.random.uniform(0,1,len(time))# np.random.normal(0,1,len(time))*0.0004
        S_4=0.2*np.ones(len(time))+0.01*np.random.normal(0,1,len(time)) #0.1*np.random.uniform(0,1,len(time)) #np.random.normal(0,1,len(time))*0.0004
        Firing_target_1[0]=H(x_1[0])
        Firing_target_2[0]=H(x_2[0])
        # NOTE(review): targets 3/4 are seeded from x_1/x_2, not x_3/x_4 --
        # presumably a copy-paste slip; confirm intended initialization.
        Firing_target_3[0]=H(x_1[0])
        Firing_target_4[0]=H(x_2[0])
        count=0
        count2=0
        for i in range (0,len(time)-1):
            # Stimulus (coherence c') is on for t in [0, 1000) ms only.
            if time[i] >=0 and time[i]<1000:
                c_dash=Vector_coherence[j]
            else:
                c_dash=0.0
            I_noise_1[i+1]=Background_Activity(I_noise_1[i])
            I_noise_2[i+1]=Background_Activity(I_noise_2[i])
            # Opposite-signed coherence drives the two populations.
            I_1[i+1]=I_input_1(c_dash)
            I_2[i+1]=I_input_1(-c_dash)
            x_1[i+1],x_2[i+1]=total_synaptic_current(S_1[i],S_2[i],I_1[i],I_2[i],I_noise_1[i],I_noise_2[i])
            S_1[i+1]=Network_Dynamics_VIS(S_1[i],x_1[i+1])
            S_2[i+1]=Network_Dynamics_VIS(S_2[i],x_2[i+1])
            Firing_target_1[i]=H(x_1[i])
            Firing_target_2[i]=H(x_2[i])
            # NOTE(review): this second pair of calls overwrites the noise
            # samples drawn above, so the vestibular pair integrates a
            # different draw than the visual pair -- confirm the two
            # circuits are meant to see independent noise.
            I_noise_1[i+1]=Background_Activity(I_noise_1[i])
            I_noise_2[i+1]=Background_Activity(I_noise_2[i])
            x_3[i+1],x_4[i+1]=total_synaptic_current(S_3[i],S_4[i],I_1[i],I_2[i],I_noise_1[i],I_noise_2[i])
            S_3[i+1]=Network_Dynamics_VEST(S_3[i],x_3[i+1],TAU_VEST[k])
            S_4[i+1]=Network_Dynamics_VEST(S_4[i],x_4[i+1],TAU_VEST[k])
            Firing_target_3[i]=H(x_3[i])
            Firing_target_4[i]=H(x_4[i])
            # Record each model's first threshold crossing exactly once.
            if count <0.5:
                ANSWER_VIS[n],RT_VIS[n],count=Reaction_Time(Firing_target_1[i],Firing_target_2[i],Threshold,time[i])
            if count2 <0.5:
                ANSWER_VEST[n],RT_VEST[n],count2=Reaction_Time(Firing_target_3[i],Firing_target_4[i],Threshold,time[i])
    # Per-tau summary: mean RT split by choice (1 = option 1, 0 = option 2
    # or no decision), and the probability of choosing option 1.
    RT_VIS_coh_hit.append(np.average(RT_VIS[ANSWER_VIS==1]))
    RT_VIS_coh_miss.append(np.average(RT_VIS[ANSWER_VIS==0]))
    Prob_VIS.append(np.average(ANSWER_VIS))
    RT_VEST_coh_hit.append(np.average(RT_VEST[ANSWER_VEST==1]))
    RT_VEST_coh_miss.append(np.average(RT_VEST[ANSWER_VEST==0]))
    Prob_VEST.append(np.average(ANSWER_VEST))
# -
# ### Plotting
# #### Input
# +
fig = plt.figure(figsize=(8,4))
plt.plot(time,Firing_target_1,'-',color='blue',label="Vis Right")
plt.plot(time,Firing_target_2,'-',color='red',label="Vis Left")
plt.plot(time,Firing_target_3,':',color='blue',label="Vest Right")
plt.plot(time,Firing_target_4,':',color='red',label="Vest Left")
plt.grid()
plt.xlabel('time(ms)')
plt.ylabel('Firing Rate (Hz)')
plt.xlim((-100,1000))
plt.ylim((0,Threshold*1.5))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig('Multisensory_Example_Firing_Pattern.png', bbox_inches='tight',dpi=300)
# plt.
#plt.ylim((30,50))
plt.show()
# +
fig = plt.figure(figsize=(10,4))
plt.subplot(121)
plt.plot(Vector_coherence,Prob_VIS,'o:',color='red')
plt.plot(Vector_coherence,Prob_VEST,'o:',color='blue')
plt.xlabel('Coherence')
plt.ylabel('Probability')
plt.xscale('log')
plt.ylim(0,1)
plt.subplot(122)
plt.plot((Vector_coherence),RT_VIS_coh_hit,'o:',color='red',label="Visual Correct")
plt.plot((Vector_coherence),RT_VIS_coh_miss,'o:',fillstyle='none',color='red',label="Visual Incorrect")
plt.plot((Vector_coherence),RT_VEST_coh_hit,'o:',color='blue',label="Vestibular Correct")
plt.plot((Vector_coherence),RT_VEST_coh_miss,'o:',fillstyle='none',color='blue',label="Vestibular Incorrect")
plt.xscale('log')
plt.legend()
plt.ylim(0,1400)
plt.xlabel('Coherence')
plt.ylabel('Reaction Time')
plt.savefig('Behavioural.png', bbox_inches='tight',dpi=300)
plt.show()
# +
fig = plt.figure(figsize=(10,4))
plt.subplot(121)
plt.plot(TAU_VEST,Prob_VIS,'o:',color='red')
plt.plot(TAU_VEST,Prob_VEST,'o:',color='blue')
#plt.plot(Prob_VIS,'o:',color='red')
#plt.plot(Prob_VEST,'o:',color='blue')
plt.xlabel('Tau Vest')
plt.ylabel('Probability')
#plt.xscale('log')
#plt.ylim(0,1)
plt.subplot(122)
plt.plot(TAU_VEST,RT_VIS_coh_hit,'o:',color='red',label="Visual Correct")
plt.plot(TAU_VEST,RT_VIS_coh_miss,'o:',fillstyle='none',color='red',label="Visual Incorrect")
plt.plot(TAU_VEST,RT_VEST_coh_hit,'o:',color='blue',label="Vestibular Correct")
plt.plot(TAU_VEST,RT_VEST_coh_miss,'o:',fillstyle='none',color='blue',label="Vestibular Incorrect")
#plt.xscale('log')
plt.legend()
plt.ylim(0,1400)
plt.xlabel('Coherence')
plt.ylabel('Reaction Time')
plt.savefig('Tau_Multisensory_Behavioural.png', bbox_inches='tight',dpi=300)
plt.show()
# -
RT_VEST_coh_hit
Prob_VEST
TAU_VEST[0]
TAU_VEST=np.arange(0.09,0.11,0.002)
TAU_VEST
| MULTISENSORY/Visual Vestibular Heading A Network Mechanism of Perceptual _RB.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Import all necessary libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# import seaborn as sns
from sklearn.preprocessing import MinMaxScaler,StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.metrics import r2_score, mean_squared_error
#import all the models
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
# ### load the cleaned csv file
df = pd.read_csv('~/Desktop/ml_database.csv')
df.head()
# ### Display the column names in the data frame:
df.columns
# ### Delete the unnecessary columns:
df.drop(['Facility Id'], axis = 1, inplace = True)
# ### Display the data types of the columns:
df.dtypes
# ### Display the shape of the data frame:
df.shape
# ### Display the general information about the dataset:
df.info()
# ### Count the number of the missing values in the data frame:
df.isnull().sum()
# ### Value Counts for Industry type column:
df['industry_type'].value_counts().head(10)
# ### Histograme for '%Renewables' column:
df['%Renewables'].hist()
# ### Display the number of unique values in 'State' column:
df['State'].unique()
# ### Assign numeric codes to categorical columns 'State' and 'industry_type'
# +
df['State'] = df['State'].astype('category')
df['State'] = df['State'].cat.codes
df['industry_type'] = df['industry_type'].astype('category')
df['industry_type'] = df['industry_type'].cat.codes
# -
# ### Make the histograme for 'total_direct_emissions' column:
# We can see that most of the values fall in the beggining of the graph, graph has long tail, and do not follow normal distribution. Models may struggle here.
df['total_direct_emissions'].hist()
plt.show()
# ### Log transformation of the 'total_direct_emissions' column and take values only greater than 6,
df['total_direct_emissions'] = np.log1p(df['total_direct_emissions'])
df['total_direct_emissions']
df = df.loc[df['total_direct_emissions'] > 6, :]
# We can see that data seems to follow normal distribution now, we can fit the model
df['total_direct_emissions'].hist()
df = df.rename(columns={"USDA_energy_invest_$": "USDA_energy_invest", "%Renewables": "renewables_percent"})
df = df.drop(columns=["State", "year"])
# ### How data frame looks like now:
df.to_csv("ml_database_final.csv", index=False)
df.head()
# ### Correlation between target variable 'total_direct_emissions' and all others
df.drop("total_direct_emissions", axis=1).apply(lambda x: x.corr(df.total_direct_emissions))
# ### plot the correlation heatmap
# +
# corr = df.corr()
# plt.figure(figsize = (14,8))
# sns.heatmap(corr,
# xticklabels=corr.columns,
# linewidth=0.5,
# yticklabels=corr.columns,
# cmap="YlGnBu")
# plt.show()
# -
# ### Divide the data into Features and Response variable:
#features
X = df.loc[:, df.columns != 'total_direct_emissions']
# Target variable
y = df['total_direct_emissions']
# ### Use 80% of the rows as training set and 20% of the rows as testing set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=42)
# ### Scale the training and testing dataset using StandardScaler:
scaler = StandardScaler()
# Fit the scaler on the training data only, then apply the SAME learned
# mean/std to the test data. Calling fit_transform on the test set would
# re-fit the scaler there, leaking test-set statistics into preprocessing
# and scaling train and test inconsistently.
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# ## Fit and Train Linear regression Model:
# Inilialize the Linear Regresor
Lr_regressor = LinearRegression()
# Fit the Linear Regresor Regressor
Lr_regressor.fit(X_train_scaled, y_train)
# predict the values using test data
y_pred = Lr_regressor.predict(X_test_scaled)
# ### Evaluate the Linear Regression Model using R2 and RMSE score:
# r2_score expects (y_true, y_pred) in that order; R2 is not symmetric in
# its arguments, so the original swapped call reported a wrong score.
# (MSE is symmetric, but the same order is used for consistency.)
R2 = r2_score(y_test, y_pred)
RMSE = np.sqrt(mean_squared_error(y_test, y_pred))
print("R2 score of Linear Regressor is:", R2)
print("RMSE score of Linear Regressor is:", RMSE)
# ## Fit and train RandomForest Regressor Model:
# Inilialize the Random Forest Regresor
Rf_regressor = RandomForestRegressor()
# Fit the Random Forest Regressor
Rf_regressor.fit(X_train_scaled, y_train)
# predict the values using test data
y_pred = Rf_regressor.predict(X_test_scaled)
# ### Evaluate the RandomForest Regressor using R2 and RMSE score:
# r2_score expects (y_true, y_pred) in that order; R2 is not symmetric in
# its arguments, so the original swapped call reported a wrong score.
R2 = r2_score(y_test, y_pred)
RMSE = np.sqrt(mean_squared_error(y_test, y_pred))
print("R2 score of RandomForest Regressor is:", R2)
print("RMSE score of RandomForest Regressor is:", RMSE)
# ## Fit and train DecisionTree Regressor Model:
# Inilialize the Decision Tree Regressor
Dt_regressor = DecisionTreeRegressor()
# Fit the Decision Tree Regressor
Dt_regressor.fit(X_train_scaled, y_train)
# predict the values using test data
y_pred = Dt_regressor.predict(X_test_scaled)
# ### Evaluate the Linear DecisionTree Regressor using R2 and RMSE score:
# r2_score expects (y_true, y_pred) in that order; R2 is not symmetric in
# its arguments, so the original swapped call reported a wrong score.
R2 = r2_score(y_test, y_pred)
RMSE = np.sqrt(mean_squared_error(y_test, y_pred))
print("R2 score of DecisionTree Regressor is:", R2)
print("RMSE score of DecisionTree Regressor is:", RMSE)
# Calculate feature importance in the Decision Tree model.
importances = Dt_regressor.feature_importances_
importances
# We can sort the features by their importance.
sorted(zip(Dt_regressor.feature_importances_, X.columns), reverse=True)
| ipynb/ml_final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"is_executing": false}
from datetime import date
from gs_quant.datetime.relative_date import RelativeDate
from gs_quant.markets.securities import ExchangeCode
from gs_quant.session import GsSession, Environment
# + pycharm={"name": "#%%\n", "is_executing": false}
# External users should substitute their client id and secret; please skip
# this step if using an internal JupyterHub (already authenticated).
GsSession.use(Environment.PROD, client_id=None, client_secret=None, scopes=('read_product_data',))
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Date Rule conventions
# A: first day of the year
# b: business days of calendar passed, USD by default
# d: Gregorian calendar days
# e: end of month (ignores number)
# m: month
# r: end of year
# u: business days ignoring USD holidays
# v: gets last business day of month (does not ignore number)
# x: gets last business day of month (ignores the number)
# y: add years, Result will be moved to the next week if falling on a weekend
# + pycharm={"name": "#%%\n"}
# Returns four business days after the first business day on or after the 15th calendar day of the month
# The result was originally bound to a variable named ``date``, which shadowed
# the ``datetime.date`` class imported above; use a distinct name instead.
rule_date: date = RelativeDate('14d+0u+4u').apply_rule(exchanges=[ExchangeCode.NYSE])
| gs_quant/documentation/01_markets/02_rdates/examples/0004_chaining_relativedate_rules.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.4.5
# language: julia
# name: julia-0.4
# ---
# Interactive linear-algebra scratchpad (Julia 0.4 syntax).  Several cells
# below intentionally raise errors to illustrate shape/syntax mistakes.
A = [1 3 1
     1 1 -1
     3 11 6]
b = [9, 1, 35]
# a 1x3 row vector
u = [1 3 1]
# ERROR (intentional): ragged rows — matrix literals need equal-length rows
w = [1 3 1
     2 2]
# solve A*x = b with the backslash operator (LU factorization under the hood)
x = A \ b
# same solution, but forming the explicit inverse is slower and less accurate
inv(A) * b
Ahuge = rand(1000, 1000)
bhuge = rand(1000)
Ahuge \ bhuge
# time the solver — run twice, since the first call includes JIT compilation
@time Ahuge \ bhuge;
@time Ahuge \ bhuge;
@time inv(Ahuge) * bhuge
@time inv(Ahuge) * bhuge
# ERROR (intentional): [A, b] builds a vector of arrays; [A b] concatenates
L, U = lu([A, b], Val{false})
L, U = lu([A b], Val{false})
w = rand(3)
n = 3
x1 = rand(3)
# ERROR (intentional): "." alone is not an operator — the dot product is ⋅ (\cdot)
y1 = w . x1
y1 = w⋅x1
# +
x2 = rand(3);y2=w⋅x2
x3 = rand(3);y3=w⋅x3
# NOTE: typo carried over from the session — x2 appears twice, and the
# comma-separated literal makes a vector of vectors, not a matrix
X = [x1, x2, x2]
y = [y1; y2; y3]
# -
# horizontal concatenation: 3x3 matrix whose columns are x1, x2, x3
X = [x1 x2 x3]
X*w-y
x1
a = [2, 5, 6]
a = [2 5 6]
X = [x1;x2;x3]
# rows as samples: transpose the column-concatenated matrix
X = [x1 x2 x3]'
X*w-y
# least-squares solve recovers w
X\y
# ERROR unless the package is installed first (see the next two cells)
using Flux
Pkg.add("Flux")
Pkg.update()
A = [2 -1 5
     3 4 4
     -4 -2 0]
B = [1 0 -2
     1 -5 1
     -3 0 3]
# elementwise product vs. true matrix product
@show(A.*B)
@show(A*B)
# naive O(m*n*p) matrix multiplication via row·column dot products
function my_own_matmul(A, B)
    m, n1 = size(A)
    n2, p = size(B)
    if n1 != n2 error("No good, n1=$(n1) != n2=$(n2)") end
    C = [vec(A[i,:])⋅B[:,j] for i=1:m,j=1:p]
end
my_own_matmul(A, B)
A
B
A[1,:] ⋅ B[:, 1]
A[1,:]
B[:, 1]
a = rand(2, 2)
a[1, 1]
a[1,:]
using PyPlot
x = linspace(1,10000,10000)
fig, ax = subplots()
# compare near-linear n^0.999999*log2(n) growth against a slow exponential
ax[:plot](x, [n^0.999999*log2(n) for n in x], label=L"$n^{0.99999}*log2(n)$")
#plot(x, [10000000 * n for n in x])
ax[:plot](x, [1.000001^n for n in x], label=L"$1.000001^n$")
#ax[:plot](x, [n^2 for n in x], label=L"$n^2$")
ax[:legend]()
| julia_attempt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# ### 1. Open Air Quality Dataset
# Madrid air-quality measurements for December 2019 (semicolon-separated)
df_airQuality = pd.read_csv('../airQuality_2019/datos201912.csv', delimiter = ";")
df_airQuality.head(3)
df_airQuality.dtypes
# ### 2. Open Monitoring Stations Dataset
# Station metadata: short code plus longitude/latitude coordinates
df_stations = pd.read_excel('../monitoring_stations/stations.xls', index_col=0)
# extract columns needed
df_stations_locations = df_stations[['CODIGO_CORTO', 'LONGITUD', 'LATITUD']]
stations_locations = df_stations_locations.loc[: , "CODIGO_CORTO":"LATITUD"]
stations_locations.head()
# ### 3. Extract Carbon Monoxide
# extract magnitude 6 which corresponds to carbon monoxide (CO)
magnitude_6 = df_airQuality.loc[df_airQuality['MAGNITUD'] == 6]
# replace all zeros with nan (this is useful to calculate the mean later)
magnitude_6 = magnitude_6.replace(0, np.NaN)
magnitude_6.head(3)
# remove validation columns (with V as first letter)
magnitude_6_noVColumns = magnitude_6.loc[:, ~magnitude_6.columns.str.startswith('V')]
# drop columns not needed
magnitude_6_reduced = magnitude_6_noVColumns.drop(columns=['PROVINCIA', 'MUNICIPIO','PUNTO_MUESTREO','MES'])
magnitude_6_reduced.head(3)
# extract only days from the dataframe
days = magnitude_6_reduced.loc[: , "D01":"D31"]
# calculate new column with mean aggregating all columns extracted above
magnitude_6_reduced['carbon_monoxid_mean'] = days.mean(axis=1)
magnitude_6_reduced.head(3)
# drop all columns with day values (days)
magnitude_6_dropColumns = magnitude_6_reduced.drop(columns = days)
magnitude_6_dropColumns.head(3)
# Average per station.  ``groupby`` moves ESTACION into the index, so it must
# be restored as a column with ``reset_index`` before it can serve as a merge
# key (the original chain renamed a nonexistent column ``0`` and then merged
# on a 'monitoring_station' column that did not exist, raising KeyError;
# this now mirrors the working Particulate Matter section below).
carbon_monoxide_2019 = magnitude_6_dropColumns.groupby('ESTACION') \
    .agg({'carbon_monoxid_mean':'mean'}) \
    .rename(columns={'carbon_monoxid_mean':'carbon_monoxide_mean2019'}) \
    .reset_index()
carbon_monoxide_2019 = carbon_monoxide_2019.rename(columns={'ESTACION':'monitoring_station'})
carbon_monoxide_2019.head(15)
# attach station coordinates and export the per-station CO means
co_mean_2019 = carbon_monoxide_2019.merge(stations_locations, left_on='monitoring_station', right_on='CODIGO_CORTO')
co_mean_2019 = co_mean_2019.drop(columns = ['CODIGO_CORTO'])
co_mean_2019 = co_mean_2019.rename(columns={'monitoring_station': 'station','carbon_monoxide_mean2019': 'co','LONGITUD':'longitude', 'LATITUD':'latitude'})
co_mean_2019.head()
co_mean_2019.to_csv('../airQuality_2019/codata.csv', index=False)
# ### 4. Extract Particulate Matter 10
# extract magnitude 8 which corresponds to particulate matter (PM10)
# (the original comment said "magnitude 6 / carbon monoxide" — copy-paste)
magnitude_8 = df_airQuality.loc[df_airQuality['MAGNITUD'] == 8]
# replace all zeros with nan (this is useful to calculate the mean later)
magnitude_8 = magnitude_8.replace(0, np.NaN)
magnitude_8.head(3)
# remove validation columns (with V as first letter)
magnitude_8_noVColumns = magnitude_8.loc[:, ~magnitude_8.columns.str.startswith('V')]
# drop columns not needed
magnitude_8_reduced = magnitude_8_noVColumns.drop(columns=['PROVINCIA', 'MUNICIPIO','PUNTO_MUESTREO','MES'])
magnitude_8_reduced.head(3)
# extract only days from the dataframe
days = magnitude_8_reduced.loc[: , "D01":"D31"]
# calculate new column with mean aggregating all columns extracted above
magnitude_8_reduced['particulate_matter'] = days.mean(axis=1)
magnitude_8_reduced.head(3)
# drop all columns with day values (days)
magnitude_8_dropColumns = magnitude_8_reduced.drop(columns = days)
magnitude_8_dropColumns.head(3)
# +
# average per station; reset_index restores ESTACION as a merge-key column
particulate_matter_2019 = magnitude_8_dropColumns.groupby('ESTACION') \
    .agg({'particulate_matter':'mean'}) \
    .rename(columns={'particulate_matter':'particulate_matter_mean2019'}) \
    .reset_index()
particulate_matter_2019 = particulate_matter_2019.rename(columns={'ESTACION':'monitoring_station'})
particulate_matter_2019
# -
# attach station coordinates and export the per-station PM10 means
pm_mean_2019 = particulate_matter_2019.merge(stations_locations, left_on='monitoring_station', right_on='CODIGO_CORTO')
pm_mean_2019 = pm_mean_2019.drop(columns = ['CODIGO_CORTO'])
pm_mean_2019 = pm_mean_2019.rename(columns={'monitoring_station': 'station','particulate_matter_mean2019': 'pm','LONGITUD':'longitude', 'LATITUD':'latitude'})
pm_mean_2019.head()
pm_mean_2019.to_csv('../airQuality_2019/pmdata.csv', index=False)
| results/urban_foresty_science/Processing/Air_quality/script/AirQuality_processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Robust Asset Allocation (RAA)
# + [markdown] heading_collapsed=true
# ## Colaboratory Imports for Backtests
# + code_folding=[0] hidden=true
# only for colaboratory
# !pip install git+https://github.com/scubamut/fintools.git
# !pip install cvxopt
# !pip install pyfolio
# !pip install ffn
# !pip install itable
# !pip install git+https://github.com/pydata/pandas-datareader.git
# -
# ## Imports for Backtests
# +
from __future__ import print_function
from IPython.utils.path import get_ipython_dir
print(get_ipython().profile_dir.location)
import pandas_datareader.data as web
import matplotlib.pyplot as plt
import pandas as pd
import datetime
import talib
import itable
import ffn
from fintools.Parameters import Parameters
from fintools.set_start_end import set_start_end
from fintools.get_yahoo_prices import get_yahoo_prices
from fintools.compute_weights_RS_DM import compute_weights_RS_DM
from fintools.compute_weights_PMA import compute_weights_PMA
from fintools.endpoints import endpoints
from fintools.backtest import backtest
from fintools.monthly_return_table import monthly_return_table
from fintools.show_return_table import show_return_table
from fintools.finhelpers3 import highlight_pos_neg
# %matplotlib inline
# -
# # RAAB000
# ## Symbols
# +
# ETF universe for the RAA Balanced backtest; CASHX is a synthetic cash asset
symbols =['MTUM','IWD','EFA','EFV','VNQ','DBC','IEF']
cash_proxy = 'CASHX'
risk_free = 'BIL'
# get data: download every real ticker (universe + cash proxy + risk-free)
tickers = symbols.copy()
if cash_proxy != 'CASHX' :
    tickers = list(set(tickers + [cash_proxy]))
if isinstance(risk_free, str) :
    tickers = list(set(tickers + [risk_free]))
# -
# ## Variables
# +
# relative-strength and risk lookbacks, in months (monthly endpoints below)
rs_lookback = 12
risk_lookback = 12
# hold all 7 assets when eligible (no cross-sectional cut)
n_top = 7
# Define which online source one should use
data_source = 'yahoo'
start_date = '2000-01-01'
end_date = datetime.datetime.today().strftime('%Y-%m-%d')
# chosen from one of
data_column = 'Adj Close'
# local directory where per-ticker CSVs are cached
data_path = '/home/scubamut/Projects/DATA/'
# -
# ## Data
# User pandas_reader.data.DataReader to load MultiIndex Data.
# Download all tickers at once; pandas-datareader returns a MultiIndex frame
MI_data = web.DataReader(tickers, data_source, start_date, end_date)
data = MI_data[data_column].sort_index(ascending=True)
# ### Clean Data
# first date with data for each ticker (late inceptions shorten the backtest)
inception_dates = pd.DataFrame([data[ticker].first_valid_index() for ticker in data.columns],
                               index=data.keys(), columns=['inception'])
print (inception_dates)
# ### make sure there is no missing data
# +
# make sure there is no missing data (nan)
# and, if so, save csv
for col in data.columns:
    df = data[col]
    print (col, data[col].first_valid_index(),
           len(data[col].dropna()),
           len(df[data[col].first_valid_index():]) == len(data[col].dropna()))
    # only cache series that are gap-free from their inception date
    if len(df[data[col].first_valid_index():]) == len(data[col].dropna()):
        data[col].dropna().to_csv(data_path + col + '.csv')
    else:
        print ('***********************')
# +
def get_date(timestamp):
    """Format a date-like object as 'YYYY-M-D' (month/day not zero-padded)."""
    return f"{timestamp.year}-{timestamp.month}-{timestamp.day}"
# Keep only the rows where every ticker has a price (common history)
prices = data.copy().dropna()
start_date = get_date(prices.index[0].date())
end_date = get_date(prices.index[-1].date())
print('start_date=' , start_date, 'end_date=', end_date)
# -
# +
# Resample to month-end prices and compute 12-month momentum signals
end_points = endpoints(period='M', trading_days=prices.index)
prices_m = prices.loc[end_points]
returns = prices_m[symbols].pct_change(rs_lookback)[rs_lookback:]
# absolute momentum: only hold assets with a positive trailing return
absolute_momentum_rule = returns > 0
if isinstance(risk_free, int) :
    excess_returns = returns
else :
    # subtract the risk-free ETF's trailing return to get excess returns
    risk_free_returns = prices_m[risk_free].pct_change(rs_lookback)[rs_lookback:]
    excess_returns = returns.subtract(risk_free_returns, axis=0).dropna()
rebalance_dates = excess_returns.index.join(absolute_momentum_rule.index, how='inner')
# relative strength ranking
ranked = excess_returns.loc[rebalance_dates][symbols].rank(ascending=False, axis=1, method='dense')
# elligibility rule - top n_top ranked securities
elligible = ranked[ranked<=n_top] > 0
# equal weight allocations
elligible = elligible.multiply(1./elligible.sum(1), axis=0)
# downside protection
weights = pd.DataFrame(0.,index=elligible.index, columns=prices.columns)
if cash_proxy == 'CASHX' :
    # synthetic cash: constant price of 1 so it earns nothing
    weights[cash_proxy] = 0
    prices[cash_proxy] = 1.
weights[symbols] = (elligible * absolute_momentum_rule).dropna()
# anything not allocated to risk assets goes to the cash proxy
weights[cash_proxy] += 1 - weights[symbols].sum(axis=1)
# backtest
p_value, p_holdings, p_weights = backtest(prices, weights, 10000., offset=0, commission=10.)
p_value.plot(figsize=(15,10), grid=True)
# -
# algo stats
ffn.calc_perf_stats(p_value).display()
show_return_table(p_value)
df = monthly_return_table (p_value)
frame = df['Annual Returns'].to_frame()
frame['positive'] = df['Annual Returns'] >= 0
frame['Annual Returns'].plot(figsize=(15,10),kind='bar',color=frame.positive.map({True: 'g', False: 'r'}), grid=True)
# # RAAA00
# +
# RAAA00 variant: mutual-fund universe, 1-month lookbacks, top 5 of 6 funds,
# with VFIIX (a real fund) as the cash proxy instead of synthetic CASHX.
symbols =['VCVSX','VUSTX','VWEHX','VFIIX','VGHCX','FRESX']
cash_proxy = 'VFIIX'
risk_free = 'FFXSX'
rs_lookback = 1
risk_lookback = 1
n_top = 5
# get data
tickers = symbols.copy()
if cash_proxy != 'CASHX' :
    tickers = list(set(tickers + [cash_proxy]))
if isinstance(risk_free, str) :
    tickers = list(set(tickers + [risk_free]))
data = pd.DataFrame (columns=tickers)
# Define which online source one should use
data_source = 'yahoo'
# We would like all available data from 01/01/2000 until today.
start_date = '2000-01-01'
end_date = datetime.datetime.today().strftime('%Y-%m-%d')
# User pandas_reader.data.DataReader to load the desired data. As simple as that.
panel_data = web.DataReader(tickers, data_source, start_date, end_date)
data = panel_data['Adj Close'].sort_index(ascending=True)
inception_dates = pd.DataFrame([data[ticker].first_valid_index() for ticker in data.columns],
                               index=data.keys(), columns=['inception'])
print (inception_dates)
prices = data.copy().dropna()
# month-end prices and 1-month momentum signals (same pipeline as RAAB000)
end_points = endpoints(period='M', trading_days=prices.index)
prices_m = prices.loc[end_points]
returns = prices_m[symbols].pct_change(rs_lookback)[rs_lookback:]
absolute_momentum_rule = returns > 0
if isinstance(risk_free, int) :
    excess_returns = returns
else :
    risk_free_returns = prices_m[risk_free].pct_change(rs_lookback)[rs_lookback:]
    excess_returns = returns.subtract(risk_free_returns, axis=0).dropna()
rebalance_dates = excess_returns.index.join(absolute_momentum_rule.index, how='inner')
# relative strength ranking
ranked = excess_returns.loc[rebalance_dates][symbols].rank(ascending=False, axis=1, method='dense')
# elligibility rule - top n_top ranked securities
elligible = ranked[ranked<=n_top] > 0
# equal weight allocations
elligible = elligible.multiply(1./elligible.sum(1), axis=0)
# downside protection
weights = pd.DataFrame(0.,index=elligible.index, columns=prices.columns)
if cash_proxy == 'CASHX' :
    weights[cash_proxy] = 0
    prices[cash_proxy] = 1.
weights[symbols] = (elligible * absolute_momentum_rule).dropna()
weights[cash_proxy] += 1 - weights[symbols].sum(axis=1)
# backtest
p_value, p_holdings, p_weights = backtest(prices, weights, 10000., offset=0, commission=10.)
p_value.plot(figsize=(15,10), grid=True)
# -
# algo stats
ffn.calc_perf_stats(p_value).display()
show_return_table(p_value)
df = monthly_return_table (p_value)
frame = df['Annual Returns'].to_frame()
frame['positive'] = df['Annual Returns'] >= 0
frame['Annual Returns'].plot(figsize=(15,10),kind='bar',color=frame.positive.map({True: 'g', False: 'r'}), grid=True)
# # STEP BY STEP BACKTEST
def talib_transform(df, function, lookback):
    """Apply a single-series transform column-wise to a price DataFrame.

    df: DataFrame of ETF prices (daily, monthly, ..)
    function: callable taking (series, lookback), e.g. talib.MA
    lookback: no of lookback periods (days, months, ..)
    example: transform = talib_transform(df, talib.MA, 21)

    Returns the transformed DataFrame with leading NaN rows dropped, or
    None (after printing the error) if the transform fails.
    """
    # make sure that the TA-Lib function only requires one price series!
    try:
        return pd.DataFrame({etf: function(df[etf], lookback) for etf in df.columns},
                            columns=df.columns,
                            index=df.index).dropna()
    except Exception as exc:
        # The original bare ``except:`` blamed every failure on a shape
        # error; report the actual exception instead (still best-effort,
        # preserving the print-and-return-None behaviour).
        print('talib_transform failed: %r' % (exc,))
def _apply_RAA (self, context, allocations, cash_proxy, *args):
    # NOTE(review): this looks like a zipline-style rebalance hook carried
    # over from another project — ``np`` and ``record`` are not imported in
    # this notebook, so calling it here would raise NameError; confirm
    # before use.
    excess_returns = context.algo_data['EMOM']
    # +0.5 weight factor per asset when its excess momentum is positive
    tmp1 = [0.5 if excess_returns[asset] > 0 else 0. for asset in allocations.index]
    prices = context.algo_data['price']
    MA = context.algo_data['smma']
    # +0.5 weight factor per asset when price is above its moving average
    tmp2 = [0.5 if prices[asset] > MA[asset] else 0. for asset in allocations.index]
    # downside-protection factor: 0.0, 0.5 or 1.0 per asset
    dpf = pd.Series([x + y for x, y in zip(tmp1, tmp2)], index=allocations.index)
    new_allocations = allocations * dpf
    # park the unallocated remainder in the cash proxy
    new_allocations[cash_proxy] = new_allocations[cash_proxy] + (1 - np.sum(new_allocations))
    record('BOND EXPOSURE', new_allocations[cash_proxy])
    return new_allocations
# ## Allocations
#
# The RAA Balanced allocation (if fully allocated) is as follows:
#
# MTUM US Momentum 10%
# IWD US Large Cap Value 10%
# EFA International Equities 10%
# EFV International Value 10%
# VNQ US Real Estate 20%
# DBC Commodities 20%
# IEF Int-Term US Treasuries 20%
# +
# Target RAA Balanced weights (sum to 1.0 when fully allocated; see the
# markdown table above), plus the same data-download pipeline as before.
allocations = {'MTUM':0.1,'IWD':0.1,'EFA':0.1,'EFV':0.1,'VNQ':0.2,'DBC':0.2,'IEF':0.2,'BIL':0.}
symbols =['MTUM','IWD','EFA','EFV','VNQ','DBC','IEF']
cash_proxy = 'CASHX'
risk_free = 'BIL'
rs_lookback = 12
risk_lookback = 12
tx_lookback = 12
n_top = 7
# get data
tickers = symbols.copy()
if cash_proxy != 'CASHX' :
    tickers = list(set(tickers + [cash_proxy]))
if isinstance(risk_free, str) :
    tickers = list(set(tickers + [risk_free]))
data = pd.DataFrame (columns=tickers)
# Define which online source one should use
data_source = 'yahoo'
# We would like all available data from 01/01/2000 until today.
start_date = '1986-01-01'
end_date = datetime.datetime.today().strftime('%Y-%m-%d')
# User pandas_reader.data.DataReader to load the desired data. As simple as that.
multindexed = web.DataReader(tickers, data_source, start_date, end_date)
data = multindexed['Adj Close'].sort_index(ascending=True)
inception_dates = pd.DataFrame([data[ticker].first_valid_index() for ticker in data.columns],
                               index=data.keys(), columns=['inception'])
# -
inception_dates
data[:3]
prices = data.copy().dropna()
prices[:5]
# month-end sampling, same as the earlier backtests
end_points = endpoints(period='M', trading_days=prices.index)
prices_m = prices.loc[end_points]
prices_m[:3]
[a[1] for a in allocations.items()]
# broadcast the fixed target weights across every month-end date
elligible = pd.DataFrame(allocations, index=prices_m.index, columns=symbols)
elligible[:3]
rebalance_dates = prices_m.index
# 1. At the close on the last trading day of the month, compare the following for each asset class: (1) 12-month return versus the 12-month return of BIL (short-term US Treasuries), and (2) closing price versus the 12-month moving average.
# 12 month returns
returns = prices_m[symbols].pct_change(rs_lookback)[rs_lookback:]
returns[:3]
# 12 month returns of risk_free etf (BIL)
risk_free_returns = prices_m[risk_free].pct_change(risk_lookback)[risk_lookback:]
risk_free_returns[:3]
# +
if isinstance(risk_free, int) :
    excess_returns = returns
else :
    risk_free_returns = prices_m[risk_free].pct_change(rs_lookback)[rs_lookback:]
    excess_returns = returns.subtract(risk_free_returns, axis=0).dropna()
# rule 1: half weight if the 12-month return beats the risk-free return
rule1 = lambda x: x > 0
tmp1 = excess_returns.applymap(rule1).astype(int) * 0.5
tmp1[:5]
# +
# calculate 12-month moving average.
df = prices_m[symbols]
function = talib.MA
lookback = tx_lookback
MA_prices = talib_transform(df,function,lookback)
MA_prices[:3]
# -
# rule2: half weight if the price is above its 12-month moving average
rule2 = lambda x: x > 0
tmp2 = (prices_m[symbols] - MA_prices).dropna().applymap(rule2).astype(int) * 0.5
tmp2[:5]
# combined downside-protection factor per asset: 0.0, 0.5 or 1.0
RAA_rule = (tmp1 + tmp2).dropna()
RAA_rule[:5]
# +
# allocations = 1
# x = 0.
# check if returns > risk_free_returns (x = x + 0.5)
# check if prices_m > MA_prices (x = x + 0.5)
# weights = allocations * x
# weight for CASH = 1 -
# -
# NOTE(review): ``absolute_momentum_rule`` is only defined in the earlier
# backtest cells; this line fails if the step-by-step section is run alone.
rebalance_dates = excess_returns.index.join(absolute_momentum_rule.index, how='inner')
rebalance_dates
# +
#####################################################################
# -
# scale the fixed target weights by the downside-protection factor
weights = (elligible * RAA_rule).dropna()
weights[:3]
# downside protection
if cash_proxy == 'CASHX' :
    weights[cash_proxy] = 1. - weights.sum(1)
weights[:10]
# +
# backtest
p_value, p_holdings, p_weights = backtest(prices, weights, 10000., offset=0, commission=10.)
p_value.plot(figsize=(15,10), grid=True)
# -
# # SCRATCHPAD
| MY_AS_STRATEGIES/[WIP] Robust Asset Allocation (RAA).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install emoji
import numpy as np
import pandas as pd
# Each row: column 0 = sentence, column 1 = emoji class label (0-4)
train = pd.read_csv('dataset/train_emoji.csv',header=None)
test = pd.read_csv('dataset/test_emoji.csv',header=None)
train.head()
import emoji as emoji
# +
#emoji.EMOJI_ALIAS_UNICODE
# -
# Map class labels to emoji shortcodes understood by emoji.emojize
emoji_dictionary = {"0": "\u2764\uFE0F", # :heart: prints a black instead of red heart depending on the font
                    "1": ":baseball:",
                    "2": ":beaming_face_with_smiling_eyes:",
                    "3": ":downcast_face_with_sweat:",
                    "4": ":fork_and_knife:",
                    }
emoji.emojize(":fork_and_knife:")
for e in emoji_dictionary.values():
    print(emoji.emojize(e))
data = train.values
# sanity check: show the first 10 sentences with their labelled emoji
for i in range(10):
    print(data[i][0],emoji.emojize(emoji_dictionary[str(data[i][1])]))
from keras.utils import to_categorical
# +
# Sentences (X) and one-hot encoded labels (Y) for train/test splits
XT = train[0]
Xt = test[0]
YT = to_categorical(train[1])
Yt = to_categorical(test[1])
print(XT.shape)
print(Xt.shape)
print(YT.shape)
print(Yt.shape)
# -
# Load 50-dimensional GloVe vectors into a word -> np.float32 array dict
embeddings = {}
with open('glove.6B.50d.txt',encoding='utf-8') as f:
    for line in f:
        values = line.split()
        word = values[0]
        coeffs = np.asarray(values[1:],dtype='float32')
        #print(word)
        #print(coeffs)
        embeddings[word] = coeffs
def getOutputEmbeddings(X):
    """Embed an array of sentences as a (len(X), 10, 50) GloVe tensor.

    NOTE: each X[ix] is deliberately split in place (string -> word list);
    the prediction cell below relies on this mutation via ' '.join(Xt[i]).
    Fixes over the original: sentences longer than 10 words are truncated
    instead of raising IndexError, and words missing from the global
    ``embeddings`` dict are left as zero vectors instead of raising KeyError.
    """
    embedding_matrix_output = np.zeros((X.shape[0], 10, 50))
    for ix in range(X.shape[0]):
        X[ix] = X[ix].split()
        for jx in range(min(len(X[ix]), 10)):
            word = X[ix][jx].lower()
            if word in embeddings:
                embedding_matrix_output[ix][jx] = embeddings[word]
    return embedding_matrix_output
emb_XT = getOutputEmbeddings(XT)
emb_Xt = getOutputEmbeddings(Xt)
print(emb_XT.shape)
print(emb_Xt.shape)
from keras.layers import *
from keras.models import Sequential
# Stacked-LSTM classifier over the (10, 50) embedded sentences, 5 classes
model = Sequential()
model.add(LSTM(64,input_shape=(10,50),return_sequences=True))
model.add(Dropout(0.4))
model.add(LSTM(64,input_shape=(10,50)))
model.add(Dropout(0.3))
model.add(Dense(5))
model.add(Activation('softmax'))
model.summary()
model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['acc'])
model.fit(emb_XT,YT,batch_size=32,epochs=40,shuffle=True,validation_split=0.1)
model.evaluate(emb_Xt,Yt)
# NOTE(review): Sequential.predict_classes was removed in newer Keras/TF;
# with a modern stack use np.argmax(model.predict(emb_Xt), axis=-1).
pred = model.predict_classes(emb_Xt)
# Show sentence, true emoji, predicted emoji (Xt[i] is a word list here —
# getOutputEmbeddings split it in place)
for i in range(30):
    print(' '.join(Xt[i]))
    print(emoji.emojize(emoji_dictionary[str(np.argmax(Yt[i]))]))
    print(emoji.emojize(emoji_dictionary[str(pred[i])]))
| emoji-class.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Plotting MODFLOW listing file budgets
#
# This notebook shows how to
#
# * make stacked bar chart summaries of MODFLOW water budgets by stress period, including global budgets and budgets for advanced stress packages (SFR, Lake, etc).
# * make stacked bar charts of net fluxes for each variable
# * plot time series of individual terms (e.g. model packages, or advanced stress package variables)
#
from pathlib import Path
from mfexport.listfile import plot_list_budget, get_listfile_data, plot_budget_summary, plot_budget_term
# #### Example MODFLOW-NWT model with monthly stress periods
# +
# MODFLOW-NWT listing file for the Little Plover River inset model
listfile = Path('data/lpr/lpr_inset.list')
model_name = listfile.stem
model_start_date='2011-01-01'
output_path = 'output'
# -
# #### Example MODFLOW 6 model with biannual stress periods
# +
# MODFLOW 6 listing file for the Shellmound test model
mf6_listfile = Path('../mfexport/tests/data/shellmound/shellmound.list')
# BUG FIX: this was ``listfile.stem`` (the NWT model's path), which named
# this model 'lpr_inset' instead of 'shellmound'.
mf6_model_name = mf6_listfile.stem
mf6_model_start_date='1998-04-01'
output_path = 'output'
# -
# ### Parse the listing file budget to a dataframe
# * no ``budgetkey`` argument returns the global mass balance
# * alternatively, use an identifying ``budgetkey`` (text string from the listing file) to get the terms for an advanced stress package
# Global mass balance of the NWT model, indexed by datetime
df = get_listfile_data(listfile=listfile, model_start_datetime=model_start_date)
df.head()
# Global mass balance of the MODFLOW 6 model
mf6_df = get_listfile_data(listfile=mf6_listfile, model_start_datetime=mf6_model_start_date,
                           )
mf6_df.head()
# #### Get an advanced stress package budget
# * in this case, for the SFR package
# * this requires a package budget to be written to the listing file (MODFLOW 6)
sfr_df = get_listfile_data(listfile=mf6_listfile, model_start_datetime=mf6_model_start_date,
                           budgetkey='SFR BUDGET')
sfr_df.head()
# ### Basic summary of MODFLOW water balance
# Stacked in/out bar chart of the global budget, one bar per stress period
plot_budget_summary(df, title_prefix='Little Plover example',
                    xtick_stride=1)
# ### Plot just the net fluxes for each component
# * add a secondary axis with other units
#
# Note: `model_length_units` and `model_time_units` are needed to convert units to the secondary axis units.
# NOTE(review): model_time_units='time' looks like a placeholder — confirm
# the intended unit (e.g. 'days') for the mgal/day conversion.
plot_budget_summary(df, title_prefix='Little Plover example', term_nets=True,
                    model_length_units='feet', model_time_units='time',
                    secondary_axis_units='mgal/day')
# ### Plot a subset of results
#
# This can be useful for models with many stress periods
plot_budget_summary(df, title_prefix='Little Plover example',
                    xtick_stride=2,
                    plot_start_date='2011-05', plot_end_date='2011-09')
# ### Plot a budget term
# Two plots are produced
# * absolute values, optionally with secondary axis as above
# * as a fraction of model or advanced stress package (e.g. SFR) budget
# Time series of SFR runoff inflows (absolute and as a budget fraction)
plot_budget_term(sfr_df, 'RUNOFF_IN', title_prefix='Shellmound SFR')
# #### Any column in the listing file budget dataframe (``df``) can be plotted,
# by specifying the first part of the column name (without the `_IN` or `_OUT` at the end)
df.columns
# #### For example
# BUG FIX: ``df`` holds the Little Plover model's budget, so the title was
# wrong ('Shellmound SFR' was copied from the cell above).
plot_budget_term(df, 'WELLS', title_prefix='Little Plover example')
# ### Plot term by stress period instead of time
#
# Can be useful for models with long spin-up periods that obscure shorter periods of interest when whole simulation time is plotted.
plot_budget_term(sfr_df, 'RUNOFF_IN', title_prefix='Shellmound SFR',
                 plot_start_date=None, plot_end_date=None,
                 datetime_xaxis=False)
# ### Plot mass balance error
# ('discrepancy' was misspelled in the displayed title)
plot_budget_term(df, 'PERCENT_DISCREPANCY', title_prefix='Mass Balance',
                 title_suffix='discrepancy')
# ### Macro to plot everything to PDFs
# * budget summary (in/out and net)
# * timeseries of budget terms for each package, and within each advanced stress package
# Export summary and per-term time-series plots as PDFs under output_path
plot_list_budget(listfile=mf6_listfile, output_path=output_path,
                 model_start_datetime='1998-04-01')
| Examples/plotting_listing_file_budgets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JoseAugustoVital/Decision-Score-MarketPlace/blob/main/decision_score.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="f4nBMawrfyQ9"
# # ***UNIVERSIDADE FEDERAL DO MATO GROSSO DO SUL***
# # Análise de dados para aumentar nível de satisfação dos clientes de um marketplace utilizando árvore de decisão.
# **TRABALHO 3 - INTELIGÊNCIA ARTIFICIAL 2021/1**
# __________________________________________________
#
# **Aluno:**
#
# Nome: **<NAME>**
#
#
# + id="avPdDxxTjYu2"
# INÍCIO DO ESTUDO
# + [markdown] id="ZUR9p8AcjKE3"
# **Importação do sistema operacional**
# + id="drhV6bHNjHb3"
import os
# + [markdown] id="zzvcMJWdjcW4"
# **Determinação do acesso ao diretório**
# + id="V2H548yrnSba"
# Point the working directory at the Drive datasets folder.
# NOTE(review): on a fresh Colab runtime this chdir runs before the mount
# below and would fail — confirm the intended cell order.
os.chdir('/content/drive/MyDrive/datasets')
# + colab={"base_uri": "https://localhost:8080/"} id="jPZkdEl2nU4H" outputId="b015ba36-d099-4729-b31e-545cf611b934"
# Mount Google Drive into the Colab filesystem
from google.colab import drive
drive.mount('/content/drive')
# + id="n9qQeOJ3nXyZ" colab={"base_uri": "https://localhost:8080/"} outputId="ad44cee1-a0d6-4797-a502-dc00534d40b6"
# !pwd
# + id="X8IPY4nqnaeS" colab={"base_uri": "https://localhost:8080/"} outputId="805b434f-a446-41a9-c668-44c79a8736c1"
# !ls
# + [markdown] id="nWdV2dWYjiKs"
# **Importação das bibliotecas Pandas e Numpy**
# + id="BlRCrISsoAW1"
import pandas as pd
import numpy as np
# + [markdown] id="ic3pDpAMjo_o"
# **Leitura dos tabelas .csv**
# + id="zRiA7d0foUWQ"
# Load the nine Olist marketplace tables (customers, geolocation, order
# items, payments, reviews, orders, products, sellers, category names).
tabela_cliente = pd.read_csv('olist_customers_dataset.csv')
tabela_localizacao = pd.read_csv('olist_geolocation_dataset.csv')
tabela_pedido = pd.read_csv('olist_order_items_dataset.csv')
tabela_pagamento = pd.read_csv('olist_order_payments_dataset.csv')
tabela_review = pd.read_csv('olist_order_reviews_dataset.csv')
tabela_entrega_pedido = pd.read_csv('olist_orders_dataset.csv')
tabela_descricao_produto = pd.read_csv('olist_products_dataset.csv')
tabela_vendedor = pd.read_csv('olist_sellers_dataset.csv')
tabela_categoria_traduzido = pd.read_csv('product_category_name_translation.csv')
# + [markdown] id="9A4ikaMnkIcy"
# **Checagem dos 5 primeiros elementos de cada tabela**
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="vhyWCLncrGX1" outputId="3249bb2d-d868-4c02-b4da-db2b97e486bd"
# Preview the first rows of each loaded table
tabela_cliente.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="Z1lQrW3qsJJ_" outputId="b3d34a65-9082-4a50-de97-9acd9a382c14"
tabela_localizacao.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="3NjCvWr3sK8q" outputId="3efba991-93b7-44a4-d842-45123bdea64b"
tabela_pedido.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="p8B0Y0QZsQSP" outputId="9ddd178a-0f65-4790-bd70-b1b0ecba047a"
tabela_pagamento.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="PiEeBr0SsV09" outputId="5a4101b3-2a6c-41bc-8826-03473b3da038"
tabela_review.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 217} id="HFcJGmwZsYe_" outputId="d3343b58-8ddc-4975-a542-3058b891dfb2"
tabela_entrega_pedido.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="nUGAv-KksgHu" outputId="f464514f-c747-4bff-f44a-a9dc9895eac7"
tabela_descricao_produto.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="p2RUDHKfsk7e" outputId="f973414b-2fe8-4c3c-ff1b-2a300b354554"
tabela_vendedor.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 197} id="0nGneeeCsrpD" outputId="70c19788-9301-4344-8877-36dcf5097045"
tabela_categoria_traduzido.head()
# + [markdown] id="HMbzhNcBkO3n"
# **Início do processo de união das 9 tabelas disponibilizadas com a finalidade de produzir uma tabela resultante que possua os elementos mais importantes para a determinação do review_score. No primero merge realizado, unimos a tabela de clientes com as respectivas entregas dos pedidos usando o código individual de cada consumidor como parâmetro.**
# + colab={"base_uri": "https://localhost:8080/", "height": 460} id="X-g-qw1L2RDs" outputId="c5b83310-be17-46bb-f691-0c1dd52fc661"
# Preview: inner-join customers with their orders on customer_id
pd.merge(tabela_cliente, tabela_entrega_pedido, on=["customer_id"], how="inner")
# + [markdown] id="tCK8g_SMlBPg"
# **Processo de união com as demais tabelas disponibilizadas**
#
# **1 - (Clientes, Entregas)**
#
# **2 - (1, Pedidos)**
#
# **3 - (2, Pagamentos)**
#
# **4 - (3, Review)**
#
# **5 - (4, Vendedor)**
# + id="kMWNThPpt8Nq"
# Chain inner joins: customers -> orders -> items -> payments -> reviews -> sellers
test = pd.merge(tabela_cliente, tabela_entrega_pedido, on=["customer_id"], how="inner")
test = pd.merge(test, tabela_pedido, on=["order_id"], how="inner")
test = pd.merge(test, tabela_pagamento, on=["order_id"], how="inner")
test = pd.merge(test, tabela_review, on=["order_id"], how="inner")
test = pd.merge(test, tabela_vendedor, on=["seller_id"], how="inner")
# + [markdown] id="qmlrFnJHmvre"
# **Tabela Resultante**
#
# **Linhas: 118315**
#
# **Colunas: 31**
# + colab={"base_uri": "https://localhost:8080/", "height": 576} id="LC75YzkN7sQI" outputId="ede5d00e-f49c-42d8-ad09-c003b61d3a04"
# Display the merged table (118315 rows x 31 columns per the cell above)
test
# + [markdown] id="kXOr8zbKnCY1"
# **Segunda filtragem consiste em remover elementos que não possuem
# relação com a variável review_score**
# + id="rwYGaT5i8AJI"
# Drop identifier/timestamp/location columns that carry no signal for the
# review score.  These drops were commented out in the original, yet the
# following cells describe (and the per-column vector extraction assumes)
# the resulting 15-column table, so the drop is applied here in one call.
test = test.drop(columns=[
    "customer_unique_id",
    "customer_city",
    "customer_state",
    "order_status",
    "order_purchase_timestamp",
    "order_approved_at",
    "order_delivered_carrier_date",
    "order_delivered_customer_date",
    "order_estimated_delivery_date",
    "shipping_limit_date",
    "review_creation_date",
    "review_answer_timestamp",
    "seller_city",
    "seller_state",
    "review_comment_title",
    "review_comment_message",
])
# + [markdown] id="N1-zu58cnPwH"
# **Tabela Resultante após a remoção de atributos não prioritários para o nível de
# satisfação dos clientes**
#
# **Linhas: 118315**
#
# **Colunas: 15**
# + colab={"base_uri": "https://localhost:8080/", "height": 426} id="OCSpD4v29xwE" outputId="e956220e-c41b-4c23-a129-89aa958977a9"
test
# + [markdown] id="kpsBLuM_nlg9"
# **Inserindo cada atributo da tabela resultante em um vetor para melhor manipulação dos dados**
#
# + id="HZEvIcF4d1-j"
# Extract each column of interest from the merged table as a standalone
# numpy array (bracket indexing; same columns as the original attribute access).
vetor_cliente = np.array(test["customer_id"])
vetor_cepcliente = np.array(test["customer_zip_code_prefix"])
vetor_pedido = np.array(test["order_id"])
vetor_idpedido = np.array(test["order_item_id"])
vetor_produto = np.array(test["product_id"])
vetor_vendedor = np.array(test["seller_id"])
vetor_preco_produto = np.array(test["price"])
vetor_frete = np.array(test["freight_value"])
vetor_parcela = np.array(test["payment_sequential"])
vetor_tipopagamento = np.array(test["payment_type"])
vetor_pay = np.array(test["payment_installments"])
vetor_valorfinal = np.array(test["payment_value"])
vetor_review = np.array(test["review_id"])
vetor_score = np.array(test["review_score"])
vetor_cepvendedor = np.array(test["seller_zip_code_prefix"])
# + [markdown] id="0ItdyyMcn9CG"
# **Definindo um novo dataframe vazio**
# + colab={"base_uri": "https://localhost:8080/", "height": 31} id="nx2dYDC4iwvU" outputId="85d8066b-7484-4be6-cfc8-b35c8dc4da0d"
df = pd.DataFrame()
df
# + id="BCjEurLwbeX_"
# + [markdown] id="ZA_52lVkoJPf"
# **Definindo as colunas do novo dataframe e atribuindo para cada coluna, seu respectivo vetor de dados registrado anteriormente.**
# + colab={"base_uri": "https://localhost:8080/", "height": 426} id="Q18yi71okmxX" outputId="9ede9e94-7207-4b13-cbb2-d3b08455111b"
COLUNAS = [
    'Cliente',
    'CEP_Cliente',
    'Pedido',
    'id_Pedido',
    'Produto',
    'Vendedor',
    'Preco_produto',
    'Frete',
    'Parcela',
    'Tipo_pagamento',
    'Installments',
    'Valor_total',
    'ID_Review',
    'CEP_Vendedor',
    'Score'
]
# Data vectors in the exact same order as COLUNAS.
valores = [
    vetor_cliente, vetor_cepcliente, vetor_pedido, vetor_idpedido,
    vetor_produto, vetor_vendedor, vetor_preco_produto, vetor_frete,
    vetor_parcela, vetor_tipopagamento, vetor_pay, vetor_valorfinal,
    vetor_review, vetor_cepvendedor, vetor_score,
]
# Build the frame in one shot instead of creating empty columns and then
# assigning them one by one; column order follows COLUNAS.
df = pd.DataFrame(dict(zip(COLUNAS, valores)))
df
# + id="b4it0nDmZkLz"
# + [markdown] id="66MvCTZhojfD"
# **Impressão da coluna de clientes.**
# + colab={"base_uri": "https://localhost:8080/"} id="RObPztDlZuyy" outputId="36f1c393-cbdc-4d08-fdfc-50ef32bbb308"
df.Cliente
# + id="yZSj8AI3l9HN"
#for index, row in df.iterrows():
# if row['Score'] == 1:
# df.loc[index,'Classe'] = 'Pessimo'
# if row['Score'] == 2:
# df.loc[index,'Classe'] = 'Ruim'
# if row['Score'] == 3:
# df.loc[index,'Classe'] = 'Mediano'
# if row['Score'] == 4:
# df.loc[index,'Classe'] = 'Bom'
# if row['Score'] == 5:
# df.loc[index,'Classe'] = 'Otimo'
# + [markdown] id="TI83wjbsosb4"
# **Informações do dataframe**
#
# **Atributos, elementos não nulos, Tipo das variáveis da coluna**
# + colab={"base_uri": "https://localhost:8080/"} id="cgMwakkDqt69" outputId="7ddf5d80-951c-471b-c866-2d8ae6544bb8"
df.info()
# + [markdown] id="J6XCWwjoo-qR"
# **Agrupando os elementos do dataframe por consumidor**
# + colab={"base_uri": "https://localhost:8080/"} id="ms7mqyohr3Vi" outputId="2e2d84ec-56f3-471f-c1d9-fd52ed9c870f"
df.groupby(by='Cliente').size()
# + [markdown] id="NmOH3BXVpFXw"
# **Importando os métodos de árvore de decisão**
# + id="iZkAJqTcr-Jx"
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.model_selection import train_test_split
from sklearn import metrics
# + [markdown] id="D8vd_a8LpSZn"
# **Para simplificar os dados e evitar criar um dummy com esse dataframe, removemos todos os elementos não-numéricos para que o modelo seja capaz de realizar a execução. Solução encontrada para simplificar os atributos do tipo "Objeto" para tipos numéricos.**
# + colab={"base_uri": "https://localhost:8080/", "height": 426} id="AaSgVhO_oRXa" outputId="c4e95572-3809-46a2-e214-df604cc2659d"
# Strip every non-digit character from the hash-style id columns so they
# become purely numeric strings. regex=True is required: since pandas 2.0
# Series.str.replace defaults to LITERAL matching, so without it the pattern
# r'\D' would be searched verbatim and nothing would be replaced.
df['Cliente'] = df['Cliente'].str.replace(r'\D', '', regex=True)
df['Pedido'] = df['Pedido'].str.replace(r'\D', '', regex=True)
df['Produto'] = df['Produto'].str.replace(r'\D', '', regex=True)
df['Vendedor'] = df['Vendedor'].str.replace(r'\D', '', regex=True)
df['ID_Review'] = df['ID_Review'].str.replace(r'\D', '', regex=True)
df
# + [markdown] id="QnH0ZizKp8oS"
# **Realizamos o procedimento de remoção dos elementos não-numéricos para todas as colunas do tipo objeto com exceção do tipo de pagamento pois o tipo de pagamento se resume a poucas opções. Dessa forma, usamos a função get_dummies apenas para o tipo de pagamento**
#
# **Portanto, a coluna Tipo_pagamento se divide em quatro colunas com lógica booleana. As novas colunas são: Tipo_pagamento_boleto, Tipo_pagamento_credit_card, Tipo_pagamento_debit_card, Tipo_pagamento_voucher**
# + id="YKd_tWQkuQSg"
result_df = pd.get_dummies(df, columns=["Tipo_pagamento"])
# + [markdown] id="M3c9J8p3rKnq"
# **Resultado final do dataframe**
# + colab={"base_uri": "https://localhost:8080/", "height": 426} id="yb6A7z8vucBa" outputId="1a5a1f09-13ac-4e4a-8bcf-d9b76dcaeb8f"
result_df
# + [markdown] id="JitzjNP6rSOX"
# **Criação de um dataframe reserva para possíveis conclusões**
# + colab={"base_uri": "https://localhost:8080/", "height": 426} id="fhoaN5g41GrY" outputId="1a5fa314-cd2a-4dab-e077-3090c22825f0"
reserva = result_df
reserva
# + [markdown] id="rcQS41Cgrbyc"
# **Eliminando todas as linhas com nível 4 ou nível 5 de satisfação. Dessa forma, temos um dataframe com todos os dados, e um apenas com dados classificados com nível 3,2 ou 1 ou seja, mediano, ruim ou péssimo. (elementos que apresentam nível de insatisfação interessante para análise)**
# + colab={"base_uri": "https://localhost:8080/", "height": 426} id="zsJnHcd41PQg" outputId="f5129c76-19b2-49bc-c72d-476145ad78e4"
# Keep only the unsatisfied reviews. ~(Score > 3) mirrors the original
# drop() of rows with Score > 3 exactly (rows that fail the > 3 test stay).
reserva = reserva[~(reserva.Score > 3)]
reserva
# + [markdown] id="EvyVhc4SsCeK"
# **Processo de separação de treine/teste**
#
# **Proporções estabelecidas:**
#
# **70% treino**
#
# **30% teste**
# + id="qkxlrlPQuxEj"
X_train, X_test, y_train, y_test = train_test_split(result_df.drop('Score', axis=1), result_df['Score'], test_size=0.3)
# + [markdown] id="tAFvAFdUsbwN"
# **Número de amostras para cada processo**
# + colab={"base_uri": "https://localhost:8080/"} id="Nd5-MBfhvSIO" outputId="34471355-d94b-46bc-929e-c22a32e06475"
X_train.shape, X_test.shape
# + [markdown] id="9RscsBD7sj6B"
# **Número de targets para cada processo**
# + colab={"base_uri": "https://localhost:8080/"} id="JMLrmy6Kvjqp" outputId="fbc1b7c6-c877-4e67-fa70-beeb3051891f"
y_train.shape, y_test.shape
# + [markdown] id="g5AiSR7Nsq3-"
# **Criação do classificador**
# + id="9aWgIjFMvqFs"
cls = DecisionTreeClassifier()
# + [markdown] id="xzwBsel3sxQd"
# **Treinamento**
# + id="8DIWgvnEnoOU"
cls = cls.fit(X_train, y_train)
# + [markdown] id="X9vbLtpgszwX"
# **Vetor com as importancias de cada atributo para a determinação do review_score**
# + id="qFwlgWk90yrM" colab={"base_uri": "https://localhost:8080/"} outputId="9ca8d911-b0e8-408d-f960-4ccb06886ade"
cls.feature_importances_
# + id="UXGh4Az704BS" colab={"base_uri": "https://localhost:8080/", "height": 217} outputId="b962c481-629f-4c75-fc05-ed82d866134a"
df.head()
# + [markdown] id="pCkeQ72Is99r"
# **Para tornar o modelo mais visual, criamos um laço para a impressão dos pesos de cada atributo para determinar o score**
# + id="OdazSuQp1G5A" colab={"base_uri": "https://localhost:8080/"} outputId="6c0842fb-5736-4644-a949-2f874f6b6d55"
# The model was fitted on result_df WITHOUT the 'Score' target column
# (see the train_test_split above), so the labels must come from the same
# dropped view. The original zipped against result_df.columns, which still
# contains 'Score', shifting every label from that position onward.
for feature, importancia in zip(result_df.drop('Score', axis=1).columns,
                                cls.feature_importances_):
    print("{}:{:.1f}%".format(feature, importancia * 100))
# + [markdown] id="djvjXVRAtQci"
# **Vetor com predições para checagem do aprendizado**
# + id="yPLeiTs51Nrn" colab={"base_uri": "https://localhost:8080/"} outputId="08bf5075-67e8-4517-ae7b-9f0571863579"
result = cls.predict(X_test)
result
# + colab={"base_uri": "https://localhost:8080/"} id="4RFm4F3ZytpD" outputId="d4400103-0e8e-477d-e454-1e400dead1c3"
result_df.Score[118310]
# + [markdown] id="MBWWaHnUtYSp"
# **Representação das métricas de precisão e médias do modelo**
# + id="dV9I2VYiBlO8" colab={"base_uri": "https://localhost:8080/"} outputId="19cda1d3-6b20-4c4b-fc6f-f4d96552688b"
from sklearn import metrics
print(metrics.classification_report(y_test,result))
# + [markdown] id="cxzlSiMItexo"
# **Precisão total**
# + id="axNM8PZNJXgS" colab={"base_uri": "https://localhost:8080/"} outputId="bd5e2f7e-77a1-42bf-88ee-21a607c5ca70"
from sklearn.model_selection import cross_val_score
allScores = cross_val_score(cls, X_train, y_train , cv=10)
allScores.mean()
# + [markdown] id="jP4vlUzPtkYg"
# **Treinamento utilizando o dataframe reserva. (apenas com os níveis de satisfação abaixo da média, score <=3)**
# + [markdown] id="fAACLv_It3DG"
# **Split do dataframe em treino e teste (70%, 30%, respectivamente)**
# + id="_UJouks212AE"
X_train, X_test, y_train, y_test = train_test_split(reserva.drop('Score', axis=1), reserva['Score'], test_size=0.3)
# + [markdown] id="s556Ukh7uFhU"
# **Quantidade de amostras do treino**
# + colab={"base_uri": "https://localhost:8080/"} id="CVoF5-F117qX" outputId="2b6fb6c2-2463-44de-ccce-9e01460c9f24"
X_train.shape, X_test.shape
# + [markdown] id="aAihR-_juLqC"
# **Classificador**
# + id="zXUQqpiO2ANc"
clf = DecisionTreeClassifier()
# + [markdown] id="G1patiNzuQRc"
# **Treino**
# + id="7TlD0zq62ETT"
clf = clf.fit(X_train, y_train)
# + [markdown] id="vVYuvi6ouS0N"
# **Importancia de cada atributo para determinar o nível de satisfação dos consumidores**
# + colab={"base_uri": "https://localhost:8080/"} id="u92IM-hV2IC3" outputId="fc1a77cc-3fdf-4fcb-ac7c-457be0372f5a"
clf.feature_importances_
# + colab={"base_uri": "https://localhost:8080/"} id="-4kfOqzr2LLr" outputId="7c5b7d74-fa2b-4c04-b10e-80a0766ae092"
# Same alignment fix as the full-data run: the classifier was fitted on
# reserva WITHOUT 'Score', so drop it before pairing names with importances
# (zipping against reserva.columns shifted every label after 'Score').
for feature, importancia in zip(reserva.drop('Score', axis=1).columns,
                                clf.feature_importances_):
    print("{}:{:.1f}%".format(feature, importancia * 100))
# + [markdown] id="eI9iD4zjulSZ"
# # ANÁLISE DOS DADOS PROCESSADOS
#
# **Com o objetivo de aumentar o review_score desse marketplace, utilizamos o algoritmo de árvore de decisão para encontrar elementos que influenciam diretamente a aceitação e satisfação dos clientes, analisando desde o processo de despacho do produto, até a qualidade do atendimento e do produto final. Dessa forma, utilizamos estratégias de filtragem. Inicialmente eliminamos os elementos que, evidentemente, não apresentavam influencia sobre a nota registrada pelo cliente. Após esse passo inicial, simplificamos o tipo dos dados para facilitar o processamento do dataframe pelo algoritmo. Após o processo de preparar os dados para o aprendizado registramos os seguintes valores:**
#
# **Considerando todos as notas (Ótimo, bom, mediano, ruim, péssimo) :**
#
# Cliente:10.4%
#
# CEP_Cliente:11.0%
#
# Pedido:10.4%
#
# id_Pedido:0.8%
#
# Produto:9.4%
#
# Vendedor:8.0%
#
# Preco_produto:8.0%
#
# Frete:8.1%
#
# Parcela:0.3%
#
# Installments:3.7%
#
# Valor_total:8.6%
#
# ID_Review:11.4%
#
# CEP_Vendedor:7.7%
#
# Score:0.8%
#
# Tipo_pagamento_boleto:0.8%
#
# Tipo_pagamento_credit_card:0.2%
#
# Tipo_pagamento_debit_card:0.3%
#
# ------------------------------------
#
# **Considerando apenas avaliações com notas insatisfatórias (mediano, ruim, péssimo) :**
#
# Cliente:10.0%
#
# CEP_Cliente:11.1%
#
# Pedido:10.9%
#
# id_Pedido:0.8%
#
# Produto:9.5%
#
# Vendedor:8.3%
#
# Preco_produto:7.4%
#
# Frete:8.3%
#
# Parcela:0.2%
#
# Installments:3.6%
#
# Valor_total:8.7%
#
# ID_Review:11.6%
#
# CEP_Vendedor:7.6%
#
# Score:0.7%
#
# Tipo_pagamento_boleto:0.7%
#
# Tipo_pagamento_credit_card:0.2%
#
# Tipo_pagamento_debit_card:0.3%
#
# ---------------------
# **Portanto, com base nos resultados obtidos, o dono do marketplace deve se atentar aos seguintes parâmetros de sua logística:**
#
# **1 - Relação entre CEP_Cliente, Frete e CEP_Vendedor**
#
# **Esses atributos apresentaram importancia direta na nota registrada pelo cliente, dessa forma, algumas razões devem ser consideradas:**
#
# **-Problemas com a entrega.**
#
# **-Qualidade da entrega(tempo de entrega,comprometimento do produto durante o processo de transporte).**
#
# **-Alto preço do frete para determinadas regiões.**
#
# -----------------------------
# **2 - Produto**
#
# **O atributo produto apresentou importancia direta na insatisfação dos clientes. Portanto, deve-se considerar:**
#
# **-Qualidade ruim de determinados produtos.**
#
# **-O produto entregue não apresentar as caracteristicas do produto anunciado.**
#
# **-Entrega de produtos errados, problema de logística.**
#
# ---------------------------------
#
# **3 - Vendedor**
#
# **O atributo Vendedor apresentou importancia direta na insatisfação dos clientes. Portanto, deve-se considerar:**
#
# **-Qualidade duvidosa do atendimento por parte do vendedor.**
#
# **-Erro do vendedor em alguma etapa do processo em específico.**
#
# --------------------------
#
#
# # **CONCLUSÃO**
#
#
# **Portanto, é possível concluir que para o marketplace aumentar seu score, analisar problemas logísticos por parte do processo de transporte, aspectos do produto e os vendedores são os fatores mais importantes para entender o que se passa na empresa e solucionar o problema que está gerando insatisfação dos clientes desse e-commerce.**
#
#
#
#
#
#
| decision_score.ipynb |
# # Driver Assist Machine Learning Using RCNN-Masking
# ### What is Mask R-CNN:
# - R-CNN stands for "Regions with CNN features", CNN stands for "Convolutional Neural Network".
#
# - R-CNN grabs parts of an image (or region) as a bounding box, and computes each region for CNN features,
# it then classifies each region to determine what it is through ROI align, testing pixel by pixel to form the mask.
# R-CNN then takes the output from the ROI align and helps generate the bounding boxes and classifies the target to determine what it is.
#
# - Mask R-CNN goes through a process of pixel-level classification with convolutional neural networks to mask over cars.
import cv2
import numpy as np
import os
import sys
from soco import coco
from mrcnn import utils
from mrcnn import model as modellib
# This portion of the code is specifying the path to the appropriate directories, while also grabbing the weights for the pre-trained model.
#
# The mask_rcnn_coco.h5 file is a pre-trained dataset provided by matterport that act as weights for MS COCO.
#
# It is mask-RCNN trained for object detection.
# Project root and the directory where model logs are written.
dirMain = os.path.abspath("./")
dirModel = os.path.join(dirMain, "logs")
# BUG FIX: os.path.join(dirMain, "/coco/") discarded dirMain entirely because
# an absolute second component resets the join; use a relative component so
# the project's coco directory is actually added to the import path.
sys.path.append(os.path.join(dirMain, "coco"))
# Pre-trained MS COCO weights for Mask R-CNN.
path_Coco = os.path.join(dirMain, "mrcnn/mask_rcnn_coco.h5")
# A configuration object is required to make an inference for the Mask_RCNN instance.
#
# The configuration is set to specify the number of images per batch.
class Configure_coco(coco.CocoConfig):
    """Inference configuration for the COCO-pretrained Mask R-CNN.

    The effective batch size is GPU_COUNT * IMAGES_PER_GPU; both are 1
    because frames are run through the detector one at a time.
    NOTE(review): the base class comes from `from soco import coco` at the
    top of the file -- confirm this is the Mask R-CNN samples.coco module
    and not the unrelated `soco` (Sonos) package.
    """
    # Since we are running inference 1 image at a time, batch size is set to 1. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1
# Creating an object of class Configure_coco to configure the masking model.
nConfig = Configure_coco()
nConfig.display()
# MaskRCNN instance object created in inference mode since this mode is used to make estimations for a given image, the dirModel variable is the path to where the log messages will be stored.
mrcnn_model = modellib.MaskRCNN(
mode="inference", model_dir=dirModel, config=nConfig
)
# Load the weights that will be used to calculate the estimations, and assist in classifying the detected object in the frame.
mrcnn_model.load_weights(path_Coco, by_name=True)
# Classification types to compare to for the given trained model.
class_names = [
'BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush'
]
# This function applies a cyan coloured mask with a 50% opacity to the ROI detected in the source image.
def apply_mask(cyan_col, mask, source, transp=0.5):
    """Alpha-blend a solid colour onto `source` wherever `mask` == 1.

    cyan_col: per-channel colour values (same channel order as `source`).
    mask: 2-D array; pixels equal to 1 receive the blended colour.
    source: image array, modified in place and also returned.
    transp: blend weight of the colour (0.5 = half colour, half image).
    """
    for channel, colour_value in enumerate(cyan_col):
        blended = source[:, :, channel] * (1 - transp) + transp * colour_value
        source[:, :, channel] = np.where(mask == 1, blended, source[:, :, channel])
    return source
# Apply the mask, bounding box, and classification to the region of interest.
def mask_frame(source, region_interest, masks, class_ids, cls_names, scores):
    """Overlay mask, bounding box and class label for each detected obstacle.

    source: BGR image to annotate (modified in place and returned).
    region_interest: array of (y1, x1, y2, x2) boxes, one row per instance.
    masks: mask stack of shape (H, W, n_instances).
    class_ids: per-instance index into cls_names.
    cls_names: list of class-name strings.
    scores: per-instance confidence, or None.
    """
    # Number of instances found in ROI
    n_instances = region_interest.shape[0]
    if not n_instances:
        print('NO Instances FOUND in ROI')
    else:
        assert region_interest.shape[0] == masks.shape[-1] == class_ids.shape[0]
    # For each instance found apply mask, box, and label
    for i in range(n_instances):
        # Only class ids 1..14 ('person' .. 'bench') count as road obstacles.
        if class_ids[i] in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]:
            # Skip instances with an empty/zero bounding box.
            if not np.any(region_interest[i]):
                continue
            # Coordinates for region of interest
            y1, x1, y2, x2 = region_interest[i]
            # BUG FIX: use the cls_names parameter; the original read the
            # module-level class_names global and silently ignored the argument.
            label = cls_names[class_ids[i]]
            # Confidence score in relation to its classification
            score = scores[i] if scores is not None else None
            # "<class> <score>" caption when a (nonzero) score is available
            caption = '{} {:.2f}'.format(label, score) if score else label
            mask = masks[:, :, i]
            # Cyan colour for mask / bounding box / label, in BGR order
            cyan_col = (240, 252, 3)
            # Blend the mask onto the detected object (in place)
            source = apply_mask(cyan_col, mask, source)
            # Draw the bounding box from the ROI corner coordinates
            source = cv2.rectangle(source, (x1, y1), (x2, y2), cyan_col, 1)
            # Write the label just above the box's top-left corner
            source = cv2.putText(
                source, caption, (x1, y1), cv2.FONT_HERSHEY_COMPLEX, 0.7, cyan_col, 1
            )
    return source
# #### Capture Video Real-Time from Camera:
# Open the default camera (device index 0).
stream = cv2.VideoCapture(0)
# Get video capture size.
width = stream.get(cv2.CAP_PROP_FRAME_WIDTH)  # float value, converted to integer below when creating the writer
height = stream.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float value, converted to integer below when creating the writer
# Create VideoWriter object.
#
# 0x7634706d is the (*'MP4V') fourcc code; output keeps the capture resolution
# at a hard-coded 60 fps. NOTE(review): the OutputVideo/ directory must
# already exist or cv2.VideoWriter silently fails to write -- confirm.
video_output = cv2.VideoWriter('OutputVideo/output.mp4', 0x7634706d, 60.0, (int(width),int(height)))
# Start capturing footage frame by frame and apply the mask to each frame.
while True:
    # read in the stream whether it is a live camera feed or video footage
    is_streaming , frame = stream.read()
    if not is_streaming:
        # read() returns False when the stream ends or the device fails
        print("Finished stream, ending program")
        break
    # Run the model on the single frame; detect() returns one result dict
    # per input image with the following keys:
    #   rois: bounding boxes / regions of interest for objects detected
    #   masks: per-instance masks
    #   class_ids: indices into the classification list
    #   scores: confidence score per detection
    get_frame_results = mrcnn_model.detect([frame], verbose=1)
    detected_frame = get_frame_results[0]
    # Draw boxes, masks and labels onto the frame
    masked_image = mask_frame(frame, detected_frame['rois'], detected_frame['masks'], detected_frame['class_ids'],
                              class_names, detected_frame['scores'])
    # Write to the video output and show a live preview window
    video_output.write(masked_image)
    cv2.imshow("Driver Assist Machine Learning RCNN Mask",masked_image)
    # Press 'q' to exit the program early; the output video file is still
    # finalized below when the writer is released
    if(cv2.waitKey(1) & 0xFF == ord('q')):
        break
# Release the stream and the video writer, then close the preview window.
stream.release()
video_output.release()
cv2.destroyWindow("Driver Assist Machine Learning RCNN Mask")
| DAML_RCNN_Mask_RealTime.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="d925a804d0af50242aeb7738eb4f9dd5771e2547"
# # Predicting the price of BMW cars
# + [markdown] _uuid="3a7351dc6490c2da8c91bd90730f332e35914cc9"
# **We use GradientBoostRegressor and RandomForestRegressor to estimate the price of a BMW car
# The steps included in the kernel are**
# 1. Preprocessing the data. This is mostly related to categorising and transforming the data
# 2. Splitting the data into testing and training data
# 3. Prediction
#
# **Point of Interest**
# We have one problem in our data. Splitting the data into test/train sets will also result into the fact that we may miss some car models in either the test or train sets. To address this we replicate the car models that have a minimum car count. This results in better trained/tested models as shown along the feature importance plots.
# And final run of the code uses the 8 features as mentioned by the description of the dataset. Surprisingly these features are of lesser value and are not good for descrimination.
#
# <a href="#pre"> Preprocessing </a>
#
# <a href="#graphs"> Graphs </a>
#
# <a href="#prediction">Prediction</a>
#
# <a href="#replicate">Replication of low count car models</a>
#
# <a href="#replicate_prediction">Prediction</a>
#
# <a href="#desc_features_run">8 Features based prediction</a>
#
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from pprint import pprint
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
import os
print(os.listdir("../input"))
# Any results you write to the current directory are saved as output.
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
df = pd.read_csv('../input/bmw_pricing_challenge.csv', delimiter=',')
# + [markdown] _uuid="c057bd9cedddbb9392886b96f0da594ded5f34d8"
# # Have a look at some basics
# + _uuid="ea84798b0d3f6476a566c6eb37adfd465250e130"
# Check for any nulls
print(df.isnull().sum())
# Show data types
print(df.dtypes)
# + [markdown] _uuid="9d9b442ff8c070ae7e36fcbd9a011d0efec42928"
# <a id='pre'></a>
# # Data Pre-processing
# + _uuid="73a551a492d7fe55511d2df5fb88f04ec30e0a47"
# Drop columns that are not used as features.
df.drop(["maker_key", "sold_at"], axis=1, inplace=True)
# Keep the original model names for plotting before they are integer-encoded.
car_models = df.model_key.copy()
# Build {label -> integer code (1..n)} mappings for each categorical column.
model_labels = df['model_key'].astype('category').cat.categories.tolist()
model_labels_dict = {k: v for k, v in zip(model_labels, list(range(1, len(model_labels) + 1)))}
fuel_labels = df['fuel'].astype('category').cat.categories.tolist()
fuel_labels_dict = {k: v for k, v in zip(fuel_labels, list(range(1, len(fuel_labels) + 1)))}
paint_labels = df['paint_color'].astype('category').cat.categories.tolist()
paint_labels_dict = {k: v for k, v in zip(paint_labels, list(range(1, len(paint_labels) + 1)))}
type_labels = df['car_type'].astype('category').cat.categories.tolist()
type_labels_dict = {k: v for k, v in zip(type_labels, list(range(1, len(type_labels) + 1)))}
# BUG FIX: apply each mapping to its own column only. The original called
# df.replace(mapping) on the WHOLE frame, which would remap a matching string
# appearing in any other column (e.g. a model name equal to a paint colour).
df['model_key'] = df['model_key'].replace(model_labels_dict)
df['fuel'] = df['fuel'].replace(fuel_labels_dict)
df['paint_color'] = df['paint_color'].replace(paint_labels_dict)
df['car_type'] = df['car_type'].replace(type_labels_dict)
df['model_key'] = df['model_key'].astype('category')
# Convert registration_date like "2015-03-01" to the integer 20150301
# (literal replace, hence regex=False).
df['registration_date'] = df['registration_date'].str.replace("-", "", regex=False).astype(int)
print(df.dtypes)
# + [markdown] _uuid="be4e2cf36bbbe0d40f0499f19e2c4aa396b155ed"
# <a id='graphs'></a>
# # A visual peek into DATA
# + _uuid="4719204b3477cc1cb2e47a8c41b10cf62a155e70"
# Data visualizations/Insights
# NOTE: seaborn and matplotlib are already imported above; these re-imports
# are harmless notebook leftovers.
import seaborn as sns
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(20,15))
ax = fig.gca()
# Per-model counts. NOTE: `c` is a module-level name reused later by
# replicate_low_count() to find rare models -- do not rename locally.
c = car_models.value_counts()
c.sort_values(ascending=False).plot.bar(width=0.5,edgecolor='k',align='center',linewidth=1)
plt.xlabel('Models',fontsize=10)
plt.ylabel('Counts',fontsize=10)
ax.tick_params(labelsize=10)
plt.title('BMW car models',fontsize=10)
plt.grid()
plt.ioff()
plt.show()
# Pairwise correlation heatmap of the (now numeric) features.
fig = plt.figure(figsize = (10,10))
ax = fig.gca()
sns.heatmap(df.corr(), annot=True, fmt=".2f")
plt.title("Correlation",fontsize=5)
plt.show()
# Density plot per column.
fig = plt.figure(figsize = (10,15))
ax = fig.gca()
df.plot(ax=ax,kind='density',subplots=True,sharex=False)
plt.title("Density",fontsize=5)
plt.show()
# =============================================================================
# fig = plt.figure(figsize = (20,20))
# ax = fig.gca()
# sns.pairplot(data=df[0:100],hue="price") # pair plot a subset. Takes too long for the whole data
# plt.title("Pair plot",fontsize =10)
# plt.show()
#
# =============================================================================
# + [markdown] _uuid="62d27a4eafe4d0dcac1ebf79c5dea742de3a9410"
# <a id='regression_func'></a>
# # Regression
# + _uuid="0bb7e9543a3973839b4d3eefaf9b005e48301d10"
def feature_importance_plots(regressor, X_train):
    """Bar-plot a fitted regressor's feature importances, sorted descending.

    regressor: fitted estimator exposing feature_importances_.
    X_train: frame whose columns name the features, in fit order.
    """
    importances = pd.DataFrame(
        regressor.feature_importances_,
        index=X_train.columns,
        columns=['importance'],
    )
    importances.sort_values('importance', ascending=False).plot(kind='bar')
    plt.show()
def do_prediction(df, stratify):
    """Train and evaluate GradientBoosting and RandomForest price regressors.

    df: feature frame including a 'price' column (NOTE: 'price' is dropped
        from df IN PLACE -- callers pass copies for this reason).
    stratify: when True and a 'model_key' column exists, stratify the split
        on the car model so every model appears in both train and test.
    Prints each model's parameters, R^2 and RMSE, and plots importances.
    """
    price = df.price.copy()
    df.drop(['price'], inplace=True, axis=1)
    if stratify and 'model_key' in df:
        X_train, X_test, y_train, y_test = train_test_split(
            df, price, test_size=0.25, stratify=df['model_key'], random_state=5811)
    else:
        X_train, X_test, y_train, y_test = train_test_split(
            df, price, test_size=0.25, random_state=5811)
    # NOTE(review): loss='ls' was renamed 'squared_error' in scikit-learn 1.0
    # and removed in 1.2 -- update if the environment's sklearn is newer.
    gbr = GradientBoostingRegressor(loss='ls', n_estimators=150, max_depth=7,
                                    max_leaf_nodes=9, random_state=5811)
    _fit_and_report('GBR', gbr, X_train, X_test, y_train, y_test)
    forest_reg = RandomForestRegressor(n_estimators=150, min_samples_split=3,
                                       random_state=5811)
    _fit_and_report('RFR', forest_reg, X_train, X_test, y_train, y_test)

def _fit_and_report(name, regressor, X_train, X_test, y_train, y_test):
    """Fit one regressor and print its params, R^2 and RMSE; plot importances."""
    print('Parameters currently in use:\n')
    pprint(regressor.get_params())
    regressor.fit(X_train, y_train)
    predicted_price = regressor.predict(X_test)
    print('%s R squared: %.4f' % (name, regressor.score(X_test, y_test)))
    lin_mse = mean_squared_error(predicted_price, y_test)
    lin_rmse = np.sqrt(lin_mse)
    print('RMSE: %.4f' % lin_rmse)
    feature_importance_plots(regressor, X_train)
# + [markdown] _uuid="f32b82be9e6ebc6a10ead435a0735b982419f771"
# <a id='prediction'></a>
# # Prediction
# + _uuid="29cd0ab2e02e98f4f936ff8c0ac105a9f0289aaf"
df_copy = df.copy()
do_prediction(df.copy(),False)
# + [markdown] _uuid="794105d13ce497e93a6dada4ed2bb38feef0a219"
# <a id='replicate'></a>
# # Replicating car models that have a low count in our data
# + _uuid="b9fb20d78bc7dfbf046780ad9f7378e210f8b8ec"
def replicate_low_count(df_copy):
    """Duplicate rows of car models with at most one sample.

    Ensures rare models can appear in both halves of a stratified
    train/test split. Relies on the module-level `c` (per-model counts)
    and `model_labels_dict` (model name -> integer code).
    Returns a new, longer DataFrame; the input is not mutated.
    """
    # Models whose sample count is at the bare minimum (<= 1).
    min_counted = c <= 1
    print("DataFrame size before append:", len(df_copy))
    # FIX: Series.iteritems() and DataFrame.append() were removed in
    # pandas 2.0; use items() and pd.concat() instead.
    for model_name, is_rare in min_counted.items():
        if is_rare:
            code = model_labels_dict[model_name]
            replica = df_copy[df_copy['model_key'] == code].copy()
            if len(replica) > 1:
                # Keep one representative row priced at the group mean
                # (can happen when the count threshold is raised above 1).
                mean_price = replica["price"].mean()
                replica = replica.iloc[[0]].copy()
                replica["price"] = mean_price
            # Append two replicas of the rare model.
            for _ in range(2):
                df_copy = pd.concat([df_copy, replica], ignore_index=True)
    print("DataFrame after append:", len(df_copy))
    return df_copy
# + [markdown] _uuid="60061bd95573ca4a7e0db47fc9a75a065a03a5ff"
# <a id='replicate_prediction'></a>
# # Replicate prediction
# + _uuid="f9ac9dc9a3cd5c5988d95ecb415d10b434c160a8"
df_copy=replicate_low_count(df_copy)
do_prediction(df_copy,True)
# + [markdown] _uuid="958f9032f114ecae0959743a365dfcf01c0b88f0"
# <a id="desc_features_run"></a>
# # 8 Features based prediction
# + _uuid="8f11f4c52509d649030440879dd24567c8d8f373"
# Now lets use the 8 important features as described in the description file of the dataset
# NOTE(review): both drop(...) calls below mutate their frame IN PLACE, and
# do_prediction additionally drops 'price' from its argument in place -- after
# this cell `df` no longer holds the full feature set.
df_copy = df.copy()
df.drop(["engine_power",'mileage','paint_color',"registration_date","model_key","car_type","fuel"],inplace=True,axis=1)
do_prediction(df,False)
# Same run, but with rare models replicated and a stratified split.
df_copy = replicate_low_count(df_copy)
df_copy.drop(["engine_power",'mileage','paint_color',"registration_date","model_key","car_type","fuel"],inplace=True,axis=1)
do_prediction(df_copy,True)
| caseStudy/.ipynb_checkpoints/bmw-regressors-price-prediction-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import numpy as np
import matplotlib.pyplot as plt
from synthetic import simulate_var
from models.cmlp import cMLP, cMLPSparse, train_model_ista, train_unregularized
# NOTE(review): this import shadows the cmlp train_model_ista imported on the
# previous line -- only the cLSTM trainer remains reachable afterwards.
# Alias one of them (e.g. "as train_clstm_ista") if both are needed.
from models.clstm import cLSTM, train_model_ista
# For GPU acceleration (requires a CUDA-capable device).
device = torch.device('cuda')
import pandas as pd
# NOTE(review): hard-coded absolute Windows path -- make it relative or
# configurable before sharing this notebook.
df = pd.read_csv('C:/Users/chanyoung/Desktop/CausalDiscoveryToolbox-master/CausalDiscoveryToolbox-master/sachs.csv')
data = df.values
# Add a leading batch dimension: X has shape (1, rows, cols) of the CSV,
# as float32 on the GPU.
X = torch.tensor(data[np.newaxis], dtype=torch.float32, device=device)
X.shape
# Plot the entire series next to a zoom on the first 50 time points.
fig, axarr = plt.subplots(1, 2, figsize=(16, 5))
axarr[0].plot(data)
axarr[0].set_xlabel('T')
axarr[0].set_title('Entire time series')
axarr[1].plot(data[:50])
axarr[1].set_xlabel('T')
axarr[1].set_title('First 50 time points')
plt.tight_layout()
plt.show()
# Set up model: one cLSTM over all columns of X, 100 hidden units, on GPU.
clstm = cLSTM(X.shape[-1], hidden=100).cuda(device=device)
| cs224w/example-clstm1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tf1]
# language: python
# name: conda-env-tf1-py
# ---
import umap
from sklearn.datasets import fetch_openml
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
sns.set(context="paper", style="white")
mnist = fetch_openml("mnist_784", version=1)
mnist.data[0][:4]
np.shape(mnist.data)
# +
# Load a previously fitted UMAP model from disk with pickle.
# SECURITY NOTE: pickle.load executes arbitrary code from the file --
# only load files you created yourself or fully trust.
import pickle
f_name = 'saving_example.sav'
# time passes
# FIX: use a context manager so the file handle is closed deterministically
# (the original open() was never closed).
with open(f_name, 'rb') as f:
    loaded_model = pickle.load(f)
print(type(loaded_model))
# <class 'umap.umap_.UMAP'>
# -
loaded_model._sparse_data
# Fresh, unfitted UMAP object that will be re-hydrated from the pickle.
reducer = umap.UMAP(random_state=42, verbose=True)
# +
#reducer.fit(mnist.data[:10])
# -
umap.__version__
# Recompute the smoothed-kNN normalisation terms from the loaded model's
# neighbour graph; smooth_knn_dist returns (sigmas, rhos).
sigmas, rhos = umap.umap_.smooth_knn_dist(loaded_model._knn_indices, loaded_model.n_neighbors, loaded_model.local_connectivity)
# Manually copy the fitted state of the pickled model into the fresh object
# so transform()/inverse_transform() work without re-fitting.
# NOTE(review): these underscore-prefixed attributes are umap-learn internals
# and vary between versions -- confirm against the installed umap version.
reducer.embedding_ = loaded_model.embedding_
reducer.graph_ = loaded_model.graph_#.toarray()
reducer._sparse_data = loaded_model._sparse_data
reducer._raw_data = loaded_model._raw_data
reducer._a = loaded_model._a
reducer._b = loaded_model._b
reducer.n_epochs = loaded_model.n_epochs
reducer.metric = loaded_model.metric
reducer.repulsion_strength = loaded_model.repulsion_strength
reducer._initial_alpha = loaded_model._initial_alpha
reducer.negative_sample_rate = loaded_model.negative_sample_rate
reducer._metric_kwds = loaded_model._metric_kwds
reducer._sigmas = sigmas
reducer._rhos = rhos
reducer._input_hash = loaded_model._input_hash
reducer._small_data = loaded_model._small_data
reducer._rp_forest = loaded_model._rp_forest
reducer._n_neighbors = loaded_model._n_neighbors
reducer._distance_func = loaded_model._distance_func
reducer._search_graph = loaded_model._search_graph
np.shape(mnist.data[:10])
# +
#z = reducer.transform(mnist.data[:10])
# -
trans = reducer.inverse_transform(loaded_model.embedding_[:3])
plt.matshow(mnist.data[0].reshape(28,28))
plt.matshow(trans[0].reshape(28,28), vmin=3000)#, vmax = 1)
plt.colorbar()
plt.matshow(trans[2].reshape(28,28), vmin=2900)#, vmax = 1)
plt.colorbar()
plt.matshow(trans[2].reshape(28,28))#, vmin=2900)#, vmax = 1)
plt.colorbar()
z = reducer.transform(mnist.data)
| notebooks/06.0-neural-networks/starling_figs/.ipynb_checkpoints/mnist-test-loaded-model-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="zB3QrAkZS8j_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 343} outputId="a785030b-1f6a-4314-8484-7d0d233e1ba8"
# !pip install catboost
# + id="3arOHSJwCtNq" colab_type="code" colab={}
# Data loading for the product-recommendation stacking pipeline.
import pandas as pd
import numpy as np
from tqdm import tqdm
import copy
# + id="ap6qn2DXYGKn" colab_type="code" colab={}
from catboost import CatBoostClassifier
# NOTE(review): LabelEncoder is imported but never used in this chunk — confirm.
from sklearn.preprocessing import LabelEncoder
# + id="s3ZCXtzKDiDe" colab_type="code" colab={}
# Competition files: labeled training rows, unlabeled test rows, submission template.
train_ = pd.read_csv('Train.csv')
test_ = pd.read_csv('Test.csv')
submission_ = pd.read_csv('SampleSubmission.csv')
# + id="UP03quGLDrWS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="d78f3056-ab8c-4e53-de09-cfd85c6a8a05"
print(train_.shape)
train_.head()
# + id="aZxRKLriDuJD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 241} outputId="1f5cb897-c16e-495d-f25f-274641047b8f"
print(test_.shape)
test_.head()
# + id="lkgKSHK3Ef31" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} outputId="1daebc33-984d-415a-a291-dd6a6137843c"
print(submission_.shape)
submission_.head()
# + id="mcl5aCXtYKgw" colab_type="code" colab={}
from sklearn.model_selection import KFold
def get_train_test_names(train_, test_, submission_):
    """Yield (train, test, submission, output_name) tuples for stacking.

    First yields five K-fold splits where each held-out row is expanded into
    one pseudo-test example per owned product (that product's flag switched
    off, so the model must predict it back); finally yields the full
    train/test pair for the main submission.
    """
    kf = KFold(n_splits=5, shuffle=False)
    for r, (train_index, test_index) in enumerate(kf.split(train_)):
        test = train_.iloc[test_index]
        X_test = []
        for v in test.values:
            info = v[:8]    # ID, join_date and demographic columns
            binary = v[8:]  # product-ownership flags
            index = [k for k, i in enumerate(binary) if i == 1]
            for i in index:
                # Copy the flag vector and hide product i.
                # (The original scanned all k in range(len(binary)) and acted
                # only when k == i — a no-op O(n) loop, removed here.)
                binary_transformed = list(binary)
                binary_transformed[i] = 0
                X_test.append(list(info) + binary_transformed)
        X_test = pd.DataFrame(X_test)
        X_test.columns = ['ID', 'join_date', 'sex', 'marital_status', 'birth_year', 'branch_code',
                          'occupation_code', 'occupation_category_code', 'P5DA', 'RIBP', '8NN1',
                          '7POT', '66FJ', 'GYSR', 'SOP4', 'RVSZ', 'PYUQ', 'LJR9', 'N2MW', 'AHXO',
                          'BSTQ', 'FM3X', 'K6QO', 'QBOL', 'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3']
        # Synthetic fold-local IDs: "<fold>_<row>".
        X_test['ID'] = [str(r)+'_'+str(i) for i in range(X_test.shape[0])]
        yield train_.iloc[train_index], X_test, submission_, '1_fold' + str(r) + '.csv'
    yield train_, test_, submission_, '1_main.csv'
# + [markdown] id="TufkmcXsYM9o" colab_type="text"
# ### Get folds
# + id="RLpJtQ7iYujP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="94eeba5f-88fa-46aa-a5b2-5d63fa2aa13f"
# For each fold (and finally the full data): build a row-expansion training
# set, fit CatBoost, score candidate products, and write a submission CSV.
for train, test, submission, name in get_train_test_names(train_, test_, submission_):
    np_data = []
    train_columns = train.columns
    # Expand each training row: for every owned product i, hide it and emit a
    # positive example for i plus negative examples for the unowned products.
    for v in tqdm(train.values):
        info = v[:8]
        binary = v[8:]
        index_n = [k for k, i in enumerate(binary) if i == 1]
        for i in index_n:
            for k in range(len(binary)):
                if (k not in index_n) or (k == i):
                    binary_0 = list(copy.copy(binary))
                    binary_0[i] = 0  # hide product i from the features
                    if k == i:
                        np_data.append(list(info) + binary_0 + [train_columns[8+k]] + [1])
                    else:
                        np_data.append(list(info) + binary_0 + [train_columns[8+k]] + [0])
    df_data = pd.DataFrame(np_data)
    df_data.columns = ['ID', 'join_date', 'sex', 'marital_status', 'birth_year', 'branch_code',
                       'occupation_code', 'occupation_category_code', 'P5DA', 'RIBP', '8NN1',
                       '7POT', '66FJ', 'GYSR', 'SOP4', 'RVSZ', 'PYUQ', 'LJR9', 'N2MW', 'AHXO',
                       'BSTQ', 'FM3X', 'K6QO', 'QBOL', 'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3', 'product_pred', 'target']
    # Test side: one candidate row per product the customer does not own yet.
    np_data_test = []
    answ_test = []  # NOTE(review): assigned but never used — dead variable?
    test_columns = test.columns
    for v in tqdm(test.values):
        info = v[:8]
        binary = v[8:]
        index_n = [k for k, i in enumerate(binary) if i == 1]
        for k in range(len(binary)):
            if k not in index_n:
                np_data_test.append(list(info) + list(binary) + [test_columns[8+k]])
    df_data_test = pd.DataFrame(np_data_test)
    df_data_test.columns = ['ID', 'join_date', 'sex', 'marital_status', 'birth_year', 'branch_code',
                            'occupation_code', 'occupation_category_code', 'P5DA', 'RIBP', '8NN1',
                            '7POT', '66FJ', 'GYSR', 'SOP4', 'RVSZ', 'PYUQ', 'LJR9', 'N2MW', 'AHXO',
                            'BSTQ', 'FM3X', 'K6QO', 'QBOL', 'JWFN', 'JZ9D', 'J9JW', 'GHYX', 'ECY3', 'product_pred']
    # Split the '/'-separated join_date into three numeric parts; the (x == x)
    # guard is a NaN check (NaN != NaN).
    df_data['date1'] = df_data['join_date'].apply(lambda x: int(x.split('/')[0]) if (x == x) else np.nan)
    df_data['date2'] = df_data['join_date'].apply(lambda x: int(x.split('/')[1]) if (x == x) else np.nan)
    df_data['date3'] = df_data['join_date'].apply(lambda x: int(x.split('/')[2]) if (x == x) else np.nan)
    df_data.drop('join_date', axis=1, inplace=True)
    df_data_test['date1'] = df_data_test['join_date'].apply(lambda x: int(x.split('/')[0]) if (x == x) else np.nan)
    df_data_test['date2'] = df_data_test['join_date'].apply(lambda x: int(x.split('/')[1]) if (x == x) else np.nan)
    df_data_test['date3'] = df_data_test['join_date'].apply(lambda x: int(x.split('/')[2]) if (x == x) else np.nan)
    df_data_test.drop('join_date', axis=1, inplace=True)
    # Presumably date3 is the join year, so date_diff is age at joining — TODO confirm.
    df_data['date_diff'] = df_data['date3'] - df_data['birth_year']
    df_data_test['date_diff'] = df_data_test['date3'] - df_data_test['birth_year']
    from catboost import CatBoostClassifier
    cat_features = ['sex', 'marital_status', 'branch_code',
                    'occupation_code', 'occupation_category_code', 'product_pred']
    model = CatBoostClassifier()
    model.fit(df_data.drop(['ID', 'target'], axis=1), df_data['target'], cat_features)
    preds_proba = model.predict_proba(df_data_test.drop(['ID',], axis=1))
    df_answer = df_data_test[['ID', 'product_pred']]
    # NOTE(review): df_answer is a slice of df_data_test — the assignments below
    # trigger SettingWithCopyWarning; a .copy() would silence it.
    df_answer['target'] = preds_proba[:,1]  # probability of the positive class
    df_answer['ID X PCODE'] = df_answer['ID'] + ' X ' + df_answer['product_pred']
    df_answer.drop(['ID', 'product_pred'], axis=1, inplace=True)
    df_answer.rename(columns={'target':'Label'}, inplace=True)
    # Add back the submission rows that were not scored, then save.
    # NOTE(review): DataFrame.append was removed in pandas 2.0 — pd.concat is the replacement.
    df_answer = submission[submission['ID X PCODE'].isin(list(set(list(submission['ID X PCODE'])) - set(list(df_answer['ID X PCODE']))))].append(df_answer)
    df_answer.reset_index(drop=True, inplace=True)
    df_answer.to_csv(name, index=False)
# + [markdown] id="5L-1uUg2ej0w" colab_type="text"
# OK
| stack/Stacker2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# $Q(q) = 40e^{-0.05q}$
# $\int_0^{10} 40e^{-0.05q}\,dq$
# $40\int_0^{10}e^{-0.05q}\,dq$
# $u = -0.05q$
# $du = -0.05\,dq$
# $dq = -\frac{du}{0.05}$
# $-\frac{1}{0.05}\cdot 40 \cdot \int e^{u}\,du = -800\, e^{-0.05q}\,\Big|_0^{10}$
# $-800\,e^{-0.05\cdot 10} - \left(-800\,e^{0}\right) = 800\left(1 - e^{-0.5}\right) \approx 314.7$
| Simulado 01/7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Universidade Federal do Rio Grande do Sul (UFRGS)
# Programa de Pós-Graduação em Engenharia Civil (PPGEC)
#
# # PEC00144: Métodos experimentais na engenharia civil
#
# ### Trabalho final
# _<NAME>_
#
# _<NAME> <NAME>_
#
# +
#======================================================#
# 1. IMPORTING NECESSARY CLASSES AND DIMENSIONAL MATRIX#
#======================================================#
# Imports and the dimensional matrix used throughout the scale analysis.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import scipy.linalg as sc
import scipy.stats as st
from MRPy import *  # NOTE(review): wildcard import — presumably brings in the MRPy class; confirm what it exports
# Table of physical quantities vs. their L/M/T exponents (dimensional matrix).
DimData = pd.read_excel('Resources/DimData.xlsx')
# -
# ## 1. Introdução
#
# Este trabalho consiste no projeto e ensaio de um modelo reduzido e está dividido como apresentado abaixo:
#
#
# * Projeto de modelo reduzido;
# * Construção do modelo reduzido;
# * Análise da propagação de erro;
# * Análise do sinal;
# * Instrumentação;
# * Análise do sinal;
# * Conclusão.
#
# O modelo real consiste em um _Shear building_ de 3 pavimentos, com suas características expostas abaixo:
# <img src="Resources/modelo.jpg" alt="modelo" width="480px"/>
# * Dimensões dos pilares:
#
# $B\times H = 0.7 \times 0.2{\rm m}$
#
# Propriedades da seção:
#
# $A = 0.14{\rm m}^2$ e $I = 0.000466 {\rm m}^4$ <br>
#
# * Massa de um pavimento:
#
# $m = 38400 {\rm kg}$
#
# * Altura dos pilares:
#
# $h =3{\rm m}$ <br>
#
# * Módulo de elasticidade do concreto:
#
# $E_{\rm c} = 28 \times 10^{9} {\rm N/m}^2$
#
#
# * Rigidez à flexão:
#
# $ E_cI = 13.04 * 10^6 Nm^2 $
#
#
# * Rigidez do pilar:
#
# $ K = \frac{12 E_cI}{h^3} = 57.99*10^6 N/m$
#
# O objetivo desta análise é a determinação da frequência fundamental de vibração da estrutura por meio de ensaios e verificar se estes resultados convergem para os valores calculados por meio das expressões disponíveis na literatura.
#
#
# ## 2. Projeto de modelo reduzido
#
# Para a realização do projeto de modelo reduzido, é necessário definir as 3 grandezas que formarão a nova base da matriz dimensional. Para isto, como o modelo visa a análise das frequências naturais, utiliza-se como nova base as grandezas de comprimento, aceleração e rigidez à flexão.
# +
#======================================================#
# 2. MAKING DIMENSIONAL MATRIX #
#======================================================#
# New dimensional base: length (L), acceleration (a), flexural stiffness (EI).
ABC = ['L', 'a', 'EI']
# Fundamental dimensions: length, mass, time.
LMT = ['L', 'M', 'T']
base = DimData.loc[ABC, LMT]
# Inverse base matrix: used to express other quantities' exponents in the new base.
i_base = np.linalg.inv(base)
print(base)
# -
# Para definir o fator de escala da rigidez à flexão, é necessário impor as dimensões da seção transversal utilizada no modelo reduzido e o material que será utilizado. No presente estudo, será utilizada uma chapa de dimensões $ 23 \times 0.5 mm$. Abaixo está apresentado o cálculo da rigidez à flexão do modelo reduzido.
# * Módulo de elasticidade do alumínio
# $$ E = 71 GPa $$
# * Momento de inércia da seção
# $$ I = \frac{0.023 * 0.00045^3}{12} = 1.746*10^{-13} m^4$$
# * Rigidez à flexão
# $$ EI = 0.012401 Nm^2 $$
# As escalas adotadas no modelo são:
#======================================================#
# 3. DEFINING NEW SCALES #
#======================================================#
λ_L = 1/30  # length scale of the reduced model (1:30)
λ_a = 1  # acceleration scale (gravity is not scalable)
λ_EI = 0.012401/(13.04*10**6)  # flexural-stiffness scale: aluminium 0.5 x 23 mm strip vs. prototype
# Agora, é possível calcular o valor das gradezas desejadas em função da nova base dimensional.
# +
#======================================================#
# 4. MAKING NEW DIMENSIONAL BASE MATRIX #
#======================================================#
# Quantities whose model scales we need: frequency, flexural stiffness, mass.
par = ['f', 'EI', 'm']
npar = len(par)
DimMat = DimData.loc[par, LMT]
print(DimMat)
# +
# One row of base scales (λ_L, λ_a, λ_EI) per derived quantity.
scales = np.tile([λ_L, λ_a, λ_EI],(npar,1))
# Exponents of each quantity expressed in the new (L, a, EI) base.
NewMat = pd.DataFrame(data = np.matmul(DimMat, i_base),
                      index = DimMat.index,
                      columns = ABC)
print(NewMat)
# -
# por fim, calculam-se as escalas:
# +
#======================================================#
# 5. PRINTING NECESSARY SCALES #
#======================================================#
# Each quantity's scale = product of the base scales raised to its exponents.
[λ_f, λ_EI, λ_m] = np.prod(scales**NewMat, axis=1);
print('Frequência: λ_f = 1:{0:4.5f}'.format(1/λ_f), '\n'
      'Rigidez à flexão: λ_EI = 1:{0:4.0f}'.format(1/λ_EI), '\n'
      'Massa: λ_m = 1:{0:4.1f}'.format(1/λ_m))
# -
# Sendo assim, o valor da massa por pavimento será:
# Scaled floor mass in grams.
# NOTE(review): 38450 here, but the prototype floor mass is stated as 38400 kg
# elsewhere in this notebook — confirm which value is intended.
print('Massa do pavimento: {0:5.3f}g'.format(38450*λ_m*1000))
# ## 3. Construção do modelo reduzido
# Para a construção do modelo, utilizou-se tiras de chapa de alumínio, de seção $ 23 \times 0.5 mm $ conforme especificado no projeto. O comprimento destas tiras é 35 cm, sendo 30 cm o valor calculado em projeto e os outros 5 cm para realizar o engaste na base. Para representar as massas, serão utilizadas cantoneiras de alumínio, de massa por comprimento linear igual a $1.82g/cm$. Para representar os engastes, serão utilizadas 12 presilhas( 4 em cada pavimento) de papel junto a 6 suportes de alumínio(2 por pavimento), que serão tratadas como variáveis aleatórias. A rotina a seguir calcula a média e o desvio padrão destas variáveis, assim como o tamanho da cantoneira que deverá ser utilizada no modelo.
# +
#======================================================#
# 6. CALCULATING THE NECESSARY LENGHT OF THE BAR #
#======================================================#
m_p = [2.18,2.16,2.18,2.21,2.12,2.19,2.18,2.17,2.20,2.16,2.18,2.20]  # measured clip masses [g]
m_s = [3.61,3.59,3.62,3.60,3.60,3.59]  # measured support masses [g]
mi_p = np.mean(m_p)
# NOTE(review): np.std defaults to the population std (ddof=0); for a sample
# of measurements ddof=1 may be intended — confirm.
sigma_p = np.std(m_p)
mi_s = np.mean(m_s)
sigma_s = np.std(m_s)
# Angle-bar length needed so that 4 clips + 2 supports + bar hit the target
# floor mass (1.82 g/cm is the bar's linear mass).
# NOTE(review): 38450 vs the 38400 kg stated in the text — confirm.
lc = (38450*λ_m*1000 - 4*mi_p - 2*mi_s)/1.82  # angle-bar length [cm]
print('--------------------------------------------','\n',
      'Massa média da presilha: {0:5.2f}g'.format(mi_p),'\n',
      'Desvio padrão da presilha: {0:5.2f}g'.format(sigma_p),'\n',
      'Massa média do suporte: {0:5.2f}g'.format(mi_s),'\n',
      'Desvio padrão do suporte: {0:5.2f}g'.format(sigma_s),'\n',
      'Comprimento da cantoneira: {0:5.2f}cm'.format(lc),'\n',
      '--------------------------------------------')
# -
# Para calcular o valor massa por pavimento, considera-se ela uma função de variáveis aleatórias, sendo expressa por:
# \begin{equation}
# M = 4m_p + 2m_s + 1.82l_c
# \end{equation}
# Em que $m_p$ e $m_s$ são, respectivamente, as massas da presilha e do suporte que serão consideradas variáveis aleatórias. Já que a função mostrada acima é linear, sendo as varáveis consideradas não correlacionadas, é possível utilizar a seguinte expressão para calcular o desvio padrão da massa do pavimento:
# \begin{equation}
# \sigma_M^2 = \sum_{i=1}^{n} a_i^2 \sigma_X^2
# \end{equation}
# Sendo $a_i$ coeficientes que multiplicam as variáveis e $\sigma_X$ o desvio padrão de cada variável.
# +
#======================================================#
# 7.ERROR PROPAGATION IN FLOOR MASS #
#======================================================#
# Mean floor mass: 4 clips + 2 supports + angle bar (1.82 g/cm).
mi_M = 4* mi_p + 2* mi_s+ lc*1.82
# Linear combination of uncorrelated variables: variances add with squared coefficients.
sigma_M = np.sqrt((4*sigma_p)**2+(2*sigma_s)**2)
print('--------------------------------------------','\n',
      'Massa média de um pavimento: {0:5.2f}g'.format(mi_M),'\n',
      'Desvio Padrão: {0:5.2f}g'.format(sigma_M),'\n',
      '--------------------------------------------')
# -
# ## 4. Análise da propagação de erro
#
# Para definir o erro da frequência natural, utiliza-se uma simulação de Monte Carlo, considerando como variáveis aleatórias gaussianas o momento de inércia de área,onde admite-se um erro de 5% devido a imprecisão no corte da chapa, e a massa dos pavimentos, conforme foi calculado no terceiro tópico deste trabalho. Os valores médios e seus respectivos desvios padrões estão apresentados abaixo:
#
# $$ I = 1.746 \pm 0.0873 *10^{-13} m^4$$
# $$ M = 32.91 \pm 0.09 g$$
#
# Devido a imprecisão na ligação dos engastes, adiciona-se um erro de 0.5 cm no comprimento das chapas de alumínio.
# $$ L = 0.10 \pm 0.005 m $$
# +
#======================================================#
# 8. ERROR PROPAGATION IN NATURAL FREQUENCYS #
#======================================================#
E = 71e9  # Young's modulus of aluminium [N/m^2], treated as deterministic
I = st.norm(1.746e-13,0.0873e-13)  # section inertia [m^4], gaussian (5% error)
M = st.norm(32.91/1000,0.09/1000)  # floor mass [kg], gaussian
L = st.norm(0.10,0.005)  # column length [m], gaussian (0.5 cm clamping error)
#----------------------------------#
def autoval(E, I, L, M):
    """Return the fundamental natural frequency [Hz] of the 3-storey shear building.

    Parameters
    ----------
    E : Young's modulus of the columns [N/m^2].
    I : second moment of area of one column [m^4].
    L : storey height [m].
    M : mass of one floor [kg].
    """
    K = 12*E*I/(L*L*L)  # lateral stiffness of one clamped-clamped column
    # Storey stiffness is 2K (two columns per floor): classic tridiagonal
    # shear-building stiffness matrix for 3 floors.
    M_K = np.array([[4*K,-2*K,0],
                    [-2*K, 4*K,-2*K],
                    [0,-2*K,2*K]])
    M_M = np.identity(3)*M
    # M_K is symmetric and M_M is SPD, so use the symmetric generalized solver:
    # eigenvalues come back real and sorted ascending. (The original sc.eig
    # returned complex values with ~0 imaginary part and computed unused
    # eigenvectors and an unused np.real copy.)
    w2 = sc.eigh(M_K, M_M, eigvals_only=True)
    fk = np.sqrt(w2)/(2*np.pi)  # omega^2 -> f [Hz]
    return fk[0]
# ----------------------------------- #
# Monte Carlo propagation: sample the random inputs, evaluate the fundamental
# frequency for each draw, and summarize the resulting distribution.
n = 50000
ii = I.rvs(n)
m = M.rvs(n)
l = L.rvs(n)
fi = [np.real(autoval(E, ii[k], l[k], m[k])) for k in range(n)]
mi_f = np.mean(fi)
sigma_f = np.std(fi)
print('---------------------------------------', '\n',
      'Valor médio da frequência: {0:5.2f}hz'.format(mi_f), '\n',
      'Valor do erro: {0:5.2f}hz'.format(sigma_f), '\n',
      'Coeficiente de variação: {0:5.2f}%'.format(sigma_f / np.real(mi_f) * 100), '\n',
      '--------------------------------------')
# -
# Afim de verificar o tipo de distribuição da frequência, plota-se a função de probabilidade acumulada e o histograma da resposta:
# +
#======================================================#
# 9. PLOTING FREQUENCY PROBABILITY FUNCTIONS #
#======================================================#
fx = sorted(fi)  # sorted Monte Carlo frequencies (support of the empirical CDF)
fp = st.norm(mi_f,sigma_f)  # gaussian fitted to the sample, for comparison
p = np.arange(0,n)/(n-1)  # empirical cumulative-probability levels
plt.figure(1,figsize=(12,5))
# Left: empirical CDF of the simulated frequencies.
plt.subplot(1,2,1)
plt.title('Função de probabilidade acumulada',size = 12)
plt.plot(fx,p,'black',drawstyle = 'steps')
plt.axis([5, 8.5, 0, 1.1])
plt.grid(True)
# Right: histogram against the fitted gaussian density.
plt.subplot(1,2,2)
plt.title('Histograma')
plt.hist(fx,bins= 64, range =(5,8),density= True,color = 'black',histtype = 'step')
plt.plot(fx,fp.pdf(fx),'r:')
plt.xlim(5,8.5)
plt.legend(('Função de probabilidade Gaussiana','Distribuição de probabilidades da resposta'))
plt.grid(True)
# -
# A partir do exposto, é notável que a distribuição de frequências também é do tipo gaussiana.
# ## 5. Instrumentação
# Para a realização do ensaio, utiliza-se o acelerômetro triaxial MPU6050 fixado no pavimento superior do modelo reduzido. O sinal deste acelerômetro será lido com auxílio do microcomputador Raspberry Pi 3 model B, a partir de ligações do tipo $I^2C$. A ligação entre os dois é feita a partir de fios de cobre esmaltado, afim de que estes não colaborem para o amortecimento do modelo. A Figura 2 ilustra o esquema de ligação entre o acelerômetro e o Raspberry.
# <img src="Resources/rasp_acel.PNG" alt="esquema de ligação" width="480px"/>
#
# O cabo vermelho corresponde ao VCC, que produz uma voltagem de 3,3 V, enquanto o preto simboliza o terra(GND). Já os cabos verde e amarelo representam, respectivamente, as ligações SDA e SCL, que serão responsáveis pela transmissão dos dados.
#
# A Figura 3 apresenta o modelo instrumentado.
# <img src="Resources/modelo_reduzido.jpg" alt="modelo reduzido" width="480px"/>
#
# ## 6. Análise do sinal
# Para a determinação da frequência fundamental de vibração, submete-se a estrutura a um deslocamento inicial, fazendo-a descrever um movimento de vibração livre amortecida. A partir da série temporal, utiliza-se o artifício do periodograma para observar qual frequência possui a maior energia armazenada, sendo esta a frequência fundamental desejada.
#
# A partir de uma rotina em Python utilizada no Raspberry, é possível a obtenção dos dados fornecidos pelo MPU6050. Com o auxílio da biblioteca ``MRPY``, é possível a realização da leitura das acelerações e da plotagem dos seus respectivos periodogramas. Este procedimento foi realizado em um Jupyter Notebook auxiliar ( ``import data``) e importado a partir da biblioteca pickle, como apresentado a seguir:
# +
#======================================================#
# 10. IMPORTING AND PLOTING SIGNALS #
#======================================================#
import pickle as pk
# Load the three free-vibration tests exported by the auxiliary notebook:
# acceleration series (az*), time vectors (t*), spectra (Saz*) and frequency
# axes (f*). Only unpickle files you trust.
with open('sample_freq.pk', 'rb') as KM:
    az, t, Saz, f,az2, t2, Saz2, f2,az3, t3, Saz3, f3 = pk.load(KM)
# One row per test: time series on the left, periodogram on the right.
plt.figure(2,figsize=(15,12))
plt.subplot(3,2,1)
plt.title('Aceleração 1')
plt.plot(t, az[0],)
plt.xlim(0,17.5)
plt.ylabel('a_x (m/s^2)')
plt.grid(True)
plt.subplot(3,2,2)
plt.title('Periodograma 1')
plt.plot(f, Saz[0])
plt.ylabel('Densidade espectral')
plt.xlim(0,10)
plt.grid(True)
plt.subplot(3,2,3)
plt.title('Aceleração 2')
plt.plot(t2, az2[0])
plt.xlim(0,17.5)
plt.ylabel('a_x (m/s^2)')
plt.grid(True)
plt.subplot(3,2,4)
plt.title('Periodograma 2')
plt.plot(f2, Saz2[0])
plt.ylabel('Densidade espectral')
plt.xlim(0,10)
plt.grid(True)
plt.subplot(3,2,5)
plt.title('Aceleração 3')
plt.plot(t3, az3[0])
plt.xlim(0,17.5)
plt.xlabel('tempo (s)')
plt.ylabel('a_x (m/s^2)')
plt.grid(True)
plt.subplot(3,2,6)
plt.title('Periodograma 3')
plt.plot(f3, Saz3[0])
plt.ylabel('Densidade espectral')
plt.xlim(0,10)
plt.xlabel('Fequência(hz)')
plt.grid(True)
# -
# Para encontrar o valor exato da frequência que possui maior energia em cada espectro, utiliza-se a rotina a seguir:
# +
#======================================================#
# 11. PRINTING NATURAL FREQUENCYS #
#======================================================#
# For each test, take the frequency carrying the most spectral energy —
# the fundamental natural frequency.
f_max1 = f[np.argmax(Saz[0])]
f_max2 = f2[np.argmax(Saz2[0])]
f_max3 = f3[np.argmax(Saz3[0])]
print(' As frequências naturais em cada ensaio são:','\n',
      ' Ensaio 1: {0:5.2f}hz'.format(f_max1),'\n',
      ' Ensaio 2: {0:5.2f}hz'.format(f_max2),'\n',
      ' Ensaio 3: {0:5.2f}hz'.format(f_max3),'\n',)
# -
# ## 7. Resultados e considerações finais
# A partir dos valores de frequência obtidos com o uso dos espectros, é possível concluir que os valores encontrados estão dentro da margem de erro estipulada no Capítulo 4 deste trabalho, concluindo, assim, que o modelo experimental valida a metodologia encontrada na literatura.
# Para conferir se a escala de frequência adotada neste trabalho está correta, calcula-se o valor da frequência fundamental do modelo real a partir da função autoval, definidada no Capítulo 4.
#======================================================#
# 12. PRINTING WORKS RESULTS #
#======================================================#
# Full-scale fundamental frequency [Hz] from the prototype properties
# (E = 28 GPa, I = 0.000466 m^4, h = 3 m, m = 38400 kg).
F_real = np.real(autoval(28e9,0.000466,3,38400))
print('-------------------------------------------------------','\n',
      'Frequência fundamental do modelo real: {0:5.2f}hz'.format(F_real),'\n',
      'Escala de frequência: 1:{0:5.2f}'.format(1/λ_f),'\n',
      'Frequência fundamental do modelo reduzido: {0:5.2f}hz'.format(mi_f),'\n',
      'Frequência fundamental escalada: {0:5.2f}hz'.format(mi_f/λ_f),'\n',
      '-------------------------------------------------------',)
# Com isso, é possível afirmar, também, que a escala obtida a partir da análise dimensional está correta, visto que o erro é desprezível.
# Portanto, a partir do exposto neste trabalho, é possível concluir que a utilização do modelo reduzido como ferramenta de projeto é bastante proveitosa para problemas em que a formulação teórica é de caráter complexo. A análise de propagação de erro, considerando como variáveis aleatórias a massa, o comprimento e a inércia, possibilitou definir o erro admissível para a medição experimental, sendo fundamental para a validação dos dados encontrados.
#
# A utilização do MPU6050 em conjunto com o Raspberry Pi se provou bastante eficiente para a medição da frequência fundamental de vibração, sendo possível realizar até mesmo a medição das outras frequências naturais de vibração da estrutura, apesar de estas possuírem uma menor energia associada.
| Projeto_Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# 
# # How to Publish a Pipeline and Invoke the REST endpoint
# In this notebook, we will see how we can publish a pipeline and then invoke the REST endpoint.
# ## Prerequisites and Azure Machine Learning Basics
# If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, make sure you go through the [configuration Notebook](https://aka.ms/pl-config) first if you haven't. This sets you up with a working config file that has information on your workspace, subscription id, etc.
#
# ### Initialization Steps
# +
import azureml.core
from azureml.core import Workspace, Datastore, Experiment, Dataset
from azureml.data import OutputFileDatasetConfig
from azureml.core.compute import AmlCompute
from azureml.core.compute import ComputeTarget
# Check core SDK version number
print("SDK version:", azureml.core.VERSION)
from azureml.pipeline.core import Pipeline
from azureml.pipeline.steps import PythonScriptStep
from azureml.pipeline.core.graph import PipelineParameter
print("Pipeline SDK-specific imports completed")
# Load workspace details from the local config file (see the configuration notebook).
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, ws.subscription_id, sep = '\n')
# Default datastore (Azure blob storage)
# def_blob_store = ws.get_default_datastore()
def_blob_store = Datastore(ws, "workspaceblobstore")
print("Blobstore's name: {}".format(def_blob_store.name))
# -
# ### Compute Targets
# #### Retrieve an already attached Azure Machine Learning Compute
#
# > Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.
# +
from azureml.core.compute_target import ComputeTargetException
aml_compute_target = "cpu-cluster"
try:
    # Reuse the compute target if it already exists in the workspace.
    aml_compute = AmlCompute(ws, aml_compute_target)
    print("found existing compute target.")
except ComputeTargetException:
    # Otherwise provision a small autoscaling CPU cluster (1-4 nodes).
    print("creating new compute target")
    provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_D2_V2",
                                                                min_nodes = 1,
                                                                max_nodes = 4)
    aml_compute = ComputeTarget.create(ws, aml_compute_target, provisioning_config)
    aml_compute.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
# +
# For a more detailed view of current Azure Machine Learning Compute status, use get_status()
# example: un-comment the following line.
# print(aml_compute.get_status().serialize())
# -
# ## Building Pipeline Steps with Inputs and Outputs
# A step in the pipeline can take [dataset](https://docs.microsoft.com/python/api/azureml-core/azureml.data.filedataset?view=azure-ml-py) as input. This dataset can be a data source that lives in one of the accessible data locations, or intermediate data produced by a previous step in the pipeline.
# Specify a public dataset path
# Specify a public dataset path
data_path = "https://dprepdata.blob.core.windows.net/demo/Titanic.csv"
# Or uploading data to the datastore
# data_path = def_blob_store.upload_files(["./your_data.pkl"], target_path="your_path", overwrite=True)
# Reference the data uploaded to blob storage using file dataset
# Assign the datasource to blob_input_data variable
blob_input_data = Dataset.File.from_files(data_path).as_named_input("test_data")
print("Dataset created")
# Define intermediate data using OutputFileDatasetConfig
# (it is passed between pipeline steps without being registered up front).
processed_data1 = OutputFileDatasetConfig(name="processed_data1")
print("Output dataset object created")
# #### Define a Step that consumes a dataset and produces intermediate data.
# In this step, we define a step that consumes a dataset and produces intermediate data.
#
# **Open `train.py` in the local machine and examine the arguments, inputs, and outputs for the script. That will give you a good sense of why the script argument names used below are important.**
#
# The best practice is to use separate folders for scripts and its dependent files for each step and specify that folder as the `source_directory` for the step. This helps reduce the size of the snapshot created for the step (only the specific folder is snapshotted). Since changes in any files in the `source_directory` would trigger a re-upload of the snapshot, this helps keep the reuse of the step when there are no changes in the `source_directory` of the step.
# +
# trainStep consumes the datasource (Datareference) in the previous step
# and produces processed_data1
# Separate source_directory per step keeps the step snapshot small and reusable.
source_directory = "publish_run_train"
trainStep = PythonScriptStep(
    script_name="train.py",
    arguments=["--input_data", blob_input_data.as_mount(), "--output_train", processed_data1],
    compute_target=aml_compute,
    source_directory=source_directory
)
print("trainStep created")
# -
# #### Define a Step that consumes intermediate data and produces intermediate data
# In this step, we define a step that consumes an intermediate data and produces intermediate data.
#
# **Open `extract.py` in the local machine and examine the arguments, inputs, and outputs for the script. That will give you a good sense of why the script argument names used below are important.**
# +
# extractStep to use the intermediate data produced by trainStep
# This step also produces an output processed_data2
processed_data2 = OutputFileDatasetConfig(name="processed_data2")
source_directory = "publish_run_extract"
extractStep = PythonScriptStep(
    script_name="extract.py",
    arguments=["--input_extract", processed_data1.as_input(), "--output_extract", processed_data2],
    compute_target=aml_compute,
    source_directory=source_directory)
print("extractStep created")
# -
# #### Define a Step that consumes multiple intermediate data and produces intermediate data
# In this step, we define a step that consumes multiple intermediate data and produces intermediate data.
# ### PipelineParameter
# This step also has a [PipelineParameter](https://docs.microsoft.com/en-us/python/api/azureml-pipeline-core/azureml.pipeline.core.graph.pipelineparameter?view=azure-ml-py) argument that help with calling the REST endpoint of the published pipeline.
# We will use this later in publishing pipeline
# pipeline_arg can be overridden per run via the REST payload's ParameterAssignments.
pipeline_param = PipelineParameter(name="pipeline_arg", default_value=10)
print("pipeline parameter created")
# **Open `compare.py` in the local machine and examine the arguments, inputs, and outputs for the script. That will give you a good sense of why the script argument names used below are important.**
# +
# Now define compareStep that takes two inputs (both intermediate data), and produce an output
processed_data3 = OutputFileDatasetConfig(name="processed_data3")
# You can register the output as dataset after job completion
processed_data3 = processed_data3.register_on_complete("compare_result")
source_directory = "publish_run_compare"
compareStep = PythonScriptStep(
    script_name="compare.py",
    arguments=["--compare_data1", processed_data1.as_input(), "--compare_data2", processed_data2.as_input(), "--output_compare", processed_data3, "--pipeline_param", pipeline_param],
    compute_target=aml_compute,
    source_directory=source_directory)
print("compareStep created")
# -
# #### Build the pipeline
# Only the leaf step is needed; upstream steps are resolved from its inputs.
pipeline1 = Pipeline(workspace=ws, steps=[compareStep])
print ("Pipeline is built")
# ## Run published pipeline
# ### Publish the pipeline
published_pipeline1 = pipeline1.publish(name="My_New_Pipeline", description="My Published Pipeline Description", continue_on_step_failure=True)
published_pipeline1
# Note: the continue_on_step_failure parameter specifies whether the execution of steps in the Pipeline will continue if one step fails. The default value is False, meaning when one step fails, the Pipeline execution will stop, canceling any running steps.
# ### Publish the pipeline from a submitted PipelineRun
# It is also possible to publish a pipeline from a submitted PipelineRun
# submit a pipeline run
pipeline_run1 = Experiment(ws, 'Pipeline_experiment_sample').submit(pipeline1)
# publish a pipeline from the submitted pipeline run
published_pipeline2 = pipeline_run1.publish_pipeline(name="My_New_Pipeline2", description="My Published Pipeline Description", version="0.1", continue_on_step_failure=True)
published_pipeline2
# ### Get published pipeline
#
# You can get the published pipeline using **pipeline id**.
#
# To get all the published pipelines for a given workspace(ws):
# ```css
# all_pub_pipelines = PublishedPipeline.get_all(ws)
# ```
# +
from azureml.pipeline.core import PublishedPipeline

# Retrieve a previously published pipeline from the workspace by its id.
pipeline_id = published_pipeline1.id  # use your published pipeline id
published_pipeline = PublishedPipeline.get(ws, pipeline_id)
published_pipeline
# -
# ### Run published pipeline using its REST endpoint
# [This notebook](https://aka.ms/pl-restep-auth) shows how to authenticate to AML workspace.
# +
from azureml.core.authentication import InteractiveLoginAuthentication
import requests

# Acquire an AAD bearer token header for authenticating the REST call.
auth = InteractiveLoginAuthentication()
aad_token = auth.get_authentication_header()
rest_endpoint1 = published_pipeline.endpoint
print("You can perform HTTP POST on URL {} to trigger this pipeline".format(rest_endpoint1))
# specify the param when running the pipeline
# "ParameterAssignments" overrides the PipelineParameter default for this run.
response = requests.post(rest_endpoint1,
                         headers=aad_token,
                         json={"ExperimentName": "My_Pipeline1",
                               "RunSource": "SDK",
                               "ParameterAssignments": {"pipeline_arg": 45}})
# +
# Fail loudly with full response details if the endpoint rejected the request.
try:
    response.raise_for_status()
except Exception as ex:
    raise Exception('Received bad response from the endpoint: {}\n'
                    'Response Code: {}\n'
                    'Headers: {}\n'
                    'Content: {}'.format(rest_endpoint1, response.status_code, response.headers, response.content)) from ex

# The service returns the new run's id in the JSON body.
run_id = response.json().get('Id')
print('Submitted pipeline run: ', run_id)
# -
# # Next: Data Transfer
# The next [notebook](https://aka.ms/pl-data-trans) will showcase data transfer steps between different types of data stores.
| how-to-use-azureml/machine-learning-pipelines/intro-to-pipelines/aml-pipelines-publish-and-run-using-rest-endpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import graspy as gp
# -
# Row-wise adjacency results; keep only nonzero effect sizes and m <= 250.
res_df1 = pd.read_csv("../results/20200305_adj_row_wise.csv")
res_df1 = res_df1[res_df1.delta != 0]
res_df1 = res_df1.sort_values(['m', 'delta'])
res_df1 = res_df1[res_df1.m <= 250]
# Weighted correct-node results, filtered/sorted the same way.
res_df = pd.read_csv("../results/2020401_weighted_correct_nodes.csv")
res_df = res_df.sort_values(['m', 'delta'])
res_df = res_df[res_df.delta != 0]
# +
# Make figure with correct subplots
# 3x5 grid: 4 heatmap columns plus a narrow 5th column reserved for the
# shared colorbar (width_ratio 0.05).
sns.set_context("talk", font_scale=1.3)
fig, ax = plt.subplots(
    3,
    5,
    gridspec_kw={"width_ratios": [1, 1, 1, 1, 0.05]},
    figsize=(20, 15),
    sharex=True,
    sharey=True,
    constrained_layout=True,
)
# Replace the three small last-column axes with one tall colorbar axis.
gs = ax[-1, -1].get_gridspec()
for ax_ in ax[:, -1]:
    ax_.remove()
ax_leg = fig.add_subplot(gs[:, -1])

p = 0.5
spacing = 50
# Tick label values: every 10th grid point of the delta / sample-size sweeps.
# The first entries are nudged off zero to match how the sweep was run.
deltas = np.linspace(0, 1 - p, spacing + 1)[::10]
deltas[0] += 0.01
ms = np.linspace(0, 250, spacing + 1)[::10] * 2
ms[0] += 10
titles = [
    ["Different Node", "Same Node", "Different Node", "Same Node"],
    ["Different Node", "Same Node", "Different Node", "Same Node"],
]
# Do first row
# Top row: edge-level power for MGC (cols 0-1) and Hotelling (cols 2-3).
df_cols = [
    "mgc_node_1", "mgc_node_20", "hotelling_node_1", "hotelling_node_20"
]
for j, df_col in enumerate(df_cols):
    # Each column of results is reshaped to a (delta x m) grid and flipped
    # so small sample sizes appear at the bottom of the heatmap.
    sns.heatmap(
        np.flipud(res_df1[df_col].values.reshape(spacing, -1)),
        ax=ax[0, j],
        square=True,
        center=0,
        cmap="RdBu_r",
        cbar_kws=dict(shrink=0.7),
        xticklabels=["{:.2f}".format(d) for d in deltas],
        yticklabels=["{:.0f}".format(m) for m in ms],
        cbar_ax=ax_leg,
        vmin=0,
        vmax=1,
    )
    ax[0, j].set_xticks(np.arange(0, ax[0, 0].get_xlim()[1] + 1, 10))
    ax[0, j].set_yticks(np.arange(0, ax[0, 0].get_ylim()[0] + 1, 10)[::-1])
    # if i != 0:
    #     ax[i, j].yaxis.set_major_formatter(plt.NullFormatter())
    ax[0, j].set_title(titles[0][j])
# second and third row
# Plotting
# Rows 1-2: Omni (JRDPG) and MASE (COSIE) embedding power, same layout.
df_cols = [
    ["omni_power_mgc_1", "omni_power_mgc_20", "omni_power_hotelling_1", "omni_power_hotelling_20"],
    ["mase_power_mgc_1", "mase_power_mgc_20", "mase_power_hotelling_1", "mase_power_hotelling_20"],
]
# NOTE(review): `fmt` is defined but never used below.
fmt = lambda x: "{:.2f}".format(x)
for i, row in enumerate(df_cols, start=1):
    for j, df_col in enumerate(row):
        sns.heatmap(
            np.flipud(res_df[df_col].values.reshape(spacing, -1)),
            ax=ax[i, j],
            square=True,
            center=0,
            cmap="RdBu_r",
            cbar_kws=dict(shrink=0.7),
            xticklabels=["{:.2f}".format(d) for d in deltas],
            yticklabels=["{:.0f}".format(m) for m in ms],
            cbar_ax=ax_leg,
            vmin=0,
            vmax=1,
        )
        ax[i, j].set_xticks(np.arange(0, ax[0, 0].get_xlim()[1] + 1, 10))
        ax[i, j].set_yticks(np.arange(0, ax[0, 0].get_ylim()[0] + 1, 10)[::-1])
        # if i != 0:
        #     ax[i, j].yaxis.set_major_formatter(plt.NullFormatter())
        # NOTE(review): enumerate starts at 1, so this branch never runs;
        # these rows intentionally(?) get no titles — confirm.
        if i ==0:
            ax[i, j].set_title(titles[i][j])
# Column-group and row-group labels placed in figure coordinates.
fig.text(0.26, 1.035, "MGC", va="center", ha="center")
fig.text(0.725, 1.035, "Hotelling's", va="center", ha="center")
fig.text(-0.035, 0.5, "Sample Size", va="center", rotation="vertical")
# fig.text(-0.025, 0.175, "COSIE", va="center", rotation="vertical")
# fig.text(-0.025, 0.5, "JRDPG", va="center", rotation="vertical")
# fig.text(-0.025, 0.825, "Edges", va="center", rotation="vertical")
fig.text(-0.08, 0.175, "COSIE", ha="center")
fig.text(-0.08, 0.5, "JRDPG", ha="center")
fig.text(-0.08, 0.825, "Edges", ha="center")
fig.text(0.5, -0.02, "Effect Size", va="center", ha="center")
# -
fig.savefig("exp7.png", dpi=300, bbox_inches='tight')
fig.savefig("exp7.pdf", dpi=300, bbox_inches='tight')
| experiments/experiment_7/j1c/final/make_figs.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// # Avaliando djl
//
// ## [Javadoc](https://javadoc.djl.ai/)
// %system nvidia-smi
// %system nvcc --version
// %maven ai.djl:api:0.12.0
// %maven org.slf4j:slf4j-api:1.7.26
// %maven org.slf4j:slf4j-simple:1.7.26
// %maven ai.djl.mxnet:mxnet-engine:0.12.0
// %maven ai.djl.mxnet:mxnet-native-auto:1.8.0
import ai.djl.Device;
import ai.djl.ndarray.NDArray;
import ai.djl.ndarray.NDManager;
import ai.djl.ndarray.types.Shape;
import ai.djl.ndarray.types.DataType;

// Handle to the first GPU (does not verify one is actually present).
Device d = Device.gpu(0);
System.out.println(d);

// Enumerate every device DJL can see on this machine.
Device[] devices=Device.getDevices();
for(Device device:devices){
    System.out.println(device);
}
System.out.println(Device.getGpuCount());

// Manager owns the lifetime of the NDArrays created below.
NDManager manager = NDManager.newBaseManager();
// Created on the manager's default device.
NDArray x = manager.create(new int[]{1, 2, 3});
x.getDevice();
// Re-declaring x is fine in JShell cells: allocate a 2x3 float array on GPU.
NDArray x = manager.ones(new Shape(2, 3), DataType.FLOAT32, Device.gpu());
x
| notebooks/djl/Cap 5 - Deep Learning Computation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Science Case #3: find any project where <spectral line(s)> was detected or observed
#
#
# The science case here would be to search the archive for instances where a rare line was
# observed and/or detected. Desired lines could be logically ANDed to narrow the results to
# coincident detections. For example, the search could be for Si18O observations. One could even
# limit results to be above a certain S/N or line ratio (peak or integrated). This is a straightforward
# search of ADMIT’s line identifications and line strengths. Additional constraints can of course
# be given, e.g., a frequency range, or ALMA band. The information returned would allow the user
# to see what sources were observed, which transitions, { and examine moment maps of the
# detection }. This same pattern could be used to find sources where a large fraction of the
# CO-ladder was observed.
#
#
from astroquery.admit import ADMIT
import pandas as pd
import numpy as np

# display the whole table in the notebook (no row/column truncation)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth',25)

# Open a connection to the ADMIT archive.
a = ADMIT()
# ### Find any project where any CO transition was detected in a LineCube
a.query(formula="CO")
# ### Find any project where CO(J=2-1) transition was detected in a LineCube
a.query(formula="CO",transition="2-1")
# ### Find any project where any CO or CS transition was detected in a LineCube
# The formula argument accepts a regex-style alternation.
a.query(formula="CO|CS")
# ### Sources where both CO and CS have been detected
# Intersect the target names of the two independent queries.
result_co = a.query(formula="CO")
result_cs = a.query(formula="CS")
co_targets = set(result_co['target_name'])
cs_targets = set(result_cs['target_name'])
co_targets.intersection(cs_targets)
# ### Projects where both CO and CS have been detected
co_projects = set(result_co['obs_id'])
cs_projects = set(result_cs['obs_id'])
co_projects.intersection(cs_projects)
| notebooks/Case3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <h1 align="center">Assignment No :- 04</h1>
# # Title :-
# Apply Basic PCA on the iris dataset. The data set is available at:- https://raw.github.com/neurospin/pystatsml/master/datasets/iris.csv
# 1. Describe the data set. Should the dataset been standardized?
# 2. Describe the structure of correlations among variables.
# 3. Compute a PCA with the maximum number of components
# 4. Compute the cumulative explained variance ratio. Determine the number of components𝐾by your computed values.
# 5. Print the𝐾principal components directions and correlations of the𝐾principal
# 6. compo-nents with the original variables. Interpret the contribution of the original variables into the PC.
# 7. Plot the samples projected into the𝐾first PCs.
# 8. Color samples by their species.
# + [markdown] _uuid="d3a6a49c4f3c9c9da49b4565cf8f975ab0497e9b"
# # **Principle Component Analysis on Iris Dataset**
# -
# ## Iris Dataset:
# __The dataset consists of four attributes:__ sepal-width, sepal-length, petal-width and petal-length.(All in cm)<br>
# These are the attributes of specific types of iris plant. <br>
# There are three classes in the dataset: __Iris-setosa, Iris-versicolor and Iris-virginica.__<br>
# 
#
# + [markdown] _uuid="6d6066733d1db976e30ee99161de17da22792465"
# ## How PCA works?
# + [markdown] _uuid="2c6dec1ff3eb0892dfe2a42ed4e2d709b6f3b71a"
# Understanding PCA without visuals is difficult so let's see the video <br>
# __[PCA Concept explanation](https://www.youtube.com/watch?v=_UVHneBUBW0&t=2s)__
# + [markdown] _uuid="e49d7341f556e53374d9c35c622e663da5459900"
# - PCA finds the principal components of data.
# - Principal components are new variables that are constructed as linear combinations or mixtures of the initial variables.
# - Principal components are new axes that provide the best angle to see and evaluate the data, so that the differences between the observations are better visible.
# - It is often useful to measure data in terms of its principal components rather than on a normal x-y axis.
# - So what are principal components then? They’re the underlying structure in the data. They are the directions where there is the most variance, the directions where the data is most spread out.
#
# - PCA finds a new set of dimensions (or a set of basis of views) such that all the dimensions are orthogonal (and hence linearly independent) and ranked according to the variance of data along them. - It means more important principle axis occurs first. (more important = more variance/more spread out data)
#
# **How does PCA work -**
#
# 1. Calculate the covariance matrix X of data points.
# 2. Calculate eigen vectors and corresponding eigen values.
# 3. Sort the eigen vectors according to their eigen values in decreasing order.
# 4. Choose first k eigen vectors and that will be the new k dimensions.
# 5. Transform the original n dimensional data points into k dimensions.
#
#
# For Eigen term details : http://setosa.io/ev/eigenvectors-and-eigenvalues/
# + [markdown] _uuid="4d32926332dc6393302ed235a829bfbe0ff5495f"
# ### Implementing PCA
# #### Step 1: Standardization of the data
#
# - Standardize the range of the continuous initial variables so that each one of them contributes equally to the analysis.
# - Mathematically, this can be done by subtracting the mean and dividing by the standard deviation for each value of each variable.
#
# 
#
# #### Step 2: Calculate the covariance matrix
# - The aim of this step is to understand how the variables of the input data set are varying from the mean with respect to each other, or in other words, to see if there is any relationship between them.
# - Because sometimes, variables are highly correlated in such a way that they contain redundant information.
# - So, in order to identify these correlations, we compute the covariance matrix.
# 
# - Since the dataset we took is 4-dimensional, this will result in a 4x4 Covariance matrix.
#
#
# *Please note that Var[X] = Cov[X,X] and Var[Y] = Cov[Y,Y].*
#
# #### Step 3: Calculate the eigenvalues and eigenvectors
# - ***The eigenvectors of the Covariance matrix*** are actually the directions of the axes where there is the most variance(most information) and that we call Principal Components.
# - ***And eigenvalues*** are simply the coefficients attached to eigenvectors, which give the amount of variance carried in each Principal Component.
#
# To calculate the eigenvalues and eigenvectors for the covariance matrix:
# ƛ is an eigenvalue for a matrix A if it is a solution of the characteristic equation:
#
# det( ƛI - A ) = 0
#
# Where, I is the identity matrix of the same dimension as A which is a required condition for the matrix subtraction as well in this case and ‘det’ is the determinant of the matrix. For each eigenvalue ƛ, a corresponding eigen-vector v, can be found by solving:
#
# ( ƛI - A )v = 0
#
# #### Step 4: Choosing components and forming a feature vector:
# - In this step, we choose whether to keep all these components or discard those of lesser significance (of low eigenvalues), and form with the remaining ones a matrix of vectors that we call Feature vector.
# - We order the eigenvalues from largest to smallest so that it gives us the components in order or significance.
# - Here comes the **dimensionality reduction** part. If we have a dataset with n variables, then we have the corresponding n eigenvalues and eigenvectors.
# - It turns out that the eigenvector corresponding to the highest eigenvalue is the principal component of the dataset and it is our call as to how many eigenvalues we choose to proceed our analysis with.
# - To reduce the dimensions, we choose the first p eigenvalues and ignore the rest.
# - We do lose out some information in the process, but if the eigenvalues are small, we do not lose much.
#
# #### Step 5: Recast/Project the data along the Principal Components axes
# 
# -
# # IMPLEMENTATION
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
# Importing Necessary libraries
import numpy as np
import pandas as pd
from sklearn import preprocessing
import seaborn as sns
import matplotlib.pyplot as plt
# + _uuid="c0e68268c0de09908974f3cdab4bba027245aa8a"
# Load the iris data; the CSV's "Id" column becomes the index.
iris = pd.read_csv("Iris.csv",index_col='Id')
iris.head()
# -
# ### 1. Describing the dataset
# + _uuid="e752b4921bb263bbb859252693c90e860fe6f4e5"
# Summary statistics for numeric columns plus counts for the Species column.
iris.describe(include='all')
# -
iris.info()
# **Insight:**
# - 4 attributes/variables are numerical in same units (similar scale)
# - Target is categorical variable
# #### Should the dataset be standardized?
# As all the attributes are in same units (cm), no need of standardization
# + _uuid="60784ca54e2f41cb699bc12a8d7e8a7188930a5a"
# Bar chart of rows per species — checks for class imbalance.
sns.countplot(y=iris.Species ,data=iris)
plt.xlabel("Count of each Target class")
plt.ylabel("Target classes")
plt.show()
# + [markdown] _uuid="c70dadee316e1d6ebfa4fdf54220fdfe38629a9f"
# #### Early Insights :
# 1. 150 rows
# 2. 4 Independent variables to act as factors
# 3. All have same units of measurement (cm)
# 4. No missing data
# 5. Three unique target classes namely : 'Iris-setosa', 'Iris-versicolor' and 'Iris-virginica'
# 6. No class imbalance, all target classes have equal number of rows (50 each).
# -
# ### 2. Describing structure of correlation among variables
# The Pearson correlation coefficient (named for Karl Pearson) can be used to summarize the strength of the linear relationship between two data samples.
# Pearson's correlation coefficient = covariance(X, Y) / (stdv(X) * stdv(Y))
# + _uuid="d1d73df53d6f6bc73aec24c02a29c40ca8101fce"
plt.figure(figsize=(5,5))
# Pearson (linear) correlation heatmap of the attributes.
# NOTE(review): iris still contains the string 'Species' column here; older
# pandas silently drops non-numeric columns in .corr(), newer pandas needs
# numeric_only=True — confirm the pandas version in use.
p=sns.heatmap(iris.corr(method='pearson'), annot=True,cmap='winter')
# -
# - Two variables may be related by a nonlinear relationship, such that the relationship is stronger or weaker across the distribution of the variables.
# - Further, the two variables being considered may have a non-Gaussian distribution.
# - In this case, the **Spearman’s correlation coefficient** (named for Charles Spearman) can be used to summarize the strength between the two data samples. <br>
# - This test of relationship can also be used if there is a linear relationship between the variables, but will have slightly less power (e.g. may result in lower coefficient scores).
#
# Spearman's correlation coefficient = covariance(rank(X), rank(Y)) / (stdv(rank(X)) * stdv(rank(Y)))
plt.figure(figsize=(5,5))
# Spearman (rank-based) correlation heatmap — robust to monotone non-linear relations.
p=sns.heatmap(iris.corr(method='spearman'), annot=True,cmap='autumn')
# + [markdown] _uuid="1e081104ba925f264cf34ff7c14e165545045d52"
# **Insight:**
# - One of the biggest aims of these sort of plots is to identify features that are not much helpful in explaining the target outcome.
# - The SepalWidthCm feature seems to be less relevant in explaining the target class as compared to the other features
# + [markdown] _uuid="2c1f27918b3bbcdf575ffaa2de2fc664ab9fa22e"
# ### 3. Computing PCA
# -
# Let's separate attributes/features from Target
#
# i.e.
#
# Features => X = Sepal Length, Sepal Width, Petal Length, Petal Width <br>
# Target => y = Species
X = iris.drop(['Species'],axis=1) # we can use iris.iloc[:,0:3] --> Features
y = iris.Species # iris.iloc[:,-1] --> Target
# + _uuid="1ec278be18383ab69bef1e2da4e762aa77284d79"
from sklearn.decomposition import PCA

# With no n_components argument PCA keeps all 4 components.
pca = PCA()
X_new = pca.fit_transform(X) #applying PCA on X
print(type(X_new))
# + _uuid="a4f1cb9a723fc8276cb12a9cd27dba859efd487b"
pca.get_covariance() # covariance matrix for X
# -
# ### 4. Computing the cumulative explained variance ratio.
# + _uuid="99ec81cdc7fafbb4e2a51730b9a133add0ebccce"
explained_variance=pca.explained_variance_ratio_*100 # % of information carried by each PC
explained_variance
# + _uuid="bdcc96082179169bbd562d928cf359cbc969518b"
# Scree plot: variance explained by each individual component.
with plt.style.context('seaborn-ticks'):
    plt.figure(figsize=(6, 4))
    plt.bar(range(4),explained_variance,label='individual explained variance')
    plt.ylabel('Explained variance ratio')
    plt.xlabel('Principal components')
    plt.legend(loc='best')
    plt.title('Scree Plot')
    plt.tight_layout()
# -
CEVR = pca.explained_variance_ratio_.cumsum() # cumsum fn calculates cumulative sum
# Cumulative curve: total variance captured by the first k components.
with plt.style.context('seaborn-notebook'):
    plt.figure(figsize=(6, 4))
    plt.plot(range(4),CEVR,"ro-", label='Cumulative Explained variance')
    plt.ylabel('Cumulative Explained variance ratio')
    plt.xlabel('Principal Components')
    plt.legend(loc='best')
    plt.tight_layout()
# ### 5. Determining the number of components 𝐾 by computed values.
# **Insight:**
# From above plot we can see that PC1 and PC2 contribute aprrox. 98% of information.<br>
# Hence, K = 2
# ### 6. Printing the 𝐾 principal components directions and correlations of the 𝐾 principal components with the original variables.
# (Original Variables are features)
# + _uuid="3dbae00df684412df06f7222b665255e15e581be"
# Refit keeping only the two dominant components (K = 2 from the scree plot).
pca=PCA(n_components=2) # Apply PCA with k= 2 components
X_pca=pca.fit_transform(X)
X_pca.shape # 2 features are our chosen 2 principal components
#print(type(X_pca))
# -
# **pca.components_** gives Principal axes in feature space, representing the **directions** of maximum variance in the data. Components are sorted by **explained_variance_**
pca.components_ # First row PC1 and second row PC2
# Loadings: weight of each original variable in each principal component.
loadings = pd.DataFrame(pca.components_.T,columns=['PC1','PC2'], index=X.columns)
loadings
# **NOTE:**<br>
# - The **columns** of the dataframe contain the **eigenvectors** associated with the first two principal components.<br>
# - Each **element** represents a loading, namely how much (**the weight**) each original variable contributes to the corresponding principal component.
# +
# NOTE(review): correlation loadings are conventionally components_.T *
# sqrt(explained_variance_); this uses the *ratio*, which only rescales the
# columns — confirm the intended convention.
loadingmats = pca.components_.T * np.sqrt(pca.explained_variance_ratio_)
loading_matrix = pd.DataFrame(loadingmats, columns=['PC1', 'PC2'], index=X.columns)
loading_matrix
# -
# - Here **each entry** of the matrix contains the **correlation between the original variable and the principal component**.<br>
# - For example the original variable sepal length (cm) and the first principal component PC1 have a correlation of 0.34.
# ### 7. Interpreting the contribution of the original variables into the PC.
# **Insight:**
# From above results we can say that<br>
# - Petal Length contributes most to PC1
# - Sepal Length and width contribute almost equally to PC2
# - Positive loading indicate feature contributing towards the outcome
#
# - Negative loading indicate feature contributing in opposite direction to the outcome
# ### 8. Plotting the samples projected into the 𝐾 first PCs
# ### and
# ### 9. Coloring samples by their species.
'''
the code projects the original data which is 4 dimensional into 2 dimensions.
After dimensionality reduction, there usually isn’t a particular meaning assigned to each principal component.
The new components are just the two main dimensions of variation.
'''
# Wrap the projected samples in a DataFrame so they can be joined with labels.
principalDf = pd.DataFrame(data = X_pca, columns = ['PC1', 'PC2'])
iris = iris.reset_index(drop=True) # reset the index to 0
finalDf = pd.concat([principalDf, iris[['Species']]], axis = 1)
# Concatenating DataFrame along axis = 1. finalDf is the final DataFrame before plotting the data.
finalDf.head()

import seaborn as sns
plt.figure(figsize=(6,6))
# Seaborn version: one scatter call, colored by species.
sns.scatterplot(x= finalDf['PC1'], y=finalDf['PC2'],hue=finalDf['Species'], palette=['y','r','b']);
plt.title('Sample Projection on Principal Components')

# Plotting the data
fig = plt.figure(figsize = (6,6))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Principal Component 1', fontsize = 15)
ax.set_ylabel('Principal Component 2', fontsize = 15)
ax.set_title('2 component PCA', fontsize = 20)
targets = ['Iris-setosa', 'Iris-versicolor', 'Iris-virginica']
colors = ['y', 'r', 'b']
# Matplotlib version: one scatter per species so each gets its own color/legend entry.
for target, color in zip(targets,colors):
    indicesToKeep = finalDf['Species'] == target
    ax.scatter(finalDf.loc[indicesToKeep, 'PC1']
               , finalDf.loc[indicesToKeep, 'PC2']
               , c = color
               , s = 25)
ax.legend(targets)
ax.grid()
| 5th Sem/Honors - AIML/Practical No 04/Assignment No 04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.6.12 64-bit (''.venv'': venv)'
# metadata:
# interpreter:
# hash: 655261d510271b2fa1eddf3c55861fc26682a337f3dd08c381abe2a293d3d6c9
# name: 'Python 3.6.12 64-bit (''.venv'': venv)'
# ---
# [](https://github.com/awslabs/aws-data-wrangler)
#
# # 21 - Global Configurations
#
# [Wrangler](https://github.com/awslabs/aws-data-wrangler) has two ways to set global configurations that will override the regular default arguments configured in functions signatures.
#
# - **Environment variables**
# - **wr.config**
#
# *P.S. Check the [function API doc](https://aws-data-wrangler.readthedocs.io/en/stable/api.html) to see if your function has some argument that can be configured through Global configurations.*
# ## Environment Variables
# Wrangler reads WR_* environment variables as global defaults.
# %env WR_DATABASE=default
# %env WR_CTAS_APPROACH=False
# %env WR_MAX_CACHE_SECONDS=900
# %env WR_MAX_CACHE_QUERY_INSPECTIONS=500
import awswrangler as wr

# Uses the env-var defaults above (database, CTAS, caching).
wr.athena.read_sql_query("SELECT 1 AS FOO")
# ## Resetting
# Specific
wr.config.reset("database")
# All
wr.config.reset()
# ## wr.config
# Equivalent programmatic way to set the same global defaults.
wr.config.database = "default"
wr.config.ctas_approach = False
wr.config.max_cache_seconds = 900
wr.config.max_cache_query_inspections = 500
wr.athena.read_sql_query("SELECT 1 AS FOO")
# ## Visualizing
wr.config
| tutorials/021 - Global Configurations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Regression
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import mpl_toolkits
# %matplotlib inline
# read training data into a dataframe
df = pd.read_csv('DC_Properties.csv')
# Show the first two rows of dataframe
df.head(2)
# Some statistical information about the data
df.describe()
df['INTWALL'].unique()
# Checking if there are missing values in the dataset
df.info()
# # histogram of each column
df.hist(bins=50, figsize=(20,15))
plt.savefig("attribute_histogram_plots")
plt.show()
#plt.figure(figsize=(15,15))
df.hist(column='PRICE', bins=10)
# +
# Delete non-Residential in Source column
# +
# Keep only residential properties for the rest of the analysis.
df= df[df.SOURCE == 'Residential']
# -
# Checking if there are missing values in the dataset
df.info()
# +
# delete CMPLX_NUM and LIVING_GBA because they have 0 value
# -
df.drop(['CMPLX_NUM','LIVING_GBA'],axis=1,inplace=True)
# +
#df.drop('SALEDATE',axis=1,inplace=True)
# +
# Drop all nan row based on price
# -
df=df.dropna(subset=['PRICE'])
df.info()
# +
# Drop X and Y because they are similar LATITUDE and LONGITUDE
# -
df.drop(['X','Y'],axis=1,inplace=True)
# +
# Drop source because we have only one type now
# -
df.drop(['SOURCE'],axis=1,inplace=True)
print(df.isnull().sum())
# +
# Drop Unnamed: 0 because it work as index and we have an new index for the data and Yr_RMDL BECAUSE IT need a lot of data
# -
df.drop(['Unnamed: 0','YR_RMDL'],axis=1,inplace=True)
# +
# drop the one raw missing from saledate and kitchens
# -
df=df.dropna(subset=['KITCHENS','SALEDATE'])
# Remove implausible rows: near-zero prices and zero-bathroom records.
df= df[df.PRICE >= 1000]
df= df[df.BATHRM > 0]
print(df.isnull().sum())
# NOTE(review): 'CENSUS_BLOCK' appears twice in this subset — harmless but
# presumably one entry was meant to be a different column; confirm.
df=df.dropna(subset=['CITY','STATE','CENSUS_BLOCK','CENSUS_BLOCK','AYB','STORIES','FULLADDRESS','QUADRANT'])
# Drop free-text / redundant location columns.
df.drop(['FULLADDRESS','CITY','STATE','ASSESSMENT_SUBNBHD','NATIONALGRID'],axis=1,inplace=True)
# Keep only the sale year (first 4 characters of the date string).
df["SALEDATE"] = df["SALEDATE"].apply(lambda x: str(x)[0:4])
# +
int_col = ['SALEDATE']
#con_col = ['BATHRM','HF_BATHRM','NUM_UNITS','ROOMS','BEDRM','EYB','STORIES','SALE_NUM','KITCHENS','FIREPLACES','LANDAREA']
for i in int_col:
    df[i] = df[i].astype('int64')
# -
df['QUADRANT'].unique()
# Integer label-encode the remaining categorical columns: each distinct
# value of the column is replaced by its position in .unique().
z = (['SQUARE','CENSUS_BLOCK','QUADRANT'])
for k in range(len(z)):
    r = df[z[k]].unique()
    for g in range(len(r)):
        df = df.replace({z[k]: r[g]}, g)
df.info()
df4=df.copy()
# One-hot encode each categorical column (drop_first avoids the dummy trap).
HEAT = pd.get_dummies(df4['HEAT'],drop_first=True)
ASSESSMENT_NBHD = pd.get_dummies(df4['ASSESSMENT_NBHD'],drop_first=True)
WARD = pd.get_dummies(df4['WARD'],drop_first=True)
AC = pd.get_dummies(df4['AC'],drop_first=True)
QUALIFIED = pd.get_dummies(df4['QUALIFIED'],drop_first=True)
STYLE = pd.get_dummies(df4['STYLE'],drop_first=True)
STRUCT = pd.get_dummies(df4['STRUCT'],drop_first=True)
GRADE = pd.get_dummies(df4['GRADE'],drop_first=True)
CNDTN = pd.get_dummies(df4['CNDTN'],drop_first=True)
EXTWALL = pd.get_dummies(df4['EXTWALL'],drop_first=True)
ROOF = pd.get_dummies(df4['ROOF'],drop_first=True)
INTWALL = pd.get_dummies(df4['INTWALL'],drop_first=True)
#QUADRANT = pd.get_dummies(df['QUADRANT'],drop_first=True)
#CENSUS_BLOCK = pd.get_dummies(df['CENSUS_BLOCK'],drop_first=True)
# Drop the original categorical columns (plus the modification timestamp),
# then append the dummy frames to build the model-ready table.
df4.drop(['HEAT','ASSESSMENT_NBHD','WARD','AC','QUALIFIED','STYLE','STRUCT','GRADE','CNDTN','EXTWALL','ROOF','INTWALL','GIS_LAST_MOD_DTTM'],axis=1,inplace=True)
df4.head(2)
df1 = pd.concat([df4,HEAT,ASSESSMENT_NBHD,WARD,AC,QUALIFIED,STYLE,STRUCT,GRADE,CNDTN,EXTWALL,ROOF,INTWALL],axis=1)
df1.head(2)
X = df1.drop('PRICE',axis=1)
# NOTE(review): y references df1['PRICE'] directly, so the in-place
# bucketing loop below also overwrites df1's PRICE column — confirm intended.
y = df1['PRICE']
df2=df1.copy()
df2['PRICE'].size
# Decile break points: sort a copy of the prices and take every 5734th value
# (5734 ≈ n/10 for this filtered dataset — hard-coded to the row count).
value= df2['PRICE'].values.copy()
value.sort()
# NOTE(review): break_point and break_point10 are never used below.
break_point=0
break_point1=value[5734]
break_point2=value[5734*2]
break_point3=value[5734*3]
break_point4=value[5734*4]
break_point5=value[5734*5]
break_point6=value[5734*6]
break_point7=value[5734*7]
break_point8=value[5734*8]
break_point9=value[5734*9]
break_point10=[26100000]
# Replace each price with its decile label 0-9 (prints the label per row).
for i in range(len(y)):
    if (y.iloc[i] <= break_point1):
        y.iloc[i] = 0
        print('0')
    elif ((y.iloc[i] > break_point1) & (y.iloc[i] <= break_point2)):
        y.iloc[i] = 1
        print('1')
    elif ((y.iloc[i] > break_point2) & (y.iloc[i] <= break_point3)):
        y.iloc[i] = 2
        print('2')
    elif ((y.iloc[i] > break_point3) & (y.iloc[i] <= break_point4)):
        y.iloc[i] = 3
        print('3')
    elif ((y.iloc[i] > break_point4) & (y.iloc[i] <= break_point5)):
        y.iloc[i] = 4
        print('4')
    elif ((y.iloc[i] > break_point5) & (y.iloc[i] <= break_point6)):
        y.iloc[i] = 5
        print('5')
    elif ((y.iloc[i] > break_point6) & (y.iloc[i] <= break_point7)):
        y.iloc[i] = 6
        print('6')
    elif ((y.iloc[i] > break_point7) & (y.iloc[i] <= break_point8)):
        y.iloc[i] = 7
        print('7')
    elif ((y.iloc[i] > break_point8) & (y.iloc[i] <= break_point9)):
        y.iloc[i] = 8
        print('8')
    else:
        y.iloc[i] = 9
        print('9')
from sklearn.linear_model import Perceptron

# Single-layer perceptron classifier over the 10 price-decile classes.
ppn = Perceptron(eta0 = 0.01)
from sklearn.model_selection import train_test_split
# 75/25 split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=99)
ppn.fit(X_train, y_train)
predict_NN = ppn.predict(X_test)
print(predict_NN.shape)
print(predict_NN)
# +
from sklearn.metrics import accuracy_score, f1_score, precision_score,\
    recall_score, classification_report, confusion_matrix,r2_score

score= accuracy_score(y_test, predict_NN)
print('Accuracy:', score)
print('Classification Report:\n', classification_report(y_test, predict_NN))
# +
from sklearn import metrics

# Confusion matrix heatmap: rows = actual decile, columns = predicted decile.
cm = metrics.confusion_matrix(y_test, predict_NN)
import seaborn as sns
plt.figure(figsize=(12,12))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
all_sample_title = 'Accuracy Score: {0}'.format(score)
plt.title(all_sample_title, size = 15);
plt.show()
| Neural Network-Final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Download the song dataset from Katahira et al
# - Data location: https://datadryad.org/resource/doi:10.5061/dryad.6pt8g
# %load_ext autoreload
# %autoreload 2
from avgn.downloading.download import download_tqdm
from avgn.utils.paths import DATA_DIR
from avgn.utils.general import unzip_file
# NOTE(review): the markdown above cites the Katahira datadryad dataset, but
# this URL/path download the Koumura figshare archive — confirm which is meant.
data_urls = [
    ('https://ndownloader.figshare.com/articles/3470165/versions/1', 'all_files.zip'),
]

output_loc = DATA_DIR/"raw/koumura/"

# Download each archive into the raw-data directory.
for url, filename in data_urls:
    download_tqdm(url, output_location=output_loc/filename)

# unzip the downloaded archive(s)
for url, filename in data_urls:
    unzip_file(output_loc/filename, output_loc/"zip_contents")

# BUG FIX: `tqdm` was used below without ever being imported in this
# notebook (NameError at runtime). Import it here, with a no-op fallback
# so the extraction loop still runs if tqdm is unavailable.
try:
    from tqdm.auto import tqdm
except ImportError:
    def tqdm(iterable, **kwargs):
        return iterable

# The archive contains nested zips — extract each of those as well.
zip_files = list((output_loc/"zip_contents").glob('*.zip'))
for zf in tqdm(zip_files):
    unzip_file(zf, output_loc/"zip_contents")
| notebooks/00.0-download-datasets/3.0-bengalese-finch-koumura-et-al.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Neural Network with decision boundary
#
import numpy as np
import pandas as pd
import math
import matplotlib
import matplotlib.pyplot as plt
# Method to split data into 70% Train and 30% Test data
def split_data(df, X, Y):
    """Randomly partition X/Y into a 70% train and 30% test split.

    A uniform random score is drawn for every row of ``df``; rows whose
    score falls below the 70th percentile of the scores go to the
    training set, the rest to the test set.

    Parameters:
        df: object whose ``shape[0]`` gives the number of rows to split.
        X: feature matrix, indexable by a boolean mask.
        Y: label vector, indexable by the same boolean mask.

    Returns:
        Tuple ``(X_train, Y_train, X_test, Y_test)``.
    """
    scores = np.random.rand(df.shape[0])
    in_train = scores < np.percentile(scores, 70)
    in_test = ~in_train
    return X[in_train], Y[in_train], X[in_test], Y[in_test]
# Read data from the binary data file
data = pd.read_csv('hw3data', sep="\t", header=None)
# Creating a column for initial bias values
# (column of ones prepended to columns 0-1; column 2 holds the class label)
x = np.hstack((np.ones((data.shape[0], 1)), data.drop(data.columns[2], axis=1)))
y = data[2]
# Convert Class-1 and Class-2 into 0's and 1's
# NOTE(review): element-wise mutation of the Series; newer pandas may warn
# about chained assignment here — confirm the pandas version in use.
for i, val in enumerate(y):
    if val == "Class-1":
        y[i] = 0
    else:
        y[i] = 1
# Splitting the data
X_train, Y_train, X_test, Y_test = split_data(data, x, y)
# Sigmoid activation function
def activation_func(value):
    """Logistic sigmoid: map any real input into the open interval (0, 1).

    Works elementwise on numpy arrays as well as on scalars.
    """
    exp_neg = np.exp(-value)
    return 1.0 / (1.0 + exp_neg)
# Mean squared error between predictions and targets.
def mse(y_predicted, Y_test):
    """Return the MSE, averaging over the first axis (number of samples)."""
    squared_error = (y_predicted - Y_test) ** 2
    return np.sum(squared_error) / len(y_predicted)
# Derivative of the sigmoid: s'(v) = s(v) * (1 - s(v)).
def sigmoid_derivative(val):
    """Return the derivative of the sigmoid evaluated at ``val``."""
    # Evaluate the sigmoid once instead of twice for the same argument
    # (the original called activation_func(val) two times).
    sig = activation_func(val)
    return sig * (1 - sig)
# Run the trained network on held-out data and report predictions,
# accuracy and mean-squared test error.
def predict_values(X_test, Y_test, weights):
    """Forward-propagate X_test through the two-layer network.

    Returns (thresholded 0/1 predictions, accuracy in percent,
    MSE of the raw sigmoid outputs against Y_test).
    """
    hidden_w, output_w = weights
    Y_test = np.array(Y_test).reshape(Y_test.shape[0], 1)
    # Forward pass: input -> hidden -> output, sigmoid at each layer.
    hidden_act = activation_func(np.dot(X_test, hidden_w))
    y_predicted = activation_func(np.dot(hidden_act, output_w))
    # The MSE is computed on the raw probabilities, before thresholding.
    test_error = mse(y_predicted, Y_test)
    # Threshold the probabilities at 0.5 to obtain hard 0/1 labels.
    for i, val in enumerate(y_predicted):
        y_predicted[i] = 1 if val >= 0.5 else 0
    # Count how many hard predictions match the ground truth.
    ctr = 0
    for i in range(len(y_predicted)):
        if y_predicted[i] == Y_test[i]:
            ctr += 1
    print('CORRECT', ctr)
    accuracy = (ctr / len(Y_test)) * 100
    return y_predicted, accuracy, test_error
# +
# Model to train the two-layer (3 -> 6 -> 1) neural network with
# full-batch gradient descent.
def neural_network_train(x, y, alpha, iterations):
    """Train the network and return (per-iteration losses, (weights_1, weights_2)).

    Parameters
    ----------
    x : array-like, shape (n, 3) — inputs including the bias column.
    y : array-like, shape (n,) — binary 0/1 targets.
    alpha : float — learning rate.
    iterations : int — number of gradient-descent steps.
    """
    # The dtype coming from pandas is object; cast to float so numpy
    # ufuncs (exp etc.) work.
    X = np.array(x).astype(float)
    n = X.shape[0]
    Y = np.array(y).reshape(n, 1).astype(float)
    # Randomly initialise weights: shape (prev-layer units, current-layer units).
    weights_1 = np.random.random((3, 6))
    weights_2 = np.random.random((6, 1))
    # Per-iteration training losses.
    loss_arr = []
    for i in range(iterations):
        # Forward pass.
        z_1 = np.dot(X, weights_1)
        act_1 = activation_func(z_1)
        z_2 = np.dot(act_1, weights_2)
        y_predicted = activation_func(z_2)
        # Mean squared loss for this iteration.
        loss_arr.append(mse(y_predicted, Y))
        # Backward pass (chain rule).
        # delta_out = dLoss/dz_2, shape (n, 1).
        delta_out = (y_predicted - Y) * sigmoid_derivative(z_2)
        # FIX: the hidden-layer delta must be back-propagated through
        # weights_2. The original multiplied delta_out directly by
        # sigmoid'(z_1), omitting the np.dot(..., weights_2.T) term of the
        # chain rule.
        delta_hidden = np.dot(delta_out, weights_2.T) * sigmoid_derivative(z_1)
        # Gradient-descent updates, averaged over the batch; weights_1 is
        # updated from deltas computed with the *old* weights_2.
        weights_1 = weights_1 - (alpha / n) * np.dot(X.T, delta_hidden)
        weights_2 = weights_2 - (alpha / n) * np.dot(act_1.T, delta_out)
    return loss_arr, (weights_1, weights_2)
# Train with a small learning rate for 200000 full-batch iterations.
learning_rate = 0.004
loss_arr, weights = neural_network_train(X_train, Y_train, learning_rate, 200000)
# -
# Plot how the loss J decreases over the 200000 training iterations.
x_arr = np.arange(0,200000,1)
loss_arr = np.array(loss_arr)
plt.plot(x_arr, loss_arr)
# Evaluate on the held-out split: hard predictions, accuracy and MSE.
y_predicted, accuracy, test_error = predict_values(X_test, Y_test, weights)
print('Accuracy: ', accuracy)
print('Test Error: ', test_error * 100, "%" )
# +
# Method to plot the decision boundary
# Uses the first two columns of the data for fitting the model as
# we need to find the predicted value for every point in the scatter plot.
# Get the updated final features weights
weight_final = weights[0]
# Cleaning the values of X, Y from the data
X = np.array(x)
# dtype is a numpy.object, converting the array into astype(float)
# otherwise it will show a message saying numpy.float64 has no attribute log10
X = X.astype(float)
n = X.shape[0]
Y = np.array(y).reshape(n, 1)
Y = Y.astype(float)
# Implementing a mesh grid-style scatter plot
# Step size in the mesh
h = 0.02
# Create sample points in a uniform grid to feed them to the classifier.
x_min, x_max = X[:, 1].min() - 1, X[:, 1].max() + 1
y_min, y_max = X[:, 2].min() - 1, X[:, 2].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Flatten the 2D array into 1D and getting the transpose of it
new_ravel_array = np.array([xx.ravel(), yy.ravel()]).T
# -
# Forward-pass helper for the mesh-grid points of the decision-boundary plot.
def predict_values_mesh(X_test, weights):
    """Return hard 0/1 predictions for every row of X_test."""
    hidden_w, output_w = weights
    # Two-layer forward pass with sigmoid activations.
    hidden_act = activation_func(np.dot(X_test, hidden_w))
    y_predicted = activation_func(np.dot(hidden_act, output_w))
    print(y_predicted)
    # Threshold the probabilities at 0.5.
    for i, val in enumerate(y_predicted):
        y_predicted[i] = 1 if val >= 0.5 else 0
    return y_predicted
# +
# Updating the flattened array with a column for bias as weights is of shape (3, 6)
final_bias_array = np.hstack((np.ones((new_ravel_array.shape[0], 1)), new_ravel_array))
# Return the y-predicted values
y_predicted_new = predict_values_mesh(final_bias_array, weights)
# Reshaping the size with respect to 'xx' shape to plot
y_predicted_new = y_predicted_new.reshape(xx.shape)
y_predicted_new.shape
# -
# Plot the decision boundary
colors=['#ff4466', '#2d7dd2']
plt.contourf(xx, yy, y_predicted_new, cmap='binary_r', alpha=0.8)
plt.scatter(X[:, 1], X[:, 2], c=Y[:,0], cmap=matplotlib.colors.ListedColormap(colors))
plt.colorbar()
plt.show()
| neural_net.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/finerbrighterlighter/myanmar_covid19/blob/master/myanmar_compared.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="9cIgWhk4sfS_" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="d5c6e069-e7c8-428f-d343-6f338f4e74aa"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
import seaborn as sns
from google.colab import files
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# %matplotlib inline
# + id="mdPOcqg4760s" colab_type="code" outputId="8ead6643-5b5c-4e6c-b42e-1e9e247fdcb2" colab={"base_uri": "https://localhost:8080/", "height": 89}
color_plan = ["lightcoral",
"darkorange","lightgoldenrodyellow","peachpuff",
"plum","mediumpurple","purple",
"chartreuse","mediumspringgreen","green",
"aqua","mediumturquoise",
"powderblue","cornflowerblue","navy",
"wheat","darkgoldenrod","maroon",
"azure","oldlace","mistyrose","dimgray"]
sns.palplot(sns.color_palette(color_plan))
# + id="D0sWbh-XsYSe" colab_type="code" colab={}
data = "https://data.humdata.org/hxlproxy/data/download/time_series_covid19_confirmed_global_iso3_regions.csv?dest=data_edit&filter01=merge&merge-url01=https%3A%2F%2Fdocs.google.com%2Fspreadsheets%2Fd%2Fe%2F2PACX-1vTglKQRXpkKSErDiWG6ycqEth32MY0reMuVGhaslImLjfuLU0EUgyyu2e-3vKDArjqGX7dXEBV8FJ4f%2Fpub%3Fgid%3D1326629740%26single%3Dtrue%26output%3Dcsv&merge-keys01=%23country%2Bname&merge-tags01=%23country%2Bcode%2C%23region%2Bmain%2Bcode%2C%23region%2Bmain%2Bname%2C%23region%2Bsub%2Bcode%2C%23region%2Bsub%2Bname%2C%23region%2Bintermediate%2Bcode%2C%23region%2Bintermediate%2Bname&filter02=merge&merge-url02=https%3A%2F%2Fdocs.google.com%2Fspreadsheets%2Fd%2Fe%2F2PACX-1vTglKQRXpkKSErDiWG6ycqEth32MY0reMuVGhaslImLjfuLU0EUgyyu2e-3vKDArjqGX7dXEBV8FJ4f%2Fpub%3Fgid%3D398158223%26single%3Dtrue%26output%3Dcsv&merge-keys02=%23adm1%2Bname&merge-tags02=%23country%2Bcode%2C%23region%2Bmain%2Bcode%2C%23region%2Bmain%2Bname%2C%23region%2Bsub%2Bcode%2C%23region%2Bsub%2Bname%2C%23region%2Bintermediate%2Bcode%2C%23region%2Bintermediate%2Bname&merge-replace02=on&merge-overwrite02=on&tagger-match-all=on&tagger-01-header=province%2Fstate&tagger-01-tag=%23adm1%2Bname&tagger-02-header=country%2Fregion&tagger-02-tag=%23country%2Bname&tagger-03-header=lat&tagger-03-tag=%23geo%2Blat&tagger-04-header=long&tagger-04-tag=%23geo%2Blon&header-row=1&url=https%3A%2F%2Fraw.githubusercontent.com%2FCSSEGISandData%2FCOVID-19%2Fmaster%2Fcsse_covid_19_data%2Fcsse_covid_19_time_series%2Ftime_series_covid19_confirmed_global.csv"
# + id="HPocGyDosqCl" colab_type="code" outputId="6256c594-9a63-48d3-83bf-32af7a496cbd" colab={"base_uri": "https://localhost:8080/", "height": 524}
df_ori = pd.read_csv(data, header=0)
df_ori
# + id="thtV_PPHtalf" colab_type="code" outputId="653aeef6-87bb-417d-cb09-4dcb05ca5832" colab={"base_uri": "https://localhost:8080/", "height": 408}
# countries of interest = China and Top Five countries with highest mortality
# According to worldometer on 14/4/2020, it is US, Italy, Spain, France, and UK
#countries = ["China","US", "Italy", "Spain", "France", "United Kingdom"]
# Regional (ASEAN)
countries = ["Brunei","Cambodia","Indonesia","Laos","Malaysia","Philippines","Singapore","Thailand","Vietnam"]
# I will not be using Burma data here, since it is actually slightly incorrect.
df_countries = pd.DataFrame(countries, columns=["country"])
# merge
df_ori=df_countries.merge(df_ori.groupby(["Country/Region"]).sum(),left_on="country",right_on="Country/Region",indicator=False,how='left')
df_ori= df_ori.set_index(["country"])
df_ori
# + id="aLAeJ91Hu7tz" colab_type="code" outputId="a2936898-c128-4e97-8605-ab98a0df4d33" colab={"base_uri": "https://localhost:8080/", "height": 419}
df=df_ori.T.reset_index()
df.rename(columns={"index": "date"}, inplace=True)
df["date"]=pd.to_datetime(df["date"].values)
df
# + id="5pdHKZd6wXy6" colab_type="code" colab={}
# Add the Myanmar series from the MoHS announcement log (one row per case).
myanmar = "https://raw.githubusercontent.com/finerbrighterlighter/myanmar_covid19/master/mohs_announcement.csv"
df_myanmar = pd.read_csv(myanmar, header=0)
df_myanmar["ann_date"] = pd.to_datetime(df_myanmar["ann_date"].values, dayfirst=True)
# One row per calendar day from the first to the last announcement.
# FIX: build the date column directly with pd.date_range instead of the
# original row-by-row .loc loop, which was O(n) and contained a no-op
# `i = i + 1` (the for-loop reassigns i anyway).
dates = pd.date_range(start=df_myanmar.ann_date.min(), end=df_myanmar.ann_date.max())
days_mm = pd.DataFrame({"days": np.arange(len(dates)), "date": dates})
# Daily new-case counts joined onto the calendar; days with no announcement
# get 0 cases.
days_mm = days_mm.merge(df_myanmar.groupby(["ann_date"]).size().to_frame("myanmar"), left_on="date", right_on="ann_date", indicator=False, how='left')
days_mm["myanmar"].fillna(0, inplace=True)
days_mm["myanmar"] = days_mm["myanmar"].astype(int)
# Cumulative confirmed cases, and 1-based day numbering.
days_mm["myanmar"] = days_mm["myanmar"].cumsum().astype(int)
days_mm["days"] = days_mm["days"] + 1
# + id="C_xirvrJzHWC" colab_type="code" colab={}
days = list(range(1, len(days_mm)+1))
days = pd.DataFrame(days, columns=["day"])
# + id="tJoFObfZ2x9F" colab_type="code" colab={}
# brunei - day 1 (9/3/2020)
df_bru = df.loc[47:, "Brunei"].reset_index(drop=True).to_frame("brunei")
days_bru = days.merge(df_bru, how="left", indicator=False, left_index=True, right_index=True)
# cambodia - day 1 (27/1/2020)
df_cam = df.loc[5:, "Cambodia"].reset_index(drop=True).to_frame("cambodia")
days_cam = days.merge(df_cam, how="left", indicator=False, left_index=True, right_index=True)
# indonesia - day 1 (2/3/2020)
df_indo = df.loc[40:, "Indonesia"].reset_index(drop=True).to_frame("indonesia")
days_indo = days.merge(df_indo, how="left", indicator=False, left_index=True, right_index=True)
# laos - day 1 (24/3/2020)
# laos is one day late for first discovery than Myanmar. last date is filled with ffill
df_laos = df.loc[61:, "Laos"].reset_index(drop=True).to_frame("laos")
days_laos = days.merge(df_laos, how="left", indicator=False, left_index=True, right_index=True)
days_laos["laos"]=days_laos["laos"].fillna(method='ffill')
# malaysia - day 1 (25/1/2020)
df_malay = df.loc[3:, "Malaysia"].reset_index(drop=True).to_frame("malaysia")
days_malay = days.merge(df_malay, how="left", indicator=False, left_index=True, right_index=True)
# philippines - day 1 (30/1/2020)
df_philip = df.loc[8:, "Philippines"].reset_index(drop=True).to_frame("philippines")
days_philip = days.merge(df_philip, how="left", indicator=False, left_index=True, right_index=True)
# singapore - day 1 (23/1/2020)
df_sg = df.loc[1:, "Singapore"].reset_index(drop=True).to_frame("singapore")
days_sg = days.merge(df_sg, how="left", indicator=False, left_index=True, right_index=True)
# thailand - day 1 (22/1/2020)
days_th = days.merge(df["Thailand"], how="left", indicator=False, left_index=True, right_index=True)
# vietnam - day 1 (23/1/2020)
df_viet = df.loc[1:, "Vietnam"].reset_index(drop=True).to_frame("vietnam")
days_viet = days.merge(df_viet, how="left", indicator=False, left_index=True, right_index=True)
# + id="qykR2CEt6GnH" colab_type="code" outputId="95fe9520-de4b-41be-9b17-21babc2ff412" colab={"base_uri": "https://localhost:8080/", "height": 400}
# plot
fig, ax = plt.subplots(figsize=(10,5))
ax.grid(linestyle=':', linewidth='0.5', color='silver')
ax.set_axisbelow(True)
sns.color_palette(color_plan)
#plt.gca().xaxis.set_major_locator(mticker.MultipleLocator(5))
#plt.gca().yaxis.set_major_locator(mticker.MultipleLocator(10))
brunei = plt.plot(days, days_bru["brunei"])
cambodia = plt.plot(days, days_cam["cambodia"])
#indonesia = plt.plot(days, days_indo["indonesia"])
laos = plt.plot(days, days_laos["laos"])
malaysia = plt.plot(days, days_malay["malaysia"])
philippines = plt.plot(days, days_philip["philippines"])
singapore = plt.plot(days, days_sg["singapore"])
thailand = plt.plot(days, days_th["Thailand"])
vietnam = plt.plot(days, days_viet["vietnam"])
myanmar = plt.plot(days_mm["days"], days_mm["myanmar"])
plt.title("Confirmed case growth for Myanmar compared as of "+str(pd.to_datetime("today").tz_localize("UTC").tz_convert("Asia/Yangon").strftime("%d-%m-%Y")))
plt.legend((brunei[0],cambodia[0],laos[0],malaysia[0],philippines[0],singapore[0],thailand[0],vietnam[0],myanmar[0]),
("Brunei ", "Cambodia", "Laos", "Malaysia", "Philippines", "Singapore", "Thailand", "Vietnam", "Myanmar"),
loc="lower left", bbox_to_anchor=(1, 0.5))
plt.text(0, -0.2,str(pd.to_datetime("today").tz_localize("UTC").tz_convert("Asia/Yangon").strftime("%d-%m-%Y"))+" is the "+str(len(days_mm))+"th day since we have identified the first COVID-19 patient in our country.", horizontalalignment="left", verticalalignment="bottom", transform=ax.transAxes)
plt.text(0, -0.3,"On "+str(len(days_mm))+"th day, Indonesia has "+ str(days_indo.loc[len(days_mm)-1,"indonesia"])+" confirmed cases.", horizontalalignment="left", verticalalignment="bottom", transform=ax.transAxes)
cases = str(pd.to_datetime("today").tz_localize("UTC").tz_convert("Asia/Yangon").strftime("%d-%m-%Y"))+"_comparison.svg"
plt.savefig(cases, bbox_inches = "tight")
plt.show()
files.download(cases)
# + id="b_ei2lls4U3v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="17fb6dad-0b73-4d02-b95f-216653922180"
# FIX: a bare `reset` is a NameError at runtime — it is the IPython %reset
# line magic, which jupytext serialises as a commented magic.
# %reset
# + id="opcExIYK4VjZ" colab_type="code" colab={}
| myanmar_compared.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit
# language: python
# name: python3
# ---
import pandas as pd
import random
from tqdm import tqdm
import gensim
from gensim.models import Word2Vec
from gensim.parsing.preprocessing import preprocess_documents
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
# # References:
# -
#
# +
root = './RASA/data/'
model_root='./RASA/offline_models/'
processed = './Data/processing/Processed_Airbnb/'
raw = './Data/raw/'
def create_embeddings(text_corpus_listing, name):
    """Train a Doc2Vec (DBOW) model on the corpus, save it, and return it.

    The model is written to ``<root>/embeddings/<name>_embeddings``.
    """
    docs = preprocess_documents(text_corpus_listing)
    tagged = []
    for idx, tokens in enumerate(docs):
        tagged.append(TaggedDocument(tokens, [idx]))
    model = Doc2Vec(tagged, dm=0, vector_size=200, window=2, min_count=1, epochs=100, hs=1)
    model.save(root + 'embeddings/' + name + '_embeddings')
    return model
# -
listings = pd.read_csv(raw+'listings.csv.gz', sep=',')
reviews = pd.read_csv(processed+'ratings_filter.csv', sep=',')
listings=listings.rename(columns={"id": "listing_id"})
listings.to_csv(processed+'listings.csv.gz', sep=',')
# ### User Review Embeddings
reviews = reviews.dropna()
print("No.of.reviews:"+str(len(reviews)))
users = reviews["reviewer_id"].unique().tolist()
print("No.of.users:"+str(len(users)))
reviews.info()
comment_corpus_model = create_embeddings(reviews['comments'].values,'review')
new_doc = gensim.parsing.preprocessing.preprocess_string("private room dishwasher")
test_doc_vector = comment_corpus_model.infer_vector(new_doc)
sims = comment_corpus_model.docvecs.most_similar(positive = [test_doc_vector])
topK = 5
for s in sims[:topK]:
print(f"{(s[1])} | {reviews['listing_id'].iloc[s[0]]}")
# ### Listing word embeddings
listings = pd.read_csv(raw+'listings.csv.gz', sep=',')
print("No.of.listings:"+str(len(listings)))
listings.info()
# +
# choose columns to generate embeddings.
listings = listings[['id','listing_url','name','description','neighborhood_overview','picture_url',
'property_type','room_type','accommodates','bathrooms','bathrooms_text',
'bedrooms','beds','amenities','price','minimum_nights','maximum_nights','review_scores_rating',
'review_scores_accuracy','review_scores_cleanliness','review_scores_checkin',
'review_scores_communication','review_scores_location']]
listings.fillna('0', inplace=True)
listings.reset_index(drop = True, inplace = True)
# -
import string
def remove_punc(sample_str):
    """Strip every ASCII punctuation character from ``sample_str``."""
    # str.translate with a deletion table removes all punctuation in a
    # single C-level pass over the string.
    return sample_str.translate(str.maketrans('', '', string.punctuation))
# +
listings['words_features'] = listings['amenities'].apply(remove_punc)
# FIX: replace the per-row loop of chained assignments
# (listings[col][ind] = ...), which is O(n) Python and triggers pandas'
# SettingWithCopy ambiguity, with equivalent vectorised column operations.
score_cols = ['review_scores_rating', 'review_scores_accuracy',
              'review_scores_cleanliness', 'review_scores_checkin',
              'review_scores_communication', 'review_scores_location']
# Mean of the six review sub-scores.
listings['review_scores_rating'] = listings[score_cols].astype(float).sum(axis=1) / 6
# Concatenate the descriptive fields into one text blob per listing
# (same field order as the original loop).
listings['words_features'] = (
    'amenities:' + listings['words_features']
    + 'description:' + listings['description']
    + 'neighborhood_overview:' + listings['neighborhood_overview']
    + 'property_type:' + listings['property_type']
    + 'room_type:' + listings['room_type']
    + 'accommodates:' + listings['accommodates'].astype(str)
    + 'bedrooms:' + listings['bedrooms'].astype(str)
    + 'beds:' + listings['beds'].astype(str)
    + 'price range:' + listings['price']
)
listings = listings.rename(columns={"review_scores_rating": "overall_rating"})
listings["overall_rating"].fillna(0, inplace=True)
# +
text_corpus_model = create_embeddings(listings['words_features'].values,'list')
new_doc = gensim.parsing.preprocessing.preprocess_string("private room dishwasher")
test_doc_vector = text_corpus_model.infer_vector(new_doc)
sims = text_corpus_model.docvecs.most_similar(positive = [test_doc_vector])
for s in sims:
print(f"{(s[1])} | {listings['listing_url'].iloc[s[0]]}")
# -
# ### Content-based filtering
# - https://github.com/SarangDeshmukh7/Recommendation-Engine/blob/master/Content_Based_Filtering.ipynb
users = reviews["reviewer_id"].unique().tolist()
random.shuffle(users)
reviews_listing = pd.merge(listings,reviews)
# +
reviews_listing['listing_id']= reviews_listing['listing_id'].astype(str)
# extract 90% of user ID's
users_train = [users[i] for i in range(round(0.9*len(users)))]
#split data into train and validation set
train_df = reviews_listing[reviews_listing['reviewer_id'].isin(users_train)]
validation_df = reviews_listing[~reviews_listing['reviewer_id'].isin(users_train)]
# +
#list to capture watch history of the users
watch_train = []
# populate the list with the movie ID
for i in tqdm(users_train):
temp = train_df[train_df["reviewer_id"] == i]["listing_id"].tolist()
watch_train.append(temp)
# +
# train word2vec model
model = Word2Vec(window = 10, sg = 1, hs = 0,
negative = 10,
alpha=0.03, min_alpha=0.0007,
seed = 14)
model.build_vocab(watch_train, progress_per=200)
model.train(watch_train, total_examples = model.corpus_count,
epochs=10, report_delay=1)
# +
model.train(watch_train, total_examples = model.corpus_count,
epochs=10, report_delay=1)
model.save(model_root+'ContentBasedFilter')
# +
watch = train_df[["listing_id","listing_url","name"]]
# remove duplicates
watch.drop_duplicates(inplace=True, subset='listing_id', keep="last")
# create listing id and title dictionary
watch_dict = watch.groupby('listing_id')['listing_id'].apply(list).to_dict()
def similar_watch(v, n = 5):
    """Return the ``n`` listings most similar to vector ``v``.

    Each result is a (listing_id, similarity score) pair; the first hit is
    dropped because it is the query itself.
    """
    neighbours = model.wv.similar_by_vector(v, topn=n + 1)[1:]
    return [(watch_dict[key][0], score) for key, score in neighbours]
# -
print(watch_dict['2818'])
similar_watch('2818',5)
# ### Collaborative filtering
# - Refer notebook ColaborativeFiltering.ipynb
# +
# !pip3 install pyspark
# importing all the libraries we’ll require to build the book recommender
import sys
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import *
from pyspark.ml.recommendation import ALS,ALSModel
# define the configurations for this Spark program
conf = SparkConf().setMaster("local[*]").setAppName("airbnb")
conf.set("spark.executor.memory", "6G")
conf.set("spark.driver.memory", "2G")
conf.set("spark.executor.cores", "4")
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
conf.set("spark.default.parallelism", "4")
# create a Spark Session instead of a Spark Context
spark = SparkSession.builder \
.config(conf = conf) \
.appName("spark session example") \
.getOrCreate()
listings_df = spark.read.option("delimiter", ",").option("header", "true").csv('/content/drive/MyDrive/299/listings.csv')
listings_df.show(2)
listings_df.select('id').distinct().show(2)
user_ratings_df = spark.read.option("delimiter", ",").option("header", "true").csv('/content/drive/MyDrive/299/ratings_filter.csv')
# Columns User-ID, ISBN and Book-Rating were in string format, which we convert to int
ratings_df = user_ratings_df.withColumn("reviewer_id",
user_ratings_df['reviewer_id'].\
cast(IntegerType())).\
withColumn("listing_id", user_ratings_df['listing_id'].\
cast(IntegerType())).\
withColumn("rating",\
user_ratings_df['rating'].\
cast(IntegerType())).\
na.drop()
ratings_df.show(2)
# define parameters
als = ALS(maxIter=5, regParam=0.01, userCol="reviewer_id", itemCol="listing_id", ratingCol="rating",coldStartStrategy="drop")
#fit the model to the ratings
model = als.fit(ratings_df)
model.save("/content/drive/MyDrive/299/als_model")
user_ratings_df = spark.read.option("delimiter", ",").option("header", "true").csv('/content/drive/MyDrive/299/ratings_filter.csv')
# Columns User-ID, ISBN and Book-Rating were in string format, which we convert to int
ratings_df = user_ratings_df.withColumn("reviewer_id",
user_ratings_df['reviewer_id'].\
cast(IntegerType())).\
withColumn("listing_id", user_ratings_df['listing_id'].\
cast(IntegerType())).\
withColumn("rating",\
user_ratings_df['rating'].\
cast(IntegerType())).\
na.drop()
ratings_df.show(2)
# -
# ### Help link Embeddings
# +
import json
from typing import Any, Text, Dict, List
import torch
from bert_serving.client import BertClient
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
import numpy as np
from sentence_transformers import SentenceTransformer
# sentence embedding selection
sentence_transformer_select=True
pretrained_model='stsb-roberta-large' # Refer: https://github.com/UKPLab/sentence-transformers/blob/master/docs/pretrained-models/nli-models.md
score_threshold = 0.70 # This confidence scores can be adjusted based on your need!!
# +
def encode_standard_question(sentence_transformer_select=True, pretrained_model='bert-base-nli-mean-tokens'):
    """Encode every question in the FAQ database into sentence embeddings.

    The embeddings and their L2 norms (pre-computed for cosine-similarity
    lookups) are saved as .npy files under ``<root>/embeddings/``.

    Parameters
    ----------
    sentence_transformer_select : bool
        True -> use SentenceTransformer; False -> use a bert-serving client.
    pretrained_model : str
        SentenceTransformer model name (ignored on the bert-serving path).
    """
    if sentence_transformer_select:
        bc = SentenceTransformer(pretrained_model)
    else:
        bc = BertClient(check_version=False)
    # FIX: open the FAQ file with a context manager so the handle is closed
    # (the original `json.load(open(...))` leaked it).
    with open(raw + "/faq.json", "rt", encoding="utf-8") as fh:
        data = json.load(fh)
    standard_questions = [each['q'].replace('-', ' ') for each in data]
    print("Standard question size", len(standard_questions))
    print("Start to calculate encoder....")
    if sentence_transformer_select:
        standard_questions_encoder = torch.tensor(bc.encode(standard_questions)).numpy()
    else:
        standard_questions_encoder = bc.encode(standard_questions)
    np.save(root + "embeddings/questions_embedding", standard_questions_encoder)
    # Pre-compute the per-row vector norms once so query-time cosine
    # similarity only needs a dot product.
    standard_questions_encoder_len = np.sqrt(np.sum(standard_questions_encoder * standard_questions_encoder, axis=1))
    np.save(root + "embeddings/questions_embedding_len", standard_questions_encoder_len)
encode_standard_question(sentence_transformer_select, pretrained_model)
| .ipynb_checkpoints/3a_EmbeddingsAndModels-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Generating text with Recurrent Neural Networks
#
# In this notebook we will make use of Recurrent Neural Networks to make sequence predictions. We will use the book "The Three Musketeers" by Alexandre Dumas as our dataset and we will predict characters in order to generate text.
# ## Data Reading
# Load the file from the data folder and inspect it. Standardize to lowercase.
filename = "../data/musquetairesShort"
# FIX: use a context manager so the file handle is closed deterministically
# (the original `open(filename).read()` leaked it).
# NOTE(review): no encoding is specified, so the platform default is used —
# consider pinning encoding="utf-8" after confirming the corpus encoding.
with open(filename) as corpus_file:
    raw_text = corpus_file.read()
raw_text[:100]
# Standardise to lowercase so the character vocabulary stays small.
text = raw_text.lower()
print('corpus length:', len(text))
# ### Text preparation
# We create a set with the different characters and two dictionaries from indices to chars
# <font color=red><b>Generate dictionaries for the char to indices and indices to chars.
# <br>_Hint: use the enumerate function on the chars set_</b>
# </font>
# Vocabulary: every distinct character in the corpus, in sorted order.
chars = sorted(list(set(text)))
print('total chars:', len(chars))
# Bidirectional lookup tables between characters and integer indices.
# Fills in the exercise placeholders — both dicts are required by the
# one-hot vectorisation and the text-generation cells below.
char_indices = {c: i for i, c in enumerate(chars)}
indices_char = {i: c for i, c in enumerate(chars)}
# Next we generate the input and output arrays:
#
# The input will consist of sentences of a fixed (_maxlen_) length, while the outputs will be the next characters in the text.
#
# So, if the text is "Welcome to Big Data Spain" with _maxlen_ = 5, we will have:
#
# + active=""
# Input = [
# w, e, l, c, o,
# e, l, c, o, m,
# l, c, o, m, e,
# ...
# ]
#
# Output = [
# m,
# e,
# ,
# ...
# ]
# -
# In order to avoid overfitting (and improve performances) we can add a _step_ to the structure so that with step = 3, for example:
# + active=""
# Input = [
# w, e, l, c, o,
# c, o, m, e, ,
# m, e, , t, o,
# ...
# ]
#
# Output = [
# m,
# t,
# ,
# ...
# ]
# -
# <font color=red><b>Fill the sentences and next_char lists with the input and output data</b></font>
# Slice the corpus into overlapping windows: each input is `maxlen`
# consecutive characters and the target is the character that follows.
maxlen = 40
step = 3  # stride between windows; > 1 reduces overlap/overfitting
sentences = []
next_chars = []
for i in range(0, len(text) - maxlen, step):
    # FIX: the placeholder comment-only loop body was an IndentationError
    # and left sentences/next_chars empty. Collect window + next character.
    sentences.append(text[i: i + maxlen])
    next_chars.append(text[i + maxlen])
print('nb sequences:', len(sentences))
sentences[:5]
next_chars[:5]
# ### Dataset generation
# We turn the text into one-hot-like vectors. Initialize the Input and output arrays to zero as boolean
import numpy as np
# FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the builtin
# bool is the correct dtype for these one-hot indicator arrays.
X = np.zeros((len(sentences), maxlen, len(chars)), dtype=bool)
Y = np.zeros((len(sentences), len(chars)), dtype=bool)
# One-hot encode every (timestep, character) of the input windows and the
# target character of each window.
for i, sentence in enumerate(sentences):
    for t, char in enumerate(sentence):
        X[i, t, char_indices[char]] = 1
    Y[i, char_indices[next_chars[i]]] = 1
X[0]
print ("timesteps = ", len (X[0]), ", numchars = ", len (X[0][0]))
# ## Model Generation
# Build the LSTM model to be trained train on the data, on this config:
# - LSTM layer, with 256 units
# - LSTM layer, with 256 units
# - Dense layer, with 64 units
# - Dense softmax layer
# - On compilation, use adam as the optimizer and categorical_crossentropy as the loss function.
# - Print the summary
#
#
# <font color=red><b>Remember to initialize it properly and to include input_shape on the first layer. <br> Hints: input_shape= (maxlen, len(chars))
# - Use the imported libraries</b></font>
# +
import os
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.keras.backend.clear_session()
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras.layers import LSTM
# +
## Add code here
# -
# ### Model Training
# Train the model for two of epochs and see how it works. Use a batch_size of 128
# +
## Add code here
# -
# ### Model Evaluation
# Let's test our model. In order to obtain a probabilistic answer we can sample from a probability array instead of just taking the max argument:
#
# <font color=red><b> Sometimes probabilities are rounded. Apply a normalization-like treatment to them in order to avoid this when sampling</b> </font>
#
#
# $$ p_i = \frac{p_i}{\sum_j p_j}$$
#
## Build a function to get the next predicted index:
def sample(preds, sample = True):
    """Pick the index of the next character.

    If ``sample`` is truthy, draw stochastically from the predicted
    distribution; otherwise return the argmax (greedy).
    """
    if sample:
        # Renormalise so the probabilities sum to exactly 1 — float rounding
        # in the model output can otherwise make np.random.multinomial raise
        # ValueError (fills in the exercise placeholder).
        preds = np.asarray(preds).astype("float64")
        preds = preds / np.sum(preds)
        probas = np.random.multinomial(1, preds, 1)
    else:
        probas = preds
    return np.argmax(probas)
# We get a seed in order to predict:
import random
start_index = random.randint(0, len(text) - maxlen - 1)
generated = ''
sentence = text[start_index: start_index + maxlen]
generated += sentence
print (generated)
# #### Predictions
# This will be the sequence for which we are going to predict the next character:
#
# <font color=red> <b> Predict the next character given the input x_pred. <br>Hint: remember to take the first item in list</b> </font>
## Predict next character given a model and the sequence to predict
def get_next_char (model, x_pred, indices_char, Sample = True):
    """Return the character predicted to follow the one-hot window x_pred.

    x_pred has shape (1, maxlen, n_chars); predict() returns a batch, so
    take the first (only) row as the distribution over characters.
    """
    # FIX: the placeholder `preds = ## Add code here` was a SyntaxError.
    preds = model.predict(x_pred, verbose=0)[0]
    # FIX: honour the Sample flag instead of the hard-coded 1.0, so greedy
    # decoding (Sample=False) actually works.
    next_index = sample(preds, Sample)
    return indices_char[next_index]
# +
x_pred = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.
Sample = True
# this gets the next character
next_char = get_next_char (model, x_pred, indices_char, Sample)
print (next_char)
# -
# Let's predict some more characters:
import sys
start_index = random.randint(0, len(text) - maxlen - 1)
sentence = text[start_index: start_index + maxlen]
print('Seed: ' + sentence + '"')
print('---------------------- Generated Text -----------------------')
chars_to_predict = 400
for i in range(chars_to_predict):
x_pred = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.
next_char = get_next_char (model, x_pred, indices_char, Sample)
sentence = sentence[1:] + next_char
sys.stdout.write(next_char)
sys.stdout.flush()
# ## Load a trained model
# Training Deep Learning Models is time consuming. So, some pretrained models are available to be loaded and take a look at better predictions. We will load a model for each 5 epochs in order to see the evolution.
#
# <font color=red> <b> Load a model for each time and predict the text <br> Hint: You can load the whole model or just the weights as the configuration is the same</b> </font>
# +
# Load a checkpoint saved every `partial_n_epoch` epochs to inspect how the
# model improves with training.
count = 0
partial_n_epoch = 5
times = 12
np.random.seed (1)
for j in range (times):
    count += partial_n_epoch
    print ("")
    print ("-------------- Next Model --------------")
    print ("Trained on ", count, " epochs")
    modelName = '../models/MusquetairesModelOptimizedMode_' + str (count) + '.h5'
    # Fills in the exercise placeholder: restore the full saved model
    # (architecture + weights) for this checkpoint so the generation cells
    # above can be re-run against it.
    model = tf.keras.models.load_model(modelName)
| training/MusquetairesTraining.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# # Clustering using numerical and categorical data : K-Prototypes
# + pycharm={"name": "#%%\n"}
import pandas as pd
import numpy as np
import os
import sys
from kmodes.kprototypes import KPrototypes
from sklearn.model_selection import train_test_split
# Load the per-customer features; the two order-date columns are dropped
# because they are not used by the clustering below.
customers = pd.read_csv("./../../../output/customers.csv")
customers.drop(columns=["first_order", "last_order"], inplace=True)
# Stratified 80/20 split on review_score; only the 20% test split is kept,
# so the (expensive) clustering below runs on a representative subsample.
y = customers["review_score"]
X_train, X_test, y_train, y_test = train_test_split(
    customers, y, test_size=0.2, random_state=42, stratify=y
)
customers = X_test
pd.plotting.scatter_matrix(customers);
# -
# ## Variables transformation from numerical to categorical
#
# And what if we consider the frequency as a category ?
#
# * It can be interesting to consider single order customers to the others ones.
# * Also, we could consider customers who contribute reviews (title, message) from the one who don't.
# * As monetary is really skewed, we may introduce bins.
#
# Let's try this and see if we can have a better clustering after that !
# + pycharm={"name": "#%%\n"}
customers
# -
# Let's plot the distribution of monetary :
# + pycharm={"name": "#%%\n"}
customers.monetary.describe()
# -
# ## Using K-Prototype clustering method
# + [markdown] pycharm={"name": "#%% md\n"}
# We have to evaluate the right number of clusters even if we are confident it's around **5** as stated by our previous experiments.
# + pycharm={"name": "#%%\n"}
# Prepare variables for transformation and remove useless ones
transformed_customers = customers.copy()
transformed_customers = transformed_customers.drop(
    columns=["review_completion_percentage", "nb_orders"]
)
# Replace exact zeros by a tiny positive value before the power transform in
# the next cell — presumably to keep the column strictly positive; TODO confirm.
transformed_customers.loc[
    transformed_customers.review_score == 0, "review_score"
] = 1e-5
transformed_customers.head()
# + pycharm={"name": "#%%\n"}
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, PowerTransformer
# Pre-processing
# Standardize then power-transform every numeric column, one column at a time
# (each column gets its own freshly fitted pipeline).
for feature in transformed_customers.select_dtypes(
    exclude=["object", "category"]
).columns:
    pipeline = make_pipeline(StandardScaler(), PowerTransformer())
    transformed_customers[feature] = pipeline.fit_transform(
        np.array(transformed_customers[feature]).reshape(-1, 1)
    )
transformed_customers.head()
# + pycharm={"name": "#%%\n"}
pd.plotting.scatter_matrix(pd.DataFrame(transformed_customers));
# + pycharm={"name": "#%%\n"}
from kmodes.kprototypes import KPrototypes
# Elbow method: record the K-Prototypes cost for k = 2..9.
# NOTE(review): categorical=[8, 10, 13, 14] are positional column indices into
# transformed_customers — verify they still point at the categorical columns
# if the upstream feature set changes.
costs = []
for n_clusters in list(range(2, 10)):
    k_prototypes = KPrototypes(n_clusters=n_clusters, random_state=42, n_jobs=-1)
    k_prototypes.fit_predict(transformed_customers, categorical=[8, 10, 13, 14])
    costs.append({"n_clusters": n_clusters, "cost": k_prototypes.cost_})
pd.DataFrame(costs).plot(title="Cost per number of clusters", x="n_clusters", y="cost");
# + pycharm={"name": "#%%\n"}
import seaborn as sns
from sklearn import metrics
# Final model with the retained k=5; labels are attached to the untransformed
# `customers` frame so the clusters can be interpreted on original values.
k_prototypes = KPrototypes(n_clusters=5, random_state=42, n_jobs=-1)
customers["cluster"] = k_prototypes.fit_predict(
    transformed_customers, categorical=[8, 10, 13, 14]
)
customers["cluster"] = customers.cluster.astype("category")
print(customers["cluster"].value_counts())
sns.pairplot(customers, hue="cluster", palette="Dark2", diag_kind="kde");
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Clustering visualisation
# + pycharm={"name": "#%%\n"}
from sklearn.manifold import TSNE
import plotly.express as px
# Create dummies for categorical variables
df = pd.get_dummies(
    pd.DataFrame(transformed_customers),
    prefix="",
    prefix_sep="",
    columns=transformed_customers.select_dtypes(include=["object", "category"]).columns,
)
print(
    "Silhouette Coefficient: %0.3f" % metrics.silhouette_score(df, customers["cluster"])
)
# 2-D t-SNE projection of the one-hot-encoded feature space, coloured by cluster.
tsne = TSNE(n_components=2, learning_rate="auto", init="random", n_jobs=-1)
projected_customers = tsne.fit_transform(df)
fig = px.scatter(
    projected_customers,
    x=0,
    y=1,
    color=customers.cluster,
    labels={"color": "cluster"},
    opacity=0.8,
)
fig.show()
# + pycharm={"name": "#%%\n"}
# Same projection in 3-D.
tsne = TSNE(n_components=3, learning_rate="auto", init="random", n_jobs=-1)
projected_customers = tsne.fit_transform(df)
fig = px.scatter_3d(
    projected_customers,
    x=0,
    y=1,
    z=2,
    color=customers.cluster,
    labels={"color": "cluster"},
    opacity=0.8,
)
fig.show()
# + pycharm={"name": "#%%\n"}
print("END")
| notebooks/modelisations/COMPLETE/K-Prototypes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Reliability Diagram
# +
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from os.path import join
from cal_methods import HistogramBinning, TemperatureScaling
from betacal import BetaCalibration
from sklearn.isotonic import IsotonicRegression
from sklearn.linear_model import LogisticRegression
# -
# Imports to get "utility" package
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath("utility") ) ) )
from utility.unpickle_probs import unpickle_probs
from utility.evaluation import get_bin_info, softmax
# Script for plotting reliability diagrams.
# ## Load in the data
# Directory holding the pickled (validation, test) logits, relative to this notebook.
PATH = join('..', '..', 'logits')
# Logit pickle files, one per model/dataset pair. Order matters: the
# plot_names lists passed to gen_plots below must line up index-for-index.
files = (
    'probs_resnet110_c10_logits.p', 'probs_resnet110_c100_logits.p',
    'probs_densenet40_c10_logits.p', 'probs_densenet40_c100_logits.p',
    'probs_resnet_wide32_c10_logits.p', 'probs_resnet_wide32_c100_logits.p',
    'probs_resnet50_birds_logits.p',
    'probs_resnet110_SD_c10_logits.p', 'probs_resnet110_SD_c100_logits.p',
    'probs_resnet152_SD_SVHN_logits.p',
    'probs_resnet152_imgnet_logits.p', 'probs_densenet161_imgnet_logits.p'
)
# ### Reliability diagrams as subgraph
# reliability diagram plotting for subplot case.
def rel_diagram_sub(accs, confs, ax, M = 10, name = "Reliability Diagram", xname = "", yname=""):
    """Draw one reliability diagram onto the given matplotlib subplot axis.

    accs, confs: per-bin accuracy and average confidence (one value per bin).
    ax: target axis. M: number of bins (bar width = 1/M).
    name, xname, yname: title and axis labels.
    """
    # Row-wise sort of each (acc, conf) pair: column 0 becomes min(acc, conf),
    # column 1 max(acc, conf). The red "Gap" bar is drawn up to the max and
    # the blue "Outputs" bar overlays it up to the min, so the visible red
    # area is the accuracy/confidence gap in each bin.
    acc_conf = np.column_stack([accs,confs])
    acc_conf.sort(axis=1)
    outputs = acc_conf[:, 0]
    gap = acc_conf[:, 1]
    bin_size = 1/M
    # Bin centres: bin_size/2, 3*bin_size/2, ..., 1 - bin_size/2.
    positions = np.arange(0+bin_size/2, 1+bin_size/2, bin_size)
    # Plot gap first, so its below everything
    gap_plt = ax.bar(positions, gap, width = bin_size, edgecolor = "red", color = "red", alpha = 0.3, label="Gap", linewidth=2, zorder=2)
    # Next add error lines
    #for i in range(M):
    #    plt.plot([i/M,1], [0, (M-i)/M], color = "red", alpha=0.5, zorder=1)
    #Bars with outputs
    output_plt = ax.bar(positions, outputs, width = bin_size, edgecolor = "black", color = "blue", label="Outputs", zorder = 3)
    # Line plot with center line.
    ax.set_aspect('equal')
    ax.plot([0,1], [0,1], linestyle = "--")  # perfect-calibration diagonal
    ax.legend(handles = [gap_plt, output_plt])
    ax.set_xlim(0,1)
    ax.set_ylim(0,1)
    ax.set_title(name, fontsize=24)
    ax.set_xlabel(xname, fontsize=22, color = "black")
    ax.set_ylabel(yname, fontsize=22, color = "black")
def get_pred_conf(y_probs, normalize = False):
    """Return (predicted class indices, confidences) for a score matrix.

    y_probs: array of shape (n_samples, n_classes) with per-class scores.
    normalize: when True, divide each row's maximum by the row sum so the
        confidence is a proper probability even if rows do not sum to one.
    """
    predictions = y_probs.argmax(axis=1)   # index of the top-scoring class
    confidences = y_probs.max(axis=1)      # score of that class
    if normalize:
        confidences = confidences / y_probs.sum(axis=1)
    return predictions, confidences
# ## Calibration methods for both 1-vs-rest and multiclass approach
# TODO add into scripts
def cal_res(method, path, file, M = 15, name = "", approach = "single", m_kwargs = {}):
    """Calibrate the logits stored in `file` with `method` and bin the results.

    method: calibration class (TemperatureScaling, HistogramBinning,
        IsotonicRegression, BetaCalibration, ...), instantiated as
        method(**m_kwargs).
    path, file: directory and pickle name holding (validation, test) logits.
    M: number of equal-width confidence bins.
    name: unused here; kept for call-site compatibility.
    approach: "single" fits one calibrator per class (1-vs-rest) on the
        softmaxed probabilities; anything else fits a single multiclass
        calibrator directly on the raw logits.
    m_kwargs: extra keyword arguments for `method`.
        NOTE(review): mutable default ({}) — harmless because it is only
        unpacked, never mutated, but a None default would be safer.

    Returns ((accs_test, confs_test, len_bins_test),
             (accs_val, confs_val, len_bins_val)) as produced by get_bin_info.
    """
    bin_size = 1/M
    FILE_PATH = join(path, file)
    # The pickle holds separate (logits, labels) pairs for validation and test.
    (y_logits_val, y_val), (y_logits_test, y_test) = unpickle_probs(FILE_PATH)
    y_probs_val = softmax(y_logits_val)  # Softmax logits
    y_probs_test = softmax(y_logits_test)
    if approach == "single":
        K = y_probs_test.shape[1]
        # Go through all the classes
        for k in range(K):
            # Prep class labels (1 fixed true class, 0 other classes)
            y_cal = np.array(y_val == k, dtype="int")[:, 0]
            # Train model
            model = method(**m_kwargs)
            model.fit(y_probs_val[:, k], y_cal) # Get only one column with probs for given class "k"
            # NOTE: calibrated values are written back in place, column by column.
            y_probs_val[:, k] = model.predict(y_probs_val[:, k]) # Predict new values based on the fitting
            y_probs_test[:, k] = model.predict(y_probs_test[:, k])
            # Replace NaN with 0, as it should be close to zero # TODO is it needed?
            idx_nan = np.where(np.isnan(y_probs_test))
            y_probs_test[idx_nan] = 0
            idx_nan = np.where(np.isnan(y_probs_val))
            y_probs_val[idx_nan] = 0
        # Per-class calibration breaks row normalisation, hence normalize=True.
        y_preds_val, y_confs_val = get_pred_conf(y_probs_val, normalize = True)
        y_preds_test, y_confs_test = get_pred_conf(y_probs_test, normalize = True)
    else:
        # Multiclass approach: the method consumes the raw logits directly.
        model = method(**m_kwargs)
        model.fit(y_logits_val, y_val)
        y_probs_val = model.predict(y_logits_val)
        y_probs_test = model.predict(y_logits_test)
        y_preds_val, y_confs_val = get_pred_conf(y_probs_val, normalize = False)
        y_preds_test, y_confs_test = get_pred_conf(y_probs_test, normalize = False)
    # Bin accuracy/confidence for the reliability diagrams.
    accs_val, confs_val, len_bins_val = get_bin_info(y_confs_val, y_preds_val, y_val, bin_size = bin_size)
    accs_test, confs_test, len_bins_test = get_bin_info(y_confs_test, y_preds_test, y_test, bin_size = bin_size)
    return (accs_test, confs_test, len_bins_test), (accs_val, confs_val, len_bins_val)
# +
def get_uncalibrated_res(path, file, M = 15):
    """Bin the raw (uncalibrated) test-set predictions of one logit file.

    Returns (accs, confs, len_bins) from get_bin_info for the test split;
    the validation split stored in the pickle is ignored.
    """
    _, (logits_test, labels_test) = unpickle_probs(join(path, file))
    probs_test = softmax(logits_test)
    preds_test, confs_test = get_pred_conf(probs_test, normalize=False)
    return get_bin_info(confs_test, preds_test, labels_test, bin_size=1/M)
# +
import pickle
def gen_plots(files, plot_names = [], M = 15, val_set = False):
    """Plot uncalibrated + four calibrated reliability diagrams per file.

    files: iterable of logit pickle names (resolved against the global PATH).
    plot_names: one title per file; also used to derive the output
        "<name>_bin_info.p" pickle and "<name>.pdf" figure file names.
        NOTE(review): mutable default ([]) — only indexed, never mutated.
    M: number of confidence bins.
    val_set: True plots the validation-set bins, False the test-set bins.
    """
    if val_set: # Plot Reliability diagrams for validation set
        k = 1  # cal_res returns (test, val); index 1 selects the validation tuple
    else:
        k = 0
    for i, file in enumerate(files):
        bin_info_uncal = get_uncalibrated_res(PATH, file, M)
        accs_confs = []
        # One entry per calibration method, in the order of `names` below.
        accs_confs.append(cal_res(TemperatureScaling, PATH, file, M, "", "multi"))
        accs_confs.append(cal_res(HistogramBinning, PATH, file, M, "", "single", {'M':M}))
        accs_confs.append(cal_res(IsotonicRegression, PATH, file, M, "", "single", {'y_min':0, 'y_max':1}))
        accs_confs.append(cal_res(BetaCalibration, PATH, file, M, "", "single", {'parameters':"abm"}))
        # Cache the binned results so plots can be regenerated without refitting.
        with open(plot_names[i] + "_bin_info.p", "wb") as f:
            pickle.dump(accs_confs, f)
        plt.style.use('ggplot')
        fig, ax = plt.subplots(nrows=1, ncols=5, figsize=(22.5, 4), sharex='col', sharey='row')
        names = [" (Uncal)", " (Temp)", " (Histo)", " (Iso)", " (Beta)"]
        # Uncalibrated information
        rel_diagram_sub(bin_info_uncal[0], bin_info_uncal[1], ax[0] , M = M, name = "\n".join(plot_names[i].split()) + names[0], xname="Confidence")
        # The four calibrated diagrams, one per method.
        for j in range(4):
            rel_diagram_sub(accs_confs[j][k][0], accs_confs[j][k][1], ax[j+1] , M = M, name = "\n".join(plot_names[i].split()) + names[j+1], xname="Confidence")
        ax[0].set_ylabel("Accuracy", color = "black")
        for ax_temp in ax:
            plt.setp(ax_temp.get_xticklabels(), rotation='horizontal', fontsize=18)
            plt.setp(ax_temp.get_yticklabels(), fontsize=18)
        plt.savefig("_".join(plot_names[i].split()) + ".pdf", format='pdf', dpi=1000, bbox_inches='tight', pad_inches=0.2)
        plt.show()
# -
# Quick run on the first two logit files only.
# NOTE: files[:2] are the plain ResNet-110 logits
# ('probs_resnet110_c10_logits.p', 'probs_resnet110_c100_logits.p'), not the
# stochastic-depth (SD) variants, so the labels must not say "(SD)".  The
# previous "(SD)" labels were wrong and — because the output .pdf/.p names are
# derived from plot_names — also overwrote the genuine ResNet-110(SD) results
# produced by the full run below.
gen_plots(files[:2], plot_names = ["ResNet-110 CIFAR-10", "ResNet-110 CIFAR-100",
                                   #"DenseNet-40 CIFAR-10", "DenseNet-40 CIFAR-100",
                                   #"WideNet-32 CIFAR-10", "WideNet-32 CIFAR-100",
                                   #"ResNet-50 Birds", "ResNet-110(SD) CIFAR-10",
                                   #"ResNet-110(SD) CIFAR-100",
                                   #"ResNet-152(SD) SVHN",
                                   #"ResNet-152 ImageNet", "DenseNet-161 ImageNet"
                                   ], M = 10, val_set=False)
# Full run. `files` holds 12 entries but only 10 plot names are supplied (the
# two ImageNet names are commented out), so the original call crashed with an
# IndexError at plot_names[10]; slice files to the 10 named entries so both
# lists stay index-aligned.
gen_plots(files[:10], plot_names = ["ResNet-110 CIFAR-10", "ResNet-110 CIFAR-100",
                                    "DenseNet-40 CIFAR-10", "DenseNet-40 CIFAR-100",
                                    "WideNet-32 CIFAR-10", "WideNet-32 CIFAR-100",
                                    "ResNet-50 Birds",
                                    "ResNet-110(SD) CIFAR-10", "ResNet-110(SD) CIFAR-100",
                                    "ResNet-152(SD) SVHN",
                                    #"ResNet-152 ImageNet", "DenseNet-161 ImageNet"
                                    ], val_set = False)
| scripts/calibration/Reliability Diagram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # A Prototype U.S. CMS analysis facility
#
# by
#
# <NAME>, <NAME>, **<NAME>**,
# *University of Nebraska, Lincoln*
#
# <NAME>, <NAME>, <NAME>,
# *University of Nebraska Holland Computing Center*
#
# <NAME>,
# *University of Wisconsin, Madison*
#
# <NAME>,
# *Morgridge Institute*
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Coffea - Columnar Object Framework For Effective Analysis
#
# [](https://doi.org/10.5281/zenodo.3333797)
# [Coffea Team](https://github.com/CoffeaTeam) && [Coffea Framework](https://github.com/CoffeaTeam/coffea)
#
# * Leveraging large data and data analysis tools from Python to provide an array-based syntax for manipulating HEP event data
# * Stark contrast to well established event loop techniques
# * "+" Tremendous potential to fundamentally change the time-to-science in HEP
# * "+" **Scales well horizontally with available multiple executors for efficient and flexible computations**
# * "-" Cannot easily utilize current analysis facilities (T2s) as the analysis is not grid friendly, it's meant to be quasi-interactive
#
#
# <img src="https://coffeateam.github.io/coffea/_images/columnar.png" width="400" height="400">
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Facility design: Coffea Local Executors
#
# Two local executors: *iterative_executor* and *futures_executor*:
#
# * The *iterative* executor simply processes each chunk of an input dataset in turn, using the current python thread.
#
# * The *futures* executor employs python multiprocessing to spawn multiple python processes that process chunks in parallel on the machine. **Processes are used rather than threads to avoid performance limitations due to the CPython global interpreter lock (GIL))**
# + [markdown] slideshow={"slide_type": "slide"}
# ## Facility design: Coffea Distributed Executors
#
#
# Four types of distributed executors:
#
# * the **Parsl** distributed executor, accessed via *parsl_executor*,
#
# * the **Dask** distributed executor, accessed via *dask_executor*,
#
# * the **Apache Spark** distributed executor, accessed via *run_spark_job*,
#
# * and the **Work Queue** distributed executor, accessed via *work_queue_executor*.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Dask: scalable analytics in Python
#
# * Dask provides flexible library for parallel computing in Python
# * Think of Dask as run-time parallel + cluster plugin for Python
# * Easily installed via Conda as the module “distributed”
# * NOT really designed with multi-user environments in mind out-of-the-box
# * Integrates with HPC clusters running a variety of scheduler including SLURM & HTCondor via “dask-jobqueue”
#
# <img src="https://docs.dask.org/en/latest/_images/dask-overview.svg" width="600" height="600">
#
# -
from dask.distributed import Client, progress
# Local single-machine scheduler: processes=False keeps everything in this
# process using 8 threads, with a 4 GB memory limit.
client = Client(processes=False, threads_per_worker=8,
                n_workers=1, memory_limit='4GB')
client
import dask.array as da
# Lazy 10000x10000 random array, chunked into 1000x1000 numpy blocks.
x = da.random.random((10000, 10000), chunks=(1000, 1000))
x
# What we got is a 10000x10000 array of random numbers, represented as many numpy arrays of size 1000x1000 (or smaller if the array cannot be divided evenly). In this case there are 100 (10x10) numpy arrays of size 1000x1000.
# These build a lazy task graph; nothing runs until .compute() below.
y = x + x.T
z = y[::2, 5000:].mean(axis=1)
z
# %time z.compute()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Why Dask?
#
# * <span style="color:green"> Dask Scales out to Clusters </span>: Dask figures out how to break up large computations and route parts of them efficiently onto distributed hardware.
#
# * <span style="color:blue"> Dask Scales Down to Single Computers </span>: Dask can enable efficient parallel computations on single machines by leveraging their multi-core CPUs and streaming data efficiently from disk.
#
# * <span style="color:orange"> Dask Integrates Natively with Python Code </span>: Python includes computational libraries like Numpy, Pandas, and Scikit-Learn, and many others for data access, plotting, statistics, image and signal processing, and more.
#
# * <span style="color:red"> Dask Supports Complex Applications </span>: Dask exposes low-level APIs to its internal task scheduler which is capable of executing very advanced computations. (e.g. the ability to build their own parallel computing system using the same engine that powers Dask’s arrays, DataFrames, and machine learning algorithms, but now with the institution’s own custom logic) **[similar to our use case]**
#
# * <span style="color:purple"> Dask Delivers Responsive Feedback </span>: monitoring via real-time and responsive dashboard, installed profiler, embedded IPython kernel
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Requirements for Analysis Facility @ T2
#
# * Easy to use for users
# * Scalable (dynamically/automatically)
# * Responsive/Interactive
# * **Utilize currently deployed hardware/middleware**
# * **Minimally intrusive for site administrators**
# * In addition it is important to get work (‘effort’ & CPU) accounted for by CMS
#
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Why Kubernetes (k8s)?
#
# Stack Overflow blog: "*Kubernetes is about six years old, and over the last two years, it has risen in popularity to consistently be one of the most loved platforms. This year, it comes in as the number three most loved platform.*"
#
# **Kubernetes is a platform that allows to run and orchestrate container workloads.**
#
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Why Kubernetes (k8s)?
#
# * <span style="color:orange">Kubernetes is very extensible, and developers love that.</span>
# There are a set of existing resources like Pods, Deployments, StatefulSets, Secrets, ConfigMaps, etc. However, users and developers can add more resources in the form of Custom Resource Definitions.
# * <span style="color:green"> Infrastructure as YAML. </span>
# All the resources in Kubernetes, can simply be expressed in a YAML file.
# * <span style="color:blue">Scalability. </span>
# Software can be deployed for the first time in a scale-out manner across Pods, and deployments can be scaled in or out at any time.
# * <span style="color:red">Time savings. </span>
# Pause a deployment at any time and resume it later.
# * <span style="color:purple">Version control. </span>
# Update deployed Pods using newer versions of application images and roll back to an earlier deployment.
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Why Kubernetes (k8s)?
#
# * **Horizontal autoscaling.**
# Kubernetes autoscalers automatically size a deployment’s number of Pods based on the usage of specified resources (within defined limits).
#
# * **Rolling updates.**
# Updates to a Kubernetes deployment are orchestrated in “rolling fashion,” across the deployment’s Pods. These rolling updates are orchestrated while working with optional predefined limits on the number of Pods that can be unavailable and the number of spare Pods that may exist temporarily.
#
# * **Canary deployments.**
# A useful pattern when deploying a new version of a deployment is to first test the new deployment in production, in parallel with the previous version, and scale up the new deployment while simultaneously scaling down the previous deployment.
#
# * **Security and Controls.**
# YAML is a great way to validate what and how things get deployed in Kubernetes. For example, one of the significant concerns when it comes to security is whether your workloads are running as a non-root user.
#
# * <span style="color:purple">**Another big aspect of Kubernetes popularity is its strong community.**</span>
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## Proposed Analysis Facility @ T2 Nebraska
#
# <img src="pics/coffea-casa.png" width="900" height="900" />
# + [markdown] slideshow={"slide_type": "slide"}
# ## Proposed Analysis Facility @ T2 Nebraska: challenges
#
# <img src="pics/coffea-casa-challenge.png" width="900" height="900" />
# + [markdown] slideshow={"slide_type": "slide"}
# ## Proposed Analysis Facility @ T2 Nebraska: configuration
# <img src="pics/coffea-casa1.png" width="900" height="900" />
# + [markdown] slideshow={"slide_type": "slide"}
# ## Proposed Analysis Facility @ T2 Nebraska: configuration
# <img src="pics/coffea-casa2.png" width="900" height="900" />
# + [markdown] slideshow={"slide_type": "slide"}
# ## Proposed Analysis Facility @ T2 Nebraska: configuration
# <img src="pics/coffea-casa3.png" width="700" height="700" />
# + [markdown] slideshow={"slide_type": "slide"}
# ## Coffea-casa: why you would love it?
#
# * <span style="color:blue">No x509 </span>: authentication enabled via CILogon and **WLCG Bearer Tokens** (macaroons)
# * <span style="color:purple">Security</span>: enabled TLS protocol over TCP sockets
# * <span style="color:green"> No need to think about xrootd </span>: **We use XCache with new XRootD authorization plugin**
# * <span style="color:red"> Access to "grid-style" analysis but from Python notebook!</span>
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## XCache as an access point to "Anydata, Anytime, Anywhere" (AAA)
#
#
# * *Xcache is a Squid-like cache*, but it primarily uses the “xroot” protocol, with HTTP protocol being added on.
# * *XCache provides a multi-threaded file caching application that can asynchronously fetch and cache file segments or whole files*.
# * Its primary design use case is caching static scientific data files of any format, large or small.
# * *Xcache is built upon Xrootd* and is flexible to be customized for many usage scenarios, via configuration or plugins.
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## XRootd authorization plugin
#
# * Code: https://github.com/bbockelm/xrdcl-authz-plugin
# -
# ! xrdcp -f root://xcache//store/data/Run2017B/SingleElectron/MINIAOD/31Mar2018-v1/60000/9E0F8458-EA37-E811-93F1-008CFAC919F0.root /dev/null
# + [markdown] slideshow={"slide_type": "slide"}
# ## Coffea-casa secrets
#
# All secrets are available in the directory */etc/cmsaf-secrets* at container startup.
#
# * */etc/cmsaf-secrets/condor_token* is a condor IDTOKEN useful for submitting to T3.
# * */etc/cmsaf-secrets/ca.key* is a CA private key useful for Dask
# * */etc/cmsaf-secrets/ca.pem* is a CA public key useful for Dask
# * */etc/cmsaf-secrets/hostcert.pem* is a host certificate and private key useful for the Dask scheduler.
# * */etc/cmsaf-secrets/usercert.pem* is a user certificate and private key useful for the Dask workers.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Coffea-casa building blocks
#
# * JupyterHub setup (Helm K8s charts): https://github.com/CoffeaTeam/jhub (except of specific secrets)
# * Docker images for Dask Scheduler and Worker: https://github.com/CoffeaTeam/coffea-casa
# * https://hub.docker.com/r/coffeateam/coffea-casa
# * https://hub.docker.com/r/coffeateam/coffea-casa-analysis
# * Docker image for JupyterHub (to get authentication macaroons in the launch environment)
# https://github.com/clundst/jhubDocker
# + [markdown] slideshow={"slide_type": "slide"}
# ## When you will be able to use it?
#
# * Alpha version is expected to be available soon for preliminary tests by the CMS community (ask your friend for an invitation :D).
# * Fully available during fall/winter 2020.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Demo time
#
# ... lets try to see how it works!
#
#
# 1. [Simple loop over events](adl1_tls.ipynb)
# 2. [More complex benchmark](adl8.ipynb)
# 3. [Using XCache](coffea_xcache.ipynb)
#
| Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: desc-python
# language: python
# name: desc-python
# ---
# %pylab inline
import pymaster as nmt
import healpy as hp
import numpy as np
import pyccl.nl_pt as pt
# Full-sky weight map used as the survey mask.
mask = hp.read_map("/global/homes/j/jharno/IA-infusion/SkySim5000/FullSky_weight.fits")
#Downgrading mask to reasonable size
mask = hp.ud_grade(mask, 256)
# Read mask and apodize it on a scale of ~1deg
mask = nmt.mask_apodization(mask, 1., apotype="Smooth")
hp.mollview(mask, coord=['E', 'E'], title='Apodized mask')
plt.show()
from astropy.table import Table
# Cool, so now, we need to load a catalog and bin it into healpix maps
# NOTE(review): `descriptions` is not a standard Table.read(format='ascii')
# argument (column naming uses `names`) — confirm it has the intended effect;
# columns are accessed below by the default 'colN' names anyway.
catalog = Table.read('/global/cscratch1/sd/jharno/IA_Infusion/GalCat_tomo2_All_0.06GpAM_RA_Dec_g1_g2_w_den_Sij.dat',
                     format='ascii', descriptions=['ra_deg', 'dec_deg', 'shear1', 'shear2', 'weight', 'z_true', 'delta', 's11', 's22', 's12'])
# Map each galaxy (RA, Dec in col1/col2) to an nside=256 healpix pixel.
pixels = hp.ang2pix(256, catalog['col1'], catalog['col2'], lonlat=True)
# +
# Accumulate per-pixel shear sums and galaxy counts, then average.
gamma1_map = np.zeros_like(mask)
gamma2_map = np.zeros_like(mask)
counts_map = np.zeros_like(mask)
from tqdm import tqdm
for i in tqdm(range(len(catalog))):
    gamma1_map[pixels[i]] += catalog['col3'][i]
    gamma2_map[pixels[i]] += catalog['col4'][i]
    counts_map[pixels[i]] += 1.
gamma1_map[counts_map >0] /= counts_map[counts_map >0]
gamma2_map[counts_map >0] /= counts_map[counts_map >0]
# -
# Read mask and apodize it on a scale of ~1deg
mask_effective = nmt.mask_apodization(counts_map, 1., apotype="Smooth")
hp.mollview(mask_effective, coord=['E', 'E'], title='Apodized mask')
plt.show()
hp.mollview(gamma1_map, coord=['E', 'E'], title='gamma1')
plt.show()
hp.mollview(gamma2_map, coord=['E', 'E'], title='gamma2')
plt.show()
# Spin-2 NaMaster field from the two shear maps; linear ell bins of width 8.
f_2 = nmt.NmtField(mask_effective, [gamma1_map, gamma2_map])
b = nmt.NmtBin.from_nside_linear(256, 8)
cl_22 = nmt.compute_full_master(f_2, f_2, b)
ell_arr = b.get_effective_ells()
# NOTE(review): the EB/BE legend labels below tag cl_22[2] as 'EB' and
# cl_22[1] as 'BE' — check against NaMaster's documented spin-2 ordering.
plt.plot(ell_arr, cl_22[0], 'b-', label='EE')
plt.plot(ell_arr, -cl_22[2], 'r-', label='EB')
plt.plot(ell_arr, cl_22[1], 'g-', label='BE')
plt.plot(ell_arr, cl_22[3], 'y-', label='BB')
plt.loglog()
plt.xlabel('$\\ell$', fontsize=16)
plt.ylabel('$C_\\ell$', fontsize=16)
plt.legend(loc='upper right', ncol=2, labelspacing=0.1)
plt.show()
# +
import pyccl as ccl
# Theory prediction: cosmology used to generate the comparison spectrum.
cosmo_ccl = ccl.Cosmology(
    Omega_c=0.22, Omega_b=0.0448,
    h=0.71, sigma8 = 0.801, n_s= 0.963,w0=-1.00,wa=0.0, Omega_k=0.0)
# -
# n(z) file: row 0 is z, row 2 the selected bin's dN/dz (unpack=True
# transposes, so nzs is indexed [column, sample]).
nzs=np.loadtxt("/global/u1/d/dlan/SkySim5000_IA_infusion/StageIV_zcen_nz_bin1-5.dat", unpack = True)
z=nzs[0]
nzs_s=nzs[2]
# NOTE(review): len(nzs[:,2]) counts the number of loaded columns, not the
# number of z samples — confirm this print is what was intended.
plot(z, nzs_s); print(len(nzs[:,2]))
# +
# Let's first compute some Weak Lensing cls
tracer_ccl = ccl.WeakLensingTracer(cosmo_ccl, dndz=(z, nzs_s),use_A_ia=False )
# +
ell=np.arange(768)
cl_ccl = ccl.angular_cl(cosmo_ccl, tracer_ccl, tracer_ccl,ell)
# -
# Apply the mask's mode-coupling (couple then decouple) so the theory C_ell
# is directly comparable with the measured, binned spectrum.
w = nmt.NmtWorkspace()
w.compute_coupling_matrix(f_2, f_2, b)
cl_convolved = w.decouple_cell(w.couple_cell( np.array([cl_ccl, np.zeros_like(cl_ccl), np.zeros_like(cl_ccl), np.zeros_like(cl_ccl)])))
# +
loglog(ell_arr, cl_convolved[0],label='CCL')
plot(ell_arr, cl_22[0], 'b-', label='catalog')
legend()
xlabel(r'$\ell$')
ylabel(r'Lensing angular $C_\ell$')
| NaMaster.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Class
# A class is a blueprint for creating objects. It is a basic concept which revolves around real-life entities.
# Syntax of a class:
# Minimal syntax skeleton: the `class` keyword, a PascalCase name, and an
# indented body (here just `pass` as a placeholder).
class Classname: #[classname is written in PascalCase]
    pass # methods and variables
# # What is an Object?
# Object is an instance of class. It is a self contained component which consists of some methods and properties. For example color name, dog breed, or any other physical thing/object.
# +
class Student:
    # Empty class body; instances carry no attributes or methods of their own.
    pass


Deepanshu = Student() #object-1 for class Student
Bhalla = Student() #object-2 for class Student
# -
# # Constructor
#
# Constructors are generally used for instantiating an object. Their task is to initialise the data members of the class when an object of the class is created.
#
# __init__ method in python is called the Constructor and is always called when an object of the class is created
# Illustrative snippet only: `__init__` normally lives inside a class body and
# is called automatically when an object of that class is created.
def __init__(self):
    pass #constructor body
# ## Types of Constructors
# There are mainly two types of Constructors :
# 1) Default Constructors
# 2) Parameterised Constructors
# ## Default Constructors
# Constructor without any arguments. The definition has only one argument i.e self which is like a pointer in cpp and reference in java because this is reference to the object being constructed by this constructor.
# +
class GirlScript:
    """Demo of a default constructor: no arguments beyond `self`."""

    def __init__(self):
        # The default constructor initialises a single data member.
        self.girlscript = "GirlScript"

    def printGirlScript(self):
        """Print the data member set by the constructor."""
        print(self.girlscript)


# Instantiate the class, then invoke the instance method on the new object.
obj = GirlScript()
obj.printGirlScript()
# -
# ## Parameterized Constructors
# Constructors with parameters are known as Parameterized Constructors. The first argument is the object reference, and the remaining arguments are supplied by the user.
# +
class Student:
    """Demo of a parameterized constructor: attribute values come from the caller."""

    def __init__(self, fname, lname, rollno):
        # `self` is the object reference; the remaining arguments are
        # supplied by the user at instantiation time.
        self.fname = fname
        self.lname = lname
        self.rollno = rollno


# Two distinct objects built from the same Student template.
deep = Student('Deepanshu', 'Bhalla', 191540005)
rahul = Student('Rahul', 'Kumar', 191540006)
print(deep.fname, rahul.fname)
| Python/OOP/Class,Objects and Constructors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Benchmarking Behavior Planners in BARK
#
# This notebook demonstrates the benchmarking workflow of BARK.
#
# Systematically benchmarking behavior consists of
# 1. A reproducable set of scenarios (we call it **BenchmarkDatabase**)
# 2. Metrics, which you use to study the performance (we call it **Evaluators**)
# 3. The behavior model(s) under test
#
# Our **BenchmarkRunner** can then run the benchmark and produce the results.
# +
import os
import matplotlib.pyplot as plt
from IPython.display import Video
from benchmark_database.load.benchmark_database import BenchmarkDatabase
from benchmark_database.serialization.database_serializer import DatabaseSerializer
from bark.benchmark.benchmark_runner import BenchmarkRunner, BenchmarkConfig, BenchmarkResult
from bark.benchmark.benchmark_analyzer import BenchmarkAnalyzer
from bark.runtime.commons.parameters import ParameterServer
from bark.runtime.viewer.matplotlib_viewer import MPViewer
from bark.runtime.viewer.video_renderer import VideoRenderer
from bark.core.models.behavior import BehaviorIDMClassic, BehaviorConstantAcceleration
# -
# # Database
# The benchmark database provides a reproducible set of scenarios.
# A scenario gets created by a ScenarioGenerator (we have a couple of them). The scenarios are serialized into binary files (ending `.bark_scenarios`) and packed together with the map file and the parameter files into a `.zip`-archive. We call this zipped archive a release, which can be published at Github, or processed locally.
#
# ## We will first start with the DatabaseSerializer
#
# The **DatabaseSerializer** recursively serializes all scenario param files sets
# within a folder.
#
# We will process the database directory from Github.
# +
# Serialize a small test set: 1 scenario, 5 world steps, 10 scenarios per set
dbs = DatabaseSerializer(test_scenarios=1, test_world_steps=5, num_serialize_scenarios=10)
dbs.process("../../../benchmark_database/data/database1")
# Pack scenarios + map + parameter files into a zipped release archive
local_release_filename = dbs.release(version="tutorial")
print('Filename:', local_release_filename)
# -
# Then reload to test correct parsing
# +
db = BenchmarkDatabase(database_root=local_release_filename)
scenario_generation, _ = db.get_scenario_generator(scenario_set_id=0)
# Iterate all scenario sets to confirm the release parses correctly
for scenario_generation, _ in db:
  print('Scenario: ', scenario_generation)
# -
# ## Evaluators
#
# Evaluators allow to calculate a boolean, integer or real-valued metric based on the current simulation world state.
#
# The current evaluators available in BARK are:
# - StepCount: returns the step count the scenario is at.
# - GoalReached: checks if a controlled agent’s Goal Definition is satisfied.
# - DrivableArea: checks whether the agent is inside its RoadCorridor.
# - Collision(ControlledAgent): checks whether any agent or only the currently controlled agent collided
#
# Let's now map those evaluators to some symbols, that are easier to interpret.
# Map short, readable result-column symbols to BARK evaluator class names.
evaluators = {
    "success": "EvaluatorGoalReached",
    "collision": "EvaluatorCollisionEgoAgent",
    "max_steps": "EvaluatorStepCount",
}
# We will now define the terminal conditions of our benchmark. We state that a scenario ends, if
# - a collision occurred
# - the number of time steps exceeds the limit
# - the definition of success becomes true (which we defined to reaching the goal, using EvaluatorGoalReached)
terminal_when = {
    "collision": lambda collided: collided,
    "max_steps": lambda steps: steps > 10,
    "success": lambda reached: reached,
}
# # Behaviors Under Test
# Let's now define the Behaviors we want to compare. We will compare IDM with Constant Velocity, but we could also compare two different parameter sets for IDM.
# One shared parameter server for both behavior models under test
params = ParameterServer()
behaviors_tested = {"IDM": BehaviorIDMClassic(params), "Const" : BehaviorConstantAcceleration(params)}
# # Benchmark Runner
#
# The BenchmarkRunner evaluates behavior models with different parameter configurations over the entire benchmarking database.
# +
benchmark_runner = BenchmarkRunner(benchmark_database=db,\
                                   evaluators=evaluators,\
                                   terminal_when=terminal_when,\
                                   behaviors=behaviors_tested,\
                                   log_eval_avg_every=10)
# maintain_history=True keeps every world state so runs can be replayed later
result = benchmark_runner.run(maintain_history=True)
# -
# We will now dump the files, to allow them to be postprocessed later.
result.dump(os.path.join("./benchmark_results.pickle"))
# # Benchmark Results
#
# Benchmark results contain
# - the evaluated metrics of each simulation run, as a Panda Dataframe
# - the world state of every simulation (optional)
# Reload the pickled results to verify serialization round-trips
result_loaded = BenchmarkResult.load(os.path.join("./benchmark_results.pickle"))
# We will now first analyze the dataframe.
# +
df = result_loaded.get_data_frame()
df.head()
# -
# # Benchmark Analyzer
#
# The benchmark analyzer allows filtering the results to visualize what really happened. These filters can be set via a dictionary with lambda functions specifying the evaluation criteria which must be fulfilled.
#
# A config is basically a simulation run, where step size, controlled agent, terminal conditions and metrics have been defined.
#
# Let us first load the results into the BenchmarkAnalyzer and then filter the results.
# +
# Filter for configs (simulation runs) where the model did NOT reach the
# goal, separately for each behavior under test
analyzer = BenchmarkAnalyzer(benchmark_result=result_loaded)
configs_idm = analyzer.find_configs(criteria={"behavior": lambda x: x=="IDM", "success": lambda x : not x})
configs_const = analyzer.find_configs(criteria={"behavior": lambda x: x=="Const", "success": lambda x : not x})
# -
# We will now create a video from them. We will use Matplotlib Viewer and render everything to a video.
# +
sim_step_time=0.2
params2 = ParameterServer()
fig = plt.figure(figsize=[10, 10])
viewer = MPViewer(params=params2, y_length = 80, enforce_y_length=True, enforce_x_length=False,\
                  follow_agent_id=True, axis=fig.gca())
video_exporter = VideoRenderer(renderer=viewer, world_step_time=sim_step_time)
# Replay only the 2nd and 3rd failing IDM configs into the video renderer
analyzer.visualize(viewer = video_exporter, real_time_factor = 1, configs_idx_list=configs_idm[1:3], \
                   fontsize=6)
video_exporter.export_video(filename="/tmp/tutorial_video")
# -
| docs/tutorials/05_benchmarking.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %reload_ext autoreload
# %autoreload 2
import torch
import torchvision
import os
import utils
import dataset
import engine
import model7
import config
import torchvision.datasets as datasets
import cuda
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import numpy as np
# -
# NOTE(review): this rebinds the imported `cuda` module name to the returned
# flag — works here because the module is not used again, but fragile.
cuda, device = cuda.initialize_cuda(config.SEED)
trainset = dataset.torch_datasets(True, True, dataset_name='cifar10')
testset = dataset.torch_datasets(False, True, dataset_name='cifar10')
utils.show_imgs(trainset, 16, (8, 8))
# Per-channel statistics used to normalize the images
mean, std = utils.get_stats(trainset)[0], utils.get_stats(trainset)[1]
print(mean, std)
transformations_list = [transforms.Normalize(mean, std)]
augmentations_list = [transforms.RandomRotation(7)]
# Rebuild the datasets, now with normalization (and augmentation for train)
trainset = dataset.torch_datasets(True, True, transformations_list, augmentations_list, dataset_name='cifar10')
testset = dataset.torch_datasets(False, True, transformations_list, dataset_name='cifar10')
train_dl = dataset.data_loader(trainset, config.BATCH_SIZE, config.NUM_WORKERS, cuda, shuffle=True)
test_dl = dataset.data_loader(testset, config.BATCH_SIZE, config.NUM_WORKERS, cuda, shuffle=True)
utils.show_batch(train_dl)
# Set dropout and define the model (summary only)
config.DROPOUT = 0.1
model = model7.Net(config.DROPOUT).to(device)
utils.summary(model, (3, 32, 32))
# +
config.DROPOUT = 0.1
model = model7.Net(config.DROPOUT).to(device)
loss_fn = utils.cross_entropy_loss_fn()
optimizer = utils.sgd_optimizer(model)
if config.DEBUG == True:
    config.EPOCHS = 2
test_loss = []
test_accuracy = []
misclassified_imgs = []
# Train/evaluate loop; engine.test appends metrics and misclassified images
for epoch in range(config.EPOCHS):
    print(f"Running Epoch {epoch+1}/{config.EPOCHS}\n")
    engine.train(model, train_dl, optimizer, loss_fn, device)
    engine.test(model, test_dl, loss_fn, device, 25, test_loss, test_accuracy, misclassified_imgs)
# -
# # Misclassified Images
class_idx = trainset.class_to_idx
utils.plot_incorrect_images(misclassified_imgs, class_idx, plot_size=(8, 8))
# # Validation Plots
utils.plot_metrics([{'metric':test_loss, 'label':'Validation Loss'}], "Loss")
utils.plot_metrics([{'metric':test_accuracy, 'label':'Validation Accuracy'}], "Accuracy")
| Week7/EVA5S7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score
import math
import os
import gc
import sys
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score, plot_confusion_matrix, classification_report, confusion_matrix, ConfusionMatrixDisplay
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from catboost import CatBoostClassifier, CatBoostRegressor, Pool, cv, sum_models
import lightgbm as lgb
import xgboost as xgb
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.utils.class_weight import compute_class_weight
from six.moves import xrange
from sklearn import preprocessing
from matplotlib import pyplot as plt
import seaborn as sns
# %matplotlib inline
def reduce_mem_usage(df):
    """Shrink *df*'s memory footprint in place and return it.

    Each integer/float column is downcast to the smallest NumPy dtype
    whose representable range (strictly) contains the column's min/max;
    object columns are converted to 'category'. Prints memory usage
    before and after.
    """
    bytes_per_mb = 1024 ** 2
    start_mem = df.memory_usage().sum() / bytes_per_mb
    print(('Memory usage of dataframe is {:.2f}'
           'MB').format(start_mem))
    int_candidates = (np.int8, np.int16, np.int32, np.int64)
    float_candidates = (np.float16, np.float32)
    for col in df.columns:
        if df[col].dtype == object:
            # Non-numeric text columns become pandas categoricals
            df[col] = df[col].astype('category')
            continue
        c_min = df[col].min()
        c_max = df[col].max()
        if str(df[col].dtype)[:3] == 'int':
            # Pick the narrowest integer type that strictly contains the range
            for candidate in int_candidates:
                limits = np.iinfo(candidate)
                if limits.min < c_min and c_max < limits.max:
                    df[col] = df[col].astype(candidate)
                    break
        else:
            # Narrowest float that strictly contains the range, else float64.
            # NOTE: float16/float32 downcasts can lose precision.
            for candidate in float_candidates:
                limits = np.finfo(candidate)
                if limits.min < c_min and c_max < limits.max:
                    df[col] = df[col].astype(candidate)
                    break
            else:
                df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / bytes_per_mb
    print(('Memory usage after optimization is: {:.2f}'
           'MB').format(end_mem))
    print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem)
                                        / start_mem))
    return df
# Load base train/valid feature tables and shrink their memory footprint
df_train = pd.read_csv("alfa1_df_train6.csv")
df_valid = pd.read_csv("alfa1_df_valid6.csv")
df_train.fillna('nothing', inplace=True)
df_valid.fillna('nothing', inplace=True)
df_train = reduce_mem_usage(df_train)
df_valid = reduce_mem_usage(df_valid)
# Additional engineered-feature tables to be merged in below
df_train_exp = pd.read_csv("alfa1_df_train10.csv")
df_valid_exp = pd.read_csv("alfa1_df_valid10.csv")
df_train_exp = reduce_mem_usage(df_train_exp)
df_valid_exp = reduce_mem_usage(df_valid_exp)
df_train_exp1 = pd.read_csv("alfa1_df_train11.csv")
df_valid_exp1 = pd.read_csv("alfa1_df_valid11.csv")
df_train_exp1 = reduce_mem_usage(df_train_exp1)
df_valid_exp1 = reduce_mem_usage(df_valid_exp1)
df_train_exp2 = pd.read_csv("alfa1_df_train12.csv")
df_valid_exp2 = pd.read_csv("alfa1_df_valid12.csv")
df_train_exp2 = reduce_mem_usage(df_train_exp2)
df_valid_exp2 = reduce_mem_usage(df_valid_exp2)
# Column sets to copy over (everything except keys/target/weight columns)
aug = df_train_exp.drop(['client_pin', 'lag_1', 'lag_2', 'weight'], axis=1).columns
aug1 = df_train_exp1.drop(['client_pin', 'lag_1', 'lag_2', 'weight'], axis=1).columns
aug2 = df_train_exp2.drop(['client_pin', 'lag_1', 'lag_2', 'weight'], axis=1).columns
# NOTE(review): positional column assignment — assumes the exp tables are
# row-aligned with df_train/df_valid; verify upstream ordering.
df_train[aug] = df_train_exp[aug]
df_valid[aug] = df_valid_exp[aug]
df_train[aug1] = df_train_exp1[aug1]
df_valid[aug1] = df_valid_exp1[aug1]
df_train[aug2] = df_train_exp2[aug2]
df_valid[aug2] = df_valid_exp2[aug2]
from_parq = ['application_id', 'event_type', 'event_category', 'event_name', 'device_screen_name', 'timezone', 'net_connection_type', 'net_connection_tech']
trn_input_lag_cols = []
for i in range(2, 36):
    trn_input_lag_cols.append(f'lag_{i}')
# Columns excluded from the model inputs (keys, target, weights)
to_drop = []
to_drop.append('lag_1')
to_drop.append('client_pin')
to_drop.append('weight')
to_drop.append('class_weight')
categorical = trn_input_lag_cols + from_parq + ['most_popular']
# Balanced class weights (square-rooted to soften them), merged in per-target
df_weight = pd.DataFrame()
df_weight['lag_1'] = df_train['lag_1'].unique()
df_weight['class_weight'] = compute_class_weight(classes=df_train['lag_1'].unique(), y=df_train['lag_1'], class_weight='balanced')**0.5
df_train = df_train.merge(df_weight, how='left', on='lag_1')
df_valid = df_valid.merge(df_weight, how='left', on='lag_1')
# Final sample weight = (recency weight)^2 * class weight
weights = np.array(df_train['weight'])**2 * np.array(df_train['class_weight'])
weights_val = np.array(df_valid['weight'])**2 * np.array(df_valid['class_weight'])
# Encode the target labels as integers
le1 = preprocessing.LabelEncoder()
le1.fit(df_valid['lag_1'].unique())
df_train['lag_1'] = le1.transform(df_train['lag_1'])
df_valid['lag_1'] = le1.transform(df_valid['lag_1'])
# Concatenate valid+train into one training pool (valid first)
X, y = pd.DataFrame(pd.concat((df_valid.drop(to_drop, axis=1), df_train.drop(to_drop, axis=1))).reset_index(drop=True)), pd.concat((df_valid['lag_1'], df_train['lag_1'])).reset_index(drop=True)
weights = np.concatenate([weights_val,weights])
X['event_type'] = X['event_type'].astype('category')
X['net_connection_tech'] = X['net_connection_tech'].astype('category')
# Train on fold index 2 of an 8-fold stratified split
n_splits = 8
folds = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=777)
trn_idx, val_idx = list(folds.split(X, y))[2]
X, y = X.iloc[trn_idx], y.iloc[trn_idx]
weights = weights[trn_idx, ]
lgb_train = lgb.Dataset(X, y, weight=weights)
lgb_eval = lgb.Dataset(df_valid.drop(to_drop, axis=1), df_valid['lag_1'], reference=lgb_train, weight=weights_val)
def evalerror(preds, dtrain):
    """Custom LightGBM feval: macro F1 over 10-class raw predictions.

    Returns the (name, value, is_higher_better) triple LightGBM expects.
    """
    labels = dtrain.get_label()
    # LightGBM hands multiclass predictions class-major; reshape to (n, 10)
    class_scores = preds.reshape(10, -1).T
    predicted = class_scores.argmax(axis=1)
    score = f1_score(labels, predicted, average='macro')
    return 'f1_score', score, True
# DART-boosted multiclass model; metric='custom' defers to evalerror below
tree_params = {
    "objective" : "multiclass",
    'metric':'custom',
    "num_class" : 10,
    'learning_rate': 0.12,
    'max_depth': 5,
    'n_jobs': 5,
    "num_leaves" : 24,
    'boosting':'dart',
    "bagging_fraction" : 0.9, # subsample
    "feature_fraction" : 0.9, # colsample_bytree
    "bagging_freq" : 5, # subsample_freq
    "bagging_seed" : 2020,
    'n_estimators': 1000
}
model = lgb.train(tree_params,
                  lgb_train,
                  feval = evalerror,
                  valid_sets=[lgb_eval],
                  early_stopping_rounds=250)
# Persist the trained booster for this fold
with open('lgb_model8_fold2.pkl', 'wb') as fout:
    pickle.dump(model, fout)
| 2.models/.ipynb_checkpoints/alfa1_lgbm-fold2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import numpy as np
import pandas as pd
#from datetime import datetime, timedelta
import datetime
import xarray as xr
#from math import atan2, log
import sys
import uuid
import matplotlib.pyplot as plt
import seawater as sw
import cartopy.crs as ccrs # import projections
import cartopy.feature as cf # import features
# #%matplotlib
sys.path.append('./..//subroutines/')
from read_routines import read_all_usv
# Read in the email contact used for the publisher_email global attribute.
# A context manager closes the file handle deterministically (the original
# left the handle open for the lifetime of the kernel).
with open('C:/Users/gentemann/Google Drive/f_drive/docs/projects/saildrone/baja/docs/email_for_file.txt', "r") as text_file:
    email_str = text_file.readlines()
# Local input/output directories for the Saildrone NetCDF files
dir_data_out = 'C:/Users/gentemann/Google Drive/public/2019_saildrone/felix/' #'f:/data/cruise_data/saildrone/saildrone_data/'
dir_data = 'C:/Users/gentemann/Google Drive/public/2019_saildrone/' #'f:/data/cruise_data/saildrone/saildrone_data/'
dir_data_pattern = 'C:/Users/gentemann/Google Drive/public/2019_saildrone/*.nc'
adir_usv = dir_data_pattern
# +
# Input filenames (legacy; the loop below derives names per cruise)
dir_in='/home/ananda/jfpiolle/TMP/'
dir_out='/home/ananda/jfpiolle/TMP/'
filename_in='/home/ananda/jfpiolle/TMP/l3r_temp.nc'
# Definitions that may need to be set differently for each file.
# itow_mask1/2 are index bounds used to flag tow-in/tow-out samples as
# low quality (see the quality_level assignment further below).
itow_mask1=45 #see code just a bit below for figure to determine where to set flags here
itow_mask2=-110 #see code just a bit below for figure to determine where to set flags here
ISDP = 'Saildrone'
SST_type = 'SSTdepth'
Annex_version = '01.1'
File_version = '01.0'
#astr_platform='SD1002'
#astr_title = 'Data from Saildrone cruise from SF to Guadalupe Island April-June 2018'
#astr_uuid = str(uuid.uuid4())
# +
#fname = 'C:/Users/gentemann/Google Drive/public/2019_saildrone/saildrone_west_coast_survey_2019_sd1047.nc'
#ds=xr.open_dataset(fname)
#ds
# -
data_dict = read_all_usv(dir_data_pattern)
# For each cruise dataset: build the GHRSST-style output filename, attach
# global attributes, derive 10 m wind speed/direction, rename/drop variables,
# assign quality flags, and write the L2R/L3R NetCDF file.
for iname,name in enumerate(data_dict):
#    if iname>3:
#        continue
    ds = data_dict[name]
    print('******************************')
    print(iname,name)
    indicative_date_time=(ds.time[0].dt.strftime("%Y%m%d%H%M%S")).data
    # Product string: sensor model (+ serial number) with unsafe chars replaced
    if ds.TEMP_CTD_MEAN.attrs.get('model_name'):
        Product_string = str(ds.TEMP_CTD_MEAN.attrs['model_name']).replace(' ','_').replace('/','_').replace('(','_').replace(')','_')
    else:
        Product_string = 'model_not_avail'
    if ds.TEMP_CTD_MEAN.attrs.get('serial_number'):
        tem = str(ds.TEMP_CTD_MEAN.attrs['serial_number'])
        ipos = tem.find('\n')
        tem=tem[:ipos]
        Product_string = str(Product_string + '_' + str(tem)).replace(' ','_').replace('/','_').replace('(','_').replace(')','_')
    else:
        Product_string = Product_string + '_' + 'serial_not_avail'
    filename_L3R = dir_data_out + indicative_date_time + \
                   '-' + ISDP + '_' + str(ds.attrs['vehicle_id']) + '-' + 'L2R' + '-' + SST_type + '-' +Product_string+ '-v' +Annex_version+ '-fv' +File_version+ '.nc'
    filename_L3R
    print(filename_L3R)
    # Add global attributes that are missing; some of these will need to be
    # changed for new cruises
    astr_uuid = str(uuid.uuid4())
    gstr = {'title': name,
            'name_input_file': name+'.nc',
            'name_of_converter_to_L2R':'https://github.com/cgentemann/Saildrone/blob/master/proc_files/Saildrone_netcdf_format_to_L2R_GHRSST_netcdf_format-v3.ipynb',
            'summary':'none',
            'references':'none',
            'institution':'Saildrone',
            'history':'Saildrone 6-hourly v1 files were used to create this file',
            'comment':'none',
            'license':'free and open',
            'id':'SSTdepth',
            'naming_authority':'org.shipborne-radiometer',
            'product_version':'1.0',
            'uuid':astr_uuid,
            'l2r_version_id':'1.1',
            'netcdf_version_id':'4.6.1',
            'date_created':datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%SZ"), #yyyy-mm-ddThh:mm:ssZ
            'file_quality_level':3,
            'spatial_resolution':'1 min',
            'start_time':str((ds.time[0].dt.strftime("%Y-%m-%dT%H:%M:%SZ")).data),
            'time_coverage_start':str((ds.time[0].dt.strftime("%Y-%m-%dT%H:%M:%SZ")).data),
            'stop_time':str((ds.time[-1].dt.strftime("%Y-%m-%dT%H:%M:%SZ")).data),
            'time_coverage_end':str((ds.time[-1].dt.strftime("%Y-%m-%dT%H:%M:%SZ")).data),
            'northernmost_latitude':ds.lat.max().data,
            'geospatial_lat_max':ds.lat.max().data,
            'southernmost_latitude':ds.lat.min().data,
            'geospatial_lat_min':ds.lat.min().data,
            'easternmost_longitude':ds.lon.max().data,
            'geospatial_lon_max':ds.lon.max().data,
            'westernmost_longitude':ds.lon.min().data,
            'geospatial_lon_min':ds.lon.min().data,
            'geospatial_lat_units':'degrees_north',
            'geospatial_lon_units':'degrees_east',
            'source':'SSTdepth, wind_speed',
            'platform':'Saildrone'+ '_' + str(ds.attrs['vehicle_id']),
            'metadata_link':'TBD',
            'keywords':'Oceans > Ocean Temperature > Sea Surface Temperature',
            'keywords_vocabulary':'NASA Global Change Master Directory (GCMD) Science Keywords',
            'acknowledgment':'The Schmidt Family Foundation, Saildrone, NASA Physical Oceanography',
            'project':'International Shipborne Radiometer Network',
            'publisher_name':'The ISRN Project Office',
            'publisher_url':'http://www.shipborne.radiometer.org',
            'publisher_email':email_str,
            'processing_level':'1.0'}
    #'sensor':str(dataset.TEMP_CTD_MEAN.sensor_description + '_' +\
    #             dataset.TEMP_CTD_MEAN.model_number + '_' + dataset.TEMP_CTD_MEAN.serial_number + ', ' + \
    #             dataset.UWND_MEAN.sensor_description + '_' + dataset.UWND_MEAN.model_number + '_' + \
    #             dataset.UWND_MEAN.serial_number)
    #del 'nodc_template_version']
    ds.attrs=gstr
    # Rename table: Saildrone variable names -> L2R output names
    COORD_ATTR = 'lat lon time'
    FLOAT_FILLVALUE = np.finfo(np.float32).min
    swapvar = {
        'TEMP_CTD_MEAN': 'sea_water_temperature',
        'COG_MEAN': 'course_over_ground',
        'lat': 'lat',
        'lon': 'lon',
        'HDG_MEAN': 'true_bearing',
        'ROLL_MEAN': 'platform_roll',
        'PITCH_MEAN': 'platform_pitch',
        'SOG_MEAN': 'speed_over_ground',
        'TEMP_CTD_STDDEV':'sst_total_uncertainty'
    }
    ws_comment = 'Instrument located at to of Saildrone mast at 5.2 m' + '. This was adjusted ' +\
                 'to 10 m as ws_10m = ws*np.log(10./1e-4))/np.log(WS_height/1e-4'
    ws_attr={'standard_name':'wind_speed',
             'long_name':'wind_speed',
             'valid_min':0,
             'valid_max':100,
             'units':'m s-1',
             '_FillValue':FLOAT_FILLVALUE,
             'source':'anemometer',
             'comment':ws_comment,
             'height':'10 m',"coordinates":COORD_ATTR}
    wdir_attr={'standard_name':'wind_to_direction',
               'long_name':'local wind direction',
               'valid_min':0,
               'valid_max':360,
               'units':'degrees',
               '_FillValue':FLOAT_FILLVALUE,
               'source':'anemometer',
               'comment':ws_comment,
               'height':'10 m',"coordinates":COORD_ATTR}
    # Create the new variables
    # ------------------------
    # 1. wind speed: magnitude of the u/v components, then a log-law
    #    adjustment from the 5.2 m mast height to the standard 10 m
    WS=np.sqrt(ds.UWND_MEAN**2+ds.VWND_MEAN**2)
    WS_height=int(5.2) #5.2 m installed
    WS_10m = (WS*np.log(10./1e-4))/np.log(WS_height/1e-4)
    ds['wind_speed']=WS_10m
    ds.wind_speed.fillna(FLOAT_FILLVALUE)
    ds.wind_speed.attrs = ws_attr
    ds['wind_direction']=np.arctan2(ds.VWND_MEAN, ds.UWND_MEAN)*np.pi/180.
    ds.wind_direction.fillna(FLOAT_FILLVALUE)
    ds.wind_direction.attrs=wdir_attr
    # Swap variable names; any variable not in swapvar is dropped
    for var in ds:
        var2 = var
#        print(var)
        if swapvar.get(var):
            ds = ds.rename({var:swapvar.get(var)})
            var2 = swapvar.get(var)
#        if any(vv==var2 for vv in list_var):
#            ds #just a place holder does nothing
        else:
            ds = ds.drop(var2)
    # 4. Quality level: default best_quality (5); tow-in/tow-out segments
    #    (the itow_mask index bounds set at the top of the file) get 2
    flag_bytes=np.byte((0,1,2,3,4,5)) #bytearray([0,1,2,3,4,5])
    iqual_byte = np.ones(shape=ds.time.shape, dtype='b')*5 #change byte to b1
    iqual_byte[:itow_mask1] = 2 #set at top of file from looking at data
    iqual_byte[itow_mask2:] = 2
    vattrs = {
        'long_name': 'measurement quality value',
        'flag_meanings': 'no_data bad_data worst_quality low_quality acceptable_quality best_quality',
        'flag_values': flag_bytes,
        '_FillValue': -128
    }
    ds['quality_level'] = xr.DataArray(
        iqual_byte,
        dims=['time'],
        attrs=vattrs
    )
    # Finalize variable-level metadata and write the output NetCDF
    l3r=ds
    l3r.sea_water_temperature.attrs['valid_min']=260.0
    l3r.sea_water_temperature.attrs['valid_max']=320.0
    l3r.sea_water_temperature.attrs['units']='kelvin'
    l3r.sea_water_temperature.attrs['long_name']='sea surface depth temperature at 0.6m'
    l3r.time.attrs['standard_name']='time'
    l3r.time.attrs['long_name']='time'
    l3r.lon.attrs['standard_name']='longitude'
    l3r.lon.attrs['long_name']='longitude'
    l3r.lat.attrs['long_name']='latitude'
    l3r.lat.attrs['standard_name']='latitude'
    l3r.true_bearing.attrs['long_name']='platform true bearing'
    l3r.true_bearing.attrs['standard_name']='platform_orientation'
    l3r.speed_over_ground.attrs['long_name']='platform speed over ground'
    l3r.sst_total_uncertainty.attrs['standard_name']='sea_water_temperature standard error'
    l3r.sst_total_uncertainty.attrs['long_name']=' sea water temperature total uncertainty'
    l3r.sst_total_uncertainty.attrs['valid_min']=0.0
    l3r.sst_total_uncertainty.attrs['valid_max']=2.0
    l3r.sst_total_uncertainty.attrs['units']='kelvin'
    l3r.to_netcdf(filename_L3R)
# # Make some plots of the data
#get list of all filenames in directory
from glob import glob
# Get list of all converted output filenames in the output directory
files = [x for x in glob(dir_data_out+'*.nc')]
print('number of file:',len(files))
# +
fig = plt.figure(figsize=(14,10))
ax = plt.axes(projection = ccrs.PlateCarree()) # create a set of axes with PlateCarree projection
# Scatter every cruise's SST track on one global map
for ifile,file in enumerate(files):
#    if ifile>2:
#        continue
    print(ifile)
    ds = xr.open_dataset(file)
    ds.close()
    im=ax.scatter(ds.lon,ds.lat,c=ds.sea_water_temperature,s=.15,transform=ccrs.PlateCarree(),label=ds.attrs['platform'],cmap='jet')
ax.coastlines(resolution='10m')
ax.set_extent([-180,180,-80,80])
#ax.legend()
cax = fig.add_axes([0.91, 0.26, 0.02, 0.48])
cbar = fig.colorbar(im,cax=cax, orientation='vertical')
cbar.set_label('SST ($^\circ$C)')
plt.show()
fig.savefig(dir_data_out+'map_all_data_2020-06-05.png')
# +
#file = 'C:/Users/gentemann/Google Drive/public/2019_saildrone/saildrone_tpos_sd1068_2019.nc'
#ds = xr.open_dataset(file)
#ds.close()
#ds
# -
| proc_files/.ipynb_checkpoints/Saildrone_netcdf_format_to_L2R_GHRSST_netcdf_format-v3-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
datafile = "nyc_311_data_subset-2.csv"
def read_311_data(datafile):
    """Load and clean NYC 311 complaint data from *datafile* (a CSV path).

    Cleaning steps: normalize zip codes to 5-digit NYC strings, drop rows
    with any missing values or an 'Unspecified' borough, parse the
    created/closed timestamps, and keep only rows with a non-negative
    processing time.

    Returns a DataFrame indexed by 'Unique Key' with an added
    'processing_time' timedelta column.
    """
    import pandas as pd
    import numpy as np
    import datetime

    def fix_zip(input_zip):
        """Coerce a raw zip value to a 5-digit NYC zip string, else NaN."""
        try:
            input_zip = int(float(input_zip))
        except (ValueError, TypeError, OverflowError):
            # Not a plain number; try a ZIP+4 string like '10001-1234'
            try:
                input_zip = int(input_zip.split('-')[0])
            except (ValueError, AttributeError):
                return np.NaN
        # NYC zip codes are 1xxxx; anything outside that range is invalid
        if input_zip < 10000 or input_zip > 19999:
            return np.NaN
        return str(input_zip)

    # Read the file
    df = pd.read_csv(datafile, index_col='Unique Key')
    # Fix the zip codes
    df['Incident Zip'] = df['Incident Zip'].apply(fix_zip)
    # Drop all rows that have any NaN in them
    df = df.dropna(how='any')
    # Get rid of unspecified boroughs
    df = df[df['Borough'] != 'Unspecified']
    # Parse timestamps (vectorized — replaces the slow per-row strptime apply)
    # and create a processing-time column
    fmt = '%m/%d/%Y %H:%M'
    df['Created Date'] = pd.to_datetime(df['Created Date'], format=fmt)
    df['Closed Date'] = pd.to_datetime(df['Closed Date'], format=fmt)
    df['processing_time'] = df['Closed Date'] - df['Created Date']
    # Finally, drop negative processing times and return the final frame
    df = df[df['processing_time'] >= datetime.timedelta(0)]
    return df
# Load and clean the 311 complaint data
data = read_311_data(datafile)
# ## Plotting data on google maps
# !pip install gmplot --upgrade
# !python -m pip install --upgrade pip
# Our data dataframe contains latitudes and longitudes for each complaint. We can draw a heatmap that will help us see the relative concentration of complaints using lats and lons.
#
# ### GoogleMapPlotter constructor
# - GoogleMapPlotter(center_lat,center_lng, zoom)
# - from_geocode(location_string, zoom)
# +
import gmplot
#gmap = gmplot.GoogleMapPlotter(40.7128,74.0059,8)
gmap = gmplot.GoogleMapPlotter.from_geocode("New York",10)
# -
# ### Then generate the heatmap passing the two data series (latitude and longitude) to the function
gmap.heatmap(data['Latitude'],data['Longitude'])
# ### Save the heatmap to an html file
gmap.draw('incidents_311.html')
# ## Grouping operations
# ### Incidents by Borough
# #### Group data by borough and plot a bar chart of the incident count
# %matplotlib inline
# Incident counts per borough
borough_group = data.groupby('Borough')
borough_group.size().plot(kind='bar')
# ### Incidents by Agency
agency_group = data.groupby('Agency')
agency_group.size().plot(kind='bar')
# Let's combine the two in a single graph
agency_borough = data.groupby(['Agency','Borough'])
agency_borough.size().plot(kind='bar')
# #### This is quite unreadable and pointless!
# We can unstack the groups so that we get borough by agency
agency_borough.size().unstack().plot(kind='bar')
# #### Increase the size of the image and add a title
agency_borough = data.groupby(['Agency','Borough'])
agency_borough.size().unstack().plot(kind='bar',
                                     title='Incidents in each Agency by Borough',
                                     figsize=(15,15))
# ## Digression: The pandas.groupby()
import pandas as pd
# Small demo frame for exploring the pandas groupby API
writers = pd.DataFrame({'Author':['<NAME>','<NAME>',
                                  '<NAME>','<NAME>'],
                        'Country':['UK','USA','USA','UK'],
                        'Gender':['M','M','F','F'],
                        'Age':[46,66,80,85],
                        'Salary':[100,288,980,344]})
writers
writers.groupby('Country').all()
writers.groupby('Country').first()
writers.groupby('Country').last()
writers.groupby('Country').sum()
writers.groupby("Country").mean()
writers.groupby("Country").all()
writers.groupby("Country").apply(sum)
writers.groupby("Country").groups
# Group by two keys at once
grouped = writers.groupby(["Country","Gender"])
grouped.groups
# #### Group by age groups
def age_groups(df, index, col):
    """Classify row *index* of *df* into an age bucket based on column *col*.

    Returns 'Young' for values below 30, 'Middle' for 30-59, 'Old' otherwise.
    (Leftover debug ``print(index, col)`` removed — it spammed output on
    every groupby call.)
    """
    age = df[col].iloc[index]
    if age < 30:
        return 'Young'
    elif age < 60:
        return 'Middle'
    else:
        return 'Old'
writers['Age'].iloc[0]
age_groups(writers,0,"Age")
# Group rows by the bucket the classifier assigns to each positional index
grouped = writers.groupby(lambda x: age_groups(writers,x,'Age'))
grouped.groups
# ## Grouping by the values in a column
# #### For example, grouping the data by values in a column that are greater than or less than zero
import numpy as np
# Random 5x5 frame with named rows, used for sign-based grouping below
people = pd.DataFrame(np.random.randn(5,5),columns=['a','b','c','d','e'],
                      index=['Joe','Steve','Wes','Jim','Travis'])
people
# +
#people['a'][0]=0
# -
# #### Write a function that takes three arguments - a dataframe, an index, and a column name and returns the grouping for that row
def GroupColFunc(df, ind, col):
    """Label row *ind* of *df* by the sign of its value in column *col*.

    Returns 'Positive', 'Negative' or 'Zero', for use as a groupby key.
    """
    value = df[col].loc[ind]
    if value > 0:
        return 'Positive'
    if value < 0:
        return 'Negative'
    return 'Zero'
# Group rows of `people` by the sign of column 'a'
people.groupby(lambda x:GroupColFunc(people,x,'a')).groups
# ### Now we can compute stats on these groups
people.groupby(lambda x:GroupColFunc(people,x,'a')).mean()
people.groupby(lambda x:GroupColFunc(people,x,'b')).std()
# ## Incidents by time
# We know the creation date of each incident so we can build a bar graph of number of incidents by month
# We'll start by creating a new date field yyyymm
import datetime
# New yyyymm key derived from each incident's creation timestamp
data['yyyymm'] = data['Created Date'].apply(lambda x:datetime.
                                            datetime.
                                            strftime(x,'%Y%m'))
data['yyyymm']
date_agency = data.groupby(['yyyymm','Agency'])
date_agency.size().unstack().plot(kind='bar',figsize=(15,15))
# ## Examining agencies
# #### We'll look at the frequency by agency and report the top 5 values
data.groupby('Agency').size().sort_values(ascending=False)
data.groupby('Agency').size().sort_values(ascending=False).plot(kind='bar',
                                                                figsize=(20,4))
# ### We can drill down into complaints by Agency by borough
agency_borough = data.groupby(['Agency','Borough']).size().unstack()
agency_borough
# ### We can create 'top 5 Agency' subplots for each borough
# +
COL_NUM = 2
ROW_NUM = 3
import matplotlib.pyplot as plt
fig, axes = plt.subplots(ROW_NUM, COL_NUM, figsize=(12,12))
# One subplot per borough: top-5 agencies by incident count.
# NOTE(review): DataFrame.iteritems() is deprecated in newer pandas
# (use .items()) — confirm the pinned pandas version before changing.
for i,(label,col) in enumerate(agency_borough.iteritems()):
    ax = axes[int(i/COL_NUM),i%COL_NUM]
    col = col.sort_values(ascending=False)[:5]
    col.plot(kind='barh',ax=ax)
    ax.set_title(label)
plt.tight_layout()
# -
for i, (label,col) in enumerate(agency_borough.iteritems()):
    print(i,label,col)
# ## Processing time
# We can compute simple statistics on processing time
# Simple statistics on processing time per borough
grouped = data[['processing_time','Borough']].groupby('Borough')
grouped.describe()
# ### But it's easier to convert the timedelta processing_time into floats for calculation purposes
# +
import numpy as np
# Processing time expressed as a float number of days
data['float_time'] = data['processing_time'].apply(lambda x:
                                                   x/np.timedelta64(1,'D'))
data
# -
# #### Now we can compute stats easily
grouped = data[['float_time','Agency']].groupby('Agency')
grouped.mean().sort_values('float_time',ascending=False)
data['float_time'].hist(bins=50)
| All iPython Notebooks/_6_BAMM_101_W_8_Visualization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ameliachoi/tutorial-python-machine-learning/blob/master/python_ml_08_opinion_review.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="iA8Eh0ddE8GA"
# ## [Tutorial] Introduction to and practice of document clustering on the Opinion Review dataset
#
# + [markdown] id="0j2hLnH9a2Y9"
# ----
# + id="MxvrNhyZE7lz" outputId="80051861-49b8-4df3-911f-a4551b82b125" colab={"base_uri": "https://localhost:8080/", "height": 197}
import pandas as pd
import glob, os
path = r'/content/drive/My Drive/topics'
all_files = glob.glob(os.path.join(path, "*.data"))
filename_list = []
opinion_text = []
# Collect each file's name into filename_list, and each file's contents
# (loaded as a DataFrame, then stringified) into opinion_text
for file_ in all_files:
    # Read the individual file into a DataFrame
    df = pd.read_table(file_,index_col=None, header=0,encoding='latin1')
    # Derive the bare document name from the absolute path: keep the last
    # path component (on Windows split on '\\' instead of '/') and strip
    # the trailing .data extension
    filename_ = file_.split('/')[-1]
    filename = filename_.split('.')[0]
    # Append the file name and file contents to their respective lists
    filename_list.append(filename)
    opinion_text.append(df.to_string())
# Build a DataFrame from the file-name and file-content lists
document_df = pd.DataFrame({'filename':filename_list, 'opinion_text':opinion_text})
document_df.head()
# + id="ni307SAHYC-p" outputId="e7854812-d96e-462b-8e7a-7b6d13c64e00" colab={"base_uri": "https://localhost:8080/", "height": 87}
# ! pip install nltk
import nltk
import string
from nltk.stem import WordNetLemmatizer
nltk.download('wordnet')
# Translation table mapping every punctuation character to None (deletes it)
remove_punct_dict = dict((ord(punct), None) for punct in string.punctuation)
lemmar = WordNetLemmatizer()
def LemTokens(tokens):
return [lemmar.lemmatize(token) for token in tokens]
def LemNormalize(text):
return LemTokens(nltk.word_tokenize(text.lower().translate(remove_punct_dict)))
# + id="b6W-0YdgE4FN" outputId="8f992c7a-52d9-4fc3-9d45-9ec7335c1857" colab={"base_uri": "https://localhost:8080/", "height": 72}
# The file name alone identifies which product/service each opinion reviews.
# Vectorize the raw opinion text as TF-IDF features.
from sklearn.feature_extraction.text import TfidfVectorizer
# Custom tokenizer (LemNormalize) + English stop words; uni/bi-grams;
# drop terms in <5% or >85% of documents.
tfidf_vect = TfidfVectorizer(tokenizer=LemNormalize,
                             stop_words='english',
                             ngram_range=(1,2),
                             min_df=0.05,
                             max_df=0.85)
# Fit/transform the 'opinion_text' column -> sparse document-term matrix.
feature_vect = tfidf_vect.fit_transform(document_df['opinion_text'])
# + [markdown] id="SGDbMKBeYkmE"
# * K-평균 군집화 기법을 적용합니다
# * 전자제품(네비게이션, 아이팟, 킨들, 랩탑 등), 자동차, 호텔으로 나뉨
# * 먼저 5개 중심 기반으로 어떻게 군집화되는 지 확인합니다
# + id="VAbgGFGKKbPP"
from sklearn.cluster import KMeans

# Cluster the TF-IDF document vectors into 5 groups.
km_cluster = KMeans(n_clusters=5,
                    max_iter=10000,
                    random_state=0)
km_cluster.fit(feature_vect)
cluster_label = km_cluster.labels_
cluster_centers = km_cluster.cluster_centers_
# + id="i2v_cnVuZHtA" outputId="cd38b416-71f9-42bc-f9c0-a93a991d9c35" colab={"base_uri": "https://localhost:8080/", "height": 197}
# Store each document's assigned cluster label next to its filename/content.
document_df['cluster_label'] = cluster_label
document_df.head()
# + id="jLJKMBxKZUMa" outputId="2dfdd0c1-f468-4b7c-f6cc-c5a999ffcf27" colab={"base_uri": "https://localhost:8080/", "height": 257}
# 군집화 결과 확인하기
document_df[document_df['cluster_label']==0].sort_values(by='filename')
# + [markdown] id="YsE4LdzbZjcW"
# * cluster #0 : 자동차
# + id="eVKI7xFVZge6" outputId="4d326d33-7cbd-456c-edc1-2f82055b5cf7" colab={"base_uri": "https://localhost:8080/", "height": 527}
document_df[document_df['cluster_label']==1].sort_values(by='filename')
# + [markdown] id="SGdH0DqVZr7o"
# * cluster #1 : 호텔
# + id="Yt4ltEsmZq2s" outputId="7ee5a0f4-4de4-4a62-b00a-7f807d197c5a" colab={"base_uri": "https://localhost:8080/", "height": 527}
document_df[document_df['cluster_label']==2].sort_values(by='filename')
# + [markdown] id="O1wDtmnTZ0lQ"
# * cluster #2 : 주로 네비게이션
# + id="NM4hm9CRZu3B" outputId="df52906d-ae46-4c2c-ef0a-83706c743ab7" colab={"base_uri": "https://localhost:8080/", "height": 317}
document_df[document_df['cluster_label']==3].sort_values(by='filename')
# + [markdown] id="aOfDDTQPaAkS"
# * cluster #3 : 주로 소형 전자기기(아이팟, 노트북 등)
# + id="x-Gs4yi6Z538" outputId="032268c1-a4bb-4d75-81d5-e52a17d23f1b" colab={"base_uri": "https://localhost:8080/", "height": 137}
document_df[document_df['cluster_label']==4].sort_values(by='filename')
# + [markdown] id="1etNXPhzaLRM"
# * cluster #4 : 자동차
# + id="1cqM4MpSaG9u" outputId="b198e91d-e401-4f7b-ed9c-923f1ae53f19" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Lower the number of centers from 5 to 3.
km_cluster = KMeans(n_clusters=3,
                    max_iter=10000,
                    random_state=0)
km_cluster.fit(feature_vect)
cluster_label = km_cluster.labels_
cluster_centers = km_cluster.cluster_centers_
# Assign each document its cluster label, then sort by that label.
document_df['cluster_label'] = cluster_label
document_df.sort_values(by='cluster_label')
# + [markdown] id="sNgXePfnasSv"
# * 자동차, 호텔, 전자기기 순으로 제대로 정렬되었습니다.
# + [markdown] id="uzq2b6F_awQw"
# ---
# ### 군집별 핵심 단어 추출하기
# + id="8914kbEIanYb" outputId="6322953b-5c03-49cd-9388-c36474136650" colab={"base_uri": "https://localhost:8080/", "height": 87}
# 클러스터 속성값 확인
cluster_centers = km_cluster.cluster_centers_
print('cluster_centers shape: ', cluster_centers.shape)
print(cluster_centers)
# + [markdown] id="snMSVL8Yb0cP"
# * 3개의 군집에 word 피처가 4611개로 구성되어 있음
# * 각 행의 배열 값은 각 군집 내의 위치가 개별 중심과 얼마나 가까운가를 상대 값으로 나타낸 것
# * 군집별 핵심 단어를 찾아봅니다
# * ndarray의 argsort()[:, ::-1]를 이용하면 cluster_centers 배열 내 값이 큰 순으로 정렬된 위치 인덱스 값을 반환합니다.
# * 위치 인덱스 값이 필요한 이유는 핵심 단어 피처의 이름을 출력하기 위함입니다
# + id="COpS2lRZbRHF"
# 군집별 top n 핵심 단어, 그 단어의 중심 위치 상댓값, 대상 파일명을 반환함
def get_cluster_details(cluster_model,
                        cluster_data,
                        feature_names,
                        clusters_num,
                        top_n_features=10):
    """Summarize each cluster: top-n feature words, their centroid weights,
    and the filenames of the documents assigned to it.

    Returns a dict keyed by cluster number; each value holds 'cluster',
    'top_features', 'top_features_value' and 'filenames'.
    """
    # Feature indices of every centroid, sorted by descending weight.
    ranked = cluster_model.cluster_centers_.argsort()[:, ::-1]
    details = {}
    for num in range(clusters_num):
        top_idx = ranked[num, :top_n_features]
        member_files = cluster_data[cluster_data['cluster_label'] == num]['filename']
        details[num] = {
            'cluster': num,
            # Translate the strongest feature indices into their word names.
            'top_features': [feature_names[i] for i in top_idx],
            # Centroid weight of each of those features.
            'top_features_value':
                cluster_model.cluster_centers_[num, top_idx].tolist(),
            'filenames': member_files.values.tolist(),
        }
    return details
# + id="D4lDSUQpfdYO"
# print 함수 만들기
def print_cluster_details(cluster_details):
    """Pretty-print the per-cluster summaries built by get_cluster_details."""
    for num, detail in cluster_details.items():
        print(f'##### Cluster {num}')
        print('Top Features: ', detail['top_features'])
        # Only show the first few member files to keep the output short.
        print('Reviews 파일명: ', detail['filenames'][:7])
        print('==============================================')
# + id="GsCF4b3Gf7V0" outputId="ad4458b7-c0f1-4501-bdd7-7a83987c7244" colab={"base_uri": "https://localhost:8080/", "height": 250}
# NOTE(review): TfidfVectorizer.get_feature_names() was removed in
# scikit-learn 1.2 — on newer versions use get_feature_names_out().
feature_names = tfidf_vect.get_feature_names()
# Summarize the 3 clusters: top-10 features, their weights, member files.
cluster_details = get_cluster_details(cluster_model = km_cluster,
                                      cluster_data = document_df,
                                      feature_names = feature_names,
                                      clusters_num = 3,
                                      top_n_features = 10)
print_cluster_details(cluster_details)
| python_ml_08_opinion_review.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/tbng/deep-fmri/blob/master/notebooks/VAE.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="Nl9r7oRnLTF2" colab_type="code" outputId="55b8d599-6069-4ae2-9cb2-21f1480bcb0a" colab={"base_uri": "https://localhost:8080/", "height": 33}
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
from google.colab import drive
from pathlib import Path
# Mount Google Drive so the HCP data files are reachable under /content/gdrive.
drive.mount('/content/gdrive', force_remount=True)
# + id="3pEP528cQKqu" colab_type="code" outputId="812ef565-2a2e-42e4-e492-9c4c51cd3e62" colab={"base_uri": "https://localhost:8080/", "height": 283}
# Folder containing the masked HCP-900 arrays; keep only resting-state runs.
HCP_FOLDER = Path("/content/gdrive/My Drive/HCP_900")
MASKED_DATA_FILES = sorted(HCP_FOLDER.glob("*REST*.npy"))
MASKED_DATA_FILES
# + id="c8RNlMnrLgsa" colab_type="code" colab={}
import glob
import numpy as np
import torch
from os.path import expanduser, join
from skimage.morphology import dilation, binary_dilation
from torch.utils.data import Dataset, ConcatDataset
class NumpyDataset(Dataset):
    """Lazily serves 3-D volumes from a 4-D .npy file (x, y, z, time).

    The file is memory-mapped on every access, so nothing stays resident
    between items; each item is a (1, x, y, z) tensor for one time point.
    """

    def __init__(self, filename):
        self.filename = filename

    def _mmap(self):
        # Re-open the array as a read-only memory map.
        return np.load(self.filename, mmap_mode='r')

    def __len__(self):
        # One sample per time point (4th axis).
        return self._mmap().shape[3]

    def __getitem__(self, index):
        # Add a leading channel dim, slice out one time point, copy to torch.
        volume = self._mmap()[None, :, :, :, index]
        return torch.Tensor(volume)
class NumpyDatasetMem(Dataset):
    """In-memory variant of NumpyDataset: loads the full 4-D array eagerly."""

    def __init__(self, filename):
        # mmap_mode=None forces a full read; torch.Tensor casts to float32.
        array = np.load(filename, mmap_mode=None)
        self.data = torch.Tensor(array)

    def __len__(self):
        # One sample per time point (4th axis).
        return self.data.shape[3]

    def __getitem__(self, index):
        # Leading channel dim + one time point, served straight from RAM.
        return self.data[None, :, :, :, index]
def get_dataset(subject=100307, data_dir=None, in_memory=True):
    """Build train/test datasets from the first three runs plus a dilated mask.

    Returns (train_dataset, test_dataset, mask) where mask is a torch byte
    tensor.  NOTE(review): the run list comes from the module-level
    MASKED_DATA_FILES regardless of data_dir — confirm that is intended.
    """
    dataset_cls = NumpyDatasetMem if in_memory else NumpyDataset
    if data_dir is None:
        data_dir = HCP_FOLDER
    runs = [dataset_cls(str(path)) for path in MASKED_DATA_FILES[:3]]
    # The last run is held out for validation; the rest form the train set.
    train_dataset = ConcatDataset(runs[:-1])
    test_dataset = runs[-1]
    # Load the subject's brain mask as booleans.
    mask = np.load(data_dir / ('%s_mask.npy' % subject)).astype('bool')
    print('Mask', mask.astype('float').sum(), 'voxels')
    # Grow the mask by two voxels so border voxels are retained.
    for _ in range(2):
        mask = binary_dilation(mask)
    mask = mask.astype('uint8')
    print('Dilated mask', mask.astype('float').sum(), 'voxels')
    mask = torch.from_numpy(mask).byte()
    return train_dataset, test_dataset, mask
# + id="CcmB2vxVMrSf" colab_type="code" colab={}
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.testing import randn_like
class Encoder(nn.Module):
    """Convolutional encoder mapping a volume to a Gaussian posterior.

    Five stride-2 conv stages halve each spatial dimension while channels
    grow 1 -> 16 -> 32 -> 64 -> 128 -> 256; the result is globally
    average-pooled and projected to a mean and a log-variance vector.
    """

    def __init__(self, embedding_size=128):
        super().__init__()
        # Pad (91, 109, 91) up to (96, 128, 96) so five /2 stages divide
        # evenly, leaving a 3x4x3 grid (see Decoder's expand of the same shape).
        self.pad = nn.ConstantPad3d((2, 3, 9, 10, 2, 3), 0)
        self.conv = nn.Sequential(
            nn.Conv3d(1, 16, 3, 2, 1),
            nn.ReLU(),
            nn.Conv3d(16, 16, 3, 1, 1),
            nn.ReLU(),
            nn.BatchNorm3d(16),
            nn.Conv3d(16, 32, 3, 2, 1),
            nn.ReLU(),
            nn.Conv3d(32, 32, 3, 1, 1),
            nn.ReLU(),
            nn.BatchNorm3d(32),
            nn.Conv3d(32, 64, 3, 2, 1),
            nn.ReLU(),
            nn.Conv3d(64, 64, 3, 1, 1),
            nn.BatchNorm3d(64),
            nn.ReLU(),
            nn.Conv3d(64, 128, 3, 2, 1),
            nn.ReLU(),
            nn.Conv3d(128, 128, 3, 1, 1),
            nn.ReLU(),
            nn.BatchNorm3d(128),
            nn.Conv3d(128, 256, 3, 2, 1),
            nn.ReLU(),
            nn.Conv3d(256, 256, 3, 1, 1),
            nn.ReLU(),
            nn.BatchNorm3d(256),
        )
        # Two heads on the pooled 256-d feature: posterior mean and log-variance.
        self.dense = nn.Linear(256, embedding_size)
        self.dense_var = nn.Linear(256, embedding_size)

    def forward(self, img):
        """Return (mean, log_var) of the latent posterior for batch *img*."""
        batch_size = img.shape[0]
        img = self.pad(img)
        conv_img = self.conv(img)
        # Global average pool over all remaining spatial positions.
        avg_channel = conv_img.view(batch_size, 256, -1).mean(dim=2)
        # avg_channel = F.dropout(avg_channel, p=0.1)
        mean = self.dense(avg_channel)
        log_var = self.dense_var(avg_channel)
        return mean, log_var
class Decoder(nn.Module):
    """Transposed-conv decoder mirroring Encoder: latent -> volume.

    The latent is projected to 256 channels, broadcast over a 3x4x3 seed
    grid, upsampled x2 five times, then cropped back to the original
    (unpadded) spatial size.
    """

    def __init__(self, embedding_size=128):
        super().__init__()
        self.dense = nn.Linear(embedding_size, 256)
        self.deconv = nn.Sequential(
            nn.ConvTranspose3d(256, 256, 3, 1, 1, 0),
            nn.ReLU(),
            nn.ConvTranspose3d(256, 128, 3, 2, 1, 1),
            nn.ReLU(),
            nn.BatchNorm3d(128),
            nn.ConvTranspose3d(128, 128, 3, 1, 1, 0),
            nn.ReLU(),
            nn.ConvTranspose3d(128, 64, 3, 2, 1, 1),
            nn.ReLU(),
            nn.BatchNorm3d(64),
            nn.ConvTranspose3d(64, 64, 3, 1, 1, 0),
            nn.ReLU(),
            nn.ConvTranspose3d(64, 32, 3, 2, 1, 1),
            nn.ReLU(),
            nn.BatchNorm3d(32),
            nn.ConvTranspose3d(32, 32, 3, 1, 1, 0),
            nn.ReLU(),
            nn.ConvTranspose3d(32, 16, 3, 2, 1, 1),
            nn.ReLU(),
            nn.BatchNorm3d(16),
            nn.ConvTranspose3d(16, 16, 3, 1, 1, 0),
            nn.ReLU(),
            nn.ConvTranspose3d(16, 1, 3, 2, 1, 1)
        )

    def forward(self, latent):
        """Decode a batch of latent vectors into reconstructed volumes."""
        batch_size = latent.shape[0]
        avg_channel = self.dense(latent)
        # avg_channel = F.dropout(avg_channel, p=0.1)
        # Broadcast the 256-d vector over the 3x4x3 seed grid;
        # `* 1` materializes the expanded view as a real tensor.
        avg_channel = avg_channel[:, :, None,
                                  None, None].expand(batch_size, 256, 3, 4, 3) * 1
        rec = self.deconv(avg_channel)
        # Crop off the encoder's padding: ConstantPad3d((2, 3, 9, 10, 2, 3), 0).
        rec = rec[:, :, 2:-3, 9:-10, 2:-3]
        return rec
class VAE(nn.Module):
    """3-D convolutional variational autoencoder.

    forward() returns (reconstruction, kl_penalty).  The reparameterization
    trick (mean + sigma * eps) is applied only in training mode; at eval
    time the posterior mean is decoded directly.
    """

    def __init__(self, embedding_dim=128):
        super().__init__()
        self.encoder = Encoder(embedding_dim)
        self.decoder = Decoder(embedding_dim)

    def forward(self, img):
        mean, log_var = self.encoder(img)
        penalty = gaussian_kl(mean, log_var)
        if self.training:
            # Use the public torch.randn_like: the original imported
            # randn_like from torch.testing, which has been removed.
            eps = torch.randn_like(mean)
            latent = mean + torch.exp(log_var / 2) * eps
        else:
            latent = mean
        return self.decoder(latent), penalty
def gaussian_kl(mean, log_var):
    """Batch-mean KL divergence KL(N(mean, exp(log_var)) || N(0, I))."""
    batch_size = mean.shape[0]
    # Closed form per latent element: mu^2 + sigma^2 - log(sigma^2) - 1.
    per_element = mean.pow(2) + log_var.exp() - log_var - 1
    return 0.5 * per_element.sum() / batch_size
def masked_mse(pred, target, mask):
    """Sum-of-squares error inside *mask*, averaged over the batch.

    mask -- byte or bool tensor, nonzero/True for voxels to keep; voxels
    outside the mask are zeroed before the squared error is summed.
    """
    diff = pred - target
    # Invert the mask: True marks voxels to discard.  Converting to bool
    # keeps masked_fill_ working on modern PyTorch, where byte masks
    # (the original `mask ^ 1` uint8 tensor) are no longer accepted.
    outside = ~mask.bool()
    outside = outside[None, None, ...]
    diff.masked_fill_(outside, 0.)
    return torch.sum(diff ** 2) / diff.shape[0]
# + id="CDJHcideMyWW" colab_type="code" outputId="489b7b99-66d8-4ad3-a822-14e0f09a5633" colab={"base_uri": "https://localhost:8080/", "height": 24500}
import functools
import math
import torch
from os.path import expanduser
from torch.optim import Adam
from torch.utils.data import DataLoader
from torchsummary import summary

# Hyper-parameters / run configuration.
batch_size = 48
in_memory = True
alpha = 10          # weight of the KL penalty in the ELBO
residual = False    # if True, train on mean-subtracted volumes
train_dataset, test_dataset, mask = get_dataset(in_memory=in_memory)
train_loader = DataLoader(train_dataset, batch_size=batch_size,
                          shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size,
                         shuffle=False)
model = VAE()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
mask = mask.to(device)
# Print a per-layer summary for one (1, 91, 109, 91) input volume.
summary(model, (1, 91, 109, 91))
# Reconstruction loss restricted to in-mask voxels.
loss_function = functools.partial(masked_mse, mask=mask)
optimizer = Adam(model.parameters(), lr=1e-3, amsgrad=True)
n_epochs = 100
total_loss = 0
n_batch = math.ceil(len(train_dataset) / batch_size)
# Mean image; stays all-zero unless residual training is enabled.
mean = torch.zeros_like(train_dataset[0])
# Compute mean
if residual:
    length = 0
    for this_data in train_loader:
        length += this_data.shape[0]
        mean += this_data.sum(dim=0)
    mean /= length
mean = mean.to(device)
# Training loop.  Every batch: clip, center, forward, backward, step.
# Every 10 batches the model is evaluated on the held-out run and the
# running averages are reported, then the report window restarts.
for epoch in range(n_epochs):
    epoch_batch = 0
    epoch_loss = 0       # sum of batch losses over the whole epoch
    verbose_loss = 0     # running sums since the last validation report
    verbose_penalty = 0
    verbose_batch = 0
    for this_data in train_loader:
        model.train()
        model.zero_grad()
        # Clip voxel intensities at 1, then subtract the (optional) mean image.
        this_data[this_data >= 1] = 1
        this_data = this_data.to(device)
        this_data -= mean[None, ...]
        rec, penalty = model(this_data)
        penalty *= alpha
        loss = loss_function(rec, this_data)
        elbo = loss + penalty
        elbo.backward()
        optimizer.step()
        verbose_loss += loss.item()
        verbose_penalty += penalty.item()
        epoch_loss += loss.item()
        epoch_batch += 1
        verbose_batch += 1
        print('Epoch %i, batch %i/%i,'
              'train_objective: %4e,'
              'train_penalty: %4e,' % (epoch, epoch_batch, n_batch,
                                       verbose_loss / verbose_batch,
                                       verbose_penalty / verbose_batch,))
        if epoch_batch % 10 == 0:
            # Periodic validation pass on the held-out run.
            model.eval()
            with torch.no_grad():
                val_batch = 0
                val_loss = 0
                val_penalty = 0
                for this_test_data in test_loader:
                    this_test_data = this_test_data.to(device)
                    this_test_data -= mean[None, ...]
                    rec, this_val_penalty = model(this_test_data)
                    this_val_penalty *= alpha
                    this_val_loss = loss_function(rec, this_test_data)
                    val_loss += this_val_loss.item()
                    val_penalty += this_val_penalty.item()
                    val_batch += 1
                val_loss /= val_batch
                val_penalty /= val_batch
            verbose_loss /= verbose_batch
            verbose_penalty /= verbose_batch
            print('Epoch %i, batch %i/%i,'
                  'train_objective: %4e,'
                  'train_penalty: %4e,'
                  'val_objective: %4e,'
                  'val_penalty: %4e' % (epoch, epoch_batch, n_batch,
                                        verbose_loss, verbose_penalty,
                                        val_loss, val_penalty))
            # BUGFIX: restart the running sums together with the counter.
            # The original reset only verbose_batch, so subsequent windows
            # accumulated raw sums on top of an already-averaged value.
            verbose_loss = 0
            verbose_penalty = 0
            verbose_batch = 0
    # Checkpoint each epoch, named after the epoch's mean training loss
    # (the original reused a stale verbose_loss value here).
    state_dict = model.state_dict()
    name = 'vae_dilated_e_%03i_loss_%.4e.pkl' % (epoch,
                                                 epoch_loss / max(epoch_batch, 1))
    torch.save((state_dict, mean), name)
# + id="hh7aOn_9NQJe" colab_type="code" outputId="53bc4527-1004-4072-a8d6-ffbe2c1fa6e2" colab={"base_uri": "https://localhost:8080/", "height": 916}
# ls
# + id="TTOkdjelQuR4" colab_type="code" colab={}
| notebooks/VAE.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# # English Premier League Teams and Stadiums
# This notebook uses a property graph containing the teams that took part in the 2019/20 English Premier League season. The graph includes information about the teams, their stadiums and the cities where they play.
#
# ## Check the status of our connection
# The three cells below can be used to check the version of the workbench, the current configuration, and the status of the Neptune cluster.
# %graph_notebook_version
# %graph_notebook_config
# %status
# ## Create the graph
# The cell below creates the property graph. A set of queries that can be run against the data follows in the subsequent cells.
# +
# %%gremlin
// Leagues
g.addV("League").
property(id,'EPL-2019-20').
property("name", "English Premier League").
property("nickname", "EPL").
as("epl").
// Teams
addV("Team").
property(id,"Arsenal").
property("name","Arsenal").
property("fullName","<NAME>.",).
property("nickname","The Gunners").
property("founded",1886).
as("arsenal").
addV("Team").
property(id,"Chelsea").
property("name","Chelsea").
property("fullName","<NAME>.").
property("nickname", "The Blues").
property("founded",1905).
as("chelsea").
addV("Team").
property(id,"Spurs").
property("name","<NAME>").
property("fullName","<NAME>.").
property("nickname","Spurs").
property("founded",1882).
as("spurs").
addV("Team").
property(id,"WestHam").
property("name","<NAME>").
property("fullName","West Ham United F.C.").
property("nickname","The Hammers").
property("founded",1895).
as("westham").
addV("Team").
property(id,"Liverpool").
property("name","Liverpool").
property("fullName","<NAME>.").
property("nickname","The Reds").
property("founded",1892).
as("liverpool").
addV("Team").
property(id,"Everton").
property("name","Everton").
property("fullName","<NAME>.").
property("nickname","The Toffees").
property("founded",1878).
as("everton").
addV("Team").
property(id,"ManUtd").
property("name","<NAME>").
property("fullName","Manchester United F.C.").
property("nickname","<NAME>").
property("founded",1878).
as("manutd").
addV("Team").
property(id,"ManCity").
property("name","<NAME>").
property("fullName","Manchester City F.C").
property("nickname","<NAME>").
property("founded",1880).
as("mancity").
addV("Team").
property(id,"Wolves").
property("name", "<NAME>" ).
property("fullName","<NAME>" ).
property("nickname","Wolves").
property("founded",1877).
as("wolves").
addV("Team").
property(id,"Burnley").
property("name", "Burnley").
property("fullName", "<NAME>").
property("nickname", "The Clarets").
property("founded", 1882).
as("burnley").
addV("Team").
property(id,"SheffUtd").
property("name", "<NAME>").
property("fullName", "Sheffield United F.C").
property("nickname", "<NAME>").
property("founded", 1889).
as("sheffutd").
addV("Team").
property(id,"CrystalPalace").
property("name", "<NAME>").
property("fullName", "<NAME> F.C").
property("nickname", "Eagles").
property("founded", 1905).
as("palace").
addV("Team").
property(id,"Southampton").
property("name", "Southampton").
property("fullName", "<NAME>").
property("nickname", "<NAME>").
property("founded", 1885).
as("soton").
addV("Team").
property(id,"Newcastle").
property("name", "<NAME>").
property("fullName", "Newcastle United F.C").
property("nickname", "<NAME>").
property("founded", 1892).
as("newcastle").
addV("Team").
property(id,"Brighton").
property("name", "<NAME>").
property("fullName", "<NAME> F.C").
property("nickname", "Seagulls").
property("founded", 1901).
as("brighton").
addV("Team").
property(id,"Watford").
property("name", "Watford").
property("fullName", "<NAME>.").
property("nickname", "Hornets").
property("founded", 1898).
as("watford").
addV("Team").
property(id,"Bournemouth").
property("name", "Bournemouth").
property("fullName", "Bournemouth F.C.").
property("nickname", "<NAME>").
property("founded", 1899).
as("bournemouth").
addV("Team").
property(id,"AstonVilla").
property("name", "<NAME>").
property("fullName", "<NAME>.").
property("nickname", "<NAME>").
property("founded", 1897).
as("villa").
addV("Team").
property(id,"Leicester").
property("name", "<NAME>").
property("fullName", "<NAME>.").
property("nickname", "<NAME>").
property("founded", 1884).
as("leicester").
addV("Team").
property(id,"Norwich").
property("name", "<NAME>").
property("fullName", "<NAME>.").
property("nickname", "<NAME>").
property("founded", 1902).
as("norwich").
// Stadiums
addV("Stadium").
property(id,"The_Emirates").
property("name", "<NAME>").
property("capacity", 60704).
property("opened", 2006).
as("em").
addV("Stadium").
property(id,"Stamford_Bridge").
property("name", "<NAME>").
property("capacity", 40834).
property("opened", 1877).
as("sb").
addV("Stadium").
property(id,"Tottenham_Hotspur_Stadium").
property("name", "Tottenham Hotspur Stadium").
property("capacity", 62214).
property("opened", 2019).
as("th").
addV("Stadium").
property(id,"London_Stadium").
property("name", "London Stadium").
property("capacity", 60000).
property("opened", 2016).
as("ls").
addV("Stadium").
property(id,"Anfield").
property("name", "Anfield").
property("capacity", 53394).
property("opened", 1884).
as("af").
addV("Stadium").
property(id,"Goodison_Park").
property("name", "<NAME>").
property("capacity", 39414).
property("opened", 1892).
as("gp").
addV("Stadium").
property(id,"Old_Trafford").
property("name", "<NAME>ord").
property("capacity", 75643).
property("opened", 1910).
as("ot").
addV("Stadium").
property(id,"The_Etihad").
property("name", "Etihad Stadium").
property("capacity", 55107).
property("opened", 2003).
as("et").
addV("Stadium").
property(id,"Molineux").
property("name", "Molineux Stadium").
property("capacity", 32050).
property("opened", 1889).
as("mo").
addV("Stadium").
property(id,"Turf_Moor").
property("name", "<NAME>").
property("capacity", 21944).
property("opened", 1883).
as("tm").
addV("Stadium").
property(id,"Bramall_Lane").
property("name", "<NAME>").
property("capacity", 32125).
property("opened", 1855).
as("bl").
addV("Stadium").
property(id,"Selhurst_Park").
property("name", "Selhurst Park Stadium").
property("capacity", 25486).
property("opened", 1924).
as("sp").
addV("Stadium").
property(id,"St_Marys").
property("name", "St. Mary's Stadium").
property("capacity", 32384).
property("opened", 2001).
as("sm").
addV("Stadium").
property(id,"St_James_Park").
property("name", "St. James' Park").
property("capacity", 52305).
property("opened", 1880).
as("sjp").
addV("Stadium").
property(id,"Amex_Stadium").
property("name", "American Express Community Stadium").
property("capacity", 30750).
property("opened", 2011).
as("amx").
addV("Stadium").
property(id,"Vicarage_Road").
property("name", "Vicarage Road").
property("capacity", 22200).
property("opened", 1922).
as("vr").
addV("Stadium").
property(id,"Vitality_Stadium").
property("name", "Vitality Stadium").
property("capacity", 11364).
property("opened", 1910).
as("vs").
addV("Stadium").
property(id,"Villa_Park").
property("name", "Villa Park").
property("capacity", 42095).
property("opened", 1897).
as("vp").
addV("Stadium").
property(id,"King_Power_Stadium").
property("name", "King Power Stadium").
property("capacity", 32261).
property("opened", 2002).
as("kp").
addV("Stadium").
property(id,"Carrow_Road_Stadium").
property("name", "Carrow Road Stadium").
property("capacity", 27359).
property("opened", 1935).
as("cr").
// Cities
addV("City").
property(id,"City_London").
property("name", "London").
as("lon").
addV("City").
property(id,"City_Liverpool").
property("name", "Liverpool").
as("liv").
addV("City").
property(id,"City_Manchester").
property("name", "Manchester").
as("man").
addV("City").
property(id,"City_Wolverhampton").
property("name", "Wolverhampton").
as("wol").
addV("City").
property(id,"City_Leicester").
property("name", "Leicester").
as("lei").
addV("City").
property(id,"City_Brighton").
property("name", "Brighton").
as("bri").
addV("City").
property(id,"City_Southampton").
property("name", "Southampton").
as("sou").
addV("City").
property(id,"City_Newcastle").
property("name", "Newcastle").
as("nwc").
addV("City").
property(id,"City_Sheffield").
property("name", "Sheffield").
as("shf").
addV("City").
property(id,"City_Birmingham").
property("name", "Birmingham").
as("bmx").
addV("City").
property(id,"City_Watford").
property("name", "Watford").
as("wat").
addV("City").
property(id,"City_Norwich").
property("name", "Norwich").
as("nor").
addV("City").
property(id,"City_Bournemouth").
property("name", "Bournemouth").
as("bou").
addV("City").
property(id,"City_Burnley").
property("name", "Burnley").
as("bur").
// Edges for EPL membership, stadium and city
addE("CURRENT_LEAGUE").from("arsenal").to("epl").
addE("STADIUM").from("arsenal").to("em").
addE("CITY").from("em").to("lon").
addE("CURRENT_LEAGUE").from("chelsea").to("epl").
addE("STADIUM").from("chelsea").to("sb").
addE("CITY").from("sb").to("lon").
addE("CURRENT_LEAGUE").from("spurs").to("epl").
addE("STADIUM").from("spurs").to("th").
addE("CITY").from("th").to("lon").
addE("CURRENT_LEAGUE").from("westham").to("epl").
addE("STADIUM").from("westham").to("ls").
addE("CITY").from("ls").to("lon").
addE("CURRENT_LEAGUE").from("liverpool").to("epl").
addE("STADIUM").from("liverpool").to("af").
addE("CITY").from("af").to("liv").
addE("CURRENT_LEAGUE").from("everton").to("epl").
addE("STADIUM").from("everton").to("gp").
addE("CITY").from("gp").to("liv").
addE("CURRENT_LEAGUE").from("manutd").to("epl").
addE("STADIUM").from("manutd").to("ot").
addE("CITY").from("ot").to("man").
addE("CURRENT_LEAGUE").from("mancity").to("epl").
addE("STADIUM").from("mancity").to("et").
addE("CITY").from("et").to("man").
addE("CURRENT_LEAGUE").from("wolves").to("epl").
addE("STADIUM").from("wolves").to("mo").
addE("CITY").from("mo").to("wol").
addE("CURRENT_LEAGUE").from("burnley").to("epl").
addE("STADIUM").from("burnley").to("tm").
addE("CITY").from("tm").to("bur").
addE("CURRENT_LEAGUE").from("sheffutd").to("epl").
addE("STADIUM").from("sheffutd").to("bl").
addE("CITY").from("bl").to("shf").
addE("CURRENT_LEAGUE").from("palace").to("epl").
addE("STADIUM").from("palace").to("sp").
addE("CITY").from("sp").to("lon").
addE("CURRENT_LEAGUE").from("soton").to("epl").
addE("STADIUM").from("soton").to("sm").
addE("CITY").from("sm").to("sou").
addE("CURRENT_LEAGUE").from("newcastle").to("epl").
addE("STADIUM").from("newcastle").to("sjp").
addE("CITY").from("sjp").to("nwc").
addE("CURRENT_LEAGUE").from("watford").to("epl").
addE("STADIUM").from("watford").to("vr").
addE("CITY").from("vr").to("wat").
addE("CURRENT_LEAGUE").from("leicester").to("epl").
addE("STADIUM").from("leicester").to("kp").
addE("CITY").from("kp").to("lei").
addE("CURRENT_LEAGUE").from("villa").to("epl").
addE("STADIUM").from("villa").to("vp").
addE("CITY").from("vp").to("bmx").
addE("CURRENT_LEAGUE").from("brighton").to("epl").
addE("STADIUM").from("brighton").to("amx").
addE("CITY").from("amx").to("bri").
addE("CURRENT_LEAGUE").from("bournemouth").to("epl").
addE("STADIUM").from("bournemouth").to("vs").
addE("CITY").from("vs").to("bou").
addE("CURRENT_LEAGUE").from("norwich").to("epl").
addE("STADIUM").from("norwich").to("cr").
addE("CITY").from("cr").to("nor")
# -
# ### Verify the data was inserted
# %%gremlin
g.V().groupCount().by(label).unfold()
# %%gremlin
g.E().groupCount().by(label).unfold()
# ### Build a visualization of the graph
# Run the cell below and select the Graph tab to see a visualization of the results. The various hints you can provide and settings you can adjust when building visualizations are explained in the blog post located [here](https://aws.amazon.com/blogs/database/visualize-query-results-using-the-amazon-neptune-workbench/).
# %%gremlin -p v,ine,outv,oute,inv,oute,inv
g.V().hasLabel('League').inE().outV().outE().inV().outE().inV().path().by('name').by(label)
# ### Delete the entire data set
# This cell can be used to delete the data set. The drop is done using explicit IDs to try and avoid conflicts with any other data you may have loaded in your graph.
# %%gremlin
g.V().hasId('Arsenal', 'Chelsea', 'Spurs', 'WestHam', 'Liverpool', 'Everton', 'ManUtd', 'ManCity',
'Wolves', 'Burnley', 'SheffUtd', 'CrystalPalace', 'Southampton', 'Newcastle', 'Brighton',
'Watford', 'Bournemouth', 'AstonVilla', 'Leicester', 'Norwich', 'The_Emirates',
'Stamford_Bridge', 'Tottenham_Hotspur_Stadium', 'London_Stadium', 'Anfield', 'Goodison_Park',
'Old_Trafford', 'The_Etihad', 'Molineux', 'Turf_Moor', 'Bramall_Lane', 'Selhurst_Park',
'St_Marys', 'St_James_Park', 'Amex_Stadium', 'Vicarage_Road', 'Vitality_Stadium', 'Villa_Park',
'King_Power_Stadium', 'Carrow_Road_Stadium', 'City_London', 'City_Liverpool', 'City_Manchester',
'City_Wolverhampton', 'City_Leicester', 'City_Brighton', 'City_Southampton', 'City_Newcastle',
'City_Sheffield', 'City_Birmingham', 'City_Watford', 'City_Norwich', 'City_Bournemouth',
'City_Burnley', 'EPL-2019-20').drop()
# ### How many teams were in the league that season?
# %%gremlin
g.V().hasLabel('Team').count()
# %%gremlin
g.V('EPL-2019-20').in('CURRENT_LEAGUE').count()
# ### Team info
# Find the teams in the graph and their properties.
# %%gremlin
g.V().hasLabel('Team').valueMap()
# ### Team and stadium info
# %%gremlin
g.V().hasLabel('Team').
project('name','stadium','city').
by('name').
by(out('STADIUM').values('name')).
by(out('STADIUM').out('CITY').values('name'))
# ### Teams based in London
# %%gremlin
g.V().has('City','name','London').in('CITY').in('STADIUM').values('name')
# ### Stadiums in London
# %%gremlin -p v,outv
g.V().has('City','name','London').in('CITY').path().by('name')
# ### Teams in London plus their stadiums
# %%gremlin -p v,outv,outv
g.V().has('City','name','London').in('CITY').in('STADIUM').path().by('name')
# ### Was Coventry in the league that season?
#
# %%gremlin
g.V().has('Team','name','Coventry').
fold().
coalesce(unfold(),constant('Not in the EPL that year'))
# ### Using text predicates to find sub-strings
# This query looks for any teams that have the string "ou" in their name.
# %%gremlin
g.V().has('Team','name',containing('ou')).values('name')
# ### Find everything the graph contains related to Arsenal
# +
# %%gremlin
g.V().hasId('Arsenal').
project('names-and-age','stadium','city').
by(valueMap().by(unfold())).
by(out('STADIUM').values('name')).
by(out('STADIUM').out('CITY').values('name')).unfold()
# -
# ### How many stadiums are in each city?
# %%gremlin
g.V().hasLabel('Stadium').
groupCount().
by(out('CITY')).
order(local).
by(values,desc).
unfold()
# ### What year did each stadium open?
# %%gremlin
g.V().hasLabel('Stadium').
valueMap('name','opened').by(unfold()).
order().
by(select('opened'))
# ### Stadiums ordered by descending capacity
# %%gremlin
g.V().hasLabel('Stadium').
order().
by('capacity',desc).
valueMap('name','capacity').
by(unfold())
# ### Number of teams founded in a given year
# %%gremlin
g.V().hasLabel('Team').
group().
by('founded').
by('name').
order(local).
by(keys).
unfold()
# ### Page Width
# If you would like Jupyter to maximise the horizontal screen real estate run the cell below.
# IPython.core.display is deprecated for these names; IPython.display is
# the supported import path.
from IPython.display import display, HTML

# Let notebook cells use the full browser width.
display(HTML("<style>.container { width:100% !important; }</style>"))
| src/graph_notebook/notebooks/02-Visualization/EPL-Gremlin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="pbRHalfkPETq"
import numpy as np
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/", "height": 437} id="1kToLNbTPHL8" outputId="f499bcff-5753-4894-86a9-7f058b362397"
# Load the wine-quality dataset (semicolon-separated, UCI format).
df = pd.read_csv("wine.csv",sep=";")
df
# + id="YsZqhLEUPQtJ"
# Coerce non-numeric alcohol entries to NaN, then impute with the column
# mean.  Plain assignment replaces the original chained
# `fillna(..., inplace=True)`, which is unreliable under pandas
# copy-on-write and is removed in pandas 3.0.
df['alcohol'] = pd.to_numeric(df['alcohol'], errors='coerce')
df['alcohol'] = df['alcohol'].fillna(df['alcohol'].mean())
# + id="JzLPdGUWSEKE"
lower_limit = df["free_sulfur_dioxide"].mean() - 3*df["free_sulfur_dioxide"].std()
upper_limit = df["free_sulfur_dioxide"].mean() + 3*df["free_sulfur_dioxide"].std()
# + id="Elhue5IdSEcD"
df2 = df[(df["free_sulfur_dioxide"] > lower_limit) & (df["free_sulfur_dioxide"] < upper_limit)]
# + id="HtktOBs2SEve"
lower_limit = df2['total_sulfur_dioxide'].mean() - 3*df2['total_sulfur_dioxide'].std()
upper_limit = df2['total_sulfur_dioxide'].mean() + 3*df2['total_sulfur_dioxide'].std()
# + id="Eu_WskNGSFCP"
df3 = df2[(df2['total_sulfur_dioxide'] > lower_limit) & (df2['total_sulfur_dioxide'] < upper_limit)]
# + id="p2PoHCyASFmo"
lower_limit = df3['residual_sugar'].mean() - 3*df3['residual_sugar'].std()
upper_limit = df3['residual_sugar'].mean() + 3*df3['residual_sugar'].std()
# + id="EocVMj71SXll"
df4 = df3[(df3['residual_sugar'] > lower_limit) & (df3['residual_sugar'] < upper_limit)]
# + id="kXEbDtjnSbGR"
lower_limit = df4["chlorides"].mean() - 3*df4["chlorides"].std()
upper_limit = df4["chlorides"].mean() + 3*df4["chlorides"].std()
# + id="6AptSaLsSbk0"
df5 = df4[(df4['chlorides'] > lower_limit) & (df4['chlorides'] < upper_limit)]
lower_limit = df5["sulphates"].mean() - 3*df5["sulphates"].std()
upper_limit = df5["sulphates"].mean() + 3*df5["sulphates"].std()
# + id="NTMgnrBmScEx"
df6 = df5[(df5['sulphates'] > lower_limit) & (df5['sulphates'] < upper_limit)]
lower_limit = df6["volatile_acidity"].mean() - 3*df6["volatile_acidity"].std()
upper_limit = df6["volatile_acidity"].mean() + 3*df6["volatile_acidity"].std()
# + id="jr0Wz8mrScdx"
df7 = df6[(df6['volatile_acidity'] > lower_limit) & (df6['volatile_acidity'] < upper_limit)]
lower_limit = df7["fixed_acidity"].mean() - 3*df7["fixed_acidity"].std()
upper_limit = df7["fixed_acidity"].mean() + 3*df7["fixed_acidity"].std()
# + id="W_6IN8G5Sok_"
df8 = df7[(df7['fixed_acidity'] > lower_limit) & (df7['fixed_acidity'] < upper_limit)]
lower_limit = df8["citric_acid"].mean() - 3*df8["citric_acid"].std()
upper_limit = df8["citric_acid"].mean() + 3*df8["citric_acid"].std()
# + id="j6zPjg3dSo3H"
df9 = df8[(df8['citric_acid'] > lower_limit) & (df8['citric_acid'] < upper_limit)]
lower_limit = df9["pH"].mean() - 3*df9["pH"].std()
upper_limit = df9["pH"].mean() + 3*df9["pH"].std()
# + id="uiVCVurdSpK_"
df10 = df9[(df9['pH'] > lower_limit) & (df9['pH'] < upper_limit)]
# + id="JGMFU4RFS-GJ"
# Bucket raw quality scores (3-9) into three labels...
quality_mapping = { 3 : "Low", 4 : "Low", 5: "Medium",6 : "Medium",7: "Medium",8 : "High",9 : "High"}
df10["quality"] = df10["quality"].map(quality_mapping)
# + id="YRPCq8JZTAUU"
# ...then encode the labels as ordinal integers 0/1/2.
quality_code = {"Low" : 0,"Medium": 1,"High" : 2}
df10["quality"] = df10["quality"].map(quality_code)
# + id="iZwKU4JwTCNk"
# Use the fully-cleaned frame as the modelling table.
train=df10
# + id="-DZNwhKMTFCH"
from sklearn.model_selection import train_test_split
# Features = every column except the target; target = encoded quality.
X=train.drop(['quality'],axis=1)
y=train['quality']
# + colab={"base_uri": "https://localhost:8080/"} id="_4Gq8NBJTUmG" outputId="42d5ee4f-4ad7-485d-825e-505aeb87e264"
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X = sc.fit_transform(X)
print(X.shape)
# + id="K98QYgAGTXmF"
from sklearn.decomposition import PCA
pca = PCA()
x_pca = pca.fit_transform(X)
# + id="-otK8OnVTctU"
pca_new = PCA(n_components=10)
x_new = pca_new.fit_transform(X)
# + id="-Ge6XmufTfpI"
X_train, X_test, y_train, y_test = train_test_split(x_new, y, test_size = 0.2,random_state=1)
# + [markdown] id="hybOBmQZTg4o"
# **Models**
# + [markdown] id="_DW3AtV_uyy8"
# Adaboost ( based on combining multiple “weak classifiers” into a single “strong classifier”)
# + colab={"base_uri": "https://localhost:8080/"} id="Q-OI6r7LgNHB" outputId="205a24a4-b474-41e6-dcf7-a13c0a5d28c8"
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
# Fit AdaBoost on the PCA-reduced training fold and report test accuracy.
clf_ada = AdaBoostClassifier(n_estimators=100, random_state=0)
clf_ada.fit(X_train,y_train)
pred_ada=clf_ada.predict(X_test)
x_ada=accuracy_score(y_test,pred_ada)
print(f'the accuracy score is {x_ada}')
# + colab={"base_uri": "https://localhost:8080/"} id="A0jhlLzSg6q1" outputId="98d627cf-8db3-432e-bfd8-f38ec98e9def"
from sklearn.metrics import confusion_matrix
# Rows are true classes, columns are predicted classes (0=Low, 1=Medium,
# 2=High, per the quality_code mapping above).
cm= confusion_matrix(y_test,pred_ada)
print(cm)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="2DdUjhoCy9rv" outputId="46af046d-b173-4710-f840-523232930a1e"
import matplotlib.pyplot as plt
# NOTE(review): plot_confusion_matrix was deprecated in scikit-learn 1.0 and
# removed in 1.2; on newer versions use ConfusionMatrixDisplay.from_estimator.
from sklearn.metrics import plot_confusion_matrix
plot_confusion_matrix(clf_ada, X_test, y_test)
plt.show()
# + [markdown] id="5iB8O2KRu4po"
# XGBoost (decision-tree-based ensemble ML algorithm based on gradient boosting framework )
# + colab={"base_uri": "https://localhost:8080/"} id="m0Em8pzLUsB5" outputId="3e5496e9-29aa-4a8c-ae87-6451b88d2cd1"
# !pip3 install xgboost
# + colab={"base_uri": "https://localhost:8080/"} id="lcQ9XBFLTjEn" outputId="e2065c1b-50dc-4723-c8ab-5859b9d5df9e"
import xgboost as xgb
from sklearn.metrics import accuracy_score
# Gradient-boosted trees; same train/test folds as the other models.
model=xgb.XGBClassifier(n_estimators=400, random_state=42)
model.fit(X_train,y_train)
pred_xg=model.predict(X_test)
x=accuracy_score(y_test,pred_xg)
print(f'the accuracy score is {x}')
# + colab={"base_uri": "https://localhost:8080/"} id="eUpV_F3RUkM1" outputId="b3233884-dee6-4b63-839f-889f9b6b14a8"
from sklearn.metrics import confusion_matrix
cm= confusion_matrix(y_test,pred_xg)
print(cm)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="aHKpACkZ0ffF" outputId="a155661b-cbf3-404e-8670-b6bfccbd52e9"
# NOTE(review): plot_confusion_matrix (imported in the AdaBoost cell) was
# removed in scikit-learn 1.2 - see note there.
plot_confusion_matrix(model, X_test, y_test)
plt.show()
# + [markdown] id="IYFH3fq8UrKW"
# Catboost (based on gradient boosted decision trees)
# + colab={"base_uri": "https://localhost:8080/"} id="oYcFK_cCUqFY" outputId="bcf4061f-a289-4533-ef1e-a2f78c91b258"
# !pip3 install catboost
# + colab={"base_uri": "https://localhost:8080/"} id="WnjB8wcLUqrH" outputId="31a3872e-00be-4915-95ed-0c6f4c73028e"
from catboost import CatBoostClassifier
clf = CatBoostClassifier(iterations=100, learning_rate=0.1)
clf.fit(X_train,y_train)
# + id="A-VzsDsqU8IM"
pred_cat=clf.predict(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="n16A0eQEU_T6" outputId="cf1da8ef-8fe6-426a-9129-e1d56f59df94"
x_cat=accuracy_score(y_test,pred_cat)
print(f'the accuracy score is {x_cat}')
# + colab={"base_uri": "https://localhost:8080/"} id="lSUGqX8-VBw7" outputId="e3746d6c-6f9e-42d8-9f87-389a3600bc18"
cm= confusion_matrix(y_test,pred_cat)
print(cm)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="OSRfukES0jdX" outputId="091a62f5-713c-4a16-d166-c66833ade1a9"
plot_confusion_matrix(clf, X_test, y_test)
plt.show()
# + [markdown] id="dnQG0GIpVEIr"
# Light GBM (fast, distributed gradient boosting method)
# + colab={"base_uri": "https://localhost:8080/"} id="CrCE46XYVDzi" outputId="a242e4e5-aaf8-422a-ca75-06f8a53cc630"
from lightgbm import LGBMClassifier
model_ = LGBMClassifier(n_estimators=100,num_leaves=35,learning_rate=0.1,max_bin=255,max_depth=-1)
model_.fit(X_train,y_train)
pred_lgm=model_.predict(X_test)
x_lgm=accuracy_score(y_test,pred_lgm)
print(f'the accuracy score is {x_lgm}')
# + colab={"base_uri": "https://localhost:8080/"} id="1lEGbJ7wVRHD" outputId="4e2af671-afcd-49cc-b796-9e3268fb8151"
cm= confusion_matrix(y_test,pred_lgm)
print(cm)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="lxztJJoq0nzU" outputId="01af7d2f-8795-4fb9-d1bb-36f74d6b89c8"
plot_confusion_matrix(model_, X_test, y_test)
plt.show()
# + [markdown] id="6C5XdXWdVlKi"
# Bagged Light GBM
# + colab={"base_uri": "https://localhost:8080/"} id="sr029xKqVln4" outputId="d51368b4-8b23-48c1-c7f6-9cd99e93dfc0"
from sklearn.ensemble import BaggingClassifier
# Bagging ensembles below: each wraps one of the boosted models above as the
# base estimator and averages over bootstrap resamples of the training fold.
clf_2 = BaggingClassifier(base_estimator= LGBMClassifier(), n_estimators=200).fit(X_train,y_train)
pred_bag_=clf_2.predict(X_test)
x_bag_=accuracy_score(y_test,pred_bag_)
print(f'the accuracy score is {x_bag_}')
# + colab={"base_uri": "https://localhost:8080/"} id="QSY2OMEgVl7s" outputId="91646615-f53d-4e4d-881d-37b48547d160"
cm= confusion_matrix(y_test,pred_bag_)
print(cm)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="6DQUAMvl0uNJ" outputId="c1ab02d9-c06c-4c11-fd4f-08cca2a7272a"
plot_confusion_matrix(clf_2, X_test, y_test)
plt.show()
# + [markdown] id="PVQwkRV2VsZH"
# Bagged Catboost
# + colab={"base_uri": "https://localhost:8080/"} id="KAsdtKpaVmQF" outputId="749969e8-0e98-4fee-a132-5df3f1135bbc"
clf_3 = BaggingClassifier(base_estimator= CatBoostClassifier(), n_estimators=40).fit(X_train,y_train)
pred_bag_c=clf_3.predict(X_test)
x_bag_c=accuracy_score(y_test,pred_bag_c)
print(f'the accuracy score is {x_bag_c}')
# + colab={"base_uri": "https://localhost:8080/"} id="LsUs8te-VtDL" outputId="725099b2-dbed-4800-e6ec-34b9f9032974"
cm= confusion_matrix(y_test,pred_bag_c)
print(cm)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="ZCg1-uMZ0yMT" outputId="fb8feed1-b2cf-433e-9479-2d71b18c4cca"
plot_confusion_matrix(clf_3, X_test, y_test)
plt.show()
# + [markdown] id="fVVqWIAeXLKc"
# Bagged XGBoost
# + colab={"base_uri": "https://localhost:8080/"} id="1s0bnVCBXH6E" outputId="fb5181a7-0f36-4171-9c1f-1b41e159fcd0"
clf_4 = BaggingClassifier(base_estimator= xgb.XGBClassifier(), n_estimators=200,random_state=50).fit(X_train,y_train)
pred_bag_xg=clf_4.predict(X_test)
x_bag_xg=accuracy_score(y_test,pred_bag_xg)
print(f'the accuracy score is {x_bag_xg}')
# + colab={"base_uri": "https://localhost:8080/"} id="sfuw157TXV2J" outputId="dcf687eb-dff2-4058-a29e-5ffac3c5ec87"
cm= confusion_matrix(y_test,pred_bag_xg)
print(cm)
# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="KkzKPjHe03it" outputId="6a9ee48b-3411-4f00-9123-ecf7623f2cfd"
plot_confusion_matrix(clf_4, X_test, y_test)
plt.show()
# + [markdown] id="7vP2EdTDhEky"
# Bagged AdaBoost
# + colab={"base_uri": "https://localhost:8080/"} id="t7wPbW9AhFFJ" outputId="1fa1d46a-ccad-4c36-c8d7-db4bc4316544"
clf_5 = BaggingClassifier(base_estimator= AdaBoostClassifier(), n_estimators=200).fit(X_train,y_train)
pred_bag_ada=clf_5.predict(X_test)
x_bag_ada=accuracy_score(y_test,pred_bag_ada)
print(f'the accuracy score is {x_bag_ada}')
# + colab={"base_uri": "https://localhost:8080/"} id="BsD6GVbUhFYo" outputId="a842353d-13f4-4f0d-8fea-3dea15384eeb"
cm= confusion_matrix(y_test,pred_bag_ada)
print(cm)
# + colab={"base_uri": "https://localhost:8080/", "height": 280} id="6QOFDG4o052F" outputId="b797c566-6aa0-4bfe-fb97-19d2369bc28b"
plot_confusion_matrix(clf_5, X_test, y_test)
plt.show()
| Classification/Classification_Boosting_algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# The following cell imports the packages that you will need.
import glob
import os.path
import collections
import numpy
import netCDF4
from gewittergefahr.gg_utils import error_checking
# The following cell defines the constants that you will need.
# +
# Example batches are grouped into directories of this many batches each
# (see `find_downsized_3d_example_file` for the path layout).
NUM_BATCHES_PER_DIRECTORY = 1000
# Glob-style pattern for a zero-padded 7-digit batch number.
BATCH_NUMBER_REGEX = '[0-9][0-9][0-9][0-9][0-9][0-9][0-9]'
# GewitterGefahr names for the NARR predictor fields; the suffixes spell out
# the physical units (e.g. kelvins, m/s).
TEMPERATURE_NAME = 'temperature_kelvins'
HEIGHT_NAME = 'height_m_asl'
SPECIFIC_HUMIDITY_NAME = 'specific_humidity_kg_kg01'
WET_BULB_THETA_NAME = 'wet_bulb_potential_temperature_kelvins'
U_WIND_GRID_RELATIVE_NAME = 'u_wind_grid_relative_m_s01'
V_WIND_GRID_RELATIVE_NAME = 'v_wind_grid_relative_m_s01'
VALID_PREDICTOR_NAMES = [
    TEMPERATURE_NAME, HEIGHT_NAME, SPECIFIC_HUMIDITY_NAME, WET_BULB_THETA_NAME,
    U_WIND_GRID_RELATIVE_NAME, V_WIND_GRID_RELATIVE_NAME
]
# Keys for NetCDF variables/attributes and for the dictionaries returned by
# `read_downsized_3d_examples`.
PREDICTOR_MATRIX_KEY = 'predictor_matrix'
TARGET_MATRIX_KEY = 'target_matrix'
TARGET_TIMES_KEY = 'target_times_unix_sec'
ROW_INDICES_KEY = 'row_indices'
COLUMN_INDICES_KEY = 'column_indices'
PREDICTOR_NAMES_KEY = 'narr_predictor_names'
NARR_MASK_KEY = 'narr_mask_matrix'
PRESSURE_LEVEL_KEY = 'pressure_level_mb'
DILATION_DISTANCE_KEY = 'dilation_distance_metres'
# -
# The following cell defines private methods that you will need. Since this is a notebook and not a proper Python package, there is really no distinction between public and private methods. However, I have used the syntax for private methods (underscore at the beginning of the method name), to emphasize that these are low-level helper methods and you shouldn't worry about them.
# +
def _check_predictor_name(field_name):
    """Ensures that name of model field is recognized.

    :param field_name: Field name in GewitterGefahr format (not the original
        NetCDF format).
    :raises: ValueError: if field name is unrecognized.
    """
    error_checking.assert_is_string(field_name)
    if field_name in VALID_PREDICTOR_NAMES:
        return

    error_string = (
        '\n\n{0:s}\n\nValid field names (listed above) do not include '
        '"{1:s}".'
    ).format(str(VALID_PREDICTOR_NAMES), field_name)
    raise ValueError(error_string)
def _floor_to_nearest(input_value, rounding_base):
    """Rounds numbers *down* to nearest x, where x is a positive real number.

    :param input_value: Either numpy array of real numbers or scalar real
        number.
    :param rounding_base: Numbers will be rounded *down* to this base.
    :return: output_value: Same as input_value, except rounded.
    """
    # `collections.Iterable` was removed in Python 3.10; the ABC lives in
    # `collections.abc` there, and directly in `collections` on Python 2.7.
    iterable_abc = getattr(collections, 'abc', collections).Iterable

    if isinstance(input_value, iterable_abc):
        error_checking.assert_is_real_numpy_array(input_value)
    else:
        error_checking.assert_is_real_number(input_value)

    error_checking.assert_is_greater(rounding_base, 0)
    return rounding_base * numpy.floor(input_value / rounding_base)
def _file_name_to_batch_number(downsized_3d_file_name):
"""Parses file name for batch number.
:param downsized_3d_file_name: See doc for `find_downsized_3d_example_file`.
:return: batch_number: Integer.
:raises: ValueError: if batch number cannot be parsed from file name.
"""
pathless_file_name = os.path.split(downsized_3d_file_name)[-1]
extensionless_file_name = os.path.splitext(pathless_file_name)[0]
return int(extensionless_file_name.split('downsized_3d_examples_batch')[-1])
# -
# The following method shrinks the dimensions of a training examples. The original examples (stored in the files) are 65 rows (latitudes) x 65 columns (longitudes). Shrinking the grids makes them easier to work with. The grid dimensions must always be odd numbers, which is why the input arguments are num_half_rows and num_half_columns, rather than num_rows and num_columns. This ensures that there is exactly one center grid cell, which is the grid cell whose label (no front, warm front, or cold front) we are trying to predict. For example, if you want to shrink the grids to 33 x 33, make num_half_rows=16 and num_half_columns=16. The grids will be cropped around the center, so the center grid cell will remain the same. It's just the number of surrounding grid cells that may shrink.
def decrease_example_size(predictor_matrix, num_half_rows, num_half_columns):
    """Decreases the grid size for each example.

    M = original number of rows per example
    N = original number of columns per example
    m = new number of rows per example
    n = new number of columns per example

    :param predictor_matrix: E-by-M-by-N-by-C numpy array of predictor images.
    :param num_half_rows: Determines number of rows returned for each example.
        Examples will be cropped so that the center of the original image is
        the center of the new image.  If `num_half_rows is None`, examples
        will not be cropped.
    :param num_half_columns: Same but for columns.
    :return: predictor_matrix: E-by-m-by-n-by-C numpy array of predictor
        images.
    """
    if num_half_rows is not None:
        error_checking.assert_is_integer(num_half_rows)
        error_checking.assert_is_greater(num_half_rows, 0)

        # Crop symmetrically about the central row (row count is odd, so the
        # center grid cell is preserved).
        center_row_index = predictor_matrix.shape[1] // 2
        first_row_index = center_row_index - num_half_rows
        last_row_index = center_row_index + num_half_rows
        predictor_matrix = predictor_matrix[
            :, first_row_index:(last_row_index + 1), ...
        ]

    if num_half_columns is not None:
        error_checking.assert_is_integer(num_half_columns)
        error_checking.assert_is_greater(num_half_columns, 0)

        center_column_index = predictor_matrix.shape[2] // 2
        first_column_index = center_column_index - num_half_columns
        last_column_index = center_column_index + num_half_columns
        predictor_matrix = predictor_matrix[
            :, :, first_column_index:(last_column_index + 1), ...
        ]

    return predictor_matrix
# The following method locates a file with training examples. On average each file contains 512 training examples: 256 NF examples (with no front at the center grid cell), 128 WF examples (warm front at the center grid cell), and 128 CF examples (cold front at the center grid cell). The original class distribution is much more skewed (98.95% of examples are NF), which makes the deep-learning model nearly insensitive to the minority classes (WF and CF), which leads to the predicted probabilities of WF and CF always being very low. Balancing the training data fixes the problem. Unfortunately it causes the DL models to overpredict the WF and CF classes, but this can be mitigated by post-processing.
def find_downsized_3d_example_file(
        top_directory_name, batch_number, raise_error_if_missing=True):
    """Finds file with downsized 3-D examples.

    :param top_directory_name: Name of top-level directory for files with
        downsized 3-D examples.
    :param batch_number: Batch number (integer).
    :param raise_error_if_missing: Boolean flag.  If file is missing and
        `raise_error_if_missing = True`, this method will error out.
    :return: downsized_3d_file_name: Path to file with downsized 3-D examples.
        If file is missing and `raise_error_if_missing = False`, this is the
        *expected* path.
    :raises: ValueError: if file is missing and `raise_error_if_missing = True`.
    """
    error_checking.assert_is_string(top_directory_name)
    error_checking.assert_is_boolean(raise_error_if_missing)
    error_checking.assert_is_integer(batch_number)
    error_checking.assert_is_geq(batch_number, 0)

    # Batches are stored in subdirectories of NUM_BATCHES_PER_DIRECTORY
    # batches each; find the range covered by this batch's directory.
    first_batch_number = int(_floor_to_nearest(
        batch_number, NUM_BATCHES_PER_DIRECTORY))
    last_batch_number = first_batch_number + NUM_BATCHES_PER_DIRECTORY - 1

    # Batch numbers are zero-padded to 7 digits in both the directory name
    # and the file name.
    downsized_3d_file_name = (
        '{0:s}/batches{1:07d}-{2:07d}/downsized_3d_examples_batch{3:07d}.nc'
    ).format(top_directory_name, first_batch_number, last_batch_number,
             batch_number)

    if raise_error_if_missing and not os.path.isfile(downsized_3d_file_name):
        error_string = 'Cannot find file. Expected at: "{0:s}"'.format(
            downsized_3d_file_name)
        raise ValueError(error_string)

    return downsized_3d_file_name
# This method locates many files with training examples.
def find_downsized_3d_example_files(
        top_directory_name, first_batch_number, last_batch_number):
    """Finds many files with downsized 3-D examples.

    :param top_directory_name: See doc for `find_downsized_3d_example_file`.
    :param first_batch_number: First batch number.
    :param last_batch_number: Last batch number.
    :return: downsized_3d_file_names: 1-D list of file paths.
    :raises: ValueError: if no files are found.
    """
    error_checking.assert_is_string(top_directory_name)
    error_checking.assert_is_integer(first_batch_number)
    error_checking.assert_is_integer(last_batch_number)
    error_checking.assert_is_geq(first_batch_number, 0)
    error_checking.assert_is_geq(last_batch_number, first_batch_number)

    # Glob for any 7-digit batch number, then filter to the requested range
    # below (glob alone cannot express a numeric range).
    downsized_3d_file_pattern = (
        '{0:s}/batches{1:s}-{1:s}/downsized_3d_examples_batch{1:s}.nc'
    ).format(top_directory_name, BATCH_NUMBER_REGEX)

    downsized_3d_file_names = glob.glob(downsized_3d_file_pattern)
    if len(downsized_3d_file_names) == 0:
        error_string = 'Cannot find any files with the pattern: "{0:s}"'.format(
            downsized_3d_file_pattern)
        raise ValueError(error_string)

    # Keep only files whose batch number lies in the requested range.
    batch_numbers = numpy.array(
        [_file_name_to_batch_number(f) for f in downsized_3d_file_names],
        dtype=int)
    good_indices = numpy.where(numpy.logical_and(
        batch_numbers >= first_batch_number,
        batch_numbers <= last_batch_number
    ))[0]

    if len(good_indices) == 0:
        error_string = (
            'Cannot find any files with batch number in [{0:d}, {1:d}].'
        ).format(first_batch_number, last_batch_number)
        raise ValueError(error_string)

    return [downsized_3d_file_names[i] for i in good_indices]
# This method reads a file with training examples.
def read_downsized_3d_examples(
        netcdf_file_name, metadata_only=False, predictor_names_to_keep=None,
        num_half_rows_to_keep=None, num_half_columns_to_keep=None):
    """Reads downsized 3-D examples from NetCDF file.

    :param netcdf_file_name: Path to input file.
    :param metadata_only: Boolean flag.  If True, will return only metadata
        (everything except predictor and target matrices).
    :param predictor_names_to_keep: 1-D list with names of predictor variables
        to keep (each name must be accepted by `_check_predictor_name`).  If
        `predictor_names_to_keep is None`, all predictors in the file will be
        returned.
    :param num_half_rows_to_keep: [used iff `metadata_only == False`]
        Determines number of rows to keep for each example.  Examples will be
        cropped so that the center of the original image is the center of the
        new image.  If `num_half_rows_to_keep is None`, examples will not be
        cropped.
    :param num_half_columns_to_keep: [used iff `metadata_only == False`]
        Same but for columns.
    :return: example_dict: Dictionary with the following keys.
    example_dict['predictor_matrix']: See doc for
        `prep_downsized_3d_examples_to_write`.
    example_dict['target_matrix']: Same.
    example_dict['target_times_unix_sec']: Same.
    example_dict['row_indices']: Same.
    example_dict['column_indices']: Same.
    example_dict['predictor_names_to_keep']: See doc for
        `write_downsized_3d_examples`.
    example_dict['pressure_level_mb']: Same.
    example_dict['dilation_distance_metres']: Same.
    example_dict['narr_mask_matrix']: Same.
    """
    error_checking.assert_is_boolean(metadata_only)
    if predictor_names_to_keep is not None:
        error_checking.assert_is_numpy_array(
            numpy.array(predictor_names_to_keep), num_dimensions=1)
        for this_name in predictor_names_to_keep:
            _check_predictor_name(this_name)

    netcdf_dataset = netCDF4.Dataset(netcdf_file_name)

    # Predictor names are stored as a char matrix; convert to Python strings.
    narr_predictor_names = netCDF4.chartostring(
        netcdf_dataset.variables[PREDICTOR_NAMES_KEY][:])
    narr_predictor_names = [str(s) for s in narr_predictor_names]
    if predictor_names_to_keep is None:
        # `+ []` makes a shallow copy, so the file's list is not aliased.
        predictor_names_to_keep = narr_predictor_names + []

    target_times_unix_sec = numpy.array(
        netcdf_dataset.variables[TARGET_TIMES_KEY][:], dtype=int)
    row_indices = numpy.array(
        netcdf_dataset.variables[ROW_INDICES_KEY][:], dtype=int)
    column_indices = numpy.array(
        netcdf_dataset.variables[COLUMN_INDICES_KEY][:], dtype=int)

    if not metadata_only:
        predictor_matrix = numpy.array(
            netcdf_dataset.variables[PREDICTOR_MATRIX_KEY][:])
        target_matrix = numpy.array(
            netcdf_dataset.variables[TARGET_MATRIX_KEY][:])

        # Subset the channel axis to the requested predictors, in the
        # requested order.
        these_indices = numpy.array(
            [narr_predictor_names.index(p) for p in predictor_names_to_keep],
            dtype=int)
        predictor_matrix = predictor_matrix[..., these_indices]

        # Optionally crop each example grid about its center.
        predictor_matrix = decrease_example_size(
            predictor_matrix=predictor_matrix,
            num_half_rows=num_half_rows_to_keep,
            num_half_columns=num_half_columns_to_keep)

    example_dict = {
        TARGET_TIMES_KEY: target_times_unix_sec,
        ROW_INDICES_KEY: row_indices,
        COLUMN_INDICES_KEY: column_indices,
        PREDICTOR_NAMES_KEY: predictor_names_to_keep,
        PRESSURE_LEVEL_KEY: int(getattr(netcdf_dataset, PRESSURE_LEVEL_KEY)),
        DILATION_DISTANCE_KEY: getattr(netcdf_dataset, DILATION_DISTANCE_KEY),
        NARR_MASK_KEY:
            numpy.array(netcdf_dataset.variables[NARR_MASK_KEY][:], dtype=int)
    }

    if metadata_only:
        netcdf_dataset.close()
        return example_dict

    example_dict.update({
        PREDICTOR_MATRIX_KEY: predictor_matrix.astype('float32'),
        TARGET_MATRIX_KEY: target_matrix.astype('float64')
    })

    netcdf_dataset.close()
    return example_dict
| aiml_symposium/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import re
import os
import numpy as np
import json
##This script outputs a JSON dictionary where keys = pdb id and values = nested list of CC regions defined by
##chain ID first residue and last residue of CC region.
def get_CCregion(line_withCCregion):
    """Extract the coiled-coil chain ID, first residue and last residue from a
    SOCKET 'assign' line by scanning it backwards from the end.

    The trailing token has the form "<chain>:<first>-<last>" (e.g. "A:12-34").
    The very first "other" character met while scanning backwards (normally
    the trailing newline) is skipped, exactly as in the original logic.
    """
    first_digits = []
    last_digits = []
    chain_chars = []
    past_dash = False
    skipped_tail_char = False

    for ch in reversed(line_withCCregion):
        if ch == ' ':
            break
        elif ch == '-':
            past_dash = True
        elif ch == ':':
            pass  # separator between chain ID and residue range
        elif ch.isdigit():
            # Digits before the '-' (reading backwards) belong to the last
            # residue, digits after it to the first residue.
            (first_digits if past_dash else last_digits).append(ch)
        elif skipped_tail_char:
            chain_chars.append(ch)
        else:
            skipped_tail_char = True

    chain_str = ''.join(reversed(chain_chars))
    first_residue_str = ''.join(reversed(first_digits))
    last_residue_str = ''.join(reversed(last_digits))
    return chain_str, first_residue_str, last_residue_str
import_directory = os.fsencode('./')
cc_regions = {}

# Walk the current directory, parse every SOCKET output file, and collect the
# coiled-coil regions per PDB id.
for socket_file in sorted(os.listdir(import_directory)):
    filename = os.fsdecode(socket_file)
    directory = os.fsdecode(import_directory)
    path = directory + filename
    if not os.path.isfile(path) or filename.startswith('.'):
        continue

    with open(path, 'r') as socket_output:
        # Numbering will differ from the input, as blank lines are stripped.
        socket_lines = [line for line in socket_output.readlines()
                        if line.strip()]

    # PDB id = the four characters before the file extension.
    pdb_id = filename[-8:-4]

    # Keep only the lines that assign coiled-coil regions.
    assigned_regions = [get_CCregion(line) for line in socket_lines
                        if line[0:6] == 'assign']

    # BUG FIX: the old code tested `socket_file[2:6] not in cc_regions`,
    # comparing a *bytes* slice against the dict's *str* keys, so the test was
    # always True and the append branch was dead code.  Compare str keys.
    # (The old `socket_output.close` without parentheses was also a no-op;
    # the `with` block above closes the file.)
    if pdb_id not in cc_regions:
        cc_regions[pdb_id] = assigned_regions
    else:
        cc_regions[pdb_id].append(assigned_regions)

with open("./CCregions2020.json", "w") as outfile:
    json.dump(cc_regions, outfile)
| SOCKET_extract-CCregions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
# -
# Plot styling for the whole notebook.
sns.set_context("talk")
sns.set_style('whitegrid')
sns.set_palette('deep')
import matplotlib.style as style
style.use('seaborn-poster')
style.use('ggplot')
# Load the pre-processed Game of Thrones script (one row per line of dialogue).
df = pd.read_csv('../data/processed/got.csv')
df.shape
df.head()
# Normalize capitalization of character names.
df['character'] = df['character'].str.title()
# remove surname to unify character names
df['character'] = df['character'].apply(lambda name: name.split()[0])
# Remove the character called 'Man': it is a generic character and is not
# relevant for the analysis.
no_name_chars = df['character'] == 'Man'
df = df[~no_name_chars]
n_characters = df['character'].nunique()
print('Number of unique characters in the show:', n_characters)
# There are more than one thousand characters across the lines of dialogue. In the first place, we are going to select a subset of the characters to be able to visualize them.
# +
# Line counts for the 20 most talkative characters.
n_lines = df['character'].value_counts()[:20]
plt.figure(figsize=(15, 8))
sns.barplot(x = n_lines, y=n_lines.index, color="steelblue", alpha=.85);
plt.title('Line count per character through all seasons of the show\nShowing Top 20',
          fontsize=20, horizontalalignment='left', loc='left')
plt.xlabel('Lines of dialogue', fontsize=16, weight='bold');
# -
# * It is clear that Tyrion is the one who said the most lines of dialogue throughout the series.
# * The following with a high counter are <NAME>, Cersei, Daenerys, Jaime, Sansa and Arya. Basically these were the character that survive all the seasons and they were of major importance in the plot
# Now, let's check how are the lines count by season. This will give us an idea of how the presence of each character evolved throughout the series.
# +
# Top-10 line counts per season: count lines per (season, character), then
# keep the 10 largest per season (groupby(level=0) regroups by season).
lines_by_season = df.groupby('season')['character'].value_counts() \
                    .groupby(level=0).head(10).rename('count').reset_index()
fig, axes = plt.subplots(3, 3, figsize=(15,10))
axes = axes.ravel()
colors = plt.rcParams["axes.prop_cycle"]()
# One bar chart per season, cycling through the default color cycle.
for i in range(8):
    c = next(colors)['color']
    i_season = lines_by_season[lines_by_season['season'] == i+1]
    sns.barplot(x='count', y='character', data=i_season, ax=axes[i], color=c, alpha=.8);
    axes[i].set_ylabel('')
    axes[i].set_xlabel('')
    axes[i].set_title(f'Season {i+1}', weight='bold')
# 3x3 grid but only 8 seasons: hide the unused ninth axis.
axes[-1].set_axis_off()
fig.suptitle('Number of lines per character\nShowing Top 10 per season',
             fontsize=22, horizontalalignment='left',
             x=0, y=1)
plt.tight_layout();
# -
# -
# * The most repeated characters are: Tyrion, <NAME>, Daenerys, Jaime and Arya
# * It's surprising how Tyrion is in the top 2 in all seasons.
# * Jon is a character with a large presence in the first season. However, in the following seasons he stands out a little less. And in the last two he is again one of the characters with more dialogue.
# * Daenerys, like Jon, has more lines of dialogue as ending approaches.
# * There are characters that have many lines in some seasons but then disappear in the following ones. This is the case of characters like Eddard, Tywin, Robb or Cersei, which die during the course of the show.
# As we can see Game of Thrones has many characters and the spotlight is fairly evenly distributed throughout the series with the exception of Tyrion who always stands out.
# \
# \
# Now that we know which people we’ll be working with, we can start doing some text analysis.
# ## Word Frequency
# Let's take a look at which words are the most used by each character.
df['text']
# We are going to create a dictionary to store word counts for each character.
# NOTE(review): the next line is a scratch sanity check of the NLTK tokenizer
# on an arbitrary sample sentence; it has no effect on the analysis.
nltk.word_tokenize('hola mi nombre es predo. Es asi')
# +
"""
Dict structure.
word_counter = {
key: character_name
value: {
key: word root,
value: (word, count)
}
}
"""
stemmer = nltk.stem.snowball.SnowballStemmer('english')
# A set makes the per-token stopword membership test O(1) instead of scanning
# the whole list on every token.
stopwords = set(nltk.corpus.stopwords.words('english'))


def get_counter(lines):
    """Count word occurrences in *lines*, grouping words by stem.

    Tokens that are not purely alphabetic, and stopwords, are ignored.  For
    each stem the shortest surface form seen so far is kept as the display
    word.

    :param lines: String with one or more lines of dialogue.
    :return: dict mapping stem -> {'count': int, 'word': shortest form seen}.
    """
    word_counter = {}
    for token in nltk.word_tokenize(lines):
        word = token.lower()
        if not word.isalpha() or word in stopwords:
            continue

        root = stemmer.stem(word)
        # Single lookup instead of `root in word_counter.keys()` followed by
        # a second indexing.
        entry = word_counter.get(root)
        if entry is None:
            word_counter[root] = {'count': 1, 'word': word}
        else:
            entry['count'] += 1
            if len(word) < len(entry['word']):
                entry['word'] = word

    return word_counter
def get_word_counter(df, character_name):
    """Return (word, count) pairs for *character_name*, most frequent first.

    Joins all of the character's dialogue lines into one string, counts words
    via `get_counter`, and flattens the result into a sorted list of tuples.
    """
    dialogue_lines = df.loc[df['character'] == character_name, 'text'].copy()
    joined_lines = ' '.join(dialogue_lines.astype(str).to_list())
    counter = get_counter(joined_lines)
    return sorted(
        ((entry['word'], entry['count']) for entry in counter.values()),
        key=lambda pair: pair[1],
        reverse=True,
    )
# +
# Show the 12 characters with the most lines.
character_name = 'Jon'
word_counts = get_word_counter(df, character_name)
word_counts[:5]
# -
pd.DataFrame(word_counts, columns=['word', 'counts']).head()
# +
# One bar chart of top words per character, for the 12 characters with the
# most dialogue.
fig, axes = plt.subplots(3, 4, figsize=(15,10))
colors = plt.rcParams["axes.prop_cycle"]()
axes = axes.ravel()
characters = ['Tyrion', 'Jon', 'Cersei', 'Daenerys', 'Jaime', 'Sansa',
              'Arya', 'Davos', 'Theon', 'Varys', 'Bronn', 'Sam']
for i, character in enumerate(characters):
    c = next(colors)['color']
    word_counts = get_word_counter(df, character)
    word_count_df = pd.DataFrame(word_counts, columns=['word', 'count'])
    sns.barplot(x='count', y='word', data=word_count_df[:10], ax=axes[i], color=c, alpha=.8);
    axes[i].set_ylabel('')
    axes[i].set_xlabel('')
    axes[i].set_title(f'{character}', weight='bold')
fig.suptitle('Top words counts for each character\nShowing top 10 words by top 12 character after removing stopwords',
             fontsize=22, horizontalalignment='left', x=0, y=1)
plt.tight_layout()
# -
| notebooks/explore.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="-UOOzCs9ukul" executionInfo={"status": "ok", "timestamp": 1627893771333, "user_tz": -330, "elapsed": 908, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
project_name = "reco-tut-arr"; branch = "main"; account = "sparsh-ai"
# + colab={"base_uri": "https://localhost:8080/"} id="PYvHGli8ukum" executionInfo={"status": "ok", "timestamp": 1627893775113, "user_tz": -330, "elapsed": 2891, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="51b83a8a-2aba-4c20-b571-690a004f18ca"
# !cp /content/drive/MyDrive/mykeys.py /content
import mykeys
# !rm /content/mykeys.py
path = "/content/" + project_name;
# !mkdir "{path}"
# %cd "{path}"
import sys; sys.path.append(path)
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "reco-tut-arr"
# !git init
# !git remote add origin https://"{mykeys.git_token}":x-oauth-basic@github.com/"{account}"/"{project_name}".git
# !git pull origin "{branch}"
# !git checkout main
# + id="XWXfp83ZDPbJ"
# !pip install autoviz
# + id="J6GnSXizHC08" executionInfo={"status": "ok", "timestamp": 1627894816568, "user_tz": -330, "elapsed": 581, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
import os
import numpy as np
import pandas as pd
# from scipy import stats # statistical library
# from statsmodels.stats.weightstats import ztest # statistical library for hypothesis testing
# import plotly.graph_objs as go # interactive plotting library
# import pandas_profiling # library for automatic EDA
# from autoviz.AutoViz_Class import AutoViz_Class
# from IPython.display import display # display from IPython.display
# from itertools import cycle # function used for cycling over values
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.preprocessing import StandardScaler
# Widen pandas output (up to 100 columns, 10 rows, 1000-char width) for easier inspection.
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 10)
pd.set_option('display.width', 1000)
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/"} id="Z12sTqxTDvMB" executionInfo={"status": "ok", "timestamp": 1627894310995, "user_tz": -330, "elapsed": 1469, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="9f1b3ab8-d325-48f9-cb39-09f630120f9e"
# Load every gzipped parquet file under ./data/bronze into a dict of
# DataFrames keyed by the file's base name (e.g. 'orders' for orders.parquet.gz).
df = dict()
for dirname, _, filenames in os.walk('./data/bronze'):
    for filename in filenames:
        if filename.endswith('.parquet.gz'):
            # Base name before the first dot becomes the dict key.
            name = filename.split('.')[0]
            df[name] = pd.read_parquet(os.path.join(dirname, filename))
            print(os.path.join(dirname, filename))
# + [markdown] id="3GUO_RYmMD5T"
# | filename | description |
# | -------- | ----------- |
# | test_customers.csv | customer id’s in the test set |
# | test_locations.csv | latitude and longitude for the different locations of each customer |
# | train_locations.csv | latitude and longitude for the different locations of each customer |
# | train_customers.csv | customer id’s in the training set |
# | orders.csv | orders made by the customers in train_customers.csv |
# | vendors.csv | vendors that customers can order from |
# + [markdown] id="zHU1zCzTMk32"
# ### Train Customers
# Information on the customers in the training set.
# - 'akeed_customer_id': Unique customer ID, used in train_locations and train_orders
# - 'gender': Customer gender
# - 'dob': Birth Year (if entered)
# - 'status' and 'verified': Account status
# - 'language': Chosen language
# - 'Created_at' and 'updated_at': dates when account was created/updated
#
# ### Train Locations
# Each customer orders from one or more locations. Each is assigned a location number.
# - 'customer_id': The unique customer ID
# - 'location_number': Location number (most customers have one or two)
# - 'location_type': Home, Work, Other or NA
# - 'Latitude' and 'longitude': Not true latitude and longitude - locations have been masked, but nearby locations remain nearby in the new reference frame and can thus be used for clustering. However, not all locations are useful due to GPS errors and missing data - you may want to treat outliers separately.
#
# ### Train Orders
# This is a record of all orders made by customers in the train set from the vendors. Each order contains:
# - 'akeed_order_id': The order ID used internally - can be ignored
# - 'customer_id': The customer making the order, used to link with customer info
# - 'item_count': how many items were in the order
# - 'grand_total': total cost
# - Payment related columns: 'payment_mode', 'Promo_code', 'vendor_discount_amount', 'Promo_code_discount_percentage'
# - Vendor related columns: 'is_favorite', 'is_rated', 'vendor_rating', 'driver_rating'
# - Order details: 'deliverydistance', 'preparationtime', 'delivery_time', 'order_accepted_time', 'driver_accepted_time', 'ready_for_pickup_time', 'picked_up_time', 'delivered_time', 'delivery_date','created_at'
# - 'vendor_id': the unique ID of the vendor
# - 'LOCATION_NUMBER': The location number specifies which of the customers locations the delivery was made to
# - 'LOCATION_TYPE': same as location type in the train_locations table
# - 'CID X LOC_NUM X VENDOR': Customer ID, location number and Vendor number
#
# ### Vendors
# Contains info on the different vendors. Important columns are:
# - 'id': The vendor ID used for the competition
# - 'latitude' and 'longitude' : masked the same way as the customer locations
# - 'vendor_tag_name': Tags describing the vendor
#
# > Note: Test Customers and Test Locations follow the same format as train.
#
# > Note: Other columns are mostly self-explanatory.
# + colab={"base_uri": "https://localhost:8080/", "height": 343} id="uRdAH95GIxA9" executionInfo={"status": "ok", "timestamp": 1627894349065, "user_tz": -330, "elapsed": 498, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="8a59917e-47ef-4c40-abee-50f63d973a3c"
# Preview the first rows of the orders table.
df['orders'].head()
# + colab={"base_uri": "https://localhost:8080/"} id="8zeqHNF1Ixr-" executionInfo={"status": "ok", "timestamp": 1627894360464, "user_tz": -330, "elapsed": 732, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c6e6832a-c932-4f1e-e95d-17fc6d444e55"
# Column dtypes, non-null counts and memory footprint of the orders table.
df['orders'].info()
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="uJM--o8nJbY5" executionInfo={"status": "ok", "timestamp": 1627894378188, "user_tz": -330, "elapsed": 662, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="8962fc78-611d-47d2-dd0a-d6bb6d8347d3"
# Summary statistics for the numeric columns.
df['orders'].describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="SPhUBPhzKCo5" executionInfo={"status": "ok", "timestamp": 1627894383287, "user_tz": -330, "elapsed": 1184, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="f99655f0-d377-449e-bdc5-6707acd8512f"
# Summary statistics (count/unique/top/freq) for object (string) columns.
df['orders'].describe(include=['O'])
# + id="DnUhXx9RLGVk" executionInfo={"status": "ok", "timestamp": 1627895085918, "user_tz": -330, "elapsed": 5, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Distribution graphs (histogram/bar graph) of column data
def plotPerColumnDistribution(df, nGraphShown, nGraphPerRow, figsize=None):
    """Plot per-column distributions: histograms for numeric columns,
    bar charts of value counts otherwise.

    df : pd.DataFrame to plot.
    nGraphShown : maximum number of columns to plot.
    nGraphPerRow : number of subplots per figure row.
    figsize : optional (width, height); derived from the grid when omitted.
    """
    nunique = df.nunique()
    # For displaying purposes, pick columns that have between 1 and 50 unique values
    df = df[[col for col in df if 1 < nunique[col] < 50]]
    nCol = df.shape[1]
    columnNames = list(df)
    # Ceiling division; plt.subplot requires integer grid dimensions, so use
    # // instead of / (true division produced a float and broke the call).
    nGraphRow = (nCol + nGraphPerRow - 1) // nGraphPerRow
    if not figsize:
        figsize = (6 * nGraphPerRow, 8 * nGraphRow)
    plt.figure(num=None, figsize=figsize, dpi=80, facecolor='w', edgecolor='k')
    for i in range(min(nCol, nGraphShown)):
        plt.subplot(nGraphRow, nGraphPerRow, i + 1)
        columnDf = df.iloc[:, i]
        if not np.issubdtype(type(columnDf.iloc[0]), np.number):
            # Non-numeric column: bar chart of value counts.
            valueCounts = columnDf.value_counts()
            valueCounts.plot.bar()
        else:
            columnDf.hist()
        plt.ylabel('counts')
        plt.xticks(rotation=90)
        plt.title(f'{columnNames[i]} (column {i})')
    plt.tight_layout(pad=1.0, w_pad=1.0, h_pad=1.0)
    plt.show()
# + id="MW1tvrwAFdpZ" executionInfo={"status": "ok", "timestamp": 1627895318170, "user_tz": -330, "elapsed": 433, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Correlation matrix
def plotCorrelationMatrix(df, graphWidth):
    """Plot a heatmap of pairwise column correlations.

    df : pd.DataFrame to analyze; NaN-containing and constant columns are
        dropped first.
    graphWidth : figure width and height in inches.
    """
    # Keyword axis: positional dropna('columns') was removed in pandas 2.0.
    df = df.dropna(axis='columns')  # drop columns with NaN
    df = df[[col for col in df if df[col].nunique() > 1]]  # keep columns where there are more than 1 unique values
    if df.shape[1] < 2:
        print(f'No correlation plots shown: The number of non-NaN or constant columns ({df.shape[1]}) is less than 2')
        return
    corr = df.corr()
    plt.figure(num=None, figsize=(graphWidth, graphWidth), dpi=80, facecolor='w', edgecolor='k')
    corrMat = plt.matshow(corr, fignum=1)
    plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
    plt.yticks(range(len(corr.columns)), corr.columns)
    plt.gca().xaxis.tick_bottom()
    plt.colorbar(corrMat)
    # Plain string: the former f-string had no placeholders.
    plt.title('Correlation Matrix', fontsize=15)
    plt.show()
# + id="ryP3v3LYFfdr" executionInfo={"status": "ok", "timestamp": 1627894716197, "user_tz": -330, "elapsed": 449, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Scatter and density plots
def plotScatterMatrix(df, plotSize, textSize):
    """Plot a scatter matrix (KDE on the diagonal) of up to 10 numeric
    columns, annotating each upper-triangle panel with the correlation
    coefficient.

    df : pd.DataFrame to plot.
    plotSize : figure width and height in inches.
    textSize : font size for the correlation annotations.
    """
    df = df.select_dtypes(include=[np.number])  # keep only numerical columns
    # Remove rows and columns that would lead to df being singular
    # (keyword axis: positional dropna('columns') was removed in pandas 2.0).
    df = df.dropna(axis='columns')
    df = df[[col for col in df if df[col].nunique() > 1]]  # keep columns where there are more than 1 unique values
    columnNames = list(df)
    if len(columnNames) > 10:  # reduce the number of columns for matrix inversion of kernel density plots
        columnNames = columnNames[:10]
        df = df[columnNames]
    ax = pd.plotting.scatter_matrix(df, alpha=0.75, figsize=[plotSize, plotSize], diagonal='kde')
    corrs = df.corr().values
    # Use np directly: the plt.np alias was removed in matplotlib >= 3.1.
    for i, j in zip(*np.triu_indices_from(ax, k=1)):
        ax[i, j].annotate('Corr. coef = %.3f' % corrs[i, j], (0.8, 0.2), xycoords='axes fraction', ha='center', va='center', size=textSize)
    plt.suptitle('Scatter and Density Plot')
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="5qzM_PxMFhJI" executionInfo={"status": "ok", "timestamp": 1627895171826, "user_tz": -330, "elapsed": 4273, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="4483fba6-3c29-4629-f9df-5719dfe06c59"
# Distributions of up to 10 low-cardinality columns of the orders table, 5 per row.
plotPerColumnDistribution(df['orders'], 10, 5, (20,6))
# + colab={"base_uri": "https://localhost:8080/", "height": 617} id="36kQL-eGFs2w" executionInfo={"status": "ok", "timestamp": 1627895324960, "user_tz": -330, "elapsed": 1167, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="57d20ebd-8f86-4331-fbca-5d077385c913"
# Correlation heatmap of the orders table (8x8-inch figure).
plotCorrelationMatrix(df['orders'], 8)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="m9qSt1yMHaLs" executionInfo={"status": "ok", "timestamp": 1627895464260, "user_tz": -330, "elapsed": 63399, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3b2ed11c-a9b7-4805-dac8-9e53abcc7b14"
# Scatter/density matrix of the orders table's numeric columns.
plotScatterMatrix(df['orders'], 20, 10)
# + id="eb3FHfKEIIND" executionInfo={"status": "ok", "timestamp": 1627896062537, "user_tz": -330, "elapsed": 468, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
## Reduce memory usage
def reduce_mem_usage(df, verbose=True):
    """Downcast each numeric column of *df* to the smallest dtype that can
    hold its value range, mutating *df* in place and returning it.

    df : pd.DataFrame whose numeric columns are downcast.
    verbose : when True, print the memory saving achieved.
    """
    numeric_dtypes = ('int16', 'int32', 'int64', 'float16', 'float32', 'float64')
    # Candidate target dtypes, smallest first; the first whose range
    # strictly contains [min, max] wins.
    int_candidates = (np.int8, np.int16, np.int32, np.int64)
    float_candidates = (np.float16, np.float32)
    start_mem = df.memory_usage().sum() / 1024**2
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type not in numeric_dtypes:
            continue
        c_min, c_max = df[col].min(), df[col].max()
        if str(col_type).startswith('int'):
            for candidate in int_candidates:
                limits = np.iinfo(candidate)
                if limits.min < c_min and c_max < limits.max:
                    df[col] = df[col].astype(candidate)
                    break
        else:
            for candidate in float_candidates:
                limits = np.finfo(candidate)
                if limits.min < c_min and c_max < limits.max:
                    df[col] = df[col].astype(candidate)
                    break
            else:
                # Range too wide (or NaN bounds): fall back to float64.
                df[col] = df[col].astype(np.float64)
    end_mem = df.memory_usage().sum() / 1024**2
    if verbose:
        print('Mem. usage decreased to {:5.2f} Mb ({:.1f}% reduction)'.format(
            end_mem, 100 * (start_mem - end_mem) / start_mem))
    return df
# + colab={"base_uri": "https://localhost:8080/"} id="UVddrSJdKp2L" executionInfo={"status": "ok", "timestamp": 1627896088906, "user_tz": -330, "elapsed": 950, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="6675ed3b-3a25-43c0-d48d-ba380ce0644d"
# Downcast the orders table's numeric columns to shrink its memory footprint.
orders_df = reduce_mem_usage(df['orders'])
| _docs/nbs/reco-tut-arr-02-eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import pandas as pd
import numpy as np
import missingno as msno
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
import seaborn as sns
# Load the saved per-column dtype mapping. Forward-slash paths replace the
# original Windows-style '..\...' strings, whose "\d"/"\D" backslashes are
# invalid escape sequences and which fail outright on POSIX systems.
with open('../dtypes.json', 'r') as jsonfile:
    dtyp = json.load(jsonfile)
dtyp
data = pd.read_csv('../Datasets/earthquake_data.csv', dtype=dtyp)
# Categorical description columns whose missing values become the 'NA' label.
description_features = [
    'injuries_description', 'damage_description',
    'total_injuries_description', 'total_damage_description'
]
imp = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value='NA')
data[description_features] = imp.fit_transform(data[description_features])
# How many dollar amounts are missing, and how many of those still carry a
# usable damage_description category to impute from.
print(data[pd.isnull(data.damage_millions_dollars)].shape[0])
print(data[pd.isnull(data.damage_millions_dollars) &
           (data.damage_description != 'NA')].shape[0])
# Mean damage (millions of dollars) per description category.
category_means = data[['damage_description', 'damage_millions_dollars']].groupby('damage_description').mean()
category_means
replacement_values = category_means.damage_millions_dollars.to_dict()
# Sentinel -1 for the unknown category; category '0' means zero damage.
replacement_values['NA'] = -1
replacement_values['0'] = 0
replacement_values
# Shown for illustration; the same mapping is recomputed inline below.
imputed_values = data.damage_description.map(replacement_values)
# Fill missing dollar amounts with the category mean, keeping known values.
data['damage_millions_dollars'] = np.where(data.damage_millions_dollars.isnull(),
                                           data.damage_description.map(replacement_values),
                                           data.damage_millions_dollars)
data[['damage_millions_dollars']].info()
| Chapter02/Exercise2.05/Exercise_2_05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Systems
#
# Solving linear systems of the form
# $$
# A \mathbf{x} = \mathbf{b}
# $$
#
# where $A$ is symmetric positive definite is arguably one of the most fundamental computations in statistics, machine learning and scientific computation at large. Many problems can be reduced to the solution of one or many (large-scale) linear systems. Some examples include least-squares regression, kernel methods, second-order optimization, quadratic programming, Kalman filtering, linear differential equations and all Gaussian (process) inference. Here, we will solve such a system using a *probabilistic linear solver*.
# + pycharm={"is_executing": false}
# Make inline plots vector graphics instead of raster graphics
# %matplotlib inline
from IPython.display import set_matplotlib_formats
# Render figures as PDF/SVG so they stay sharp at any zoom level.
set_matplotlib_formats("pdf", "svg")
# Plotting
import matplotlib.pyplot as plt
# Use the shared probnum style sheet for consistent figure styling.
plt.style.use("../../probnum.mplstyle")
# -
# ## Test Problems
# We begin by creating a random linear system with a symmetric positive definite matrix. ProbNum lets you quickly generate test problems via its problem zoo.
# +
import numpy as np
from probnum.problems.zoo.linalg import random_spd_matrix
# Random symmetric positive definite linear system
# Fixed seed keeps the generated test problem reproducible.
rng = np.random.default_rng(42)
# Problem dimension.
n = 25
# Prescribed polynomially decaying spectrum controls the conditioning.
A = random_spd_matrix(
    rng=rng, dim=n, spectrum=10 * np.linspace(0.5, 1, n) ** 4
)
b = rng.normal(size=(n, 1))
# Report conditioning and spectrum of the generated system.
print("Matrix condition: {:.2f}".format(np.linalg.cond(A)))
print("Eigenvalues: {}".format(np.linalg.eigvalsh(A)))
# + tags=["nbsphinx-thumbnail"]
# Plot linear system
# Four panels spell out "A x = b": the matrix, the symbols, and the
# right-hand side, all sharing one symmetric color scale.
fig, axes = plt.subplots(
    nrows=1,
    ncols=4,
    figsize=(5, 3.5),
    sharey=True,
    squeeze=False,
    gridspec_kw={"width_ratios": [4, 0.25, 0.25, 0.25]},
)
# Symmetric limits so zero maps to the middle of the diverging colormap.
vmax = np.max(np.abs(np.hstack([A, b])))
vmin = -vmax
axes[0, 0].imshow(A, cmap="bwr", vmin=vmin, vmax=vmax)
axes[0, 0].set_title("$A$", fontsize=24)
axes[0, 1].text(0.5, A.shape[0] / 2, "$\\bm{x}$", va="center", ha="center", fontsize=32)
axes[0, 1].axis("off")
axes[0, 2].text(0.5, A.shape[0] / 2, "$=$", va="center", ha="center", fontsize=32)
axes[0, 2].axis("off")
axes[0, 3].imshow(b, cmap="bwr", vmin=vmin, vmax=vmax)
axes[0, 3].set_title("$\\bm{b}$", fontsize=24)
# Hide tick marks on all panels.
for ax in axes[0, :]:
    ax.set_xticks([])
    ax.set_yticks([])
plt.tight_layout()
plt.show()
# -
# For example ``suitesparse_matrix`` lets you download matrices from the [SuiteSparse Matrix Collection](http://sparse.tamu.edu/), which is a repository of sparse benchmark matrices.
# +
from probnum.problems.zoo.linalg import suitesparse_matrix
# Download a sparse benchmark matrix from the SuiteSparse Matrix Collection.
Asparse = suitesparse_matrix(name="662_bus", group="HB")
# Display the downloaded matrix object.
Asparse
# -
# ## Prior Information
#
# We might have access to prior information about the inverse of $A$. Suppose we know something about the eigenvalue structure of $H=A^{-1}$. This is for example the case for Gram matrices generated by a specific kernel. In this case we assume that the average eigenvalue of the inverse is $\bar{\sigma}=\text{avg}(\sigma(H)) \approx 0.5$.
# Average eigenvalue of inverse
# Empirical check of the prior assumption avg(sigma(A^{-1})) ~ 0.5 used below.
print(np.mean(1 / np.linalg.eigvalsh(A)))
# Prior information is encoded in random variables modelling $A$ and $H$. Here, we will use our prior information about the spectrum by providing a prior mean for the inverse of the form $H_0 = \operatorname{diag}(\bar{\sigma})$.
# +
import probnum.randvars as randvars
from probnum.linops import Identity, Scaling, SymmetricKronecker
from probnum.linalg import problinsolve
# Prior distribution on A
# Encode the prior mean H0 = diag(0.5) for the inverse, matching the average
# inverse eigenvalue estimated above; the symmetric Kronecker covariance
# keeps the belief over the (symmetric) matrix symmetric.
W0H = Identity(n)
covA = SymmetricKronecker(W0H)
Ainv0 = randvars.Normal(mean=Scaling(0.5, shape=(n, n)), cov=covA)
# -
# ## Probabilistic Linear Solvers
#
# We now use a *probabilistic linear solver*, taking into account the prior information we just specified, to solve the linear system. The algorithm iteratively chooses *actions* $\mathbf{s}$ and makes linear *observations* $\mathbf{y}=A \mathbf{s}$ to update its belief over the solution, the matrix and its inverse.
# Probabilistic linear solver
# Stop early (maxiter=10 < n=25) so posterior uncertainty remains visible
# in the plots below.
x, Ahat, Ainv, info = problinsolve(A=A, b=b, Ainv0=Ainv0, maxiter=10)
print(info)
# ## Numerical Uncertainty
#
# The solver returns random variables $\mathsf{x}$, $\mathsf{A}$ and $\mathsf{H}$, which quantify numerical uncertainty in the solution, the linear operator itself and the estimate of the inverse. For illustration we stopped the solver early after $k=10 < n$ iterations. We plot means and samples from the resulting distributions of $\mathsf{A}$ and $\mathsf{H}$.
# +
# Draw samples
rng = np.random.default_rng(seed=42)
Ahat_samples = Ahat.sample(rng=rng, size=3)
Ainv_samples = Ainv.sample(rng=rng, size=3)
# Plot the true matrix next to the posterior mean and two samples of A.
# Raw strings avoid the invalid "\m" escape sequences in the LaTeX labels
# (a SyntaxWarning on Python 3.12+); the rendered text is unchanged.
rvdict = {
    r"$A$": A,
    r"$\mathbb{E}(\mathsf{A})$": Ahat.mean.todense(),
    r"$\mathsf{A}_1$": Ahat_samples[0],
    r"$\mathsf{A}_2$": Ahat_samples[1],
}
fig, axes = plt.subplots(nrows=1, ncols=len(rvdict), figsize=(10, 3), sharey=True)
for i, (title, rv) in enumerate(rvdict.items()):
    axes[i].imshow(rv, vmin=vmin, vmax=vmax, cmap="bwr")
    axes[i].set_axis_off()
    axes[i].title.set_text(title)
plt.tight_layout()
# + pycharm={"is_executing": false}
# Plot
# Top row: true inverse, posterior mean and samples of H; bottom row: their
# product with A (identity for the exact inverse). The "$..." keys are left
# unterminated on purpose - "$" or "A$" is appended to each title below.
# Raw strings avoid the invalid "\m" escape sequences in the LaTeX labels
# (a SyntaxWarning on Python 3.12+); the rendered text is unchanged.
rvdict = {
    r"$A^{-1}": np.linalg.inv(A),
    r"$\mathbb{E}(\mathsf{H})": Ainv.mean.todense(),
    r"$\mathsf{H}_1": Ainv_samples[0],
    r"$\mathsf{H}_2": Ainv_samples[1],
}
fig, axes = plt.subplots(nrows=2, ncols=len(rvdict), figsize=(10, 6), sharey=True)
for i, (title, rv) in enumerate(rvdict.items()):
    axes[0, i].imshow(rv, vmin=vmin, vmax=vmax, cmap="bwr")
    axes[0, i].set_axis_off()
    axes[0, i].title.set_text(title + "$")
    axes[1, i].imshow(rv @ A, vmin=vmin, vmax=vmax, cmap="bwr")
    axes[1, i].set_axis_off()
    axes[1, i].title.set_text(title + "A$")
plt.tight_layout()
# -
# Even though the solver has only explored a subspace of dimension $k \ll n$, the mean estimates for the matrix and its inverse are already close. This is in part due to the informative prior that we used. We can see that the uncertainty of the solver about these quantities is still relatively high by looking at the samples from $\mathsf{A}$ and $\mathsf{H}$ .
| docs/source/tutorials/linalg/linear_systems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ratthachat/prompt_engineering/blob/main/gpt3_commonsense_prompt.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="RRRCIAHWbF7e"
# # 1. Introduction
#
# This notebook is a supplementary to [GPT3 and Commonsense Reasoning](https://agi.miraheze.org/wiki/GPT3_and_Commonsense_Reasoning) article.
#
# The readers who have GPT-3 access can use the prompt generated below directly in OpenAI Playground.
#
# ### TL;DR
# To generate the main article prompt, execute \
#
# ```prompt = main_2shots + main_stories[i] + main_end_part```
#
# where `i` is the index of the story you want to test
#
# Similarly in the appendix prompt, execute
#
# `prompt = appendix_3shots + appendix_stories[i] + appendix_end_part`
# + [markdown] id="6ehIOyWjYKfy"
# # 2. Main Article Prompt
#
# In the main article linked above, we test the ability of GPT-3 when reading a short story on 8 basis commonsense reasoning dimensions. The following 2-shots prompt, containing comprehensive non-trivial reasoning examples on two stories, is provided:
# + id="h0NJOBgTalKW"
# Two-shot prompt: two fully worked story analyses covering the eight
# commonsense-reasoning dimensions; the target story is appended after the
# trailing "Story 3: " fragment. Runtime string - contents left verbatim.
main_2shots = """
Reasoning on Common Sense Knowledge / Guessing the Story
===
Story 1: Alice and Elsa were running toward the finish line. However, as Alice somehow fell down to the ground, Elsa turned her back and went to help Alice. A teacher and friends also went to see what happen.
Analysis:
1) Notable characters, their roles and status
Elsa and Alice are likely students and friends because there are teacher and friends in the context. By their names, they both should be girls. When they competed a race, they were competitors and runners. The teacher was probably their referee.
2) Notable believes, feelings and motivations of characters before, during and after the story
Before the story, both Elsa and Alice wanted to win the race and they should feel a bit excitement. In the story, when something happen to Alice, Elsa belived that Alice was injured. Elsa, and everybody else, felt care toward Alice. Then Elsa wanted to help Alice instead of going to win.
3) Notable possessions of characters before, during and after the story
Throughout the story, each runner should have a pair of running shoes, a running uniform. The teacher might have a whistle and perhaps a clock watch.
4) Basic usages and general knowledge on some notable possessions
People can use a pair of running shoes for running or just wear them casually.
A pair of running shoes typically costs $10 to $100. Famous and expensive brands include Nike, Adidas, Asics and New Balance.
5) Probable locations, positions and daytime of characters before, during and after the story
They probably stayed at a sport stadium, on a running race at their school throughout the story. It should be at day in the story since the class is rarely at night. Before the race started, Elsa and Alice should stay at the same starting point while the teacher and friends stay near the race. Shortly after the story, Elsa, Alice and everybody should stay closely to investigate Alice's condition.
6) Guess story genre, and general information about location and time period
The story was quite ordinary, so it could be non-fantasy or realistic fiction, maybe a bit drama. Since it looks peaceful, it might locate in no-war country. The event might took place after 1900s where the sport has been popular, and more probable after 1950s where WW-II already ended.
7) Probable events before and after the story
Before the story, it may be the time for PE class for Elsa and Alice, so they should change uniforms for the class. After the strory, if Alice was seriously hurt, maybe we had to bring Alice to a hospital, or otherwise, Alice might just take a rest.
8) Analyze the interesting event in the story, if any, and hypothesize that the interesting event would not occur if
The interesting part was when Alice got fell down. She might trip over stone or injured somewhere. The event would not happen if Alice was perfectly healthy, slept well and there were no stone on the race.
===
Story 2: A man called his son and daughter the day before Christmas and said he and their mom were going to divorce. The son and daughter were hurry to go back home to stop their parents. The old man turned to his wife and said "they're coming for Christmas now"
Analysis:
1) Notable characters ,their roles and status
A family of dad, mom, son and daughter. Their family status look very healthy.
2) Notable believes, feelings and motivations of characters before, during and after the story
Before the story, dad believed that their children would not come home in Christmas, so he might felt lonely and was motivated to trick their children to come home. At the end, dad believed that the children would come back home and might be happy. The children would believed the family was healthy before the story. In the story, they felt worried of the parents divorce, and that motivated them to back home. After the story, the children would initially got angry knowing that they were tricked, but were happy eventually to be back with the parents.
3) Notable possessions of characters before, during and after the story
Dad and children had phones, which could be either landline or mobile. All family members also belonged to each other in some sense.
4) Basic usages and general knowledge on some notable possessions
Average landline phone and mobile phone may cost around $100, but mobile phone price can be as high $2000. After the invention of smartphones by <NAME>, mobile phone can be used just like a small computer while landline phones would become obsolete.
5) Probable locations, positions and daytime of characters before, during and after the story
Before and in the story, the parents and children likely stayed in different cities or far enough that sometimes the children will not back home in Christmas. After the story, all of them would be at their home. The story could happen either day or night, but not on working hours.
6) Guess story genre, and general information about location and time period
This story genre should be a realistic fiction and comedy. The story was likely occured in either Europe or North America where most people are Chistian, so that Chirstmas day are very important. The story had to occur after phones were common to households and not in war-time which would be after 1980s.
7) Probable events before and after the story
Before the story, dad and mom would talk about possibilities that the children would not come home. So they thought about a fake divorce plan. After the story, children would be home in Chirstmas and the family should spend great time together.
8) Analyze the interesting event in the story, if any, and hypothesize that the interesting event would not occur if
The interesting part of the story was when dad happily spoke the truth that he tricked his children. This would turn out another way if the children would not care about the divorce and not back home no matter what.
===
Story 3: """
# + [markdown] id="m4-3BviH3YJ2"
# The following is a list of stories in various genres.
# + id="cv7u7gQS3Xtd"
# Test stories, one per genre; index i selects which story is appended to
# the two-shot prompt. Runtime strings - contents left verbatim.
main_stories = []
# 0. Biography
main_stories.append("On the contrary to his colleagues believes, <NAME> thought that people could stay alive in the sea by drinking sea water and eating small fish and plants from the sea. He set out in a small boat to cross the Atlantic Ocean. He was able to stay alive for 65 days before finishing the journey.")
# 1. Sci-fi
main_stories.append("Alien race seeking refuge landed on earth on a small island in the south pacific. For a hundred years they've managed to keep the island cloaked and secret from our human population. But now they've exhausted the resources.")
# 2. Shopping
main_stories.append("Ling went to a big-box store selling everything on the planet to buy his favorite tennis racket. But a staff named Xin said that the store would not sell the racket since it's defective. Ling complained that he has a ATP master to participate tomorrow and he needed the racket now.")
# 3. Mystery
main_stories.append("As a new job for a prominent wealthy family, one of Chandra's first task is to water all of the house plants. While Chandra is watering the lilies, one of the plants starts talking to warn him of a dark family secret.")
# 4. Travel
main_stories.append('It was very exciting to arrive the legendary island where "Origin of Species" was inspired from. However, as Giulia was not well-prepared, she did not even know where should he sleep tonight! At least, she had $1000 which hopefully was enough.')
# 5. Shakespere
main_stories.append("Being <NAME>’s apprentice would be great if he weren’t always stealing your ideas and claiming them as his own. So, James write a brilliant satiric play exposing him. He loves it and takes it to the stage.")
# 6. CoronaVirus
main_stories.append("In 2020, Coronavirus surprises everybody by spreading everywhere, killing millions of people and turn off most world travels. Uğur Şahin told all staffs in his company to work extremely hard on their mRNA vaccine research before situations got worse.")
# 7. Comedy
main_stories.append("Eriko never used a crystal punch set she got as a wedding gift. When Praew got married, Eriko wrapped the set as her gift. When Praew opened the gift, she looked curiously and told Eriko it was the same punch set she gave her Years ago.")
# + [markdown] id="tR2cesUu3dAD"
# The end part primes the model to begin its reasoning on the 1st dimension (Character Analysis). Once the model finishes this dimension, it automatically reasons about the remaining dimensions because of the pattern established in the 2-shot prompt.
# + id="TnBLJRSb3ceU"
# Closing fragment appended after the selected story; it cues the model to
# start with dimension 1. Runtime string - contents left verbatim.
main_end_part = """
Analysis:
1) Notable characters ,their roles and status
"""
# + colab={"base_uri": "https://localhost:8080/"} id="APtwqEeG3wKa" outputId="d635a2c3-aa55-4764-89c8-980515094488"
# Assemble the full prompt for story i and print it, ready to paste into
# the OpenAI Playground.
i = 0 # select each story genre here
prompt = main_2shots + main_stories[i] + main_end_part
print(prompt)
# + [markdown] id="AQ3g8z-5XZC8"
# # 3. Appendix Article Prompt
#
# As explained in the [article's appendix](https://agi.miraheze.org/wiki/Appendix_on_GPT3_and_Commonsense_Reasoning), here, we shall focus only on the two hardest dimensions, causal and counterfactual inferences. By focusing only on these two dimensions we are able to provide high-quality prompt with one more shot.
# + id="4qp-Syb531hs"
appendix_3shots = """
Causal and Couterfactual Reasoning on Common Sense Knowledge from Narrative
===
Story 1: Alice and Elsa were running toward the finish line. However, as Alice somehow fell down to the ground, Elsa turned her back and went to help Alice. A teacher and friends also went to see what happen.
- Probable events before and after the story
Before the story, it may be the time for PE class for Elsa and Alice, so they should change uniforms for the class. Then, in the class, a teacher may randomly asked a pair of students to make a running competition.
After the strory, Case 1.1) Alice was seriously hurt 1.2) a teacher had to bring Alice to a hospital 1.3) a doctor asked Alice where did she feel hurt 1.4) the doctor examined Alice's conditions at the mentioned place 1.5) the doctor gave her some medicine and necessary treatment 1.6) Alice went back home to tell her parents what happen
Case 2.1) Alice was not seriously hurt 2.2) Alice might just take a rest while the teacher brought her to the first aid room 2.3) she had a basic treatment 2.4) she would be able to go back to the PE class 2.5) she could cheer on the other competitors 2.6) she could attend other classes until the school finished
- Analyze the interesting event in the story, if any, and hypothesize that the interesting event would not occur if
The interesting part was when Alice got fell down since normally people can continue to run if there is no abnormal conditions. Therefore, she might trip over stone or injured somewhere.
- Hypothesize Counterfactual Story : Alice was perfectly healthy, slept well and there were no stone on the race
Alternative A1) Elsa might have a little winning edge A2) Elsa won the race A3) Elsa got more points than Alice A4) Alice would promise to try harder next time A5) the next competition of other students began
Alternative B1) Alice was trying to use the superman-dive to win B2) she succeeded without injury this time B3) She just flew pass Elsa and the goal fist B4) everyone applaused for her great performance B5) Elsa and Alice shook their hands promised to rematch again.
===
Story 2: A man called his son and daughter the day before Christmas and said he and their mom were going to divorce. The son and daughter were hurry to go back home to stop their parents. The old man turned to his wife and said "they're coming for Christmas now"
- Probable events before and after the story
Before the story, dad and mom would talk about possibilities that the children would not come home since the children may be too busy at their works. But the parents thought that Christmas was more important to works. So they thought about a fake divorce plan.
After the story, at home, Case 1.1) the children felt very angry knowing that they were fooled. 1.2) they promised the parents they would never come back in Christmas again 1.3) the parents said sorry and explained they really missed the kids 1.4) the parents made the best dinner
Case 2.1) the children did not resent their parent 2.2) they understood the value of family reunion in this special time 2.3) the family help make the best party 2.4) They spent great time together.
- Analyze the interesting event in the story
The interesting part of the story was when dad happily spoke the truth that he tricked his children. This part is interesting because normally parents will not lie to their children unless something really important.
- Hypothesize Counterfactual Story : either dad really did not happy or dad told the divorce was true
Alternative A1) Dad felt guilty about lying to their children A2) dad called them back to tell the truth A3) the children got annoy at first A4) eventually they understood each other A5) the children still came back on Chirstmas
Alternative B1) Dad confirmed the truth of divorce B2) Children came back begging their parents to change their minds B3) the parents would not change their minds B4) the parents told them that even though the divorce would happen, they still loved the children anyway B5) this was not quite a happy Christmas for the family
===
Story 3: It was very exciting to arrive the legendary island where "Origin of Species" was inspired from. However, as Giulia was not well-prepared, she did not even know where should he sleep tonight! At least, she had $1000 which hopefully was enough.
- Probable events before and after the story
The story suggested that she was alone. Since Giulia was not well-prepared, it is possible that she went to other places, e.g. Santa Elena, nearby the island first. Then, she might just had a sudden thought that this place was not too far from Galapagos so it was worth a try. she contacted some local tourist for a ticket, but forgot about the hotel.
After the story Case 1.1) She somehow found a cheap hotel 1.2) she had enough money left so she hired a local guide 1.3) the guide brought her to many famous islands e.g. Floreana and Bartolome 1.4) she likely also met great animals like Galapagos Tortoises and Lava Lizards
Case 2.1) She could find only an expensive hotel left 2.2) She used most money for the hotel 2.3) Since she had not much budget, she decided to explore the travel by walking 2.4) she asked a lot of locals for great places nearby 2.5) she would find exotic animals if she were really lucky
- Analyze the interesting event in the story
The most interesting part is when she realized that she had no place to sleep tonight. Since every person has to find a safe and comfort place to take a rest especially at night. And since she had never been in the island before, it was exciting how would she find out the hotel.
- Hypothesize Counterfactual Story : she decided to sleep elsewhere
Alternative A1) She decided to sleep at the port A2) she bought a sleeping bag A3) she was able to sleep there and travel for a few days A4) a port officer found out and came to tell her that she could not sleep here
Alternative B1) She decided to search for a homestay B2) She walked every nearby villages to find out a comfortable place B3) with some luck, she should able to find a good local homestay B4) she would ask the house how to have a great travel here B5) she learned local tips and able to make her great adventure
===
Story 4: """
# + [markdown] id="E9qFEiJzZWKR"
# The test stories are the same as those in the main article, except the "Travel" story which is already given to the model as the 3rd shot example.
# + id="q3l05RsnZWga"
appendix_stories = []
# 0. Biography
appendix_stories.append("On the contrary to his colleagues believes, <NAME> thought that people could stay alive in the sea by drinking sea water and eating small fish and plants from the sea. He set out in a small boat to cross the Atlantic Ocean. He was able to stay alive for 65 days before finishing the journey.")
# 1. Sci-fi
appendix_stories.append("Alien race seeking refuge landed on earth on a small island in the south pacific. For a hundred years they've managed to keep the island cloaked and secret from our human population. But now they've exhausted the resources.")
# 2. Shopping
appendix_stories.append("Ling went to a big-box store selling everything on the planet to buy his favorite tennis racket. But a staff named Xin said that the store would not sell the racket since it's defective. Ling complained that he has a ATP master to participate tomorrow and he needed the racket now.")
# 3. Mystery
appendix_stories.append("As a new job for a prominent wealthy family, one of Chandra's first task is to water all of the house plants. While Chandra is watering the lilies, one of the plants starts talking to warn him of a dark family secret.")
# 4. Shakespere
appendix_stories.append("Being <NAME>’s apprentice would be great if he weren’t always stealing your ideas and claiming them as his own. So, James write a brilliant satiric play exposing him. He loves it and takes it to the stage.")
# 5. CoronaVirus
appendix_stories.append("In 2020, Coronavirus surprises everybody by spreading everywhere, killing millions of people and turn off most world travels. Uğur Şahin told all staffs in his company to work extremely hard on their mRNA vaccine research before situations got worse.")
# 6. Comedy
appendix_stories.append("Eriko never used a crystal punch set she got as a wedding gift. When Praew got married, Eriko wrapped the set as her gift. When Praew opened the gift, she looked curiously and told Eriko it was the same punch set she gave her Years ago.")
# + id="MEou92s4Zuev"
appendix_end_part = """
- Probable events before and after the story
"""
# + colab={"base_uri": "https://localhost:8080/"} id="qCtHpj7IZ6bb" outputId="c4ffa674-b0f3-4a34-d7b2-4e2eda5bbbaa"
i = -1 # select each story genre here
prompt = appendix_3shots + appendix_stories[i] + appendix_end_part
print(prompt)
# + id="urXBGl2taDRp"
| common_sense/gpt3_commonsense_prompt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# import the modules
import pandas as pd
# The bare 'max_columns' alias was deprecated and removed in newer pandas;
# use the fully qualified display option name.
pd.set_option('display.max_columns', 100)
import numpy as np
import os
import matplotlib.pyplot as plt
import seaborn as sns
# -
df= pd.read_csv('../Employee Attrition project/train.csv')
df.info()
df.head(10)
df.Attrition.value_counts()
# Drop the surrogate-key column; pandas 2.0 removed the positional `axis`
# argument, so name the target columns explicitly.
df.drop(columns='Id', inplace=True)
df.head().transpose()
df.corr()
df
# Visualizing the distribution of the data for every feature
df[['Age', 'MonthlyIncome', 'NumCompaniesWorked', 'PercentSalaryHike', 'TotalWorkingYears', \
'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion', \
'YearsWithCurrManager']].hist(edgecolor='black', linewidth=1.2, figsize=(20, 20));
# +
# df.drop(columns=['DistanceFromHome', 'EmployeeCount', 'EmployeeNumber', 'Over18', 'StandardHours', 'Gender'], \
# axis=1, inplace=True)
# -
# Categorical variables
print('Categorical Coulmns : ', list(df.select_dtypes(include=['object']).columns))
# Copy only the categorical variables to perform operations on them.
hr_cat_df = df.select_dtypes(include=['object']).copy()
hr_cat_df.info()
# Convert every copied object column to the pandas 'category' dtype so the
# integer category codes can be extracted in the next step.
for _col in ('BusinessTravel', 'Attrition', 'Department', 'EducationField',
             'JobRole', 'MaritalStatus', 'OverTime', 'Gender', 'Over18'):
    hr_cat_df[_col] = hr_cat_df[_col].astype('category')
#To Confirm if Object type converted to category
hr_cat_df.info()
#Copy Original dataframe to other dataframe
hr_final_df = df.copy()
hr_final_df.head()
# Feature engineering: replace each categorical column in the working copy
# with its integer category codes taken from hr_cat_df.
for _col in ('BusinessTravel', 'Attrition', 'Department', 'EducationField',
             'JobRole', 'MaritalStatus', 'OverTime', 'Gender', 'Over18'):
    hr_final_df[_col] = hr_cat_df[_col].cat.codes
hr_final_df
#Correlation betn all columns
corr = hr_final_df.corr()
f,ax = plt.subplots(figsize=(16,9))
sns.heatmap(corr, cbar = True ,annot = True,fmt ='.2f',annot_kws ={'size':10})
hr_final_df.drop(columns=['MonthlyIncome','PerformanceRating', 'TotalWorkingYears', 'YearsInCurrentRole', 'YearsWithCurrManager'],axis=1, inplace=True)
hr_final_df.shape
import time as time
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score,roc_auc_score
from sklearn.ensemble import RandomForestClassifier
# +
from imblearn.over_sampling import SMOTE, RandomOverSampler, SMOTENC, SVMSMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.combine import SMOTETomek
from xgboost import XGBClassifier
from sklearn.ensemble import AdaBoostClassifier
# +
# Convert all categorical (non-numeric) columns into numeric label codes.
from sklearn.preprocessing import LabelEncoder
for column in df.columns:
    # `df[column].dtype == np.number` is not a reliable numeric-dtype test;
    # np.issubdtype performs the correct abstract-subtype check.
    if np.issubdtype(df[column].dtype, np.number):
        continue
    df[column] = LabelEncoder().fit_transform(df[column])
# -
df
df.Attrition.value_counts()
# +
# Baseline model: random forest on the engineered features, timed end to end.
t0 = time.time()
# pandas 2.0 removed the positional `axis` argument; name the columns instead.
X = hr_final_df.drop(columns='Attrition')
y = hr_final_df.Attrition
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, test_size=0.3)
# scaler = StandardScaler()
# X_train = scaler.fit_transform(X_train)
# X_test = scaler.transform(X_test)
model = RandomForestClassifier(random_state=42)
model.fit(X_train, y_train)
pred = model.predict(X_test)
t1 = time.time()
print('Time Taken:', t1 - t0)
print('Accuracy:', accuracy_score(y_test, pred))
print('Precision:', precision_score(y_test, pred))
print('Recall:', recall_score(y_test, pred))
print('roc_score:', roc_auc_score(y_test, pred))
print('Confusion Matrix:', confusion_matrix(y_test, pred))
# +
from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,GradientBoostingClassifier
from sklearn.metrics import confusion_matrix
ad = AdaBoostClassifier()
ad.fit(X_train,y_train)
y_pred_a= ad.predict(X_test)
confusion_matrix(y_test,y_pred_a)
from sklearn.metrics import precision_score,accuracy_score,f1_score
f1_score(y_test, y_pred_a,average='weighted')
accuracy_ad=ad.score(X_test,y_test)
print(accuracy_ad)
roc_score = roc_auc_score(y_test, y_pred_a)
print("roc_score: ", roc_score)
# +
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import f1_score,precision_score,recall_score,roc_auc_score,confusion_matrix,classification_report,accuracy_score
de =DecisionTreeClassifier()
de.fit(X_train,y_train)
y_pred_de= de.predict(X_test)
confusion_matrix(y_test,y_pred_de)
accuracy_de=de.score(X_test,y_test)
print(accuracy_de)
roc_score = roc_auc_score(y_test, y_pred_de)
print("roc_score: ", roc_score)
# +
import sklearn.metrics as metrics
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
#Random forest with GridSearchCV
parameters = {'n_estimators':[500]}
# instantiate random forest model
rf= RandomForestClassifier()
rf.fit(X_train,y_train)
# grid search on random forest classifier
grid_rf = GridSearchCV(estimator=rf, param_grid=parameters)
grid_rf.fit(X_train, y_train)
# make predictions
y_pred_rf = grid_rf.predict(X_test)
precision_rf= accuracy_score(y_pred_rf,y_test)
# plot_cm(grid_rf, y_pred_rf)
print(precision_rf)
# Plot the auc-roc curve
score = roc_auc_score(y_pred_rf , y_test)
y_pred_proba = grid_rf.predict_proba(X_test)[:,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr,tpr,label="RandomForest Model with grid search , auc="+str(auc))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc=4)
plt.show()
# +
#Logistic regression with GridSearchCV
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import sklearn.metrics as metrics
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
# Code starts here
# instantiate logistic regression model
lr=LogisticRegression(random_state=9)
lr.fit(X_train,y_train)
# grid search on logistic regression
grid = GridSearchCV(estimator=lr, param_grid=parameters)
grid.fit(X_train, y_train)
# make predictions
y_pred = grid.predict(X_test)
precision= precision_score(y_pred,y_test)
# plot_cm(grid, y_pred)
print(precision)
# Plot the auc-roc curve
score = roc_auc_score(y_pred , y_test)
y_pred_proba = lr.predict_proba(X_test)[:,1]
fpr, tpr, _ = metrics.roc_curve(y_test,y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr,tpr,label="Logistic model, auc="+str(auc))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc=4)
plt.show()
# +
# Creating list of class weights
class_wts = range(50)
prs = []
auc = []
#Loop to iterate different weights to the minority class(1)
for wt in class_wts:
rf7 = RandomForestClassifier(random_state=0, class_weight={0:1,1:wt})
rf7.fit(X_train, y_train)
prs.append(precision_score(y_test, rf7.predict(X_test)))
auc.append(roc_auc_score(y_test, rf7.predict(X_test)))
# Selecting the class weight that maximizes the precision score
max_scorer = prs.index(np.max(prs))
#Selecting the model with the max score
rf7 = RandomForestClassifier(random_state=42, class_weight={0:1,1:max_scorer})
rf7.fit(X_train, y_train)
print ("F1 score:", f1_score(y_test, rf7.predict(X_test)))
print ("Precision:",precision_score(y_test, rf7.predict(X_test)))
print ("Recall:",recall_score(y_test, rf7.predict(X_test)))
print ("ROC Score:",roc_auc_score(y_test, rf7.predict(X_test)))
print ("Confusion Matrix:\n",confusion_matrix(y_test, rf7.predict(X_test)))
plt.figure(figsize=(10, 8))
plt.plot(class_wts, prs, label="Precision scores")
plt.plot(class_wts, auc, label="AUC scores")
plt.xlabel("class weight")
plt.ylabel("scores")
plt.title("Effect of Class Wt. in Imbalanced Classes")
plt.ylim(0.45,1)
plt.legend()
plt.show()
# +
from imblearn.over_sampling import SMOTE
# Oversample the minority class with SMOTE.
smote = SMOTE(random_state=0)
# `fit_sample` was renamed to `fit_resample` and removed in imbalanced-learn 0.8.
X_sample, y_sample = smote.fit_resample(X_train, y_train)
# +
from sklearn.ensemble import RandomForestClassifier
rf_smote=RandomForestClassifier(random_state=0)
rf_smote.fit(X_sample,y_sample)
y_pred_smote= rf_smote.predict(X_test)
accuracy_smote= rf_smote.score(X_test,y_test)
recall_smote= recall_score(y_test,y_pred_smote)
precision_smote= precision_score(y_test,y_pred_smote)
f1_smote= f1_score(y_test,y_pred_smote)
#After Sampling
print("precision is :",precision_smote)
print("acuracy is :",accuracy_smote)
print("recall is :",recall_smote)
print("f1 score is :",f1_smote)
print ("ROC Score:",roc_auc_score(y_test, rf_smote.predict(X_test)))
# Plot the auc-roc curve
score = roc_auc_score(y_pred_smote , y_test)
y_pred_proba = rf_smote.predict_proba(X_test)[:,1]
fpr, tpr, _ = metrics.roc_curve(y_test, y_pred_proba)
auc = metrics.roc_auc_score(y_test, y_pred_proba)
plt.plot(fpr,tpr,label="RandomForest Model with smote, auc="+str(auc))
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.legend(loc=4)
plt.show()
# +
from imblearn.over_sampling import RandomOverSampler

# NOTE: renamed the sampler variable -- the original name `os` shadowed the
# `os` module imported at the top of this notebook.
ros = RandomOverSampler(0.8)
# `fit_sample` was renamed to `fit_resample` and removed in imbalanced-learn 0.8.
X_train_ns, y_train_ns = ros.fit_resample(X_train, y_train)
classifier = RandomForestClassifier()
classifier.fit(X_train_ns, y_train_ns)
y_pred_o = classifier.predict(X_test)
print("ROC AUC SCORE: ", roc_auc_score(y_test, y_pred_o))
# -
# ### Load the dataset
#
# - Load the train data and using all your knowledge try to explore the different statistical properties of the dataset.
# +
# Code starts here
# Code ends here
# -
# ### Visualize the data
#
# - Check for the categorical & continuous features.
# - Check out the best plots for plotting between categorical target and continuous features and try making some inferences from these plots.
# - Clean the data, apply some data preprocessing and engineering techniques.
# +
# Code starts here
# Code ends here
# -
# ### Model building
#
# - Now let's come to the actual task, using Decision Tree/Ensemble Technique, predict the `Attrition`. Use different techniques you have learned to improve the performance of the model.
# - Try improving upon the [ROC-AUC Score](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html)
# +
# Code Starts here
# Code ends here
# -
# ### Prediction on the test data and creating the sample submission file.
#
# - Load the test data and store the `Id` column in a separate variable.
# - Perform the same operations on the test data that you have performed on the train data.
# - Create the submission file as a `csv` file consisting of the `Id` column from the test data and your prediction as the second column.
# Code Starts here
# Code starts here
test_data=pd.read_csv('test.csv')
test_data.head()
test_data.drop(['Id'],axis = 1, inplace = True)
# # Code ends here
test_data
# Convert the object-dtype columns of the test set to 'category' so the same
# codes-based encoding used on the training data can be applied.
for _col in ('BusinessTravel', 'Department', 'EducationField', 'JobRole',
             'MaritalStatus', 'OverTime', 'Gender', 'Over18'):
    test_data[_col] = test_data[_col].astype('category')
test_data.info()
# +
# Replace each categorical column with its integer category codes, mirroring
# the encoding applied to the training data.
for _col in ('BusinessTravel', 'Department', 'EducationField', 'JobRole',
             'MaritalStatus', 'OverTime', 'Gender', 'Over18'):
    test_data[_col] = test_data[_col].cat.codes
# -
test_data.drop(columns=['MonthlyIncome','PerformanceRating', 'TotalWorkingYears', 'YearsInCurrentRole', 'YearsWithCurrManager'],axis=1, inplace=True)
test_data
test=pd.read_csv('test.csv')
id_ = test['Id']
# +
# Code starts here
# Predict on the test data
y_de= de.predict(test_data)
print(y_de)
# Create a sample submission file
sample_submission = pd.DataFrame({'Id':id_,'Attrition':y_de})
# Convert the sample submission file into a csv file
sample_submission.to_csv('Attrition_de.csv',index=False)
# +
# Code Starts here
sample_submission['Attrition'].value_counts()
# Code ends here
# -
| Employee_Attrition_Student_Template.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## `010`: Regression in `scikit-learn`
#
# Goals:
# * practice with the `fit` and `predict` interface of sklearn models
# * Get a visual sense of how different regression models work.
# ## Setup
# Let's import necessary modules: Pandas and NumPy for data wrangling, Matplotlib for plotting, and some sklearn models.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.metrics import mean_absolute_error, mean_squared_error, log_loss, accuracy_score, classification_report
from sklearn.model_selection import train_test_split
# We'll load the data. We're using a dataset of home sale prices from the Ames, Iowa assessor's database, described in [this paper](http://ww2.amstat.org/publications/jse/v19n3/decock.pdf).
ames = pd.read_csv('https://github.com/kcarnold/AmesHousing/blob/master/data/ames.csv.gz?raw=true', compression="gzip")
ames['price'] = ames["Sale_Price"] / 100_000 # Make `price` be in units of $100k, to be easier to interpret.
ames.head()
# We'll define some functions to plot the data and models. Since we have latitude and longitude for each home, we can plot this data in 2D with a color for the sale price.
#
# (Sorry, you'll just have to imagine there's a map underneath.)
def plot_data():
    # You don't have to know how this function works.
    lon = ames['Longitude']
    lat = ames['Latitude']
    plt.scatter(lon, lat, c=ames["price"], s=.5)
    plt.xlabel("Longitude")
    plt.ylabel("Latitude")
    plt.colorbar(label="Sale Price ($100k)")
plot_data()
# We'll try to predict home price based on *location* (which the realtors assure us is the most important factor anyway). So we'll grab the Latitude and Longitude columns of the data. We'll call that input data `X`, by convention.
X = ames[['Longitude', 'Latitude']].values
X.shape
# Our target, called `y` by convention, will be the home price (we'll soon introduce a different *y*, but start with this one).
y = ames['price'].values
y.shape
# Notice that `X` has two axes and thus is written in uppercase; `y` has 1 and thus is written in lowercase. (This is `sklearn` convention; other libraries are less consistent about this.)
# Now let's split the data into a `train` and `valid` set (which sklearn calls train-*test*, but that's fine). `random_state` is how `sklearn` specifies the random seed (it's actually slightly more flexible than a seed).
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=.2, random_state=42)
# We'll verify that the shapes make sense.
X_train.shape, y_train.shape
X_valid.shape, y_valid.shape
# Here's a function to plot our regression model in "data space" (i.e., what it would predict everywhere on the map).
#
# This function is pretty customized to our specific use case, though you can get inspiration from it for use in other situations.
def plot_model(clf):
    """Plot a fitted regression model's predictions over the Ames map area.

    Evaluates `clf` on a 250x250 longitude/latitude grid spanning the data
    extents, shows the predictions as filled contours, and superimposes the
    actual sales, with both layers pinned to the observed price range so the
    colors are comparable.
    """
    # Compute extents
    lat_min = ames.Latitude.min()
    lat_max = ames.Latitude.max()
    lon_min = ames.Longitude.min()
    lon_max = ames.Longitude.max()
    price_min = ames.price.min()
    price_max = ames.price.max()
    # Ask the classifier for predictions on a grid
    xx, yy = np.meshgrid(np.linspace(lon_min, lon_max, 250), np.linspace(lat_min, lat_max, 250))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    # Show the predictions. Superimpose the original data.
    plt.figure(figsize=(16, 8))
    # vmin/vmax share one color scale between the contour layer and the scatter
    plt.contourf(xx, yy, Z, alpha=.5, cmap=plt.cm.viridis, vmin=price_min, vmax=price_max)
    plt.scatter(ames['Longitude'], ames['Latitude'], c=ames["price"], s=1, cmap='viridis', vmin=price_min, vmax=price_max)
    plt.xlabel("Longitude"); plt.ylabel("Latitude")
    plt.colorbar(label="Sale Price ($100k)");
# ## Task
# ### Part A: Linear regression
#
# 1. Fit a linear regression model (call it `linreg`) to the training set (`X_train`, `y_train`).
# 2. Plot the model's predictions in data space. Describe the result qualitatively.
# 3. Compute the model's predictions on the validation set (call them `y_pred`). What does the model predict for the first house in the validation set? How does that compare with the actual price for that home?
# 4. Compute and show the mean squared error and the mean absolute error for the validation set predictions. You may use the `mean_absolute_error` and `mean_squared_error` functions that were imported from `sklearn.metrics` above.
# **Fit a linear regression model (call it `linreg`) to the training set (`X_train`, `y_train`).**
linreg = LinearRegression().fit(...)
# **Plot the model's predictions in data space. Describe the result qualitatively**. The code for step is filled in for you because there's not a generic way to do this; our approach here is customized to our particular model and task so you don't have to understand the details of how it works.
#
# The important aspect of this step is the qualitative description of how this model compares with the decision tree models used later. So come back and write this description once you've also seen the decision tree model outputs.
plot_model(linreg)
# **Compute the model's predictions on the validation set (call them `y_pred`). What does the model predict for the first house in the validation set? How does that compare with the actual price for that home?**
y_pred = linreg.predict(...)
# +
# your code here
# -
# **Compute and show the mean squared error and the mean absolute error for the validation set.**
#
# * You may use the `mean_absolute_error` and `mean_squared_error` functions (imported from `sklearn.metrics` above).
# * Use the predictions you already made above.
# * Use Shift-TAB or `?` to get the documentation for these functions to ensure you're passing the arguments in the correct order.
# +
# your code here
# -
# ### Part B: Decision tree regression
#
# 1. Fit a decision tree model (call it `dtree_reg`) to the training set.
# 2. Repeat steps 2 and 4 from Part A using this model.
# 3. Compare `dtree_reg` with `linreg`. Which is better? How can you tell?
# **Fit a decision tree model (call it `dtree_reg`) to the training set (`X_train`, `y_train`).**
dtree_reg = DecisionTreeRegressor()...
# **Repeat steps 2 and 4**...
plot_model(dtree_reg)
# +
# your code here
# -
# ### Part C: Random Forest regression
#
# 1. Fit a random forest regression model to this data.
# 2. Compare its performance quantitatively with the linear regression and decision tree models fit above.
# 3. Compare its data-space plot with the decision tree model.
# **Fit a random forest regression model to this data.** Use the default hyperparameters.
rf_reg = ...
# **Compare its performance quantitatively with the linear regression and decision tree models fit above.**
#
# You might notice differences in the shapes of the boundaries it draws and, if you look more closely, a difference in how the boundaries relate to the data.
plot_model(rf_reg)
# +
# your code here
# -
# ## Analysis
# ## Extension
#
# *optional*
#
# 1. Compute the loss on the *training* set for each of these models. Can that help you tell whether the model overfit or not?
# 2. Try using more features in the dataset. How well can you predict the price? Be careful about *categorical* features.
| portfolio/fundamentals/010-sklearn-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### The objective of this Jupyter Notebook is to get to know our datasets and to get an idea of how the data is related and what insights we can extract.
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, mean_squared_error
import seaborn as sns
# %matplotlib inline
seattle_listings = pd.read_csv('./Seattle/listings.csv')
seattle_listings.head()
# -
seattle_listings["neighbourhood_group"].value_counts()
seattle_scraped = pd.read_csv('./Seattle/scraped.csv')
seattle_scraped.head()
list(seattle_scraped.columns)
seattle_scraped["neighbourhood_group_cleansed"].value_counts()
seattle_reviews = pd.read_csv('./Seattle/reviews.csv')
seattle_reviews.head()
seattle_calendar = pd.read_csv('./Seattle/calendar.csv')
seattle_calendar.head()
seattle_neighbourhoods = pd.read_csv('./Seattle/neighbourhoods.csv')
seattle_neighbourhoods.head()
pd.set_option('display.max_columns', None)
seattle_scraped.describe()
# value_counts is a method; the original line was missing the call parentheses
# and therefore displayed the bound method object instead of the counts.
seattle_calendar["listing_id"].value_counts()
| .ipynb_checkpoints/Business Understanding and Data Understanding-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import seaborn as sns
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
import torch
from torch import nn
from torchsummary import summary
from torch.autograd import Variable
import scikitplot as skplt
import matplotlib.pyplot as plt
# %matplotlib inline
# -
iris = sns.load_dataset('iris')
sns.set(style="ticks", color_codes=True)
g = sns.pairplot(iris, hue='species')
iris.head()
classes = {'setosa': 0, 'versicolor': 1, 'virginica': 2}
X = iris[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']].values
X[:5]
y = iris['species'].apply(lambda x: classes[x]).values
y
# +
X_train_array, X_test_array, y_train_array, y_test_array = train_test_split(X, y, test_size=0.33, random_state=0)
# y_train_array = np.expand_dims(y_train_array, axis=0)
# y_test_array = np.expand_dims(y_test_array, axis=0)
print(X_train_array[:5], np.shape(X_train_array))
print(y_train_array[:5], np.shape(y_train_array))
print(X_test_array[:5], np.shape(X_test_array))
print(y_test_array[:5], np.shape(y_test_array))
# +
# Simple MLP classifier for the 4-feature iris data, emitting 3 class scores.
# NOTE: the trailing Softmax layer was removed because training uses
# nn.CrossEntropyLoss, which expects raw logits and applies log-softmax
# internally; stacking Softmax in front of it flattens the gradients.
# Downstream argmax-based predictions are unaffected (argmax is invariant
# under softmax).
iris_model = nn.Sequential(
    torch.nn.Linear(4, 10),
    torch.nn.SELU(),
    torch.nn.Linear(10, 3),
)
print(iris_model)
# -
summary(iris_model, (1,4))
X_train = Variable(
torch.from_numpy(X_train_array).float()
)
y_train = Variable(
torch.from_numpy(y_train_array).long()
)
X_test = Variable(
torch.from_numpy(X_test_array).float()
)
y_test = Variable(
torch.from_numpy(y_test_array).long()
)
y_train
# ## Training
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.RMSprop(
iris_model.parameters(), lr=0.01
)
# Train for 1000 epochs of full-batch gradient descent.
for epoch in range(1000):
    optimizer.zero_grad()           # clear gradients accumulated last step
    out = iris_model(X_train)       # forward pass
    loss = criterion(out, y_train)
    loss.backward()                 # backpropagate
    optimizer.step()                # apply the parameter update
    if epoch % 10 == 0:
        # .item() prints the scalar loss value rather than the tensor repr
        print('number of epoch:', epoch, 'loss:', loss.item())
# +
fig, ax = plt.subplots(figsize=[5,5])
y_pred = iris_model(X_test).detach().numpy()
skplt.metrics.plot_confusion_matrix(
y_test,
y_pred.argmax(axis=1),
normalize=True,
ax=ax
)
labels = ['setosa', 'versicolor', 'virginica']
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
# -
| notebooks/modeling_in_pytorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Indoor Positioning over h-Estimated LoS
# Notebook for the CTW Competition 2019:
#
# By <NAME>, <NAME>, <NAME>, and <NAME>.
# Contact: arnold / gauger / <EMAIL>
#
# This code is provided for the CTW Competition: Indoor user localisation.
# It is licensed under the GPLv2 license. If you in any way use this code for research that results in publications, please cite it appropriately.
#
# Paper: Novel Massive MIMO Channel Sounding Data Applied to Deep Learning-based Indoor Positioning
#
# # Settings LoS
# +
# Data Sets
Nb_of_Datasets = 1
Data_Foldername = './1_Measured_Data'
Meas_Comb_h = "%s/h_Estimated_CTW_Train.mat" % (Data_Foldername)
Meas_Comb_r = "%s/r_Position_CTW_Train.mat" % (Data_Foldername)
Meas_Comb_SNR = "%s/SNR_CTW_Train.mat" % (Data_Foldername)
# Filename Array
Filenames_h = [Meas_Comb_h]
Filenames_r = [Meas_Comb_r]
# Training size
TrainingsSize = 0.9# 90 %
# +
## Input Settings ########################################
# NN Settings
nb_epoch =50 # number of learning epochs
batch_sizes = [8,64,128,256,1024] # size of batches for calculation the gradient
# Number of Antennas
Nb_Antennas = [2,4,8,16]
# OutputName
Filename_Pre = './2_Results/';
# -
# # Standard Includes
# +
import os
import random
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import tensorflow as tf
import keras.backend.tensorflow_backend as KTF
def get_session(gpu_fraction=1):
    """Build a TF1 session limited to `gpu_fraction` of the GPU memory.

    Honours the OMP_NUM_THREADS environment variable for intra-op
    parallelism when it is set.
    """
    num_threads = os.environ.get('OMP_NUM_THREADS')
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)
    if num_threads:
        # Fix: the env var is a string; ConfigProto expects an int thread count.
        return tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, intra_op_parallelism_threads=int(num_threads)))
    else:
        return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
KTF.set_session(get_session())
import numpy as np
import hdf5storage
from keras.models import Model, Sequential
from keras.layers import Input, Dense, Lambda, Reshape, Conv1D, Conv2D, AveragePooling2D,Flatten, Dropout, SimpleRNN, LSTM, concatenate
from keras import backend as K
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping
from IPython.core.display import Image, display
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# %matplotlib inline
from mpl_toolkits.mplot3d import axes3d
from numpy import ma
import scipy.io as sio
from IPython.display import Image
from matplotlib import cm as CM
from nbconvert import HTMLExporter
import keras
keras.__version__
# #!nvidia-smi
# Distance Functions
def dist(y_true, y_pred):
    """Mean 3-D Euclidean distance between predicted and true positions (TensorFlow op, used as a Keras metric)."""
    return tf.reduce_mean((tf.sqrt(tf.square(tf.abs(y_pred[:,0]-y_true[:,0]))+tf.square(tf.abs(y_pred[:,1]-y_true[:,1]))+tf.square(tf.abs(y_pred[:,2]-y_true[:,2])))))
def true_dist(y_true, y_pred):
    """Per-sample 3-D Euclidean distance between true and predicted positions (NumPy)."""
    component_errors = [np.abs(y_pred[:, k] - y_true[:, k]) for k in range(3)]
    return np.sqrt(sum(np.square(err) for err in component_errors))
# +
# Example for Measurement Quality: empirical CDF of the per-antenna SNR.
SNR_Mat = hdf5storage.loadmat(Meas_Comb_SNR)
SNR_Meas = np.transpose(SNR_Mat['SNR_Est'])
num_bins = 50
# Bin the data. `density=True` replaces the `normed` argument, which was
# deprecated and then removed from NumPy (1.24).
counts, bin_edges = np.histogram(SNR_Meas, bins=num_bins, density=True)
# Now find the cdf: integrate the density over the bin widths so the
# curve actually ends at 1 (cumsum of a density alone is not a CDF).
cdf = np.cumsum(counts * np.diff(bin_edges))
# And finally plot the cdf
plt.plot(bin_edges[1:], cdf)
plt.xlim(0, 20)
plt.xlabel('SNR [dB]')
plt.ylabel('F(X>x)')
plt.title('Cdfplot of all antennas stacked')
plt.show()
# -
# ## Sweep over Datasets
# +
# Allocate space for the per-dataset / per-antenna-count error results.
# Fix: np.int was removed in NumPy 1.24 — use the builtin int.
Mean_Error_Train = np.empty([int(Nb_of_Datasets), len(Nb_Antennas)])
Mean_Error_Test = np.empty([int(Nb_of_Datasets), len(Nb_Antennas)])
# Sweep over Datasets
for Current_Dataset_Index in range(0, Nb_of_Datasets, 1):
    ## Load Current Dataset
    VariableName_h = Filenames_h[Current_Dataset_Index]
    VariableName_r = Filenames_r[Current_Dataset_Index]
    # h_Estimated: complex channel estimates, split into real/imag channels
    h_Estimated_Mat = hdf5storage.loadmat(VariableName_h)
    h_Estimated = np.transpose(h_Estimated_Mat['h_Estimated'])
    h_Estimated = np.swapaxes(h_Estimated, 0, 2)
    h_Estimated = np.stack((np.real(h_Estimated), np.imag(h_Estimated)), axis=-1)
    # Labels: ground-truth positions
    position = sio.loadmat(VariableName_r)
    target_pos = np.transpose(position['r_Position'])
    target_pos = np.swapaxes(target_pos, 0, 1)
    # Shuffle samples and labels with the same permutation
    shuffling_index = random.sample(range(target_pos[:, 0].size), target_pos[:, 0].size)
    h_Estimated = h_Estimated[shuffling_index, :, :, :]
    target_pos = target_pos[shuffling_index, :]
    # Split Data into train / test according to TrainingsSize
    n_train = int(TrainingsSize * target_pos[:, 0].size)
    h_Estimated_train_saved = h_Estimated[0:n_train, :, :, :]
    Positions_train = target_pos[0:n_train, :]
    h_Estimated_test_saved = h_Estimated[n_train:, :, :, :]
    Positions_test = target_pos[n_train:, :]
    Length_Test = len(Positions_test[:, 0])
    for Antenna_index in range(0, len(Nb_Antennas)):
        # Sub-sample the antenna dimension down to the current antenna count
        Current_Nb_Antennas = Nb_Antennas[Antenna_index]
        h_Estimated_train = h_Estimated_train_saved[:, 0::int(16/Current_Nb_Antennas), :, :]
        h_Estimated_test = h_Estimated_test_saved[:, 0::int(16/Current_Nb_Antennas), :, :]
        # Create the fully-connected regression net (3 outputs: x, y, z)
        nn_input = Input((Current_Nb_Antennas, 924, 2))
        nn_output = Flatten()(nn_input)
        nn_output = Dense(128, activation='relu')(nn_output)
        nn_output = Dense(256, activation='relu')(nn_output)
        nn_output = Dense(128, activation='relu')(nn_output)
        nn_output = Dense(3, activation='linear')(nn_output)
        nn = Model(inputs=nn_input, outputs=nn_output)
        nn.compile(optimizer='Adam', loss='mse', metrics=[dist])
        nn.summary()
        # Train Neural Network with an increasing batch-size schedule
        for b in batch_sizes:
            train_hist = nn.fit(x=h_Estimated_train, y=Positions_train, batch_size=b,
                                epochs=nb_epoch, validation_data=(h_Estimated_test, Positions_test))
        # Evaluate Performance on both splits
        r_Positions_pred_train = nn.predict(h_Estimated_train)
        r_Positions_pred_test = nn.predict(h_Estimated_test)
        errors_train = true_dist(Positions_train, r_Positions_pred_train)
        errors_test = true_dist(Positions_test, r_Positions_pred_test)
        Mean_Error_Train[Current_Dataset_Index, Antenna_index] = np.mean(np.abs(errors_train))
        Mean_Error_Test[Current_Dataset_Index, Antenna_index] = np.mean(np.abs(errors_test))
        print("Mean error on Train area:", Mean_Error_Train[Current_Dataset_Index, Antenna_index])
        print("Mean error on Test area:", Mean_Error_Test[Current_Dataset_Index, Antenna_index])
# -
# Mean error on the test area versus the number of antennas used.
plt.plot(Nb_Antennas,Mean_Error_Test[0,:])
plt.xlabel('Number of antennas')
plt.ylabel('Distance error')
# Histogram of errors on the test area (last trained configuration).
errors = true_dist(r_Positions_pred_test , Positions_test)
plt.hist(errors,bins=64,range=(0,4))
plt.ylabel('Number of occurence')
plt.xlabel('Distance error')
# Error vectors over the area in XY: arrows point from truth toward prediction,
# coloured by error magnitude.
error_vectors = np.real(r_Positions_pred_test - Positions_test)
plt.figure(figsize=(15,15))
plt.quiver(np.real(Positions_test[:,0]),np.real(Positions_test[:,1]),error_vectors[:,0],error_vectors[:,1],errors)
plt.xlabel("x in m")
plt.ylabel("y in m")
plt.show()
| CTW2019_StartingNotebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ## Interface statistics basics & comparing Interface statistics for two structures
#
# ### Preface
#
# #### Getting table of Interactions Statistics for a structure from under PDBsum's 'Prot-prot' tab via command line.
#
# [Here is the page](http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetPage.pl?pdbcode=6kiv&template=interfaces.html&c=999) you'd see if you looked at the 'Prot-Prot' tab for PDB id code [6kiv](https://www.rcsb.org/structure/6KIV). At the bottom of [that page](http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetPage.pl?pdbcode=6kiv&template=interfaces.html&c=999) is a table with the heading 'Interface statistics'.
# First, in this notebook we are going to bring that table into Python via a couple routes using the same script.
#
# Then in the remainder of that notebook, that process will be used as a basis via another, related script to make a single summary dataframe that compares the 'Interface statistics' for two different structures conveniently.
#
# ### Basic use of the protein-protein interface statistics-to-dataframe script
#
# The script is easy to use. You just need the script in current working directory and then call it, providing a PDB code to retrieve the inferface statistics table as a Pandas dataframe.
#
# Running the next cell will copy the script from Github into the current working directory.
import os
file_needed = "pdbsum_prot_interface_statistics_to_df.py"
# Fetch the script from GitHub only when it is not already in the working directory.
if not os.path.isfile(file_needed):
    # !curl -OL https://raw.githubusercontent.com/fomightez/structurework/master/pdbsum-utilities/{file_needed}
# Next, call that script, providing a PDB identification code for a structure at the Protein Data Bank to examine.
# %run pdbsum_prot_interface_statistics_to_df.py 6kiz
# That will have generated a dataframe and saved it in a form of a serialized Python object, which is a fancy way of saying it was saved as a compressed file in a special form so that it is still a Python object. To use the generated dataframe, we need to read it in to the namespace of this running notebook by running the next cell.
import pandas as pd
# Deserialize the pickled dataframe produced by the script above.
df = pd.read_pickle("int_stats_pickled_df.pkl")
# Now by running the next cell we'll display the dataframe.
df
# That shows the Interface statistics table has been converted to a Pandas dataframe.
#
# So far this isn't overly helpful since this same table can be viewed at PDBsum [here](http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetPage.pl?pdbcode=6kiv&template=interfaces.html&c=999). However, if you know some Pandas you can already see this is much more useful going forward for you than the table at the PDBsum page.
# For example, to **sort on the summed interface area**:
df.assign(ia_sum = pd.to_numeric(df['Interface area (Å2)'].str.split(":").str[0]) + pd.to_numeric(df['Interface area (Å2)'].str.split(":").str[1])).sort_values('ia_sum',ascending=False).drop('ia_sum', axis=1) # `.str[0]` after the split based on https://datascience.stackexchange.com/a/39493 ; int() isn't vectorized so using `pd.to_numeric()`
# Additionally, the script used to make the above dataframe will form the behind-the-scenes of the bulk of the effort when two structures are compared below.
#
# First, to complete the 'Basics' section, the following will demonstrate using the script in the notebook as a function to go directly to a dataframe without needing to save and read the Pandas dataframe as a serialized Python object (pickle) first. This provides a more convenient way to use the script if you are working in a Jupyter notebook.
# #### Basics part 2: Using the main function of the protein-protein interface statistics-to-dataframe script within a notebook
#
# First we'll fetch the script if it isn't already here. Running the next cell won't cause any issues if the script has already been retrieved.
import os
file_needed = "pdbsum_prot_interface_statistics_to_df.py"
# Fetch the script from GitHub only when it is not already in the working directory.
if not os.path.isfile(file_needed):
    # !curl -OL https://raw.githubusercontent.com/fomightez/structurework/master/pdbsum-utilities/{file_needed}
# This is going to rely on approaches very similar to those illustrated [here](https://github.com/fomightez/patmatch-binder/blob/6f7630b2ee061079a72cd117127328fd1abfa6c7/notebooks/PatMatch%20with%20more%20Python.ipynb#Passing-results-data-into-active-memory-without-a-file-intermediate) and [here](https://github.com/fomightez/patmatch-binder/blob/6f7630b2ee061079a72cd117127328fd1abfa6c7/notebooks/Sending%20PatMatch%20output%20directly%20to%20Python.ipynb##Running-Patmatch-and-passing-the-results-to-Python-without-creating-an-output-file-intermediate). See the first notebook in this series, [Working with PDBsum in Jupyter Basics](Working%20with%20PDBsum%20in%20Jupyter%20Basics.ipynb), for a related, more fully-explained example with a different script.
# By running the following command, we'll bring the main function into the namespace of the notebook in a way that we can call that function later.
from pdbsum_prot_interface_statistics_to_df import pdbsum_prot_interface_statistics_to_df
# The next cell will make the dataframe by calling the function and supplying it with a PDB code as an argument. Then the `df` line at the bottom allows for displaying the produced dataframe.
# Call the script's main function directly, then display the resulting dataframe.
df = pdbsum_prot_interface_statistics_to_df("6kiz")
df
# That ends the coverage of the 'basics' where PDBsum's Interface statistics is converted to a dataframe. We can use that process as the basis for further efforts. The remainder of this notebook will demonstrate using that as a basis for making a summary dataframe to compare the interface statistics for two structures.
#
#
# ### Comparing Interface statistics for two structures
#
# Next, the remainder of this Jupyter notebook demonstrates use of the script `pdbsum_prot_interface_statistics_comparing_two_structures.py` to compare the Interface Statistics for two structures conveniently.
# The script above forms the core function behind this and so if you are looking for more information, make sure you have looked at the top of this notebook first. The comparison script simply uses that script to get the table for two structures and then rearranges the data for easy viewing as a dataframe.
#
# Running the next cell will copy the script from Github into the current working directory, if it isn't there already.
import os
file_needed = "pdbsum_prot_interface_statistics_comparing_two_structures.py"
# Fetch the comparison script from GitHub only when it is not already present.
if not os.path.isfile(file_needed):
    # !curl -OL https://raw.githubusercontent.com/fomightez/structurework/master/pdbsum-utilities/{file_needed}
# Next, call that script providing a PDB identification codes for the two structures at the Protein Data Bank to examine.
# %run pdbsum_prot_interface_statistics_comparing_two_structures.py 6kiv 6kiz
# That will have generated a dataframe and saved it in a form of a serialized Python object. As with in the upper section of this notebook, we need to read the generated object in to the namespace of this running notebook by running the next cell.
import pandas as pd
# Deserialize the pickled comparison dataframe written by the script.
df = pd.read_pickle("int_stats_comparison_pickled_df.pkl")
# Now by running the next cell we'll display it.
df
# The produced summary table (dataframe) makes it much easier to compare the interactions between two different chains.
#
# Note that `NaN` (meaning 'not a number') is filled in for columns where the particular structure doesn't represent interactions for that chain pairing. For example, [6kiz](http://www.ebi.ac.uk/thornton-srv/databases/cgi-bin/pdbsum/GetPage.pl?pdbcode=6kiz&template=interfaces.html&c=999) doesn't show chains C and K interacting, see row `C:K` above. In both structures, chain C is histone H2A and chain K is KMT2A, and so it isn't that they represent different chains. While the data in the 6kiz structure has seven less residues experimentally observed for chain C, the interactions in 6kiv don't involve those residues, and so the 'missing residues' don't account for this interactions loss. Clearly, the structures differ in regards to this interaction.
#
# Let's do the same by using the main function of the script to allow skipping saving the file intermediate.
# #### Comparing part 2: Using the main function of the protein-protein interface statistics comparing script within a notebook
#
# First, we'll fetch the script if it isn't already here. Running the next cell won't cause any issues if the script has already been retrieved.
import os
file_needed = "pdbsum_prot_interface_statistics_comparing_two_structures.py"
# Fetch the comparison script from GitHub only when it is not already present.
if not os.path.isfile(file_needed):
    # !curl -OL https://raw.githubusercontent.com/fomightez/structurework/master/pdbsum-utilities/{file_needed}
# By running the following command, we'll bring the main function into the namespace of the notebook in a way that we can call that function later.
from pdbsum_prot_interface_statistics_comparing_two_structures import pdbsum_prot_interface_statistics_comparing_two_structures
# The next cell will make the dataframe by calling the function and supplying it with **two** PDB codes as arguments. Then the `df` line at the bottom allows for displaying the produced dataframe.
# Compare the two structures via the imported function, then display the summary.
df = pdbsum_prot_interface_statistics_comparing_two_structures("6kiv","6kiz")
df
# Because the function was supplied with the same PDB codes as when the script was run using `%run` (commandline-like), we see the same result. This route is convenient when working in a Jupyter notebook.
# -----
# Enjoy.
| notebooks/Interface statistics basics and comparing Interface statistics for two structures.ipynb |
# + [markdown] colab_type="text" id="gQskE9NgL-ZB"
# Copyright 2018 The TensorFlow Datasets Authors, Licensed under the Apache License, Version 2.0
# + colab={} colab_type="code" id="RPo1Cw2p83pb"
# !pip install -q tfds-nightly tf-nightly
# + colab={} colab_type="code" id="S-RWB9G48uJA"
import tensorflow as tf
import tensorflow_datasets as tfds
# tfds works in both Eager and Graph modes (TF1-style API).
tf.enable_eager_execution()
# See available datasets
print(tfds.list_builders())
# Construct a tf.data.Dataset for the MNIST training split
dataset = tfds.load(name="mnist", split=tfds.Split.TRAIN)
# Build your input pipeline: shuffle, batch, and prefetch for throughput
dataset = dataset.shuffle(1024).batch(32).prefetch(tf.data.experimental.AUTOTUNE)
for features in dataset.take(1):
    image, label = features["image"], features["label"]
| docs/_index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 7.0
# language: sagemath
# name: sagemath
# ---
%matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import csv
#import scipy.io.wavfile
import scipy.ndimage as sp
#import calendar
# ## Histograms, Means, and Standard Deviations
# +
# Faculty heights sample (inches).
h = [63, 66, 71, 65, 70, 66, 67, 65, 67, 74, 64, 75, 68, 67, 70, 73, 66, 70, 72, 62, 68,
     70, 62, 69, 66, 70, 70, 68, 69, 70, 71, 65, 64, 71, 64, 78, 69, 70, 65, 66, 72, 64]
# Tally the frequency of each height.
d = {}
for i in h:
    d[i] = d.get(i, 0)+1
histlist = []
for i in d:
    histlist.append((i, d.get(i)))
# Fix: sorted() returns a new list, so the result must be kept — the original
# call discarded it, leaving histlist in arbitrary dict-insertion order.
histlist = sorted(histlist)
hist0 = [i for (i, j) in histlist]  # distinct heights, ascending
hist1 = [j for (i, j) in histlist]  # matching frequencies
# +
# Bar chart of the height frequencies computed above.
plt.bar(hist0, hist1, width=1)
plt.title("Faculty Heights Histogram")
plt.xlabel("Height")
# One tick per integer height from 62 up to 79.
plt.xticks(np.arange(78-62+2)+62)
plt.ylabel("Frequency")
fig = plt.gcf()
# +
# NOTE(review): this cell targets the SageMath (Python 2) kernel — `/` below is
# integer division on these int inputs and `sqrt` comes from Sage — verify.
heights_mean = sum(h)/len(h)
# `//len(h)` floor-divides the sum of squared deviations before the sqrt.
heights_stdDev = sqrt(sum([(heights_mean-i)**2 for i in h])//len(h))
heights_mean_auto = np.mean(h)
heights_stdDev_auto = np.std(h)
# NOTE(review): the hand computation truncates, so these match numpy's float
# results only approximately — confirm before relying on equality.
print [heights_mean, heights_mean_auto] #these are equal
print [heights_stdDev, heights_stdDev_auto] #these are equal
# -
# # Correlation
# Legacy manual CSV parse — not used any more; pandas data frames are easier.
# NOTE(review): opening in binary mode 'rb' for csv.reader is a Python 2 idiom.
stringData = []
with open('./stateData.csv','rb') as csvfile:
    stateData = csv.reader(csvfile, delimiter=' ', quotechar='|')
    for line in stateData:
        stringData.append(line)
# Drop the final line and split each remaining row on commas.
data = []
for j in range(len(stringData)-1):
    data.append([i for i in stringData[j][0].split(',')])
# The one-line pandas equivalent (result displayed, not stored).
pd.read_csv('./stateData.csv')
# +
#This was easier than calculating it by hand, and more useful to me.
def findCorrelation(df, test1, test2):
    """Return the sample Pearson correlation between columns `test1` and `test2`.

    Fixes the original, which divided the raw cross-product sum by n while
    using pandas' sample (ddof=1) standard deviations, biasing r by (n-1)/n.
    Vectorized, so it also works for dataframes with a non-default index
    (the old positional `df[col][i]` loop assumed index 0..n-1).
    """
    x = df[test1]
    y = df[test2]
    # Sample covariance (ddof=1) to match pandas' sample std below.
    cov = ((x - x.mean()) * (y - y.mean())).sum() / (len(x) - 1)
    return cov / (x.std() * y.std())
data = pd.read_csv('./stateData.csv')
# Correlation between university-education rate and income across states.
findCorrelation(data,'University','Income')
# +
# Create three column vectors of z-scores ((data - mean) / std) for the
# given categories in the dataframe.
row_vectors = []
for i in ['University','Income','Infant Mort']:
    l = data[i]
    l_vector = (l-l.mean())/l.std()
    row_vectors.append(l_vector)
col_vectors = np.transpose(row_vectors)
# Python 2 print statements (SageMath kernel).
print col_vectors
row_vectors = np.transpose(col_vectors)
print row_vectors
# +
# Fix: scipy.ndimage.imread was deprecated and later removed from SciPy.
# matplotlib's imread (plt is already imported above) loads the PNG instead.
correlation = plt.imread('./correlation.png')
fig = plt.figure(figsize=(15,15))
plt.grid(False)
plt.imshow(correlation)
# -
# ## Linear Regression
# +
# Temperature (t) and cricket-chirp (c) observations.
t = [53,54,58,66,69,70,71,73,81]
c = [19,26,21,33,31,36,36,38,45]
# Sums needed for the least-squares normal equations.
xi = sum(t)
yi = sum(c)
xsqr = sum([i**2 for i in t])
xiyi = sum([t[i]*c[i] for i in range(len(t))])
n = len(t)
# Parenthesised prints work on both Python 2 and Python 3.
print(xi)
print(yi)
print(xsqr)
print(xiyi)
print(n)
# +
# Solve the normal equations  [[Σx², Σx], [Σx, n]] @ [a, b] = [Σxy, Σy]
# for the slope a and intercept b of the best-fit line y = a*x + b.
# np.linalg.solve is more accurate and cheaper than forming the inverse.
A = [[xsqr, xi], [xi, n]]
v = [xiyi, yi]
a, b = np.linalg.solve(A, v)
# Two endpoints of the fitted line for plotting.
y1 = a*t[0]+b
y2 = a*t[n-1]+b
# -
# Plot the raw data and overlay the fitted line between the end temperatures.
plt.plot(t,c)
plt.plot([t[0], t[n-1]],[y1, y2]) #line of best fit
plt.show()
| Day5/bb9_qea_day5_beforeclass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This script was written for post-processing output from Allen Center segmentation directly from gridded array for KDP column related analysis
# +
import numpy as np
# package for 3d visualization
from itkwidgets import view
from aicssegmentation.core.visual import seg_fluo_side_by_side, single_fluorescent_view, segmentation_quick_view
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = [16, 12]
plt.rcParams["font.size"] = 13
# package for io
from aicsimageio import AICSImage, omeTifWriter
#function for core algorithm
from aicssegmentation.core.seg_dot import dot_3d, dot_3d_wrapper, dot_2d_slice_by_slice_wrapper
from aicssegmentation.core.vessel import filament_2d_wrapper
from aicssegmentation.core.pre_processing_utils import intensity_normalization, image_smoothing_gaussian_slice_by_slice
from aicssegmentation.core.utils import hole_filling
from skimage.morphology import remove_small_objects, watershed, dilation, erosion, ball # function for post-processing (size filter)
from skimage.feature import peak_local_max
from skimage.measure import label,regionprops,regionprops_table
from scipy.ndimage import distance_transform_edt
from scipy.stats import norm
# for dataframe compatibility of kdp object properties and matplotlib features for lightning plot
import pandas as pd
import glob
import skimage
from aicsimageio import AICSImage
from aicssegmentation.cli.to_analysis import simple_builder, masked_builder
from datetime import datetime
import matplotlib.dates as mdates
import matplotlib.gridspec as gridspec
from matplotlib.patches import Patch
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
from datetime import timedelta
import matplotlib
import pyart
from copy import deepcopy
from skimage.external import tifffile
from natsort import natsorted
import matplotlib.pyplot as plt
import os
import xarray as xr
# %load_ext autoreload
# %autoreload 2
# -
# ### Create gridded radar files after generating K$_{DP}$ field
# +
# Dimensions of the Cartesian analysis grid, in metres.
x_lower = -75000
x_upper = 45000
xsize = x_upper - x_lower
y_lower = 0
y_upper = 120000
ysize = y_upper - y_lower
# Freezing-level height (m) — reference altitude for KDP column analysis.
frz_lvl=4000
# Horizontal grid spacing (m), vertical extent (m), and vertical spacing (m).
xyresolution=500
min_height=0
max_height=15000 #TODO make max_height dynamic to make script more efficient
zresolution=500
def get_grid(radar):
    """ Returns grid object from radar object.

    Masks suspect gates, retrieves KDP via LP phase processing, and maps
    the result onto the module-level Cartesian grid (Barnes weighting).
    """
    ref=radar.fields['reflectivity']['data']
    # Remove gates with rhohv<0.8 that still look like rain (Z > 20 dBZ) —
    # likely non-meteorological echoes.
    rhohv = radar.fields['cross_correlation_ratio']['data']
    ref_for_kdp = np.greater(ref, 20)
    rhohv_low = np.less(rhohv, 0.80)
    badData = np.logical_and(ref_for_kdp, rhohv_low)
    radar.fields['reflectivity']['data'] = np.ma.masked_where(badData, ref)
    # Synthesize a unity normalized-coherent-power field, required by phase_proc_lp.
    coh_pwr = deepcopy(radar.fields['differential_phase'])
    coh_pwr['data'] = coh_pwr['data']*0.+1.
    radar.fields['norm_coherent_power'] = coh_pwr
    # I chose to grid kdp field separately because the cylp (linear programming)
    # Python package had inconsistencies with existing packages in default conda
    # environment. Therefore, I created a new conda env to install cylp.
    phidp,kdp = pyart.correct.phase_proc_lp(radar,0.0,refl_field='reflectivity',
                                            ncp_field='norm_coherent_power',
                                            rhv_field='cross_correlation_ratio',
                                            phidp_field='differential_phase',
                                            LP_solver='cylp_mp',proc=10, #...Default
                                            low_z=25.,fzl=4200.)
    radar.add_field('phidp',phidp,replace_existing=True)
    radar.add_field('kdp',kdp,replace_existing=True)
    fields = ['kdp']
    # Map radar gates to the regular grid defined by the module-level constants
    # (Barnes weighting, constant 1149 m radius of influence).
    grids = pyart.map.grid_from_radars(radar, grid_shape=(int((max_height-min_height)/zresolution) +1 , int((ysize)/xyresolution) +1,
                                       int((xsize)/xyresolution) +1),grid_limits=((min_height, max_height),
                                       (y_lower, y_upper), (x_lower, x_upper)), fields=fields,
                                       roi_func='constant', gridding_algo="map_gates_to_grid",weighting_function='BARNES',
                                       constant_roi=1149.)
    return grids
# -
# ### Apply 3D segmentation on gridded K$_{DP}$ field
def suggest_normalization_param_custom(structure_img0):
    """Suggest (low_ratio, up_ratio) scaling factors for intensity normalization.

    A Gaussian is fitted to the stack intensities; the upper ratio is the
    smallest multiple of 0.5 whose sigma-offset exceeds the 99.99th
    percentile (backed off by 0.5 if it also exceeds the maximum), and the
    lower ratio is the largest multiple of 0.5 that stays above the minimum.
    """
    mean, std = norm.fit(structure_img0.flat)
    p99 = np.percentile(structure_img0, 99.99)
    pmin = structure_img0.min()
    pmax = structure_img0.max()

    up_ratio = 0
    for step in np.arange(0.5, 1000, 0.5):
        upper = mean + std * step
        if upper > p99:
            up_ratio = step - 0.5 if upper > pmax else step
            break

    low_ratio = 0
    for step in np.arange(0.5, 1000, 0.5):
        if mean - std * step < pmin:
            low_ratio = step - 0.5
            break

    return low_ratio, up_ratio
# +
# Per-volume pipeline: grid KDP, render to RGBA, segment KDP columns with the
# Allen Cell segmenter (spot + filament filters, then watershed), save TIFFs.
filenames = sorted(glob.glob('/path/to/KTLX_data/*V06'))
normal = matplotlib.colors.Normalize(vmin=0.75, vmax=6)
cm = matplotlib.cm.ScalarMappable(norm=normal,cmap='cubehelix_r')
for f in filenames:
    radar = pyart.io.read(f)
    grid = get_grid(radar)
    data = grid.fields['kdp']['data']
    # Keep only KDP >= 0.75 deg/km; weaker values are masked out.
    data_kdp_thresh = np.ma.masked_where(data < 0.75,data)
    arrays = [cm.to_rgba(data_kdp_thresh[i,:,:]) for i in range(31)]
    kdp_stack = np.stack(arrays)
    # Rescale RGBA values to an inverted 0-255 intensity range.
    kdp_stack = np.interp(kdp_stack, (kdp_stack.min(), kdp_stack.max()), (255, 0))
    # Keep vertical levels 8-22 (4-11 km at 500 m spacing) for column detection.
    kdp_stack = kdp_stack[8:23]
    reader = AICSImage(kdp_stack,dims="ZYXC")
    IMG = reader.data
    struct_img0 = IMG[0,:,:,3,:].copy() # NOTE: It is kind of important to note that channel 3 worked best for direct array based segmentation of KDP col objects
    # intensity normalization
    struct_imgcopy = struct_img0.copy()
    low_ratio,up_ratio = suggest_normalization_param_custom(struct_imgcopy)
    ################################
    intensity_scaling_param = [low_ratio,up_ratio]
    gaussian_smoothing_sigma = 1
    ################################
    struct_img = intensity_normalization(struct_imgcopy, scaling_param=intensity_scaling_param)
    # NOTE(review): this loop's condition is False on entry (the params were
    # just copied from low_ratio/up_ratio), so it appears never to run — confirm.
    while ((low_ratio != intensity_scaling_param[0]) or (up_ratio != intensity_scaling_param[1])):
        struct_img = intensity_normalization(struct_imgcopy, scaling_param=intensity_scaling_param)
        low_ratio,up_ratio = suggest_normalization_param_custom(struct_imgcopy)
    # smoothing with gaussian filter
    structure_img_smooth = image_smoothing_gaussian_slice_by_slice(struct_img, sigma=gaussian_smoothing_sigma)
    # s2_param = [[1.25,0.9],[1,0.07],[1,0.01],[1.5,0.005]]
    s2_param = [[1,0.008]]
    ################################
    fill_max_size = 100000
    fill_2d = True
    # 2-D spot (blob) detection applied slice by slice.
    bw_spot = dot_2d_slice_by_slice_wrapper(structure_img_smooth, s2_param)
    # bw_spot_fill = hole_filling(bw_spot, 100, fill_max_size, fill_2d)
    ################################
    ## PARAMETERS for this step ##
    # f2_param = [[1.25, 0.07],[1.25,0.05]]
    f2_param = [[1, 0.01]]
    ################################
    # 2-D filament (vessel) detection applied slice by slice.
    bw_filament = filament_2d_wrapper(structure_img_smooth, f2_param)
    # bw_filament_fill = hole_filling(bw_filament, 100, fill_max_size, fill_2d)
    # Combine the output from spot and filament filters
    bw = np.logical_or(bw_spot, bw_filament)
    bw_fill = hole_filling(bw, 100, fill_max_size, fill_2d)
    bw_fill = np.invert(bw_fill)
    # watershed: seed at local intensity peaks, flood the distance transform
    minArea = 50
    Mask = remove_small_objects(bw_fill>0, min_size=minArea, connectivity=1, in_place=False)
    Seed = dilation(peak_local_max(struct_img,labels=label(Mask), min_distance=2, indices=False), selem=ball(1))
    Watershed_Map = -1*distance_transform_edt(bw_fill)
    seg = watershed(Watershed_Map, label(Seed), mask=Mask, watershed_line=True)
    ################################
    ## PARAMETERS for this step ##
    minArea = 50
    ################################
    seg = remove_small_objects(seg>0, min_size=minArea, connectivity=1, in_place=False)
    # seg = np.ma.masked_where(mask==False,seg)
    # np.ma.set_fill_value(seg,-999)
    # Reorient the volume and convert the boolean mask to a 0/255 uint8 image.
    seg = np.swapaxes(seg,0,2)
    seg = np.invert(seg)
    seg = seg >0
    out=seg.astype(np.uint8)
    out = 1 - out
    out[out>0]=255
    out = np.rot90(out[:,:,:],axes=[1,2])
    # plt.pcolormesh(out[0,:,:])
    # Name the output by the timestamp portion of the radar filename.
    fsave_name = f.split('/')[-1][13:19]
    plt.pcolormesh(out[0,:,:])
    writer = omeTifWriter.OmeTifWriter(f'/path/to/segmentation_direct_array/{fsave_name}.tiff')
    writer.save(out)
# +
# This cell is just a sanity check to see if segmented objects are labelled as expected.
# We are plotting the 4 km grid level for each gridded radar volume.
segmented_files = sorted(glob.glob('/path/to/segmentation_direct_array/*.tiff'))
# plt.pcolormesh(np.rot90(cell_seg_labeled[1,:,::-1],axes=[0,1]))
for f in segmented_files:
    reader1 = AICSImage(f)
    dttt = reader1.data[0,0,:,:,:]
    # Connected-component labelling of the binary segmentation volume.
    cell_seg_labeled = skimage.measure.label(dttt)
    n_obj = len(regionprops(cell_seg_labeled))
    plt.pcolormesh(cell_seg_labeled[0,::-1,:])
    plt.show()
# +
# This will create a napari viewer for the entire 4D (t,x,y,z) stack created for segmented images
import napari
from dask_image.imread import imread
from skimage import measure
# Lazily load the whole time series of segmentation TIFFs as one dask array.
stack = imread("/path/to/segmentation_direct_array/*.tiff")
from vispy.color import Colormap
import matplotlib.cm as cm
import pyart
# Since napari has limited colormaps and we want to use our custom colormap,
# sample 15 colours from Py-ART's HomeyerRainbow into a vispy Colormap.
cmap = cm.get_cmap('pyart_HomeyerRainbow', 15)
rgb_list = []
for i in range(cmap.N):
    rgb = cmap(i)[:3]
    rgb_list.append(rgb)
# Force the first colour to black so the background (label 0) stays dark.
rgb_list[0] = (0,0,0)
cmap = Colormap(rgb_list)
def last3dims(f):
    """Adapt `f`, which expects a 3-D array, to 4-D dask blocks.

    The returned wrapper applies `f` to block[0] and restores the leading
    singleton axis on the result so block shapes are preserved.
    (Concept credit: https://napari.org/tutorials/dask)
    """
    def wrapped(block):
        return f(block[0])[None, ...]
    return wrapped
# Label connected components lazily on each dask block, then open the
# 4-D (t, z, y, x) labelled stack in a napari viewer with the custom colormap.
label = last3dims(measure.label)
labeled = stack.map_blocks(label)
with napari.gui_qt():
    napari.view_image(labeled, contrast_limits=[0,15], colormap = ('HomeyerRainbow',cmap), is_pyramid=False)
| notebooks/segment_from_grid_array_kdp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Perform Chi-Square test for Bank Churn prediction (find out different patterns on customer leaves the bank) . Here I am considering only few columns to make things clear
# ### Import libraries
import numpy as numpy
import pandas as pd
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', -1)
# ### Get the data
# Load the churn dataset straight from GitHub.
churn_df = pd.read_csv("https://raw.githubusercontent.com/divyanshu132/Bank-Customer-Churn-Prediction/master/Churn_Modelling.csv")
churn_df.head()
# Quick value distributions for a few columns.
churn_df.CreditScore.value_counts()
churn_df.EstimatedSalary.value_counts()
churn_df.Tenure.value_counts()
# +
# user is with bank for 9 months --> tenure 1
# user is with bank for 13 months---> tenure 2
# -
churn_df.shape
churn_df.describe()
# Count missing values per column.
churn_df.isna().sum()
# ### Here we have 4 category predictors and one category response. Exited, the response column, represents whether the customer left the bank or not.
# ## Before performing the Chi-Square test we have to make sure the data is label encoded.
# +
# Encode the string-valued categorical columns as integer labels.
label_encoder = LabelEncoder()
# fit_transform both fits and applies the encoder, so the separate
# fit(churn_df['Geography']) call in the original was redundant.
churn_df['Geography'] = label_encoder.fit_transform(churn_df['Geography'])
churn_df['Gender'] = label_encoder.fit_transform(churn_df['Gender'])
churn_df['Surname'] = label_encoder.fit_transform(churn_df['Surname'])
# -
churn_df.Geography.value_counts()
churn_df.head()
# ## Chi-Square test
from sklearn.feature_selection import chi2
churn_df
# Chi-square statistic between each (label-encoded) feature and the target.
X = churn_df.drop('Exited',axis=1)
y = churn_df['Exited']
categorical_columns = ["Surname", "Geography", "Gender", "Tenure", "NumOfProducts", "HasCrCard", "IsActiveMember"]
chi_scores = chi2(X, y)
chi_scores
X.columns
chi_scores[1]
# ### here the first array represents chi-square values and the second array represents p-values
# Fix: p_values must be defined before it is displayed — the original
# referenced the name before this assignment, raising a NameError.
p_values = pd.Series(chi_scores[1], index = X.columns)
p_values.sort_values(ascending = False , inplace = True)
p_values
p_values.plot.bar()
# # Since HasCrCard has higher the p-value, it says that this variables is independent of the repsone --> Exited and can not be considered for model training
| FeatureSelection_ChiSquareTest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # [Machine Learning with CoreML](https://www.packtpub.com/big-data-and-business-intelligence/machine-learning-core-ml)
# **By:** <NAME> (Author)
# **Publisher:** [Packt Publishing](https://www.packtpub.com/)
# ## Chapter 7 - Fast Neural Style Transfer
# This notebook is concerned with extracting the **content** from an image and using this to *steer* the network (loss function).
#
# At a highlevel; this is achieved by using a model ([VGG16](https://gist.github.com/baraldilorenzo/07d7802847aaad0a35d3)) that has been trained to perform object recognition. The features it learns is classify the object within the image is what we use for both style and content.
#
# The model is made up of a series of convolutional layers where these layers establish **feature maps** that can be seen as the models internal representation of the image content. Typically; the shallow layers represent basic shapes but deeper layers represent more abstract features (as they operate on a layer scale and thus have a higher-level representation of the image). The image below illustrates the *features* of an image which are activated at each of the layers.
# <img src="images/layer_activations.png" />
# Therefore; to compare our generated image to the content image we can extract features vectors from the deeper layers and calculate a distance (with the goal of nearing 0). The image below illustrates this process and is the purpose of this notebook.
# <img src="images/content_loss.png" width="80%" />
import warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
from builtins import range, input
from keras.layers import Input, Lambda, Dense, Flatten
from keras.layers import AveragePooling2D, MaxPooling2D
from keras.layers.convolutional import Conv2D
from keras.models import Model, Sequential
from keras.applications.vgg16 import VGG16
from keras.applications.vgg16 import preprocess_input
from keras.preprocessing import image
import keras.backend as K
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import fmin_l_bfgs_b
from datetime import datetime
# +
### Re-create VGG16; replacing MaxPooling with AveragePooling
# -
def VGG16_AvgPool(shape):
    """Builds VGG16 (ImageNet weights, headless) with every MaxPooling2D
    layer replaced by AveragePooling2D, which yields smoother feature
    maps for style transfer."""
    base = VGG16(input_shape=shape, weights='imagenet', include_top=False)
    model = Sequential()
    for base_layer in base.layers:
        replace_pool = base_layer.__class__ == MaxPooling2D
        model.add(AveragePooling2D() if replace_pool else base_layer)
    return model
def VGG16_Cutoff(shape, num_convs):
    """Returns the average-pool VGG16 truncated just after its
    `num_convs`-th convolutional layer.

    There are 13 convolutions in total; any of them can serve as the
    output of the truncated model.
    """
    full_model = VGG16_AvgPool(shape)
    truncated = Sequential()
    conv_count = 0
    for current_layer in full_model.layers:
        truncated.add(current_layer)
        if current_layer.__class__ == Conv2D:
            conv_count += 1
            if conv_count >= num_convs:
                break
    return truncated
def unpreprocess(img):
    """Reverses VGG16's `preprocess_input` on a BGR image array.

    Adds back the ImageNet per-channel means (103.939, 116.779, 123.68
    in B, G, R order) that preprocessing subtracted, then flips the
    channel axis from BGR back to RGB for display.

    Args:
        img: float array whose last axis holds BGR channels; the mean
            addition mutates it in place.

    Returns:
        The de-meaned image with channels reversed to RGB order.
    """
    # Bug fix: the red-channel ImageNet mean is 123.68 (the original
    # code used 126.68, leaving a visible blue/red tint on output).
    img[..., 0] += 103.939
    img[..., 1] += 116.779
    img[..., 2] += 123.68
    img = img[..., ::-1]
    return img
def scale_img(img):
    """Linearly rescales an array so its values span [0, 1]."""
    shifted = img - img.min()
    return shifted / shifted.max()
def gram_matrix(img):
    """
    Input is (H, W, C) (C = # feature maps);
    we first need to convert it to HW, C
    """
    # Move channels first, then flatten each feature map: X is (C, H*W).
    X = K.batch_flatten(K.permute_dimensions(img, (2, 0, 1)))
    # Now calculate the gram matrix
    # gram = XX^T / N
    # The constant is not important since we'll be weighting these
    # NOTE(review): get_shape().num_elements() is TF1-era API and assumes
    # the static shape is fully defined — confirm against the TF version in use.
    G = K.dot(X, K.transpose(X)) / img.get_shape().num_elements()
    return G
def style_loss(y, t):
    """Mean squared distance between the Gram matrices of the
    generated image `y` and the style target `t`."""
    gram_diff = gram_matrix(y) - gram_matrix(t)
    return K.mean(K.square(gram_diff))
def minimize(fn, epochs, batch_shape):
    """Runs L-BFGS-B for `epochs` rounds on a randomly initialised image.

    Args:
        fn: callable mapping a flat image vector to (loss, gradient),
            both float64, as required by fmin_l_bfgs_b.
        epochs (int): number of optimisation rounds (20 fn evals each).
        batch_shape: the (1, H, W, C) shape to reshape the result into.

    Returns:
        The de-processed (H, W, C) image from the optimised vector.
    """
    start_time = datetime.now()
    loss_history = []
    # Start from flat Gaussian noise; the optimiser works on 1-D vectors.
    img_vec = np.random.randn(np.prod(batch_shape))
    for epoch in range(epochs):
        img_vec, loss_value, _ = fmin_l_bfgs_b(
            func=fn,
            x0=img_vec,
            maxfun=20)
        # Keep pixel values inside a displayable range between rounds.
        img_vec = np.clip(img_vec, -127, 127)
        print("iteration {} loss {}".format(epoch, loss_value))
        loss_history.append(loss_value)
    end_time = datetime.now()
    print("duration: {}".format(end_time - start_time))
    plt.plot(loss_history)
    plt.show()
    result = img_vec.reshape(*batch_shape)
    result = unpreprocess(result)
    return result[0]
def process(img_path):
    """Optimises a random image to reproduce the style of `img_path`.

    Builds an average-pool VGG16, takes the first convolution of each
    block as style targets, and minimises the summed Gram-matrix style
    loss via L-BFGS-B. Displays the resulting image; returns None.
    """
    img = image.load_img(img_path)
    # convert image to array and preprocess for vgg
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    # grab the shape
    batch_shape = x.shape
    shape = x.shape[1:]
    # lets take the first convolution of each block
    # to be the target outputs
    vgg = VGG16_AvgPool(shape)
    # Note: you need to select the output at index 1, since the
    # output at index 0 corresponds to the original vgg with maxpool
    symbloic_conv_outputs = [
        layer.get_output_at(1) for layer in vgg.layers if layer.name.endswith('conv1')
    ]
    # Pick the earlier layers for more "localised" representation;
    # this is the opposite to the content model where the
    # later layers represent a more "global" structure
    # symbloic_conv_outputs = symbloic_conv_outputs[:2] # example of a subset
    # Make a big model that outputs multiple output layers
    multi_output_model = Model(vgg.input, symbloic_conv_outputs)
    # calculate the targets that are outputs for each layer
    style_layer_outputs = [K.variable(y) for y in multi_output_model.predict(x)]
    # calculate the total style loss
    loss = 0
    for symbolic, actual in zip(symbloic_conv_outputs, style_layer_outputs):
        # gram_matrix() expects a (H, W, C) as input; [0] drops the batch axis.
        loss += style_loss(symbolic[0], actual[0])
    grads = K.gradients(loss, multi_output_model.input)
    # Single graph call returning loss plus gradients w.r.t. the input image.
    get_loss_and_grads = K.function(
        inputs=[multi_output_model.input],
        outputs=[loss] + grads)
    def get_loss_and_grads_wrapper(x_vec):
        """Adapts get_loss_and_grads() for scipy's minimizer.

        scipy's minimizer allows us to pass back the function value f(x)
        and its gradient f'(x) simultaneously rather than using the
        fprime arg, but it works on flat 1-D float64 arrays: the input
        is reshaped to a batch of one image, and both loss and gradient
        are cast to np.float64 to avoid type errors.
        """
        l, g = get_loss_and_grads([x_vec.reshape(*batch_shape)])
        return l.astype(np.float64), g.flatten().astype(np.float64)
    final_img = minimize(get_loss_and_grads_wrapper, 10, batch_shape)
    plt.imshow(scale_img(final_img))
    plt.show()
# Path to the style image; evaluating this cell kicks off the optimisation.
STYLE_IMAGE = "../images/Van_Gogh-Starry_Night.jpg"
process(STYLE_IMAGE)
| Chapter06/Notebooks/Training/NeuralStyleTransfer_Training_Content.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import cv2
import numpy as np
# Load the test image (OpenCV reads it in BGR channel order).
img = cv2.imread('apples.jpg')
# Pixel access: the BGR triple at row 100, column 100.
px=img[100, 100]
print(px)
cv2.imshow('ORIGINAL', img)
# Block until a key is pressed so the window stays visible.
cv2.waitKey(0)
# Convert BGR -> single-channel grayscale.
img_gray=cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
cv2.imshow('GRAY', img_gray)
cv2.waitKey(0)
cv2.destroyAllWindows()
| opencv/core/basic_ops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../../images/qiskit-heading.gif" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" width="500 px" align="left">
from qiskit import *
from qiskit.compiler import transpile
from qiskit.visualization import plot_histogram
from qiskit.tools.monitor import job_monitor
IBMQ.load_accounts()
# %matplotlib inline
# # Mapping virtual qubits in a circuit to physical qubits on a quantum device
#
#
# In this tutorial we will see how to manually set the mapping from virtual circuit qubits to the physical qubits on a real quantum backend. To begin, let us consider the following circuit that constructs a five qubit GHZ state:
# +
qr = QuantumRegister(5,'qr')
cr = ClassicalRegister(5, 'cr')
# Build a 5-qubit GHZ circuit: H on the middle qubit, CNOTs fanning out
# toward both ends, then a barrier and full measurement.
ghz = QuantumCircuit(qr, cr, name='ghz')
ghz.h(qr[2])
ghz.cx(qr[2], qr[1])
ghz.cx(qr[1], qr[0])
ghz.cx(qr[2], qr[3])
ghz.cx(qr[3], qr[4])
ghz.barrier(qr)
ghz.measure(qr, cr)
ghz.draw()
# -
# ## Default circuit mapping via the transpiler
# Let us use the transpiler to generate the circuit that is mapped onto the
# Target a real 14-qubit backend; transpile() picks the default layout.
device = IBMQ.get_backend('ibmq_16_melbourne')
# device.
trans_ghz = transpile(ghz, device)
trans_ghz.draw()
# We see that the output circuit has the gate set mapped to the first five qubits on the device (0->4).
#
# To gauge the quality of this mapping, lets compute the depth of the output circuit.
trans_ghz.depth()
# This depth is greater than that of the input
ghz.depth()
# because the transpiler had to reverse some of the CNOT gates in the circuit to match the device topology (as seen below).
# ## Better results via manual mapping
# It turns out that the GHZ circuit can be mapped exactly onto the Melbourne backend; the output circuit has the same depth as the input. To do the mapping we just need to find which qubits to map to. This is most easily done using the Jupyter magic function:
from qiskit.tools.jupyter import *
# %qiskit_backend_monitor device
# Upon inspection one can see that the GHZ circuit matches the topology provided that the circuit qubits are mapped to the device qubits using the following mapping: `0->10, 1->4, 2->5, 3->6, 4->8`. What's better, these qubits also have lower error rates, in both gates and measurements, than the qubits selected in the default mapping.
#
# In qiskit we can express this mapping using a list:
layout = [10, 4, 5, 6, 8]
# or equivalently using a dictionary that uses quantum register qubits as the keys, and device qubits as the corresponding values:
layout = {qr[0]: 10, qr[1]: 4, qr[2]: 5, qr[3]: 6, qr[4]: 8}
# To use this mapping in the transpiler or in the `execute` function we can just set the layout as the `initial_layout` keyword argument to the functions:
# Pass the hand-picked layout so the transpiler keeps our qubit choice.
trans_ghz_mapped = transpile(ghz, device, initial_layout=layout)
trans_ghz_mapped.draw()
trans_ghz_mapped.depth()
# As we have verified, the returned circuit does exactly match the topology with the given layout.
# ## Running on the device
# Lets now compare the performance of the circuits transpiled with the default and hand-selected layouts by running them on the device:
# Run both transpiled circuits in a single job for a fair comparison.
job = execute([trans_ghz, trans_ghz_mapped], device)
job_monitor(job)
res = job.result()
# Counts are indexed in circuit-submission order: 0 = default, 1 = mapped.
plot_histogram([res.get_counts(0), res.get_counts(1)],
               figsize=(15,5),
               legend=['default', 'mapped'])
# It is clear that the circuits that we mapped to the ideal sub-graph of the device (red) give an answer that is much closer to the ideal distribution than those mapped in the default way.
| qiskit/terra/device_mapping.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Policy Gradients and A2C
#
# In the <a href="../dqn/dqns_on_gcp.ipynb">previous notebook</a>, we learned how to use hyperparameter tuning to help DQN agents balance a pole on a cart. In this notebook, we'll explore two other types of alogrithms: Policy Gradients and A2C.
#
# ## Setup
#
# Hypertuning takes some time, and in this case, it can take anywhere between **10 - 30 minutes**. If this hasn't been done already, run the cell below to kick off the training job now. We'll step through what the code is doing while our agents learn.
# !pip install tensorflow==2.5 --user
# + language="bash"
# BUCKET=<your-bucket-here> # Change to your bucket name
# JOB_NAME=pg_on_gcp_$(date -u +%y%m%d_%H%M%S)
# REGION='us-central1' # Change to your bucket region
# IMAGE_URI=gcr.io/cloud-training-prod-bucket/pg:latest
#
# gcloud ai-platform jobs submit training $JOB_NAME \
# --staging-bucket=gs://$BUCKET \
# --region=$REGION \
# --master-image-uri=$IMAGE_URI \
# --scale-tier=BASIC_GPU \
# --job-dir=gs://$BUCKET/$JOB_NAME \
# --config=templates/hyperparam.yaml
# -
# !pip install gym==0.12.5 --user
# **Note**: Restart the kernel if the above libraries needed to be installed
#
# Thankfully, we can use the same environment for these algorithms as DQN, so this notebook will focus less on the operational work of feeding our agents the data, and more on the theory behind these algorthims. Let's start by loading our libraries and environment.
# +
import gym
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras import backend as K
CLIP_EDGE = 1e-8
def print_state(state, step, reward=None):
    """Pretty-prints a CartPole observation plus the step index and reward."""
    template = ('Step {0} - Cart X: {1:.3f}, Cart V: {2:.3f}, '
                'Pole A: {3:.3f}, Pole V:{4:.3f}, Reward:{5}')
    fields = (step,) + tuple(state) + (reward,)
    print(template.format(*fields))
env = gym.make('CartPole-v0')
# -
# ## The Theory Behind Policy Gradients
#
# Whereas Q-learning attempts to assign each state a value, Policy Gradients tries to find actions directly, increasing or decreaing a chance to take an action depending on how an episode plays out.
#
# To compare, Q-learning has a table that keeps track of the value of each combination of state and action:
#
# || Meal | Snack | Wait |
# |-|-|-|-|
# | Hangry | 1 | .5 | -1 |
# | Hungry | .5 | 1 | 0 |
# | Full | -1 | -.5 | 1.5 |
#
# Instead for Policy Gradients, we can imagine that we have a similar table, but instead of recording the values, we'll keep track of the probability to take the column action given the row state.
#
# || Meal | Snack | Wait |
# |-|-|-|-|
# | Hangry | 70% | 20% | 10% |
# | Hungry | 30% | 50% | 20% |
# | Full | 5% | 15% | 80% |
#
# With Q learning, whenever we take one step in our environment, we can update the value of the old state based on the value of the new state plus any rewards we picked up based on the [Q equation](https://en.wikipedia.org/wiki/Q-learning):
#
# <img style="background-color:white;" src="https://wikimedia.org/api/rest_v1/media/math/render/svg/47fa1e5cf8cf75996a777c11c7b9445dc96d4637">
#
# Could we do the same thing if we have a table of probabilities instead values? No, because we don't have a way to calculate the value of each state from our table. Instead, we'll use a different <a href="http://incompleteideas.net/papers/sutton-88-with-erratum.pdf"> Temporal Difference Learning</a> strategy.
#
# Q Learning is an evolution of TD(0), and for Policy Gradients, we'll use TD(1). We'll calculate TD(1) across an entire episode, and use that to indicate whether to increase or decrease the probability corresponding to the action we took. Let's look at a full day of eating.
#
# | Hour | State | Action | Reward |
# |-|-|-|-|
# |9| Hangry | Wait | -.9 |
# |10| Hangry | Meal | 1.2 |
# |11| Full | Wait | .5 |
# |12| Full | Snack | -.6 |
# |13| Full | Wait | 1 |
# |14| Full | Wait | .6 |
# |15| Full | Wait | .2 |
# |16| Hungry | Wait | 0 |
# |17| Hungry | Meal | .4 |
# |18| Full | Wait| .5 |
#
# We'll work backwards from the last day, using the same discount, or `gamma`, as we did with DQNs. The `total_rewards` variable is equivalent to the value of state prime. Using the [Bellman Equation](https://en.wikipedia.org/wiki/Bellman_equation), everytime we calculate the value of a state, s<sub>t</sub>, we'll set that as the value of state prime for the state before, s<sub>t-1</sub>.
# +
test_gamma = .5 # Please change me to be between zero and one
episode_rewards = [-.9, 1.2, .5, -.6, 1, .6, .2, 0, .4, .5]
def discount_episode(rewards, gamma):
    """Computes TD(1) discounted returns for a full episode.

    Works backwards from the final step so each entry t receives
    rewards[t] + gamma * (discounted return of step t+1).

    Args:
        rewards: sequence of per-step rewards.
        gamma (float): discount rate, between zero and one.

    Returns:
        np.ndarray of float64 discounted returns, same length as rewards.
    """
    # Accumulate in float explicitly: np.zeros_like on an int sequence
    # would silently truncate the fractional discounted values.
    discounted_rewards = np.zeros(len(rewards), dtype=np.float64)
    total_rewards = 0.0
    for t in reversed(range(len(rewards))):
        total_rewards = rewards[t] + total_rewards * gamma
        discounted_rewards[t] = total_rewards
    return discounted_rewards
discount_episode(episode_rewards, test_gamma)
# -
# Wherever our discounted reward is positive, we'll increase the probability corresponding to the action we took. Similarly, wherever our discounted reward is negative, we'll decrease the probabilty.
#
# However, with this strategy, any actions with a positive reward will have it's probability increase, not necessarily the most optimal action. This puts us in a feedback loop, where we're more likely to pick less optimal actions which could further increase their probability. To counter this, we'll divide the size of our increases by the probability to choose the corresponding action, which will slow the growth of popular actions to give other actions a chance.
#
# Here is our update rule for our neural network, where alpha is our learning rate, and pi is our optimal policy, or the probability to take the optimal action, a<sup>*</sup>, given our current state, s.
#
# <img src="images/weight_update.png" width="200" height="100">
#
# Doing some fancy calculus, we can combine the numerator and denominator with a log function. Since it's not clear what the optimal action is, we'll instead use our discounted rewards, or G, to increase or decrease the weights of the respective action the agent took. A full breakdown of the math can be found in [this article by <NAME>](https://medium.com/@thechrisyoon/deriving-policy-gradients-and-implementing-reinforce-f887949bd63).
#
# <img src="images/weight_update_calculus.png" width="300" height="150">
#
# Below is what it looks like in code. `y_true` is the [one-hot encoding](https://en.wikipedia.org/wiki/One-hot) of the action that was taken. `y_pred` is the probabilty to take each action given the state the agent was in.
# Policy-gradient loss: negative log-likelihood of the taken action,
# weighted by the discounted return g. Clipping avoids log(0).
# NOTE(review): `g` is a free variable in this illustrative snippet — it
# is only runnable where a tensor named g is in scope (the same loss is
# later defined as a closure inside build_networks).
def custom_loss(y_true, y_pred):
    y_pred_clipped = K.clip(y_pred, CLIP_EDGE, 1-CLIP_EDGE)
    log_likelihood = y_true * K.log(y_pred_clipped)
    return K.sum(-log_likelihood*g)
# We won't have the discounted rewards, or `g`, when our agent is acting in the environment. No problem, we'll have one neural network with two types of pathways. One pathway, `predict`, will be the probability to take an action given an inputed state. It's only used for prediction and is not used for backpropogation. The other pathway, `policy`, will take both a state and a discounted reward, so it can be used for training.
#
# The code in its entirety looks like this. As with Deep Q Networks, the hidden layers of a Policy Gradient can use a CNN if the input state is pixels, but the last layer is typically a [Dense](https://keras.io/layers/core/) layer with a [Softmax](https://en.wikipedia.org/wiki/Softmax_function) activation function to convert the output into probabilities.
def build_networks(
        state_shape, action_size, learning_rate, hidden_neurons):
    """Creates a Policy Gradient Neural Network.
    Creates a two hidden-layer Policy Gradient Neural Network. The loss
    function is altered to be a log-likelihood function weighted
    by the discounted reward, g.
    Args:
        space_shape: a tuple of ints representing the observation space.
        action_size (int): the number of possible actions.
        learning_rate (float): the neural network's learning rate.
        hidden_neurons (int): the number of neurons to use per hidden
            layer.
    Returns:
        (policy, predict): `policy` takes [state, g] and is compiled for
        training; `predict` shares the same layers/weights but takes only
        a state, for action selection.
    """
    state_input = layers.Input(state_shape, name='frames')
    # g carries the (normalized) discounted return; it only feeds the loss.
    g = layers.Input((1,), name='g')
    hidden_1 = layers.Dense(hidden_neurons, activation='relu')(state_input)
    hidden_2 = layers.Dense(hidden_neurons, activation='relu')(hidden_1)
    probabilities = layers.Dense(action_size, activation='softmax')(hidden_2)
    def custom_loss(y_true, y_pred):
        # Negative log-likelihood of the taken action weighted by g;
        # clipping keeps log() away from zero.
        y_pred_clipped = K.clip(y_pred, CLIP_EDGE, 1-CLIP_EDGE)
        log_lik = y_true*K.log(y_pred_clipped)
        return K.sum(-log_lik*g)
    policy = models.Model(
        inputs=[state_input, g], outputs=[probabilities])
    optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
    policy.compile(loss=custom_loss, optimizer=optimizer)
    # Prediction-only path: same weights, no g input required.
    predict = models.Model(inputs=[state_input], outputs=[probabilities])
    return policy, predict
# Let's get a taste of how these networks function. Run the below cell to build our test networks.
# +
# CartPole: 4-number observation, 2 discrete actions (left / right).
space_shape = env.observation_space.shape
action_size = env.action_space.n
# Feel free to play with these
test_learning_rate = .2
test_hidden_neurons = 10
test_policy, test_predict = build_networks(
    space_shape, action_size, test_learning_rate, test_hidden_neurons)
# -
# We can't use the policy network until we build our learning function, but we can feed a state to the predict network so we can see our chances to pick our actions.
state = env.reset()
test_predict.predict(np.expand_dims(state, axis=0))
# Right now, the numbers should be close to `[.5, .5]`, with a little bit of variance due to the randomization of initializing the weights and the cart's starting position. In order to train, we'll need some memories to train on. The memory buffer here is simpler than DQN, as we don't have to worry about random sampling. We'll clear the buffer every time we train as we'll only hold one episode's worth of memory.
class Memory():
    """Episode buffer for Policy Gradient training.

    Holds one episode's (state, action, reward) tuples; sampling
    returns the whole episode and empties the buffer.

    Args:
        gamma (float): The "discount rate" used to assess TD(1) values.
    """
    def __init__(self, gamma):
        self.gamma = gamma
        self.buffer = []

    def add(self, experience):
        """Appends a (state, action, reward) tuple to the buffer."""
        self.buffer.append(experience)

    def sample(self):
        """Returns the episode as ([states], [actions], [rewards]) arrays
        and clears the buffer for the next episode."""
        # Transpose row-wise experiences into per-field columns.
        columns = np.array(self.buffer).T.tolist()
        states = np.array(columns[0], dtype=np.float32)
        actions = np.array(columns[1], dtype=np.int8)
        rewards = np.array(columns[2], dtype=np.float32)
        self.buffer = []
        return states, actions, rewards
# Let's make a fake buffer to get a sense of the data we'll be training on. The cell below initializes our memory and runs through one episode of the game by alternating pushing the cart left and right.
#
# Try running it to see the data we'll be using for training.
# +
# Fill the buffer with one scripted episode: alternate left/right pushes
# until the pole falls (or 200 steps elapse).
test_memory = Memory(test_gamma)
actions = [x % 2 for x in range(200)]
state = env.reset()
step = 0
episode_reward = 0
done = False
while not done and step < len(actions):
    action = actions[step]  # In the future, our agents will define this.
    state_prime, reward, done, info = env.step(action)
    episode_reward += reward
    test_memory.add((state, action, reward))
    step += 1
    state = state_prime
test_memory.sample()
# -
# Ok, time to start putting together the agent! Let's start by giving it the ability to act. Here, we don't need to worry about exploration vs exploitation because we already have a random chance to take each of our actions. As the agent learns, it will naturally shift from exploration to exploitation. How conveient!
class Partial_Agent():
    """A reinforcement learning agent that acts in a game environment
    via a Policy Gradient network (learning is attached separately)."""

    def __init__(self, policy, predict, memory, action_size):
        """Stores the Policy Gradient networks and memory buffer.

        Args:
            policy: The policy network created from build_networks().
            predict: The predict network created from build_networks().
            memory: A Memory class object.
            action_size (int): The number of possible actions to take.
        """
        self.policy = policy
        self.predict = predict
        self.memory = memory
        self.action_size = action_size

    def act(self, state):
        """Samples an action index from the network's output distribution.

        The softmax probabilities already balance exploration against
        exploitation, so no epsilon schedule is needed.

        Args:
            state (list of numbers): The state of the environment to act on.

        Returns:
            (int) The index of the action to take.
        """
        batched_state = np.expand_dims(state, axis=0)
        action_probs = self.predict.predict(batched_state)[0]
        return np.random.choice(self.action_size, p=action_probs)
# Let's see the act function in action. First, let's build our agent.
test_agent = Partial_Agent(test_policy, test_predict, test_memory, action_size)
# Next, run the below cell a few times to test the `act` method. Is it about a 50/50 chance to push right instead of left?
action = test_agent.act(state)
print("Push Right" if action else "Push Left")
# Now for the most important part. We need to give our agent a way to learn! To start, we'll [one-hot encode](https://en.wikipedia.org/wiki/One-hot) our actions. Since the output of our network is a probability for each action, we'll have a 1 corresponding to the action that was taken and 0's for the actions we didn't take.
#
# That doesn't give our agent enough information on whether the action that was taken was actually a good idea, so we'll also use our `discount_episode` to calculate the TD(1) value of each step within the episode.
#
# One thing to note, is that CartPole doesn't have any negative rewards, meaning, even if it does terribly, the agent will still think the run went well. To help counter this, we'll take the mean and standard deviation of our discounted rewards, or `discount_mb`, and use that to find the [Standard Score](https://en.wikipedia.org/wiki/Standard_score) for each discounted reward. With this, steps close to dropping the poll will have a negative reward.
# +
def learn(self, print_variables=False):
    """Trains a Policy Gradient policy network based on stored experiences.

    Args:
        print_variables (bool): if True, prints the intermediate
            training arrays for inspection.

    Returns:
        The loss from one `train_on_batch` call over the episode.
    """
    state_mb, action_mb, reward_mb = self.memory.sample()
    # One hot encode actions
    actions = np.zeros([len(action_mb), self.action_size])
    actions[np.arange(len(action_mb)), action_mb] = 1
    if print_variables:
        print("action_mb:", action_mb)
        print("actions:", actions)
    # Apply TD(1) and normalize to standard scores so weak episodes
    # still produce negative training signals.
    # NOTE(review): np.std is 0 when every reward is identical, which
    # would divide by zero here — confirm this can't occur in practice.
    discount_mb = discount_episode(reward_mb, self.memory.gamma)
    discount_mb = (discount_mb - np.mean(discount_mb)) / np.std(discount_mb)
    if print_variables:
        print("reward_mb:", reward_mb)
        print("discount_mb:", discount_mb)
    return self.policy.train_on_batch([state_mb, discount_mb], actions)
# Attach the method and rebuild the agent so it picks up learn().
Partial_Agent.learn = learn
test_agent = Partial_Agent(test_policy, test_predict, test_memory, action_size)
# -
# Try adding in some print statements to the code above to get a sense of how the data is transformed before feeding it into the model, then run the below code to see it in action.
# Finally, it's time to put it all together. Policy Gradient Networks have less hypertuning parameters than DQNs, but since our custom loss constructs a [TensorFlow Graph](https://www.tensorflow.org/api_docs/python/tf/Graph) under the hood, we'll set up lazy execution by wrapping our traing steps in a default graph.
#
# By changing `test_gamma`, `test_learning_rate`, and `test_hidden_neurons`, can you help the agent reach a score of 200 within 200 episodes? It takes a little bit of thinking and a little bit of luck.
#
# Hover the curser <b title="gamma=.9, learning rate=0.002, neurons=50">on this bold text</b> to see a solution to the challenge.
# +
# Hyperparameters to tune; see the hover-text hint above for one solution.
test_gamma = .5
test_learning_rate = .01
test_hidden_neurons = 100
# custom_loss builds a TF graph under the hood, so run inside one graph.
with tf.Graph().as_default():
    test_memory = Memory(test_gamma)
    test_policy, test_predict = build_networks(
        space_shape, action_size, test_learning_rate, test_hidden_neurons)
    test_agent = Partial_Agent(test_policy, test_predict, test_memory, action_size)
    for episode in range(200):
        state = env.reset()
        episode_reward = 0
        done = False
        # Play one full episode, storing every transition.
        while not done:
            action = test_agent.act(state)
            state_prime, reward, done, info = env.step(action)
            episode_reward += reward
            test_agent.memory.add((state, action, reward))
            state = state_prime
        # Train once per episode on the whole trajectory.
        test_agent.learn()
        print("Episode", episode, "Score =", episode_reward)
# -
# # The Theory Behind Actor - Critic
#
# Now that we have the hang of Policy Gradients, let's combine this strategy with Deep Q Agents. We'll have one architecture to rule them all!
#
# Below is the setup for our neural networks. There are plenty of ways to go combining the two strategies. We'll be focusing on one varient called A2C, or Advantage Actor Critic.
#
# <img src="images/a2c_equation.png" width="300" height="150">
#
# Here's the philosophy: We'll use our critic pathway to estimate the value of a state, or V(s). Given a state-action-new state transition, we can use our critic and the Bellman Equation to calculate the discounted value of the new state, or r + γ * V(s').
#
# Like DQNs, this discounted value is the label the critic will train on. While that is happening, we can subtract V(s) and the discounted value of the new state to get the advantage, or A(s,a). In human terms, how much value was the action the agent took? This is what the actor, or the policy gradient portion or our network, will train on.
#
# Too long, didn't read: the critic's job is to learn how to asses the value of a state. The actor's job is to assign probabilities to it's available actions such that it increases its chance to move into a higher valued state.
#
# Below is our new `build_networks` function. Each line has been tagged with whether it comes from Deep Q Networks (`# DQN`), Policy Gradients (`# PG`), or is something new (`# New`).
def build_networks(state_shape, action_size, learning_rate, critic_weight,
                   hidden_neurons, entropy):
    """Creates Actor Critic Neural Networks.
    Creates a two hidden-layer Policy Gradient Neural Network. The loss
    function is altered to be a log-likelihood function weighted
    by an action's advantage.
    Args:
        space_shape: a tuple of ints representing the observation space.
        action_size (int): the number of possible actions.
        learning_rate (float): the neural network's learning rate.
        critic_weight (float): how much to weigh the critic's training loss.
        hidden_neurons (int): the number of neurons to use per hidden layer.
        entropy (float): how much to encourage exploration versus exploitation.
    Returns:
        (actor, critic, policy): `actor` is compiled for joint training,
        `critic` outputs state values, `policy` outputs action
        probabilities; all three share the same input and weights.
    """
    state_input = layers.Input(state_shape, name='frames')
    advantages = layers.Input((1,), name='advantages') # PG, A instead of G
    # PG
    actor_1 = layers.Dense(hidden_neurons, activation='relu')(state_input)
    actor_2 = layers.Dense(hidden_neurons, activation='relu')(actor_1)
    probabilities = layers.Dense(action_size, activation='softmax')(actor_2)
    # DQN
    critic_1 = layers.Dense(hidden_neurons, activation='relu')(state_input)
    critic_2 = layers.Dense(hidden_neurons, activation='relu')(critic_1)
    values = layers.Dense(1, activation='linear')(critic_2)
    def actor_loss(y_true, y_pred): # PG
        # Advantage-weighted negative log-likelihood plus an entropy
        # bonus that penalises over-confident (near-0/1) predictions.
        y_pred_clipped = K.clip(y_pred, CLIP_EDGE, 1-CLIP_EDGE)
        log_lik = y_true*K.log(y_pred_clipped)
        entropy_loss = y_pred * K.log(K.clip(y_pred, CLIP_EDGE, 1-CLIP_EDGE)) # New
        return K.sum(-log_lik * advantages) - (entropy * K.sum(entropy_loss))
    # Train both actor and critic at the same time.
    actor = models.Model(
        inputs=[state_input, advantages], outputs=[probabilities, values])
    actor.compile(
        loss=[actor_loss, 'mean_squared_error'], # [PG, DQN]
        loss_weights=[1, critic_weight], # [PG, DQN]
        optimizer=tf.keras.optimizers.Adam(lr=learning_rate))
    critic = models.Model(inputs=[state_input], outputs=[values])
    policy = models.Model(inputs=[state_input], outputs=[probabilities])
    return actor, critic, policy
# The above is one way to go about combining both of the algorithms. Here, we're combining training of both pathways into one operation. Keras allows for the [training against multiple outputs](https://keras.io/models/model/). They can even have their own loss functions as we have above. When minimizing the loss, Keras will take the weighted sum of all the losses, with the weights provided in `loss_weights`. The `critic_weight` is now another hyperparameter for us to tune.
#
# We could even have completely separate networks for the actor and the critic, and that type of design choice is going to be problem dependent. Having shared nodes and training between the two will be more efficient to train per batch, but more complicated problems could justify keeping the two separate.
#
# The loss function we used here is also slightly different than the one for Policy Gradients. Let's take a look.
# Standalone copy of the actor's loss for discussion; inside
# build_networks it closes over the `advantages` and `entropy` tensors.
# NOTE(review): `advantages` and `entropy` are free variables here —
# this snippet is illustrative and not runnable on its own.
def actor_loss(y_true, y_pred): # PG
    y_pred_clipped = K.clip(y_pred, 1e-8, 1-1e-8)
    log_lik = y_true*K.log(y_pred_clipped)
    # Entropy bonus: penalises over-confident (near-0/1) predictions.
    entropy_loss = y_pred * K.log(K.clip(y_pred, 1e-8, 1-1e-8)) # New
    return K.sum(-log_lik * advantages) - (entropy * K.sum(entropy_loss))
# We've added a new tool called [entropy](https://arxiv.org/pdf/1912.01557.pdf). We're calculating the [log-likelihood](https://en.wikipedia.org/wiki/Likelihood_function#Log-likelihood) again, but instead of comparing the probabilities of our actions versus the action that was taken, we calculating it for the probabilities of our actions against themselves.
#
# Certainly a mouthful, but the idea is to encourage exploration: if our probability prediction is very confident (or close to 1), our entropy will be close to 0. Similarly, if our probability isn't confident at all (or close to 0), our entropy will again be zero. Anywhere in between, our entropy will be non-zero. This encourages exploration versus exploitation, as the entropy will discourage overconfident predictions.
#
# Now that the networks are out of the way, let's look at the `Memory`. We could go with Experience Replay, like with DQNs, or we could calculate TD(1) like with Policy Gradients. This time, we'll do something in between. We'll give our memory a `batch_size`. Once there are enough experiences in the buffer, we'll use all the experiences to train and then clear the buffer to start fresh.
#
# In order to speed up training, instead of recording state_prime, we'll record the value of state prime in `state_prime_values` or `next_values`. This will give us enough information to calculate the discounted values and advantages.
class Memory():
    """Sets up a memory replay for actor-critic training.
    Args:
        gamma (float): The "discount rate" used to assess state values.
        batch_size (int): The number of elements to include in the buffer.
    """
    def __init__(self, gamma, batch_size):
        self.buffer = []
        self.gamma = gamma
        self.batch_size = batch_size
    def add(self, experience):
        """Adds an experience into the memory buffer.
        Args:
            experience: (state, action, reward, state_prime_value, done) tuple.
        """
        self.buffer.append(experience)
    def check_full(self):
        # True once a full batch has accumulated and training may run.
        return len(self.buffer) >= self.batch_size
    def sample(self):
        """Returns formated experiences and clears the buffer.
        Returns:
            (list): A tuple of lists with structure [
                [states], [actions], [rewards], [state_prime_values], [dones]
            ]
        """
        # Columns have different data types, so numpy array would be awkward.
        # NOTE(review): add() documents the experience tuple as
        # (state, action, reward, state_prime_value, done), yet batch[3]
        # is decoded below as dones and batch[4] as values — confirm the
        # caller's tuple order, or one of the two docstrings is mislabeled.
        batch = np.array(self.buffer).T.tolist()
        states_mb = np.array(batch[0], dtype=np.float32)
        actions_mb = np.array(batch[1], dtype=np.int8)
        rewards_mb = np.array(batch[2], dtype=np.float32)
        dones_mb = np.array(batch[3], dtype=np.int8)
        value_mb = np.squeeze(np.array(batch[4], dtype=np.float32))
        self.buffer = []
        return states_mb, actions_mb, rewards_mb, dones_mb, value_mb
# Ok, time to build out the agent! The `act` method is the exact same as it was for Policy Gradients. Nice! The `learn` method is where things get interesting. We'll find the discounted future state like we did for DQN to train our critic. We'll then subtract the value of the discount state from the value of the current state to find the advantage, which is what the actor will train on.
class Agent():
    """Sets up a reinforcement learning agent to play in a game environment."""
    def __init__(self, actor, critic, policy, memory, action_size):
        """Initializes the agent with its networks and memory.

        Args:
            actor: Network trained on advantages and discounted values.
            critic: Network used to estimate state values.
            policy: Network used to map a state to action probabilities.
            memory: A Memory class object.
            action_size (int): The number of possible actions to take.
        """
        self.actor = actor
        self.critic = critic
        self.policy = policy
        self.action_size = action_size
        self.memory = memory

    def act(self, state):
        """Selects an action for the agent to take given a game state.

        Args:
            state (list of numbers): The state of the environment to act on.

        Returns:
            (int) The index of the action to take.
        """
        # Sample from the policy's probability distribution, so exploration
        # comes from the distribution itself (no epsilon-greedy needed).
        state_batch = np.expand_dims(state, axis=0)
        probabilities = self.policy.predict(state_batch)[0]
        action = np.random.choice(self.action_size, p=probabilities)
        return action

    def learn(self, print_variables=False):
        """Trains the actor and critic based on stored experiences.

        Args:
            print_variables (bool): If True, print the training tensors
                instead of running a training step.
        """
        gamma = self.memory.gamma
        experiences = self.memory.sample()
        state_mb, action_mb, reward_mb, dones_mb, next_value = experiences
        # One-hot encode actions.
        actions = np.zeros([len(action_mb), self.action_size])
        actions[np.arange(len(action_mb)), action_mb] = 1
        # Apply TD(0): bootstrap each reward with the discounted value of
        # the next state, zeroed for terminal steps.
        discount_mb = reward_mb + next_value * gamma * (1 - dones_mb)
        state_values = self.critic.predict([state_mb])
        # Advantage: observed discounted return minus the critic's estimate
        # of the current state. The actor trains on this signal.
        advantages = discount_mb - np.squeeze(state_values)
        if print_variables:
            print("discount_mb", discount_mb)
            print("next_value", next_value)
            print("state_values", state_values)
            print("advantages", advantages)
        else:
            self.actor.train_on_batch(
                [state_mb, advantages], [actions, discount_mb])
# Run the below cell to initialize an agent, and the cell after that to see the variables used for training. Since it's early, the critic hasn't learned to estimate the values yet, and the advantages are mostly positive because of it.
#
# Once the critic has learned how to properly assess states, the actor will start to see negative advantages. Try playing around with the variables to help the agent see this change sooner.
# +
# Change me please.
# Hyperparameters for a single training experiment; tune these by hand.
test_gamma = .9
test_batch_size = 32
test_learning_rate = .02
test_hidden_neurons = 50
test_critic_weight = 0.5
test_entropy = 0.0001

# Build the memory, the three networks, and the agent that ties them together.
test_memory = Memory(test_gamma, test_batch_size)
test_actor, test_critic, test_policy = build_networks(
    space_shape, action_size,
    test_learning_rate, test_critic_weight,
    test_hidden_neurons, test_entropy)
test_agent = Agent(
    test_actor, test_critic, test_policy, test_memory, action_size)
# +
# Play a single episode, storing every step, then print the training
# variables (learn(print_variables=True)) instead of actually training.
state = env.reset()
episode_reward = 0
done = False
while not done:
    action = test_agent.act(state)
    state_prime, reward, done, _ = env.step(action)
    episode_reward += reward
    # Cache V(s') now so learn() doesn't need to re-run the critic later.
    next_value = test_agent.critic.predict([[state_prime]])
    test_agent.memory.add((state, action, reward, done, next_value))
    state = state_prime

test_agent.learn(print_variables=True)
# -
# Have a set of variables you're happy with? Ok, time to shine! Run the below cell to see how the agent trains.
# Train for 200 episodes inside a fresh default graph so variables from the
# earlier test cells don't leak into this run.
with tf.Graph().as_default():
    test_memory = Memory(test_gamma, test_batch_size)
    test_actor, test_critic, test_policy = build_networks(
        space_shape, action_size,
        test_learning_rate, test_critic_weight,
        test_hidden_neurons, test_entropy)
    test_agent = Agent(
        test_actor, test_critic, test_policy, test_memory, action_size)
    for episode in range(200):
        state = env.reset()
        episode_reward = 0
        done = False
        while not done:
            action = test_agent.act(state)
            state_prime, reward, done, _ = env.step(action)
            episode_reward += reward
            next_value = test_agent.critic.predict([[state_prime]])
            test_agent.memory.add((state, action, reward, done, next_value))
            #if test_agent.memory.check_full():
                #test_agent.learn(print_variables=True)
            state = state_prime
        # Train once per episode on everything gathered so far (the
        # commented lines above are the alternative per-batch schedule).
        test_agent.learn()
        print("Episode", episode, "Score =", episode_reward)
# Any luck? No sweat if not! It turns out that by combining the power of both algorithms, we also combined some of their setbacks. For instance, actor-critic can fall into local minimums like Policy Gradients, and has a large number of hyperparameters to tune like DQNs.
#
# Time to check how our agents did [in the cloud](https://console.cloud.google.com/ai-platform/jobs)! Any lucky winners? Find it in [your bucket](https://console.cloud.google.com/storage/browser) to watch a recording of it play.
# Copyright 2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| quests/rl/a2c/a2c_on_gcp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Resources for building my website.
#
# - [w3schools](https://www.w3schools.com/)
#
# - [w3schools on Bootstrap 4](https://www.w3schools.com/bootstrap4/default.asp)
#
# - [dominate docs](https://github.com/Knio/dominate/)
#
# - [draperjames.github.io](https://draperjames.github.io/)
#
# - [github projects through the API](https://api.github.com/users/draperjames/repos?per_page=100)
#
# - [nbviewer rendering of this page from the live github repo](http://nbviewer.jupyter.org/github/draperjames/draperjames.github.io/blob/master/dominate_builder.ipynb)
#
#
# <a href="https://stackoverflow.com/users/3727854/james-draper">
# <img src="https://stackoverflow.com/users/flair/3727854.png" width="208" height="58" alt="SO flair button" title="<NAME> at Stack Overflow">
# </a>
# +
from dominate import tags

# Set the title of the site.
title = tags.title("<NAME> : Computational Biologist")

# Generate the metadata.
meta_data = [title]
meta_data += [tags.meta(charset="utf-8")]
meta_data += [tags.meta(name="viewport", content="width=device-width, initial-scale=1")]

# Bootstrap CSS CDN
css = [tags.link(rel="stylesheet", href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css")]

# jQuery CDN
js = [tags.script(src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js")]
# Popper CDN — append (+=); a plain `js = [...]` here silently dropped the
# jQuery script from the page.
js += [tags.script(src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js")]
# Bootstrap JS CDN
js += [tags.script(src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js")]

external_code = css + js

# Build the HTML head.
head = tags.head(meta_data + external_code)

# Jumbotron wrapper: the decorator renders the function body inside a
# centered jumbotron div.
@tags.div(cls='jumbotron text-center')
def jumbotron_center(title=None, subtitle=None, **kwargs):
    """Renders an h1 title, and an optional subtitle paragraph."""
    tags.h1('{}'.format(title))
    if subtitle is not None:
        tags.p(subtitle)

projects = dict()
projects["guipyter"] = "https://draperjames.github.io/guipyter/"
projects["pandomics"] = "https://draperjames.github.io/pandomics/"
projects["notebook for build this page"] = "http://nbviewer.jupyter.org/github/draperjames/draperjames.github.io/blob/master/dominate_builder.ipynb"

# One anchor tag per project, separated by line breaks.
lnk_list = []
for k, v in projects.items():
    lnk = tags.a(k, href=v)
    lnk_list += [lnk, tags.br()]

# Container content added.
container = tags.div(tags.h3("My OSS Projects"), tags.p(lnk_list), cls="col-sm-4")
container = tags.div(container, cls="container")

# Build the body.
body = []
body += [jumbotron_center(title="<NAME>", subtitle="Computational Biologist")]
body += [container]
body = tags.body(body)
# body.render()

page = tags.html(head, body)

def make_page(file_path=None, page=None):
    """Writes the rendered HTML of `page` to `file_path`."""
    with open(file_path, "w") as f:
        f.write(page.render())

# Overwrite index.html with the generated page.
make_page("index.html", page)
# -
# ---
# # On going development
# ---
#
# ## Investigating the github API.
#
# - https://developer.github.com/v3/repos/
#
# - https://api.github.com/users/draperjames/repos?per_page=100
#
#
# +
import time
import json
import pandas as pd
from urllib import request

github_api = "https://api.github.com/users/draperjames/repos?per_page=100"

# Request the repository listing from the GitHub API.
page = request.urlopen(github_api)
# Parse the JSON response body.
jpage = json.load(page)
# DataFrame for easier handling and data reduction.
jdf = pd.DataFrame(jpage)
# +
# Keep just the name, activity-date, and size/fork columns.
jdfs = jdf[["name", "pushed_at", "updated_at", "created_at", "size", "fork", "forks_count"]]
# Format the dates as datetime objects.
dt_format = pd.DataFrame([pd.to_datetime(jdfs.pushed_at),
                          pd.to_datetime(jdfs.updated_at),
                          pd.to_datetime(jdfs.created_at)]).T
jdfs = pd.concat([jdfs["name"], dt_format, jdfs.iloc[:, 4:]], axis=1)
# -

# Just my repos (exclude forks).
jdfs.loc[~jdfs.fork]

# ## Creating a projects heatmap
#
# - [Styling pandas DataFrame](https://pandas.pydata.org/pandas-docs/stable/style.html)

# Seconds since the epoch for the first repo's last push.
time.mktime(jdfs.pushed_at[0].timetuple())

# +
# Convert all of the pushed_at dates to epoch seconds.
pt = jdfs.pushed_at.apply(lambda x: time.mktime(x.timetuple()))
# Normalizing to the earliest date.
npt = pt - pt.min()
# -

# Column names that mention commits.
list(filter(lambda x:"commit" in x, jdf.columns))

jdf.git_commits_url[10]
| .ipynb_checkpoints/dominate_builder-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# psycopg2: driver that connects Python to PostgreSQL.
import psycopg2

# Connection-string parameters.
host = "127.0.0.1"  # hostname or IP of the machine running the DBMS
dbname = "company"  # database name
user = "postgres"  # user name
password = "<PASSWORD>"  # password
#sslmode = "verify-full" #"require"

# Build the connection string.
#conn_string = "host={0} user={1} dbname={2} password={3} sslmode={4}".format(host, user, dbname, password, sslmode)
conn_string = "host={0} user={1} dbname={2} password={3}".format(host, user, dbname, password)

# Open the database connection.
conn = psycopg2.connect(conn_string)
print("Connection established")

# Cursor used to issue commands against the selected database.
cursor = conn.cursor()

# Drop any previous table with the same name.
cursor.execute("DROP TABLE IF EXISTS inventory;")
print("Finished dropping table (if existed)")

# Create a table named inventory.
cursor.execute("CREATE TABLE inventory (id serial PRIMARY KEY, name VARCHAR(50), quantity INTEGER);")
print("Finished creating table")

# Insert a few rows (values passed as bound parameters).
cursor.execute("INSERT INTO inventory (name, quantity) VALUES (%s, %s);", ("banana", 150))
cursor.execute("INSERT INTO inventory (name, quantity) VALUES (%s, %s);", ("orange", 154))
cursor.execute("INSERT INTO inventory (name, quantity) VALUES (%s, %s);", ("apple", 100))
print("Inserted 3 rows of data")

# Commit the transaction.
conn.commit()

# Close the connection.
cursor.close()
conn.close()
# +
# psycopg2: driver that connects Python to PostgreSQL.
import psycopg2

# Connection-string parameters.
host = "127.0.0.1"  # hostname or IP of the machine running the DBMS
dbname = "company"  # database name
user = "postgres"  # user name
password = "<PASSWORD>"  # password

# Build the connection string.
conn_string = "host={0} user={1} dbname={2} password={3}".format(host, user, dbname, password)

# Open the database connection.
conn = psycopg2.connect(conn_string)
print("Connection established")

# Cursor used to issue commands against the selected database.
cursor = conn.cursor()

# Retrieve every row of the inventory table.
cursor.execute("SELECT * FROM inventory;")

# Fetch all result rows from the cursor.
rows = cursor.fetchall()

# Print every row.
for row in rows:
    print("Data row = (%s, %s, %s)" %(str(row[0]), str(row[1]), str(row[2])))

# Commit the transaction.
conn.commit()

# Close the connection.
cursor.close()
conn.close()
# +
# psycopg2: driver that connects Python to PostgreSQL.
import psycopg2

# Connection-string parameters.
host = "127.0.0.1"  # hostname or IP of the machine running the DBMS
dbname = "company"  # database name
user = "postgres"  # user name
password = "<PASSWORD>"  # password

# Build the connection string.
conn_string = "host={0} user={1} dbname={2} password={3}".format(host, user, dbname, password)

# Open the database connection.
conn = psycopg2.connect(conn_string)
print("Connection established")

# Cursor used to issue commands against the selected database.
cursor = conn.cursor()

# Update one row (values passed as bound parameters).
cursor.execute("UPDATE inventory SET quantity = %s WHERE name = %s;", (200, "banana"))
print("Updated 1 row of data")

# Commit the transaction.
conn.commit()

# Close the connection.
cursor.close()
conn.close()
# +
# psycopg2: driver that connects Python to PostgreSQL.
import psycopg2

# Connection-string parameters.
host = "127.0.0.1"  # hostname or IP of the machine running the DBMS
dbname = "company"  # database name
user = "postgres"  # user name
password = "<PASSWORD>"  # password

# Build the connection string.
conn_string = "host={0} user={1} dbname={2} password={3}".format(host, user, dbname, password)

# Open the database connection.
conn = psycopg2.connect(conn_string)
print("Connection established")

# Cursor used to issue commands against the selected database.
cursor = conn.cursor()

# Delete one row (note the one-element tuple for the parameter).
cursor.execute("DELETE FROM inventory WHERE name = %s;", ("orange",))
print("Deleted 1 row of data")

# Commit the transaction.
conn.commit()

# Close the connection.
cursor.close()
conn.close()
# +
# psycopg2: driver that connects Python to PostgreSQL.
import psycopg2

# Connection-string parameters.
host = "127.0.0.1"  # hostname or IP of the machine running the DBMS
dbname = "company"  # database name
user = "postgres"  # user name
password = "<PASSWORD>"  # password

# Build the connection string.
conn_string = "host={0} user={1} dbname={2} password={3}".format(host, user, dbname, password)

# Open the database connection.
conn = psycopg2.connect(conn_string)
print("Connection established")

# Cursor used to issue commands against the selected database.
cursor = conn.cursor()

quant=input("Digite a quantidade da qual deseja apresentar os produtos=")

# Retrieve every row whose quantity is at least the requested amount.
# The value is passed as a bound parameter instead of being interpolated
# with .format(), which left the query open to SQL injection through the
# user-supplied input.
cursor.execute("SELECT * FROM inventory WHERE quantity >= %s;", (quant,))

# Fetch all result rows from the cursor.
rows = cursor.fetchall()

# Print every row.
for row in rows:
    print("Data row = (%s, %s, %s)" %(str(row[0]), str(row[1]), str(row[2])))

# Commit the transaction.
conn.commit()

# Close the connection.
cursor.close()
conn.close()
# -
| Jupyter/primeiroProjeto/teste2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### You are looking at data from an e-commerce website. The site is very simple and has just 4 pages:
#
# #### The first page is the home page. When you come to the site for the first time, you can only land on the home page as a first page.
#
# #### From the home page, the user can perform a search and land on the search page. From the search page, if the user clicks on a product, she will get to the payment page, where she is asked to provide payment information in order to buy that product.
#
# #### If she does decide to buy, she ends up on the confirmation page
#
# ### The company CEO isn't very happy with the volume of sales and, especially, of sales coming from new users. Therefore, she asked you to investigate whether there is something wrong in the conversion funnel or, in general, if you could suggest how conversion rate can be improved.
#
# ### Specifically, she is interested in:
#
# #### (1) A full picture of the funnel conversion rate for both desktop and mobile.
#
# #### (2) Some insights on what the product team should focus on in order to improve conversion rate as well as anything you might discover that could help improve conversion rate.
# ### Load the package would be used
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# ### Read in the data set
# +
user = pd.read_csv("../Datasets/Funnel/user_table.csv")
#### Check if unique user ID
print(len(user["user_id"]) == len(np.unique(user["user_id"])))
print(user.head())

home = pd.read_csv("../Datasets/Funnel/home_page_table.csv")
#### Check if unique user ID
print(len(home["user_id"]) == len(np.unique(home["user_id"])))
print(home.head())

search = pd.read_csv("../Datasets/Funnel/search_page_table.csv")
#### Check if unique user ID
print(len(search["user_id"]) == len(np.unique(search["user_id"])))
print(search.head())

payment = pd.read_csv("../Datasets/Funnel/payment_page_table.csv")
#### Check if unique user ID
print(len(payment["user_id"]) == len(np.unique(payment["user_id"])))
print(payment.head())

confirm = pd.read_csv("../Datasets/Funnel/payment_confirmation_table.csv")
#### Check if unique user ID
print(len(confirm["user_id"]) == len(np.unique(confirm["user_id"])))
print(confirm.head())
# -

# Left-join the four page tables onto the user table: a NaN page value
# means the user never reached that page.
dat0 = pd.merge(user, home, on = ["user_id"], how = "left")
dat0 = pd.merge(dat0, search, on = ["user_id"], how = "left", suffixes=("_home", "_search"))
dat0 = pd.merge(dat0, payment, on = ["user_id"], how = "left")
dat0 = pd.merge(dat0, confirm, on = ["user_id"], how = "left", suffixes=("_payment", "_confirm"))
dat0.head()

# Recode each page column into a 1/0 indicator flag.
dat0.replace(np.nan, 0, inplace = True)
dat0["page_home"] = dat0["page_home"].replace("home_page", 1)
dat0["page_search"] = dat0["page_search"].replace("search_page", 1)
dat0["page_payment"] = dat0["page_payment"].replace("payment_page", 1)
dat0["page_confirm"] = dat0["page_confirm"].replace("payment_confirmation_page", 1)
dat0["date"] = pd.to_datetime(dat0["date"])
dat0.head()

# Quick summaries of the merged table.
print(dat0.describe())
print(dat0.groupby(["device"]).size())
print(dat0.groupby(["sex"]).size())
# ### A full picture of the funnel conversion rate for both desktop and mobile.
# #### Overall Conversion Rate
# +
# Funnel conversion per step: fraction of users on one page who advance to
# the next page.
overall_rates = [sum(dat0["page_search"])/sum(dat0["page_home"]),
                 sum(dat0["page_payment"])/sum(dat0["page_search"]),
                 sum(dat0["page_confirm"])/sum(dat0["page_payment"])]
col_names = ["to_search", "to_pay", "to_confirm"]
overall_convert = pd.DataFrame({"Step": col_names, "Conversion Rate": overall_rates},
                               columns = ["Step", "Conversion Rate"])
print(overall_convert)

plt.figure(figsize = [12, 6])
sns.barplot(x = "Step", y = "Conversion Rate", data = overall_convert, palette = "PuBuGn")
# -
# #### Conversion rate by device type
# +
# Per-device funnel conversion rates.
device = dat0.groupby("device")
device_sums = device.sum()  # aggregate once instead of re-summing per ratio
search_dict = (device_sums["page_search"]/device_sums["page_home"]).to_dict()
pay_dict = (device_sums["page_payment"]/device_sums["page_search"]).to_dict()
confirm_dict = (device_sums["page_confirm"]/device_sums["page_payment"]).to_dict()
search_df = pd.DataFrame(search_dict, index = ["Search"])
pay_df = pd.DataFrame(pay_dict, index = ["Pay"])
confirm_df = pd.DataFrame(confirm_dict, index = ["Confirm"])
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
device_rates = pd.concat([search_df, pay_df, confirm_df])
# Reshape to long format: one row per (Step, Device) for seaborn.
device_convert0 = pd.DataFrame(device_rates.stack()).rename(columns = {0: "Conversion Rate"})
device_convert = device_convert0.reset_index().rename(columns = {"level_0": "Step", "level_1": "Device"})

plt.figure(figsize = [12, 6])
sns.barplot(x = "Step", y = "Conversion Rate", data = device_convert, palette = "PuBuGn", hue = "Device")
# -
# #### Conversion rate by sex
# +
# Funnel conversion rates split by sex.
sex = dat0.groupby("sex")
sex_sums = sex.sum()  # aggregate once instead of re-summing per ratio
search_dict = (sex_sums["page_search"]/sex_sums["page_home"]).to_dict()
pay_dict = (sex_sums["page_payment"]/sex_sums["page_search"]).to_dict()
confirm_dict = (sex_sums["page_confirm"]/sex_sums["page_payment"]).to_dict()
search_df = pd.DataFrame(search_dict, index = ["Search"])
pay_df = pd.DataFrame(pay_dict, index = ["Pay"])
confirm_df = pd.DataFrame(confirm_dict, index = ["Confirm"])
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
sex_rates = pd.concat([search_df, pay_df, confirm_df])
# Reshape to long format: one row per (Step, Sex) for seaborn.
sex_convert0 = pd.DataFrame(sex_rates.stack()).rename(columns = {0: "Conversion Rate"})
sex_convert = sex_convert0.reset_index().rename(columns = {"level_0": "Step", "level_1": "Sex"})

plt.figure(figsize = [12, 6])
sns.barplot(x = "Step", y = "Conversion Rate", data = sex_convert, palette = "PuBuGn", hue = "Sex")
# -
# #### Conversion rate by date
# Daily funnel conversion rates.
date = dat0.groupby("date")
date_sums = date.sum()  # aggregate once instead of re-summing per ratio
search_dict = (date_sums["page_search"]/date_sums["page_home"]).to_dict()
pay_dict = (date_sums["page_payment"]/date_sums["page_search"]).to_dict()
confirm_dict = (date_sums["page_confirm"]/date_sums["page_payment"]).to_dict()
search_df = pd.DataFrame(search_dict, index = ["Search"])
pay_df = pd.DataFrame(pay_dict, index = ["Pay"])
confirm_df = pd.DataFrame(confirm_dict, index = ["Confirm"])
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
date_rates = pd.concat([search_df, pay_df, confirm_df])
# Reshape to long format: one row per (Step, Date) for seaborn.
date_convert0 = pd.DataFrame(date_rates.stack()).rename(columns = {0: "Conversion Rate"})
date_convert = date_convert0.reset_index().rename(columns = {"level_0": "Step", "level_1": "Date"})
plt.figure(figsize = [12, 6])
sns.lineplot(x = "Date", y = "Conversion Rate", data = date_convert, palette = "PuBuGn", hue = "Step")
# #### Conversion rate by date \& device
# +
# Daily funnel conversion rates, split by device.
dev_date = dat0.groupby(["device", "date"])
dev_date_sums = dev_date.sum()  # aggregate once instead of re-summing per ratio
search_dict = (dev_date_sums["page_search"]/dev_date_sums["page_home"]).to_dict()
pay_dict = (dev_date_sums["page_payment"]/dev_date_sums["page_search"]).to_dict()
confirm_dict = (dev_date_sums["page_confirm"]/dev_date_sums["page_payment"]).to_dict()
search_df = pd.DataFrame(search_dict, index = ["Search"])
pay_df = pd.DataFrame(pay_dict, index = ["Pay"])
confirm_df = pd.DataFrame(confirm_dict, index = ["Confirm"])
# DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
dev_date_rates0 = pd.concat([search_df, pay_df, confirm_df])
dev_date_rates = pd.DataFrame(dev_date_rates0.stack())
# Reshape to long format: one row per (Step, Date, Device) combination.
dev_date_convert0 = pd.DataFrame(dev_date_rates.stack()).rename(columns = {0: "Conversion Rate"})
dev_date_convert = dev_date_convert0.reset_index().rename(columns = {"level_0": "Step", "level_1": "Date", "level_2": "Device"})

plt.figure(figsize = [12, 4])
plt.title("Conversion Rate of Desktop", fontsize = 16)
sns.lineplot(x = "Date", y = "Conversion Rate", data = dev_date_convert[dev_date_convert["Device"] == "Desktop"], palette = "PuBuGn", hue = "Step")

plt.figure(figsize = [12, 4])
plt.title("Conversion Rate of Mobile", fontsize = 16)
sns.lineplot(x = "Date", y = "Conversion Rate", data = dev_date_convert[dev_date_convert["Device"] == "Mobile"], palette = "PuBuGn", hue = "Step")
# -
| 5.Funnel Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:AKSDeployment]
# language: python
# name: conda-env-AKSDeployment-py
# ---
# ### Test web application locally
# This notebook pulls some images and tests them against the local web app running inside the Docker container we made previously.
# +
import os
import matplotlib.pyplot as plt
import numpy as np
from testing_utilities import to_img, img_url_to_json, plot_predictions
import requests
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# %load_ext dotenv
# %dotenv
# -
# Docker image name comes from the .env file loaded above.
image_name = os.getenv('docker_login') + os.getenv('image_repo')
image_name

# Run the Docker container in the background and open port 80. Notice we are using nvidia-docker and not docker
# + magic_args="--bg -s \"$image_name\"" language="bash"
# nvidia-docker run -p 80:80 $1
# -

# Wait a few seconds for the application to spin up and then check that everything works
# !curl 'http://0.0.0.0:80/version'

# Pull an image of a Lynx to test our local web app with
IMAGEURL = "https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg"

headers = {'content-type': 'application/json'}
# JSON payload wrapping the image for the scoring endpoint.
jsonimg = img_url_to_json(IMAGEURL)
jsonimg[:100] # Example of json string

plt.imshow(to_img(IMAGEURL))

# %time r = requests.post('http://0.0.0.0:80/score', data=jsonimg, headers=headers)

r.json()

# Let's try a few more images
images = ('https://upload.wikimedia.org/wikipedia/commons/thumb/6/68/Lynx_lynx_poing.jpg/220px-Lynx_lynx_poing.jpg',
          'https://upload.wikimedia.org/wikipedia/commons/3/3a/Roadster_2.5_windmills_trimmed.jpg',
          'http://www.worldshipsociety.org/wp-content/themes/construct/lib/scripts/timthumb/thumb.php?src=http://www.worldshipsociety.org/wp-content/uploads/2013/04/stock-photo-5495905-cruise-ship.jpg&w=570&h=370&zc=1&q=100',
          'http://yourshot.nationalgeographic.com/u/ss/fQYSUbVfts-T7pS2VP2wnKyN8wxywmXtY0-FwsgxpiZv_E9ZfPsNV5B0ER8-bOdruvNfMD5EbP4SznWz4PYn/',
          'https://cdn.arstechnica.net/wp-content/uploads/2012/04/bohol_tarsier_wiki-4f88309-intro.jpg',
          'http://i.telegraph.co.uk/multimedia/archive/03233/BIRDS-ROBIN_3233998b.jpg')

url='http://0.0.0.0:80/score'
# Score each test image against the locally running container.
results = [requests.post(url, data=img_url_to_json(img), headers=headers) for img in images]

plot_predictions(images, results)

# Next lets quickly check what the request response performance is for the locally running Docker container.
image_data = list(map(img_url_to_json, images)) # Retrieve the images and data

# Time one scoring request per image; `res` is bound by the %timeit magic
# when the notebook runs this cell.
timer_results = list()
for img in image_data:
    # res=%timeit -r 1 -o -q requests.post(url, data=img, headers=headers)
    timer_results.append(res.best)

timer_results

print('Average time taken: {0:4.2f} ms'.format(10**3 * np.mean(timer_results)))
# Stop our Docker container
# + language="bash"
# docker stop $(docker ps -q)
# -
# We can move onto [deploying our web application on AKS](04_DeployOnAKS.ipynb)
| Tensorflow/03_TestLocally.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load train_CNN.py
# +
# # %load mergedata.py
import pandas as pd
from pandas import DataFrame as df
import numpy as np
import os

# Append every CSV found in inputfile_dir into a single output file.
inputfile_dir='./inputfile'
outputfile='all2.csv'
for inputfile in os.listdir(inputfile_dir):
    # Reuse inputfile_dir instead of repeating the "./inputfile" literal.
    input_path = os.path.join(inputfile_dir, inputfile)
    # Distinct name so the `df` alias imported above isn't shadowed.
    frame = pd.read_csv(input_path)
    # mode='a+' appends, so successive files accumulate into one CSV.
    frame.to_csv(outputfile, index=False, header=False, mode='a+')
# -
| game2048/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import os
import json
import shutil
import sh
import yaml
from pandas import DataFrame, Series
from itertools import islice
REPOS_LIST = "/Users/raymondyee/C/src/gitenberg/Second-Folio/list_of_repos.txt"
GITENBERG_DIR = "/Users/raymondyee/C/src/gitenberg/"
METADATA_DIR = "/Users/raymondyee/C/src/gitenberg-dev/giten_site/metadata"
COVERS_DATA = "/Users/raymondyee/C/src/gitenberg/Second-Folio/covers_data.json"
repos=open(REPOS_LIST).read().strip().split("\n")
# -
# # how is the yaml file being serialized?
#
# https://github.com/gitenberg-dev/metadata/blob/f220022fd7b08a3e40c56870170b3294d05c039b/gitenberg/metadata/pg_rdf.py#L260
#
#
for repo in islice(repos,0,2):
pg_id = repo.split("_")[-1]
source = os.path.join(METADATA_DIR, "{}.yaml".format(pg_id))
print (source)
if os.path.exists(source):
with open(source, "r") as yfile:
yaml_0 = open(source).read()
yfile.seek(0)
y = yaml.load(yfile)
yaml_1 = yaml.safe_dump(y,default_flow_style=False,allow_unicode=True)
print (source, yaml_0 == yaml_1)
# compare the yaml file with what it would be if we serialized it in the way I think it's being done by Eric
# Now ready to compare files in /Users/raymondyee/C/src/gitenberg-dev/giten_site/metadata
# with the repos
# +
# comparing source / destination
for repo in islice(repos,0,1):
pg_id = repo.split("_")[-1]
source = os.path.join(METADATA_DIR, "{}.yaml".format(pg_id))
dest = os.path.join(GITENBERG_DIR, repo, "metadata.yaml")
#print (repo, source, dest)
#print "diff {0} {1}".format(source, dest)
try:
output = sh.diff(source, dest)
except Exception as e:
shutil.copyfile(source, dest)
print (source, dest)
#print (output)
#print (e)
# -
def git_reload_metadata(repo):
    """Commits and pushes the repo's metadata.yaml, if it exists.

    Returns None when the file is missing; returns the exception object
    (rather than raising) on any git failure.
    """
    metadata_path = os.path.join(GITENBERG_DIR, repo, "metadata.yaml")
    try:
        if os.path.exists(metadata_path):
            sh.cd(os.path.join(GITENBERG_DIR, repo))
            print ("add")
            sh.git("add", "metadata.yaml")
            print ("commit")
            try:
                # A no-op commit (nothing staged) raises; ignore it.
                sh.git("commit", "-m", "update metadata.yaml with RTC as publisher; adding _version")
            except:
                pass
            print ("push")
            sh.git.push()
        else:
            return None
    except Exception as e:
        return e

# +
# commit and push updated metadata.yaml across all repos (skipping the first)
def do_git_reload_metadata():
    """Runs git_reload_metadata over every repo after the first."""
    for (i,repo) in enumerate(islice(repos,1,None)):
        print (i, repo)
        print (git_reload_metadata(repo))

do_git_reload_metadata()
# +
import yaml
# +
covers_data = json.loads(open(COVERS_DATA).read())
# Index the cover records by their GitHub repo name.
covers_data_dict = dict([(c['GitHub repo'], c) for c in covers_data])

# Add the cover attribution block to each repo's metadata.yaml (trial run
# on the first repo only).
for repo in islice(repos,0,1):
    pg_id = repo.split("_")[-1]
    dest = os.path.join(GITENBERG_DIR, repo, "metadata.yaml")
    cover_artist = covers_data_dict[repo]['cover_artist']
    cover_metadata = {
        "cover_type": "original",
        "image_path": "cover.jpg",
        "rights": "Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)",
        "rights_url": "https://creativecommons.org/licenses/by-nc/4.0/",
        "attribution": u"{}, 2015".format(cover_artist)
    }
    print (dest)
    # is cover_metadata already in the YAML?
    if os.path.exists(dest):
        with open(dest, "r+") as yfile:
            # NOTE(review): yaml.load without a Loader — safe_load preferred.
            y = yaml.load(yfile)
            covers = y.get('covers', [])
            # Compare as sets of items so key ordering doesn't matter.
            covers_set = [set(c.items()) for c in covers]
            if set(cover_metadata.items()) not in covers_set:
                covers.append(cover_metadata)
                y['covers'] = covers
                yfile.seek(0)
                yfile.write(yaml.safe_dump(y,default_flow_style=False,allow_unicode=True))
                # Drop leftover bytes if the new dump is shorter.
                yfile.truncate()
            else:
                print ("already in", dest, cover_metadata)
# -
def git_cover_metadata(repo):
    """Commits and pushes metadata.yaml with a cover-attribution message.

    Returns None when the file is missing; returns the exception object
    (rather than raising) on any git failure.
    """
    metadata_path = os.path.join(GITENBERG_DIR, repo, "metadata.yaml")
    try:
        if os.path.exists(metadata_path):
            sh.cd(os.path.join(GITENBERG_DIR, repo))
            print ("add")
            sh.git("add", "metadata.yaml")
            print ("commit")
            try:
                # A no-op commit (nothing staged) raises; ignore it.
                sh.git("commit", "-m", "update metadata.yaml with cover attribution")
            except:
                pass
            print ("push")
            sh.git.push()
        else:
            return None
    except Exception as e:
        return e

# Push the cover-attribution commit for every repo after the first.
for (i,repo) in enumerate(islice(repos[1:],None)):
    print (i, repo)
    print (git_cover_metadata(repo))

# dest still points at the last metadata.yaml handled above.
dest

# +
# yaml.safe_dump(pg_json,default_flow_style=False,allow_unicode=True)
# http://pyyaml.org/wiki/PyYAMLDocumentation
# NOTE(review): "rw" is not a standard open() mode — verify; it appears to
# behave as read-only here.
with open(dest, "rw") as yfile:
    y = yaml.load(yfile)
    print(y)
# -

yfile = open(dest, "rw")
y = yaml.load(yfile)
yfile.close()

y.get('covers')

"""u"中国"
"""
# +
import codecs
def lit_to_unicode(s):
ok_type = [unicode, int]
if type(s) in ok_type:
return s
elif isinstance(s, str):
return codecs.decode(s, "unicode_escape")
elif isinstance (s, list):
return [lit_to_unicode(item) for item in s]
elif isinstance(s, dict):
return dict([(k,lit_to_unicode(v)) for (k,v) in s.items()])
else:
raise Exception("unexpected type", type(s))
# +
# fix unicode problems in the source yamls
from IPython.display import (HTML, display)
import difflib
differ = difflib.HtmlDiff()
changed_yaml = []
# Normalise each source YAML file: decode escaped string literals to real
# unicode and rewrite the file only when the dump actually changes.
for repo in islice(repos, 0, None):
    pg_id = repo.split("_")[-1]
    source = os.path.join(METADATA_DIR, "{}.yaml".format(pg_id))
    with open(source, "r+") as yfile:
        # safe_load: these files need no arbitrary-object YAML tags.
        y = yaml.safe_load(yfile)
        yfile.seek(0)
        old_dump = yfile.read()
        new_dump = yaml.safe_dump(
            lit_to_unicode(y), default_flow_style=False, allow_unicode=True)
        if old_dump != new_dump:
            yfile.seek(0)
            yfile.write(new_dump)
            yfile.truncate()
            changed_yaml.append(repo)
    # No explicit close(): the `with` block closes the file.  (The
    # original called yfile.close() inside the with, redundantly.)
# -
changed_yaml
# +
# now update publisher metadata in repos
"""
publication_date: 2015-08-01
publisher: Recovering the Classics
rights: CC BY-NC
rights_url: http://creativecommons.org/licenses/by-nc/4.0/
"""
# Stamp publisher/rights metadata into each repo's metadata.yaml,
# rewriting the file only when something actually changes.
for repo in islice(repos, 1, None):
    dest = os.path.join(GITENBERG_DIR, repo, "metadata.yaml")
    print(dest)
    with open(dest, "r+") as yfile:
        # safe_load: these files need no arbitrary-object YAML tags.
        y = yaml.safe_load(yfile)
        yfile.seek(0)
        old_dump = yfile.read()
        # make changes
        y['publisher'] = 'Recovering the Classics'
        y['publication_date'] = '2015-08-01'
        y['rights'] = 'CC BY-NC'
        y['rights_url'] = 'http://creativecommons.org/licenses/by-nc/4.0/'
        y['_version'] = '0.0.1'
        new_dump = yaml.safe_dump(y, default_flow_style=False, allow_unicode=True)
        if old_dump != new_dump:
            print("writing new_dump")
            yfile.seek(0)
            yfile.write(new_dump)
            yfile.truncate()
    # (dropped the unused `pg_id` local and the redundant close() --
    # the `with` block closes the file)
# -
| metadata.yaml.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="KdUFcDsdzRyw"
# # Clonamos el repositorio para obtener los dataSet
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="mHReFf3_y9ms" outputId="c17545fd-c7dd-42c2-e3ad-4f55db21611f"
# !git clone https://github.com/joanby/machinelearning-az.git
# + [markdown] colab_type="text" id="vNKZXgtKzU2x"
# # Damos acceso a nuestro Drive
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="5gu7KWnzzUQ0" outputId="abe602b4-3a59-470e-d508-037c6966002b"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] colab_type="text" id="1gUxIkHWzfHV"
# # Test it
# + colab={} colab_type="code" id="mIQt3jBMzYRE"
# !ls '/content/drive/My Drive'
# + [markdown] colab_type="text" id="mHsK36uN0XB-"
# # Google colab tools
# + colab={} colab_type="code" id="kTzwfUPWzrm4"
from google.colab import files # Para manejar los archivos y, por ejemplo, exportar a su navegador
import glob # Para manejar los archivos y, por ejemplo, exportar a su navegador
from google.colab import drive # Montar tu Google drive
# + [markdown] colab_type="text" id="uab9OAbV8hYN"
# # Instalar dependendias
# + colab={"base_uri": "https://localhost:8080/", "height": 122} colab_type="code" id="qukjDgj98kE4" outputId="95b5f2b5-7149-436a-b1fb-ad567cc783bd"
# !pip install sklearn
# + [markdown] colab_type="text" id="3yFpBwmNz70v"
# # Upper Confidence Bound (UCB)
# + [markdown] colab_type="text" id="v8OxSXXSz-OP"
# # Cómo importar las librerías
#
# + colab={} colab_type="code" id="edZX51YLzs59"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + [markdown] colab_type="text" id="8XfXlqtF0B58"
# # Importar el data set
#
# + colab={} colab_type="code" id="-nnozsHsz_-N"
dataset = pd.read_csv('/content/machinelearning-az/datasets/Part 6 - Reinforcement Learning/Section 32 - Upper Confidence Bound (UCB)/Ads_CTR_Optimisation.csv')
# + [markdown] colab_type="text" id="SsVEdPzf4XmV"
# # Algoritmo de UCB
# + colab={} colab_type="code" id="v9CtwK834bjy"
import math

# Upper Confidence Bound (UCB) bandit over the 10 ads in `dataset`
# (one column per ad, one simulated user per row).
N = 10000  # rounds (rows of the dataset)
d = 10     # arms (ads)
number_of_selections = [0] * d   # times each ad has been shown
sums_of_rewards = [0] * d        # cumulative reward (clicks) per ad
ads_selected = []                # ad chosen at each round
total_reward = 0
for n in range(0, N):
    max_upper_bound = 0
    ad = 0
    for i in range(0, d):
        if number_of_selections[i] > 0:
            average_reward = sums_of_rewards[i] / number_of_selections[i]
            # UCB1 exploration term
            delta_i = math.sqrt(3 / 2 * math.log(n + 1) / number_of_selections[i])
            upper_bound = average_reward + delta_i
        else:
            # Unexplored arm: an infinite bound forces it to be tried first.
            # (was the overflow literal 1e400; math.inf is the explicit,
            # portable spelling of the same value)
            upper_bound = math.inf
        if upper_bound > max_upper_bound:
            max_upper_bound = upper_bound
            ad = i
    ads_selected.append(ad)
    number_of_selections[ad] += 1
    reward = dataset.values[n, ad]
    sums_of_rewards[ad] += reward
    total_reward += reward
# + [markdown] colab_type="text" id="5AH_uCEz68rb"
# # Histograma de resultados
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="oeuAy8LI69vi" outputId="10346439-d6ac-4abd-b5bb-033e9a284716"
plt.hist(ads_selected)
plt.title("Histograma de anuncios")
plt.xlabel("ID del Anuncio")
plt.ylabel("Frecuencia de visualización del anuncio")
plt.show()
| datasets/Part 6 - Reinforcement Learning/Section 32 - Upper Confidence Bound (UCB)/upper_confidence_bound.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import add_to_syspath
import tensorflow as tf
import tensorflow.keras.backend as K
from lucid.modelzoo import audio_base
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
import lucid.optvis.transform_audio2 as ta2
from tqdm import tqdm
import numpy as np
model = audio_base.Model.load("../models/audionet2_lucid/frozen_graph_inference.pb")
# +
#transforms = ta2.standard_transforms
def vertical_scaling(scales, seed=None):
    """Build an amplitude ("vertical") scaling transform.

    Returns a closure that converts its argument to a float32 tensor and
    multiplies it by one scale chosen at random from `scales` via
    ta2._rand_select; `seed` makes the choice reproducible.
    """
    def inner(t):
        t = tf.convert_to_tensor(t, preferred_dtype=tf.float32)
        scale = ta2._rand_select(scales, seed=seed)
        return scale*t
    return inner
# Build a dataflow graph.
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
d = tf.constant([[1.0, 1.0], [0.0, 1.0]])
e = vertical_scaling([2.])(c)
f = horizontal_scaling()(vis)
# Construct a `Session` to execute the graph.
sess = tf.Session()
# Execute the graph and store the value that `e` represents in `result`.
r = sess.run(f)
import matplotlib.pyplot as plt
plt.plot(vis[0,:,0])
plt.show()
plt.plot(r[0,:,0])
plt.show()
def horizontal_scaling(seed=None):
    """Build a spectrum ("horizontal") scaling transform for audio tensors.

    Scales the signal's rFFT spectrum by a random factor in [0.5, 1.5)
    and inverse-transforms back, returning a tensor shaped (1, samples, 1).

    NOTE(review): the random factor is drawn with np.random.random() at
    graph-construction time, so it is fixed for the lifetime of the graph;
    `seed` is currently unused -- confirm whether a per-step random scale
    (e.g. tf.random.uniform) was intended.
    """
    def inner(t):
        freq = tf.signal.rfft(t[0, :, 0])
        print(freq)
        # (The original also built `changed_freq = tf.constant([freq[0,i*,0]])`
        # here, which was a syntax error and whose result was never used;
        # that line has been removed.)
        res = tf.expand_dims(
            tf.expand_dims(tf.signal.irfft((np.random.random() + 0.5) * freq), 0),
            -1)
        print(res)
        return res
    return inner
transforms = [vertical_scaling([1 + (i - 5) / 50. for i in range(11)]), horizontal_scaling()]
# -
with tf.Graph().as_default() as graph, tf.Session() as sess:
T = render.make_vis_T(model, objectives.channel("conv5/Relu",2) + objectives.channel("conv5/Relu",5), param_f=lambda : param.audio(8000), transforms=transforms, relu_gradient_override=True)
loss, vis_op, t_audio = T("loss"), T("vis_op"), T("input")
tf.global_variables_initializer().run()
try:
for i in tqdm(range(1000)):
loss_, _ = sess.run([loss, vis_op])
vis = t_audio.eval()
print(loss_)
except KeyboardInterrupt:
log.warning("Interrupted optimization at step {:d}.".format(i+1))
vis = t_audio.eval()
show(np.hstack(vis))
import matplotlib.pyplot as plt
plt.plot(vis[0,:,0])
from IPython.display import Audio
Audio(vis[0,:,0], rate=8000)
model.show_graph()
| notebooks/audio_viz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### Import Library and Dataset
import pandas as pd
data = pd.read_csv('https://storage.googleapis.com/dqlab-dataset/pythonTutorial/ecommerce_banner_promo.csv')
data
# ### Exploratory Data Analysis
# +
print("Top & Bottom 5 Data")
print(data.head())
print("Dataset Information")
print(data.info())
print("Descriptive Statistics")
print(data.describe())
print("Data's Shape")
print(data.shape)
# -
# #### Finding Correlation
# seaborn is only imported in a later cell of this notebook; import it
# here too so this cell works when run top-to-bottom (otherwise `sns`
# raises NameError).
import seaborn as sns

corr = data.corr()  # compute the correlation matrix once, reuse twice
print(corr)
sns.heatmap(corr)
# #### Checking labels distribution
print(data.groupby('Clicked on Ad').size())
# #### EDA Visualization
# +
import matplotlib.pyplot as plt
import seaborn as sns
# Matplotlib and Seaborn setup
sns.set_style('whitegrid')
plt.style.use('fivethirtyeight')
# -
plt.figure(figsize=(10, 5))
plt.hist(data['Age'], bins = data.Age.nunique())
plt.xlabel('Age')
plt.tight_layout()
plt.show()
plt.figure()
sns.pairplot(data)
plt.show()
# #### Checking Missing Value
print(data.isnull().sum().sum())
# ### Splitting the Data
# +
from sklearn.model_selection import train_test_split
X = data.drop(['Ad Topic Line','City','Country','Timestamp','Clicked on Ad'], axis = 1)
y = data['Clicked on Ad']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 42)
# -
# ### Make the model using Logistic Regression
from sklearn.linear_model import LogisticRegression
logreg = LogisticRegression()
logreg = logreg.fit(X_train,y_train)
y_pred = logreg.predict(X_test)
# ### Evaluation
print("Evaluasi Model Performance:")
print("Training Accuracy :", logreg.score(X_train, y_train))
print("Testing Accuracy :", logreg.score(X_test, y_test))
# +
from sklearn.metrics import confusion_matrix, classification_report
# Apply classification_report function to y_test and y_pred
print("Classification report:")
cr = classification_report(y_test, y_pred)
print(cr)
# -
# Apply confusion_matrix function to y_test and y_pred
print("Confusion matrix:")
cm = confusion_matrix(y_test, y_pred)
print(cm)
# ### Confusion Matrix Visualization
confusion_matrix_df = pd.DataFrame((confusion_matrix(y_test, y_pred)))
# +
plt.figure()
heatmap = sns.heatmap(confusion_matrix_df, annot=True, annot_kws={'size': 14}, fmt='d', cmap='YlGnBu')
heatmap.yaxis.set_ticklabels(heatmap.yaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
heatmap.xaxis.set_ticklabels(heatmap.xaxis.get_ticklabels(), rotation=0, ha='right', fontsize=14)
plt.title('Confusion Matrix for Training Model\n(Logistic Regression)\n', fontsize=18, color='darkblue')
plt.ylabel('True label', fontsize=14)
plt.xlabel('Predicted label', fontsize=14)
plt.tight_layout()
plt.show()
| 09 - Logistic Regression Mini Project/Logistic_Regression_Mini_Project.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="../../../images/qiskit_header.png" alt="Note: In order for images to show up in this jupyter notebook you need to select File => Trusted Notebook" align="middle">
# # Qiskit Aer: Applying noise to custom unitary gates
#
# The latest version of this notebook is available on https://github.com/Qiskit/qiskit-tutorial.
# ## Introduction
#
# This notebook shows how to add custom unitary gates to a quantum circuit, and use them for noise simulations in Qiskit Aer.
# +
from qiskit import execute, QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.quantum_info.operators import Operator
from qiskit.quantum_info import process_fidelity
from qiskit.providers.aer import QasmSimulator
from qiskit.providers.aer.noise import NoiseModel, errors
from qiskit.tools.visualization import plot_histogram
# -
# ## Creating matrix operators
#
# We can use the `Operator` class in `qiskit.quantum_info.operators` to represent arbitrary matrix operators. If the operator is unitary it can then be added to a quantum circuit and used for simulation on Qiskit Aer.
#
# Let's create two operators below: one for a CNOT gate, and one for an iSWAP gate:
#
# $$\mbox{CNOT} = \left(\begin{array}
# & 1 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 1 \\
# 0 & 0 & 1 & 0 \\
# 0 & 1 & 0 & 0
# \end{array}\right), \quad
# \mbox{iSWAP} = \left(\begin{array}
# & 1 & 0 & 0 & 0 \\
# 0 & 0 & i & 0 \\
# 0 & i & 0 & 0 \\
# 0 & 0 & 0 & 1
# \end{array}\right)$$
#
# +
# CNOT matrix operator with qubit-0 as control and qubit-1 as target
cx_op = Operator([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]])
# iSWAP matrix operator
iswap_op = Operator([[1, 0, 0, 0],
[0, 0, 1j, 0],
[0, 1j, 0, 0],
[0, 0, 0, 1]])
# -
# **Note:** The matrix is specified with respect to the tensor product $U_{b}\otimes U_{a}$ for qubits specified by list `[a, b]`.
# ## Using operators in circuits
#
# Let us demonstrate how these can be used in a circuit. We will consider an example of implementing a CNOT gate decomposed in terms of single-qubit gates and the iSWAP gate as follows.
# +
# CNOT in terms of iSWAP and single-qubit gates
cx_circ = QuantumCircuit(2)
# Add gates: S-dagger/Hadamard single-qubit rotations sandwiching two
# applications of the custom iSWAP unitary reproduce a CNOT (control
# qubit 0, target qubit 1).  The label 'iswap' lets a NoiseModel attach
# errors to exactly these unitaries later in the notebook.
cx_circ.sdg(1)
cx_circ.h(1)
cx_circ.sdg(0)
cx_circ.unitary(iswap_op, [0, 1], label='iswap')
cx_circ.sdg(0)
cx_circ.h(0)
cx_circ.sdg(0)
cx_circ.unitary(iswap_op, [0, 1], label='iswap')
cx_circ.s(1)
print(cx_circ)
# -
# Note that we have assigned an optional *label* of `"iswap"` to the unitary when it is inserted. This allows us to identify this unitary in a Qiskit Aer `NoiseModel` so that we can add errors to these custom unitary gates in noisy circuit simulations.
# We can confirm this circuit returns the correct unitary by using the `Operator` class as a simulator for the circuit:
# Simulate the unitary for the circuit using Operator:
unitary = Operator(cx_circ)
print(unitary)
# And to confirm the output is correct we can compute the process fidelity:
fpro = process_fidelity(cx_op, unitary)
print("Process Fidelity = ", fpro)
# ## Creating a custom unitary in a noise model
#
# The Qiskit Aer QasmSimulator supports simulation of arbitrary unitary operators directly as specified by the `"unitary"` in the basis gates.
'unitary' in QasmSimulator().configuration().basis_gates
# This allows us to add noise models to arbitrary unitaries in our simulation when we identity them using the optional `label` argument of `QuantumCircuit.unitary`.
#
# We will now do this by creating a `NoiseModel` that includes a quantum error channel on our custom iSWAP gate. For our example we will create an 2-qubit error consisting of two single-qubit amplitude damping channels with different damping parameters. For now we will assume all the other circuit instructions are ideal.
# +
# Error parameters
param_q0 = 0.05 # damping parameter for qubit-0
param_q1 = 0.1 # damping parameter for qubit-1
# Construct the error
qerror_q0 = errors.amplitude_damping_error(param_q0)
qerror_q1 = errors.amplitude_damping_error(param_q1)
iswap_error = qerror_q1.tensor(qerror_q0)
# Build the noise model by adding the error to the "iswap" gate
noise_model = NoiseModel()
noise_model.add_all_qubit_quantum_error(iswap_error, 'iswap')
# -
# Note that when we add an error to a custom label such as `"iswap"` the `NoiseModel` does not know what gate this label is supposed to apply to, so we must manually add the desired gate string to the noise model `basis_gates`. This ensures that the compiler will unroll to the correct basis gates for the noise model simulation. This can be done using the `NoiseModel.add_basis_gates` function:
noise_model.add_basis_gates(['unitary'])
print(noise_model.basis_gates)
# By default the basis gates of a noise model are `['cx','id','u3']` plus any standard QasmSimulator basis gates that are added to the noise model.
# ## Simulating a custom unitary noise model
# Let us first take our previous CX circuit and add an initial Hadamard gate and final measurement to create a Bell-state preparation circuit that we may simulate on the `QasmSimulator`, both for the ideal and noisy cases
# Bell state circuit where iSWAPS should be inserted at barrier locations
bell_circ = QuantumCircuit(2, 2, name='bell')
bell_circ.h(0)
bell_circ = bell_circ + cx_circ
bell_circ.measure([0,1], [0,1])
print(bell_circ)
# ### Ideal output
#
# Lets first see the ideal output. Since this generates a Bell-state we expect two peaks for 00 and 11
# Execute on the simulator without noise
job = execute(bell_circ, QasmSimulator(),
basis_gates=noise_model.basis_gates)
ideal_result = job.result()
ideal_counts = ideal_result.get_counts(bell_circ)
plot_histogram(ideal_counts, title='Ideal output for iSWAP bell-state preparation')
# ### Noisy circuit execution
#
# Finally lets now simulate it with our custom noise model. Since there is a small amplitude damping error on the two qubit gates we expect small additional peaks for the 01 and 10 outcome probabilities
# Execute on the simulator without noise
job = execute(bell_circ, QasmSimulator(),
basis_gates=noise_model.basis_gates,
noise_model=noise_model)
noise_result = job.result()
noise_counts = noise_result.get_counts(bell_circ)
plot_histogram(noise_counts, title='Noisy output for iSWAP bell-state preparation')
import qiskit.tools.jupyter
# %qiskit_version_table
# %qiskit_copyright
| qiskit/advanced/aer/4_custom_gate_noise.ipynb |
# # [Classes](https://docs.python.org/3/tutorial/classes.html#a-first-look-at-classes)
class MyFirstClass:
    """Tiny example class: stores a name and can greet by it."""

    def __init__(self, name):
        self.name = name

    def greet(self):
        print(f'Hello {self.name}!')
my_instance = MyFirstClass('<NAME>')
print('my_instance: {}'.format(my_instance))
print('type: {}'.format(type(my_instance)))
print('my_instance.name: {}'.format(my_instance.name))
# ## Methods
# The functions inside classes are called methods. They are used similarly as functions.
alice = MyFirstClass(name='Alice')
alice.greet()
# ### `__init__()`
# `__init__()` is a special method that is used for initialising instances of the class. It's called when you create an instance of the class.
# +
class Example:
    """Shows that __init__ runs automatically when an instance is made."""

    def __init__(self):
        print('Now we are inside __init__')

# Creating an instance calls __init__ between these two prints.
print('creating instance of Example')
example = Example()
print('instance created')
# -
# `__init__()` is typically used for initialising instance variables of your class. These can be listed as arguments after `self`. To be able to access these instance variables later during your instance's lifetime, you have to save them into `self`. `self` is the first argument of the methods of your class and it's your access to the instance variables and other methods.
# +
class Example:
    """Stores the two values given at construction and prints them."""

    def __init__(self, var1, var2):
        self.first_var = var1
        self.second_var = var2

    def print_variables(self):
        print(f'{self.first_var} {self.second_var}')


e = Example('abc', 123)
e.print_variables()
# -
# ### `__str__()`
# `__str__()` is a special method which is called when an instance of the class is converted to string (e.g. when you want to print the instance). In other words, by defining `__str__` method for your class, you can decide what's the printable version of the instances of your class. The method should return a string.
# +
class Person:
    """A named person whose str() form is 'Person: <name>'."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __str__(self):
        # Called whenever the instance is converted to a string,
        # e.g. by print() or str.format().
        return f'Person: {self.name}'
jack = Person('Jack', 82)
print('This is the string presentation of jack: {}'.format(jack))
# -
# ## Class variables vs instance variables
# Class variables are shared between all the instances of that class whereas instance variables can hold different values between different instances of that class.
# +
class Example:
    """Contrasts class variables (shared) with instance variables."""

    # Class variables -- a single copy, shared by every instance.
    name = 'Example class'
    description = 'Just an example of a simple class'

    def __init__(self, var1):
        # Instance variable -- a fresh value held per instance.
        self.instance_variable = var1

    def show_info(self):
        print(f'instance_variable: {self.instance_variable}, '
              f'name: {Example.name}, description: {Example.description}')
inst1 = Example('foo')
inst2 = Example('bar')
# name and description have identical values between instances
assert inst1.name == inst2.name == Example.name
assert inst1.description == inst2.description == Example.description
# If you change the value of a class variable, it's changed across all instances
Example.name = 'Modified name'
inst1.show_info()
inst2.show_info()
# -
# ## Public vs private
# In Python there's no strict separation between private and public methods or instance variables. The convention is to start the name of the method or instance variable with underscore if it should be treated as private. Private means that it should not be accessed from outside of the class.
#
# For example, let's consider that we have a `Person` class which has `age` as an instance variable. We want that `age` is not directly accessed (e.g. changed) after the instance is created. In Python, this would be:
# +
class Person:
    """Stores age in ``_age``.

    The leading underscore is the Python convention for "internal --
    do not access from outside the class"; it is not enforced by the
    language.
    """

    def __init__(self, age):
        self._age = age
example_person = Person(age=15)
# You can't do this:
# print(example_person.age)
# Nor this:
# example_person.age = 16
# -
# If you want the `age` to be readable but not writable, you can use `property`:
# +
class Person:
    """Person whose age is readable but not writable from outside."""

    def __init__(self, age):
        self._age = age

    @property
    def age(self):
        # Read-only view of _age: no setter is defined, so assigning to
        # `person.age` raises AttributeError.
        return self._age
example_person = Person(age=15)
# Now you can do this:
print(example_person.age)
# But not this:
#example_person.age = 16
# -
# This way you can have a controlled access to the instance variables of your class:
# +
class Person:
    """Person with controlled access to age.

    The age is readable through a property and mutated only via
    celebrate_birthday(), never assigned directly from outside.
    """

    def __init__(self, age):
        self._age = age

    @property
    def age(self):
        return self._age

    def celebrate_birthday(self):
        self._age += 1
        print(f'Happy bday for {self._age} years old!')
example_person = Person(age=15)
example_person.celebrate_birthday()
# -
# ## Introduction to inheritance
# +
class Animal:
    """Base class with a default greeting and favourite food."""

    def greet(self):
        print('Hello, I am an animal')

    @property
    def favorite_food(self):
        return 'beef'


class Dog(Animal):
    """Overrides greet(); inherits favorite_food from Animal."""

    def greet(self):
        print('wof wof')


class Cat(Animal):
    """Overrides favorite_food; inherits greet() from Animal."""

    @property
    def favorite_food(self):
        return 'fish'
# +
dog = Dog()
dog.greet()
print("Dog's favorite food is {}".format(dog.favorite_food))
cat = Cat()
cat.greet()
print("Cat's favorite food is {}".format(cat.favorite_food))
# -
| Exercise_notebooks/On_topic/07_Object_oriented/02_classes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + [markdown] origin_pos=0
# # Convolutional Neural Networks (LeNet)
# :label:`sec_lenet`
#
# We now have all the ingredients required to assemble
# a fully-functional CNN.
# In our earlier encounter with image data,
# we applied
# a softmax regression model (:numref:`sec_softmax_scratch`)
# and
# an MLP model (:numref:`sec_mlp_scratch`)
# to pictures of clothing in the Fashion-MNIST dataset.
# To make such data amenable to softmax regression and MLPs,
# we first flattened each image from a $28\times28$ matrix
# into a fixed-length $784$-dimensional vector,
# and thereafter processed them with fully-connected layers.
# Now that we have a handle on convolutional layers,
# we can retain the spatial structure in our images.
# As an additional benefit of replacing fully-connected layers with convolutional layers,
# we will enjoy more parsimonious models that require far fewer parameters.
#
# In this section, we will introduce *LeNet*,
# among the first published CNNs
# to capture wide attention for its performance on computer vision tasks.
# The model was introduced by (and named for) <NAME>,
# then a researcher at AT&T Bell Labs,
# for the purpose of recognizing handwritten digits in images :cite:`LeCun.Bottou.Bengio.ea.1998`.
# This work represented the culmination
# of a decade of research developing the technology.
# In 1989, LeCun published the first study to successfully
# train CNNs via backpropagation.
#
#
# At the time LeNet achieved outstanding results
# matching the performance of support vector machines,
# then a dominant approach in supervised learning.
# LeNet was eventually adapted to recognize digits
# for processing deposits in ATM machines.
# To this day, some ATMs still run the code
# that Yann and his colleague <NAME> wrote in the 1990s!
#
#
# ## LeNet
#
# At a high level, LeNet (LeNet-5) consists of two parts:
# (i) a convolutional encoder consisting of two convolutional layers; and
# (ii) a dense block consisting of three fully-connected layers;
# The architecture is summarized in :numref:`img_lenet`.
#
# 
# :label:`img_lenet`
#
# The basic units in each convolutional block
# are a convolutional layer, a sigmoid activation function,
# and a subsequent average pooling operation.
# Note that while ReLUs and max-pooling work better,
# these discoveries had not yet been made in the 1990s.
# Each convolutional layer uses a $5\times 5$ kernel
# and a sigmoid activation function.
# These layers map spatially arranged inputs
# to a number of two-dimensional feature maps, typically
# increasing the number of channels.
# The first convolutional layer has 6 output channels,
# while the second has 16.
# Each $2\times2$ pooling operation (stride 2)
# reduces dimensionality by a factor of $4$ via spatial downsampling.
# The convolutional block emits an output with shape given by
# (batch size, number of channel, height, width).
#
# In order to pass output from the convolutional block
# to the dense block,
# we must flatten each example in the minibatch.
# In other words, we take this four-dimensional input and transform it
# into the two-dimensional input expected by fully-connected layers:
# as a reminder, the two-dimensional representation that we desire
# uses the first dimension to index examples in the minibatch
# and the second to give the flat vector representation of each example.
# LeNet's dense block has three fully-connected layers,
# with 120, 84, and 10 outputs, respectively.
# Because we are still performing classification,
# the 10-dimensional output layer corresponds
# to the number of possible output classes.
#
# While getting to the point where you truly understand
# what is going on inside LeNet may have taken a bit of work,
# hopefully the following code snippet will convince you
# that implementing such models with modern deep learning frameworks
# is remarkably simple.
# We need only to instantiate a `Sequential` block
# and chain together the appropriate layers.
#
# + origin_pos=1 tab=["mxnet"]
from d2l import mxnet as d2l
from mxnet import autograd, gluon, init, np, npx
from mxnet.gluon import nn
npx.set_np()
net = nn.Sequential()
net.add(nn.Conv2D(channels=6, kernel_size=5, padding=2, activation='sigmoid'),
nn.AvgPool2D(pool_size=2, strides=2),
nn.Conv2D(channels=16, kernel_size=5, activation='sigmoid'),
nn.AvgPool2D(pool_size=2, strides=2),
# `Dense` will transform an input of the shape (batch size, number of
# channels, height, width) into an input of the shape (batch size,
# number of channels * height * width) automatically by default
nn.Dense(120, activation='sigmoid'),
nn.Dense(84, activation='sigmoid'),
nn.Dense(10))
# + [markdown] origin_pos=4
# We took a small liberty with the original model,
# removing the Gaussian activation in the final layer.
# Other than that, this network matches
# the original LeNet-5 architecture.
#
# By passing a single-channel (black and white)
# $28 \times 28$ image through the network
# and printing the output shape at each layer,
# we can inspect the model to make sure
# that its operations line up with
# what we expect from :numref:`img_lenet_vert`.
#
# 
# :label:`img_lenet_vert`
#
# + origin_pos=5 tab=["mxnet"]
X = np.random.uniform(size=(1, 1, 28, 28))
net.initialize()
for layer in net:
X = layer(X)
print(layer.name, 'output shape:\t', X.shape)
# + [markdown] origin_pos=8
# Note that the height and width of the representation
# at each layer throughout the convolutional block
# is reduced (compared with the previous layer).
# The first convolutional layer uses 2 pixels of padding
# to compensate for the reduction in height and width
# that would otherwise result from using a $5 \times 5$ kernel.
# In contrast, the second convolutional layer forgoes padding,
# and thus the height and width are both reduced by 4 pixels.
# As we go up the stack of layers,
# the number of channels increases layer-over-layer
# from 1 in the input to 6 after the first convolutional layer
# and 16 after the second convolutional layer.
# However, each pooling layer halves the height and width.
# Finally, each fully-connected layer reduces dimensionality,
# finally emitting an output whose dimension
# matches the number of classes.
#
#
#
# ## Training
#
# Now that we have implemented the model,
# let us run an experiment to see how LeNet fares on Fashion-MNIST.
#
# + origin_pos=9 tab=["mxnet"]
batch_size = 256
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size=batch_size)
# + [markdown] origin_pos=10
# While CNNs have fewer parameters,
# they can still be more expensive to compute
# than similarly deep MLPs
# because each parameter participates in many more
# multiplications.
# If you have access to a GPU, this might be a good time
# to put it into action to speed up training.
#
# + [markdown] origin_pos=11 tab=["mxnet"]
# For evaluation, we need to make a slight modification
# to the `evaluate_accuracy` function that we described
# in :numref:`sec_softmax_scratch`.
# Since the full dataset is in the main memory,
# we need to copy it to the GPU memory before the model uses GPU to compute with the dataset.
#
# + origin_pos=12 tab=["mxnet"]
def evaluate_accuracy_gpu(net, data_iter, device=None):  #@save
    """Compute the accuracy for a model on a dataset using a GPU.

    Args:
        net: trained Gluon network.
        data_iter: minibatch iterator yielding (features, labels).
        device: MXNet context to evaluate on; defaults to the context
            holding the network's first parameter.

    Returns the fraction of correctly classified examples.
    """
    if not device:  # Query the first device where the first parameter is on
        device = list(net.collect_params().values())[0].list_ctx()[0]
    # Accumulates (no. of correct predictions, no. of predictions)
    metric = d2l.Accumulator(2)
    for X, y in data_iter:
        # Copy each minibatch to the target device before the forward pass
        X, y = X.as_in_ctx(device), y.as_in_ctx(device)
        metric.add(d2l.accuracy(net(X), y), y.size)
    return metric[0] / metric[1]
# + [markdown] origin_pos=14
# We also need to update our training function to deal with GPUs.
# Unlike the `train_epoch_ch3` defined in :numref:`sec_softmax_scratch`,
# we now need to move each minibatch of data
# to our designated device (hopefully, the GPU)
# prior to making the forward and backward propagations.
#
# The training function `train_ch6` is also similar
# to `train_ch3` defined in :numref:`sec_softmax_scratch`.
# Since we will be implementing networks with many layers
# going forward, we will rely primarily on high-level APIs.
# The following training function assumes a model created from high-level APIs
# as input and is optimized accordingly.
# We initialize the model parameters
# on the device indicated by the `device` argument, using Xavier initialization as introduced in :numref:`subsec_xavier`.
# Just as with MLPs, our loss function is cross-entropy,
# and we minimize it via minibatch stochastic gradient descent.
# Since each epoch takes tens of seconds to run,
# we visualize the training loss more frequently.
#
# + origin_pos=15 tab=["mxnet"]
#@save
def train_ch6(net, train_iter, test_iter, num_epochs, lr,
              device=d2l.try_gpu()):
    """Train a model with a GPU (defined in Chapter 6).

    Args:
        net: Gluon network built from high-level APIs; re-initialized
            here on `device` with Xavier initialization.
        train_iter, test_iter: minibatch iterators for the two splits.
        num_epochs: number of passes over the training data.
        lr: learning rate for minibatch SGD.
        device: MXNet context to train on (default: first available GPU).

    Plots train loss / train acc / test acc as training progresses and
    prints final metrics plus examples-per-second throughput.
    """
    net.initialize(force_reinit=True, ctx=device, init=init.Xavier())
    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(net.collect_params(),
                            'sgd', {'learning_rate': lr})
    animator = d2l.Animator(xlabel='epoch', xlim=[1, num_epochs],
                            legend=['train loss', 'train acc', 'test acc'])
    timer, num_batches = d2l.Timer(), len(train_iter)
    for epoch in range(num_epochs):
        # Sum of training loss, sum of training accuracy, no. of examples
        metric = d2l.Accumulator(3)
        for i, (X, y) in enumerate(train_iter):
            timer.start()
            # Here is the major difference from `d2l.train_epoch_ch3`:
            # each minibatch is moved to the training device first
            X, y = X.as_in_ctx(device), y.as_in_ctx(device)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y)
            l.backward()
            trainer.step(X.shape[0])
            metric.add(l.sum(), d2l.accuracy(y_hat, y), X.shape[0])
            timer.stop()
            train_l = metric[0] / metric[2]
            train_acc = metric[1] / metric[2]
            # Refresh the plot 5 times per epoch (and on the last batch)
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches,
                             (train_l, train_acc, None))
        test_acc = evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f'loss {train_l:.3f}, train acc {train_acc:.3f}, '
          f'test acc {test_acc:.3f}')
    print(f'{metric[2] * num_epochs / timer.sum():.1f} examples/sec '
          f'on {str(device)}')
# + [markdown] origin_pos=18
# Now let us train and evaluate the LeNet-5 model.
#
# + origin_pos=19 tab=["mxnet"]
# Hyperparameters: SGD learning rate and number of training epochs.
lr, num_epochs = 0.9, 10
train_ch6(net, train_iter, test_iter, num_epochs, lr)
# + [markdown] origin_pos=20
# ## Summary
#
# * A CNN is a network that employs convolutional layers.
# * In a CNN, we interleave convolutions, nonlinearities, and (often) pooling operations.
# * In a CNN, convolutional layers are typically arranged so that they gradually decrease the spatial resolution of the representations, while increasing the number of channels.
# * In traditional CNNs, the representations encoded by the convolutional blocks are processed by one or more fully-connected layers prior to emitting output.
# * LeNet was arguably the first successful deployment of such a network.
#
# ## Exercises
#
# 1. Replace the average pooling with max pooling. What happens?
# 1. Try to construct a more complex network based on LeNet to improve its accuracy.
# 1. Adjust the convolution window size.
# 1. Adjust the number of output channels.
# 1. Adjust the activation function (e.g., ReLU).
# 1. Adjust the number of convolution layers.
# 1. Adjust the number of fully connected layers.
# 1. Adjust the learning rates and other training details (e.g., initialization and number of epochs.)
# 1. Try out the improved network on the original MNIST dataset.
# 1. Display the activations of the first and second layer of LeNet for different inputs (e.g., sweaters and coats).
#
# + [markdown] origin_pos=21 tab=["mxnet"]
# [Discussions](https://discuss.d2l.ai/t/73)
#
| d2l-en/mxnet/chapter_convolutional-neural-networks/lenet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="8kdsGkYJXXKc"
#@title Copyright 2020 The Earth Engine Community Authors { display-mode: "form" }
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="l18M9_r5XmAQ"
# # Time Series Visualization with Altair
# Author: jdbcode
#
# This tutorial provides methods for generating time series data in Earth Engine and visualizing it with the [Altair](https://altair-viz.github.io/) library using drought and vegetation response as an example.
#
# Topics include:
#
# - Time series region reduction in Earth Engine
# - Formatting a table in Earth Engine
# - Transferring an Earth Engine table to a Colab Python kernel
# - Converting an Earth Engine table to a [pandas](https://pandas.pydata.org/) DataFrame
# - Data representation with various Altair chart types
#
# **Note** that this tutorial uses the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) in a [Colab notebook](https://developers.google.com/earth-engine/python_install-colab.html).
#
#
# + [markdown] id="e0cJv0RW40rx"
# ## Context
#
# At the heart of this tutorial is the notion of data reduction and the need to transform data into insights to help inform our understanding of Earth processes and human's role in them. It combines a series of technologies, each best suited to a particular task in the data reduction process. **Earth Engine** is used to access, clean, and reduce large amounts of spatiotemporal data, **pandas** is used to analyze and organize the results, and **Altair** is used to visualize the results.
#
# **Note**: This notebook demonstrates an analysis template and interactive workflow that is appropriate for a certain size of dataset, but there are limitations to interactive computation time and server-to-client data transfer size imposed by Colab and Earth Engine. To analyze even larger datasets, you may need to modify the workflow to [export](https://developers.google.com/earth-engine/python_install#exporting-data) `FeatureCollection` results from Earth Engine as static assets and then use the static assets to perform the subsequent steps involving Earth Engine table formatting, conversion to pandas DataFrame, and charting with Altair.
# + [markdown] id="5UmHTUZJUkUG"
# ## Materials
# + [markdown] id="T8joJ18b9LA3"
# ### Datasets
#
# Climate
#
# - Drought severity ([PDSI](https://developers.google.com/earth-engine/datasets/catalog/GRIDMET_DROUGHT))
# - Historical climate ([PRISM](https://developers.google.com/earth-engine/datasets/catalog/OREGONSTATE_PRISM_AN81m))
# - Projected climate ([NEX-DCP30](https://developers.google.com/earth-engine/datasets/catalog/NASA_NEX-DCP30))
#
# Vegetation proxies
#
# - NDVI ([MODIS](https://developers.google.com/earth-engine/datasets/catalog/MODIS_006_MOD13A2))
# - NBR ([Landsat](https://developers.google.com/earth-engine/datasets/catalog/landsat/))
# + [markdown] id="QtiMHMPj5Uha"
# ### Region of interest
#
# The region of interest for these examples is the Sierra Nevada ecoregion of California. The vegetation grades from mostly ponderosa pine and Douglas-fir at low elevations on the western side, to pines and Sierra juniper on the eastern side, and to fir and other conifers at higher elevations.
#
# + [markdown] id="YLDFvqCczdVc"
# ## General workflow
#
# Preparation of every dataset for visualization follows the same basic steps:
#
# 1. Filter the dataset (server-side Earth Engine)
# 2. Reduce the data region by a statistic (server-side Earth Engine)
# 3. Format the region reduction into a table (server-side Earth Engine)
# 4. Convert the Earth Engine table to a DataFrame (server-side Earth Engine > client-side Python kernel)
# 5. Alter the DataFrame (client-side pandas)
# 6. Plot the DataFrame (client-side Altair)
#
# The first dataset will walk through each step in detail. Following examples will provide less description, unless there is variation that merits note.
# + [markdown] id="_9YDwlnVQU5w"
# ## Python setup
# + [markdown] id="xCezQcI-Qd3o"
# ### Earth Engine API
#
# 1. Import the Earth Engine library.
# 2. Authenticate access (registration verification and Google account access).
# 3. Initialize the API.
# + id="DUBNDgqyiPZW"
import ee
# Trigger the OAuth flow (registration/account verification), then
# initialize the Earth Engine API client for this session.
ee.Authenticate()
ee.Initialize()
# + [markdown] id="i6rS3zTFht5P"
# ### Other libraries
#
# Import other libraries used in this notebook.
#
# - [**pandas**](https://pandas.pydata.org/): data analysis (including the [DataFrame](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html) data structure)
# - [**altair**](https://altair-viz.github.io/): declarative visualization library (used for charting)
# - [**numpy**](https://numpy.org/): array-processing package (used for linear regression)
# - [**folium**](https://python-visualization.github.io/folium/): interactive web map
#
# + id="wo_AiuPLh0kr"
import pandas as pd
import altair as alt
import numpy as np
import folium
# + [markdown] id="oxeeVDke3L8K"
# ## Region reduction function
#
# Reduction of pixels intersecting the region of interest to a statistic will be performed multiple times. Define a reusable function that can perform the task for each dataset. The function accepts arguments such as scale and reduction method to parameterize the operation for each particular analysis.
#
# **Note**: most of the reduction operations in this tutorial use a large pixel scale so that operations complete quickly. In your own application, set the scale and other parameter arguments as you wish.
#
# + id="0mkEjSrFlkjL"
def create_reduce_region_function(geometry,
                                  reducer=ee.Reducer.mean(),
                                  scale=1000,
                                  crs='EPSG:4326',
                                  bestEffort=True,
                                  maxPixels=1e13,
                                  tileScale=4):
    """Return a function that reduces an image over a fixed region.

    The returned callable is intended to be mapped over an
    ee.ImageCollection (via ee.ImageCollection.map()); it reduces the
    pixels of each image that intersect `geometry` to a per-band
    statistic. See the ee.Image.reduceRegion() documentation for more
    details on the parameters.

    Args:
        geometry:
            An ee.Geometry that defines the region over which to reduce data.
        reducer:
            Optional; An ee.Reducer that defines the reduction method.
        scale:
            Optional; A number that defines the nominal scale in meters of
            the projection to work in.
        crs:
            Optional; An ee.Projection or EPSG string ('EPSG:5070') that
            defines the projection to work in.
        bestEffort:
            Optional; A Boolean indicator for whether to use a larger scale
            if the geometry contains too many pixels at the given scale for
            the operation to succeed.
        maxPixels:
            Optional; A number specifying the maximum number of pixels to
            reduce.
        tileScale:
            Optional; A number representing the scaling factor used to
            reduce aggregation tile size; using a larger tileScale (e.g. 2
            or 4) may enable computations that run out of memory with the
            default.

    Returns:
        A function mapping an ee.Image to an ee.Feature whose properties
        hold the per-band region reduction results plus a 'millis'
        timestamp (milliseconds since the Unix epoch) to enable time
        series plotting.
    """

    def reduce_region_function(img):
        """Reduce `img` over `geometry` and tag the result with its timestamp."""
        region_stat = img.reduceRegion(
            reducer=reducer,
            geometry=geometry,
            scale=scale,
            crs=crs,
            bestEffort=bestEffort,
            maxPixels=maxPixels,
            tileScale=tileScale)
        # Anchor the statistics to the region geometry and attach the image
        # acquisition time in epoch milliseconds.
        return ee.Feature(geometry, region_stat).set(
            {'millis': img.date().millis()})

    return reduce_region_function
# + [markdown] id="T-k7xPg-3oog"
# ### Formatting
#
# The result of the region reduction function above applied to an `ee.ImageCollection` produces an `ee.FeatureCollection`. This data needs to be transferred to the Python kernel, but serialized feature collections are large and awkward to deal with. This step defines a function to convert the feature collection to an `ee.Dictionary` where the keys are feature property names and values are corresponding lists of property values, which `pandas` can deal with handily.
#
# 1. Extract the property values from the `ee.FeatureCollection` as a list of lists stored in an `ee.Dictionary` using `reduceColumns()`.
# 2. Extract the list of lists from the dictionary.
# 3. Add names to each list by converting to an `ee.Dictionary` where keys are property names and values are the corresponding value lists.
#
# The returned `ee.Dictionary` is essentially a table, where keys define columns and list elements define rows.
# + id="MuiFsdRL-qpY"
# Define a function to transfer feature properties to a dictionary.
def fc_to_dict(fc):
    """Convert an ee.FeatureCollection to an ee.Dictionary of column lists.

    Keys are feature property names; values are equal-length lists of the
    corresponding property values (one entry per feature) — i.e. a table.
    """
    prop_names = fc.first().propertyNames()
    # Gather one list per property in a single reduceColumns() call.
    value_lists = fc.reduceColumns(
        reducer=ee.Reducer.toList().repeat(prop_names.size()),
        selectors=prop_names).get('list')
    return ee.Dictionary.fromLists(prop_names, value_lists)
# + [markdown] id="fYFE71iWobNp"
# ## Drought severity
#
# In this section we'll look at a time series of drought severity as a calendar heat map and a bar chart.
# + [markdown] id="fPqa1d4v2vkV"
# ### Import data
#
# 1. Load the gridded Palmer Drought Severity Index (PDSI) data as an `ee.ImageCollection`.
# 2. Load the EPA Level-3 ecoregion boundaries as an `ee.FeatureCollection` and filter it to include only the Sierra Nevada region, which defines the area of interest (AOI).
# + id="iTl381vFnZs0"
# Palmer Drought Severity Index time series (gridded), plus the Sierra
# Nevada EPA Level-3 ecoregion geometry used as the area of interest.
pdsi = ee.ImageCollection('GRIDMET/DROUGHT').select('pdsi')
aoi = ee.FeatureCollection('EPA/Ecoregions/2013/L3').filter(
    ee.Filter.eq('na_l3name', 'Sierra Nevada')).geometry()
# + [markdown] id="6bV4k9Z-Abxx"
# **Note**: the `aoi` defined above will be used throughout this tutorial. In your own application, redefine it for your own area of interest.
# + [markdown] id="hLJ_XNtEog_L"
# ### Reduce data
#
# 1. Create a region reduction function.
# 2. Map the function over the `pdsi` image collection to reduce each image.
# 3. Filter out any resulting features that have null computed values (occurs when all pixels in an AOI are masked).
# + id="p_9MyvRrogoT"
# Mean PDSI over the AOI for each image; drop any features whose
# reduction returned null (occurs when all AOI pixels are masked).
reduce_pdsi = create_reduce_region_function(
    geometry=aoi, reducer=ee.Reducer.mean(), scale=5000, crs='EPSG:3310')
pdsi_stat_fc = ee.FeatureCollection(pdsi.map(reduce_pdsi)).filter(
    ee.Filter.notNull(pdsi.first().bandNames()))
# + [markdown] id="q7tN6uQ1-5Pw"
# ---
# **STOP**:
#
# ### _Optional export_
#
# _If your process is long-running_, you'll want to export the `pdsi_stat_fc` variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on. Please see the Developer Guide section on [exporting with the Python API](https://developers.google.com/earth-engine/python_install#exporting-data).
#
# Export to asset:
# + id="Q-WOTuEujehM"
# Batch export of the reduced feature collection to an Earth Engine asset.
# Replace YOUR_USER_NAME and uncomment task.start() to actually run it.
task = ee.batch.Export.table.toAsset(
    collection=pdsi_stat_fc,
    description='pdsi_stat_fc export',
    assetId='users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair')
# task.start()
# + [markdown] id="jgNv-hlNj0sO"
# Import the asset after the export completes:
# + id="v3Dx_z5KkOeI"
# pdsi_stat_fc = ee.FeatureCollection('users/YOUR_USER_NAME/pdsi_stat_fc_ts_vis_with_altair')
# + [markdown] id="_H4246-UPGVX"
# _\* Remove comments (#) to run the above cells._
# + [markdown] id="nOUWkPIRjnz5"
# ---
#
# **CONTINUE**:
# + [markdown] id="k71MycvMoUce"
# ### Server to client transfer
# + [markdown] id="5vmsFl28_O0x"
# The `ee.FeatureCollection` needs to be converted to a dictionary and transferred to the Python kernel.
#
# 1. Apply the `fc_to_dict` function to convert from `ee.FeatureCollection` to `ee.Dictionary`.
# 2. Call `getInfo()` on the `ee.Dictionary` to transfer the data client-side.
# + id="VgP2H-2f_bj-"
# Convert the FeatureCollection to an ee.Dictionary and transfer it
# client-side as a plain Python dict.
pdsi_dict = fc_to_dict(pdsi_stat_fc).getInfo()
# + [markdown] id="u-HLx08ynhV-"
# The result is a Python dictionary. Print a small part to see how it is formatted.
# + id="dQ_Nu0mZy5Wh"
print(type(pdsi_dict), '\n')
# Preview the first three values of each property list.
for prop in pdsi_dict.keys():
    print(prop + ':', pdsi_dict[prop][0:3] + ['...'])
# + [markdown] id="dUEiS9qOHCDn"
# Convert the Python dictionary to a pandas DataFrame.
# + id="P-5u7KQE2k5M"
pdsi_df = pd.DataFrame(pdsi_dict)
# + [markdown] id="e0he_CQbkAhh"
# Preview the DataFrame and check the column data types.
# + id="1Mpcb6s62pmy"
display(pdsi_df)
print(pdsi_df.dtypes)
# + [markdown] id="GZZsiwC6DgvZ"
# ### Add date columns
# + [markdown] id="eiqSmB_KlYt6"
# Add date columns derived from the milliseconds from Unix epoch column. The pandas library provides functions and objects for timestamps and the DataFrame object allows for easy mutation.
#
# Define a function to add date variables to the DataFrame: year, month, day, and day of year (DOY).
# + id="7SqbstkjlYA3"
# Function to add date variables to DataFrame.
def add_date_info(df):
    """Add Timestamp, Year, Month, Day, and DOY columns derived from 'millis'.

    Mutates and returns `df`; 'millis' is milliseconds since the Unix epoch.
    """
    df['Timestamp'] = pd.to_datetime(df['millis'], unit='ms')
    dates = pd.DatetimeIndex(df['Timestamp'])
    df['Year'] = dates.year
    df['Month'] = dates.month
    df['Day'] = dates.day
    df['DOY'] = dates.dayofyear
    return df
# + [markdown] id="KL9T59AYDfpD"
# **Note**: the above function for adding date information to a DataFrame will be used throughout this tutorial.
# + [markdown] id="AqKafJMfpqbF"
# Apply the `add_date_info` function to the PDSI DataFrame to add date attribute columns, preview the results.
# + id="kSJDElx4NLxf"
# Add Timestamp/Year/Month/Day/DOY columns derived from 'millis'.
pdsi_df = add_date_info(pdsi_df)
pdsi_df.head(5)
# + [markdown] id="SHzIm-TrrRsf"
# ### Rename and drop columns
# + [markdown] id="MtacuRy4QDwS"
# Often it is desirable to rename columns and/or remove unnecessary columns. Do both here and preview the DataFrame.
# + id="tQ72vESQQDUW"
# Rename 'pdsi' to 'PDSI' and drop columns no longer needed for charting.
pdsi_df = pdsi_df.rename(columns={
    'pdsi': 'PDSI'
}).drop(columns=['millis', 'system:index'])
pdsi_df.head(5)
# + [markdown] id="Sg6XQWritHcs"
# Check the data type of each column.
# + id="QsV2q0wvtPrK"
pdsi_df.dtypes
# + [markdown] id="BfVRIsBdtqPE"
# At this point the DataFrame is in good shape for charting with Altair.
# + [markdown] id="VbbOWe4OEWKz"
# ### Calendar heatmap
# + [markdown] id="HnRr12VRIxqG"
# Chart PDSI data as a calendar heatmap. Set observation year as the x-axis variable, month as y-axis, and PDSI value as color.
#
# Note that Altair features a convenient [method for aggregating values within groups](https://altair-viz.github.io/user_guide/transform/aggregate.html) while encoding the chart (i.e., no need to create a new DataFrame). The mean aggregate transform is applied here because each month has three PDSI observations (year and month are the grouping factors).
#
# Also note that a tooltip has been added to the chart; hovering over cells reveals the values of the selected variables.
# + id="BY4FJBtNuLeY"
# Calendar heatmap: year on x, month on y, mean monthly PDSI as color
# (each month holds three PDSI observations, hence the mean aggregate).
alt.Chart(pdsi_df).mark_rect().encode(
    x='Year:O',
    y='Month:O',
    color=alt.Color(
        'mean(PDSI):Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))),
    tooltip=[
        alt.Tooltip('Year:O', title='Year'),
        alt.Tooltip('Month:O', title='Month'),
        alt.Tooltip('mean(PDSI):Q', title='PDSI')
    ]).properties(width=600, height=300)
# + [markdown] id="TpDDqSMJQeUz"
# The calendar heat map is good for interpretation of relative intra- and inter-annual differences in PDSI. However, since the PDSI variable is represented by color, estimating absolute values and magnitude of difference is difficult.
# + [markdown] id="OrNfrsd-ifa8"
# ### Bar chart
# + [markdown] id="MSYnvI9pRUpz"
# Chart PDSI time series as a bar chart to more easily interpret absolute values and compare them over time. Here, the observation timestamp is represented on the x-axis and PDSI is represented by both the y-axis and color. Since each PDSI observation has a unique timestamp that can be plotted to the x-axis, there is no need to aggregate PDSI values as in the above chart. A tooltip is added to the chart; hover over the bars to reveal the values for each variable.
# + id="58p76nLEwaEY"
# Bar chart of the full PDSI time series; PDSI drives both bar height and
# color, making absolute values easier to read than in the heatmap.
alt.Chart(pdsi_df).mark_bar(size=1).encode(
    x='Timestamp:T',
    y='PDSI:Q',
    color=alt.Color(
        'PDSI:Q', scale=alt.Scale(scheme='redblue', domain=(-5, 5))),
    tooltip=[
        alt.Tooltip('Timestamp:T', title='Date'),
        alt.Tooltip('PDSI:Q', title='PDSI')
    ]).properties(width=600, height=300)
# + [markdown] id="AU5FYG7bTaao"
# This temporal bar chart makes it easier to interpret and compare absolute values of PDSI over time, but relative intra- and inter-annual variability are arguably harder to interpret because the division of year and month is not as distinct as in the calendar heatmap above.
#
# Take note of the extended and severe period of drought from 2012 through 2016. In the next section, we'll look for a vegetation response to this event.
# + [markdown] id="pWviMm-Q2VMF"
# ## Vegetation productivity
#
# NDVI is a proxy measure of photosynthetic capacity and is used in this tutorial to investigate vegetation response to the 2012-2016 drought identified in the PDSI bar chart above.
#
# MODIS provides an analysis-ready 16-day NDVI composite that is well suited for regional investigation of temporal vegetation dynamics. The following steps reduce and prepare this data for charting in the same manner as the PDSI data above; please refer to previous sections to review details.
# + [markdown] id="tigBp8Y9IWjC"
# ### Import and reduce
#
# 1. Load the MODIS NDVI data as an `ee.ImageCollection`.
# 2. Create a region reduction function.
# 3. Apply the function to all images in the time series.
# 4. Filter out features with null computed values.
# + id="YfTfhCiX8Ew4"
# 16-day MODIS NDVI composites, reduced to the AOI mean per image;
# features with null results (fully masked AOI) are filtered out.
ndvi = ee.ImageCollection('MODIS/006/MOD13A2').select('NDVI')
reduce_ndvi = create_reduce_region_function(
    geometry=aoi, reducer=ee.Reducer.mean(), scale=1000, crs='EPSG:3310')
ndvi_stat_fc = ee.FeatureCollection(ndvi.map(reduce_ndvi)).filter(
    ee.Filter.notNull(ndvi.first().bandNames()))
# + [markdown] id="qwtO27KMIemk"
# ---
# **STOP**:
#
# _If your process is long-running_, you'll want to export the `ndvi_stat_fc` variable as an asset using a batch task. Wait until the task finishes, import the asset, and continue on.
#
# Please see the above **_Optional export_** section for more details.
#
# **CONTINUE**:
#
# ---
#
#
# + [markdown] id="B7TI_Rd9xAqz"
# ### Prepare DataFrame
# + [markdown] id="aQM61YQFxKeb"
# 1. Transfer data from the server to the client.
# 2. Convert the Python dictionary to a pandas DataFrame.
# 3. Preview the DataFrame and check data types.
# + id="_P-3PUnfIeEM"
# Transfer the NDVI table client-side and load it into a DataFrame.
ndvi_dict = fc_to_dict(ndvi_stat_fc).getInfo()
ndvi_df = pd.DataFrame(ndvi_dict)
display(ndvi_df)
print(ndvi_df.dtypes)
# + [markdown] id="wIOh1UhGJvSz"
# 4. Remove the NDVI scaling.
# 5. Add date attribute columns.
# 6. Preview the DataFrame.
# + id="9wIikfzeJ2Yw"
# MOD13A2 NDVI is stored with a 10000 scale factor; undo it here.
ndvi_df['NDVI'] = ndvi_df['NDVI'] / 10000
ndvi_df = add_date_info(ndvi_df)
ndvi_df.head(5)
# + [markdown] id="D2zu-3XLxp3D"
# These NDVI time series data are now ready for plotting.
# + [markdown] id="OnYEVl-d3t63"
# ### DOY line chart
# + [markdown] id="_uBkZEtyH_wJ"
# Make a day of year (DOY) line chart where each line represents a year of observations. This chart makes it possible to compare the same observation date among years. Use it to compare NDVI values for years during the drought and not.
#
# Day of year is represented on the x-axis and NDVI on the y-axis. Each line represents a year and is distinguished by color. Note that this plot includes a tooltip and has been made interactive so that the axes can be zoomed and panned.
# + id="p3GeYhrjYBwI"
# DOY line chart: one line per year, with mouseover highlighting driven
# by a nearest-point selection on the 'Year' field.
highlight = alt.selection(
    type='single', on='mouseover', fields=['Year'], nearest=True)
base = alt.Chart(ndvi_df).encode(
    x=alt.X('DOY:Q', scale=alt.Scale(domain=[0, 353], clamp=True)),
    y=alt.Y('NDVI:Q', scale=alt.Scale(domain=[0.1, 0.6])),
    color=alt.Color('Year:O', scale=alt.Scale(scheme='magma')))
# Invisible points carry the tooltip and the selection trigger.
points = base.mark_circle().encode(
    opacity=alt.value(0),
    tooltip=[
        alt.Tooltip('Year:O', title='Year'),
        alt.Tooltip('DOY:Q', title='DOY'),
        alt.Tooltip('NDVI:Q', title='NDVI')
    ]).add_selection(highlight)
# The hovered year's line is drawn thicker (3 px) than the rest (1 px).
lines = base.mark_line().encode(
    size=alt.condition(~highlight, alt.value(1), alt.value(3)))
(points + lines).properties(width=600, height=350).interactive()
# + [markdown] id="VkDcTpYRJKef"
# The first thing to note is that winter dates (when there is snow in the Sierra Nevada ecoregion) exhibit highly variable inter-annual NDVI, but spring, summer, and fall dates are more consistent. With regard to drought effects on vegetation, summer and fall dates are the most sensitive time. Zooming into observations for the summer/fall days (224-272), you'll notice that many years have a u-shaped pattern where NDVI values decrease and then rise.
# + [markdown] id="cAaoCg7fmcPc"
# Another way to view these data is to plot the distribution of NDVI by DOY represented as an interquartile range envelope and median line. Here, these two charts are defined and then combined in the following snippet.
#
# 1. Define a base chart.
# 2. Define a line chart for median NDVI (note the use of aggregate median transform grouping by DOY).
# 3. Define a band chart using `'iqr'` (interquartile range) to represent NDVI distribution grouping on DOY.
# 4. Combine the line and band charts.
# + id="G-zFjVyPeK0f"
# Median NDVI line plus interquartile-range error band, grouped by DOY.
base = alt.Chart(ndvi_df).encode(
    x=alt.X('DOY:Q', scale=alt.Scale(domain=(150, 340))))
line = base.mark_line().encode(
    y=alt.Y('median(NDVI):Q', scale=alt.Scale(domain=(0.47, 0.53))))
band = base.mark_errorband(extent='iqr').encode(
    y='NDVI:Q')
(line + band).properties(width=600, height=300).interactive()
# + [markdown] id="yaC4Sdj_VUGv"
# The summary statistics for the summer/fall days (224-272) certainly show an NDVI reduction, but there is also variability; some years exhibit greater NDVI reduction than others as suggested by the wide interquartile range during the middle of the summer. Assuming that NDVI reduction is due to water and heat limiting photosynthesis, we can hypothesize that during years of drought, photosynthesis (NDVI) will be lower than non-drought years. We can investigate the relationship between photosynthesis (NDVI) and drought (PDSI) using a scatter plot and linear regression.
# + [markdown] id="ZG_muCDTKmFP"
# ## Drought and productivity relationship
#
# A scatterplot is a good way to visualize the relationship between two variables. Here, PDSI (drought indicator) will be plotted on the x-axis and NDVI (vegetation productivity) on the y-axis. To achieve this, both variables must exist in the same DataFrame. Each row will be an observation in time and columns will correspond to PDSI and NDVI values. Currently, PDSI and NDVI are in two different DataFrames and need to be merged.
# + [markdown] id="y3jRaH-mWqxd"
# ### Prepare DataFrames
#
# Before they can be merged, each variable must be reduced to a common temporal observation unit to define correspondence. There are a number of ways to do this and each will define the relationship between PDSI and NDVI differently. Here, our temporal unit will be an annual observation set where NDVI is reduced to the intra-annual minimum from DOY 224 to 272 and PDSI will be the mean from DOY 1 to 272. We are proposing that average drought severity for the first three quarters of a year are related to minimum summer NDVI for a given year.
#
# 1. Filter the NDVI DataFrame to observations that occur between DOY 224 and 272.
# 2. Reduce the DOY-filtered subset to intra-annual minimum NDVI.
# + id="pMRpUId911I2"
# Intra-annual minimum NDVI within the late-summer window (DOY 224-272).
ndvi_doy_range = [224, 272]
ndvi_df_sub = ndvi_df[(ndvi_df['DOY'] >= ndvi_doy_range[0])
                      & (ndvi_df['DOY'] <= ndvi_doy_range[1])]
ndvi_df_sub = ndvi_df_sub.groupby('Year').agg('min')
# + [markdown] id="tV0G7eQhR9Dp"
# **Note**: in your own application you may find that a different DOY range is more suitable, change the `ndvi_doy_range` as needed.
# + [markdown] id="LF3Y76torIae"
# 3. Filter the PDSI DataFrame to observations that occur between DOY 1 and 272.
# 4. Reduce the values within a given year to the mean of the observations.
# + id="ge9xtTU5ayeJ"
# Mean PDSI over the first three quarters of each year (DOY 1-272).
pdsi_doy_range = [1, 272]
pdsi_df_sub = pdsi_df[(pdsi_df['DOY'] >= pdsi_doy_range[0])
                      & (pdsi_df['DOY'] <= pdsi_doy_range[1])]
pdsi_df_sub = pdsi_df_sub.groupby('Year').agg('mean')
# + [markdown] id="g6_AUSguSLDI"
# **Note**: in your own application you may find that a different DOY range is more suitable, change the `pdsi_doy_range` as needed.
# + [markdown] id="CdBx1sLsbIrB"
# 5. Perform a join on 'Year' to combine the two reduced DataFrames.
# 6. Select only the columns of interest: 'Year', 'NDVI', 'PDSI'.
# 7. Preview the DataFrame.
# + id="myucC6MxbHxt"
# Join the annual NDVI and PDSI summaries on Year; keep only the columns
# needed for the scatter plot.
ndvi_pdsi_df = pd.merge(
    ndvi_df_sub, pdsi_df_sub, how='left', on='Year').reset_index()
ndvi_pdsi_df = ndvi_pdsi_df[['Year', 'NDVI', 'PDSI']]
ndvi_pdsi_df.head(5)
# + [markdown] id="SfHPBeA0Nf8v"
# NDVI and PDSI are now included in the same DataFrame linked by Year. This format is suitable for determining a linear relationship and drawing a line of best fit through the data.
#
# Including a line of best fit can be a helpful visual aid. Here, a 1D polynomial is fit through the xy point cloud defined by corresponding NDVI and PDSI observations. The resulting fit is added to the DataFrame as a new column 'Fit'.
#
# 8. Add a line of best fit between PDSI and NDVI by determining the linear relationship and predicting NDVI based on PDSI for each year.
# + id="K6w8Kqj5Wk34"
# Least-squares degree-1 fit of NDVI on PDSI; the predicted NDVI values
# are stored in 'Fit' to draw the best-fit line.
ndvi_pdsi_df['Fit'] = np.poly1d(
    np.polyfit(ndvi_pdsi_df['PDSI'], ndvi_pdsi_df['NDVI'], 1))(
        ndvi_pdsi_df['PDSI'])
ndvi_pdsi_df.head(5)
# + [markdown] id="KSpWaIhwN6k9"
# ### Scatter plot
#
# The DataFrame is ready for plotting. Since this chart is to include points and a line of best fit, two charts need to be created, one for the points and one for the line. The results are combined into the final plot.
# + id="pV2TIsuLga97"
# Scatter of annual mean PDSI (x) vs minimum summer NDVI (y), colored by
# year, overlaid with the grey least-squares fit line.
base = alt.Chart(ndvi_pdsi_df).encode(
    x=alt.X('PDSI:Q', scale=alt.Scale(domain=(-5, 5))))
points = base.mark_circle(size=60).encode(
    y=alt.Y('NDVI:Q', scale=alt.Scale(domain=(0.4, 0.6))),
    color=alt.Color('Year:O', scale=alt.Scale(scheme='magma')),
    tooltip=[
        alt.Tooltip('Year:O', title='Year'),
        alt.Tooltip('PDSI:Q', title='PDSI'),
        alt.Tooltip('NDVI:Q', title='NDVI')
    ])
fit = base.mark_line().encode(
    y=alt.Y('Fit:Q'),
    color=alt.value('#808080'))
(points + fit).properties(width=600, height=300).interactive()
# + [markdown] id="DucYGPjXONyg"
# As you can see, there seems to be some degree of positive correlation between PDSI and NDVI (i.e., as wetness increases, vegetation productivity increases; as wetness decreases, vegetation productivity decreases). Note that some of the greatest outliers are 2016, 2017, 2018 - the three years following recovery from the long drought. It is also important to note that there are many other factors that may influence the NDVI signal that are not being considered here.
# + [markdown] id="Xb5kyCMmcJYQ"
# ## Patch-level vegetation mortality
#
# At a regional scale there appears to be a relationship between drought and vegetation productivity. This section will look more closely at effects of drought on vegetation at a patch level, with a specific focus on mortality. Here, a Landsat time series collection is created for the period 1984-present to provide greater temporal context for change at a relatively precise spatial resolution.
# + [markdown] id="LPSzZWf_uMzE"
# ### Find a point of interest
#
# Use [aerial imagery](https://developers.google.com/earth-engine/datasets/catalog/USDA_NAIP_DOQQ) from the National Agriculture Imagery Program (NAIP) in an interactive [Folium](https://python-visualization.github.io/folium/) map to identify a location in the Sierra Nevada ecoregion that appears to have patches of dead trees.
#
# 1. Run the following code block to render an interactive Folium map for a selected NAIP image.
# 2. Zoom and pan around the image to identify a region of recently dead trees (standing silver snags with no fine branches or brown/grey snags with fine branches).
# 3. Click the map to list the latitude and longitude for a patch of interest. Record these values for use in the following section (the example location used in the following section is presented as a yellow point).
# + id="jeYO1qmfb3jg"
# Define a method for displaying Earth Engine image tiles to folium map.
def add_ee_layer(self, ee_image_object, vis_params, name):
  """Render an Earth Engine image as a togglable tile layer on a folium map.

  Intended to be bound onto `folium.Map` as a method (hence `self`).
  """
  tile_info = ee.Image(ee_image_object).getMapId(vis_params)
  layer = folium.raster_layers.TileLayer(
      tiles=tile_info['tile_fetcher'].url_format,
      attr='Map Data © <a href="https://earthengine.google.com/">Google Earth Engine, USDA National Agriculture Imagery Program</a>',
      name=name,
      overlay=True,
      control=True)
  layer.add_to(self)
# Attach the Earth Engine layer method to the folium.Map class so every
# map instance gains `add_ee_layer`.
folium.Map.add_ee_layer = add_ee_layer
# Import a NAIP image for the area and date of interest.
naip_img = ee.ImageCollection('USDA/NAIP/DOQQ').filterDate(
    '2016-01-01',
    '2017-01-01').filterBounds(ee.Geometry.Point([-118.6407, 35.9665])).first()
# Display the NAIP image to the folium map.
# NOTE(review): 'Stamen Terrain' basemap tiles have been retired upstream;
# a different `tiles=` provider may be needed — confirm.
m = folium.Map(location=[35.9665, -118.6407], tiles='Stamen Terrain', zoom_start=16)
m.add_ee_layer(naip_img, None, 'NAIP image, 2016')
# Add the point of interest to the map (the example dead-tree patch).
folium.Circle(
    radius=15,
    location=[35.9665, -118.6407],
    color='yellow',
    fill=False,
).add_to(m)
# Add the AOI outline to the map (`aoi` is defined earlier in the notebook).
folium.GeoJson(
    aoi.getInfo(),
    name='geojson',
    style_function=lambda x: {'fillColor': '#00000000', 'color': '#000000'},
).add_to(m)
# Add a lat lon popup so clicking the map reports coordinates.
folium.LatLngPopup().add_to(m)
# Display the map.
display(m)
# + [markdown] id="bpiOjZwEX4BI"
# ### Prepare Landsat collection
#
# Landsat surface reflectance data need to be prepared before being reduced. The steps below will organize data from multiple sensors into congruent collections where band names are consistent, cloud and cloud shadows have been masked out, and the normalized burn ratio (NBR) transformation is calculated and returned as the image representative (NBR is a good indicator of forest disturbance). Finally, all sensor collections will be merged into a single collection and annual composites calculated based on mean annual NBR using a join.
# + [markdown] id="WKsbAOU5YA8_"
# 1. Define Landsat observation date window inputs based on NDVI curve plotted previously and set latitude and longitude variables from the map above.
# + id="Ud1vjM8zHpdo"
# Day-of-year observation window (roughly mid-August through late
# September), chosen from the NDVI curve plotted earlier in the notebook.
start_day = 224
end_day = 272
# Point of interest identified on the folium map above.
latitude = 35.9665
longitude = -118.6407
# + [markdown] id="iMuPRxnPvIz6"
# **Note**: in your own application it may be necessary to change these values.
# + [markdown] id="rOvKDV_A14QM"
# 2. Prepare a Landsat surface reflectance collection 1984-present. Those unfamiliar with Landsat might find the following acronym definitions and links helpful.
#
# - [OLI](https://www.usgs.gov/land-resources/nli/landsat/landsat-8?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Operational Land Imager sensor)
# - [ETM+](https://www.usgs.gov/land-resources/nli/landsat/landsat-7?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Enhanced Thematic Mapper Plus sensor)
# - [TM](https://www.usgs.gov/land-resources/nli/landsat/landsat-5?qt-science_support_page_related_con=0#qt-science_support_page_related_con) (Landsat's Thematic Mapper sensor)
# - [CFMask](https://www.usgs.gov/land-resources/nli/landsat/cfmask-algorithm) (Landsat USGS surface reflectance mask based on the CFMask algorithm)
# - [NBR](https://www.usgs.gov/land-resources/nli/landsat/landsat-normalized-burn-ratio#:~:text=NBR%20is%20used%20to%20identify,SWIR%20values%20in%20traditional%20fashion.&text=In%20Landsat%204%2D7%2C%20NBR,Band%205%20%2B%20Band%207). (Normalized Burn Ratio: a spectral vegetation index)
# - Understanding [Earth Engine joins](https://developers.google.com/earth-engine/joins_intro)
# + id="1DOwu0zt-UcV"
# Make lat. and long. vars an `ee.Geometry.Point`.
# Earth Engine expects [x, y], i.e. [longitude, latitude] order.
point = ee.Geometry.Point([longitude, latitude])
# Define a function to get and rename bands of interest from OLI.
def rename_oli(img):
  """Select the OLI (Landsat 8) bands of interest and give them common names."""
  oli_bands = ee.List(['B2', 'B3', 'B4', 'B5', 'B6', 'B7', 'pixel_qa'])
  common_names = ee.List(
      ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa'])
  return img.select(oli_bands, common_names)
# Define a function to get and rename bands of interest from ETM+.
def rename_etm(img):
  """Select the TM/ETM+ bands of interest and give them common names."""
  etm_bands = ee.List(['B1', 'B2', 'B3', 'B4', 'B5', 'B7', 'pixel_qa'])
  common_names = ee.List(
      ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'pixel_qa'])
  return img.select(etm_bands, common_names)
# Define a function to mask out clouds and cloud shadows.
def cfmask(img):
  """Mask out pixels flagged as cloud or cloud shadow in the 'pixel_qa' band."""
  shadow_bit_mask = 1 << 3  # bit 3: cloud shadow
  cloud_bit_mask = 1 << 5   # bit 5: cloud
  qa = img.select('pixel_qa')
  clear = (qa.bitwiseAnd(shadow_bit_mask).eq(0)
           .And(qa.bitwiseAnd(cloud_bit_mask).eq(0)))
  return img.updateMask(clear)
# Define a function to add year as an image property.
def set_year(img):
  """Stamp the image with a 'Year' property taken from its acquisition date."""
  return img.set('Year', ee.Image(img).date().get('year'))
# Define a function to calculate NBR.
def calc_nbr(img):
  """Compute the Normalized Burn Ratio, (NIR - SWIR2) / (NIR + SWIR2)."""
  nbr_bands = ee.List(['NIR', 'SWIR2'])
  return img.normalizedDifference(nbr_bands).rename('NBR')
# Define a function to prepare OLI images.
def prep_oli(img):
  """Prepare an OLI image: rename bands, cloud-mask, then reduce to NBR."""
  source = img
  nbr = calc_nbr(cfmask(rename_oli(img)))
  # Carry the original image's metadata onto the derived NBR image.
  nbr = nbr.copyProperties(source, source.propertyNames())
  return set_year(nbr)
# Define a function to prepare TM/ETM+ images.
def prep_etm(img):
  """Prepare a TM/ETM+ image: rename bands, cloud-mask, then reduce to NBR."""
  source = img
  nbr = calc_nbr(cfmask(rename_etm(img)))
  # Carry the original image's metadata onto the derived NBR image.
  nbr = nbr.copyProperties(source, source.propertyNames())
  return set_year(nbr)
# Import image collections for each Landsat sensor (surface reflectance).
tm_col = ee.ImageCollection('LANDSAT/LT05/C01/T1_SR')
etm_col = ee.ImageCollection('LANDSAT/LE07/C01/T1_SR')
oli_col = ee.ImageCollection('LANDSAT/LC08/C01/T1_SR')
# Filter collections to the point of interest and the seasonal day-of-year
# window, then prepare them for merging.
oli_col = oli_col.filterBounds(point).filter(
    ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_oli)
etm_col = etm_col.filterBounds(point).filter(
    ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm)
# TM shares ETM+'s band layout, so it deliberately reuses prep_etm.
tm_col = tm_col.filterBounds(point).filter(
    ee.Filter.calendarRange(start_day, end_day, 'day_of_year')).map(prep_etm)
# Merge the collections.
landsat_col = oli_col.merge(etm_col).merge(tm_col)
# Get a distinct year collection (one representative image per year).
distinct_year_col = landsat_col.distinct('Year')
# Define a filter that identifies which images from the complete collection
# match the year from the distinct year collection.
join_filter = ee.Filter.equals(leftField='Year', rightField='Year')
# Define a join that stores all matches in a 'year_matches' property.
join = ee.Join.saveAll('year_matches')
# Apply the join and convert the resulting FeatureCollection to an
# ImageCollection.
join_col = ee.ImageCollection(
    join.apply(distinct_year_col, landsat_col, join_filter))
# Define a function to apply mean reduction among matching year collections.
def reduce_by_join(img):
  """Mean-composite all images sharing this image's year into one NBR image."""
  matches = ee.ImageCollection.fromImages(ee.Image(img).get('year_matches'))
  # Nominal composite timestamp: Aug 1 of the image's year.
  composite_millis = ee.Image(img).date().update(month=8, day=1).millis()
  return (matches.reduce(ee.Reducer.mean())
          .rename('NBR')
          .set('system:time_start', composite_millis))
# Apply the `reduce_by_join` function to the list of annual images in the
# properties of the join collection.
# Result: one mean-NBR image per distinct year.
landsat_col = join_col.map(reduce_by_join)
# + [markdown] id="Rp0HPyK8wsk7"
# The result of the above code block is an image collection with as many images as there are years present in the merged Landsat collection. Each image represents the annual mean NBR constrained to observations within the given date window.
# + [markdown] id="2pCpfWiUXxri"
# ### Prepare DataFrame
#
# 1. Create a region reduction function; use `ee.Reducer.first()` as the reducer since no spatial aggregation is needed (we are interested in the single pixel that intersects the point). Set the region as the geometry defined by the lat. and long. coordinates identified in the above map.
# 2. Apply the function to all images in the time series.
# 3. Filter out features with null computed values.
# + id="Zx0Vk49_duQM"
# Reduce each annual NBR image to the single 30 m pixel intersecting `point`
# (ee.Reducer.first(): no spatial aggregation wanted).
# `create_reduce_region_function` is defined earlier in this notebook.
reduce_landsat = create_reduce_region_function(
    geometry=point, reducer=ee.Reducer.first(), scale=30, crs='EPSG:3310')
# Map the reducer over the collection and drop features with null computed
# values (e.g. fully masked years).
nbr_stat_fc = ee.FeatureCollection(landsat_col.map(reduce_landsat)).filter(
    ee.Filter.notNull(landsat_col.first().bandNames()))
# + [markdown] id="uCkzwsWxdgfN"
# 4. Transfer data from the server to the client.<br>
# _Note: if the process times out, you'll need to export/import the `nbr_stat_fc` feature collection as described in the **Optional export** section_.
# 5. Convert the Python dictionary to a pandas DataFrame.
# 6. Preview the DataFrame and check data types.
# + id="ySnV4y-6cDR3"
# Transfer the reduced values client-side (`fc_to_dict` is defined earlier
# in this notebook) and load them into a pandas DataFrame.
nbr_dict = fc_to_dict(nbr_stat_fc).getInfo()
nbr_df = pd.DataFrame(nbr_dict)
# Sanity-check the table and its dtypes.
display(nbr_df.head())
print(nbr_df.dtypes)
# + [markdown] id="wTGhOqymdrjy"
# 7. Add date attribute columns.
# 8. Preview the DataFrame.
# + id="068PiyGVdwrh"
# Add date attribute columns (`add_date_info` is defined earlier in the
# notebook) and preview.
nbr_df = add_date_info(nbr_df)
nbr_df.head(5)
# + [markdown] id="OZCj3bwZkOru"
# ### Line chart
#
# Display the Landsat NBR time series for the point of interest as a line plot.
# + id="rx8iDh4HEeUl"
# Altair line chart of the annual NBR series with hoverable tooltips.
alt.Chart(nbr_df).mark_line().encode(
    x=alt.X('Timestamp:T', title='Date'),
    y='NBR:Q',
    tooltip=[
        alt.Tooltip('Timestamp:T', title='Date'),
        alt.Tooltip('NBR:Q')
    ]).properties(width=600, height=300).interactive()
# + [markdown] id="iHpgJVUiiCsH"
# As you can see from the above time series of NBR observations, a dramatic decrease in NBR began in 2015, shortly after the severe and extended drought began. The decline continued through 2017, when a minor recovery began. Within the context of the entire time series, it is apparent that the decline is outside of normal inter-annual variability and that the reduction in NBR for this site is quite severe. The lack of major recovery response in NBR in 2017-19 (time of writing) indicates that the event was not ephemeral; the loss of vegetation will have a lasting impact on this site. The corresponding onset of drought and reduction in NBR provides further evidence that there is a relationship between drought and vegetation response in the Sierra Nevada ecoregion.
# + [markdown] id="KJqMUKIyWeIx"
# ## Past and future climate
# The previous data visualizations suggest there is a relationship between drought and vegetation stress and mortality in the Sierra Nevada ecoregion.
#
# This section will look at how climate is projected to change in the future, which can give us a sense for what to expect with regard to drought conditions and speculate about its impact on vegetation.
#
# We'll look at historical and projected temperature and precipitation. Projected data are represented by NEX-DCP30, and historical observations by PRISM.
# + [markdown] id="9wpu02REWI6u"
# ### Future climate
#
# NEX-DCP30 data contain 33 climate models projected to the year 2100 using several scenarios of greenhouse gas concentration pathways (RCP). Here, we'll use the median of all models for RCP 8.5 (the worst case scenario) to look at potential future temperature and precipitation.
# + [markdown] id="wBgoSEEB_U91"
# #### Import and prepare collection
#
# 1. Filter the collection by date and scenario.
# 2. Calculate 'mean' temperature from median min and max among 33 models.
# + id="BBD6cDZSWdRD"
# NEX-DCP30 ensemble statistics: median min/max temperature and median
# precipitation rate, RCP 8.5 scenario, 2019-2070.
dcp_col = (ee.ImageCollection('NASA/NEX-DCP30_ENSEMBLE_STATS')
           .select(['tasmax_median', 'tasmin_median', 'pr_median'])
           .filter(
               ee.Filter.And(ee.Filter.eq('scenario', 'rcp85'),
                             ee.Filter.date('2019-01-01', '2070-01-01'))))
def calc_mean_temp(img):
  """Average the median min/max temps, keep the precip rate, rename bands."""
  tmax = img.select('tasmax_median')
  tmin = img.select('tasmin_median')
  mean_temp = tmax.add(tmin).divide(ee.Image.constant(2.0))
  return (mean_temp.addBands(img.select('pr_median'))
          .rename(['Temp-mean', 'Precip-rate'])
          .copyProperties(img, img.propertyNames()))
# Derive the mean-temperature + precip-rate bands for every image.
dcp_col = dcp_col.map(calc_mean_temp)
# + [markdown] id="EJzPaDN9qIHc"
# #### Prepare DataFrame
#
# 1. Create a region reduction function.
# 2. Apply the function to all images in the time series.
# 3. Filter out features with null computed values.
# + id="3LqsXbDWnOxm"
# Reduce each projected image to the single ~5 km pixel at the point.
reduce_dcp30 = create_reduce_region_function(
    geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310')
# Keep only features with non-null computed values.
dcp_stat_fc = ee.FeatureCollection(dcp_col.map(reduce_dcp30)).filter(
    ee.Filter.notNull(dcp_col.first().bandNames()))
# + [markdown] id="R-g71SoO8INL"
# 4. Transfer data from the server to the client. _Note: if the process times out, you'll need to export/import the `dcp_stat_fc` feature collection as described in the **Optional export** section_.
# 5. Convert the Python dictionary to a pandas DataFrame.
# 6. Preview the DataFrame and check the data types.
# + id="QiA8t7HMBGIv"
# Bring the reduced projections client-side and into a DataFrame.
dcp_dict = fc_to_dict(dcp_stat_fc).getInfo()
dcp_df = pd.DataFrame(dcp_dict)
# Preview the table and its dtypes.
display(dcp_df)
print(dcp_df.dtypes)
# + [markdown] id="AXlv_fqtr4tL"
# 7. Add date attribute columns.
# 8. Preview the DataFrame.
# + id="hzpDZFfqr53G"
# Add date attribute columns (helper defined earlier in the notebook).
dcp_df = add_date_info(dcp_df)
dcp_df.head(5)
# + [markdown] id="jpKd9JNs_BLR"
# 9. Convert precipitation rate to mm.
# 10. Convert Kelvin to celsius.
# 11. Add the model name as a column.
# 12. Remove the 'Precip-rate' column.
# + id="4mHupoR_bIEU"
dcp_df['Precip-mm'] = dcp_df['Precip-rate'] * 86400 * 30
dcp_df['Temp-mean'] = dcp_df['Temp-mean'] - 273.15
dcp_df['Model'] = 'NEX-DCP30'
dcp_df = dcp_df.drop('Precip-rate', 1)
dcp_df.head(5)
# + [markdown] id="gSxhiDhs0p4E"
# ### Past climate
#
# PRISM data are climate datasets for the conterminous United States. Grid cells are interpolated based on station data assimilated from many networks across the country. The datasets used here are monthly averages for precipitation and temperature. They provide a record of historical climate.
# + [markdown] id="nfQ3XFdb0mMz"
# #### Reduce collection and prepare DataFrame
#
# 1. Import the collection and filter by date.
# 2. Reduce the collection images by region and filter null computed values.
# 3. Convert the feature collection to a dictionary and transfer it client-side.<br>
# _Note: if the process times out, you'll need to export/import the `prism_stat_fc` feature collection as described in the **Optional export** section_.
# 4. Convert the dictionary to a DataFrame.
# 5. Preview the DataFrame.
# + id="NI-SWEQp7Ylh"
# PRISM monthly climate observations ('ppt' precipitation, 'tmean' mean
# temperature), 1979-2019.
prism_col = (ee.ImageCollection('OREGONSTATE/PRISM/AN81m')
             .select(['ppt', 'tmean'])
             .filter(ee.Filter.date('1979-01-01', '2019-12-31')))
# Reduce each image to the single ~5 km pixel at the point of interest.
reduce_prism = create_reduce_region_function(
    geometry=point, reducer=ee.Reducer.first(), scale=5000, crs='EPSG:3310')
# Drop features whose computed values are null.
prism_stat_fc = (ee.FeatureCollection(prism_col.map(reduce_prism))
                 .filter(ee.Filter.notNull(prism_col.first().bandNames())))
# Transfer client-side and load into a DataFrame; preview it.
prism_dict = fc_to_dict(prism_stat_fc).getInfo()
prism_df = pd.DataFrame(prism_dict)
display(prism_df)
print(prism_df.dtypes)
# + [markdown] id="e5b9CEeFA_gd"
# 6. Add date attribute columns.
# 7. Add model name.
# 8. Rename columns to be consistent with the NEX-DCP30 DataFrame.
# 9. Preview the DataFrame.
# + id="iASS9-kQ1tEd"
# Add date columns, tag the source model, and align column names with the
# NEX-DCP30 DataFrame so the two can be concatenated.
prism_df = add_date_info(prism_df)
prism_df['Model'] = 'PRISM'
prism_df = prism_df.rename(columns={'ppt': 'Precip-mm', 'tmean': 'Temp-mean'})
prism_df.head(5)
# + [markdown] id="f2ub1LvfAos4"
# ### Combine DataFrames
# + [markdown] id="bTvmvSKTBJX8"
# At this point the PRISM and NEX-DCP30 DataFrames have the same columns, the same units, and are distinguished by unique entries in the 'Model' column. Use the `concat` function to concatenate these DataFrames into a single DataFrame for plotting together in the same chart.
# + id="cCZE_7S_9XJR"
# Stack past (PRISM) and projected (NEX-DCP30) records into one DataFrame;
# rows remain distinguishable via the 'Model' column.
climate_df = pd.concat([prism_df, dcp_df], sort=True)
climate_df
# + [markdown] id="6KbnhXleCk5U"
# ### Charts
#
# Chart the past and future precipitation and temperature together to get a sense for where climate has been and where it is projected to go under RCP 8.5.
# + [markdown] id="WDnkRaXWAzTX"
# #### Precipitation
# + id="tgtpmiuskuFN"
# Precipitation: per-year median line per model, layered over an
# interquartile-range error band.
base = alt.Chart(climate_df).encode(
    x='Year:O',
    color='Model')
line = base.mark_line().encode(
    y=alt.Y('median(Precip-mm):Q', title='Precipitation (mm/month)'))
band = base.mark_errorband(extent='iqr').encode(
    y=alt.Y('Precip-mm:Q', title='Precipitation (mm/month)'))
(band + line).properties(width=600, height=300)
# + [markdown] id="qLdKpyzIA_7S"
# #### Temperature
# + id="S7T9n3y10MLn"
# Temperature: same median-line + IQR-band layering as the precipitation
# chart above.
line = alt.Chart(climate_df).mark_line().encode(
    x='Year:O',
    y='median(Temp-mean):Q',
    color='Model')
band = alt.Chart(climate_df).mark_errorband(extent='iqr').encode(
    x='Year:O',
    y=alt.Y('Temp-mean:Q', title='Temperature (°C)'), color='Model')
(band + line).properties(width=600, height=300)
# + [markdown] id="-RuZV6h7BH22"
# Future climate projections suggest that precipitation will decrease and temperature will increase for the selected point of interest. We can hypothesize, given the RCP 8.5 trajectory, that future conditions will more regularly resemble the 2012-2016 drought, which could lead to the same vegetation reduction response documented here and that more frequent drought events could lead to development of plant communities that are better adapted to low precipitation, high temperature conditions.
| tutorials/time-series-visualization-with-altair/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="Tpon0qB_5EYH" colab_type="code" colab={}
import torch
import torch.nn.functional as F
import torch.nn as nn
# + id="NQgI0F_H5_jP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 84} outputId="7e6dfd4e-8b70-4efd-bd68-85a8b3895887" executionInfo={"status": "ok", "timestamp": 1588097440289, "user_tz": -330, "elapsed": 3320, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggcx5PjxEyf1Uh8l40upDWFfhesSD2iXwiYCjzo=s64", "userId": "17053721761535178718"}}
class Net(nn.Module):
    """A tiny fully connected network: 4 inputs -> 3 hidden units -> 1 output."""

    def __init__(self):
        super().__init__()
        # Two linear layers; ReLU is applied functionally in forward().
        self.fc1 = nn.Linear(4, 3)
        self.fc2 = nn.Linear(3, 1)

    def forward(self, x):
        """Forward pass: ReLU(fc2(ReLU(fc1(x))))."""
        hidden = F.relu(self.fc1(x))
        return F.relu(self.fc2(hidden))
# Instantiate the network and print its layer summary.
net = Net()
print(net)
# + id="PlrnLb8R7ZnA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="2fd2783d-46ca-4e7d-ca37-c2666fe493cb" executionInfo={"status": "ok", "timestamp": 1588097442789, "user_tz": -330, "elapsed": 1330, "user": {"displayName": "Zappy Team", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggcx5PjxEyf1Uh8l40upDWFfhesSD2iXwiYCjzo=s64", "userId": "17053721761535178718"}}
# Synthetic training data: 100 samples of 4 features, random 0/1 targets.
# NOTE(review): `input` shadows the `input` builtin — consider renaming.
input = torch.randn(100, 1, 4)
target = torch.randint(0, 2, (100, 1, 1))
input = input.float()
target = target.float()
# Mean-squared-error loss between the continuous output and 0/1 targets.
criterion = nn.MSELoss()
learning_rate = 0.1
num_epochs = 100
for epoch in range(num_epochs):
    losses = 0.0
    # Clear gradients accumulated in the previous epoch.
    net.zero_grad()
    output = net(input)
    output = output.float()
    # Threshold the continuous output at 0.5 to get hard 0/1 predictions.
    preds = torch.round(output)
    loss = criterion(output, target)
    losses += loss
    # NOTE(review): `acc` is a *count* of correct predictions (a 0-100
    # tensor), not a percentage, despite being printed as "Accuracy".
    acc = torch.sum(preds == target)
    print("Epoch: " + str(epoch + 1) + "\nAccuracy = " + str(acc) + "\nLoss = " + str(losses) + "\n")
    losses.backward()
    # Manual SGD step: w <- w - lr * grad for every parameter.
    for f in net.parameters():
        f.data.sub_(f.grad.data * learning_rate)
| Artificial Intelligence with PyTorch/Lesson 4_ Build your First Neural Network using PyTorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
def find_neighbours(points, point, eps):
    """Return indices of every row of `points` within `eps` of row `point`.

    Euclidean distance is computed over all columns except the last
    (the last column is skipped — presumably it holds the class label;
    confirm with callers). The query index itself is always included,
    since its self-distance is 0 < eps. Note the strict `<` comparison.
    """
    # Hoist the query row's features: invariant across the loop.
    centre = points[point][0:-1]
    return [idx for idx in range(len(points))
            if np.linalg.norm(centre - points[idx][0:-1]) < eps]
def grow_cluster(data, labels, point, current_cluster_label, eps, min_points):
    """Flood-fill cluster `current_cluster_label` outward from seed `point`.

    Mutates `labels` in place. Points previously marked -1 (noise) that
    turn out to be density-reachable are absorbed as border points; still
    unclaimed (0) neighbours of branch points are claimed and expanded.
    """
    frontier = [point]
    # Iterating while appending: Python's for-loop re-checks the list
    # length each step, so newly appended branch points are also visited.
    for seed in frontier:
        nearby = find_neighbours(data, seed, eps)
        if len(nearby) < min_points:
            # Leaf point: belongs to the cluster but cannot extend it.
            continue
        for candidate in nearby:
            if labels[candidate] == -1:
                # Previously noise: becomes a border point, not a branch.
                labels[candidate] = current_cluster_label
            elif labels[candidate] == 0:
                # Unclaimed: claim it and queue it for expansion.
                labels[candidate] = current_cluster_label
                frontier.append(candidate)
def DBSCAN(points, eps, MinPts):
    """Label every point with a cluster id (1..k) or -1 for noise.

    0 means "unvisited"; every entry is non-zero by the time we return.
    """
    labels = [0 for _ in points]
    next_label = 0
    for seed in range(len(points)):
        if labels[seed] != 0:
            continue  # already claimed by an earlier cluster, or noise
        if len(find_neighbours(points, seed, eps)) < MinPts:
            labels[seed] = -1  # too sparse to seed a cluster: noise
        else:
            # Valid core point: start a new cluster and grow it.
            next_label += 1
            labels[seed] = next_label
            grow_cluster(points, labels, seed, next_label, eps, MinPts)
    return labels
# +
import pandas as pd
# The iris CSV has no header row; supply column names explicitly.
columns = ['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth', 'Name']
dataset = pd.read_csv("../datasets/iris.csv", delimiter=',', names=columns)
dataset.head()
# +
# Re-wrap as a typed DataFrame with the measurement columns as float64.
# NOTE(review): `dataset[1:-1]` drops both the first row (presumably a
# header/junk line) and the *last* data row — confirm the slice is intended.
df = pd.DataFrame(dataset[1:-1], columns = columns)
df = df.astype({'SepalLength':'float64', 'SepalWidth':'float64', 'PetalLength':'float64', 'PetalWidth':'float64'})
# -
# Exploratory plots: class-colored scatters of sepal and petal dimensions,
# plus box/violin plots of petal length per species.
sns.FacetGrid(df, hue="Name", height=5) \
    .map(plt.scatter, "SepalLength", "SepalWidth") \
    .add_legend()
sns.FacetGrid(df, hue="Name", height = 6) \
    .map(plt.scatter, "PetalLength", "PetalWidth") \
    .add_legend()
sns.boxplot(x="Name", y="PetalLength", data=df)
sns.violinplot(x="Name", y="PetalLength", data=df, size=6)
# Cluster the data in its original row order.
data = np.array(df)
cluster = DBSCAN(data, 0.7, 15)
print(cluster)
# NOTE(review): `random` is imported but only np.random is used below.
import random
# Re-cluster after shuffling the rows (and with different eps/MinPts).
# NOTE(review): shuffling breaks row alignment between `shuffled` and `df`,
# so coloring df's points by `shuffled` later mixes up labels — confirm.
data = np.array(df)
np.random.shuffle(data)
shuffled = DBSCAN(data, 1, 5)
print(shuffled)
# +
# NOTE(review): `colormap` is defined but never used in this cell — confirm.
colormap = np.array(['red', 'lime', 'black'])
predictedY = cluster
# Side-by-side comparison: raw data, clustering of the original-order data,
# and clustering of the shuffled data.
plt.subplot(1, 3, 1)
plt.scatter(df['PetalLength'], df['PetalWidth'], s=40)
plt.title('Before classification')
plt.subplot(1, 3, 2)
plt.scatter(df['PetalLength'], df['PetalWidth'], c=predictedY, s=40)
plt.title("Model's classification")
plt.subplot(1, 3, 3)
plt.scatter(df['PetalLength'], df['PetalWidth'], c=shuffled, s=40)
# Fixed copy-pasted title: this panel shows the shuffled-data labels.
plt.title("Model's classification (shuffled data)")
# +
# Reference plot of the true species classes for the sepal dimensions...
sns.FacetGrid(df, hue="Name", height=5) \
    .map(plt.scatter, "SepalLength", "SepalWidth") \
    .add_legend()
# ...next to the DBSCAN labels for the same dimensions.
plt.figure(figsize=(5, 5))
plt.scatter(df['SepalLength'],df['SepalWidth'], s=40, c=predictedY)
| notebooks/DBScan.ipynb |