code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import time
import tqdm
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as utils
import torch.nn.init as init
from torch.autograd import Variable
import sys
sys.path.append("../new_flows")
from flows import RealNVP, Planar, MAF
from models import NormalizingFlowModel
import torch
import torch.optim as optim
import torch.nn.functional as F
from argparse import ArgumentParser
from torch.distributions import MultivariateNormal
from models import NormalizingFlowModel
# +
####MAF
class VAE_NF(nn.Module):
    """Variational autoencoder over 10 input features whose approximate
    posterior is refined by a K-step Masked Autoregressive Flow (MAF).

    Args:
        K: number of flow transformations applied after the encoder.
        D: dimensionality of the latent space.

    The encoder maps a (N, 10) batch to (mu, log_var); the decoder maps the
    flowed D-dim latent back to 10 features.
    """
    def __init__(self, K, D):
        super().__init__()
        self.dim = D
        self.K = K
        # NOTE(review): nn.LeakyReLU(True) binds True to `negative_slope`
        # (slope == 1.0, an identity-like activation), not `inplace=True`.
        # Presumably inplace=True was intended; left unchanged because the
        # pre-trained weights loaded elsewhere were produced with this
        # activation — TODO confirm against the training code.
        self.encoder = nn.Sequential(
            nn.Linear(10, 50),
            nn.LeakyReLU(True),
            nn.Linear(50, 30),
            nn.LeakyReLU(True),
            nn.Linear(30, 20),
            nn.LeakyReLU(True),
            nn.Linear(20, D * 2)  # first D outputs: mu, last D: log-variance
        )
        self.decoder = nn.Sequential(
            nn.Linear(D, 20),
            nn.LeakyReLU(True),
            nn.Linear(20, 30),
            nn.LeakyReLU(True),
            nn.Linear(30, 50),
            nn.LeakyReLU(True),
            nn.Linear(50, 10)
        )
        # Bug fix: the original instantiated ONE MAF and listed it K times,
        # so every "step" of the flow shared a single set of parameters.
        # Each step now gets an independent MAF.  State dicts saved by the
        # old (shared) model still load: the parameter keys are unchanged.
        flows_init = [MAF(dim=D) for _ in range(K)]
        prior = MultivariateNormal(torch.zeros(D).cuda(), torch.eye(D).cuda())
        self.flows = NormalizingFlowModel(prior, flows_init)

    def forward(self, x):
        """Return (reconstruction, KL divergence) for a batch x of shape (N, 10)."""
        # Run encoder and split its output into the Gaussian posterior params.
        enc = self.encoder(x)
        mu = enc[:, :self.dim]
        log_var = enc[:, self.dim: self.dim * 2]
        # Re-parametrization trick: z ~ N(mu, sigma^2).
        sigma = (log_var * .5).exp()
        z = mu + sigma * torch.randn_like(sigma)
        kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
        # Construct a more expressive posterior with the normalizing flow and
        # correct the KL with the mean sum of log-abs-det Jacobians.
        z_k, _, sum_ladj = self.flows(z)
        kl_div = kl_div / x.size(0) - sum_ladj.mean()  # mean over batch
        # Run decoder on the flowed latent.
        x_prime = self.decoder(z_k)
        return x_prime, kl_div
class VAE_NF_12feat(nn.Module):
    """Variant of VAE_NF for 12 input features (jet masses included).

    Args:
        K: number of MAF flow steps.
        D: latent dimensionality.
    """
    def __init__(self, K, D):
        super().__init__()
        self.dim = D
        self.K = K
        # NOTE(review): nn.LeakyReLU(True) sets negative_slope=1.0, not
        # inplace=True — kept as-is for compatibility with trained weights.
        self.encoder = nn.Sequential(
            nn.Linear(12, 50),
            nn.LeakyReLU(True),
            nn.Linear(50, 30),
            nn.LeakyReLU(True),
            nn.Linear(30, 20),
            nn.LeakyReLU(True),
            nn.Linear(20, D * 2)  # first D: mu, last D: log-variance
        )
        self.decoder = nn.Sequential(
            nn.Linear(D, 20),
            nn.LeakyReLU(True),
            nn.Linear(20, 30),
            nn.LeakyReLU(True),
            nn.Linear(30, 50),
            nn.LeakyReLU(True),
            nn.Linear(50, 12)
        )
        # Bug fix (same as VAE_NF): build K INDEPENDENT MAF instances instead
        # of repeating one shared instance; old state dicts still load.
        flows_init = [MAF(dim=D) for _ in range(K)]
        prior = MultivariateNormal(torch.zeros(D).cuda(), torch.eye(D).cuda())
        self.flows = NormalizingFlowModel(prior, flows_init)

    def forward(self, x):
        """Return (reconstruction, KL divergence) for a batch x of shape (N, 12)."""
        # Encoder -> Gaussian posterior parameters.
        enc = self.encoder(x)
        mu = enc[:, :self.dim]
        log_var = enc[:, self.dim: self.dim * 2]
        # Re-parametrize: z ~ N(mu, sigma^2).
        sigma = (log_var * .5).exp()
        z = mu + sigma * torch.randn_like(sigma)
        kl_div = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
        # Flow refinement of the posterior; Jacobian correction to the KL.
        z_k, _, sum_ladj = self.flows(z)
        kl_div = kl_div / x.size(0) - sum_ladj.mean()  # mean over batch
        # Decode.
        x_prime = self.decoder(z_k)
        return x_prime, kl_div
#sigmodel = VAE_NF(N_FLOWS, Z_DIM).cuda()
# Instantiate the three autoencoders (K=10 flow steps; latent dims 4/4/8).
sigmodel = VAE_NF_12feat(10, 4).cuda()
bkgmodel = VAE_NF_12feat(10, 4).cuda()
bkgmodel2 = VAE_NF_12feat(10, 8).cuda()

def _ae_tag(d):
    """Join an AE-definition dict's values into the canonical file-name stem.

    Relies on dict insertion order, producing exactly
    type_trainon_features_architecture_selection_trainloss_beta_zdimnflow_version.
    """
    return "_".join(d.values())

# Definitions of the trained autoencoders; the values double as the pieces of
# the weight/loss file names.
sigae_def = {
    "type": "sig",
    "trainon": "2prong",
    "features": "12features",
    "architecture": "MAF",
    "selection": "mjjcut",
    "trainloss": "MSELoss",
    "beta": "beta10p0",
    "zdimnflow": "z4f10",
    "version": "ver0"
}
bkgae_def = {
    "type": "bkg",
    "trainon": "rndbkg",
    "features": "12features",
    "architecture": "MAF",
    "selection": "mjjcut",
    "trainloss": "MSELoss",
    "beta": "beta10p0",
    "zdimnflow": "z4f10",
    "version": "ver0"
}
sigae3prong_def = {
    "type": "sig",
    "trainon": "3prong",
    "features": "12features",
    "architecture": "MAF",
    "selection": "mjjcut",
    "trainloss": "MSELoss",
    "beta": "beta10p0",
    "zdimnflow": "z4f10",
    "version": "ver0"
}
directory = '/data/t3home000/spark/QUASAR/weights'
# Load pre-trained weights (file stems were previously spelled out as eight
# duplicated nine-field f-strings; _ae_tag builds the identical strings).
bkgmodel.load_state_dict(torch.load(f"{directory}/{_ae_tag(bkgae_def)}.h5"))
sigmodel.load_state_dict(torch.load(f"{directory}/{_ae_tag(sigae_def)}.h5"))
# Pre-computed per-event losses of each AE on each sample.
bkgloss_rndbkg = np.load(f"../data_strings/{_ae_tag(bkgae_def)}_bkgloss.npy")
sigloss_rndbkg = np.load(f"../data_strings/{_ae_tag(sigae_def)}_bkgloss.npy")
bkgloss_prong2 = np.load(f"../data_strings/{_ae_tag(bkgae_def)}_sigloss.npy")
sigloss_prong2 = np.load(f"../data_strings/{_ae_tag(sigae_def)}_sigloss.npy")
sig2loss_rndbkg = np.load(f"../data_strings/{_ae_tag(sigae3prong_def)}_bkgloss.npy")
sig2loss_prong2 = np.load(f"../data_strings/{_ae_tag(sigae3prong_def)}_sigloss.npy")
# +
def get_mass_and_loss(filename):
    """Load an HDF5 event table, apply the analysis selection, and score each
    surviving event with the module-level signal and background autoencoders.

    Parameters
    ----------
    filename : str
        Path to an HDF5 file readable by ``pd.read_hdf``.

    Returns
    -------
    tuple of numpy arrays
        (dijet mass = column 0, signal-AE per-event MSE, background-AE
        per-event MSE), one entry per selected event.

    NOTE(review): depends on the module-level ``sigmodel``/``bkgmodel`` and on
    CUDA being available.  The constants below are per-feature means/stds used
    for standardization — presumably computed on the corresponding training
    samples; confirm against the training pipeline.
    """
    # Background-sample feature means (12 features).
    bkg_mean = [229.63703733315916,
    0.5040889514020693,
    0.7057013083107537,
    0.7997482828680078,
    1.962088799505638,
    255.28311110430732,
    104.75378636669201,
    0.5826962213587913,
    0.7337977883611374,
    0.7995755223103627,
    2.4171146383976034,
    159.75951298419648]
    # Background-sample feature standard deviations.
    bkg_std = [131.99460164832075,
    0.1849760683347428,
    0.12816197632833803,
    0.0865553075423761,
    0.7978444985340304,
    111.36202699158525,
    68.17314726646633,
    0.17458368821556094,
    0.11581000682757887,
    0.08481896882883566,
    1.1084488583498446,
    83.52566008451547]
    # Signal-sample (2-prong) feature means.
    sig_mean = [489.6699791129437,
    0.22463354907052815,
    0.639679230886038,
    0.7742464847783065,
    1.4122267488712905,
    253.95296492556145,
    167.8782919951573,
    0.3429350853771621,
    0.6460772390759577,
    0.7653966882661628,
    1.4541020071952622,
    165.34100428967955]
    # Signal-sample feature standard deviations.
    sig_std = [66.87936517220672,
    0.1361910116638729,
    0.15309413231859276,
    0.10054671892302457,
    0.3833027691949484,
    87.45977372109115,
    50.53622336909848,
    0.14930126292179366,
    0.15096516491915418,
    0.10415175051343364,
    0.44297590229588857,
    55.44436453890166]
    # NOTE(review): sig2_mean/sig2_std are defined but never used below.
    sig2_mean = [539.8230906175115,
    0.3093919504152834,
    0.5743852002513076,
    0.7250999951321894,
    1.3980643297136106,
    305.10717416378316,
    417.50598164552935,
    0.3423973996112709,
    0.5859205785605542,
    0.7492831266044925,
    1.6350968023267969,
    261.31617070357555]
    sig2_std = [104.54956422929888,
    0.12679648500311186,
    0.13543619747203547,
    0.11808533829680096,
    0.34103400809131684,
    100.12586819876644,
    88.97967256083908,
    0.1495063860755517,
    0.14289850921322486,
    0.11224475793826029,
    0.5509216479879365,
    82.84269505360622]
    #supervised_loss_list = []
    #sigae_loss_list = []
    #bkgae_loss_list = []
    f = pd.read_hdf(filename)
    dt = f.values
    # Require positive values in the columns used as denominators below.
    correct = (dt[:,3]>0) &(dt[:,19]>0) & (dt[:,1]>0) & (dt[:,2]>0)
    dt = dt[correct]
    # Normalize substructure columns by the jet-mass columns (3 and 19).
    for i in range(13,19):
        dt[:,i] = dt[:,i]/dt[:,3]
    for i in range(29,35):
        dt[:,i] = dt[:,i]/(dt[:,19])
    # Event selection: dijet mass >= 2800 and minimum jet masses.
    correct = (dt[:,0]>=2800)
    dt = dt[correct]
    correct = (dt[:,3]>100)
    dt = dt[correct]
    correct = (dt[:,19]>20)
    dt = dt[correct]
    # Column subsets; fancy indexing yields independent copies of dt's data.
    X_no_jetmass = dt[:,[4,5,6,11,12,20,21,22,27,28]]
    X_with_jetmass = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
    X_sig = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
    X_bkg = dt[:,[3,4,5,6,11,12,19,20,21,22,27,28]]
    # Standardize with background stats for the bkg AE and signal stats for
    # the sig AE.
    for i, (bkgmean,bkgstd,sigmean,sigstd) in enumerate(zip(bkg_mean,bkg_std,sig_mean, sig_std)):
        X_bkg[:,i] = (X_with_jetmass[:,i]-bkgmean)/bkgstd
        X_sig[:,i] = (X_with_jetmass[:,i]-sigmean)/sigstd
        #print(bkgmean,bkgstd)
    bkgae_test = torch.tensor(X_bkg)
    sigae_test = torch.tensor(X_sig)
    sigmodel.eval()
    bkgmodel.eval()
    # Per-event MSE reconstruction loss of each autoencoder (index [0] of the
    # model output is the reconstruction; [1] is the KL term).
    sigae_loss = torch.mean((sigmodel(sigae_test.float().cuda())[0]- sigae_test.float().cuda())**2,dim=1).data.cpu().numpy()
    bkgae_loss = torch.mean((bkgmodel(bkgae_test.float().cuda())[0]- bkgae_test.float().cuda())**2,dim=1).data.cpu().numpy()
    return dt[:,0], sigae_loss, bkgae_loss
# -
# Score the black-box dataset and the pure-background dataset with both AEs.
mass, sigloss, bkgloss = get_mass_and_loss('/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_BB1_rnd.h5')
bkg_mass, bkg_sigloss, bkg_bkgloss = get_mass_and_loss('/data/t3home000/spark/QUASAR/preprocessing/conventional_tau_background.h5')
# 2-D view of the two losses per event.
plt.scatter(bkgloss, sigloss)
plt.xlim([0,10])
plt.ylim([0,20])
# Restrict to the displayed region for the density plot.
idx = np.where((bkgloss<10)&(sigloss<20))[0]
idx
plt.hexbin(bkgloss[idx],sigloss[idx])
plt.xlim([0,10])
plt.ylim([0,20])
# Previously saved mass/loss arrays for the black box and QCD MC samples.
bb_mass = np.load('blackbox_mass.npy')
bb_sigloss = np.load('blackbox_sigloss.npy')
bb_bkgloss = np.load('blackbox_bkgloss.npy')
qcd_mass = np.load('QCDMC_mass.npy')
qcd_sigloss = np.load('QCDMC_sigloss.npy')
qcd_bkgloss = np.load('QCDMC_bkgloss.npy')
# Bug fix: `plt.hist(bkg_mass[], ...)` was a SyntaxError (empty subscript);
# the whole array is what the histogram needs.
plt.hist(bkg_mass, bins = np.arange(2800,8000,100));
plt.hist(mass, bins = np.arange(2800,8000,100));
plt.hist(bkgloss,bins=np.arange(0,10,0.01));
plt.hist(sigloss,bins=np.arange(0,2,0.01));
# Scan a few (bkgloss, sigloss) cut combinations and histogram the surviving
# dijet masses.  The red line at 3823 is a reference mass (presumably the
# resonance candidate — confirm with the analysis notes).
bbidx = np.where( (bkgloss > 5) & (sigloss<8))[0]
bbidx = np.where( (bkgloss > 5) & (sigloss<6))[0]
plt.hist(mass[bbidx], bins = np.arange(2800,8000,150));
plt.axvline(x=3823,color='r')
bbidx = np.where( (bkgloss > 4) & (sigloss<7.5))[0]
plt.hist(mass[bbidx], bins = np.arange(2800,8000,150));
plt.axvline(x=3823,color='r')
bbidx = np.where( (bkgloss > 3.7) & (sigloss<5))[0]
plt.hist(mass[bbidx], bins = np.arange(2800,8000,150));
plt.axvline(x=3823,color='r')
bbidx = np.where( (bkgloss > 4.5) & (sigloss<7))[0]
plt.hist(mass[bbidx], bins = np.arange(2800,8000,100));
plt.axvline(x=3823,color='r')
bbidx = np.where( (bkgloss > 4) & (sigloss<5.5))[0]
plt.hist(mass[bbidx], bins = np.arange(2800,8000,150));
plt.axvline(x=3823,color='r')
# Same cut applied to the pure-background sample for comparison.
bkg_bbidx = np.where( (bkg_bkgloss > 5.8) & (bkg_sigloss<8))[0]
plt.hist(bkg_mass[bkg_bbidx], bins = np.arange(2800,8000,150));
plt.axvline(x=3823,color='r')
# Overlay black-box (blue) and background-only (orange) mass spectra for the
# same loss cut.
# +
bbidx = np.where( (bkgloss > 5.8) & (sigloss<8))[0]
plt.hist(mass[bbidx], bins = np.arange(2800,8000,200));
plt.axvline(x=3823,color='r')
bkg_bbidx = np.where( (bkg_bkgloss > 5.8) & (bkg_sigloss<8))[0]
plt.hist(bkg_mass[bkg_bbidx], bins = np.arange(2800,8000,200),color='orange');
# -
# A tighter cut variant.
bbidx = np.where( (bkgloss > 3.3) & (sigloss<3.6))[0]
plt.hist(mass[bbidx], bins = np.arange(2800,8000,150));
plt.axvline(x=3823,color='r')
# +
bbidx = np.where( (bkgloss > 5.8) & (sigloss<8))[0]
plt.hist(mass[bbidx], bins = np.arange(2800,8000,200));
plt.axvline(x=3823,color='r')
bkg_bbidx = np.where( (bkg_bkgloss > 5.8) & (bkg_sigloss<8))[0]
plt.hist(bkg_mass[bkg_bbidx], bins = np.arange(2800,8000,200),color='orange');
# -
| make_plots_for_the_paper/make_npystrings_for_phil_for_BB1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# Introductory notebook: arithmetic, variables, types, and input().
# + colab={"base_uri": "https://localhost:8080/"} id="kNx5MMF4pmZ3" outputId="8e8321df-2624-4d1a-d2a1-e09c816621e1"
10+20
# + colab={"base_uri": "https://localhost:8080/"} id="hIYlKbfFrqND" outputId="65b2dab8-d59c-4402-df0b-dbf1ab3f7937"
10*5
# + colab={"base_uri": "https://localhost:8080/"} id="BUtaClcSsRwE" outputId="d19ac85e-0418-43ae-9414-66c3e1606034"
20-10
# + colab={"base_uri": "https://localhost:8080/"} id="duFkx-9qsVf8" outputId="4b03c0ba-21ef-4ffb-a57e-71ae714edc6a"
10/5
# + id="gFfSyFAssgJs"
x=5
# + id="OHTdtAvftBRU"
y=10
# + colab={"base_uri": "https://localhost:8080/"} id="pN9O6iYBtC-k" outputId="0091ddde-d0f4-41cc-d312-0035cae82d6c"
x+y
# + colab={"base_uri": "https://localhost:8080/"} id="IUC3ApnjtGNs" outputId="da935802-e17c-477d-c34e-b299178e3dc0"
x-y
# + colab={"base_uri": "https://localhost:8080/"} id="RoxDGIO7tIps" outputId="5f9c774f-1133-4c0b-93c2-29cb24d5c4f6"
x*y
# + colab={"base_uri": "https://localhost:8080/"} id="eW31VF8ntLDc" outputId="5d968e8d-65c8-41ad-f812-cda3bd745c81"
x/y
# Simple-interest example: principal * years * rate.
# + id="VLsFeS0HwIAu"
p=28000
n=5
r=0.08
i=p*n*r
# + colab={"base_uri": "https://localhost:8080/"} id="w89gVil6wWIl" outputId="6dedb9de-9043-4553-a369-3b7acdf55325"
i
# + id="40j3tHLQwmc1"
x="jasin"
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="-2sSAPa9yDpU" outputId="cae94eca-f5f3-4b29-d41b-2a5a85bb39b8"
x
# + id="DUXlGYc-yHS8"
x=10
y=15
z="municipality"
# + colab={"base_uri": "https://localhost:8080/"} id="Y6cZjOrgzdXV" outputId="d5604208-4938-4e06-d6e8-cc4cb2a806b7"
type(x)
# + colab={"base_uri": "https://localhost:8080/"} id="91tDbb6Pzi31" outputId="a98116e5-51f1-4ddd-de65-26424b22d60c"
type(y)
# + colab={"base_uri": "https://localhost:8080/"} id="VsuRAo2wzlI9" outputId="41a880b9-2185-46fd-f749-a8bd2780c999"
type(z)
# input() always returns a string, so x+y below concatenates rather than adds.
# + colab={"base_uri": "https://localhost:8080/"} id="Mt0CT8XpzozF" outputId="c0e6e47b-7103-4aae-e754-0d881f3bdcf0"
x=input("enter your name")
# + colab={"base_uri": "https://localhost:8080/"} id="oW6hWv7sACZO" outputId="d288d9ef-e0e4-4722-9025-6a8dc741577f"
x=input("enter the first number")
# + colab={"base_uri": "https://localhost:8080/"} id="xrtIlSirAgeF" outputId="5f39e4de-e78b-4960-9fca-b990aaceffd8"
y=input("enter the second number")
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="D7lRz0r4Aq4-" outputId="109d7684-a506-48a6-c431-810a242d095f"
x
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="ohfIQcsMAvmO" outputId="565010f7-5679-4276-86c7-2093f0606e70"
y
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="pJi3Hsz8AwxF" outputId="16f8ee5c-29ae-476c-fd8a-0a75f7b609fd"
x+y
# + colab={"base_uri": "https://localhost:8080/"} id="PlbWPww-CPyG" outputId="0de3035a-b134-4a09-8809-76a2e0956d55"
type(x)
# + colab={"base_uri": "https://localhost:8080/"} id="coO5il5KCajO" outputId="9dffd170-8f0e-4886-ed00-34827230b12c"
type(y)
# Convert to int before arithmetic.
# + colab={"base_uri": "https://localhost:8080/"} id="a9WLFrp7Ccs2" outputId="1fac3cad-3a8e-4cfb-9092-68a4671f4457"
int(x)+int(y)
# + colab={"base_uri": "https://localhost:8080/"} id="nHx1ss4FDL9u" outputId="7f905f7b-0639-4070-f27c-a784c41908d5"
x=input("enter a number")
# + colab={"base_uri": "https://localhost:8080/"} id="FETA9kHUFKNO" outputId="ee7a802a-a1ed-4b20-e25a-ab3484dc69ea"
y=input("enter a number")
# + colab={"base_uri": "https://localhost:8080/"} id="3Af9iFNPFebI" outputId="76c0f8af-e1fb-450a-8a02-f4811d09ad94"
x=input("enter a number")
y=input("enter a number")
print(int(x)+int(y))
print(int(x)-int(y))
print(int(x)*int(y))
print(int(x)/int(y))
# + id="Eae2p8IJGPAf"
| Untitled0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5
# name: julia-1.5
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/daryoush/CudaAndFluxExperiments/blob/main/startjulia1_5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/", "height": 128} id="oMSuTc3pDlHv" outputId="20b166c9-7ae5-4067-ce75-8b2904666d33"
### NOTE THIS WORKS AS PYTHON, IT ONLY WORKS WHEN THE RUNTIME COMPLAINS ABOUT JULIA RUNTIME, SO IT REVERTS TO PYTHON. AFTER JULIA IS INSTALLED, CHANGE TO JULIA RUNTIME
# Download and unpack Julia 1.5 into /usr, then install IJulia plus the
# CUDA/Flux ML stack.  The leading `!` lines are notebook shell escapes and
# only work while the runtime is still Python.
!curl -sSL "https://julialang-s3.julialang.org/bin/linux/x64/1.5/julia-1.5.0-linux-x86_64.tar.gz" -o julia.tar.gz
!tar -xzf julia.tar.gz -C /usr --strip-components 1
!rm -rf julia.tar.gz*
!julia -e 'using Pkg; pkg"add IJulia; precompile"'
!julia -e 'using Pkg; Pkg.add([ "CUDA", "Flux","Zygote","Profile", "TensorBoardLogger", "Logging",\
 "Random", "DataStructures", "Transformers","Statistics", "BenchmarkTools"\
]);' ###Pkg.precompile()'
!echo "DONE"
# Quick sanity check that the Julia kernel evaluates an expression.
# + id="BoZnMwsrCNZn"
[1,2,3]
# + id="qrtJwtCUgCjX"
using CUDA
# Benchmark a 2048x2048 matrix square on CPU, and on GPU when Colab exposes
# one (COLAB_GPU env var is "1").
# + colab={"base_uri": "https://localhost:8080/"} id="yQlpeR9wNOi8" outputId="1f746055-f882-4fd0-e176-e92600e8820f"
using BenchmarkTools
M = rand(2048, 2048)
@benchmark M^2
if ENV["COLAB_GPU"] == "1"
    M_gpu = cu(M)
    @benchmark CUDA.@sync M_gpu^2
else
    println("No GPU found.")
end
# Report GPU memory before and after forcing a full garbage collection.
# + colab={"base_uri": "https://localhost:8080/"} id="gR0rok0y2JeR" outputId="0d96a07e-5918-4f40-a98a-704bfaa52d6d"
CUDA.memory_status()
GC.gc(true)
CUDA.memory_status()
# + id="vD2UftRPetCK"
# + id="vD2UftRPetCK"
| startjulia1_5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Long Short Term Memory (LSTM)
#
# :label:`chapter_lstm`
#
#
# The challenge to address long-term information preservation and short-term input
# skipping in latent variable models has existed for a long time. One of the
# earliest approaches to address this was the LSTM
# :cite:`Hochreiter.Schmidhuber.1997`. It shares many of the properties of the
# Gated Recurrent Unit (GRU) and predates it by almost two decades. Its design is
# slightly more complex.
#
# Arguably it is inspired by logic gates of a computer. To control a memory cell
# we need a number of gates. One gate is needed to read out the entries from the
# cell (as opposed to reading any other cell). We will refer to this as the
# *output* gate. A second gate is needed to decide when to read data into the
# cell. We refer to this as the *input* gate. Lastly, we need a mechanism to reset
# the contents of the cell, governed by a *forget* gate. The motivation for such a
# design is the same as before, namely to be able to decide when to remember and
# when to ignore inputs into the latent state via a dedicated mechanism. Let's see
# how this works in practice.
#
# ## Gated Memory Cells
#
# Three gates are introduced in LSTMs: the input gate, the forget gate, and the output gate. In addition to that we introduce memory cells that take the same shape as the hidden state. Strictly speaking this is just a fancy version of a hidden state, custom engineered to record additional information.
#
# ### Input Gates, Forget Gates and Output Gates
#
# Just like with GRUs, the data feeding into the LSTM gates is the input at the current time step $\mathbf{X}_t$ and the hidden state of the previous time step $\mathbf{H}_{t-1}$. These inputs are processed by a fully connected layer and a sigmoid activation function to compute the values of input, forget and output gates. As a result, the three gate elements all have a value range of $[0,1]$.
#
# 
#
# We assume there are $h$ hidden units and that the minibatch is of size $n$. Thus
# the input is $\mathbf{X}_t \in \mathbb{R}^{n \times d}$ (number of examples:
# $n$, number of inputs: $d$) and the hidden state of the last time step is $\mathbf{H}_{t-1} \in \mathbb{R}^{n \times h}$. Correspondingly the gates are defined as follows: the input gate is $\mathbf{I}_t \in \mathbb{R}^{n \times h}$, the forget gate is $\mathbf{F}_t \in \mathbb{R}^{n \times h}$, and the output gate is $\mathbf{O}_t \in \mathbb{R}^{n \times h}$. They are calculated as follows:
#
# $$
# \begin{aligned}
# \mathbf{I}_t &= \sigma(\mathbf{X}_t \mathbf{W}_{xi} + \mathbf{H}_{t-1} \mathbf{W}_{hi} + \mathbf{b}_i),\\
# \mathbf{F}_t &= \sigma(\mathbf{X}_t \mathbf{W}_{xf} + \mathbf{H}_{t-1} \mathbf{W}_{hf} + \mathbf{b}_f),\\
# \mathbf{O}_t &= \sigma(\mathbf{X}_t \mathbf{W}_{xo} + \mathbf{H}_{t-1} \mathbf{W}_{ho} + \mathbf{b}_o),
# \end{aligned}
# $$
#
# $\mathbf{W}_{xi}, \mathbf{W}_{xf}, \mathbf{W}_{xo} \in \mathbb{R}^{d \times h}$ and $\mathbf{W}_{hi}, \mathbf{W}_{hf}, \mathbf{W}_{ho} \in \mathbb{R}^{h \times h}$ are weight parameters and $\mathbf{b}_i, \mathbf{b}_f, \mathbf{b}_o \in \mathbb{R}^{1 \times h}$ are bias parameters.
#
#
# ### Candidate Memory Cell
#
# Next we design a memory cell. Since we haven't specified the action of the various gates yet, we first introduce a *candidate* memory cell $\tilde{\mathbf{C}}_t \in \mathbb{R}^{n \times h}$. Its computation is similar to the three gates described above, but using a $\tanh$ function with a value range of $[-1, 1]$ as activation function. This leads to the following equation at time step $t$.
#
# $$\tilde{\mathbf{C}}_t = \text{tanh}(\mathbf{X}_t \mathbf{W}_{xc} + \mathbf{H}_{t-1} \mathbf{W}_{hc} + \mathbf{b}_c)$$
#
# Here $\mathbf{W}_{xc} \in \mathbb{R}^{d \times h}$ and $\mathbf{W}_{hc} \in \mathbb{R}^{h \times h}$ are weights and $\mathbf{b}_c \in \mathbb{R}^{1 \times h}$ is a bias.
#
# 
#
#
# ### Memory Cell
#
# In GRUs we had a single mechanism to govern input and forgetting. Here we have two parameters, $\mathbf{I}_t$ which governs how much we take new data into account via $\tilde{\mathbf{C}}_t$ and the forget parameter $\mathbf{F}_t$ which addresses how much of the old memory cell content $\mathbf{C}_{t-1} \in \mathbb{R}^{n \times h}$ we retain. Using the same pointwise multiplication trick as before we arrive at the following update equation.
#
# $$\mathbf{C}_t = \mathbf{F}_t \odot \mathbf{C}_{t-1} + \mathbf{I}_t \odot \tilde{\mathbf{C}}_t.$$
#
# If the forget gate is always approximately 1 and the input gate is always approximately 0, the past memory cells will be saved over time and passed to the current time step. This design was introduced to alleviate the vanishing gradient problem and to better capture dependencies for time series with long range dependencies. We thus arrive at the following flow diagram.
#
# 
#
#
# ### Hidden States
#
# Lastly we need to define how to compute the hidden state $\mathbf{H}_t \in \mathbb{R}^{n \times h}$. This is where the output gate comes into play. In the LSTM it is simply a gated version of the $\tanh$ of the memory cell. This ensures that the values of $\mathbf{H}_t$ are always in the interval $[-1, 1]$. Whenever the output gate is $1$ we effectively pass all memory information through to the predictor whereas for output $0$ we retain all information only within the memory cell and perform no further processing. The figure below has a graphical illustration of the data flow.
#
# $$\mathbf{H}_t = \mathbf{O}_t \odot \tanh(\mathbf{C}_t).$$
#
# 
#
#
#
#
# ## Implementation from Scratch
#
# Now it's time to implement an LSTM. We begin with a model built from scratch. As with the experiments in the previous sections we first need to load the data. We use *The Time Machine* for this.
# + attributes={"classes": [], "id": "", "n": "1"}
import d2l
from mxnet import nd
from mxnet.gluon import rnn
# Load "The Time Machine" as a character-level dataset with minibatches of 32
# sequences, each 35 time steps long.
batch_size, num_steps = 32, 35
train_iter, vocab = d2l.load_data_time_machine(batch_size, num_steps)
# -
# ### Initialize Model Parameters
#
# Next we need to define and initialize the model parameters. As previously, the hyperparameter `num_hiddens` defines the number of hidden units. We initialize weights with a Gaussian with $0.01$ variance and we set the biases to $0$.
# + attributes={"classes": [], "id": "", "n": "2"}
def get_lstm_params(vocab_size, num_hiddens, ctx):
    """Allocate all LSTM parameters on `ctx` and attach gradient buffers.

    Weights are drawn from a Gaussian with std 0.01; biases start at zero.
    """
    num_inputs = num_outputs = vocab_size

    def normal(shape):
        # Gaussian initializer on the target device.
        return nd.random.normal(scale=0.01, shape=shape, ctx=ctx)

    def gate_params():
        # (input-to-hidden weight, hidden-to-hidden weight, bias) for one gate.
        return (normal((num_inputs, num_hiddens)),
                normal((num_hiddens, num_hiddens)),
                nd.zeros(num_hiddens, ctx=ctx))

    W_xi, W_hi, b_i = gate_params()  # input gate
    W_xf, W_hf, b_f = gate_params()  # forget gate
    W_xo, W_ho, b_o = gate_params()  # output gate
    W_xc, W_hc, b_c = gate_params()  # candidate memory cell
    # Output layer parameters.
    W_hq = normal((num_hiddens, num_outputs))
    b_q = nd.zeros(num_outputs, ctx=ctx)
    # Attach a gradient buffer to every parameter before returning them.
    params = [W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
              W_xc, W_hc, b_c, W_hq, b_q]
    for param in params:
        param.attach_grad()
    return params
# -
# ### Define the Model
#
# In the initialization function, the hidden state of the LSTM needs to return an additional memory cell with a value of $0$ and a shape of (batch size, number of hidden units). Hence we get the following state initialization.
# + attributes={"classes": [], "id": "", "n": "3"}
def init_lstm_state(batch_size, num_hiddens, ctx):
    """Return zero-initialized (hidden state, memory cell) for the LSTM."""
    def zeros():
        return nd.zeros(shape=(batch_size, num_hiddens), ctx=ctx)
    return (zeros(), zeros())
# -
# The actual model is defined just like we discussed it before with three gates and an auxiliary memory cell. Note that only the hidden state is passed on to the output layer. The memory cells do not participate in the computation directly.
# + attributes={"classes": [], "id": "", "n": "4"}
def lstm(inputs, state, params):
    """Run the scratch LSTM over a sequence and return (outputs, final state).

    Only the hidden state H feeds the output layer; the memory cell C is
    purely internal.
    """
    (W_xi, W_hi, b_i, W_xf, W_hf, b_f, W_xo, W_ho, b_o,
     W_xc, W_hc, b_c, W_hq, b_q) = params
    H, C = state
    outputs = []
    for X in inputs:
        # Sigmoid-activated gates: input (I), forget (F), output (O).
        I = nd.sigmoid(nd.dot(X, W_xi) + nd.dot(H, W_hi) + b_i)
        F = nd.sigmoid(nd.dot(X, W_xf) + nd.dot(H, W_hf) + b_f)
        O = nd.sigmoid(nd.dot(X, W_xo) + nd.dot(H, W_ho) + b_o)
        # Candidate cell, then the gated cell and hidden-state updates.
        C_tilda = nd.tanh(nd.dot(X, W_xc) + nd.dot(H, W_hc) + b_c)
        C = F * C + I * C_tilda
        H = O * nd.tanh(C)
        outputs.append(nd.dot(H, W_hq) + b_q)
    return nd.concat(*outputs, dim=0), (H, C)
# -
# ### Training
#
# Again, we just train as before.
# + attributes={"classes": [], "id": "", "n": "9"}
# Train the from-scratch LSTM: 256 hidden units, 500 epochs, learning rate 1.
vocab_size, num_hiddens, ctx = len(vocab), 256, d2l.try_gpu()
num_epochs, lr = 500, 1
model = d2l.RNNModelScratch(len(vocab), num_hiddens, ctx, get_lstm_params,
                            init_lstm_state, lstm)
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, ctx)
# -
# ## Concise Implementation
#
# In Gluon, we can call the `LSTM` class in the `rnn` module directly to instantiate the model.
# + attributes={"classes": [], "id": "", "n": "10"}
# Same experiment with Gluon's built-in rnn.LSTM layer.
lstm_layer = rnn.LSTM(num_hiddens)
model = d2l.RNNModel(lstm_layer, len(vocab))
d2l.train_ch8(model, train_iter, vocab, lr, num_epochs, ctx)
# -
# ## Summary
#
# * LSTMs have three types of gates: input, forget and output gates which control the flow of information.
# * The hidden layer output of LSTM includes hidden states and memory cells. Only hidden states are passed into the output layer. Memory cells are entirely internal.
# * LSTMs can help cope with vanishing and exploding gradients due to long range dependencies and short-range irrelevant data.
# * In many cases LSTMs perform slightly better than GRUs but they are more costly to train and execute due to the larger latent state size.
# * LSTMs are the prototypical latent variable autoregressive model with nontrivial state control. Many variants thereof have been proposed over the years, e.g. multiple layers, residual connections, different types of regularization.
# * Training LSTMs and other sequence models is quite costly due to the long dependency of the sequence. Later we will encounter alternative models such as transformers that can be used in some cases.
#
# ## Exercises
#
# 1. Adjust the hyperparameters. Observe and analyze the impact on runtime, perplexity, and the generated output.
# 1. How would you need to change the model to generate proper words as opposed to sequences of characters?
# 1. Compare the computational cost for GRUs, LSTMs and regular RNNs for a given hidden dimension. Pay special attention to training and inference cost
# 1. Since the candidate memory cells ensure that the value range is between -1 and 1 using the tanh function, why does the hidden state need to use the tanh function again to ensure that the output value range is between -1 and 1?
# 1. Implement an LSTM for time series prediction rather than character sequences.
#
#
# ## Scan the QR Code to [Discuss](https://discuss.mxnet.io/t/2368)
#
# 
| d2l-en/chapter_recurrent-neural-networks/lstm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
# <h1 align='center' style="color:green" margin="10">Zomato Web Scraping With BeautifulSoup</h1>
#
# ---
# ### The Data Science projects start with the collection of data. The data can be collected from the database, internet/online and offline mode. These days most of the information is available online and in order to extract that information Data Engineers/Data Scientists use Web Scraping.
# ### We will learn about web scraping and how it is done in Python using openly available tools.
# ---
# ## **1. Introduction**
# ---
# ### Wikipedia Definition: Web scraping, web harvesting, or web data extraction is data scraping used for extracting data from websites. Web scraping software may access the World Wide Web directly using the Hypertext Transfer Protocol or through a web browser. While web scraping can be done manually by a software user, the term typically refers to automated processes implemented using a bot or web crawler. It is a form of copying, in which specific data is gathered and copied from the web, typically into a central local database or spreadsheet, for later retrieval or analysis.
# ### we will extract the data for top restaurants in Bangalore(India) from Zomato website. The information will be accessed and read through HTML web pages. So, Let’s get started building a web scraper tool.
# ---
# ## **2. Website Content: Access and Scrap**
# ---
# ### In order to open a website on our browser, we type the website address and submit an HTTP request to access the webpage. This displays a webpage on the browser if the request is a success else we get an error. In order to access the Zomato website page, we would need to submit the request in the same way.
# ### We have a few tools available which allow us to access the website within Python.
# ### > Import libraries
import requests
import pandas as pd
from bs4 import BeautifulSoup
# ### Before we use these libraries and their functions to access the website, let’s try and understand their usage.
# ### ***@Requests***
#
# ### It is designed to be used by humans to interact with the language. This means you don’t have to manually add query strings to URLs, or form-encode your POST data. Requests will allow you to send HTTP/1.1 requests using Python. With it, you can add content like headers, form data, multipart files, and parameters via simple Python libraries. It also allows you to access the response data of Python in the same way.
# ### ***@BS4 – BeautifulSoup***
# ### Beautiful Soup is a Python library for pulling data out of HTML and XML files. It works with your favorite parser to provide idiomatic ways of navigating, searching and modifying the parse tree. It commonly saves programmers hours or days of work.
# ### Now that we know what these tools do, we can now try accessing the Zomato website.
# +
# Used header/agent because the request was timed out and asking for an agent
# Using following code we can fake the agent.
# Network I/O: fetches the live Zomato page; `response` holds the HTTP reply.
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
response = requests.get("https://www.zomato.com/bangalore/top-restaurants",headers=headers)
# -
# ### *Let’s try reading the content of the website as now we have successfully established the connection.*
# Parse the raw response bytes into a BeautifulSoup tree.
content=response.content
#print(content)
soup=BeautifulSoup(content,'html.parser')
#print(soup)
# ### The above code will first dump the content retrieve after accessing the website. The dumped content then will be passed to the BeautifulSoup function in order to get only the data with HTML/valid website tags that were used to develop the website.
# ---
# ## **3. Top restaurants: Format the data**
# ### We now have the data for the top restaurants on Zomato, dumped into a variable. But is it in a readable format? Maybe for a computer scientist, but not for everyone. Let’s try to format the scraped data.
# ### For this particular exercise, we are interested in extracting Restaurant’s Name, Restaurant’s Address and Type of Cuisine. In order to start looking for these details, we would need to find the HTML tags which store this information.
# ### Take a pause and look at the BeautifulSoup content above or you can use inspect on your Chrome Web Browser, you will be able to see which tag keeps the collection of top restaurant and other tags which has further details.
# The grid container that holds the "top restaurants" collection on the page.
top_rest=soup.find_all("div",attrs={"class":"bb0 collections-grid col-l-16"})
# print(top_rest)
# Each individual restaurant card inside the first grid container.
list_tr=top_rest[0].find_all("div",attrs={"class":"col-s-8 col-l-1by3"})
# print(list_tr)
# ### The above code will try to find all HTML div tags containing class equals to “col-s-8 col-l-1by3” and will return the collection/list of restaurants data. In order to extract the further information, we will need to access the list elements i.e. one restaurant information one by one using a loop.
#
# Collect one record per restaurant card: name, address and cuisine type.
# (Bug fix: `list_rest` was initialized twice; the duplicate line is removed.
# The dict is also renamed from the misleading `dataframe` to `record`.)
list_rest = []
for tr in list_tr:
    record = {}
    # Each field lives in a <div> whose class encodes the on-site font/format.
    # The tag text contains '\n'; replace with spaces so the values store
    # cleanly in a DataFrame/CSV later.
    record["rest_name"] = (tr.find("div",attrs={"class": "res_title zblack bold nowrap"})).text.replace('\n', ' ')
    record["rest_address"] = (tr.find("div",attrs={"class": "nowrap grey-text fontsize5 ttupper"})).text.replace('\n', ' ')
    record["cuisine_type"] = (tr.find("div",attrs={"class":"nowrap grey-text"})).text.replace('\n', ' ')
    list_rest.append(record)
print(list_rest)
# ### In the above code, tr will contain the different information about the restaurant like – Name, address,
# ### Cuisine, prices, menu, reviews etc. Each information is stored in different tags and the tags can be found after looking at the tr(each element’s data).
# ### Before finding the tags in the HTML dump, we should try and check how does the list of the restaurant actually look like on the website.
# <img src="images/1.png">
# ### From above image, you can see that the information we want to extract for this exercise is displaying in different fonts or formats. Going back to the HTML dump/content we found that information is stored inside a div tag with classes defined as the type of fonts used or the used formats.
# ### We have defined a DataFrame to collect the required information. Restaurant Name is stored underclass – res_title zblack bold nowrap, Restaurant Address is stored underclass – nowrap grey-text fontsize5 ttupper and Cuisine type is stored under class – nowrap grey-text.
#
# ### We will access this information one by one and store it in different DataFrame columns. We will also need a few string functions here, because the HTML data uses \n to separate values and cannot be stored in the DataFrame as-is. So, to avoid any errors, we replace each \n with ' ' (a space).
# ### The output of above code will look something like this –
# <img src="images/2.png">
# ---
# ## **4. Save Data in Human Readable format**
# ### Thinking of giving above data to someone who does not know Python? Will they be able to read the data? Maybe not. We will save the data frame data to CSV format which is easily readable.
import pandas as pd
# One row per restaurant; persist as CSV so non-programmers can read the data.
df=pd.DataFrame(list_rest)
df.to_csv("data/zomato_restaurant.csv",index=False) # first create a "data" folder in the directory, then run this
# ---
# ## **5. Summary**
# ### Here we saw how we can use the requests library to access any website from Python code, and use BeautifulSoup to extract the HTML content. After extracting the content we formatted it using a DataFrame and then saved the data to a CSV file. More information could be retrieved from the website, but this exercise was limited to finding each restaurant's name, address, and cuisine. A similar process can be used to scrape data from other websites too.
| 01 Zomato Web Scraping with BeautifulSoup/Zomato Web Scraping with BeautifulSoup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# # !pip install joblib
# # !pip install gensim
# # !pip install glove-python-binary
# # !pip install transformers
# # !pip install datasets
# # !pip install tokenizers
# +
import time
import os
import psutil
def count_time(func):
    """Decorator that prints the wall-clock runtime of ``func``.

    Bug fix: the wrapper now forwards arguments and returns ``func``'s
    result; previously it discarded the return value, so ``df = fit()``
    below always bound ``None``.
    """
    def int_time(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        over_time = time.time()
        total_time = over_time - start_time
        # Runtime report (Chinese: "the program ran for %s seconds").
        print("程序运行了%s秒" % total_time)
        return result
    return int_time
def count_info(func):
    """Decorator that prints the USS memory delta (KB) of running ``func``.

    Bug fix: the wrapper now forwards arguments and returns ``func``'s
    result instead of discarding it (same fix as ``count_time``).
    """
    def float_info(*args, **kwargs):
        pid = os.getpid()
        p = psutil.Process(pid)
        info_start = p.memory_full_info().uss/1024
        result = func(*args, **kwargs)
        info_end = p.memory_full_info().uss/1024
        # Memory report (Chinese: "the program used ...KB of memory").
        print("程序占用了内存"+str(info_end-info_start)+"KB")
        return result
    return float_info
# +
import numpy as np
import gc
import pandas as pd
import re
from joblib import Parallel, delayed
from gensim.models import FastText,Word2Vec
from glove import Glove,Corpus
from sklearn.model_selection import KFold
from sklearn.cluster import KMeans
from scipy import sparse
from sklearn.feature_extraction.text import TfidfVectorizer
from transformers import (AutoModel,AutoModelForMaskedLM,
AutoTokenizer, LineByLineTextDataset,
DataCollatorForLanguageModeling,
Trainer, TrainingArguments,PreTrainedTokenizerFast,pipeline)
from datasets import Dataset
from sklearn.linear_model import Ridge
from tokenizers import (
decoders,
models,
normalizers,
pre_tokenizers,
processors,
trainers,
Tokenizer,
)
import warnings
warnings.filterwarnings('ignore')
from tqdm.auto import tqdm
# -
from NLP_feature import NLP_feature
# Load the TMDB train/validation splits (CSVs expected alongside the notebook).
df_train = pd.read_csv('sub_train.csv')
df_test = pd.read_csv('sub_val.csv')
df_test.head(3)
# NLP_feature configuration — semantics of these flags come from the
# project-local NLP_feature module; confirm against its docs.
use_Toknizer=True              # presumably: tokenize text before embedding
emb_mode = 'Word2Vec'          # embedding backend passed to NLP_feature.fit
encode_mode = 'supervise'      # encoding mode passed to NLP_feature.fit
text_columns_name = ['overview']      # text column(s) to featurize
target_column = df_train['revenue']   # target series handed to fit()
candidate_labels=None          # unused here; presumably for zero-shot modes
nlp = NLP_feature()
# nlp.tokenizers
nlp.do_mlm = True              # enable masked-language-model step
nlp.emb_size=100               # embedding dimensionality
nlp.n_clusters=20              # cluster count (KMeans is imported above)
# Fit the NLP feature extractor, timing the call and measuring its memory
# footprint via the two decorators defined above.
@count_time
@count_info
def fit():
    # NOTE(review): as originally written, the decorators' wrappers discard
    # the wrapped function's return value, so `df` below ends up None —
    # confirm the decorators propagate results.
    return nlp.fit(df_train,
                   text_columns_name,
                   use_Toknizer,
                   emb_mode,
                   encode_mode,
                   target_column,
                   candidate_labels)
df = fit()
# +
# for column in df.columns:
# df_train[column] = df[column]
# -
# Apply the fitted feature extractor to the held-out split, again reporting
# runtime and memory usage via the decorators.
@count_time
@count_info
def trans():
    return nlp.transform(df_test)
test = trans()
# +
# df_train.head(1)
# +
# df_train.to_csv(f'mlm_{emb_mode}_{encode_mode}_autox_trn.csv',index=False)
# test.to_csv(f'mlm_{emb_mode}_{encode_mode}_autox_tst.csv',index=False)
# -
| autox/autox_nlp/demo/TMDBBoxOffice/Efficiency/autox_fe-300-speed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Project Submission
#
# When you are ready to submit your project, meaning you have checked the [rubric](https://review.udacity.com/#!/rubrics/1426/view) and made sure that you have completed all tasks and answered all questions. Then you are ready to compress your files and submit your solution!
#
# The following steps assume:
# 1. All cells have been *run* in Notebooks 2 and 3 (and that progress has been saved).
# 2. All questions in those notebooks have been answered.
# 3. Your architecture in `models.py` is your best tested architecture.
#
# Please make sure all your work is saved before moving on. You do not need to change any code in these cells; this code is to help you submit your project, only.
#
# ---
#
# The first thing we'll do, is convert your notebooks into `.html` files; these files will save the output of each cell and any code/text that you have modified and saved in those notebooks. Note that the first notebook is not included because its contents will not affect your project review.
# !jupyter nbconvert "2. Define the Network Architecture.ipynb"
# !jupyter nbconvert "3. Facial Keypoint Detection, Complete Pipeline.ipynb"
# !jupyter nbconvert "4. Fun with Keypoints.ipynb"
# ### Zip the project files
#
# Next, we'll zip all these notebook files and your `models.py` file into one compressed archive named `project1.zip`.
#
# After completing this step you should see this zip file appear in your home directory, where you can download it as seen in the image below, by selecting it from the list and clicking **Download**.
#
# <img src='images/download_ex.png' width=50% height=50%/>
#
# Install `zip` in the workspace. (Fix: the `!!apt-get` shell magic was left
# uncommented, which makes this jupytext .py export a syntax error; every
# other magic in the file is `# !`-commented. Run the cell from Jupyter with
# the leading `# ` removed to execute it.)
# !!apt-get -y update && apt-get install -y zip
# !zip project1.zip -r . -i@filelist.txt
# ### Submit Your Project
#
# After creating and downloading your zip file, click on the `Submit` button and follow the instructions for submitting your `project1.zip` file. Congratulations on completing this project and I hope you enjoyed it!
| project1/5. Zip Your Project Files and Submit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from pprint import pprint
from decimal import Decimal
import itertools
import metrics
import pandas
import simulate
import harvesting
import market
import withdrawal
import numpy
# -
import seaborn
from matplotlib import pyplot as plt
import matplotlib
seaborn.set(style="whitegrid")
seaborn.set_context('poster')
series = market.Returns_US_1871()
# +
def mk_lens(type_):
    """Build an accessor for one metric of a yearly simulation record.

    The accessor converts the field to ``float`` and is tagged with
    ``__label__`` = *type_* so plotting code can title series directly.
    Raises KeyError for an unknown *type_*.
    """
    accessors = {
        'Withdrawal (Nominal)': lambda rec: float(rec.withdraw_n),
        'Withdrawal (Real)': lambda rec: float(rec.withdraw_r),
        'Portfolio (Nominal)': lambda rec: float(rec.portfolio_pre.value_n),
        'Portfolio (Real)': lambda rec: float(rec.portfolio_pre.value_r),
        'Bond %': lambda rec: float(
            rec.portfolio_pre.bonds
            / (rec.portfolio_pre.bonds + rec.portfolio_pre.stocks)
        ),
    }
    chosen = accessors[type_]
    chosen.__label__ = type_
    return chosen
def fn(lens, x):
    """Apply *lens* to every record in *x*, returning a plain list."""
    return list(map(lens, x))
# -
def run_sim(year, key, length=35):
    """Run a VPW withdrawal simulation starting in *year* for *length* years.

    *key* selects the harvesting strategy: 'tent'
    (ParameterGlidepath(p, 0.4, 0.75, 30) — presumably a 0.4→0.75 glide over
    30 years; confirm in the harvesting module) or 'static-60' (fixed 60/40
    rebalancing). Unknown keys raise KeyError. The portfolio starts at
    (400k, 600k) and withdrawals follow VPW with a 40-year horizon.
    """
    strategies = {
        'tent': lambda p: harvesting.ParameterGlidepath(p, 0.4, 0.75, 30),
        'static-60': harvesting.make_rebalancer(0.60),
    }
    starting_portfolio = (400_000, 600_000)
    return simulate.withdrawals(
        series.iter_from(year),
        withdraw=lambda p, s: withdrawal.VPW(p, s, years_left=40),
        years=length,
        portfolio=starting_portfolio,
        harvesting=strategies[key],
    )
# +
def cew_one(year):
    """Certainty-equivalent withdrawal (CEW) for one retirement cohort,
    computed for both strategies from their real withdrawal streams."""
    columns = {
        'tent': [row.withdraw_r for row in run_sim(year, 'tent')],
        'static-60': [row.withdraw_r for row in run_sim(year, 'static-60')],
    }
    return pandas.DataFrame(columns).apply(metrics.cew)
def cew_all():
    """Tabulate cew_one() for every start year 1887..1989 (the last cohort
    whose 30-year window ends by 2019)."""
    start_years = range(1887, 2019 - 30 + 1)
    table = pandas.DataFrame(index=start_years, columns=['tent', 'static-60'])
    for start in start_years:
        table.loc[start] = cew_one(start)
    return table
# -
cew_one(1929)
# %time
df = cew_all()
df.head()
tent_wins = df[df['tent'] > df['static-60']]
print(len(tent_wins))
diff = df['tent'] - df['static-60']
plt.figure(figsize=(8,6))
plt.title('Tent CEW - 60/40 CEW (difference)')
seaborn.lineplot(data=diff.astype(float))
diff[diff > 0]
tent_loses = df[df['static-60'] > df['tent']]
print('Chance of tent wins', len(tent_wins) / (len(tent_wins) + len(tent_loses)))
print(diff[diff>0].median())
print(diff[diff<0].median())
def chart_one(year, lens):
    """Line-plot the *lens* metric for both strategies for one retirement
    year; returns the seaborn FacetGrid."""
    frame = pandas.DataFrame({
        'tent': fn(lens, run_sim(year, 'tent')),
        'static-60': fn(lens, run_sim(year, 'static-60')),
    })
    grid = seaborn.relplot(data=frame, kind='line', aspect=2)
    grid.fig.autofmt_xdate()
    grid.despine(left=True, bottom=True, offset=20)
    grid.fig.suptitle(f'Retirement in {year}')
    return grid
chart_one(1966, mk_lens('Portfolio (Real)'))
#chart_one(1929, mk_lens('Bond %'))
s = df.loc[1929]
def one(year, lens):
    """Return a DataFrame of the *lens* metric per simulated year for both
    strategies (no plotting)."""
    tent_rows = run_sim(year, 'tent')
    static_rows = run_sim(year, 'static-60')
    return pandas.DataFrame({
        'tent': fn(lens, tent_rows),
        'static-60': fn(lens, static_rows),
    })
one(1929, mk_lens('Bond %')).head()
| Bond Tents [VPW, CEW].ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:PythonData] *
# language: python
# name: conda-env-PythonData-py
# ---
#Importing Dependencies
from bs4 import BeautifulSoup as bs
import requests
import pymongo
from splinter import Browser
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import pandas as pd
# Launch a splinter-driven Chrome session; chromedriver.exe must be present.
# headless=False so the scraping can be watched in a visible window.
executable_path = {'executable_path': 'chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
# NASA Mars News
# Scrape the NASA Mars News Site and collect the latest News Title and Paragraph Text. Assign the text to variables that you can reference later.
# +
# Scrape the latest headline and teaser paragraph from the NASA Mars news page.
browser.visit('https://mars.nasa.gov/news/')
html = browser.html
news_soup = bs(html, 'html.parser')
# Index 0 is the first (most recent) article on the page.
news_title = browser.find_by_css('div.content_title a')[0].text
news_p = news_soup.find_all('div', class_='article_teaser_body')[0].text
print(news_title)
print('-------------')
print(news_p)
# -
#
# JPL Mars Space Images - Featured Image¶
# Visit the url for JPL Featured Space Image here.
#
# Use splinter to navigate the site and find the image url for the current Featured Mars Image and assign the url string to a variable called featured_image_url.
#
# Make sure to find the image url to the full size .jpg image.
#
# Make sure to save a complete url string for this image.
#
#
# Featured JPL image: click through to the full-size view.
browser.visit('https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/index.html')
browser.links.find_by_partial_text('FULL IMAGE').click()
browser.find_by_css('img.fancybox-image')['src']
# NOTE(review): the src located above is not captured; the URL below is
# hard-coded — confirm it matches the currently featured image.
featured_image_url = 'https://data-class-jpl-space.s3.amazonaws.com/JPL_Space/image/featured/mars3.jpg'
# Mars Facts
# Visit the Mars Facts webpage here and use Pandas to scrape the table containing facts about the planet including Diameter, Mass, etc.
#
# Use Pandas to convert the data to a HTML table string.
# Scrape the Mars facts table and render it as a single-line HTML string.
Mars_facts_url = 'https://space-facts.com/mars/'
tables = pd.read_html(Mars_facts_url)
tables
mars_df = tables[2]  # the third table on the page is used here
mars_df.columns = ['Description','Value']
mars_df
mars_html = mars_df.to_html()
mars_html
# Bug fix: str.replace returns a new string; the original call discarded the
# result, so the newlines were never actually removed from mars_html.
mars_html = mars_html.replace('\n','')
print(mars_html)
#
# Mars Hemispheres
# Visit the USGS Astrogeology site here to obtain high resolution images for each of Mar's hemispheres.
#
# You will need to click each of the links to the hemispheres in order to find the image url to the full resolution image.
#
# Save both the image url string for the full resolution hemisphere image, and the Hemisphere title containing the hemisphere name. Use a Python dictionary to store the data using the keys img_url and title.
#
# Append the dictionary with the image url string and the hemisphere title to a list. This list will contain one dictionary for each hemisphere.
# USGS Astrogeology search page listing the four enhanced Mars hemispheres.
main_url = 'https://astrogeology.usgs.gov'
hemispheres = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(hemispheres)
hemispheres_html = browser.html
hemi_soup = bs(hemispheres_html, 'html.parser')
# +
#Hemispheres data
all_hemispheres= hemi_soup.find('div',class_ = 'collapsible results')
mars_hemi = all_hemispheres.find_all('div',class_='item')
hemi_images = []
# For each hemisphere card: read its title, follow its detail page, and pull
# the full-resolution image link out of the "downloads" section.
for i in mars_hemi:
    #title
    hemisphere = i.find('div', class_ ="description")
    title = hemisphere.h3.text
    #Image link — relative href, joined with main_url before visiting
    hemisphere_url = hemisphere.a["href"]
    browser.visit(main_url + hemisphere_url)
    image_html = browser.html
    image_soup = bs(image_html, 'html.parser')
    image_link = image_soup.find('div', class_ ='downloads')
    image_url = image_link.find('li').a['href']
    #Dictionary Storage: one {title, image_url} dict per hemisphere
    images = {}
    images['title'] = title
    images['image_url'] = image_url
    hemi_images.append(images)
print(hemi_images)
# +
# Bundle every scraped artifact into one dict — presumably the shape the
# Flask/MongoDB side of the project expects; confirm against app.py.
mars_storage = {"news_title": news_title,
                "news_p": news_p,
                "featured_image_url": featured_image_url,
                "fact_table": str(mars_html),
                "hemisphere_image": hemi_images}
mars_storage
# -
| Mission_To_Mars/.ipynb_checkpoints/mission_to_mars-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/xhang24/xiaotong/blob/master/%E2%80%9Chw4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="VBlkqbgUvQ6k"
# # Volatility calibration to BSM
# + [markdown] colab_type="text" id="gxxtEM40t092"
# ## Abstract
#
# - Goal
# - Calibrate BSM model for makert price of options
#
# + [markdown] colab_type="text" id="Y0jFTVYPt1Ot"
# ## Problem
#
# All of pricing models require a set of model parameters in order to fully define the dynamics of each model. The process of adjusting model parameters such that the model prices are compatible with market prices is called **Calibration**.
#
# Input:
#
# - market prices of several options with different strikes and maturities
# - spot price
# - interest rates
#
# Output:
#
# - Find volatility of BSM model
# + [markdown] colab_type="text" id="imn9O8Qcu1iW"
# ## Analysis
#
#
# Suppose
# - Model prameter $\theta = (\theta_1, ... \theta_m)$ is a vector
# - Prices of $n$ instruments are available in the market, with the market price of the $i$th instrument quoted as $C_i$.
# Let's denote $C = (C_1, ..., C_n)$ as a market price vector.
# - In theory, $C_i^\theta$ is the model price of the $i$th instrument, and they are denoted as a vector $C^\theta = (C_1^\theta, ..., C^\theta_n)$.
#
# Given a distance function $H: \mathbb R^n \times \mathbb R^n \mapsto \mathbb R^+$, calibration is to find the parameter $\theta^*$ which minimize the objective function $H(C^\theta, C)$, i.e.
# $$ \theta^* = \arg\min_\theta H(C^\theta, C).$$
#
# Commonly used distance functions for $H$ are
# - $H(x, y) = (\sum_{i=1}^n w_i |x_i - y_i|^p)^{1/p}.$
# - $H(x, y) = (\sum_{i=1}^n w_i | \frac{x_i - y_i}{y_i}|^p)^{1/p}.$ If $p = 2$, $w_i =1$, then $H^2$ is called SSRE (sum of squared relative errors)
# - $H(x, y) = (\sum_{i=1}^n w_i |\ln x - \ln y|^p)^{1/p}.$
#
# In what follows, we are going to use the first case of $H$ with $p = 2$ and $w_i = 1$ for all $i$'s.
# + [markdown] id="I7Id5NE-hOaZ" colab_type="text"
# # Hw Tasks
# + [markdown] id="ipfXGogphZ79" colab_type="text"
# ## Task-1
#
# - Upload data from [here](https://github.com/songqsh/20s_ma573/blob/master/src/20optiondata2.dat) to your cloud platform
# - Calibration/Performance:
# - Filter out 2-mon options and calibrate volatility using the distance function
# $$H(x, y) = (\sum_{i=1}^n w_i |x_i - y_i|^2)^{1/2}.$$
# - Calibrated price for option means the computed option price using calibrated volatility and other parameters given by the market. Compare the market price and calibrated price using a plot of two curves: strike vs market (calibrated) price
# - Do the same thing for the filter from 5-mon options.
# - Which calibrated volatility is bigger, 2-mon or 5-mon?
#
# + [markdown] id="DuovTMczlP_d" colab_type="text"
# ## Task-2 (optional)
# Please discuss the validity of the following statements:
# - Given an underlying asset, options with shorter maturity can be better fitted than a longer maturity.
# - Suppose Assest1(ex. AAPL) is more liquid than Asset2 (ex. TAL). Then Asset1 can be fit better than Asset2.
# + colab_type="code" id="UOVUqxlEEqIG" colab={"base_uri": "https://localhost:8080/", "height": 289} outputId="bf6572dd-1fe1-4a97-acc9-800a866bb2eb"
import numpy as np
# Fetch the course repo and load the option quotes. Per the print below, each
# row is: otype (+1 call / -1 put), maturity (years), strike, option price.
# !git clone https://github.com/songqsh/20s_MA573.git
# %cd 20s_MA573/src/
# %ls 20optiondata2.dat
np_option_data1 = np.loadtxt('20optiondata2.dat', comments='#', delimiter=',')
print('>>>>>>otype, maturity, strike, option_price')
print(np_option_data1)
# + id="4tO1FcoQc-xl" colab_type="code" colab={}
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as so
import scipy.stats as ss
# + id="LsmVfATQ7_5h" colab_type="code" colab={}
'''=========
option class init
=========='''
class VanillaOption:
    """A plain European option quote.

    otype: +1 for a call, -1 for a put.
    market_price: observed market quote, used later for calibration.
    """

    def __init__(self, otype = 1, strike = 110., maturity = 1., market_price = 10.):
        self.otype = otype
        self.strike = strike
        self.maturity = maturity
        self.market_price = market_price

    def payoff(self, s):
        """Exercise value at spot price *s* (never negative)."""
        intrinsic = (s - self.strike) * self.otype
        return max(0, intrinsic)
'''============
Gbm class
============='''
class Gbm:
    """Geometric Brownian motion market model (Black–Scholes world)."""

    def __init__(self, init_state = 100., drift_ratio = .0475, vol_ratio = .2):
        self.init_state = init_state    # spot price s0
        self.drift_ratio = drift_ratio  # risk-free rate r
        self.vol_ratio = vol_ratio      # volatility sigma

'''========
Black-Scholes-Merton formula.
=========='''

def bsm_price(self, vanilla_option):
    """Black–Scholes–Merton price of *vanilla_option* under this Gbm.

    Handles calls and puts uniformly via the otype = +/-1 sign trick.
    """
    s0, sigma, r = self.init_state, self.vol_ratio, self.drift_ratio
    k = vanilla_option.strike
    t = vanilla_option.maturity
    sign = vanilla_option.otype
    sqrt_t = np.sqrt(t)
    d1 = (np.log(s0 / k) + (r + 0.5 * sigma ** 2) * t) / (sigma * sqrt_t)
    d2 = d1 - sigma * sqrt_t
    discounted_strike = np.exp(-r * t) * k
    return sign * (s0 * ss.norm.cdf(sign * d1)
                   - discounted_strike * ss.norm.cdf(sign * d2))

# Attach the pricer as a method so model objects price options directly.
Gbm.bsm_price = bsm_price
# + id="nkDOMVde8EOF" colab_type="code" colab={}
# Market model: spot 100, rate 5%. The 10% volatility is only an initial
# guess that calibration will overwrite.
gbm1 = Gbm(
    init_state = 100, #market data
    drift_ratio = .05, #market data
    vol_ratio = .1 #initial guess
    )
# + id="lM3RCWG48G29" colab_type="code" colab={}
# Wrap every quoted option (row: otype, maturity, strike, market price) in a
# VanillaOption instance for the calibration routines below.
num_row = np_option_data1.shape[0]
option_list1 = [
    VanillaOption(
        otype = row[0],
        strike = row[2],
        maturity = row[1],
        market_price = row[3],
    )
    for row in np_option_data1
]
# + id="1JhQZWX-8M92" colab_type="code" colab={}
import math
def error_function(vol, gbm, option):
    """Root-sum-square pricing error of the model at volatility *vol*.

    Side effect: sets ``gbm.vol_ratio = vol`` before repricing every option
    in *option* against its quoted market price.
    """
    gbm.vol_ratio = vol
    squared_errors = ((o.market_price - gbm.bsm_price(o)) ** 2 for o in option)
    return math.sqrt(sum(squared_errors))
def bsm_calibration(gbm, option):
    """Volatility minimizing error_function via scipy's fmin (Nelder–Mead
    simplex), starting from a 10% guess; returns the scalar optimum."""
    initial_guess = .1
    best = so.fmin(error_function, initial_guess, args = (gbm, option), disp = 0)
    return best[0]
# + id="RGTwQSZ0Cy1X" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="c8a16640-c74f-45ae-e622-5114262b57f2"
# Calibrate to the 2-month maturities only.
filter1 = list(filter(lambda x: x.maturity == 2/12, option_list1))
calibrated_volatility1 = bsm_calibration(gbm1, filter1)
print('>>>>>calibrated volatility for 2-mon call is ' + str(calibrated_volatility1))
# + id="xjwBwu_pDrFj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="be37645a-f816-4bbb-eded-29c1fb1db69a"
# Reprice the 2-month options at the calibrated volatility.
gbm1.vol_ratio = calibrated_volatility1
#find calibrated option price
filter1_calibrated_price = [gbm1.bsm_price(filter1[i]) for i in range(len(filter1))]
print('>>>>>>>>> this is option price calculated from calibrated vol')
filter1_calibrated_price
# + id="YGHr_H8x8REM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="39899125-0e76-496f-a39e-d027d4f5a132"
# Strike vs. price: market quotes (dots) against calibrated model prices (line).
x_co = [filter1[i].strike for i in range(len(filter1))]
y_co = [filter1[i].market_price for i in range(len(filter1))]
plt.plot(x_co, y_co, 'o', label='market price')
y_co = filter1_calibrated_price
plt.plot(x_co, y_co, label = 'calib price')
plt.ylabel('option price')
plt.xlabel('strike with fixed 2-mon maturity')
plt.legend();
# + id="zgoOVaaq8etL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ead2114b-2fbd-4d8f-fa3f-adc626ac14af"
# Repeat the calibration for the 5-month maturities.
filter2 = list(filter(lambda x: x.maturity == 5/12, option_list1))
calibrated_volatility2 = bsm_calibration(gbm1, filter2)
print('>>>>>>>> calibrated_volatility is ' + str(calibrated_volatility2))
# + id="iRwULZNm-E5w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="96b4496c-64ae-40fd-ba51-d2dc66649d4d"
# Reprice the 5-month options at the newly calibrated volatility.
gbm1.vol_ratio = calibrated_volatility2
#find calibrated option price
filter2_calibrated_price = [gbm1.bsm_price(filter2[i]) for i in range(len(filter2))]
print('>>>>>>>>> this is option price calculated from calibrated vol')
filter2_calibrated_price
# + id="_MXMF66AD_xc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 279} outputId="63ec95ca-d786-4094-899c-f9a2147cc20e"
# Strike vs. price for the 5-month slice: market quotes (dots) vs model (line).
x_co = [filter2[i].strike for i in range(len(filter2))]
y_co = [filter2[i].market_price for i in range(len(filter2))]
plt.plot(x_co, y_co, 'o', label='market price')
y_co = filter2_calibrated_price
plt.plot(x_co, y_co, label = 'calib price')
plt.ylabel('option price')
plt.xlabel('strike with fixed 5-mon maturity')
plt.legend();
| src/hw4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Teaching Math with Data Science and Computational Thinking
# ### MCATA Conference 2019
#
# <EMAIL> | [@misterhay](https://twitter.com/misterhay)
#
# [Callysto.ca](https://callysto.ca) | [@callysto_canada](https://twitter.com/callysto_canada)
#
# **bit.ly/CallystoMCATA**
# + [markdown] slideshow={"slide_type": "subslide"}
# The ability to process information in an analytical way is becoming increasingly important, and educators are encouraged or expected to promote coding and data analytics. Callysto is a free online platform capable of engaging data visualizations, big data processing, coding, and text formatting.
#
# In this session we will explore Python-based modules developed for teaching outcomes in math and other subjects. We will talk about how to incorporate these into your teaching, and help you create or edit modules for other outcomes if you are interested.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Who is here? What brought you here?
# + [markdown] slideshow={"slide_type": "fragment"}
# Are you a secondary teacher? elementary teacher?
# + [markdown] slideshow={"slide_type": "fragment"}
# How many administrators, consultants, or learning coaches?
# + [markdown] slideshow={"slide_type": "fragment"}
# Who else?
# + [markdown] slideshow={"slide_type": "slide"}
# ## Outline
#
# 1. About CanCode and Callysto
# 2. Jupyter Notebooks
# 3. Computational Thinking and Data Analysis
# 4. Call to Action
# + [markdown] slideshow={"slide_type": "slide"}
# 
#
# CanCode aims to equip Canadian youth, including traditionally underrepresented groups, with the skills they need to be prepared for further studies, including advanced digital skills and science, technology, engineering and math (STEM) courses, leading to the jobs of the future. Canada's success in the digital economy depends on leveraging our diverse talent and providing opportunity for all to participate — investing in digital skills development will help to achieve this.
# + [markdown] slideshow={"slide_type": "slide"}
# 
#
# Callysto is a free, interactive, curriculum-based learning and skills development tool — think “interactive textbook” — that can be accessed from any device with an internet connection. It was created to help students strengthen their computational thinking, coding and data science skills.
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="callysto-impact.png" width="500">
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Callysto is by:
# + [markdown] slideshow={"slide_type": "subslide"}
# 
# + [markdown] slideshow={"slide_type": "subslide"}
# 
#
# PIMS is a collaborative network dedicated to the promotion of discovery, understanding and awareness in the mathematical sciences. PIMS brings together leading researchers from major Universities across western Canada, as well as the University of Washington, and is a Unité Mixte Internationale of the National Center for Scientific Research (Le Centre national de la recherche scientifique, CNRS).
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Callysto includes:
# - infrastructure: [Jupyter Hub](https://hub.callysto.ca)
# - resources: [learning modules](https://callysto.ca/learning_modules/) (that also run on sites like [Colab](https://colab.research.google.com), [Azure](https://notebooks.azure.com/), and [Watson Studio](https://www.ibm.com/uk-en/cloud/watson-studio))
# - teacher and student training
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is a Jupyter Notebook?
#
# - a browser-based document
# - contains text, graphics, LaTeX, multimedia, and live code (Python)
# - easily usable, modifiable, and sharable
# This presentation is a Jupyter Notebook
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Embedded Video Example
#
# + slideshow={"slide_type": "fragment"}
# Embed a YouTube player for the given video id in the notebook output.
from IPython.display import YouTubeVideo
YouTubeVideo('ZmhlRsYjs7Y')
# + slideshow={"slide_type": "subslide"}
# Code example
# Live code: the last expression in a cell is echoed as its output.
2 + 2
# + [markdown] slideshow={"slide_type": "subslide"}
# ### LaTeX Example
# $ E = mc^2 $ for special relativity and and also $ R_{\mu \nu} - \frac{1}{2} R g_{\mu \nu} + \Lambda g_{\mu \nu}
# = \frac{8\pi G}{c^4} T_{\mu \nu}.$
# + [markdown] slideshow={"slide_type": "slide"}
# ## Graphics: Plots and Animations
#
# We can create graphs of functions or data.
# + slideshow={"slide_type": "subslide"}
# import libraries
from numpy import linspace, pi, sin, nan
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from IPython.display import HTML
# %matplotlib inline
# + slideshow={"slide_type": "subslide"}
# create a graph
# One period of sin(x) sampled at 100 points; keep the Line2D handle so the
# animation below can update its y-data in place.
fig, ax = plt.subplots()
x = linspace(0, 2*pi, 100)
line, = ax.plot(x, sin(x))
# + slideshow={"slide_type": "subslide"}
# animate the graph
def animate(i):
    # Shift the phase by one sample-width per frame to make a travelling wave.
    line.set_ydata(sin(x + 2*pi*i / 100))  # update the data in place
    return line,
ani = animation.FuncAnimation(fig, animate, interval=20, save_count=100)
HTML(ani.to_jshtml())
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Graphing Data
#
# We can retrive data from online sources and display it in tables or graphs.
# + slideshow={"slide_type": "subslide"}
import requests
import pandas as pd
import plotly.express as px
# Get tide data from Fisheries and Oceans Canada (hourly for the next 7 days)
url = 'https://www.waterlevels.gc.ca/eng/station?sid=215' # Joggins Wharf
response = requests.get(url)
dfs = pd.read_html(response.text) # parse every HTML table on the page
# NOTE(review): index 7 is assumed to be the hourly-heights table — a page
# layout change would silently shift it; confirm before relying on this.
tideHeight = dfs[7]['Hour'].transpose() # select the 8th frame and transpose columns and rows
tideList = [] # flatten the transposed table into one list of heights
for key in tideHeight.keys():
    tideList.extend(tideHeight[key].tolist())
df = pd.DataFrame(tideList) # turn the list into a dataframe
df.columns = ['Height (m)'] # label the height column
df['Time (h)'] = range(0,len(tideList)) # add a time (in hours) column
# + slideshow={"slide_type": "subslide"}
# Interactive line chart of the scraped tide heights.
fig = px.line(df, x='Time (h)', y='Height (m)', title='Joggins Wharf Tides') # create graph
fig.show()
# + [markdown] slideshow={"slide_type": "subslide"}
# ### GeoGebra and Desmos Examples
#
# We can display Desmos or GeoGebra applets, or other web pages, inside an iframe.
# + slideshow={"slide_type": "subslide"}
import IPython
# Embed external applets in iframes; arguments are (URL, width, height).
IPython.display.IFrame('https://www.geogebra.org/geometry/xfbk482w','100%','480px') # URL, width, height
# + slideshow={"slide_type": "subslide"}
IPython.display.IFrame('https://www.desmos.com/calculator/6dpcugds4t','100%','480px')
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Turtles
# Drawing geometry with turtles, just like [Logo](https://en.wikipedia.org/wiki/Logo_(programming_language))
# + slideshow={"slide_type": "subslide"}
# Uncomment the following line if mobilechelonian isn't already installed
# #!pip install --user mobilechelonian
from mobilechelonian import Turtle
t = Turtle()
# Draw two 50-unit segments joined by a 90-degree right turn
t.forward(50)
t.right(90)
t.forward(50)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Turtle Commands
#
# `t.speed(integer)`
# Speed of your turtle, 1-10
#
# `t.penup()`
# `t.pendown()`
# For drawing lines or not
#
# `t.right(degrees)`
# `t.left(degrees)`
# Turn turtle left or right by some degree
#
# `t.forward(units)`
# `t.backward(units)`
# Move your turtle forward or backward
#
# `t.pencolor(“color”)`
# Color of your turtle’s line, e.g. “blue”
#
# `t.setposition(x,y)`
# Move turtle to position x,y
#
# `t.circle(r, degrees)`
# Draw a piece of a circle of radius r some number of degrees
# + [markdown] slideshow={"slide_type": "slide"}
# ## Computational Thinking
#
# 1. Decomposition - break down a problem
# 2. Pattern recognition - find trends in problem
# 3. Algorithm design - create a series of steps to solve problem
# 4. Abstraction - remove parts to generalize solution
# 5. Analyze and evaluate the solution
#
# Coming with new curriculum in math and science, but also applicable to other disciplines.
#
# This may involve coding, but it's more about designing problems that can be solved by computers.
# + slideshow={"slide_type": "slide"}
# Elementary Math Example: read numbers from the user and sort them
n = input('How many numbers will we sort? ')
numberList = []
for x in range(0, int(n)):
    newNumber = input(str(x+1) + ' Give me a number: ')
    # Convert to float so values sort numerically; as strings, "10" would
    # sort before "9" (lexicographic order), which is wrong for a math example
    numberList.append(float(newNumber))
numberList.sort()
print(numberList)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Data Analysis
#
# Can be student data, machine- or sensor-generated data, curated data, or open data.
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Canadian Open Data
# 1. [Statistics Canada](https://www150.statcan.gc.ca/n1/en/type/data?MM=1)
# 1. [Canada Open Data](https://open.canada.ca/en) (Contains many data sets for provinces or territories without their own open data portal)
# 1. [Alberta Open Data](https://open.alberta.ca/opendata)
# 1. [BC Open Data](https://data.gov.bc.ca/)
# 1. [Saskatchewan Open Data](http://www.opendatask.ca/)
# 1. [Northwest Territories Open Data](https://www.opennwt.ca/)
# 1. [Ontario Open Data](https://www.ontario.ca/search/data-catalogue)
# 1. [Quebec Open Data](http://donnees.ville.quebec.qc.ca/catalogue.aspx) (French only)
# 1. [Nova Scotia Open Data](https://data.novascotia.ca/)
# 1. [PEI Open Data](https://data.princeedwardisland.ca/)
# 1. [Calgary Open Data Portal](https://data.calgary.ca/)
# 1. [Edmonton Open Data Portal](https://data.edmonton.ca/)
# 1. [Vancouver Open Data Portal](https://vancouver.ca/your-government/open-data-catalogue.aspx)
# 1. [Toronto Open Data Portal](https://www.toronto.ca/city-government/data-research-maps/open-data/)
# 1. [Winnipeg Open Data Portal](https://data.winnipeg.ca/)
# 1. [Whitehorse Open Data](http://data.whitehorse.ca)
#
# *Many cities, provinces, and territories offer their own open data portals, and can often be found through a Google search of "`CITY NAME` open data portal"*
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Other Governments
# 1. [EU Open Data](https://open-data.europa.eu/)
# 1. [USA](https://www.data.gov/)
# 1. [Australia](https://data.gov.au/)
# 1. [NASA](https://open.nasa.gov/open-data/)
# 1. [Russia](http://data.gov.ru/?language=en) (Site is in English, but many of the datasets are in Russian)
#
# *There are many national open data portals: these are just a few.*
#
# ### Data Aggregators
# 1. [Kaggle](https://www.kaggle.com/datasets)
# 1. [Open Data Soft](https://data.opendatasoft.com/pages/home/)
# 1. [Open Africa](https://africaopendata.org/)
# 1. [List of interesting data sets](https://github.com/awesomedata/awesome-public-datasets)
# 1. [Open Data Network](https://www.opendatanetwork.com/) (Technically speaking, this is a data set search engine)
# 1. [Google Public Data](https://www.google.com/publicdata/directory)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Examples
#
# [Sine fit to tides data](https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https://github.com/misterhay/RabbitMath&branch=master&urlpath=notebooks/RabbitMath/14-The-period-of-the-tides/14.1-The-period-of-the-tides.ipynb)
#
# [Frog statistics problem](https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https://github.com/misterhay/frog_problem&branch=master&urlpath=notebooks/frog_problem/frog-jump-statistics-problem.ipynb)
#
# [TED Ed Riddles](https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https://github.com/callysto/riddles)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Call to Action
#
# 1. Explore curriculum-aligned resources at https://callysto.ca/learning_modules
# 2. Use resources and the Callysto Hub https://hub.callysto.ca with students
# 1. sign in using Google, Microsoft, or Cybera (Pika)
# 2. each user gets their own Jupyter server
# 3. get a copy of this notebook from **bit.ly/CallystoMCATA**
# 3. Encourage other teachers to try it out
# 4. Consider beta testing the online course "[Callysto and Computational Thinking](https://courses.callysto.farm/)"
# 5. Suggest or develop and share notebooks
# 1. we can help
# 2. [call for proposals](http://bit.ly/callysto-proposals)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Get in Touch
#
# [Callysto.ca](https://callysto.ca) | [@callysto_canada](https://twitter.com/callysto_canada)
#
# <EMAIL> | [@misterhay](https://twitter.com/misterhay)
#
# **bit.ly/CallystoMCATA**
| Teaching-Math-with-Data-Science-and-Computational-Thinking-MCATA-2019.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="v24tePJBsFbe"
# # Disclaimer
#
# Released under the CC BY 4.0 License (https://creativecommons.org/licenses/by/4.0/)
#
# # Purpose of this notebook
#
# The purpose of this document is to show how I approached the presented problem and to record my learning experience in how to use Tensorflow 2 and CatBoost to perform a classification task on text data.
#
# If, while reading this document, you think _"Why didn't you do `<this>` instead of `<that>`?"_, the answer could be simply because I don't know about `<this>`. Comments, questions and constructive criticism are of course welcome.
#
# # Intro
#
# This simple classification task has been developed to get familiarized with Tensorflow 2 and CatBoost handling of text data. In summary, the task is to predict the author of a short text.
#
# To get a number of train/test examples, it is enough to create a twitter app and, using the python client library for twitter, read the user timeline of multiple accounts. This process is not covered here. If you are interested in this topic, feel free to contact me.
#
#
# ## Features
#
# It is assumed the collected raw data consists of:
#
# 1. The author handle (the label that will be predicted)
# 2. The timestamp of the post
# 3. The raw text of the post
#
# ### Preparing the dataset
#
# When preparing the dataset, the content of the post is preprocessed using these rules:
#
# 1. Newlines are replaced with a space
# 2. Links are replaced with a placeholder (e.g. `<link>`)
# 3. For each possible unicode char category, the number of chars in that category is added as a feature
# 4. The number of words for each tweet is added as a feature
# 5. Retweets (even retweets with comment) are discarded. Only responses and original tweets are taken into account
#
# The dataset has been randomly split into three different files for train (70%), validation (10%) and test (20%). For each label, it has been verified that the same percentages hold in all three files.
#
# Before fitting the data and before evaluation on the test dataset, the timestamp values are normalized, using the mean and standard deviation computed on the train dataset.
# + [markdown] colab_type="text" id="cixKTRjUQfJ0"
# # TensorFlow 2 model
#
# The model has four different input features:
#
# 1. The normalized timestamp.
# 2. The input text, represented as the whole sentence. This will be transformed in a 128-dimensional vector by an embedding layer.
# 3. The input text, this time represented as a sequence of words, expressed as indexes of tokens. This representation will be used by a LSTM layer to try to extract some meaning from the actual sequence of the used words.
# 4. The unicode character category usage. This should help in identifying handles that use emojis, a lot of punctuation or unusual chars.
#
# The resulting layers are concatenated, then after a sequence of two dense layers (with an applied dropout) the final layer computes the logits for the different classes. The used loss function is *sparse categorical crossentropy*, since the labels are represented as indexes of a list of twitter handles.
# + [markdown] colab_type="text" id="AJdXtG7eQ5tK"
# ## Imports for the TensorFlow 2 model
# + colab={} colab_type="code" id="6Dl5RBSRQ31X"
import functools
import os
from tensorflow.keras import Input, layers
import tensorflow as tf
import tensorflow_hub as hub
from tensorflow.keras import regularizers
import pandas as pd
import numpy as np
import copy
import calendar
import datetime
import re
from tensorflow.keras.preprocessing.text import Tokenizer
import unicodedata
# Masking layers and GPU don't mix, so hide all GPUs and force CPU execution
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
# + [markdown] colab_type="text" id="bzzxApOjQ_bJ"
# ## Definitions for the TensorFlow 2 model
#
# + colab={} colab_type="code" id="ABqLUusIRMAs"
#Download size: ~446MB
# Pre-trained sentence-embedding layer from TF-Hub, kept frozen during training.
# NOTE(review): module name says 128-dim embeddings but output_shape is [512] -- confirm intended shape
hub_layer = hub.KerasLayer(
    "https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1",
    output_shape=[512],
    input_shape=[],
    dtype=tf.string,
    trainable=False
)
embed = hub.load("https://tfhub.dev/google/tf2-preview/nnlm-en-dim128/1")
# All two-letter unicode character category codes (see the unicodedata module);
# the dataset stores one per-tweet count column for each category
unicode_data_categories = [
    "Cc",
    "Cf",
    "Cn",
    "Co",
    "Cs",
    "LC",
    "Ll",
    "Lm",
    "Lo",
    "Lt",
    "Lu",
    "Mc",
    "Me",
    "Mn",
    "Nd",
    "Nl",
    "No",
    "Pc",
    "Pd",
    "Pe",
    "Pf",
    "Pi",
    "Po",
    "Ps",
    "Sc",
    "Sk",
    "Sm",
    "So",
    "Zl",
    "Zp",
    "Zs"
]
# CSV column layout: label, post timestamp, raw text, then the category counts
column_names = [
    "handle",
    "timestamp",
    "text"
]
column_names.extend(unicode_data_categories)
# Vocabulary cap for the tokenizer; out-of-vocabulary words map to <OOV>
n_tokens = 100000
tokenizer = Tokenizer(n_tokens, oov_token='<OOV>')
#List of handles (labels)
#Fill with the handles you want to consider in your dataset
handles = [
]
# Sentinel token used to right-pad tokenized sequences
end_token = "XEND"
# (removed a dead `train_file = os.path.realpath("input.csv")` that was
# immediately overwritten by the assignment below)
train_file = os.path.realpath("data/train.csv")
val_file = os.path.realpath("data/val.csv")
test_file = os.path.realpath("data/test.csv")
# + [markdown] colab_type="text" id="TIQrVrIyR1BB"
# ## Preprocessing and computing dataset features
# + colab={} colab_type="code" id="X6v13WJXR2qq"
def get_pandas_dataset(input_file, fit_tokenizer=False, timestamp_mean=None, timestamp_std=None, pad_sequence=None):
    """Load one CSV split and derive the model's input features.

    Normalizes timestamps (computing mean/std when not supplied, i.e. on the
    train split), maps each handle to its class index, tokenizes the text into
    fixed-length padded sequences and adds a word-count column.

    Returns (dataframe, timestamp_mean, timestamp_std, pad_sequence) so the
    train-split statistics can be reused for validation and test.
    """
    df = pd.read_csv(input_file, names=column_names)
    df = df[df.handle.isin(handles)]
    # Compute normalization statistics only when the caller did not supply them
    if timestamp_mean is None:
        timestamp_mean = df.timestamp.mean()
    if timestamp_std is None:
        timestamp_std = df.timestamp.std()
    df.timestamp = (df.timestamp - timestamp_mean) / timestamp_std
    # Integer class label = position of the handle in the global handles list
    df["handle_index"] = df['handle'].map(lambda h: handles.index(h))
    if fit_tokenizer:
        tokenizer.fit_on_texts(df["text"])
        # Token index of the end-token, used below as the padding value
        pad_sequence = tokenizer.texts_to_sequences([[end_token]])[0][0]
    df["sequence"] = tokenizer.texts_to_sequences(df["text"])
    max_seq_length = 30
    df = df.reset_index(drop=True)
    # Truncate every token sequence to max_seq_length ...
    df["sequence"] = pd.Series(seq[0:max_seq_length] for seq in df["sequence"])
    # ... then right-pad the shorter ones with the end-token index
    df["sequence"] = pd.Series([seq + ([pad_sequence] * (max_seq_length - len(seq))) for seq in df["sequence"]])
    # NOTE(review): the '+ 1' overcounts words by one for single-spaced text -- preserved as-is
    df["words_in_tweet"] = df["text"].str.strip().str.split(" ").str.len() + 1
    return df, timestamp_mean, timestamp_std, pad_sequence
# Fit tokenizer and normalization stats on train, then reuse them for test/val
# to avoid leaking test statistics into preprocessing
train_dataset, timestamp_mean, timestamp_std, pad_sequence = get_pandas_dataset(train_file, fit_tokenizer=True)
test_dataset, _, _, _= get_pandas_dataset(test_file, timestamp_mean=timestamp_mean, timestamp_std=timestamp_std, pad_sequence=pad_sequence)
val_dataset, _, _, _ = get_pandas_dataset(val_file, timestamp_mean=timestamp_mean, timestamp_std=timestamp_std, pad_sequence=pad_sequence)
#selecting as features only the unicode categories that are used in the train dataset
non_null_unicode_categories = []
for unicode_data_category in unicode_data_categories:
    category_name = unicode_data_category
    category_sum = train_dataset[category_name].sum()
    if(category_sum > 0):
        non_null_unicode_categories.append(category_name)
print("Bucketized unicode categories used as features: " + repr(non_null_unicode_categories))
# + [markdown] colab_type="text" id="Dh1eTHn-TY9b"
# ## Defining input/output features from the datasets
# + colab={} colab_type="code" id="ItvQq-OlTitb"
def split_inputs_and_outputs(pd_dat):
    """Split a prepared dataframe into per-feature model inputs and labels.

    Returns (timestamps, text, sequence, unicode_char_categories,
    words_in_tweet, labels) where sequence is a 2-D array of token indexes and
    unicode_char_categories maps category code -> count column.

    (Removed an unused local `icolumns` and a dead commented-out line.)
    """
    labels = pd_dat['handle_index'].values
    timestamps = pd_dat.loc[:, "timestamp"].astype(np.float32)
    text = pd_dat.loc[:, "text"]
    # Stack the per-row python lists into one 2-D array of token indexes
    sequence = np.asarray([np.array(el) for el in pd_dat.loc[:, "sequence"]])
    unicode_char_categories = {
        category_name: pd_dat[category_name] for category_name in non_null_unicode_categories
    }
    words_in_tweet = pd_dat['words_in_tweet']
    return timestamps, text, sequence, unicode_char_categories, words_in_tweet, labels
# Materialize the feature groups for each split
timestamps_train, text_train, sequence_train, unicode_char_categories_train, words_in_tweet_train, labels_train = split_inputs_and_outputs(train_dataset)
timestamps_val, text_val, sequence_val, unicode_char_categories_val, words_in_tweet_val, labels_val = split_inputs_and_outputs(val_dataset)
timestamps_test, text_test, sequence_test, unicode_char_categories_test, words_in_tweet_test, labels_test = split_inputs_and_outputs(test_dataset)
# + [markdown] colab_type="text" id="Cc7CEeVgTsWB"
# ## Input tensors
# + colab={} colab_type="code" id="Sad1qGeyTtvi"
# Symbolic Keras input layers, one per feature group fed to the model
input_timestamp = Input(shape=(1, ), name='input_timestamp', dtype=tf.float32)
input_text = Input(shape=(1, ), name='input_text', dtype=tf.string)
# NOTE(review): token indexes are fed as float32 here -- confirm intended
input_sequence = Input(shape=(None, 1 ), name="input_sequence", dtype=tf.float32)
input_unicode_char_categories = [
    Input(shape=(1, ), name="input_"+category_name, dtype=tf.float32) for category_name in non_null_unicode_categories
]
input_words_in_tweet = Input(shape=(1, ), name="input_words_in_tweet", dtype=tf.float32)
# Feed dicts keyed by input-layer name, one per split
inputs_train = {
    'input_timestamp': timestamps_train,
    "input_text": text_train,
    "input_sequence": sequence_train,
    'input_words_in_tweet': words_in_tweet_train,
}
inputs_train.update({
    'input_' + category_name: unicode_char_categories_train[category_name] for category_name in non_null_unicode_categories
})
outputs_train = labels_train
inputs_val = {
    'input_timestamp': timestamps_val,
    "input_text": text_val,
    "input_sequence": sequence_val,
    'input_words_in_tweet': words_in_tweet_val
}
inputs_val.update({
    'input_' + category_name: unicode_char_categories_val[category_name] for category_name in non_null_unicode_categories
})
outputs_val = labels_val
inputs_test = {
    'input_timestamp': timestamps_test,
    "input_text": text_test,
    "input_sequence": sequence_test,
    'input_words_in_tweet': words_in_tweet_test
}
inputs_test.update({
    'input_' + category_name: unicode_char_categories_test[category_name] for category_name in non_null_unicode_categories
})
outputs_test = labels_test
# + [markdown] colab_type="text" id="8Mo17vnQUBWK"
# ## TensorFlow 2 model definition
# + colab={} colab_type="code" id="xDlkOVEqUE8j"
def get_model():
    """Assemble and compile the multi-input tweet-author classifier.

    Combines: the whole-sentence TF-Hub embedding, a bidirectional LSTM over
    the token sequence, the timestamp, the per-unicode-category counts and the
    word count. Emits raw logits (one per handle) for a from_logits
    sparse-categorical-crossentropy loss.
    """
    reg = None
    activation = 'relu'
    # The TF-Hub layer expects a 1-D batch of strings, so drop the inner axis
    sentence = layers.Reshape(target_shape=())(input_text)
    sentence = hub_layer(sentence)
    dense_text = layers.Dense(256, activation=activation)(sentence)
    # Mask the padded tail of each token sequence before the LSTM
    masked_seq = layers.Masking(mask_value=pad_sequence)(input_sequence)
    seq_features = layers.Flatten()(layers.Bidirectional(layers.LSTM(32))(masked_seq))
    merged = layers.concatenate([
        input_timestamp,
        seq_features,
        *input_unicode_char_categories,
        input_words_in_tweet,
        dense_text
    ])
    hidden = layers.Dense(n_tokens // 30, activation=activation, kernel_regularizer=reg)(merged)
    hidden = layers.Dropout(0.1)(hidden)
    hidden = layers.Dense(n_tokens // 50, activation=activation, kernel_regularizer=reg)(hidden)
    hidden = layers.Dropout(0.1)(hidden)
    hidden = layers.Dense(256, activation=activation, kernel_regularizer=reg)(hidden)
    # Linear activation: these are logits for the from_logits=True loss below
    logits = layers.Dense(len(handles), activation='linear')(hidden)
    model = tf.keras.Model(
        inputs=[
            input_timestamp,
            input_text,
            input_sequence,
            *input_unicode_char_categories,
            input_words_in_tweet
        ],
        outputs=[logits]
    )
    model.compile(
        optimizer='adam',
        loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=['sparse_categorical_accuracy']
    )
    return model
model = get_model()
tf.keras.utils.plot_model(model, to_file='twitstar.png', show_shapes=True)
# + [markdown] colab_type="text" id="1hygiOHkUQBZ"
# ## TensorFlow 2 model fitting
# + colab={} colab_type="code" id="c8TQEVHeUS3k"
# Train with checkpointing of the best validation-accuracy weights and
# early stopping after 3 epochs without improvement
history = model.fit(
    inputs_train,
    outputs_train,
    epochs=15,
    batch_size=64,
    verbose=True,
    validation_data=(inputs_val, outputs_val),
    callbacks=[
        tf.keras.callbacks.ModelCheckpoint(
            os.path.realpath("weights.h5"),
            monitor="val_sparse_categorical_accuracy",
            save_best_only=True,
            verbose=2
        ),
        tf.keras.callbacks.EarlyStopping(
            patience=3,
            monitor="val_sparse_categorical_accuracy"
        ),
    ]
)
# + [markdown] colab_type="text" id="TY8cOfMM03z0"
# ## TensorFlow 2 model plots for train loss and accuracy
#
# + colab={} colab_type="code" id="ix6BTFsk1MCd"
import matplotlib.pyplot as plt
# %matplotlib inline
# Loss curves, training vs validation
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('Loss vs. epochs')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['Training', 'Validation'], loc='upper right')
plt.show()
# Accuracy curves, training vs validation
plt.plot(history.history['sparse_categorical_accuracy'])
plt.plot(history.history['val_sparse_categorical_accuracy'])
plt.title('Accuracy vs. epochs')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['Training', 'Validation'], loc='upper right')
plt.show()
# + [markdown] colab_type="text" id="QOvqOuwtUaI8"
# ## TensorFlow 2 model evaluation
# + colab={} colab_type="code" id="ujdy_-oyUcbo"
# Load the best weights saved by the ModelCheckpoint callback during training
model.load_weights(os.path.realpath("weights.h5"))
# Report loss and accuracy on the held-out test split
model.evaluate(inputs_test, outputs_test)
# + [markdown] colab_type="text" id="i5pk9ZfWyDFa"
# ### TensorFlow 2 model confusion matrix
#
# Using predictions on the test set, a confusion matrix is produced
# + colab={} colab_type="code" id="JjIS-cDPyNQ5"
def tf2_confusion_matrix(inputs, outputs):
    """Predict on *inputs* and tally a confusion matrix against *outputs*.

    Returns (counter, sequences, predictions): counter[i][j] counts examples
    with true label i predicted as label j, sequences[i][j] collects the
    matching input texts (despite the historical 'wrong_' name it records
    correctly-classified pairs too), predictions are the raw model outputs.
    """
    predictions = model.predict(inputs)
    n = len(handles)
    wrong_labelled_counter = np.zeros((n, n))
    # dtype=object so each cell can hold a python list of texts.
    # (np.object was removed in NumPy 1.24; the builtin object must be used.)
    wrong_labelled_sequences = np.empty((n, n), dtype=object)
    for i in range(n):
        for j in range(n):
            wrong_labelled_sequences[i][j] = []
    for i in range(len(predictions)):
        predicted = int(predictions[i].argmax())
        true_value = int(outputs[i])
        wrong_labelled_counter[true_value][predicted] += 1
        wrong_labelled_sequences[true_value][predicted].append(inputs.get('input_text')[i])
    return wrong_labelled_counter, wrong_labelled_sequences, predictions
def print_confusion_matrix(wrong_labelled_counter):
    """Print the confusion matrix as tab-separated rows, one line per true label."""
    # Header: empty corner cell followed by one predicted-label name per column
    print("\t" + "".join(handle + "\t" for handle in handles))
    for row_index, row in enumerate(wrong_labelled_counter):
        cells = "\t".join(str(int(count)) for count in row)
        print(handles[row_index] + "\t" + cells)
# Tally predictions on the test split and print the resulting matrix
wrong_labelled_counter, wrong_labelled_sequences, predictions = tf2_confusion_matrix(inputs_test, outputs_test)
print_confusion_matrix(wrong_labelled_counter)
# + [markdown] colab_type="text" id="eLn-VedFUpwd"
# # CatBoost model
#
# This CatBoost model instance was developed reusing the ideas presented in these tutorials from the official repository: [classification](https://github.com/catboost/tutorials/blob/master/classification/classification_tutorial.ipynb) and [text features](https://github.com/catboost/tutorials/blob/master/text_features/text_features_in_catboost.ipynb)
# + [markdown] colab_type="text" id="oEKBxOJ_Wdgy"
# ## Imports for the CatBoost model
# + colab={} colab_type="code" id="3pNFdq1rWe9J"
import functools
import os
import pandas as pd
import numpy as np
import copy
import calendar
import datetime
import re
import unicodedata
from catboost import Pool, CatBoostClassifier
# + [markdown] colab_type="text" id="8G1QkbbfWsYA"
# ## Definitions for the CatBoost model
# + colab={} colab_type="code" id="5P268w0RWwke"
# NOTE: these definitions duplicate the TensorFlow section's so the CatBoost
# part of the notebook can run standalone.
# All two-letter unicode character category codes (see the unicodedata module)
unicode_data_categories = [
    "Cc",
    "Cf",
    "Cn",
    "Co",
    "Cs",
    "LC",
    "Ll",
    "Lm",
    "Lo",
    "Lt",
    "Lu",
    "Mc",
    "Me",
    "Mn",
    "Nd",
    "Nl",
    "No",
    "Pc",
    "Pd",
    "Pe",
    "Pf",
    "Pi",
    "Po",
    "Ps",
    "Sc",
    "Sk",
    "Sm",
    "So",
    "Zl",
    "Zp",
    "Zs"
]
# CSV column layout: label, post timestamp, raw text, then the category counts
column_names = [
    "handle",
    "timestamp",
    "text"
]
column_names.extend(unicode_data_categories)
#List of handles (labels)
#Fill with the handles you want to consider in your dataset
handles = [
]
train_file = os.path.realpath("./data/train.csv")
val_file = os.path.realpath("./data/val.csv")
test_file = os.path.realpath("./data/test.csv")
# + [markdown] colab_type="text" id="baGty7pSXLCv"
# ## Preprocessing and computing dataset features
# + colab={} colab_type="code" id="wC-YYqQmXMYP"
def get_pandas_dataset(input_file, timestamp_mean=None, timestamp_std=None):
    """Load one CSV split, keep only known handles, and normalize timestamps.

    When mean/std are not supplied they are computed from this file (the train
    split) and returned so the val/test splits can reuse them.
    """
    df = pd.read_csv(input_file, names=column_names)
    df = df[df.handle.isin(handles)]
    if timestamp_mean is None:
        timestamp_mean = df.timestamp.mean()
    if timestamp_std is None:
        timestamp_std = df.timestamp.std()
    df.timestamp = (df.timestamp - timestamp_mean) / timestamp_std
    # Integer class label = position of the handle in the handles list
    df["handle_index"] = df['handle'].map(lambda h: handles.index(h))
    return df.reset_index(drop=True), timestamp_mean, timestamp_std
# Fit normalization stats on train, then reuse them for test and val
train_dataset, timestamp_mean, timestamp_std = get_pandas_dataset(train_file)
test_dataset, _, _ = get_pandas_dataset(test_file, timestamp_mean=timestamp_mean, timestamp_std=timestamp_std)
val_dataset, _, _ = get_pandas_dataset(val_file, timestamp_mean=timestamp_mean, timestamp_std=timestamp_std)
def split_inputs_and_outputs(pd_dat):
    """Drop the label columns from *pd_dat* in place and return (features, labels)."""
    labels = pd_dat['handle_index'].values
    # Remove both label columns so only input features remain
    pd_dat.drop(columns=['handle', 'handle_index'], inplace=True)
    return pd_dat, labels
# Separate features from labels for each split
X_train, labels_train = split_inputs_and_outputs(train_dataset)
X_val, labels_val = split_inputs_and_outputs(val_dataset)
X_test, labels_test = split_inputs_and_outputs(test_dataset)
# + [markdown] colab_type="text" id="otA2nEKNX_N8"
# ## CatBoost model definition
# + colab={} colab_type="code" id="iy9URje7YAwg"
def get_model(catboost_params=None):
    """Build a CatBoostClassifier over the tweet features.

    catboost_params: optional dict of overrides merged on top of the defaults
    (None instead of a mutable {} default, which is a Python anti-pattern).

    Returns (model, cat_features, text_features): the feature-name lists are
    passed to Pool() so CatBoost knows which columns are categorical/text.
    """
    cat_features = []
    text_features = ['text']
    catboost_default_params = {
        'iterations': 1000,
        'learning_rate': 0.03,
        'eval_metric': 'Accuracy',
        'task_type': 'GPU',
        'early_stopping_rounds': 20
    }
    # Caller-supplied params override the defaults
    if catboost_params:
        catboost_default_params.update(catboost_params)
    model = CatBoostClassifier(**catboost_default_params)
    return model, cat_features, text_features
model, cat_features, text_features = get_model()
# + [markdown] colab_type="text" id="o7pM4RbMYQlA"
# ## CatBoost model fitting
# + colab={} colab_type="code" id="6_OAsp6nYXGz"
def fit_model(X_train, X_val, y_train, y_val, model, cat_features, text_features, verbose=100):
    """Fit *model* on the train pool, using the validation pool for evaluation."""
    def build_pool(X, y):
        # A Pool carries the data plus which columns are categorical/text
        return Pool(
            X,
            y,
            cat_features=cat_features,
            text_features=text_features,
            feature_names=list(X)
        )
    model.fit(build_pool(X_train, y_train), eval_set=build_pool(X_val, y_val), verbose=verbose)
    return model
model = fit_model(X_train, X_val, labels_train, labels_val, model, cat_features, text_features)
# + [markdown] colab_type="text" id="dOksfT2BY5em"
# ## CatBoost model evaluation
#
# Also for the CatBoost model, using predictions on the test set, a confusion matrix is produced
# + colab={} colab_type="code" id="N1VRhDP6YiOn"
def predict(X, model, cat_features, text_features):
    """Return per-class probabilities for every row of *X*."""
    eval_pool = Pool(
        data=X,
        cat_features=cat_features,
        text_features=text_features,
        feature_names=list(X)
    )
    return model.predict_proba(eval_pool)
def check_predictions_on(inputs, outputs, model, cat_features, text_features, handles):
    """Predict on *inputs* and tally a confusion matrix against *outputs*.

    Returns (counter, sequences, predictions): counter[i][j] counts examples
    with true label i predicted as label j, sequences[i][j] collects the
    matching input texts, predictions are the class-probability arrays.
    """
    predictions = predict(inputs, model, cat_features, text_features)
    n = len(handles)
    labelled_counter = np.zeros((n, n))
    # dtype=object so each cell can hold a python list of texts.
    # (np.object was removed in NumPy 1.24; the builtin object must be used.)
    labelled_sequences = np.empty((n, n), dtype=object)
    for i in range(n):
        for j in range(n):
            labelled_sequences[i][j] = []
    for i in range(len(predictions)):
        predicted = int(predictions[i].argmax())
        true_value = int(outputs[i])
        labelled_counter[true_value][predicted] += 1
        labelled_sequences[true_value][predicted].append(inputs.get('text').values[i])
    return labelled_counter, labelled_sequences, predictions
def confusion_matrix(labelled_counter, handles):
    """Format the counter as a tab-separated matrix string (rows = true labels)."""
    # Header row: empty corner cell, then one predicted-label name per column
    lines = ["\t" + "".join(handle + "\t" for handle in handles)]
    for ctr, row in enumerate(labelled_counter):
        cells = "\t".join(str(int(count)) for count in row)
        lines.append(handles[ctr] + "\t" + cells)
    return "\n".join(lines) + "\n"
# Confusion matrix for the CatBoost model on the test split
labelled_counter, labelled_sequences, predictions = check_predictions_on(
    X_test,
    labels_test,
    model,
    cat_features,
    text_features,
    handles
)
confusion_matrix_string = confusion_matrix(labelled_counter, handles)
print(confusion_matrix_string)
# + [markdown] colab_type="text" id="4eACMglJZ6Ai"
# # Evaluation
#
# To perform some experiments and evaluate the two models, 18 Twitter users were selected and, for each user, a number of tweets and responses to other users' tweets were collected. In total 39786 tweets were collected. The difference in class representation could be eliminated, for example limiting the number of tweets for each label to the number of tweets in the less represented class. This difference, however, was not eliminated, in order to test if it represents an issue for the accuracy of the two trained models.
#
# The division of the tweets corresponding to each twitter handle for each file (train, test, validation) is reported in the following table. To avoid policy issues (better safe than sorry), the actual user handle is masked using C_x placeholders and a brief description of the twitter user is presented instead.
#
# |Description|Handle|Train|Test|Validation|Sum|
# |-------|-------|-------|-------|-------|-------|
# |UK-based labour politician|C_1|1604|492|229|2325|
# |US-based democratic politician|C_2|1414|432|195|2041|
# |US-based democratic politician|C_3|1672|498|273|2443|
# |US-based actor|C_4|1798|501|247|2546|
# |UK-based actress|C_5|847|243|110|1200|
# |US-based democratic politician|C_6|2152|605|304|3061|
# |US-based singer|C_7|2101|622|302|3025|
# |US-based singer|C_8|1742|498|240|2480|
# |Civil rights activist|C_9|314|76|58|448|
# |US-based republican politician|C_10|620|159|78|857|
# |US-based TV host|C_11|2022|550|259|2831|
# |Parody account of C_15 |C_12|2081|624|320|3025|
# |US-based democratic politician|C_13|1985|557|303|2845|
# |US-based actor/director|C_14|1272|357|183|1812|
# |US-based republican politician|C_15|1121|298|134|1553|
# |US-based writer|C_16|1966|502|302|2770|
# |US-based writer|C_17|1095|305|153|1553|
# |US-based entrepreneur|C_18|2084|581|306|2971|
# |Sum||27890|7900|3996|39786|
#
#
#
# ## TensorFlow 2 model
#
# The following charts show loss and accuracy vs epochs for train and validation for a typical run of the TF2 model:
#
# 
# 
#
# If the images do not show correctly, they can be found at these links: [loss](https://github.com/icappello/ml-predict-text-author/blob/master/img/tf2_train_val_loss.png) [accuracy](https://github.com/icappello/ml-predict-text-author/blob/master/img/tf2_train_val_accuracy.png)
#
# After a few epochs, the model starts overfitting on the train data, and the accuracy for the validation set quickly reaches a plateau.
#
# The obtained accuracy on the test set is 0.672
#
# ## CatBoost model
#
# The fit procedure stopped after 303 iterations. The obtained accuracy on the test set is 0.808
#
# ## Confusion matrices
#
# The confusion matrices for the two models are reported [here](https://docs.google.com/spreadsheets/d/17JGDXYRajnC4THrBnZrbcqQbgzgjo0Jb7KAvPYenr-w/edit?usp=sharing), since large tables are not displayed correctly in the embedded github viewer for jupyter notebooks. Rows represent the actual classes, while columns represent the predicted ones.
#
# ## Summary
#
# The CatBoost model obtained a better accuracy overall, as well as a better accuracy on all but one label. No particular optimization was done on the definition of the CatBoost model. The TF2 model could need more data, as well as some changes to its definition, to perform better (comments and pointers on this are welcome). Some variants of the TF2 model were tried: a deeper model with more dense layers, higher dropout rate, more/less units in layers, using only a subset of features, regularization methods (L1, L2, batch regularization), different activation functions (sigmoid, tanh) but none performed significantly better than the one presented.
#
# Looking at the results summarized in the confusion matrices, tweets from C_9 clearly represented a problem, either for the under-representation relative to the other classes or for the actual content of the tweets (some were not written in english). Also, tweets from handles C_5 and C_14 were hard to correctly classify for both models, even if they were not under-represented w.r.t other labels.
# -
| creators_summary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Author
#
# In order to create the dictionary of `Authors_mapping`, we go over the papers in the training set. For each paper and for each author of this paper, we add the normalized citations (that is, the average number of citations per year) for this paper to the `author_score` for this author. There are total number of 8494 unique authors in our training set.
# ## Setup
# +
import pandas as pd
import numpy as np
import re
import nltk
import string
import unicodedata
import spacy
from spacy import displacy
from collections import Counter
import en_core_web_sm
import json
CURRENT_YEAR = 2018
class MyDF(object):
    """Wrap one split's CSV of papers and compute per-name citation scores.

    The CSV is expected to contain 'index', 'year', 'citations',
    'citations_average' columns plus one column per name slot
    (e.g. Author0..AuthorN); the first three are dropped as redundant.
    """

    def __init__(self, path, name):
        self.name = name
        self.df = pd.read_csv(path)
        # Keep an untouched copy of the raw data for reference
        self.df_orig = self.df.copy()
        self.drop_redundant_columns()
        # One remaining column is 'citations_average'; the rest are name slots
        self.max_num_names = len(self.df.columns) - 1
        self.make_all_object()
        self.num_non_nan = self.count_non_nan()
        # Computed once and stored as a list attribute; the helper is private
        # to avoid the original's method/attribute name collision
        # (`self.list_unique_non_nan = self.list_unique_non_nan()`)
        self.list_unique_non_nan = self._collect_unique_non_nan()
        self.num_unique_non_nan = len(self.list_unique_non_nan)

    def drop_redundant_columns(self):
        """Drop columns that are not used for scoring."""
        self.df = self.df.drop('year', axis=1)
        self.df = self.df.drop('index', axis=1)
        self.df = self.df.drop('citations', axis=1)

    def make_all_object(self):
        """Force every name column to dtype object so NaN/str mix cleanly."""
        for i in range(self.max_num_names):
            col = f"{self.name}{i}"
            self.df[col] = self.df[col].astype(object)

    def _stacked_names(self):
        """Concatenate every name-slot column into one Series."""
        return pd.concat([self.df[f"{self.name}{i}"] for i in range(self.max_num_names)])

    def _collect_unique_non_nan(self):
        """List of distinct non-NaN names across every name column."""
        return self._stacked_names().dropna().unique().tolist()

    def count_non_nan(self):
        """Total number of non-NaN name cells across every name column."""
        return self._stacked_names().dropna().count()

    def making_name_score_df(self):
        """Compute each name's score = sum of citations_average over its papers.

        Returns [DataFrame with (name, Score) columns, dict name -> score].
        """
        df_copy = self.df.copy()
        # keep citations_average as the index, stack the name columns against it
        stacked = df_copy.set_index('citations_average').stack()
        df_temp = stacked.reset_index(name=self.name)
        # drop the 'source' level (which name column the value came from)
        df_temp.drop('level_1', axis=1, inplace=True)
        # Single grouped pass instead of one full scan per unique name;
        # sort=False keeps first-appearance order, matching .unique()
        grouped = df_temp.groupby(self.name, sort=False)['citations_average'].sum()
        unique_names = grouped.index.tolist()
        names_score = grouped.tolist()
        output_df = pd.DataFrame([unique_names, names_score]).transpose()
        output_df.columns = [self.name, 'Score']
        output_dict = dict(zip(unique_names, names_score))
        return [output_df, output_dict]

    def polish_data(self, target):
        """Replace each known name in the frame with its score from *target*,
        then add 'predicted_citations' = row-wise sum of the score columns.
        """
        df_processed = self.df
        for n in self.list_unique_non_nan:
            score = target.loc[target[self.name] == n]['Score'].sum()
            df_processed = df_processed.replace(n, score)
        # NOTE(review): slice starts at column 2 -- confirm it covers exactly
        # the name-score columns for this CSV layout
        df_processed['predicted_citations'] = df_processed.iloc[:, 2:self.max_num_names + 2].sum(axis=1)
        return df_processed
# -
# ## Create the dataframe of authors and their scores for all authors in the training set
#
#
# Build the per-author score table from the training set and persist the
# author -> score mapping as JSON for later reuse.
df_author_training = MyDF("./data/data_processed/Author_training.csv", "Author")
[df_author_score, dict_author_score] = df_author_training.making_name_score_df()
with open("./data/data_processed/json/Authors_score.json", "w") as fp:
    json.dump(dict_author_score , fp)
# Below we indicate the top-10 authors sorted based on their author_score.
df_author_score.sort_values(['Score'], ascending=[0])[0:10]
# ### Correlation between citations and score_mean in Training for Authors
#
# +
# Replace each author by its score and correlate the summed scores with
# the observed average citations (in-sample fit).
train_authors_df = df_author_training.polish_data(df_author_score)
train_authors_df.citations_average.corr(train_authors_df.predicted_citations)
# -
# ### Save the training data with predicted values
train_authors_df.to_csv('./data/data_processed/Author_training_predicted.csv', index=False)
# ## Create the dataframe of authors and their scores for all authors in the test set
#
# NOTE: the test set is scored with the TRAINING score table, so authors
# unseen in training contribute 0 to the prediction.
df_author_test = MyDF("./data/data_processed/Author_test.csv", "Author")
test_authors_df = df_author_test.polish_data(df_author_score)
# ### Correlation between citations and score_mean in Test for Authors
#
test_authors_df.citations_average.corr(test_authors_df.predicted_citations)
# ### Save the test data with predicted values
test_authors_df.to_csv('./data/data_processed/Author_test_predicted.csv', index=False)
| author.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.9 64-bit (''env'': venv)'
# language: python
# name: python3
# ---
# # Modelado de Smile, LocVol y Calibración de Modelo de Volatilidad Stocástica
# Importamos QuantLib
import QuantLib as ql
import math
# +
# Day-count convention and calendar used throughout the example
day_count = ql.Actual365Fixed()
calendar = ql.UnitedStates()
calculation_date = ql.Date(6, 11, 2015)
spot = 659.37
ql.Settings.instance().evaluationDate = calculation_date
# Flat curves: 1% risk-free rate and zero dividends
dividend_yield = ql.QuoteHandle(ql.SimpleQuote(0.0))  # NOTE(review): unused below; dividend_ts is used instead
risk_free_rate = 0.01
dividend_rate = 0.0
flat_ts = ql.YieldTermStructureHandle(
    ql.FlatForward(calculation_date, risk_free_rate, day_count))
dividend_ts = ql.YieldTermStructureHandle(
    ql.FlatForward(calculation_date, dividend_rate, day_count))
# -
# Lo que vemos a continuacion son matrices de vencimiento, strike y las volatilidades correspondientes
# sacadas de un ejemplo de quantlib
# Las volatilidades son lognormales, y pueden ser interpoladas para armar la sabana
expiration_dates = [ql.Date(6,12,2015), ql.Date(6,1,2016), ql.Date(6,2,2016),
ql.Date(6,3,2016), ql.Date(6,4,2016), ql.Date(6,5,2016),
ql.Date(6,6,2016), ql.Date(6,7,2016), ql.Date(6,8,2016),
ql.Date(6,9,2016), ql.Date(6,10,2016), ql.Date(6,11,2016),
ql.Date(6,12,2016), ql.Date(6,1,2017), ql.Date(6,2,2017),
ql.Date(6,3,2017), ql.Date(6,4,2017), ql.Date(6,5,2017),
ql.Date(6,6,2017), ql.Date(6,7,2017), ql.Date(6,8,2017),
ql.Date(6,9,2017), ql.Date(6,10,2017), ql.Date(6,11,2017)]
strikes = [527.50, 560.46, 593.43, 626.40, 659.37, 692.34, 725.31, 758.28]
data = [
[0.37819, 0.34177, 0.30394, 0.27832, 0.26453, 0.25916, 0.25941, 0.26127],
[0.3445, 0.31769, 0.2933, 0.27614, 0.26575, 0.25729, 0.25228, 0.25202],
[0.37419, 0.35372, 0.33729, 0.32492, 0.31601, 0.30883, 0.30036, 0.29568],
[0.37498, 0.35847, 0.34475, 0.33399, 0.32715, 0.31943, 0.31098, 0.30506],
[0.35941, 0.34516, 0.33296, 0.32275, 0.31867, 0.30969, 0.30239, 0.29631],
[0.35521, 0.34242, 0.33154, 0.3219, 0.31948, 0.31096, 0.30424, 0.2984],
[0.35442, 0.34267, 0.33288, 0.32374, 0.32245, 0.31474, 0.30838, 0.30283],
[0.35384, 0.34286, 0.33386, 0.32507, 0.3246, 0.31745, 0.31135, 0.306],
[0.35338, 0.343, 0.33464, 0.32614, 0.3263, 0.31961, 0.31371, 0.30852],
[0.35301, 0.34312, 0.33526, 0.32698, 0.32766, 0.32132, 0.31558, 0.31052],
[0.35272, 0.34322, 0.33574, 0.32765, 0.32873, 0.32267, 0.31705, 0.31209],
[0.35246, 0.3433, 0.33617, 0.32822, 0.32965, 0.32383, 0.31831, 0.31344],
[0.35226, 0.34336, 0.33651, 0.32869, 0.3304, 0.32477, 0.31934, 0.31453],
[0.35207, 0.34342, 0.33681, 0.32911, 0.33106, 0.32561, 0.32025, 0.3155],
[0.35171, 0.34327, 0.33679, 0.32931, 0.3319, 0.32665, 0.32139, 0.31675],
[0.35128, 0.343, 0.33658, 0.32937, 0.33276, 0.32769, 0.32255, 0.31802],
[0.35086, 0.34274, 0.33637, 0.32943, 0.3336, 0.32872, 0.32368, 0.31927],
[0.35049, 0.34252, 0.33618, 0.32948, 0.33432, 0.32959, 0.32465, 0.32034],
[0.35016, 0.34231, 0.33602, 0.32953, 0.33498, 0.3304, 0.32554, 0.32132],
[0.34986, 0.34213, 0.33587, 0.32957, 0.33556, 0.3311, 0.32631, 0.32217],
[0.34959, 0.34196, 0.33573, 0.32961, 0.3361, 0.33176, 0.32704, 0.32296],
[0.34934, 0.34181, 0.33561, 0.32964, 0.33658, 0.33235, 0.32769, 0.32368],
[0.34912, 0.34167, 0.3355, 0.32967, 0.33701, 0.33288, 0.32827, 0.32432],
[0.34891, 0.34154, 0.33539, 0.3297, 0.33742, 0.33337, 0.32881, 0.32492]]
# All of the data above has to be placed in a QuantLib Matrix object.
# The QuantLib matrix needs the strikes as rows and the expiries as
# columns, which is why [i] and [j] are permuted below.
# The QuantLib matrix can later be reused to build other surfaces.
implied_vols = ql.Matrix(len(strikes), len(expiration_dates))
for i in range(implied_vols.rows()):
    for j in range(implied_vols.columns()):
        implied_vols[i][j] = data[j][i]
# Build the volatility surface with the BlackVarianceSurface method
black_var_surface = ql.BlackVarianceSurface(
    calculation_date, calendar,
    expiration_dates, strikes,
    implied_vols, day_count)
# The volatility for any strike and expiry can be obtained with the
# blackVol method (the surface interpolates between quotes)
strike = 600.0
expiry = 1.2 # years
black_var_surface.blackVol(expiry, strike)
# ## Visualización
import numpy as np
# %matplotlib inline
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
# +
# For a fixed expiry we can look at the vol/strike smile
strikes_grid = np.arange(strikes[0], strikes[-1],10)
expiry = 1.0 # years
implied_vols = [black_var_surface.blackVol(expiry, s)
                for s in strikes_grid] # interpolation happens here
actual_data = data[11] # quoted smile chosen for this expiry
fig, ax = plt.subplots()
ax.plot(strikes_grid, implied_vols, label="Black Surface")
ax.plot(strikes, actual_data, "o", label="Actual")
ax.set_xlabel("Strikes", size=12)
ax.set_ylabel("Vols", size=12)
legend = ax.legend(loc="upper right")
# +
# Now visualize the complete surface
plot_years = np.arange(0, 2, 0.1)
plot_strikes = np.arange(535, 750, 1)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
X, Y = np.meshgrid(plot_strikes, plot_years)
# blackVol takes (time, strike), hence the (y, x) argument order
Z = np.array([black_var_surface.blackVol(y, float(x))
              for xr, yr in zip(X, Y)
                  for x, y in zip(xr,yr) ]
             ).reshape(len(X), len(X[0]))
surf = ax.plot_surface(X,Y,Z, rstride=1, cstride=1, cmap=cm.coolwarm,
                       linewidth=0.1)
fig.colorbar(surf, shrink=0.5, aspect=5)
# -
# ## Dupire Local Volatility
# Derive the local-vol surface from the Black surface and the flat curves
local_vol_surface = ql.LocalVolSurface(
    ql.BlackVolTermStructureHandle(black_var_surface),
    flat_ts,
    dividend_ts,
    spot)
# +
plot_years = np.arange(0, 2, 0.1)
plot_strikes = np.arange(535, 750, 1)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
X, Y = np.meshgrid(plot_strikes, plot_years)
# localVol takes (time, strike), hence the (y, x) argument order
Z = np.array([local_vol_surface.localVol(y, float(x))
              for xr, yr in zip(X, Y)
                  for x, y in zip(xr,yr) ]
             ).reshape(len(X), len(X[0]))
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
                       linewidth=0.1)
fig.colorbar(surf, shrink=0.5, aspect=5)
# -
# ### IMPORTANTE:
# El pricing correcto con volatilidad local requiere que la sábana sea "arbitrage free". Si esto no ocurre, pueden ocurrir probabilidades de transición negativas o volatilidades locales negativas. QuantLib carece de "Fengler's arbitrage free smoothing" https://core.ac.uk/reader/6978470
#
# Cuando usamos un arbitrary smoothing, podemos ver que la sabana de volatilidad local conduce a volatilidades negativas
# +
# With an arbitrary (bicubic) smoothing the local-vol surface can break:
black_var_surface.setInterpolation("bicubic")
local_vol_surface = ql.LocalVolSurface(
    ql.BlackVolTermStructureHandle(black_var_surface),
    flat_ts,
    dividend_ts,
    spot)
plot_years = np.arange(0, 2, 0.15)
plot_strikes = np.arange(535, 750, 10)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
X, Y = np.meshgrid(plot_strikes, plot_years)
Z = np.array([local_vol_surface.localVol(y, float(x))
              for xr, yr in zip(X, Y)
                  for x, y in zip(xr,yr) ]
             ).reshape(len(X), len(X[0]))
# NOTE(review): axes are passed as (Y, X) here, unlike the earlier plots
# which use (X, Y) — confirm the swap is intentional.
surf = ax.plot_surface(Y,X, Z, rstride=1, cstride=1, cmap=cm.coolwarm,
                       linewidth=0.1)
fig.colorbar(surf, shrink=0.5, aspect=5)
# -
# #### VER EL ERROR!!!!
# "negative local vol^2 at strike 655 and time 0.75; the black vol surface is not smooth enough".
#
# Tal como comentabamos arriba, esto pasa cuando elegimos arbitráriamente una interpolación.
# ## Calibración del Modelo de Heston
#
# Las ecuaciones que definen al model son:
#
# $ dS_{t} = \mu Sdt+ \sqrt{\nu}SdW_{1} $
#
# $ d\nu_{t} =\kappa(\theta - \nu)dt + \sigma\sqrt{\nu}dW_{2}$
#
# $dW1_{2}dW_{2} = \rho dt $
#
# Arriba lo que podemos ver es que el Stock se mueve como un proceso estocástico que depende de la varianza que es a la vez otro proceso estocástico. El segundo es un proceso de reversión a la media y tiene un desvio constante $\sigma$ . La correlación entre procesos es $\rho$ .
#
# Veamos cómo podemos calibrar el modelo de Heston para algunas cotizaciones del mercado. Como ejemplo, digamos que estamos interesados en negociar opciones con vencimiento a 1 año. Por lo tanto, calibraremos el modelo de Heston para que se ajuste a las cotizaciones de volatilidad del mercado con un vencimiento de un año. Antes de hacer eso, necesitamos construir el motor de precios que necesitarían las rutinas de calibración.
# +
# Arbitrarily chosen starting parameters for the calibration
v0 = 0.01; kappa = 0.2; theta = 0.02; rho = -0.75; sigma = 0.5;
process = ql.HestonProcess(flat_ts, dividend_ts,
                           ql.QuoteHandle(ql.SimpleQuote(spot)),
                           v0, kappa, theta, sigma, rho)
model = ql.HestonModel(process)
engine = ql.AnalyticHestonEngine(model)
# -
# Ahora que tenemos el modelo Heston y un motor de precios, escogemos las cotizaciones con todas los strikes y el vencimiento de 1 año para calibrar el modelo Heston. Creamos el modelo auxiliar de Heston que se incluirá en las rutinas de calibración.
#
heston_helpers = []
black_var_surface.setInterpolation("bicubic")
one_year_idx = 11 # row 12 of data holds the 1-year expiry
date = expiration_dates[one_year_idx]
# One calibration helper per quoted strike at the 1-year expiry
for j, s in enumerate(strikes):
    t = (date - calculation_date )
    p = ql.Period(t, ql.Days)
    sigma = data[one_year_idx][j]
    helper = ql.HestonModelHelper(p, calendar, spot, s,
                                  ql.QuoteHandle(ql.SimpleQuote(sigma)),
                                  flat_ts,
                                  dividend_ts)
    helper.setPricingEngine(engine)
    heston_helpers.append(helper)
# Levenberg-Marquardt least-squares fit of the five Heston parameters
lm = ql.LevenbergMarquardt(1e-8, 1e-8, 1e-8)
model.calibrate(heston_helpers, lm,
                ql.EndCriteria(500, 50, 1.0e-8,1.0e-8, 1.0e-8))
theta, kappa, sigma, rho, v0 = model.params()
print( "theta = %f, kappa = %f, sigma = %f, rho = %f, v0 = %f" % (theta, kappa, sigma, rho, v0))
# Veamos ahora el error de la calibración priceando las opciones utlizadas para calibrar utilizando el modelo calibrado y estimemos el error
# +
# Report model-vs-market fit per calibration helper. Each value is fetched
# once and reused, instead of calling modelValue()/marketValue() twice per
# option and duplicating the relative-error expression inside print().
avg = 0.0
print( "%15s %15s %15s %20s" % (
    "Strikes", "Market Value",
    "Model Value", "Relative Error (%)"))
print("="*70)
for i, opt in enumerate(heston_helpers):
    model_value = opt.modelValue()    # price under the calibrated model
    market_value = opt.marketValue()  # price implied by the quoted vol
    err = model_value / market_value - 1.0
    print( "%15.2f %14.5f %15.5f %20.7f " % (
        strikes[i], market_value,
        model_value,
        100.0 * err))
    avg += abs(err)
# Mean absolute relative error, in percent
avg = avg*100.0/len(heston_helpers)
print( "-"*70)
print( "Average Abs Error (%%) : %5.3f" % (avg))
# -
# ## REFERENCIAS:
# QuantLib Python Cookbook
#
# QuantLib Documentation
#
# Arbitrage-Free Smoothing of the Implied Volatility Surface <NAME>:
# https://core.ac.uk/reader/6978470
#
| Modelos/Volatility_Smile, LocVol & Heston Model Calibration.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,md
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Python the basics: Reusing code
#
# > *© 2021, <NAME> and <NAME> (<mailto:<EMAIL>>, <mailto:<EMAIL>>). Licensed under [CC BY 4.0 Creative Commons](http://creativecommons.org/licenses/by/4.0/)*
#
# ---
#
# > This notebook is largely based on material of the *Python Scientific Lecture Notes* (https://scipy-lectures.github.io/), adapted with some exercises.
#
# ## Introduction
#
# For now, we have typed all instructions in the interpreter. For longer sets of instructions we need to change track and write the code in text files (using a text editor or Spyder), that we will call either scripts or modules.
#
# * Sets of instructions that are called several times should be written inside **functions** for better code reusability.
# * Functions (or other bits of code) that are called from several scripts should be written inside a **module**, so that only the module is imported in the different scripts (do not copy-and-paste your functions in the different scripts!).
#
#
# ## Scripts
#
#
# Let us first write a *script*, that is a file with a sequence of instructions that are executed each time the script is called. Instructions may be e.g. copied-and-pasted from the interpreter (but take care to respect indentation rules!).
#
# The extension for Python files is ``.py``. Write or copy-and-paste the following lines in a file called test.py
# + run_control={"frozen": false, "read_only": false}
# %%file test.py
message = "Hello how are you?"
for word in message.split():
    print(word)
# -
# Let us now execute the script interactively, that is inside the IPython interpreter. This is maybe the most common use of scripts in scientific computing.
# + run_control={"frozen": false, "read_only": false}
# %run test.py
# + run_control={"frozen": false, "read_only": false}
# After %run, names defined by the script live in the interactive namespace:
message
# -
# The script has been executed. Moreover the variables defined in the script (such as ``message``) are now available inside the interpreter’s namespace.
#
# Other interpreters also offer the possibility to execute scripts (e.g., ``execfile`` in the plain Python interpreter, etc.).
#
# It is also possible to execute this script as a *standalone program*, by executing the script inside a shell terminal (Linux/Mac console or cmd Windows console). For example, if we are in the same directory as the test.py file, we can execute this in a console:
# + run_control={"frozen": false, "read_only": false}
# !python test.py
# -
# ## Creating modules
#
# If we want to write larger and better organized programs (compared to simple scripts), where some objects are defined, (variables, functions, classes) and that we want to reuse several times, we have to create our own *modules*.
#
# Let us create a module demo contained in the file ``demo.py``:
# + run_control={"frozen": false, "read_only": false}
# %%file demo.py
"A demo module."
def print_b():
    "Prints b."
    print('b')
def print_a():
    "Prints a."
    print('a')
c = 2
d = 2
# -
# In this file, we defined two functions ``print_a`` and ``print_b``. Suppose we want to call the ``print_a`` function from the interpreter. We could execute the file as a script, but since we just want to have access to the function ``print_a``, we are rather going to **import it as a module**. The syntax is as follows.
# + run_control={"frozen": false, "read_only": false}
# Importing executes demo.py once and binds the module object to `demo`
import demo
# + run_control={"frozen": false, "read_only": false}
demo.print_a()
# + run_control={"frozen": false, "read_only": false}
demo.print_b()
# -
# ## `__main__` function and executing scripts
# + run_control={"frozen": false, "read_only": false}
# %%file demo2.py
"A second demo module with a main function."
def print_a():
    "Prints a."
    print('a')
def print_b():
    "Prints b."
    print('b')
# The guard below runs only when the file is executed as a program,
# not when it is imported as a module.
if __name__ == '__main__':
    print_a()
# + run_control={"frozen": false, "read_only": false}
# !python demo2.py
# + run_control={"frozen": false, "read_only": false}
# Importing does NOT trigger the __main__ block:
import demo2
demo2.print_b()
# -
# Standalone scripts may also take command-line arguments:
# + run_control={"frozen": false, "read_only": false}
# %%file demo3.py
import sys
if __name__ == '__main__':
    # sys.argv[0] is the script name; the rest are the arguments
    print(sys.argv)
# + run_control={"frozen": false, "read_only": false}
# !python demo3.py arg1 arg2
| notebooks/python_recap/04-reusing_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="s20a7SHBwWIn" colab_type="text"
# # Data Exploration with Pandas
# + id="PnCwayFwwWIo" colab_type="code" colab={}
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# + id="ZTvVwOZjwWIr" colab_type="code" colab={}
df = pd.read_csv('../data/titanic-train.csv')
# + id="RmbpM2ekwWIu" colab_type="code" colab={}
type(df)
# + id="NAd4D9FzwWIx" colab_type="code" colab={}
df.head()
# + id="5sJgCZQrwWI0" colab_type="code" colab={}
df.info()
# + id="HKMVT-yGwWI3" colab_type="code" colab={}
df.describe()
# + [markdown] id="oS-thH2HwWI5" colab_type="text"
# ### Indexing
# + id="JZyRyCvpwWI6" colab_type="code" colab={}
# .iloc is position-based
df.iloc[3]
# + id="w1qacfp3wWI9" colab_type="code" colab={}
# .loc is label-based and end-inclusive
df.loc[0:4,'Ticket']
# + id="uNSojzdtwWI_" colab_type="code" colab={}
df['Ticket'].head()
# + id="098yz8DxwWJB" colab_type="code" colab={}
df[['Embarked', 'Ticket']].head()
# + [markdown] id="hZwG_sqZwWJE" colab_type="text"
# ### Selections
# + id="b0KWrpIEwWJE" colab_type="code" colab={}
# Boolean-mask selection
df[df['Age'] > 70]
# + id="obBCSW2IwWJG" colab_type="code" colab={}
df['Age'] > 70
# + id="MBkzBrTGwWJI" colab_type="code" colab={}
# Same selection expressed as a query string
df.query("Age > 70")
# + id="9JCKeWazwWJL" colab_type="code" colab={}
df[(df['Age'] == 11) & (df['SibSp'] == 5)]
# + id="U-z8Y4VEwWJM" colab_type="code" colab={}
df[(df.Age == 11) | (df.SibSp == 5)]
# + id="Ar_O8Po6wWJP" colab_type="code" colab={}
df.query('(Age == 11) | (SibSp == 5)')
# + [markdown] id="ouPZ6F_zwWJR" colab_type="text"
# ### Unique Values
# + id="OXiVslCVwWJR" colab_type="code" colab={}
df['Embarked'].unique()
# + [markdown] id="z8DLnEstwWJV" colab_type="text"
# ### Sorting
# + id="E47kmr51wWJV" colab_type="code" colab={}
df.sort_values('Age', ascending = False).head()
# + [markdown] id="On-SEgNNwWJX" colab_type="text"
# ### Aggregations
# + id="o6MUjZmWwWJY" colab_type="code" colab={}
df['Survived'].value_counts()
# + id="EercV_e6wWJa" colab_type="code" colab={}
df['Pclass'].value_counts()
# + id="5jEcrz-9wWJd" colab_type="code" colab={}
# Counts of passengers per (class, survival) pair
df.groupby(['Pclass', 'Survived'])['PassengerId'].count()
# + id="tEj_WJ-7wWJf" colab_type="code" colab={}
df['Age'].min()
# + id="G3PF8PLpwWJh" colab_type="code" colab={}
df['Age'].max()
# + id="qbcmV0THwWJk" colab_type="code" colab={}
df['Age'].mean()
# + id="GO_n7Hy7wWJn" colab_type="code" colab={}
df['Age'].median()
# + id="4UJhJtIHwWJp" colab_type="code" colab={}
mean_age_by_survived = df.groupby('Survived')['Age'].mean()
mean_age_by_survived
# + id="bTg9TW22wWJs" colab_type="code" colab={}
std_age_by_survived = df.groupby('Survived')['Age'].std()
std_age_by_survived
# + [markdown] id="Y2FmomzPwWJu" colab_type="text"
# ### Merge
# + id="PGpr89kfwWJu" colab_type="code" colab={}
# reset_index turns the groupby index back into a 'Survived' column
df1 = mean_age_by_survived.round(0).reset_index()
df2 = std_age_by_survived.round(0).reset_index()
# + id="SD_LsRHDwWJw" colab_type="code" colab={}
df1
# + id="ye5cSzv_wWJy" colab_type="code" colab={}
df2
# + id="VNPyieJVwWJ0" colab_type="code" colab={}
df3 = pd.merge(df1, df2, on='Survived')
# + id="afw0U_nzwWJ2" colab_type="code" colab={}
df3
# + id="kvvHx7qowWJ4" colab_type="code" colab={}
df3.columns = ['Survived', 'Average Age', 'Age Standard Deviation']
# + id="YFbrQKDwwWJ6" colab_type="code" colab={}
df3
# + [markdown] id="zERLncJowWJ8" colab_type="text"
# ### Pivot Tables
# + id="XV6EFs9MwWJ8" colab_type="code" colab={}
df.pivot_table(index='Pclass',
               columns='Survived',
               values='PassengerId',
               aggfunc='count')
# + [markdown] id="UYrM0TujwWJ_" colab_type="text"
# ### Correlations
# + id="Wuad7TVMwWJ_" colab_type="code" colab={}
# Boolean feature so that sex enters the (numeric) correlation below
df['IsFemale'] = df['Sex'] == 'female'
# + id="FH_sFNOFwWKB" colab_type="code" colab={}
# numeric_only=True: pandas >= 2.0 raises a TypeError when corr() meets
# non-numeric columns (Name, Sex, Ticket, ...); older pandas silently
# dropped them. This pins the old, intended behavior.
correlated_with_survived = df.corr(numeric_only=True)['Survived'].sort_values()
correlated_with_survived
# + id="cPNXd30LwWKD" colab_type="code" colab={}
# %matplotlib inline
# + id="Cn5R7dDswWKF" colab_type="code" colab={}
# iloc[:-1] drops Survived itself (its self-correlation of 1.0)
correlated_with_survived.iloc[:-1].plot(kind='bar',
    title='Titanic Passengers: correlation with survival')
# + [markdown] id="hdOBJUfXwWKH" colab_type="text"
# # Visual Data Exploration with Matplotlib
# + id="wIDQAufbwWKH" colab_type="code" colab={}
# Four synthetic 1000-point series with different shapes
data1 = np.random.normal(0, 0.1, 1000)
data2 = np.random.normal(1, 0.4, 1000) + np.linspace(0, 1, 1000)
data3 = 2 + np.random.random(1000) * np.linspace(1, 5, 1000)
data4 = np.random.normal(3, 0.2, 1000) + 0.3 * np.sin(np.linspace(0, 20, 1000))
# + id="dAcBRsvcwWKJ" colab_type="code" colab={}
# Stack into a (1000, 4) array: one column per series
data = np.vstack([data1, data2, data3, data4]).transpose()
# + id="-gxQFKFvwWKM" colab_type="code" colab={}
df = pd.DataFrame(data, columns=['data1', 'data2', 'data3', 'data4'])
df.head()
# + [markdown] id="qEj_xERhwWKN" colab_type="text"
# ### Line Plot
# + id="HsxFG-7WwWKO" colab_type="code" colab={}
df.plot(title='Line plot')
# + id="lTBU7j4SwWKQ" colab_type="code" colab={}
# Same figure via the pyplot API
plt.plot(df)
plt.title('Line plot')
plt.legend(['data1', 'data2', 'data3', 'data4'])
# + [markdown] id="45BSq1r5wWKS" colab_type="text"
# ### Scatter Plot
# + id="Wl9Q6vfiwWKU" colab_type="code" colab={}
df.plot(style='.')
# + id="sMr7NfAwwWKV" colab_type="code" colab={}
_ = df.plot(kind='scatter', x='data1', y='data2',
            xlim=(-1.5, 1.5), ylim=(0, 3))
# + [markdown] id="tzKPAm3OwWKY" colab_type="text"
# ### Histograms
# + id="czPpUwZtwWKY" colab_type="code" colab={}
df.plot(kind='hist',
        bins=50,
        title='Histogram',
        alpha=0.6)
# + [markdown] id="4G58k3CWwWKc" colab_type="text"
# ### Cumulative distribution
# + id="IjNl0dULwWKc" colab_type="code" colab={}
# `normed` was deprecated in matplotlib 2.1 and removed in 3.1;
# `density=True` is the drop-in replacement (normalizes bin heights so
# the histogram integrates to 1, yielding a CDF with cumulative=True).
df.plot(kind='hist',
        bins=100,
        title='Cumulative distributions',
        density=True,
        cumulative=True,
        alpha=0.4)
# + [markdown] id="jRQrHK4bwWKf" colab_type="text"
# ### Box Plot
# + id="Mqc5n4PVwWKf" colab_type="code" colab={}
df.plot(kind='box',
        title='Boxplot')
# + [markdown] id="GbEWuOFxwWKi" colab_type="text"
# ### Subplots
# + id="gxCd8JCdwWKj" colab_type="code" colab={}
# 2x2 grid: four views of the same data on one figure
fig, ax = plt.subplots(2, 2, figsize=(5, 5))
df.plot(ax=ax[0][0],
        title='Line plot')
df.plot(ax=ax[0][1],
        style='o',
        title='Scatter plot')
df.plot(ax=ax[1][0],
        kind='hist',
        bins=50,
        title='Histogram')
df.plot(ax=ax[1][1],
        kind='box',
        title='Boxplot')
plt.tight_layout()
# + [markdown] id="2D9s3hb2wWKl" colab_type="text"
# ### Pie charts
# + id="OvUBWvvLwWKl" colab_type="code" colab={}
gt01 = df['data1'] > 0.1
piecounts = gt01.value_counts()
piecounts
# + id="wknzgZw3wWKr" colab_type="code" colab={}
piecounts.plot(kind='pie',
               figsize=(5, 5),
               explode=[0, 0.15],
               labels=['<= 0.1', '> 0.1'],
               autopct='%1.1f%%',
               shadow=True,
               startangle=90,
               fontsize=16)
# + [markdown] id="7oBxxrbnwWKt" colab_type="text"
# ### Hexbin plot
# + id="PPbh_GKgwWKu" colab_type="code" colab={}
# Mixture of two 2-D Gaussian clouds (1000 + 2000 points)
data = np.vstack([np.random.normal((0, 0), 2, size=(1000, 2)),
                  np.random.normal((9, 9), 3, size=(2000, 2))])
df = pd.DataFrame(data, columns=['x', 'y'])
# + id="zE3PKGQ3wWKw" colab_type="code" colab={}
df.head()
# + id="2D9n9dZdwWKy" colab_type="code" colab={}
df.plot()
# + id="GC9XNr27wWK0" colab_type="code" colab={}
df.plot(kind='kde')
# + id="RcXAqiKLwWK2" colab_type="code" colab={}
df.plot(kind='hexbin', x='x', y='y', bins=100, cmap='rainbow')
# + [markdown] id="r-_6STFywWK4" colab_type="text"
# # Unstructured data
# + [markdown] id="ML2_WKLxwWK5" colab_type="text"
# ### Images
# + id="zH2AQwTMwWK5" colab_type="code" colab={}
from PIL import Image
# + id="ItS-CqSjwWK8" colab_type="code" colab={}
img = Image.open('../data/iss.jpg')
img
# + id="Fbh3-pwgwWK-" colab_type="code" colab={}
type(img)
# + id="nxVJ6Za_wWLC" colab_type="code" colab={}
# Convert the image to a numpy array of pixel values
imgarray = np.asarray(img)
# + id="Us7z362dwWLE" colab_type="code" colab={}
type(imgarray)
# + id="pujWIxgjwWLF" colab_type="code" colab={}
imgarray.shape
# + id="NMVJNp1HwWLG" colab_type="code" colab={}
# Flattened length = height * width * channels
imgarray.ravel().shape
# + id="O0B3iZ1hwWLI" colab_type="code" colab={}
435 * 640 * 3
# + [markdown] id="wxYYYJ2ewWLK" colab_type="text"
# ### Sound
# + id="vOxuSrESwWLL" colab_type="code" colab={}
from scipy.io import wavfile
# + id="yL_uv1PXwWLN" colab_type="code" colab={}
rate, snd = wavfile.read(filename='../data/sms.wav')
# + id="rJr-4_3zwWLO" colab_type="code" colab={}
from IPython.display import Audio
# + id="T3k_3b1PwWLQ" colab_type="code" colab={}
Audio(data=snd, rate=rate)
# + id="b4dL_JFfwWLR" colab_type="code" colab={}
len(snd)
# + id="3Ngv9EtowWLT" colab_type="code" colab={}
snd
# + id="GEKxX1ecwWLV" colab_type="code" colab={}
plt.plot(snd)
# + id="pPRnfVAWwWLW" colab_type="code" colab={}
_ = plt.specgram(snd, NFFT=1024, Fs=44100)
plt.ylabel('Frequency (Hz)')
plt.xlabel('Time (s)')
# + [markdown] id="fWG4RZYswWLX" colab_type="text"
# # Data Exploration Exercises
# + [markdown] id="sUuwoLFDwWLY" colab_type="text"
# ## Exercise 1
# - load the dataset: `../data/international-airline-passengers.csv`
# - inspect it using the `.info()` and `.head()` commands
# - use the function [`pd.to_datetime()`](http://pandas.pydata.org/pandas-docs/version/0.20/generated/pandas.to_datetime.html) to change the column type of 'Month' to a datetime type
# - set the index of df to be a datetime index using the column 'Month' and the `df.set_index()` method
# - choose the appropriate plot and display the data
# - choose appropriate scale
# - label the axes
# + id="u5oEVkzNwWLY" colab_type="code" colab={}
# + [markdown] id="nnu8VGS7wWLa" colab_type="text"
# ## Exercise 2
# - load the dataset: `../data/weight-height.csv`
# - inspect it
# - plot it using a scatter plot with Weight as a function of Height
# - plot the male and female populations with 2 different colors on a new scatter plot
# - remember to label the axes
# + id="pefVMGehwWLa" colab_type="code" colab={}
# + [markdown] id="T-hfaka2wWLd" colab_type="text"
# ## Exercise 3
# - plot the histogram of the heights for males and for females on the same plot
# - use alpha to control transparency in the plot command
# - plot a vertical line at the mean of each population using `plt.axvline()`
# + id="nLkmON4MwWLe" colab_type="code" colab={}
# + [markdown] id="JwkzjxoYwWLf" colab_type="text"
# ## Exercise 4
# - plot the weights of the males and females using a box plot
# - which one is easier to read?
# - (remember to put in titles, axes and legends)
# + id="woGZv8kzwWLg" colab_type="code" colab={}
# + [markdown] id="zZLHj3zywWLj" colab_type="text"
# ## Exercise 5
# - load the dataset: `../data/titanic-train.csv`
# - learn about scattermatrix here: http://pandas.pydata.org/pandas-docs/stable/visualization.html
# - display the data using a scattermatrix
# + id="8xWbrAuowWLj" colab_type="code" colab={}
| course/2 Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit (windows store)
# name: python3
# ---
#Importando Bibliotecas
import pandas as pd
import numpy as np
import os
import datetime
from datetime import timedelta
from pathlib import Path
# Select the actuarial tables and economic/biometric assumptions
tabua_mortalidade = 'AMERICAN EXPERIENCE'
tabua_mortalidade_inv = 'RRB-44'
tabua_entrada_inv = 'ALVARO VINDAS'
tabua_morbidez = 'Atuários Ingleses'
ano_dt_base = 2020 # valuation (base) year
tx_juros = 0.06 # interest (discount) rate
cres_sal = 0.01 # salary growth rate
cres_benef = 0.01 # benefit growth rate
wx = 0.01 # turnover (withdrawal) rate
# +
# Load the census of active members and parse the typed columns
bd_ativos = pd.read_csv(str(Path(os.getcwd())) + '\\1.SERVIDORES.csv', encoding='UTF-8', sep=';')#, thousands=',')
# Parse dates (day-first Brazilian format) and the numeric salary base
bd_ativos['DT_NASC_SERVIDOR'] = pd.to_datetime(bd_ativos['DT_NASC_SERVIDOR'], yearfirst=True, format='%d/%m/%Y')
bd_ativos['DT_ING_ENTE'] = pd.to_datetime(bd_ativos['DT_ING_ENTE'], yearfirst=True, format='%d/%m/%Y')
# BUG FIX: this column previously parsed DT_ING_ENTE again (copy/paste
# slip), silently overwriting the career-position entry date; it now
# parses the actual DT_ING_CARGO column.
bd_ativos['DT_ING_CARGO'] = pd.to_datetime(bd_ativos['DT_ING_CARGO'], yearfirst=True, format='%d/%m/%Y')
bd_ativos['VL_BASE_CALCULO'] = bd_ativos['VL_BASE_CALCULO'].str.replace(',', '.').astype(float)
bd_ativos['DT_NASC_CONJUGE'] = pd.to_datetime(bd_ativos['DT_NASC_CONJUGE'], yearfirst=True, format='%d/%m/%Y')
bd_ativos['DT_NASC_NOVO'] = pd.to_datetime(bd_ativos['DT_NASC_NOVO'], yearfirst=True, format='%d/%m/%Y')
bd_ativos['DT_NASC_INV'] = pd.to_datetime(bd_ativos['DT_NASC_INV'], yearfirst=True, format='%d/%m/%Y')
# Valuation date derived from ano_dt_base instead of a second hard-coded year
bd_ativos['DT_BASE'] = '31/12/{0}'.format(ano_dt_base)
bd_ativos['DT_BASE'] = pd.to_datetime(bd_ativos.DT_BASE, yearfirst=True, format='%d/%m/%Y')
# +
# Per-member statistics for the active population
statis_ativos = pd.DataFrame()
statis_ativos['IDADE_SERVIDOR'] = round((bd_ativos.DT_BASE - bd_ativos.DT_NASC_SERVIDOR) / timedelta(days=365.25) - 0.5).astype(int) # attained age: valuation date - member's birth date
#----------------------------------------------------------------------#
statis_ativos['IDADE_ADM'] = round((bd_ativos.DT_ING_ENTE - bd_ativos.DT_NASC_SERVIDOR) / timedelta(days=365.25) - 0.5).astype(int) # entry age: hire date at the entity - member's birth date
#----------------------------------------------------------------------#
statis_ativos['IDADE_CONJUGE'] = bd_ativos.DT_NASC_CONJUGE.mask(bd_ativos.DT_NASC_CONJUGE.isnull(), '1800-01-01', axis=0)#, inplace=True) # fill empty rows with a sentinel date
statis_ativos.IDADE_CONJUGE = round((bd_ativos.DT_BASE - statis_ativos.IDADE_CONJUGE) / timedelta(days=365.25) - 0.5).astype(int) # spouse age: valuation date - spouse's birth date
statis_ativos.loc[(bd_ativos.CO_EST_CIVIL_SERVIDOR == 1) | (bd_ativos.CO_EST_CIVIL_SERVIDOR == 3) | (bd_ativos.CO_EST_CIVIL_SERVIDOR == 4) | (bd_ativos.CO_EST_CIVIL_SERVIDOR == 5) | (statis_ativos.IDADE_CONJUGE > 127), 'IDADE_CONJUGE'] = None # clear the sentinel ages (unmarried statuses or impossible ages)
#----------------------------------------------------------------------#
statis_ativos['IDADE_EMT'] = round(statis_ativos.IDADE_ADM - (bd_ativos['NU_TEMPO_RGPS']/365.25)) # age of entry into the labour market: entry age - (prior RGPS time / 365.25)
#----------------------------------------------------------------------#
statis_ativos['IDADE_PROJ_APOS'] = bd_ativos.DT_PROV_APOSENT # projected retirement age (as provided in the census)
#----------------------------------------------------------------------#
statis_ativos['DIF_ETARIA'] = statis_ativos.IDADE_CONJUGE - statis_ativos.IDADE_SERVIDOR # spouse age difference: spouse age - member age
#----------------------------------------------------------------------#
statis_ativos['IDADE_NOVO'] = bd_ativos.DT_NASC_NOVO.mask(bd_ativos.DT_NASC_NOVO.isnull(), '1800-01-01', axis=0)#, inplace=True) # fill empty rows with a sentinel date
statis_ativos.IDADE_NOVO = round((bd_ativos.DT_BASE - statis_ativos.IDADE_NOVO) / timedelta(days=365.25) - 0.5).astype(int) # youngest child's age: valuation date - child's birth date
statis_ativos.loc[statis_ativos.IDADE_NOVO > 21, 'IDADE_NOVO'] = None # clear the sentinel (children over 21 are not dependants)
#----------------------------------------------------------------------#
statis_ativos['IDADE_INV'] = bd_ativos.DT_NASC_INV.mask(bd_ativos.DT_NASC_INV.isnull(), '1800-01-01', axis=0)#, inplace=True) # fill empty rows with a sentinel date
statis_ativos.IDADE_INV = round((bd_ativos.DT_BASE - statis_ativos.IDADE_INV) / timedelta(days=365.25) - 0.5).astype(int) # disabled child's age: valuation date - child's birth date
statis_ativos.loc[statis_ativos.IDADE_INV > 127, 'IDADE_INV'] = None # clear the sentinel ages
#----------------------------------------------------------------------#
statis_ativos['TP_PREFEITURA'] = statis_ativos.IDADE_SERVIDOR - statis_ativos.IDADE_ADM # past service at the entity: attained age - entry age
#----------------------------------------------------------------------#
statis_ativos['DIFER(r-x)'] = bd_ativos.DT_PROV_APOSENT - statis_ativos.IDADE_SERVIDOR # deferral to retirement: projected retirement age - attained age
statis_ativos['DIFER(r-u)'] = bd_ativos.DT_PROV_APOSENT - statis_ativos.IDADE_ADM # deferral from hire: projected retirement age - entry age
#----------------------------------------------------------------------#
statis_ativos['BENEF_PROJ'] = round(bd_ativos.VL_BASE_CALCULO * (1 + cres_sal)**statis_ativos['DIFER(r-x)'], 2) # projected benefit at retirement under salary growth
# TODO: entry age into the plan/RPPS; confirm whether IDADE_EMT should also
# consider RPPS service time (open question left by the original author).
statis_ativos#.head(20)
# +
# Salary-growth projection (projected retirement benefit): one row per active
# member, holding the base salary compounded by cres_sal for every year from
# now up to (and including) the projected retirement year.
proj_cres_sal = [
    [round(bd_ativos.VL_BASE_CALCULO.loc[row] * ((1 + cres_sal) ** year), 2)
     for year in range(statis_ativos['DIFER(r-x)'][row] + 1)]
    for row in range(len(bd_ativos))
]
df_benef_proj = pd.DataFrame(data=proj_cres_sal, index=bd_ativos.ID_SERVIDOR_MATRICULA)
# -
print(df_benef_proj)
#Importing the biometric function tables.
#The workbook path is built once instead of being re-assembled for every sheet read.
tabuas_xlsx = str(Path(os.getcwd())) + '\\Tábuas\\Funções Biométricas (Com Fórmulas).xlsx'
qx = pd.read_excel(tabuas_xlsx, sheet_name='qx', usecols=[tabua_mortalidade]) #mortality rates
px = pd.read_excel(tabuas_xlsx, sheet_name='px', usecols=[tabua_mortalidade]) #survival rates
lx = pd.read_excel(tabuas_xlsx, sheet_name='lx', usecols=[tabua_mortalidade]) #number of survivors
dx = pd.read_excel(tabuas_xlsx, sheet_name='dx', usecols=[tabua_mortalidade]) #number of deaths
ix = pd.read_excel(tabuas_xlsx, sheet_name='qx', usecols=[tabua_entrada_inv]) #disability-entry rates — NOTE(review): read from the 'qx' sheet, confirm this is intentional
# +
#Computing the biometric functions and commutation columns
df_func_biometricas = pd.DataFrame()
df_func_biometricas['qx'] = qx
df_func_biometricas['px'] = px
df_func_biometricas['lx'] = lx
df_func_biometricas['dx'] = dx
df_func_biometricas['wx'] = wx #Turnover (withdrawal)
df_func_biometricas['ix'] = ix #Disability entry
#Multidecrement environment rates.
#NOTE(review): the adjustment factors below are ADDED ((1-0.5*a)+(1-0.5*b));
#the usual multidecrement formula multiplies them — confirm this is intended.
df_func_biometricas['qx_m'] = df_func_biometricas.qx * ((1 - 0.5 * df_func_biometricas.ix) + (1 - 0.5 * df_func_biometricas.wx)) #Multidecrement environment.
df_func_biometricas['ix_m'] = df_func_biometricas.ix * ((1 - 0.5 * df_func_biometricas.qx) + (1 - 0.5 * df_func_biometricas.wx)) #Multidecrement environment.
df_func_biometricas['wx_m'] = df_func_biometricas.wx * ((1 - 0.5 * df_func_biometricas.qx) + (1 - 0.5 * df_func_biometricas.ix)) #Multidecrement environment.
df_func_biometricas['qx_t'] = df_func_biometricas.qx_m + df_func_biometricas.ix_m + df_func_biometricas.wx_m
df_func_biometricas['px_t'] = 1 - df_func_biometricas.qx_t
#The columns below replace the original per-row loops. The old code wrote through
#chained indexing (df['col'].loc[i] = ...), which raises SettingWithCopyWarning and
#can silently fail to write; the Nx loop was also O(n^2).
#v: discount factor 1/(1+i)^x for each age x (rows are labelled 0..n-1)
df_func_biometricas['v'] = [1 / (1 + tx_juros) ** i for i in range(len(df_func_biometricas))]
_Dx = df_func_biometricas.lx * df_func_biometricas.v #commutation Dx = lx * v^x
#Nx: tail sum of Dx from age x onward (reversed cumulative sum keeps the index order)
df_func_biometricas['Nx'] = _Dx[::-1].cumsum()[::-1]
#lx_t: multidecrement survivors with radix 100000: lx_t[i] = lx_t[i-1] * px_t[i-1]
df_func_biometricas['lx_t'] = 100000 * df_func_biometricas.px_t.shift(1).fillna(1).cumprod()
df_func_biometricas['Dx'] = _Dx #assigned last to keep the original column order
df_func_biometricas.head()
# +
df_anuidades = pd.DataFrame()  # annuity table (empty placeholder)
df_comp_VP_ativos = pd.DataFrame()  # present-value components for active members
df_comp_VP_ativos['Br'] = statis_ativos.BENEF_PROJ  # projected retirement benefit
df_comp_VP_ativos['r-upu_t'] = statis_ativos.IDADE_PROJ_APOS.map(df_func_biometricas.lx_t) / statis_ativos.IDADE_ADM.map(df_func_biometricas.lx_t)  # multidecrement survival from admission age to projected retirement age (map looks ages up as lx_t row labels)
df_comp_VP_ativos['v_r-u'] = statis_ativos['DIFER(r-u)'].map(df_func_biometricas.v)  # discount factor over the r-u deferment
df_comp_VP_ativos['ar'] = bd_ativos.DT_PROV_APOSENT.map(df_func_biometricas.Nx) / bd_ativos.DT_PROV_APOSENT.map(df_func_biometricas.Dx)  # annuity factor N_r / D_r -- NOTE(review): DT_PROV_APOSENT is used here as an age index; confirm
df_comp_VP_ativos#.loc[30]
# +
#df_func_biometricas = pd.DataFrame()
#df_func_biometricas['TESTE'] = statis_ativos.IDADE_EMT
#df_func_biometricas['TESTE1'] = statis_ativos.IDADE_EMT + 10
#df_func_biometricas['TESTE2'] = df_func_biometricas.apply(lambda x: x['TESTE']+100, axis=1)
#df_func_biometricas['x+npx'] = df_func_biometricas.TESTE1.map(lx[tabua_mortalidade]) / df_func_biometricas.TESTE.map(lx[tabua_mortalidade])
#df_func_biometricas
# +
#𝑞𝑥(𝑚)=𝑞𝑥 ∗ [(1−0,5 ∗ 𝑖𝑥 )+(1−0,5 ∗ 𝑤𝑥 )]
#bd_ativos.DT_PROV_APOSENT
| estatisticas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Variable Types
#
# A Variable is analogous to a column in a table in a relational database. When creating an Entity, Featuretools will attempt to infer the types of variables present. Featuretools also allows for explicitly specifying the variable types when creating the Entity.
#
# **It is important that datasets have appropriately defined variable types when using DFS because this will allow the correct primitives to be used to generate new features.**
#
# > Note: When using Dask Entities, users must explicitly specify the variable types for all columns in the Entity dataframe.
#
# To understand the different variable types in Featuretools, let's first look at a graph of the variables:
# Render the Featuretools variable-type inheritance graph.
from featuretools.variable_types import graph_variable_types
graph_variable_types()
# As we can see, there are multiple variable types and some have subclassed variable types. For example, ZIPCode is a variable type that is a child of the Categorical type, which is in turn a child of the Discrete type.
# Let's explore some of the variable types and understand them in detail.
# ## Discrete
#
# A Discrete variable type can only take certain values. It is a type of data that can be counted, but cannot be measured. If it can be classified into distinct buckets, then it is a discrete variable type.
#
# There are 2 sub-variable types of Discrete. These are Categorical, and Ordinal. If the data has a certain ordering, it is of Ordinal type. If it cannot be ordered, then it is a Categorical type.
#
# ### Categorical
#
# A Categorical variable type can take unordered discrete values. It is usually a limited, and fixed number of possible values. Categorical variable types can be represented as strings, or integers.
#
# Some examples of Categorical variable types:
#
# - Gender
# - Eye Color
# - Nationality
# - Hair Color
# - Spoken Language
#
# ### Ordinal
#
# A Ordinal variable type can take ordered discrete values. Similar to Categorical, it is usually a limited, and fixed number of possible values. However, these discrete values have a certain order, and the ordering is important to understanding the values. Ordinal variable types can be represented as strings, or integers.
#
# Some examples of Ordinal variable types:
#
# - Educational Background (Elementary, High School, Undergraduate, Graduate)
#
# - Satisfaction Rating (“Not Satisfied”, “Satisfied", “Very Satisfied”)
#
# - Spicy Level (Hot, Hotter, Hottest)
#
# - Student Grade (A, B, C, D, F)
#
# - Size (small, medium, large)
#
#
# #### Categorical SubTypes (CountryCode, Id, SubRegionCode, ZIPCode)
#
# There are also more distinctions within the Categorical variable type. These include CountryCode, Id, SubRegionCode, and ZIPCode.
#
# It is important to make this distinction because there are certain operations that can be applied, but they don't necessarily apply to all Categorical types. For example, there could be a [custom primitive](https://docs.featuretools.com/en/stable/automated_feature_engineering/primitives.html#defining-custom-primitives) that applies to the ZIPCode variable type. It could extract the first 5 digits of a ZIPCode. However, this operation is not valid for all Categorical variable types. Therefore it is appropriate to use the ZIPCode variable type.
# ## Datetime
# A Datetime is a representation of a date and/or time. Datetime variable types can be represented as strings, or integers. However, they should be in an interpretable format or properly cast before using DFS.
#
# Some examples of Datetime include:
#
# - transaction time
# - flight departure time
# - pickup time
#
# ### DateOfBirth
# A more distinct type of datetime is a DateOfBirth. This is an important distinction because it allows additional primitives to be applied to the data to generate new features. For example, having a DateOfBirth variable type will allow the Age primitive to be applied during DFS, leading to a new Numeric feature.
# ## Text
# Text is a long-form string, that can be of any length. It is commonly used with NLP operations, such as TF-IDF. Featuretools supports NLP operations with the nlp-primitives [add-on](https://innovation.alteryx.com/natural-language-processing-featuretools/).
# ## LatLong
# A LatLong represents an ordered pair (Latitude, Longitude) that tells the location on Earth. The order of the tuple is important. LatLongs can be represented as tuple of floating point numbers.
#
# To make a LatLong in a dataframe do the following:
# +
import pandas as pd

# A LatLong is an ordered (latitude, longitude) pair; build one tuple per row.
data = pd.DataFrame()
data['latitude'] = [51.52, 9.93, 37.38]
data['longitude'] = [-0.17, 76.25, -122.08]
data['latlong'] = list(zip(data['latitude'], data['longitude']))
data['latlong']
# -
# ## List of Variable Types
# We can also get all the variable types as a DataFrame.
# List every available Featuretools variable type as a DataFrame.
from featuretools.variable_types import list_variable_types
list_variable_types()
| docs/source/variables.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Ridge-Lasso Regression
#
# > In this post, we will review regularization techniques, focusing on Ridge and Lasso.
#
# - toc: true
# - badges: true
# - comments: true
# - author: <NAME>
# - categories: [Python, Machine_Learning]
# - image: images/ridge_reg.png
# ## Packages
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import lasso_path, Lasso, Ridge
np.random.seed(1)  # fixed seed so the simulated data is reproducible
# ## Ridge Penalty
# In logistic Regression, we can define the loss function like this,
#
# $$ L(\beta; \lambda) = -\frac{1}{n} \sum_{i=1}^n (y_i x_i^T \beta - \log(1+ \exp(x_i^T \beta))) $$
#
# For the Regularization, we can add the penalty term. In this case, we added Ridge penalty (also known as L2 error). That is, we measure the squared beta for the penalty.
#
# $$ L(\beta; \lambda) = -\frac{1}{n} \sum_{i=1}^n (y_i x_i^T \beta - \log(1+ \exp(x_i^T \beta))) + \lambda \Vert \beta \Vert_2^2 $$
p = 2  # number of features
lambda_v = 1.5  # ridge penalty strength
n = 10  # sample size
true_beta = np.array([[1], [-0.5]])  # (p, 1) ground-truth coefficients
# So we need to make a predictor. In this example, we assume that the sample data is from a binomial distribution.
x = np.random.normal(0, 1, (n, p))  # design matrix with standard-normal features
prob = 1 / (1 + np.exp(-x @ true_beta))  # sigmoid of the linear predictor
prob = prob.reshape((n,))
prob
y = np.random.binomial(np.ones(n, dtype='int32'), prob, n)  # one Bernoulli draw per row
y = y.reshape((n, 1))
y
# Currently, we generate the true data from true beta. So how can we recover the true beta from an initial beta?
beta = np.array([.5, .5]).reshape((p, 1))  # starting point for the optimization
prob = 1 / (1 + np.exp(-x @ beta))
prob
# With the Newton-Raphson method, we differentiate it two times.
#
# $$ g = \frac{\partial}{\partial\beta} L(\beta; \lambda) = -\frac{1}{n} \sum_{i=1}^n \big( y_i x_i - \frac{\exp(x_i^T \beta)}{1 + \exp(x_i^T \beta)} x_i \big) + 2 \lambda \beta \\
# H = \frac{\partial^2}{\partial \beta^2} L(\beta ; \lambda) = \frac{1}{n} \sum_{i=1}^n \big( \frac{\exp(x_i^T \beta)}{1 + \exp(x_i^T \beta)} \cdot \frac{1}{1 + \exp(x_i^T \beta)} \big) x_i x_i^T + 2 \lambda I$$
grad = np.mean((prob - y) * x, axis=0, keepdims=True).T + 2 * lambda_v * beta  # penalized gradient g
grad
D = np.diag((prob * (1 - prob)).reshape(n))  # diagonal sigmoid-variance weights
D[:5, :5]
H = x.T @ D @ x / n + np.diag(np.repeat(2 * lambda_v, p))  # penalized Hessian H
H
# After we calculate the gradient and hessian matrix of one optimization, we do the one step for the beta update. All we need to do is to repeat these steps until the threshold is reached.
# +
# Newton-Raphson iterations for the ridge-penalized logistic loss; the two
# coordinates of every iterate are recorded so the path can be inspected later.
beta = np.zeros((p, 1))
beta_0 = []
beta_1 = []
for i in range(10):
    p_hat = 1 / (1 + np.exp(-x @ beta))
    g = np.mean((p_hat - y) * x, axis=0, keepdims=True).T + 2 * lambda_v * beta
    W = np.diag((p_hat * (1 - p_hat)).reshape(n))
    hess = x.T @ W @ x / n + np.diag(np.repeat(2 * lambda_v, p))
    beta_next = beta - np.linalg.inv(hess) @ g
    beta_0.append(beta_next[0])
    beta_1.append(beta_next[1])
    # The iterate is printed either way; stop once the step no longer moves beta.
    converged = np.sum(np.abs(beta_next - beta)) < 1e-8
    beta = beta_next
    print('Iteration {} beta:'.format(i))
    print(beta, '\n')
    if converged:
        break
# -
# We almost found the solution for beta. So does this solution satisfy the optimality? Or Is the gradient of 0 at this beta? One way to check this is to apply [Karush-Kuhn-Tucker (KKT) condition](https://en.wikipedia.org/wiki/Karush%E2%80%93Kuhn%E2%80%93Tucker_conditions).
prob = 1 / (1 + np.exp(-x @ beta))  # fitted probabilities at the converged beta
grad = np.mean((prob - y) * x, axis=0, keepdims=True).T + 2 * lambda_v * beta  # gradient of the penalized loss
grad
np.all(np.abs(grad) < 1e-8)  # stationarity check: gradient ~ 0 at the optimum
# This time, we will use the Lasso and Ridge regressions implemented in scikit-learn.
#
# At first, we need to find solution path for each lambda.
#
# Note that, Ridge penalty can be expressed like this,
#
# $$ \beta(\lambda) = \arg\min_{\beta} \frac{1}{2} \Vert Y - X \beta \Vert^2 + \lambda \Vert \beta \Vert^2 $$
# Ridge solution path: solve the penalized least-squares problem on a lambda grid
# and stack the fitted coefficient vectors into one (n_lambdas, p) array.
n_lambdas = 50
lambda_vec = np.linspace(0, 100, n_lambdas)
coefs = np.squeeze(np.array(
    [Ridge(alpha=lam, fit_intercept=False).fit(x, y).coef_ for lam in lambda_vec]
))
coefs[:6, :]
# +
# Ridge solution path: one line per coefficient as a function of lambda
plt.figure(figsize=(16, 10))
colors = ['b', 'r', 'g']
lstyles = ['-', '--', '-.', ':']
for i in range(p):
    l = plt.plot(lambda_vec, coefs[:, i], linestyle=lstyles[i], c=colors[i])
plt.xscale('log')
plt.axis('tight')
plt.xlabel('lambda')
plt.ylabel('beta')
plt.title('Ridge Regression Solution Path')
plt.show()
# -
# As you can see, when the $\lambda$ is increased, $\beta$ is pushed toward 0, which means the **'shrinkage'** of the beta.
#
# Unlike Ridge, Lasso uses the absolute beta for the penalty.
#
# $$ \beta(\lambda) = \arg\min_{\beta} \frac{1}{2} \Vert Y - X \beta \Vert^2 + \lambda \vert \beta \vert $$
# +
eps = 5e-3  # path length (ratio of smallest to largest alpha on the path)
lambdas_lasso, coefs_lasso, _ = lasso_path(x, y, eps=eps, fit_intercept=False)
coefs_lasso = np.squeeze(coefs_lasso)
coefs_lasso[:, :5]
# +
# Lasso solution path: one line per coefficient over the computed lambda grid
plt.figure(figsize=(16, 10))
colors = ['b', 'r', 'g']
lstyles = ['-', '--', '-.', ':']
for coef, c, l in zip(coefs_lasso, colors, lstyles):
    l = plt.plot(lambdas_lasso, coef, linestyle=l, c=c)
plt.axis('tight')
plt.xlabel('lambda')
plt.ylabel('beta')
plt.title('Lasso Regression Solution Path')
plt.show()
| _notebooks/2021-06-17-Ridge-Lasso-Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 5. Petrophysical Calculations
# Created By: <NAME>
#
# The following tutorial illustrates how to calculate key petrophysical properties using pandas
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# #### Shale Volume Function
def shale_volume(gamma_ray, gamma_ray_max, gamma_ray_min):
    """Gamma-ray shale volume index.

    Linearly rescales the gamma-ray reading onto the [min, max] range:
    Vshale = (GR - GR_min) / (GR_max - GR_min), rounded to 4 decimal places.
    Works element-wise on pandas Series as well as on scalars.
    """
    gr_range = gamma_ray_max - gamma_ray_min
    return round((gamma_ray - gamma_ray_min) / gr_range, 4)


shale_volume(120, 200, 15)
# #### Density Porosity Function
def density_porosity(input_density, matrix_density, fluid_density):
    """Density porosity: (rho_matrix - rho_bulk) / (rho_matrix - rho_fluid).

    Result is rounded to 4 decimal places; works element-wise on Series too.
    """
    numerator = matrix_density - input_density
    denominator = matrix_density - fluid_density
    return round(numerator / denominator, 4)


density_porosity(2.45, 2.65, 1)
# #### Water Saturation Functions
def sw_archie(porosity, rt, rw, archieA, archieM, archieN):
    """Archie water saturation: Sw = ((a / phi^m) * (Rw / Rt))^(1/n), unrounded."""
    formation_factor = archieA / (porosity ** archieM)
    return (formation_factor * (rw / rt)) ** (1 / archieN)


sw_archie(0.1, 10, 0.1, 1, 2, 2)
# equations from https://www.spec2000.net/01-quickmath.htm
def sw_simandoux(phie, rt, rw, archieA, archieM, archieN, vshale, rshale):
    """Simandoux water saturation for shaly sands.

    Quick-math form from https://www.spec2000.net/01-quickmath.htm:
    the clean-sand Archie term is corrected by a shale-conductivity term.
    """
    clean_term = (1 - vshale) * archieA * rw / (phie ** archieM)
    shale_term = clean_term * vshale / (2 * rshale)
    conductivity_term = clean_term / rt
    return ((shale_term ** 2 + conductivity_term) ** 0.5 - shale_term) ** (2 / archieN)


sw_simandoux(0.1, 10, 0.1, 1, 2, 2, 0.3, 2)
# ### Loading Well Data From CSV
well = pd.read_csv("Data/15_9-19.csv", header=0, skiprows=[1])  # skiprows=[1] skips the second file row (presumably a units row — confirm)
well.head()
well.replace(-999.00, np.nan, inplace=True)  # -999.00 is treated as the null flag
well.describe()
well['GR'].plot(kind='hist', bins=230)
plt.xlim(0, 250)
# ### Calculate Petrophysical Properties
#
# Using the functions created above, we can pass through the required columns in order to calculate the key petrophysical properties
#Calculate Shale Volume (1st/99th GR percentiles as min/max to suppress outliers)
well['VSHALE'] = shale_volume(well['GR'], well['GR'].quantile(q=0.99),
                             well['GR'].quantile(q=0.01))
#Calculate density porosity (matrix density 2.65, fluid density 1)
well['PHI'] = density_porosity(well['RHOB'], 2.65, 1)
#Calculate PHIE: total porosity reduced by a shale correction of 0.3 * Vshale
well['PHIECALC'] = well['PHI'] - (well['VSHALE'] * 0.3)
well.head(10)
#Calculate Archie SW with a=1, m=2, n=2
well['SW'] = sw_archie(well['PHI'], well['RT'], well['RW'], 1, 2, 2)
#Calculate Simandoux SW with the same Archie constants and rshale=2
well['SW_SIM'] = sw_simandoux(well['PHIECALC'], well['RT'], well['RW'], 1, 2, 2, well['VSHALE'],2)
#Limit SW to 1 (mask replaces any value > 1 with 1)
well['SW_LIM'] = well['SW'].mask(well['SW']>1, 1)
well['SW__SIM_LIM'] = well['SW_SIM'].mask(well['SW_SIM']>1, 1)
well.describe()
# ### Creating a Log Plot to Display the Results
# +
fig, ax = plt.subplots(figsize=(15,10))
#Set up the plot axes: one row of seven side-by-side track columns
ax1 = plt.subplot2grid((1,7), (0,0), rowspan=1, colspan = 1)
ax2 = plt.subplot2grid((1,7), (0,1), rowspan=1, colspan = 1)
ax3 = plt.subplot2grid((1,7), (0,2), rowspan=1, colspan = 1)
ax4 = plt.subplot2grid((1,7), (0,3), rowspan=1, colspan = 1)
ax5 = ax3.twiny() #Twins the y-axis for the density track with the neutron track
ax6 = plt.subplot2grid((1,7), (0,4), rowspan=1, colspan = 1)
ax7 = ax6.twiny()
ax8 = plt.subplot2grid((1,7), (0,5), rowspan=1, colspan = 1)
ax9 = plt.subplot2grid((1,7), (0,6), rowspan=1, colspan = 1)
# As our curve scales will be detached from the top of the track,
# this code adds the top border back in without dealing with splines
ax10 = ax1.twiny()
ax10.xaxis.set_visible(False)
ax11 = ax2.twiny()
ax11.xaxis.set_visible(False)
ax12 = ax3.twiny()
ax12.xaxis.set_visible(False)
# Gamma Ray track
ax1.plot("GR", "DEPTH", data = well, color = "green")
ax1.set_xlabel("Gamma")
ax1.xaxis.label.set_color("green")
ax1.set_xlim(0, 200)
ax1.set_ylabel("Depth (m)")
ax1.tick_params(axis='x', colors="green")
ax1.spines["top"].set_edgecolor("green")
ax1.title.set_color('green')
ax1.set_xticks([0, 50, 100, 150, 200])
# Resistivity track (logarithmic x-scale via semilogx below)
ax2.plot("RT", "DEPTH", data = well, color = "red")
ax2.set_xlabel("Resistivity")
ax2.set_xlim(0.2, 2000)
ax2.xaxis.label.set_color("red")
ax2.tick_params(axis='x', colors="red")
ax2.spines["top"].set_edgecolor("red")
ax2.set_xticks([0.1, 1, 10, 100, 1000])
ax2.semilogx()
# Density track
ax3.plot("RHOB", "DEPTH", data = well, color = "red")
ax3.set_xlabel("Density")
ax3.set_xlim(1.95, 2.95)
ax3.xaxis.label.set_color("red")
ax3.tick_params(axis='x', colors="red")
ax3.spines["top"].set_edgecolor("red")
ax3.set_xticks([1.95, 2.45, 2.95])
# Sonic track (x-axis deliberately reversed: 140 down to 40)
ax4.plot("DT", "DEPTH", data = well, color = "purple")
ax4.set_xlabel("Sonic")
ax4.set_xlim(140, 40)
ax4.xaxis.label.set_color("purple")
ax4.tick_params(axis='x', colors="purple")
ax4.spines["top"].set_edgecolor("purple")
# Neutron track placed ontop of density track
ax5.plot("NPHI", "DEPTH", data = well, color = "blue")
ax5.set_xlabel('Neutron')
ax5.xaxis.label.set_color("blue")
ax5.set_xlim(0.45, -0.15)
ax5.set_ylim(4150, 3500)
ax5.tick_params(axis='x', colors="blue")
ax5.spines["top"].set_position(("axes", 1.08))
ax5.spines["top"].set_visible(True)
ax5.spines["top"].set_edgecolor("blue")
ax5.set_xticks([0.45, 0.15, -0.15])
# Porosity track (total)
ax6.plot("PHI", "DEPTH", data = well, color = "black")
ax6.set_xlabel("Total PHI")
ax6.set_xlim(0.5, 0)
ax6.xaxis.label.set_color("black")
ax6.tick_params(axis='x', colors="black")
ax6.spines["top"].set_edgecolor("black")
ax6.set_xticks([0, 0.25, 0.5])
# Porosity track (effective, twinned on top of the total-porosity track)
ax7.plot("PHIECALC", "DEPTH", data = well, color = "blue")
ax7.set_xlabel("Effective PHI")
ax7.set_xlim(0.5, 0)
ax7.xaxis.label.set_color("blue")
ax7.tick_params(axis='x', colors="blue")
ax7.spines["top"].set_position(("axes", 1.08))
ax7.spines["top"].set_visible(True)
ax7.spines["top"].set_edgecolor("blue")
ax7.set_xticks([0, 0.25, 0.5])
# Sw track (Archie, clipped to 1)
ax8.plot("SW_LIM", "DEPTH", data = well, color = "black")
ax8.set_xlabel("SW - Archie")
ax8.set_xlim(0, 1)
ax8.xaxis.label.set_color("black")
ax8.tick_params(axis='x', colors="black")
ax8.spines["top"].set_edgecolor("black")
ax8.set_xticks([0, 0.5, 1])
# Sw track (Simandoux, unclipped)
ax9.plot("SW_SIM", "DEPTH", data = well, color = "blue")
ax9.set_xlabel("SW - Simandoux")
ax9.set_xlim(0, 1)
ax9.xaxis.label.set_color("blue")
ax9.tick_params(axis='x', colors="blue")
ax9.spines["top"].set_edgecolor("blue")
ax9.set_xticks([0, 0.5, 1])
# Common functions for setting up the plot can be extracted into
# a for loop. This saves repeating code. ax5 and ax7 set their own
# depth limits above; the twin axes ax10-ax12 inherit theirs.
for ax in [ax1, ax2, ax3, ax4, ax6, ax8, ax9]:
    ax.set_ylim(4150, 3500)
    ax.grid(which='major', color='lightgrey', linestyle='-')
    ax.xaxis.set_ticks_position("top")
    ax.xaxis.set_label_position("top")
    ax.spines["top"].set_position(("axes", 1.02))
plt.tight_layout()
# -
| 05 - Petrophysical Calculations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Watch Me Code 2: String Formatting
name = "Mike"
age = 45
salary = 15.75
# legacy printf-style (%) formatting: %s substitutes a string
print("Hello there %s. How are you? " % (name))
# %f formats a float (defaults to 6 decimal places)
print("%s makes %f per hour." % (name, salary))
# precision: %.2f keeps exactly two decimal places
print("%s makes %.2f per hour." % (name, salary))
# width: %10d right-aligns the number in a 10-character field
print("-" * 10) # print 10 dashes
print("%10d" %(age))
# a minus sign in the width (%-10d) left-aligns instead
print("-" * 10)
print("%-10d" % (age))
| lessons/02-Variables/WMC2-String-Formatting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
import numpy as np
import pandas as pd
import os, time, random
# +
#read the dataset prepared after feature engineering
data_path = 'data.csv'
data = pd.read_csv(data_path)
#Understand the dataframe and datatypes for each column
print(data.info())
# -
#remove columns which might lead to ordinal behavior (ids, raw event/date fields,
#the target 'sales' and helper columns are excluded from the feature list later)
remove_features = ['id','item_id','dept_id','cat_id','state_id','store_id', 'release',
                  'event_name_1','event_type_1','event_name_2','event_type_2','event_name_1_lag_1',
                  'event_type_1_lag_1', 'event_name_1_lag_2', 'event_type_1_lag_2',
                  'event_name_1_lag_3', 'event_type_1_lag_3', 'date','wm_yr_wk','d',
                  'sales','temp_d','day', 'week', 'month', 'year', 'dayofweek', 'weekend']
# +
#Convert the object columns to datatype 'category'
category_columns=['id','item_id','dept_id','cat_id','store_id','state_id','event_name_1','event_type_1','event_name_2','event_type_2','event_name_1_lag_1', 'event_type_1_lag_1',
       'event_name_1_lag_2', 'event_type_1_lag_2', 'event_name_1_lag_3', 'event_type_1_lag_3']
#convert each column in the list, one by one
for col in category_columns:
    data[col] = data[col].astype('category')
#Create a list of Store Ids for which data is considered
stores_ids = data['store_id']
stores_ids = list(stores_ids.unique())
# -
#Check if it is indeed 'TX_1', since we choose this specific store for modeling purposes
#due to processing power limitations and to avoid OOM(Out of Memory) Error
stores_ids
#copy the dataframe into a new df so the raw data stays untouched
df = data.copy()
df
#Selected categorical columns are used for OneHotEncoding (dummy variables/columns);
#this removes ordinal behavior
df = pd.get_dummies(data=df, columns=['cat_id', 'dept_id','event_name_1','event_name_2','day',
                                          'week',
                                          'month',
                                          'year',
                                          'dayofweek',
                                          'weekend'])
# +
#create a temporary date column with integer values which denotes day number
#(strips the 'd_' prefix: 'd_123' -> 123); later used to subset into test/train
df['temp_d'] = pd.to_numeric(data['d'].str[2:])
#Once selected categorical columns are dummy encoded,
#create the final feature list by dropping everything in remove_features
features = [col for col in list(df) if col not in remove_features]
# -
#Checking dummy encoded columns
df
#List of features that we are finally considering for Modeling
features
# +
#Creating variables for limiting the data by dates (day numbers from temp_d)
START_TRAIN = 1000 # Start day for training data
END_TRAIN = 1885 # End day of our train data,
#28 days after this are left for testing(1886 - Start day for Testing Data)
LimitData = 1913 # End day for Testing Data
#Subset the data for 1000 to 1913 days
df = df[(df['temp_d']>=START_TRAIN) & (df['temp_d']<=LimitData)].reset_index(drop=True)
#df = df[(df['temp_d']>=START_TRAIN)].reset_index(drop=True)
# +
#Create train and test datasets (boolean masks over the day number)
train_mask = df['temp_d']<=END_TRAIN
#valid_mask = train_mask&(df['temp_d']>(END_TRAIN-P_HORIZON))
preds_mask = df['temp_d']>(END_TRAIN)
train = df[train_mask.values]
test = df[preds_mask.values]
#Split both train and test datasets into independent and dependent variables
x_train = train[features]
y_train = train[['sales']]
x_test = test[features]
y_test = test[['sales']]
# +
#Fill the NAs with 0, if present
x_test1 = x_test.fillna(0)
y_test1 = y_test.fillna(0)
x_train1 = x_train.fillna(0)
y_train1 = y_train.fillna(0)
# -
y_test1
# +
#Import the necessary libraries for - Linear Regression
from sklearn import linear_model
model = linear_model.LinearRegression()
#Fit the model based on training data
model.fit(x_train1,y_train1)
# -
#With the linear model built above, predict the sales for test timeframe
testing_predictions = model.predict(x_test1)
testing_predictions
# +
#Calculate the accuracy (R^2 score) for linear regression
from sklearn import metrics
actuals = np.array(y_test1)
#accuracy for train data
lin_acc = model.score(x_train1,y_train1)
print("train accuracy",lin_acc)
#accuracy for test data (NOTE: this rebinds lin_acc, overwriting the train value)
lin_acc = model.score(x_test1,y_test1)
print("test accuracy",lin_acc)
# -
# Import the Mean Squared Error and calculate RMSE on the test predictions
from sklearn import metrics
from sklearn.metrics import mean_squared_error
print(np.sqrt(mean_squared_error(y_test1, testing_predictions)))
| Linear_regression_One_Hot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
from obspy import read
# Read one waveform from the ObsPy example server and one from a local MiniSEED file.
st = read('http://examples.obspy.org/RJOB_061005_072159.ehz.new')
st1 = read('test.mseed')
st1.plot()
st.plot()
import numpy as np
# +
from matplotlib import pyplot as plt
# Gaussian noise around 200; shade green wherever the curve exceeds 195.
ys = 200 + np.random.randn(100)
x = [x for x in range(len(ys))]
plt.plot(x, ys, '-')
plt.fill_between(x, ys, 195, where=(ys > 195), facecolor='g', alpha=0.6)
plt.title("Sample Visualization")
plt.show()
# -
| ex1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
#Check the R version string
R.version.string

#Check the working directory & the library paths
getwd()
.libPaths()
# +
# Install packages
# install.packages('Rcpp')
# install.packages('randtoolbox')
# install.packages('SciViews')
# install.packages('stringr')
# install.packages('purrr')
# -
# Load libraries
library(Rcpp)
library(randtoolbox)
library(SciViews)
library(stringr)
library(purrr)
# +
# Kmers to Godel numbers for a specific k
# Works for k-mers that contain only 'A', 'C', 'G' & 'T'
# Map a k-mer to its Gödel number: sum over positions of coef_i * ln(prime_i),
# where coef_i is the encoding of the i-th nucleotide ('A','C','G','T' map to
# encoding[1..4]; any other character contributes 0).
# Requires get.primes() (randtoolbox) and ln() (SciViews) to be loaded.
kmer2godel <- function(kmer, encoding){
    godel = 0
    k = nchar(kmer)
    primes = get.primes(k)
    # Split the k-mer into characters ONCE; the original re-split the whole
    # string on every loop iteration, doing O(k^2) character work.
    nucleotides = unlist(strsplit(kmer, split=NULL))
    for (i in 1:k){
        coef = switch(
            nucleotides[i],
            'A' = encoding[1],
            'C' = encoding[2],
            'G' = encoding[3],
            'T' = encoding[4],
            0  # default for unknown nucleotides
        )
        godel = godel + coef*ln(primes[i])
    }
    return (godel)
}
# -
# Example: coefficients 1,2,3,4 for A,C,G,T respectively
encoding = c(1, 2, 3, 4)
kmer2godel('TATTGAAGTT', encoding)
| code/cutoff/.ipynb_checkpoints/kmer2godel-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''jupyter-book'': conda)'
# language: python
# name: python3
# ---
# # 流程控制
#
# {guilabel}`视频`
#
# <iframe id="Python"
# title="Python 流程控制"
# width="100%"
# height="600"
# src="https://developer.hs.net/thread/2022">
# </iframe>
#
# Python 不仅仅需要可以求值的 [表达式](intro/basic-type) 与 [函数](function),还需要一些结构用于表达循环和控制等。
#
# Python **语句** 就是告诉你的程序应该做什么的句子。
#
# - 程序由模块构成。
# - 模块包含语句。
# - 语句包含表达式。
# - 表达式建立并处理对象。
#
# ## 真值测试
#
# - 所有的对象都有一个固有的布尔值:真或假。
# - 任何非零的数字或非空的对象都是真。
# - `0`、空对象和特殊对象 `None` 被视为假。
# - 比较和相等测试是递归地应用于数据结构。
# - 比较和相等测试返回 `True` 或 `False`。
# - 布尔运算符 `and` 和 `or` 返回一个真或假的操作对象。
# - 一旦知道结果,布尔运算符就会停止评估("短路")。
#
# 真值判定|结果
# :-|:-
# `X and Y`|如果 `X` 和 `Y` 都为真,则为真。
# `X or Y`|如果 `X` 或 `Y` 为真,则为真。
# `not X`|如果 `X` 是假的,则为真。
#
# ### 比较、相等和真值
#
# - `==` 操作符测试值的相等性。
# - `is` 表达式测试对象的一致性。
#
# 真值判断:
# +
S1 = 'spam'
S2 = 'spam'
# == compares values; `is` compares object identity (may be True here due to
# CPython's small-string interning — an implementation detail, not a guarantee)
S1 == S2, S1 is S2
# -
# 比较:
# +
L1 = [1, ('a', 3)]
L2 = [1, ('a', 3)]
# Equal values but distinct objects; < and > compare element-wise (lexicographically)
L1 == L2, L1 is L2, L1 < L2, L1 > L2
# -
bool('')  # the empty string is falsy
# ### 短路计算
#
# - `or`: 从左到右求算操作对象,然后返回第一个为真的操作对象。
# - `and`: 从左到右求算操作对象,然后返回第一个为假的操作对象。
2 or 3, 3 or 2  # `or` returns the first truthy operand
[] or 3  # left operand falsy -> returns the right operand
[] or {}  # both falsy -> returns the last operand
2 and 3, 3 and 2  # `and` returns the last operand when all are truthy
[] and {}  # short-circuits: returns the first falsy operand
3 and []  # left truthy -> evaluates and returns the right operand
# ### 断言
#
# 用于测试推断:
num = -1
# Deliberately fails: demonstrates AssertionError with a custom message
assert num > 0, 'num 应该为正数!'
# ## `if` 条件
# Leap-year check with nested if/elif/else (Gregorian rules)
year = 1990
if year % 4 == 0:
    if year % 400 == 0:
        print('闰年')
    elif year % 100 == 0:
        print('平年')
    else:
        print('闰年')
else:
    print('平年')
# 使用 `and` 与 `or` 的短路逻辑简化表达式:
# The same leap-year test collapsed into one short-circuit boolean expression
year = 1990
if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0:
    print('闰年')
else:
    print('平年')
# `if` 的短路(short-ciecuit)计算:`A = Y if X else Z`
year = 1990
# Conditional expression form: `A if condition else B`
print('闰年') if (year % 4 == 0 and year % 100 != 0) or year % 400 == 0 else print('平年')
't' if 'spam' else 'f'  # a non-empty string is truthy, so this yields 't'
# ## `for` 循环
#
# 遍历序列对象:
#
#
# ```python
# for target in object: # 将对象项目分配给目标
# statements # 循环体
# ```
#
# - `pass` / `...`:空占位语句
for i in range(5):
    ... # equivalent to pass (Ellipsis as a no-op placeholder statement)
list(range(1, 10, 6))  # start 1, stop 10, step 6 -> [1, 7]
# Factorial: accumulate the product of 1..10
x = 1
for factor in range(1, 11):
    x = x * factor
print(f'10!={x}')
# Python 的 `for` 语句迭代列表或字符串等任意序列,元素的迭代顺序与在序列中出现的顺序一致。例如:
# Any Python iterable container works as the loop target
seq = [1, 2, 3, 4, 5]
for i in seq:
    print(i)
# ### 循环的技巧
#
# 在序列中循环时,用 {func}`enumerate` 函数可以同时取出位置索引和对应的值:
# enumerate() yields (index, value) pairs
for i, v in enumerate(['苹果', '相机', '飞机']):
    print(i, v)
# 同时循环两个或多个序列时,用 {func}`zip` 函数可以将其内的元素一一匹配:
questions = ['名字', '缺点', '最喜爱的颜色']
answers = ['Judy', '比较懒', '天空蓝']
# zip() pairs up elements from both sequences position-by-position
for q, a in zip(questions, answers):
    print(f'你的 {q} 是什么? 答案是 {a}。')
# 逆向循环序列时,先正向定位序列,然后调用 {func}`reversed` 函数:
# reversed() iterates the range back-to-front: 9, 7, 5, 3, 1
for i in reversed(range(1, 10, 2)):
    print(i)
# 按指定顺序循环序列,可以用 {func}`sorted` 函数,在不改动原序列的基础上,重新返回一个序列:
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
# sorted() returns a new list; key=len orders by string length (stable for ties)
for i in sorted(basket, key=len):
    print(i)
# 使用 {func}`set` 去除序列中的重复元素。使用 {func}`sorted` 加 {func}`set` 则按排序后的顺序,循环遍历序列中的唯一元素:
basket = ['apple', 'orange', 'apple', 'pear', 'orange', 'banana']
# set() removes duplicates; sorted() then yields each unique fruit in order
for f in sorted(set(basket)):
    print(f)
# ### 序列和其他类型的比较
#
# 序列对象可以与相同序列类型的其他对象比较。这种比较使用 字典式 顺序:
#
# - 首先,比较首个元素,如果不相等,则可确定比较结果;如果相等,则比较之后的元素,以此类推,直到其中一个序列结束。
# - 如果要比较的两个元素本身是相同类型的序列,则递归地执行字典式顺序比较。
# - 如果两个序列中所有的对应元素都相等,则两个序列相等。
# - 如果一个序列是另一个的初始子序列,则较短的序列可被视为较小(较少)的序列。
# - 对于字符串来说,字典式顺序使用 Unicode 码位序号排序单个字符。
#
# 下面列出了一些比较相同类型序列的例子:
(1, 2, 3) < (1, 2, 4)  # first differing element decides the result
[1, 2, 3] < [1, 2, 4]
'ABC' < 'C' < 'Pascal' < 'Python' # chained comparisons are supported
(1, 2, 3, 4) < (1, 2, 4)
(1, 2) < (1, 2, -1)  # a proper prefix compares as smaller
(1, 2, 3) == (1.0, 2.0, 3.0)  # numeric equality ignores int/float type
(1, 2, ('aa', 'ab')) < (1, 2, ('abc', 'a'), 4)  # nested sequences compare recursively
# ## `while` 循环
#
# `while` 循环结构:
#
# ```python
# 初值条件
# while test: # 循环测试
# statements # 循环体
# ```
x = 'spam'
while x: # until x is exhausted (the empty string is falsy)
    print(x, end=' ')
    x = x[1:]  # drop the first character on every pass
x = 1 # initial condition
while x <= 100: # termination test
    print(x)
    x += 27
# ### Collatz 猜想
#
# ```{note}
# 任意取一个正整数 $n$,如果 $n$ 是一个偶数,则除以 $2$ 得到 $n/2$;
# 如果 $n$ 是一个奇数,则乘以 $3$ 加 $1$ 得到 $3n+1$,重复以上操作,我们将得到一串数字。
#
# Collatz 猜想:任何正整数 $n$ 参照以上规则,都将回归 $1$。
# ```
# +
def collatz_guess(num):
    """Apply the Collatz map (halve if even, else 3n+1) until 1 is reached.

    Returns 1 whenever it terminates; the Collatz conjecture states that it
    does so for every positive integer. Floor division keeps num an int.
    """
    assert num > 0, 'num 必须为正数'
    while num != 1:
        num = num // 2 if num % 2 == 0 else 3 * num + 1
    return num


collatz_guess(75)
# -
# ### 斐波那契数列
#
# ```{note}
# 斐波那契数列:
#
# $$
# \begin{cases}
# f_0 = f_1 = 1\\
# f_{n+2} = f_{n} + f_{n+1}, & n \in \mathbb{N}
# \end{cases}
# $$
# ```
# +
def fib(n):
    """Print the Fibonacci numbers strictly below n on one line, then a newline."""
    prev, curr = 0, 1
    while prev < n:
        print(prev, end=' ')
        prev, curr = curr, prev + curr
    print()


fib(2000)
# -
# ```{note}
# 1. 第一行中的 **多重赋值**:变量 `a` 和 `b` 同时获得新值 `0` 和 `1`。
# 2. 最后一行又用了一次多重赋值,这体现在右表达式在赋值前就已经求值了。**右表达式求值顺序为从左到右。**
# ```
#
# ## `continue`
#
# `continue`:跳到最近所在循环的开头处(来到循环的首行)
# `continue` jumps back to the top of the nearest enclosing loop
x = 10
while x:
    x -= 1
    if x % 2 != 0:
        continue # skip the print for odd values
    print(x, end=' ')
for num in range(2, 8):
    if num % 2 == 0:
        print(f"{num} 是偶数")
        continue
    print(f"{num} 是奇数")
# ## `else` 子句
#
# - `break`:跳出所在的最内层循环(跳过整个循环语句)
# - `else`:只有当循环正常离开时才会执行(也就是没有碰到 `break` 语句)
#
# 和循环 `else` 子句结合,`break` 语句通常可以忽略所需要的搜索状态标志位。
def fator(y):
    '''Print only the first (largest) proper factor of y, or report a prime.'''
    candidate = y // 2
    while candidate > 1:
        if y % candidate == 0:
            print(y, '有因子', candidate)
            break
        candidate -= 1
    else:  # executed only when the loop ends without hitting break
        print(y, '是质数!')

fator(7), fator(88), fator(45);
# 看一个更复杂的例子:
for n in range(2, 10):
    for x in range(2, n):
        if n % x == 0:
            print(n, '=', x, 'x', n//x)
            break
    else:
        # loop fell through without finding a factor
        print(n, '是质数!')
| doc/python-study/basic/controlflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/priyanshgupta1998/Machine_learning/blob/master/Access_Camera_Through_Google_Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="Yq35gfNwMfNf" colab_type="code" colab={}
# + id="MBT2CKA9MfPJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 514} outputId="b550f079-504f-4204-b4cc-b56f7b7e33a4"
from IPython.display import display, Javascript
from google.colab.output import eval_js
from base64 import b64decode
def take_photo(filename='photo.jpg', quality=0.8):
    """Capture one photo from the browser webcam (Google Colab only).

    Injects JavaScript that shows a live video preview with a Capture button,
    waits for the click, draws the current frame onto a canvas, and returns it
    to Python as a base64 JPEG data URL which is decoded and written to disk.

    Args:
        filename: path the JPEG is written to.
        quality: JPEG quality passed to canvas.toDataURL (0..1).

    Returns:
        The filename the decoded image was saved to.
    """
    js = Javascript('''
    async function takePhoto(quality) {
      const div = document.createElement('div');
      const capture = document.createElement('button');
      capture.textContent = 'Capture';
      div.appendChild(capture);
      const video = document.createElement('video');
      video.style.display = 'block';
      const stream = await navigator.mediaDevices.getUserMedia({video: true});
      document.body.appendChild(div);
      div.appendChild(video);
      video.srcObject = stream;
      await video.play();
      // Resize the output to fit the video element.
      google.colab.output.setIframeHeight(document.documentElement.scrollHeight, true);
      // Wait for Capture to be clicked.
      await new Promise((resolve) => capture.onclick = resolve);
      const canvas = document.createElement('canvas');
      canvas.width = video.videoWidth;
      canvas.height = video.videoHeight;
      canvas.getContext('2d').drawImage(video, 0, 0);
      stream.getVideoTracks()[0].stop();
      div.remove();
      return canvas.toDataURL('image/jpeg', quality);
    }
    ''')
    display(js)  # install the JS function in the notebook output frame
    # invoke it; result is a 'data:image/jpeg;base64,...' URL
    data = eval_js('takePhoto({})'.format(quality))
    # strip the data-URL header and decode the base64 payload
    binary = b64decode(data.split(',')[1])
    with open(filename, 'wb') as f:
        f.write(binary)
    return filename
from IPython.display import Image

# Take a photo and show it; the broad except is a deliberate notebook-level
# boundary so permission/hardware failures print instead of crashing the cell.
try:
    filename = take_photo()
    print('Saved to {}'.format(filename))
    # Show the image which was just taken.
    display(Image(filename))
except Exception as err:
    # Errors will be thrown if the user does not have a webcam or if they do not
    # grant the page permission to access it.
    print(str(err))
# + id="Vq1HWzvTMfQv" colab_type="code" colab={}
# + id="VPJxiP4ZMfSa" colab_type="code" colab={}
# + id="IADGZaKCMfWz" colab_type="code" colab={}
# + id="fa8_8NdnMfd8" colab_type="code" colab={}
# + id="fOCLoGSFMfhz" colab_type="code" colab={}
# + id="vlmdBapqMfs5" colab_type="code" colab={}
# + id="rzrYyRrvMfyG" colab_type="code" colab={}
# + id="sWyvCzJ2MfrS" colab_type="code" colab={}
# + id="8eAPEsx0MfpG" colab_type="code" colab={}
# + id="w3Pvn3XhMfmB" colab_type="code" colab={}
# + id="hmTmt1K0MfbJ" colab_type="code" colab={}
| Access_Camera_Through_Google_Colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics.pairwise import cosine_similarity
# ## Load original word embeddings
# +
## load original word embeddings
#import gensim
#import json
#import csv
#import pickle as pickle
#from sklearn.decomposition import PCA
#import subprocess
# Load Google's pre-trained Word2Vec model
#model = gensim.models.KeyedVectors.load_word2vec_format(
# 'D:/word2vec/GoogleNews-vectors-negative300.bin/GoogleNews-vectors-negative300.bin', binary=True)
# How to access word vectors
# dog = model['dog']
# print dog.shape
# print dog[:10]
# -
'''
second = model['second']
third = model['third']
fourth = model['fourth']
fifth = model['fifth']
sixth = model['sixth']
seventh = model['seventh']
blue = model['blue']
green = model['green']
red = model['red']
ball = model['ball']
embeddings = pd.DataFrame([['second', second],['third', third],['fourth', fourth],
['fifth', fifth],['sixth', sixth],['seventh', seventh],
['blue', blue],['green', green],['red', red],
['ball', ball]],
columns = ['word', 'embedding'])
embeddings.to_csv('word2vec/embeddings.csv', index=False)
'''
'''
#reduce dimensionality of vectors using PCA (using loaded vectors)
X_train_names = []
X_train = []
listofwords = [second, third, fourth, fifth, sixth, seventh, blue, green, red, ball]
listofnames = ['second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'blue', 'green', 'red', 'ball']
run = 0
for word in listofwords:
X_train.append(word)
X_train_names.append(listofnames[run])
run = run + 1
X_train = np.asarray(X_train)
X = X_train
X = X - X.mean(axis=0)
cov_X = np.cov(np.transpose(X))
values, vectors = np.linalg.eig(cov_X)
projection = np.transpose(np.transpose(vectors).dot(np.transpose(X)))
reduced_projection = projection[:,np.std(projection,axis=0) > 0.00000000000000029] #10 dim
#reduced_projection = projection[:,np.std(projection,axis=0) > 0.01]
reduced_projection = np.real(reduced_projection)
second = reduced_projection[0]
third = reduced_projection[1]
fourth = reduced_projection[2]
fifth = reduced_projection[3]
sixth = reduced_projection[4]
seventh = reduced_projection[5]
blue = reduced_projection[6]
green = reduced_projection[7]
red = reduced_projection[8]
ball = reduced_projection[9]
reduced_embeddings = pd.DataFrame([['second', second],['third', third],['fourth', fourth],
['fifth', fifth],['sixth', sixth],['seventh', seventh],
['blue', blue],['green', green],['red', red],
['ball', ball]],
columns = ['word', 'embedding'])
#reduced_embeddings.to_csv('word2vec/reduced_embeddings.csv', index=False)
#reduced_projection[0].shape
'''
'''
reduced_embeddings = reduced_embeddings['embedding']
second_red = np.array(np.matrix(reduced_embeddings[0])).ravel()
third_red = np.array(np.matrix(reduced_embeddings[1])).ravel()
fourth_red = np.array(np.matrix(reduced_embeddings[2])).ravel()
fifth_red = np.array(np.matrix(reduced_embeddings[3])).ravel()
sixth_red = np.array(np.matrix(reduced_embeddings[4])).ravel()
seventh_red = np.array(np.matrix(reduced_embeddings[5])).ravel()
blue_red = np.array(np.matrix(reduced_embeddings[6])).ravel()
green_red = np.array(np.matrix(reduced_embeddings[7])).ravel()
red_red = np.array(np.matrix(reduced_embeddings[8])).ravel()
ball_red = np.array(np.matrix(reduced_embeddings[9])).ravel()
'''
# ## Load full and reduced word embeddings
# load dimensionality-reduced embeddings
def _parse_embedding(cell):
    """Parse one stored embedding cell (a whitespace-separated number string)
    into a flat 1-D float array.

    NOTE: np.matrix is deprecated in modern NumPy; it is kept here to preserve
    the exact string-parsing behaviour the original 20 copy-pasted lines used.
    """
    return np.array(np.matrix(cell)).ravel()

red_embeddings = pd.read_csv('word2vec/reduced_embeddings.csv', header = 0)
red_embeddings = red_embeddings['embedding']
# one variable per word, in the same order as the CSV rows
(second_red, third_red, fourth_red, fifth_red, sixth_red,
 seventh_red, blue_red, green_red, red_red, ball_red) = (
    _parse_embedding(red_embeddings[i]) for i in range(10))

embeddings = pd.read_csv('word2vec/embeddings.csv', header = 0)
embeddings = embeddings['embedding']
(second, third, fourth, fifth, sixth,
 seventh, blue, green, red, ball) = (
    _parse_embedding(embeddings[i]) for i in range(10))
# ## Plot similarity matrices
# +
words_red = [second_red, third_red, fourth_red, fifth_red, sixth_red, seventh_red, blue_red, green_red, red_red, ball_red]
words = [second, third, fourth, fifth, sixth, seventh, blue, green, red, ball]
axis = ['second', 'third', 'fourth', 'fifth', 'sixth', 'seventh', 'blue', 'green', 'red', 'ball']
# pairwise cosine similarity between the word vectors (full vs. reduced)
corr_matrix_red = cosine_similarity(words_red)#np.corrcoef(words_red)
corr_matrix = cosine_similarity(words)#np.corrcoef(words)
cos_matrices = [corr_matrix, corr_matrix_red]
fig, ax = plt.subplots(1,2, figsize=(12,12))
for subs in range(len(ax)):
    im = ax[subs].imshow(cos_matrices[subs], cmap='viridis', interpolation='nearest')
    ax[subs].set_xticks(np.arange(len(axis)))
    ax[subs].set_yticks(np.arange(len(axis)))
    ax[subs].set_xticklabels(axis)
    ax[subs].set_yticklabels(axis)
    ax[0].set_title('Cosine similarity word2vec (300 dim)')
    ax[1].set_title('Cosine similarity word2vec (10 dim)')
    plt.setp(ax[subs].get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # colorbar ticks: 8 evenly spaced values over this matrix's own value range
    v1 = np.linspace(cos_matrices[subs].min(), cos_matrices[subs].max(), 8, endpoint=True)
    cb = plt.colorbar(im, ax = ax[subs], ticks = v1, fraction=0.046, pad=0.04)#ticks =[-0.6, 0.1, 1],
    cb.ax.set_yticklabels(["{:4.2f}".format(i) for i in v1])
# panel labels for the figure
ax[0].text(-2.7, -1, 'A', fontsize=19)
ax[1].text(-2.7, -1, 'B', fontsize=19)
plt.plot()
plt.subplots_adjust(wspace = 0.4 )
#os. getcwd()
#plt.savefig('Fig9.jpg', dpi = 150, bbox_inches='tight' )
| code_and_data/plot_word_embeddings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import string
import numpy as np
import pandas as pd
# +
# Load both releases of the sarcasm-headline dataset (one JSON record per line).
df = pd.read_json("Sarcasm_Headlines_Dataset.json", lines=True)
display(df.head())
print(df.shape)
df2 = pd.read_json("Sarcasm_Headlines_Dataset_v2.json", lines=True)
display(df2.head())
print(df2.shape)
# -
# How many v1 headlines also appear in v2 (overlap check before concatenating).
# BUG FIX: the original `sum([i in df2.headline for i in df.headline])` tested
# membership in the Series *index* (integers), never its values, so it always
# summed to 0; Series.isin compares against the values.
df.headline.isin(df2.headline).sum()
df_all = pd.concat([df, df2]).reset_index(drop=True)
df_all.shape
# the article URL is not needed for text analysis
df_clean = df_all.drop(['article_link'], axis=1)
df_clean.head()
df_clean['headline'] = df_clean['headline'].apply(lambda x: x.lower())
# word count per headline (computed before punctuation removal, as originally)
df_clean['len'] = df_clean['headline'].apply(lambda x: len(x.split(" ")))
df_clean.head()
# strip all punctuation in one vectorized pass instead of one .apply per character
df_clean['headline'] = df_clean['headline'].str.translate(str.maketrans('', '', string.punctuation))
df_clean
df_clean.groupby('is_sarcastic').describe()
df_clean[df_clean.len==151].iloc[0].headline
df_clean[df_clean.len==2]
# VADER sentiment lexicon: one polarity score per token, indexed by token.
sen = pd.read_csv('vader_lexicon.txt',
                  sep='\t',
                  usecols=[0, 1],
                  header=None,
                  names=['token', 'polarity'],
                  index_col='token'
                  )
sen.head()
# Tidy format: one row per (headline, word); 'num' is the word's position.
tidy_format = (
    df_clean['headline']
    .str.split(expand=True)
    .stack()
    .reset_index(level=1)
    .rename(columns={'level_1': 'num', 0: 'word'})
)
tidy_format.head()
# Headline polarity = sum of per-word lexicon scores; words missing from the
# lexicon contribute NaN to the left merge and are neutralised by fillna(0).
df_clean['polarity'] = (
    tidy_format
    .merge(sen, how='left', left_on='word', right_index=True)
    .reset_index()
    .loc[:, ['index', 'polarity']]
    .groupby('index')
    .sum()
    .fillna(0)
)
df_clean.head()
df_clean.groupby('is_sarcastic').describe()
| EDA_joy/headline_kaggle/headline_sarc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/themavencoder/100-Days-Of-ML-Code/blob/master/CycleGAN_Implementation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="JWPTz8m5fySu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="e6c10c80-b49d-4826-d1ac-9c28b8239288"
# !git clone https://github.com/LynnHo/CycleGAN-Tensorflow-2
# + id="GaDGylxwgUlM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="085b8be7-d862-4383-f9cf-b8fb93deb9b2"
# !pip install tensorflow-gpu==2.0
# + id="MDncRAEZg5Td" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="386bc955-ae0f-496f-e484-8939119ad3fd"
# !pip install tensorflow-addons
# + id="XID6F1XBhbo8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 547} outputId="a4eed400-afa0-4bfa-d60b-a3498fd98213"
# !pip install --upgrade tb-nightly
# + id="lFQy6YNaic6I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} outputId="aa7d63ac-19fb-4316-aea1-98271780c5bd"
# %ls
# + id="9Yr6_KaIimA9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="0aa9b250-e6d0-4390-d617-01787617cdb0"
# %cd content/
# + id="Szzh3o7CipAZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="9a387c11-9b63-4c3a-d47a-0466f6c25f4d"
# !git clone https://github.com/LynnHo/CycleGAN-Tensorflow-2
# + id="AgMxS5oqiv9p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="8b59682e-60d4-4cb6-8081-7ca87cfbeed5"
# %ls
# + id="iAFzi1-Ti18y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="83f69afc-876d-41bc-81b7-ddf956a671b1"
# %cd CycleGAN-Tensorflow-2/
# + id="UmpKSMIii437" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="986cfda8-43eb-4f72-a62a-a1a4129cc825"
# !sh ./download_dataset.sh horse2zebra
# + id="7GH63Ha0i-J2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="da1a407a-ecac-4d3e-fa82-a50dfe44967b"
# !python train.py --dataset horse2zebra
# + id="6YUtssV4jImo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} outputId="94d5d4dc-ba81-4f68-b187-5f16e3a5e8de"
# !pip install oyaml
# + id="gwMip7SPjV1k" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5925dc91-c5e6-4df3-c3f4-65864cab7b5c"
# !pip install tqdm
# + id="YjXOwquFjiOW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 275} outputId="543ba479-5516-477f-a61e-cf548e58a162"
# !pip install scikit-image
# + id="C5f04VC8jwTk" colab_type="code" colab={}
| CycleGAN_Implementation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## TASK 5
# +
# Load data
# Carry out the modelling
#
# -
import pickle
import numpy as np
import matplotlib.pyplot as plt
# +
# Load the preprocessed train/test splits pickled by the earlier tasks.
with open("Datasets/processed/training_data.pickle", "br") as fh:
    training_imgs = pickle.load(fh)
training_imgs_np = np.array(training_imgs)
with open("Datasets/processed/training_labels.pickle", "br") as fh:
    training_labels = pickle.load(fh)
with open("Datasets/processed/training_labels_one_hot.pickle", "br") as fh:
    training_labels_one_hot = pickle.load(fh)
training_labels_one_hot_np = np.array(training_labels_one_hot)
with open("Datasets/processed/test_data.pickle", "br") as fh:
    testing_data = pickle.load(fh)
test_imgs_np = np.array(testing_data)
with open("Datasets/processed/test_labels.pickle", "br") as fh:
    testing_labels = pickle.load(fh)
# coerce labels to int so they compare equal to argmax prediction indices
test_labels = [int(label) for label in testing_labels]
# +
class billy_nn:
    """A small fully-connected classifier trained with mini-batch (momentum) SGD.

    The network is a plain stack of weight matrices (no bias terms).  Hidden
    layers use the activation selected by ``activation`` ('RELU' or 'LINEAR');
    the output layer always uses softmax with a cross-entropy loss.
    """

    def linear_activation_batch(self, matrix):
        """Identity activation (used when self.activation == 'LINEAR')."""
        return matrix

    def relu_activation_batch(self, matrix):
        """Element-wise ReLU: max(0, x)."""
        return np.maximum(0, matrix)

    def relu_derivative_batch(self, matrix):
        """Element-wise ReLU derivative: 1 where x > 0, else 0.

        BUG FIX: the original mutated ``matrix`` in place, which silently
        binarized the stored hidden activations before they were reused to
        compute the weight gradients.  A fresh array is returned instead.
        """
        return (matrix > 0).astype(float)

    def softmax_activation_batch(self, matrix):
        """Row-wise softmax for a (batch, classes) score matrix."""
        z = matrix - np.max(matrix, axis=-1, keepdims=True)  # guard against exp overflow
        numerator = np.exp(z)
        denominator = np.sum(numerator, 1)
        denominator = denominator.reshape(matrix.shape[0], -1)  # (batch, 1)
        probs = numerator / denominator
        return probs

    def __init__(self, architecture = [1024, 100, 10], bias = False, activation = 'RELU', learning_rate = 0.0015,
                 regularizer_l2 = False, L2_term = 0.005, dropout = False, dropout_rate = 0.8, momentum_rate = 0.9):
        """Configure the network and create its weights immediately.

        architecture: layer widths, input dimension first, class count last.
        """
        self.bias = bias                      # unused: the network has no bias terms
        self.activation = activation
        self.architecture = architecture
        self.learning_rate = learning_rate
        self.regularizer_l2 = regularizer_l2  # unused placeholder
        self.L2_term = L2_term                # unused placeholder
        # NOTE(review): the *_drop methods referenced by train_on_batch are not
        # defined on this class; constructing with dropout=True will fail.
        self.dropout = dropout
        self.dropout_rate = dropout_rate
        self.momentum_terms = []              # per-layer running updates, filled on first batch
        self.momentum_rate = momentum_rate    # momentum rate must be between (0, 1)
        self.initialize_weights()             # weights follow the architecture

    def initialize_weights(self):
        """(Re)create one Xavier-style random weight matrix per layer pair."""
        self.weights = []
        self.biases = []  # kept for interface compatibility; never populated
        for i in range(len(self.architecture) - 1):
            fan_sum = self.architecture[i] + self.architecture[i + 1]
            weight_matrix = np.random.normal(loc=0.0, scale=2 / np.sqrt(fan_sum),
                                             size=(self.architecture[i], self.architecture[i + 1]))
            self.weights.append(weight_matrix)

    def calculate_cost_batch(self, probs, labels):
        """Cross-entropy loss summed over the batch (labels are one-hot)."""
        losses = labels * np.log(probs + 1e-5)  # epsilon guards against log(0)
        batch_loss = -losses.sum()
        return batch_loss

    def train_on_batch(self, batch_samples, batch_labels):
        """Run one forward/backward pass and update the weights.

        Stores the batch loss on ``self.batch_loss``.  Returns True.
        """
        if self.dropout == False:
            batch_probs, hidden_activations = self.forward_batch_propagation(batch_samples)
        else:
            batch_probs, hidden_activations, activation_mask = self.forward_batch_propagation_drop(batch_samples)
        self.batch_loss = self.calculate_cost_batch(batch_probs, batch_labels)
        # backpropagate the error, then update each weight matrix
        if self.dropout == False:
            self.update_weights_batch(batch_probs, hidden_activations, batch_labels, batch_samples)
        else:
            self.update_weights_batch_drop(batch_probs, batch_labels, batch_samples, activation_mask)
        return True

    def forward_batch_propagation(self, batch_samples):
        """Propagate a batch through the network (no biases).

        Returns (softmax probabilities, list of hidden-layer activations).

        BUG FIX: the original detected the output layer with the hard-coded
        test ``weight.shape[1] == 4``, which breaks for any architecture whose
        last layer is not 4 wide (including this class's own default
        [1024, 100, 10]); the last weight matrix is now detected directly.
        """
        input_batch = batch_samples
        hidden_activations = []  # needed for gradient calculation
        for weight in self.weights:
            trans_batch = np.dot(input_batch, weight)  # matrix multiply, no biases
            if weight is self.weights[-1]:  # output layer -> softmax
                probabilities_batch = self.softmax_activation_batch(trans_batch)
                break
            elif self.activation == 'RELU':
                output_batch = self.relu_activation_batch(trans_batch)
                hidden_activations.append(output_batch)
            elif self.activation == 'LINEAR':
                output_batch = self.linear_activation_batch(trans_batch)
                hidden_activations.append(output_batch)
            input_batch = output_batch
        return probabilities_batch, hidden_activations

    def update_weights_batch(self, batch_probs, hidden_activations, batch_labels, batch_samples):
        """Backpropagate the batch error and apply (momentum) SGD updates."""
        hidden_activations.reverse()  # order: last hidden layer first
        # dL/dz for softmax + cross-entropy is simply (probs - labels)
        output_layer_error = batch_probs - batch_labels
        weights_list = list(self.weights)
        weights_list.reverse()        # align weights with the reversed error order
        layer_errors = [output_layer_error.T]
        error_l = output_layer_error
        for i in range(len(weights_list) - 1):
            error_term = np.dot(weights_list[i], error_l.T)
            derivative_term = self.relu_derivative_batch(hidden_activations[i].T)
            # element-wise product completes the chain rule for this layer
            error_l_minus = error_term * derivative_term
            layer_errors.append(error_l_minus)
            error_l = error_l_minus.T
        # activations[i] is the input feeding the weight matched with layer_errors[i]
        activations = list(hidden_activations)
        activations.reverse()
        activations.insert(0, batch_samples)
        activations.reverse()
        if not self.momentum_terms:
            # first batch: plain SGD; remember the raw gradients for momentum
            for i in range(len(layer_errors)):
                weight_gradient = np.dot(layer_errors[i], activations[i])
                self.momentum_terms.append(weight_gradient)
                weights_list[i] -= self.learning_rate * weight_gradient.T
        else:
            # momentum SGD: update = rho * previous_update + lr * gradient
            for i in range(len(layer_errors)):
                weight_gradient = np.dot(layer_errors[i], activations[i])
                weight_update = (self.momentum_rate * self.momentum_terms[i]
                                 + self.learning_rate * weight_gradient)
                weights_list[i] -= weight_update.T
                self.momentum_terms[i] = weight_update
        weights_list.reverse()
        self.weights = weights_list

    def evaluate(self, data, labels):
        """Return (correct, wrong) prediction counts over ``data``.

        ``labels`` holds the integer class index of each sample.
        """
        corrects, wrongs = 0, 0
        for i in range(len(data)):
            res = self.infer_sample(data[i])
            if res.argmax() == labels[i]:
                corrects += 1
            else:
                wrongs += 1
        return corrects, wrongs

    def infer_sample(self, sample):
        """Return the output-layer scores for a single (1-D) sample."""
        return self.signal_propagation_test(sample)

    def signal_propagation_test(self, sample):
        """Inference-time forward pass for one sample vector.

        BUG FIX: the original also applied ReLU to the output layer, so any
        sample whose output scores were all negative was forced to class 0.
        The output layer now returns the raw logits; argmax of the logits
        equals argmax of the softmax, so ``evaluate`` stays consistent with
        training.
        """
        trans_vector = sample
        for weight in self.weights[:-1]:
            # hidden layers: linear transform + ReLU, no bias
            trans_vector = self.relu_activation_batch(np.dot(trans_vector, weight))
        return np.dot(trans_vector, self.weights[-1])  # raw output-layer logits
# -
# # Starting with an appropriate Architecture, momentum and learning rate
# +
### Modelling
# set seed for reproducible results
np.random.seed(1)
np.random.RandomState(1)  # NOTE(review): creates an unused generator; only seed() above has effect
nn = billy_nn( architecture = [2304,100,100,4], bias = False, activation = 'RELU', learning_rate = 0.0001, momentum_rate = 0.4)
print('\n')
weights = nn.weights
# Mini-batch gradient descent
batch_size = 32 #size of the mini-batch should be set to 1 for stochastic gradient descent
epochs = 20
iteration_losses = []   # one entry per mini-batch
epoch_accuracies = []   # one entry per epoch
#Print training elements
num_of_batches = round(training_imgs_np.shape[0] / batch_size) + 1  # estimate of batches per epoch
num_of_iterations = epochs * num_of_batches
print ('\n Batch size',batch_size )
print('_________________________________________________')
print ('\n Number of batches per epoch',num_of_batches )
print('_________________________________________________')
print ('\n Number of iterations to perform:',num_of_iterations )
print('_________________________________________________')
print('_________________________________________________')
print ('\n Momentum rate:', nn.momentum_rate )
print('_________________________________________________')
print('\n***************************************************')
# +
#for each epoch
#set seed for reproducible results
np.random.seed(1)
np.random.RandomState(1)
for epoch in range(epochs):
    # cycle through all minibatches of data
    n_samples = training_imgs_np.shape[0]
    #shuffle entire dataset indices for proper mini-batch GD
    indices = np.arange(training_imgs_np.shape[0])
    np.random.shuffle(indices)
    for start in range(0, n_samples, batch_size):
        end = min (start + batch_size, n_samples)
        batch_indices = indices[start:end]
        #train nn on mini-batch data
        nn.train_on_batch(training_imgs_np[batch_indices],training_labels_one_hot_np[batch_indices])
        #save loss on the mini-batch
        iteration_losses.append(nn.batch_loss)
    # Evaluate training accuracy once per epoch (labels are integer class ids)
    corrects, wrongs = nn.evaluate(training_imgs_np, training_labels)
    accu = corrects / ( corrects + wrongs)
    print('_________________________________________________\n')
    print("Training accuracy after epoch ", accu, '\n')
    epoch_accuracies.append(accu)
    #epoch completed
    print ("Epochs completed {} / {} ".format(epoch+1,epochs))
# +
# training diagnostics: per-iteration loss and per-epoch accuracy curves
# NOTE(review): plt.plot requires len(iteration_losses) == num_of_iterations;
# num_of_batches was estimated as round(n/batch_size)+1, which can disagree
# with the true batch count and make this call raise — verify.
plt.plot(range(num_of_iterations), iteration_losses)
plt.ylabel('Cost')
plt.xlabel('Iterations')
plt.title('Loss')
plt.savefig('Results//Loss.png')
plt.show()
plt.plot(range(epochs), epoch_accuracies)
plt.ylabel('Accuracy %')
plt.xlabel('Epochs')
plt.title('Accuracy')
plt.savefig('Results//Accuracy.png')
plt.show()
### Test the network on the held-out set after training
corrects, wrongs = nn.evaluate(test_imgs_np, testing_labels)
print("\n Testing accuracy after training ", corrects / ( corrects + wrongs), '\n')
# -
# # Iterating over the learning rate
# +
#set seed for reproducible results
rates = [0.0001, 0.00015, 0.0002]
accuracies = []        # one per-epoch accuracy list per learning rate
losses= []             # one per-iteration loss list per learning rate
test_accuracies = []   # final test accuracy per learning rate
np.random.seed(8)
np.random.RandomState(8)
# retrain the same architecture once per candidate learning rate
for lr in rates:
    print(lr)
    nn = billy_nn( architecture = [2304,100,100,4], bias = False, activation = 'RELU', learning_rate = lr, momentum_rate = 0.4)
    iteration_losses = []
    epoch_accuracies = []
    for epoch in range(epochs):
        # cycle through all minibatches of data
        n_samples = training_imgs_np.shape[0]
        #shuffle entire dataset indices for proper mini-batch GD
        indices = np.arange(training_imgs_np.shape[0])
        np.random.shuffle(indices)
        for start in range(0, n_samples, batch_size):
            end = min (start + batch_size, n_samples)
            batch_indices = indices[start:end]
            #train nn on mini-batch data
            nn.train_on_batch(training_imgs_np[batch_indices],training_labels_one_hot_np[batch_indices])
            #save loss on the mini-batch
            iteration_losses.append(nn.batch_loss)
        # Evaluate training accuracy once per epoch (labels are integer class ids)
        corrects, wrongs = nn.evaluate(training_imgs_np, training_labels)
        accu = corrects / ( corrects + wrongs)
        #print('_________________________________________________\n')
        #print("Training accuracy after epoch ", accu, '\n')
        epoch_accuracies.append(accu)
        #epoch completed
        #print ("Epochs completed {} / {} ".format(epoch+1,epochs))
    accuracies.append(epoch_accuracies)
    losses.append(iteration_losses)
    ### Test the network after training at this learning rate
    corrects, wrongs = nn.evaluate(test_imgs_np, testing_labels)
    print("\n Testing accuracy after training ", corrects / ( corrects + wrongs), '\n')
    test_accuracies.append(corrects / ( corrects + wrongs))
# +
#
# Compare models
#
# -
# final test accuracy for each candidate learning rate
test_accuracies
# +
# overlay the loss and accuracy curves of all three learning rates
plt.plot(range(len(iteration_losses)), losses[0], label = "0.0001" )
plt.plot(range(len(iteration_losses)), losses[1], label = "0.00015")
plt.plot(range(len(iteration_losses)), losses[2], label = "0.0002")
plt.ylabel('Cost')
plt.xlabel('Iterations')
plt.title('Learning rate Iterations: Loss')
plt.legend(loc = "upper right")
plt.savefig('Results//Comparing_losses.png')
plt.show()
plt.plot(range(epochs), accuracies[0], label = '0.0001')
plt.plot(range(epochs), accuracies[1], label = '0.00015')
plt.plot(range(epochs), accuracies[2], label = '0.0002')
plt.ylabel('Accuracy %')
plt.xlabel('Epochs')
plt.title('Learning rate Iterations: Accuracy')
plt.legend(loc = "upper left")
plt.savefig('Results//Comparing_accuracies.png')
plt.show()
# -
# # Applying Dropout to the best model
# +
class billy_nn_dropout:
    def linear_activation_batch(self,matrix):
        """Identity activation (used when self.activation == 'LINEAR')."""
        return matrix
    def relu_activation_batch(self,matrix):
        """Element-wise ReLU: max(0, x)."""
        return np.maximum(0,matrix)
    def relu_derivative_batch(self, matrix):
        """ReLU derivative mask: 1 where x > 0, else 0.

        NOTE(review): this modifies ``matrix`` in place and returns it, so the
        caller's array is clobbered — in the backprop methods that binarizes
        the stored hidden activations before they are reused for the weight
        gradients; confirm this is intended.
        """
        matrix[matrix<=0] = 0
        matrix[matrix>0] = 1
        return matrix
    def softmax_activation_batch(self, matrix):
        """Row-wise softmax over a (batch, classes) score matrix."""
        z = matrix - np.max(matrix, axis=-1, keepdims=True) #prevent overflow here, with this
        numerator = np.exp(z)
        denominator = np.sum(numerator,1)
        denominator = denominator.reshape(matrix.shape[0],-1) # (number of samples, 1)
        probs = numerator/denominator
        return probs
    def __init__(self, architecture = [1024, 100, 10] , bias = False, activation = 'RELU', learning_rate = 0.0015,
                 regularizer_l2 = False, L2_term = 0.005, dropout = False, dropout_rate = 0.5, momentum_rate = 0.4):
        """Configure the network; weights are created immediately.

        architecture: layer widths, input dimension first, class count last.
        dropout_rate: keep-probability used by the inverted-dropout masks.
        """
        self.bias = bias  # unused: the network has no bias terms
        self.activation = activation
        self.architecture = architecture
        self.learning_rate = learning_rate
        self.regularizer_l2 = regularizer_l2  # unused placeholder
        self.L2_term = L2_term  # unused placeholder
        self.dropout = dropout
        self.dropout_rate = dropout_rate
        self.momentum_terms = []  # per-layer running updates; filled after the first batch
        self.momentum_rate = momentum_rate  # must lie in (0, 1)
        self.initialize_weights() #initialize weights by taking into account the architecture
    def initialize_weights(self):
        """Create one Xavier-style random weight matrix per consecutive layer pair."""
        self.weights = []
        self.biases = []  # never populated; biases are not used anywhere
        #initialize weights for arbitrary lenght NN
        for _ in range(len(self.architecture)-1):
            weight_matrix = np.random.normal(loc=0.0,scale=2/np.sqrt(self.architecture[_]+self.architecture[_+1]),
                                             size=(self.architecture[_],self.architecture[_+1]))
            self.weights.append(weight_matrix)
            #biases = np.random.normal(loc=0.0, scale=1,size=(self.architecture[i+1]))
    #
    ## COST FUNCTION UNDER REVIEW
    #
    def calculate_cost_batch(self, probs, labels):
        """Cross-entropy loss summed over the batch (labels are one-hot)."""
        losses = labels * np.log(probs+ 1e-5) # epsilon works against log(0) underflow
        #losses
        batch_loss = - losses.sum()
        return batch_loss
    def train_on_batch(self, batch_samples, batch_labels):
        """One forward/backward pass over a mini-batch.

        Stores the batch loss on ``self.batch_loss`` and returns True.
        The dropout path threads the per-layer masks through to backprop.
        """
        if self.dropout == False:
            batch_probs, hidden_activations = self.forward_batch_propagation(batch_samples)
        else:
            batch_probs, hidden_activations, activation_masks = self.forward_batch_propagation_dropout(batch_samples)
        #calculate batch loss
        batch_loss = self.calculate_cost_batch( batch_probs, batch_labels )
        self.batch_loss = batch_loss
        ####update weights for the batch, first backpropagate the error, and then update each weight matrix
        if self.dropout == False :
            self.update_weights_batch( batch_probs, hidden_activations, batch_labels, batch_samples )
        else:
            self.update_weights_batch_dropout( batch_probs, hidden_activations, batch_labels, batch_samples, activation_masks)
        return True
    def forward_batch_propagation_dropout(self,batch_samples):
        """Forward pass with inverted dropout on every hidden layer.

        Returns (softmax probabilities, hidden activations, dropout masks).
        NOTE(review): the output layer is detected via ``weight.shape[1] == 4``,
        i.e. a hard-coded class count — this silently breaks for architectures
        whose final layer is not 4 wide.
        """
        activation_masks = []
        nn_layers = self.architecture[1:-1] # hidden-layer widths only (skip input and output)
        for layer in nn_layers:
            # inverted dropout: keep with prob dropout_rate, rescale the survivors
            activation_mask = (np.random.rand(batch_samples.shape[0],layer) < self.dropout_rate) / self.dropout_rate
            activation_masks.append(activation_mask)
        ## forward propagation using masks
        # 1. linear transformation
        # 2. non-linear activation
        # 3. activation_mask application
        input_batch = batch_samples
        hidden_activations = []
        mask_counter = 0
        for weight in self.weights:
            trans_batch = np.dot(input_batch, weight) #matrix multiplication, no biasses added
            if weight.shape[1] == 4: #if we are multipying the by the final weight matrix
                #apply softmax activation to the batch
                probabilities_batch = self.softmax_activation_batch(trans_batch)
                break
            elif self.activation == 'RELU':
                output_batch = self.relu_activation_batch(trans_batch)
                output_batch = output_batch * activation_masks[mask_counter] #dropout
                hidden_activations.append(output_batch)
                mask_counter += 1
            elif self.activation == 'LINEAR':
                # NOTE(review): the LINEAR branch applies no dropout mask — verify intended
                output_batch = self.linear_activation_batch(trans_batch)
                hidden_activations.append(output_batch)
            input_batch = output_batch
        return probabilities_batch, hidden_activations, activation_masks
    def update_weights_batch_dropout(self,batch_probs, hidden_activations, batch_labels, batch_samples ,activation_masks) :
        """Backpropagate with the dropout masks, then apply (momentum) SGD updates."""
        hidden_activations.reverse()
        # error to propagate: dL/dz for softmax + cross-entropy
        output_layer_error = batch_probs - batch_labels
        weights_list = list(self.weights)
        weights_list.reverse()
        layer_errors = []
        layer_errors.append(output_layer_error.T)
        error_l = output_layer_error
        # back-prop using the activation masks for dropout
        for i in range(len(weights_list)-1):
            error_term = np.dot(weights_list[i],error_l.T)
            derivative_term = self.relu_derivative_batch(hidden_activations[i].T)
            #element-wise multiplication for the full error expression
            error_l_minus = error_term * derivative_term
            layer_errors.append(error_l_minus)
            error_l = error_l_minus.T
        # activations[i] feeds the weight matrix matched with layer_errors[i]
        activations = list(hidden_activations)
        activations.reverse()
        activations.insert(0,batch_samples)
        activations.reverse()
        ## weight updates using the hidden activations and layer error
        activation_masks.reverse()
        activation_masks.append( np.ones(batch_samples.shape))  # input layer: no dropout
        """
        for i in range(len(layer_errors)):
            masked_activation = activations[i] * activation_masks[mask_counter]
            weight_update = np.dot(layer_errors[i], masked_activation)
            weights_list[i] -= self.learning_rate * weight_update.T #take some of the gradient using learning rate
            mask_counter += 1
        weights_list.reverse()
        """
        if not self.momentum_terms: #if momentum terms have not been set... use regular GD and save the weight updates for next time
            mask_counter = 0
            for i in range(len(layer_errors)):
                masked_activation = activations[i] * activation_masks[mask_counter]
                weight_update = np.dot(layer_errors[i], masked_activation)
                self.momentum_terms.append(weight_update)
                weights_list[i] -= self.learning_rate * weight_update.T
                mask_counter += 1
            weights_list.reverse()
        else: # if the momentum terms DO exist, then use them for accelerating Gradient Descent!!!!!!!!
            momentum_terms = list(self.momentum_terms)
            mask_counter = 0
            for i in range(len(layer_errors)):
                masked_activation = activations[i] * activation_masks[mask_counter]
                weight_update = np.dot(layer_errors[i], masked_activation)
                weight_update = (self.momentum_rate * momentum_terms[i]) + self.learning_rate * weight_update #momentum
                weights_list[i] -= weight_update.T # weight update
                momentum_terms[i] = weight_update # momentum update (keep a runnning average)
                mask_counter += 1
            weights_list.reverse()
            self.weights = weights_list
            self.momentum_terms = momentum_terms
def forward_batch_propagation(self, batch_samples):
    """Forward-propagate a mini-batch through the network (no bias terms).

    Parameters:
        batch_samples: 2-D array, one sample per row.

    Returns:
        (probabilities_batch, hidden_activations): softmax class
        probabilities for the batch, and the list of hidden-layer
        activations saved for the backward pass.
    """
    input_batch = batch_samples
    hidden_activations = []  # needed for gradient calculation
    # Generalized: detect the output layer by identity with the last weight
    # matrix instead of the hard-coded `weight.shape[1] == 4`, so the network
    # works for any number of output classes (and a hidden layer of width 4
    # can no longer terminate the forward pass early).
    output_weight = self.weights[-1]
    for weight in self.weights:
        trans_batch = np.dot(input_batch, weight)  # matrix multiplication, no biases added
        if weight is output_weight:
            # Output layer: softmax over the class logits.
            probabilities_batch = self.softmax_activation_batch(trans_batch)
            break
        elif self.activation == 'RELU':
            output_batch = self.relu_activation_batch(trans_batch)
            hidden_activations.append(output_batch)  # save hidden activation
        elif self.activation == 'LINEAR':
            output_batch = self.linear_activation_batch(trans_batch)
            hidden_activations.append(output_batch)  # save hidden activation
        input_batch = output_batch
    return probabilities_batch, hidden_activations
def update_weights_batch(self, batch_probs, hidden_activations, batch_labels, batch_samples) :
    """Backward pass for one mini-batch with L2 regularization and momentum.

    batch_probs: softmax outputs of the forward pass.
    hidden_activations: per-layer hidden activations from the forward pass
        (reversed in place below — the caller's list is mutated).
    batch_labels: one-hot target labels for the batch.
    batch_samples: the raw input batch (used as activation of the input layer).
    """
    hidden_activations.reverse()  # work from the output layer back toward the input
    output_layer_error = batch_probs - batch_labels # error to propagate (softmax + cross-entropy gradient)
    weights_list = list(self.weights)
    weights_list.reverse()
    layer_errors = [] # reverse this if needed
    layer_errors.append(output_layer_error.T)
    error_l = output_layer_error
    # Back-propagate the error layer by layer (ReLU derivative assumed here,
    # regardless of self.activation — NOTE(review): LINEAR nets would need
    # a different derivative; confirm only RELU is used with this path).
    for i in range(len(weights_list)-1):
        error_term = np.dot(weights_list[i],error_l.T)
        derivative_term = self.relu_derivative_batch(hidden_activations[i].T)
        #element-wise multiplication for the full error expression
        error_l_minus = error_term * derivative_term
        layer_errors.append(error_l_minus)
        error_l = error_l_minus.T
    # layer errors created here.
    # update weights here using the layer errors and the hidden activations
    # Build the activation list in the same (reversed) order as layer_errors,
    # with the raw input batch acting as the first layer's "activation".
    activations = list(hidden_activations)
    activations.reverse()
    activations.insert(0,batch_samples)
    activations.reverse()
    """
    for i in range(len(layer_errors)):
        weight_update = np.dot(layer_errors[i],activations[i])
        weights_list[i] -= self.learning_rate * weight_update.T #take some of the gradient using learning rate
    weights_list.reverse()
    self.weights = weights_list
    """
    ########## Momentum weight updates
    if not self.momentum_terms: #if momentum terms have not been set... use regular GD and save the weight updates for next time
        # First batch: plain gradient descent; seed the momentum buffer.
        # NOTE(review): here the RAW gradient is stored, while the else-branch
        # stores the learning-rate-scaled update — the first momentum step is
        # therefore on a different scale; confirm this is intended.
        for i in range(len(layer_errors)):
            weight_gradient = np.dot(layer_errors[i],activations[i])
            weight_gradient = weight_gradient + (self.L2_term * weights_list[i].T) #regularization term
            self.momentum_terms.append(weight_gradient)
            # In-place `-=` mutates the same ndarrays referenced by self.weights,
            # so the network is updated even without reassigning self.weights here.
            weights_list[i] -= self.learning_rate * weight_gradient.T
        weights_list.reverse()
    else: # if the momentum terms DO exist, then use them for accelerating Gradient Descent!!!!!!!!
        momentum_terms = list(self.momentum_terms)
        for i in range(len(layer_errors)):
            weight_gradient = np.dot(layer_errors[i],activations[i])
            weight_gradient = weight_gradient + (self.L2_term * weights_list[i].T) #regularization term
            weight_update = (self.momentum_rate * momentum_terms[i]) + self.learning_rate * weight_gradient #momentum
            weights_list[i] -= weight_update.T # weight update
            momentum_terms[i] = weight_update # momentum update (keep a running average)
        weights_list.reverse()
        self.weights = weights_list
        self.momentum_terms = momentum_terms
def evaluate(self, data, labels):
    """Count argmax predictions against integer labels.

    Returns a (corrects, wrongs) pair over the whole dataset.
    """
    corrects = 0
    wrongs = 0
    for idx, sample in enumerate(data):
        predicted_class = self.infer_sample(sample).argmax()
        if predicted_class == labels[idx]:
            corrects += 1
        else:
            wrongs += 1
    return corrects, wrongs
def infer_sample(self, sample):
    """Return the trained network's raw prediction vector for one sample."""
    return self.signal_propagation_test(sample)
def signal_propagation_test(self, sample):
    """Inference-time forward pass for a single sample (no biases).

    Fix: the output layer now uses softmax, matching the training-time
    forward pass (forward_batch_propagation). Previously ReLU was applied
    to the output layer as well, which clips negative logits to zero — when
    all logits are negative, argmax degenerates to class 0.
    """
    trans_vector = sample
    output_weight = self.weights[-1]
    for weight in self.weights:
        trans_vector = np.dot(trans_vector, weight)  # matrix transformation
        if weight is output_weight:
            # Output layer: softmax class probabilities, as in training.
            trans_vector = self.softmax_activation_batch(trans_vector)
        else:
            trans_vector = self.relu_activation_batch(trans_vector)  # hidden layer: ReLU, no bias added
    return trans_vector
### Modelling
# -
# Dropout network: 2304 inputs -> 100 -> 100 -> 4 classes, ReLU hidden units,
# no biases, SGD with momentum 0.4.
# NOTE(review): whether dropout_rate=0.8 is a keep-probability or a
# drop-probability is not visible here — confirm in billy_nn_dropout.
# NOTE(review): the name `nn` shadows the `torch.nn` import from the top of
# this notebook.
nn = billy_nn_dropout( architecture = [2304,100,100,4], bias = False, activation = 'RELU', learning_rate = 0.0001, dropout = True,
                      momentum_rate = 0.4, dropout_rate = 0.8)
iteration_losses = []   # loss recorded after every mini-batch
epoch_accuracies = []   # training accuracy recorded after every epoch
# +
# Mini-batch gradient-descent training loop.
#set seed for reproducible results
np.random.seed(1)
np.random.RandomState(1)  # NOTE(review): constructs a RandomState and discards it — this line has no effect on the global RNG
for epoch in range(epochs):
    # cycle through all minibatches of data
    n_samples = training_imgs_np.shape[0]
    #shuffle entire dataset indices for proper mini-batch GD
    indices = np.arange(training_imgs_np.shape[0])
    np.random.shuffle(indices)
    for start in range(0, n_samples, batch_size):
        end = min (start + batch_size, n_samples)  # last batch may be smaller
        batch_indices = indices[start:end]
        #train nn on mini-batch data
        nn.train_on_batch(training_imgs_np[batch_indices],training_labels_one_hot_np[batch_indices])
        #save loss on the mini-batch
        iteration_losses.append(nn.batch_loss)
    # Evaluate training-set accuracy once per epoch (full pass over the data).
    corrects, wrongs = nn.evaluate(training_imgs_np, training_labels) #this is the integer representation
    accu = corrects / ( corrects + wrongs)
    print('_________________________________________________\n')
    print("Training accuracy after epoch ", accu, '\n')
    epoch_accuracies.append(accu)
    #epoch completed
    print ("Epochs completed {} / {} ".format(epoch+1,epochs))
# +
# Plot and save the per-iteration training loss curve.
# `num_of_iterations` is defined earlier in the notebook and must equal
# len(iteration_losses) for the x-axis to line up.
plt.plot(range(num_of_iterations), iteration_losses)
plt.ylabel('Cost')
plt.xlabel('Iterations')
plt.title('Dropout Loss')
plt.savefig('Results//Dropout_Loss.png')
plt.show()
# Plot and save the per-epoch training accuracy curve.
plt.plot(range(epochs), epoch_accuracies)
plt.ylabel('Accuracy %')
plt.xlabel('Epochs')
plt.title('Dropout Accuracy')  # fixed typo: was 'Dropoout Accuracy'
plt.savefig('Results//Dropout_Accuracy.png')
plt.show()
# -
### Test the network with no training
# Final evaluation on the held-out test set (testing_labels are integer class ids).
corrects, wrongs = nn.evaluate(test_imgs_np, testing_labels)
print("\n Testing accuracy after training ", corrects / ( corrects + wrongs), '\n')
# ## End of Notebook
| Submission/Task 5/Task 5 Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Showing speed over all trials
# +
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from matplotlib.patches import Circle
import matplotlib.tri as tri
import numpy as np
from scipy.spatial.transform import Rotation as R
import math
# Local paths for raw session logs and for saving figures.
root = 'C:/Users/Fabian/Desktop/Analysis/Multiple_trial_analysis/Data/Raw/'
figures = 'C:/Users/Fabian/Desktop/Analysis/Multiple_trial_analysis/Figures/'
#Data with beacon metadata
# NOTE(review): `beacon`/`beacon2` load the same files as
# `beacon_Day86_fs1`/`beacon_Day86_fs2` below (duplicate reads).
beacon = pd.read_csv(root+'beacons 20200128-151826.txt',sep=" ", header=None)
beacon2 = pd.read_csv(root+'beacons 20200128-160013.txt',sep=" ", header=None)
beacon_Day86_fs2 = pd.read_csv(root+'beacons 20200128-160013.txt',sep=" ", header=None)
beacon_Day86_fs1 = pd.read_csv(root+'beacons 20200128-151826.txt',sep=" ", header=None)
beacon_Day87_fs2 = pd.read_csv(root+'beacons 20200129-153534.txt',sep=" ", header=None)
beacon_Day87_fs1 = pd.read_csv(root+'beacons 20200129-161806.txt',sep=" ", header=None)
beacon_Day88_fs2 = pd.read_csv(root+'beacons 20200130-102126.txt',sep=" ", header=None)
beacon_Day88_fs1 = pd.read_csv(root+'beacons 20200130-111741.txt',sep=" ", header=None)
beacon_Day89_fs2 = pd.read_csv(root+'beacons 20200130-161126.txt',sep=" ", header=None)
beacon_Day89_fs1 = pd.read_csv(root+'beacons 20200130-151829.txt',sep=" ", header=None)
beacon_Day90_fs2 = pd.read_csv(root+'beacons 20200203-154441.txt',sep=" ", header=None)
beacon_Day90_fs1 = pd.read_csv(root+'beacons 20200203-145842.txt',sep=" ", header=None)
beacon_Day91_fs2 = pd.read_csv(root+'beacons 20200204-125552.txt',sep=" ", header=None)
beacon_Day91_fs1 = pd.read_csv(root+'beacons 20200204-133905.txt',sep=" ", header=None)
beacon_Day92_fs2 = pd.read_csv(root+'beacons 20200205-143220.txt',sep=" ", header=None)
beacon_Day92_fs1 = pd.read_csv(root+'beacons 20200205-151052.txt',sep=" ", header=None)
beacon_Day93_fs2 = pd.read_csv(root+'beacons 20200206-133529.txt',sep=" ", header=None)
beacon_Day93_fs1 = pd.read_csv(root+'beacons 20200206-125706.txt',sep=" ", header=None)
# Raw position logs, one DataFrame per session: Day<N>_fs<animal>.
# Whitespace-separated, no header; column 0 is used as time and columns 1/3
# as coordinates later in this notebook.
Day46_fs1 = pd.read_csv(root+'position 20190923-174441.txt',sep=" ", header=None)
Day46_fs2 = pd.read_csv(root+'position 20190923-171112.txt',sep=" ", header=None)
Day47_fs1 = pd.read_csv(root+'position 20191001-112411.txt',sep=" ", header=None)
Day47_fs2 = pd.read_csv(root+'position 20191001-115127.txt',sep=" ", header=None)
Day48_fs1 = pd.read_csv(root+'position 20191002-115000.txt',sep=" ", header=None)
Day48_fs2 = pd.read_csv(root+'position 20191002-111038.txt',sep=" ", header=None)
Day51_fs1 = pd.read_csv(root+'position 20191106-170809.txt',sep=" ", header=None)
Day52_fs2 = pd.read_csv(root+'position 20191107-174215.txt',sep=" ", header=None)
Day52_fs1 = pd.read_csv(root+'position 20191107-183857.txt',sep=" ", header=None)
Day53_fs2 = pd.read_csv(root+'position 20191108-142321.txt',sep=" ", header=None)
Day53_fs1 = pd.read_csv(root+'position 20191108-145125.txt',sep=" ", header=None)
Day66_fs1 = pd.read_csv(root+'position 20191118-161325.txt',sep=" ", header=None)
Day66_fs2 = pd.read_csv(root+'position 20191118-171209.txt',sep=" ", header=None)
Day72_fs1 = pd.read_csv(root+'position 20191127-122008.txt',sep=" ", header=None)
Day72_fs2 = pd.read_csv(root+'position 20191127-132223.txt',sep=" ", header=None)
Day79_fs2 = pd.read_csv(root+'position 20200121-154004.txt',sep=" ", header=None)
Day79_fs1 = pd.read_csv(root+'position 20200121-161359.txt',sep=" ", header=None)
Day80_fs2 = pd.read_csv(root+'position 20200122-141738.txt',sep=" ", header=None)
Day80_fs1 = pd.read_csv(root+'position 20200122-133022.txt',sep=" ", header=None)
Day81_fs2 = pd.read_csv(root+'position 20200123-141930.txt',sep=" ", header=None)
Day81_fs1 = pd.read_csv(root+'position 20200123-150059.txt',sep=" ", header=None)
Day82_fs2 = pd.read_csv(root+'position 20200124-151642.txt',sep=" ", header=None)
Day82_fs1 = pd.read_csv(root+'position 20200124-160826.txt',sep=" ", header=None)
Day83_fs2 = pd.read_csv(root+'position 20200126-183810.txt',sep=" ", header=None)
Day83_fs1 = pd.read_csv(root+'position 20200126-180200.txt',sep=" ", header=None)
Day84_fs2 = pd.read_csv(root+'position 20200127-205615.txt',sep=" ", header=None)
Day84_fs1 = pd.read_csv(root+'position 20200127-155645.txt',sep=" ", header=None)
Day85_fs2 = pd.read_csv(root+'position 20200128-112255.txt',sep=" ", header=None)
Day85_fs1 = pd.read_csv(root+'position 20200128-104637.txt',sep=" ", header=None)
Day86_fs2 = pd.read_csv(root+'position 20200128-160013.txt',sep=" ", header=None)
Day86_fs1 = pd.read_csv(root+'position 20200128-151826.txt',sep=" ", header=None)
Day87_fs2 = pd.read_csv(root+'position 20200129-153534.txt',sep=" ", header=None)
Day87_fs1 = pd.read_csv(root+'position 20200129-161806.txt',sep=" ", header=None)
Day88_fs2 = pd.read_csv(root+'position 20200130-102126.txt',sep=" ", header=None)
Day88_fs1 = pd.read_csv(root+'position 20200130-111741.txt',sep=" ", header=None)
Day89_fs2 = pd.read_csv(root+'position 20200130-161126.txt',sep=" ", header=None)
Day89_fs1 = pd.read_csv(root+'position 20200130-151829.txt',sep=" ", header=None)
Day90_fs2 = pd.read_csv(root+'position 20200203-154441.txt',sep=" ", header=None)
Day90_fs1 = pd.read_csv(root+'position 20200203-145842.txt',sep=" ", header=None)
Day91_fs2 = pd.read_csv(root+'position 20200204-125552.txt',sep=" ", header=None)
Day91_fs1 = pd.read_csv(root+'position 20200204-133905.txt',sep=" ", header=None)
Day92_fs2 = pd.read_csv(root+'position 20200205-143220.txt',sep=" ", header=None)
Day92_fs1 = pd.read_csv(root+'position 20200205-151052.txt',sep=" ", header=None)
Day93_fs2 = pd.read_csv(root+'position 20200206-133529.txt',sep=" ", header=None)
Day93_fs1 = pd.read_csv(root+'position 20200206-125706.txt',sep=" ", header=None)
# +
time = np.arange(0.01, len(Day86_fs1[0]), 0.01)  # NOTE(review): dead assignment — immediately overwritten on the next line
# Elapsed time of each sample relative to session start.
# NOTE(review): this is cumulative elapsed time, not per-step dt; the speed
# division in calculateSpeed_list presumably expects per-step intervals —
# confirm. Also, `time` shadows the `time` module imported at the top of
# this notebook file.
time= np.array(Day86_fs1[0][1:]-Day86_fs1[0][0])
def calculateSpeed_list(x, y, time):
    """Compute per-step speeds from paired coordinate sequences.

    Parameters:
        x, y: indexable coordinate sequences of equal length.
        time: indexable sequence of time values, one per step
            (index i divides the distance between samples i and i+1).

    Returns:
        List of len(y) - 1 speed values (distance / time per step).
    """
    # Removed the unused `travel` accumulator from the original version.
    speed = []
    for i in range(len(y) - 1):
        # Euclidean step length via math.hypot, divided by the step's time value.
        speed.append(math.hypot(x[i + 1] - x[i], y[i + 1] - y[i]) / time[i])
    return speed
# Speed trace for session Day86/fs1; columns 1 and 3 are the two coordinates
# used for the distance computation.
speed_list = calculateSpeed_list(Day86_fs1[1],Day86_fs1[3],time)
speed_list=np.array(speed_list)
print(speed_list.shape)
speed_list  # display the array in the notebook output
# -
# Recompute elapsed time and the speed trace, then plot a window of it.
time= np.array(Day86_fs1[0][1:]-Day86_fs1[0][0])
speed_list = calculateSpeed_list(Day86_fs1[1],Day86_fs1[3],time)
speed_list = [element * 10000 for element in speed_list]  # scale factor presumably converts to cm/s — TODO confirm units
fig, ax = plt.subplots()
ax.set(xlabel='time (10ms)', ylabel='rat speed cm/s',
       title='Rat speed around beacons')
ax.plot(speed_list[50000:70000])  # zoom into samples 50k-70k
time= np.array(Day86_fs1[0]-Day86_fs1[0][0])
time.shape  # display the shape in the notebook output
# ### Showing speed over all trials
# +
from scipy.ndimage.filters import gaussian_filter1d
# Sessions to compare across days 79-90: fs1 (first animal) and fs2 (second
# animal), plus the matching day labels used for the output file name.
list_of_days = [Day79_fs1,Day80_fs1,Day81_fs1,Day82_fs1,Day83_fs1,Day84_fs1,Day85_fs1,Day86_fs1,Day87_fs1,Day88_fs1,Day89_fs1,Day90_fs1]
list_of_days2 = [Day79_fs2,Day80_fs2,Day81_fs2,Day82_fs2,Day83_fs2,Day84_fs2,Day85_fs2,Day86_fs2,Day87_fs2,Day88_fs2,Day89_fs2,Day90_fs2]
Day_number_list =['79','80','81','82','83','84','85','86','87','88','89','90']
def calculateSpeed_list(x, y, time):
    """Compute per-step speeds from paired coordinate sequences.

    Parameters:
        x, y: indexable coordinate sequences of equal length.
        time: indexable sequence of time values, one per step
            (index i divides the distance between samples i and i+1).

    Returns:
        List of len(y) - 1 speed values (distance / time per step).
    """
    # Removed the unused `travel` accumulator from the original version.
    speed = []
    for i in range(len(y) - 1):
        # Euclidean step length via math.hypot, divided by the step's time value.
        speed.append(math.hypot(x[i + 1] - x[i], y[i + 1] - y[i]) / time[i])
    return speed
def Speed_over_days (list_of_fs1_days,list_of_fs2_days,list_of_number_of_days,smoothening = 5,cut=500 ) :
    """Plot smoothed running speed per session for two animals side by side.

    Parameters:
        list_of_fs1_days / list_of_fs2_days: per-session position DataFrames
            (column 0 used for length/time, columns 1 and 3 as coordinates).
        list_of_number_of_days: day labels; the first and last are used in
            the saved figure's file name.
        smoothening: sigma of the Gaussian smoothing applied to each trace.
        cut: number of initial samples dropped from every session.
    """
    fig, ax = plt.subplots(1,2, dpi=300)
    # (Removed the unused `LT_distance_fs2` accumulator.)
    # Left panel: one smoothed speed trace per fs1 session.
    for day in list_of_fs1_days:
        speed_list = (calculateSpeed_list(list(day[1][cut:]),list(day[3][cut:]),np.arange(0.01, len(day[0][cut:]), 0.01)))
        speed_list = [element * 10000 for element in speed_list] # to get to cm/s speed.
        ysmoothed = gaussian_filter1d(speed_list, sigma=smoothening)
        ax[0].plot((np.arange(0, len(day[0][cut:])-1, 1)),ysmoothed,linewidth=.5)
    # Right panel: one smoothed speed trace per fs2 session.
    for day in list_of_fs2_days:
        speed_list = (calculateSpeed_list(list(day[1][cut:]),list(day[3][cut:]),np.arange(0.01, len(day[0][cut:]), 0.01)))
        speed_list = [element * 10000 for element in speed_list] # to get to cm/s speed.
        ysmoothed = gaussian_filter1d(speed_list, sigma=smoothening)
        ax[1].plot((np.arange(0, len(day[0][cut:])-1, 1)),ysmoothed,linewidth=.5)
    ax[0].set_ylabel('centimeters/minute')
    ax[0].set_xlabel('time')
    ax[0].set_title('speed by animal over sessions')
    ax[0].set_ylim([0,20])
    ax[1].set_ylim([0,20])
    fig.tight_layout()
    # Fix: use the `list_of_number_of_days` parameter here — the original read
    # the global `Day_number_list`, silently ignoring this argument.
    plt.savefig(figures+'speed_stretched_over_days_'+list_of_number_of_days[0]+'-'+list_of_number_of_days[-1]+'.png', dpi = 1000)
    plt.show()
Speed_over_days(list_of_days, list_of_days2, Day_number_list)
# -
| Code/The_WORKS/20200404_FS_THE_WORKS_speed_Profiles_over_sessions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="7vAUgwlccDCn"
# # <font color=blue>Assignments for "Data Cleaning - Missing Values"</font>
# + [markdown] id="mKN1NgWAcDDC"
# In this assignment, you are going to use a dataset related to the US education system. Please download the ([dataset](https://www.kaggle.com/noriuk/us-education-datasets-unification-project/home)) from Kaggle. You are going to use `states_all.csv` within this dataset.
#
# To complete this assignment, submit the GitHub link of the Jupyter notebook file containing solutions to the questions below. You can discuss any questions with your mentor one-on-one or ask on Slack during office hours.
# + [markdown] id="14YqG3zTcDDE"
# **(1)** Find the types of variables in the dataset and the missing (null) ratio of each variable.
# + id="mv41O8nfcDDG" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1637483940560, "user_tz": -180, "elapsed": 7, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="34938b4b-cc51-4e1b-ee1c-c4e7d0659c43"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the US education dataset and inspect dtypes / non-null counts per column.
df = pd.read_csv("states_all.csv")
df.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="L9KIjc0Mu_ra" executionInfo={"status": "ok", "timestamp": 1637483945205, "user_tz": -180, "elapsed": 331, "user": {"displayName": "Serhan \u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="da95392c-9815-441b-a6c8-3a3fb142de8c"
df.isna().head()
# + colab={"base_uri": "https://localhost:8080/"} id="AL4E_egVw1YO" executionInfo={"status": "ok", "timestamp": 1637483950501, "user_tz": -180, "elapsed": 310, "user": {"displayName": "Serhan \u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="ff81e777-3731-430f-8234-044605cdf8c2"
df.isnull().sum()*100/df.shape[0]
# + [markdown] id="0P8mZt0CcDDG"
# **(2)** You may have noticed that our data has a year column. For now, forget about the year data and assume that each observation was made in the same year. Consider how you can fill in missing values for each variable. Filling in the missing values with a value is meaningful for which variables and for which ones?
# + id="yDrJvPmCcDDH"
"""
The only non-null value is state now, so we can arrange the variables that are in the same state,
then use those grouped data to fill out the missing data.
Filling in the missing values with the given values is only reasonable for the ones who got the result below 10 in the upper code.
"""
# + [markdown] id="PTFns1NOcDDH"
# **(3)** Now it's time to consider the factor of time! Review your answer in question 2 and fill in the missing data based on the values observed during that year. For example, if you want to fill a value with an average value, calculate the average of that year.
# + id="xqv98ZIvcDDH" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1637483977422, "user_tz": -180, "elapsed": 333, "user": {"displayName": "Serhan \u00d6<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="30da8536-3674-4d68-d05e-39fdcc87069e"
# Row indices where GRADES_ALL_G is missing, displayed before imputation.
indices=np.where(np.isnan(df.GRADES_ALL_G))[0].tolist()
df.GRADES_ALL_G[indices]
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="xgWkyrEPqLnz" executionInfo={"status": "ok", "timestamp": 1637484335535, "user_tz": -180, "elapsed": 322, "user": {"displayName": "Serhan \u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="969c0e68-30ec-48b9-849c-9fefd50cd476"
# Impute missing GRADES_ALL_G with the column mean, in place.
# NOTE(review): this uses the overall mean, not a per-year mean as question 3
# asks for — confirm whether grouping by YEAR was intended.
df['GRADES_ALL_G'].fillna(df['GRADES_ALL_G'].mean(), inplace=True)
df.head()
# + [markdown] id="vO-MlrtjcDDI"
# **(4)** This time, fill in the missing values by interpolating.
# + id="PCw2g9-hcDDI" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1637337945514, "user_tz": -180, "elapsed": 268, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiHrfv372wn98VVRXVKRE9HrNOFRYFJ236z3hwSSQ=s64", "userId": "17824916347375562391"}} outputId="dbc8e830-7421-4c2b-8215-3c0efef78455"
df.GRADES_ALL_G.interpolate(method ='linear', limit_direction ='forward')
# + [markdown] id="zexfIx8RcDDI"
# **(5)** Compare your answers on the second, third and fourth questions. Do you notice a meaningful difference?
# + id="2QrCAsAGcDDJ"
# The 3rd solution is way better than the 4th one, since GRADES_ALL column is unsorted.
# Thus, using mean of the values is clever-ish.
| EDA_Assignments/A_03_DataCleaningMissingValues_en_SerhanOnerAksakal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# Helper: C-style printf — format with sprintf, then print the result.
printf <- function(...)print(sprintf(...))
# +
options( warn = -1 )  # suppress warnings for the rest of the session
# Load exit-poll style vote breakdowns, one TSV per demographic dimension.
age <- read.csv("age.tsv", sep="\t")
head(age)
# -
education <- read.csv("education.tsv", sep="\t")
head(education)
gender <- read.csv("gender.tsv", sep="\t")
head(gender)
ideology <- read.csv("ideology.tsv", sep="\t")
head(ideology)
income <- read.csv("income.tsv", sep="\t")
head(income)
orientation <- read.csv("orientation.tsv", sep="\t")
head(orientation)
party <- read.csv("party.tsv", sep="\t")
head(party)
race <- read.csv("race.tsv", sep="\t")
head(race)
region <- read.csv("region.tsv", sep="\t")
head(region)
# For each dimension, report the category with the highest Clinton count.
printf("Most Clinton voters from %s",age[which.max(age$Clinton),'age'])
printf("Most Clinton voters from %s",education[which.max(education$Clinton),'education'])
printf("Most Clinton voters from %s",gender[which.max(gender$Clinton),'gender'])
printf("Most Clinton voters from %s",ideology[which.max(ideology$Clinton),'ideology'])
printf("Most Clinton voters from %s",income[which.max(income$Clinton),'income'])
printf("Most Clinton voters from %s",orientation[which.max(orientation$Clinton),'orientation'])
printf("Most Clinton voters from %s",party[which.max(party$Clinton),'party'])
printf("Most Clinton voters from %s",race[which.max(race$Clinton),'race'])
printf("Most Clinton voters from %s",region[which.max(region$Clinton),'region'])
printf("Most Clinton voters from %s",religion[which.max(religion$Clinton),'religion'])
# Likewise, the category with the highest Trump count per dimension.
printf("Most Trump voters from %s",age[which.max(age$Trump),'age'])
printf("Most Trump voters from %s",education[which.max(education$Trump),'education'])
printf("Most Trump voters from %s",gender[which.max(gender$Trump),'gender'])
printf("Most Trump voters from %s",ideology[which.max(ideology$Trump),'ideology'])
printf("Most Trump voters from %s",income[which.max(income$Trump),'income'])
printf("Most Trump voters from %s",orientation[which.max(orientation$Trump),'orientation'])
printf("Most Trump voters from %s",party[which.max(party$Trump),'party'])
printf("Most Trump voters from %s",race[which.max(race$Trump),'race'])
printf("Most Trump voters from %s",region[which.max(region$Trump),'region'])
printf("Most Trump voters from %s",religion[which.max(religion$Trump),'religion'])
| Section 6/Elections Data using R.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # 5 simulate a scenario
# change ${CORE_ROOT} to your path of core.
# change ${CORE_ROOT} to your path of core.
export CORE_ROOT="${HOME}/core"
# change ${PJ_ROOT} to your path of uoa-poc2.
export PJ_ROOT="${HOME}/uoa-poc2"
cd ${PJ_ROOT};pwd
# example)
# ```
# /Users/user/uoa-poc2
# ```
# ## load environment variables
# load from `core`
source ${CORE_ROOT}/docs/environments/azure_aks/env
# load from `uoa-poc2`
source ${PJ_ROOT}/docs/environments/azure_aks/env
# ## setup alias
# `now` prints a local timestamp with a fixed +09:00 offset;
# `iso8601` prints the current UTC time in ISO-8601 form.
alias now="python -c 'import datetime; print(datetime.datetime.now().strftime(\"%Y-%m-%dT%H:%M:%S.%f+09:00\"))'"
alias iso8601="python -c 'import datetime; print(datetime.datetime.now(tz=datetime.timezone.utc).isoformat(timespec=\"seconds\"))'"
# ## simulate a scenario
# ### 1. ship an item to a destination
# #### post a shipment data (as ORDERING system)
# Select the bearer token whose allowed paths include /controller/*.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens | map(select(.allowed_paths[] | contains ("^/controller/.*$"))) | .[0].token' -r)
# POST a shipment (destination plus a stock-quantity update) to the
# controller API; the JSON body is supplied via the heredoc below.
curl -i -H "Authorization: bearer ${TOKEN}" -H "Content-type: application/json" https://api.${DOMAIN}/controller/api/v1/shipments/ -X POST -d @-<<__EOS__
{
  "destination": {
    "name": "会議室1"
  },
  "updated": [
    {
      "prev_quantity": 5,
      "new_quantity": 4,
      "reservation": 1,
      "title": "美味しい天然水(2L)",
      "place": "倉庫1"
    }
  ],
  "caller": "zaico-extensions"
}
__EOS__
# example)
#
# ```
# HTTP/1.1 201 Created
# server: envoy
# date: Fri, 15 Nov 2019 02:06:05 GMT
# content-type: application/json
# content-length: 160
# access-control-allow-origin: *
# x-envoy-upstream-service-time: 305
#
# {"caller":"ordering","delivery_robot":{"id":"delivery_robot_02"},"order":{"destination":"place_Jdash","source":"place_V","via":["place_L"]},"result":"success"}
# ```
# #### confirm the MQTT message
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmd', ... (157 bytes))
# {"send_cmd":{"time":"2019-11-15T11:06:05.162+09:00","cmd":"navi","waypoints":[{"point":{"x":-0.5,"y":-1.5,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-0.46}}]}}
# ```
# #### confirm ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` )
# example)
# ```yaml
# id: "delivery_robot_02"
# type: "delivery_robot"
# time: "2019-11-15T11:06:05.162+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: -0.5
# y: -1.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -0.46
# ---
# ```
# #### confirm delivery robot entity (delivery_robot_02)
# Fetch the robot's command status and route attributes from Orion.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_02}/?attrs=send_cmd_status,current_routes,navigating_waypoints,remaining_waypoints_list" | jq .
# confirm below:
#
# * `send_cmd_status` is `PENDING`.
# * `current_routes`, `navigating_waypoints` and `remaining_waypoints_list` is filled.
# * `navigating_waypoints.waypoints` is the same of `waypoints` of MQTT message and ros message.
# #### confirm token entity
# List all token entities to verify none are locked and no waiters exist.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * all token is not locked. (`is_locked` is `false`)
# * all `lock_owner_id` are empty.
# * all `waitings` are empty list.
# ### 2. notify that the command is received
# #### send a ros message
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute below command.
# ```
# rostopic pub -1 /robot_bridge/megarover_01/cmdexe uoa_poc2_msgs/r_result "
# id: 'delivery_robot_02'
# type: 'delivery_robot'
# time: '2019-11-08T14:00:00.000+09:00'
# received_time: '2019-11-13T14:51:51.416+09:00'
# received_cmd: 'navi'
# received_waypoints:
# -
# point:
# x: -0.5
# y: -1.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -0.46
# result: 'ack'
# errors: ['']
# "
# ```
# #### confirm the MQTT message
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmdexe', ... (283 bytes))
# {"send_cmd": {"received_time": "2019-11-13T14:51:51.416+09:00", "errors": [""], "received_cmd": "navi", "received_waypoints": [{"angle": {"yaw": -0.46, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.5, "x": -0.5, "z": 0.0}}], "result": "ack", "time": "2019-11-08T14:00:00.000+09:00"}}
# ```
# #### confirm delivery robot entity (delivery_robot_02)
# Check that the robot acknowledged the command (send_cmd_status / send_cmd_info).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_02}/?attrs=send_cmd_status,send_cmd_info" | jq .
# confirm below:
# * `send_cmd_status` turns to `OK` and `send_cmd_info` has the result message.
# ### 3. emulate the move of `delivery_robot_02`
# #### send a ros message
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 0.0, y: -0.2, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -0.2}
destination:
point: {x: -0.5, y: -1.5, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (531 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.2, "roll": 0.0, "pitch": 0.0}, "point": {"y": -0.2, "x": 0.0, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -0.46, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.5, "x": -0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:11:38.571021+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (96 bytes))
# {"send_state":{"time":"2019-11-15T11:11:38.738+09:00","state":"moving","destination":"倉庫1"}}
# ```
# #### confirm log messages
# example)
# ```
# 2019/11/15 02:11:38 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode= , next_mode=navi
# 2019/11/15 02:11:38 [ INFO] src.api - publish new state to robot ui(robotui_02), current_state= , next_state=moving, destination=倉庫1
# ```
# #### confirm the `current_mode` and `current_state` of delivery robot entity (delivery_robot_02)
# Verify the robot entity's current_mode / current_state after the state update.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_02}/?attrs=current_mode,current_state" | jq .
# example)
#
# `current_mode` is `navi`, and `current_state` is `moving`
#
# ```json
# {
# "id": "delivery_robot_02",
# "type": "delivery_robot",
# "current_mode": {
# "type": "string",
# "value": "navi",
# "metadata": {
# "TimeInstant": {
# "type": "datetime",
# "value": "2019-11-12T17:35:14.048+09:00"
# }
# }
# },
# "current_state": {
# "type": "string",
# "value": "moving",
# "metadata": {
# "TimeInstant": {
# "type": "datetime",
# "value": "2019-11-12T17:35:14.059+09:00"
# }
# }
# }
# }
# ```
# #### send a `navi` ros message again
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 0.2, y: -0.2, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -0.2}
destination:
point: {x: -0.5, y: -1.5, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# confirm below:
#
# * a MQTT message to `/delivery_robot/delivery_robot_02/attrs` is sent.
# * no MQTT message to `/robot_ui/robotui_02/cmd` is sent.
# * no log message is shown.
# #### confirm the `current_mode` and `current_state` of delivery robot entity (delivery_robot_02)
# Re-check current_mode / current_state — they should be unchanged.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_02}/?attrs=current_mode,current_state" | jq .
# `current_mode` and `current_state` were not be changed because the`mode` sent from delivery_robot_02 was not changed.
# ### 4. emulate the stop of `delivery_robot_02`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_02 stopping
# (`mode: standby` at its destination). Execute manually on the ROS PC.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
  point: {x: -0.5, y: -1.49, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -0.45}
destination:
  point: {x: -0.5, y: -1.5, z: 0.0}
  angle_optional:
    valid: true
    angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (537 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.45, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.49, "x": -0.5, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -0.46, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.5, "x": -0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T11:13:29.364882+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmd', ... (204 bytes))
# {"send_cmd":{"time":"2019-11-15T11:13:29.537+09:00","cmd":"navi","waypoints":[{"point":{"x":0.5,"y":-2,"z":0},"angle":null},{"point":{"x":6,"y":-1.99,"z":0},"angle":{"roll":0,"pitch":0,"yaw":3.14}}]}}
# ```
# #### confirm that a `lock` MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (147 bytes))
# {"send_token_info":{"time":"2019-11-15T11:13:29.580+09:00","token":"token_a","mode":"lock","lock_owner_id":"delivery_robot_02","prev_owner_id":""}}
# ```
# #### confirm that a ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_02"
# type: "delivery_robot"
# time: "2019-11-15T11:13:29.537+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 0.5
# y: -2.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 6.0
# y: -1.99
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 3.14
# ---
# ```
# #### confirm log messages
# example)
# ```
# 2019/11/15 02:13:29 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=navi, next_mode=standby
# 2019/11/15 02:13:29 [ INFO] src.token - lock token (token_a) by delivery_robot_02
# 2019/11/15 02:13:29 [ INFO] src.api - move robot(delivery_robot_02) to "place_L" (waypoints=[{'point': {'x': 0.5, 'y': -2, 'z': 0}, 'angle': None}, {'point': {'x': 6, 'y': -1.99, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': 3.14}}], order=None, caller=None
# 2019/11/15 02:13:29 [ INFO] src.api - publish new token_info to robot ui(robotui_02), token=token_a, mode=lock, lock_owner_id=delivery_robot_02, prev_owner_id=
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion to inspect their lock/wait state.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is locked by `delivery_robot_02`, but no robot is waiting for this token yet.
# * `token_b` and `token_c` is not locked.
# #### confirm the `current_mode` and `current_state` of delivery robot entity (delivery_robot_02)
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# Query Orion for delivery_robot_02's current_mode / current_state attributes.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_02}/?attrs=current_mode,current_state" | jq .
# example)
#
# `current_mode` is changed to `standby`, but `current_state` is not changed.
#
# ```json
# {
# "id": "delivery_robot_02",
# "type": "delivery_robot",
# "current_mode": {
# "type": "string",
# "value": "standby",
# "metadata": {
# "TimeInstant": {
# "type": "datetime",
# "value": "2019-11-13T14:57:20.575+09:00"
# }
# }
# },
# "current_state": {
# "type": "string",
# "value": "moving",
# "metadata": {
# "TimeInstant": {
# "type": "datetime",
# "value": "2019-11-13T14:55:12.979+09:00"
# }
# }
# }
# }
# ```
# #### send a `standby` ros message again
# Print the rostopic command that re-sends the same `standby` state for
# delivery_robot_02; `mode` is unchanged so no transition is expected.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
  point: {x: -0.5, y: -1.49, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -0.45}
destination:
  point: {x: -0.5, y: -1.5, z: 0.0}
  angle_optional:
    valid: true
    angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# confirm below:
#
# * a MQTT message to `/delivery_robot/delivery_robot_02/attrs` is sent.
# * no MQTT message to `/delivery_robot/delivery_robot_02/cmd` is sent.
# * no log message is shown.
# #### confirm the `current_mode` and `current_state` of delivery robot entity (delivery_robot_02)
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# Query Orion for delivery_robot_02's current_mode / current_state attributes.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_02}/?attrs=current_mode,current_state" | jq .
# `current_mode` and `current_state` were not changed because the `mode` sent from delivery_robot_02 was not changed.
# ### 5. emulate the move of `delivery_robot_02`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_02 moving
# (`mode: navi` toward waypoint {0.5, -2.0}). Execute manually on the ROS PC.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
  point: {x: 0.0, y: -1.1, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -0.4}
destination:
  point: {x: 0.5, y: -2.0, z: 0.0}
  angle_optional:
    valid: false
    angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (493 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.4, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.1, "x": 0.0, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -2.0, "x": 0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:16:44.344430+09:00"}
# ```
# #### confirm log messages
# example)
# ```
# 2019/11/15 02:16:44 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=standby, next_mode=navi
# ```
# #### confirm the `current_mode` and `current_state` of delivery robot entity (delivery_robot_02)
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# Query Orion for delivery_robot_02's current_mode / current_state attributes.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_02}/?attrs=current_mode,current_state" | jq .
# `current_mode` is changed to `navi`, but `current_state` is not changed.
# ### 6. ship items stored in different warehouses to another destination
# #### post a shipment data (as WAREHOUSE system)
# Select the bearer token whose allowed_paths permits access to /controller/
# (the WAREHOUSE system's token).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens | map(select(.allowed_paths[] | contains ("^/controller/.*$"))) | .[0].token' -r)
# POST a shipment (two items from two warehouses, one destination) to the
# controller API; the JSON body is supplied via the here-document below.
# NOTE: nothing may be inserted between the curl line and the heredoc body.
curl -i -H "Authorization: bearer ${TOKEN}" -H "Content-type: application/json" https://api.${DOMAIN}/controller/api/v1/shipments/ -X POST -d @-<<__EOS__
{
  "destination": {
    "name": "会議室2"
  },
  "updated": [
    {
      "prev_quantity": 4,
      "new_quantity": 3,
      "reservation": 1,
      "title": "美味しい天然水(2L)",
      "place": "倉庫1"
    },
    {
      "prev_quantity": 10,
      "new_quantity": 8,
      "reservation": 2,
      "title": "地域名産炭酸水(500ml)",
      "place": "倉庫2"
    }
  ]
}
__EOS__
# example)
#
# ```
# HTTP/1.1 201 Created
# server: envoy
# date: Fri, 15 Nov 2019 02:18:38 GMT
# content-type: application/json
# content-length: 171
# access-control-allow-origin: *
# x-envoy-upstream-service-time: 354
#
# {"caller":"warehouse","delivery_robot":{"id":"delivery_robot_01"},"order":{"destination":"place_Idash","source":"place_Q","via":["place_L","place_G"]},"result":"success"}
# ```
# #### confirm the MQTT message
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmd', ... (157 bytes))
# {"send_cmd":{"time":"2019-11-15T11:18:38.350+09:00","cmd":"navi","waypoints":[{"point":{"x":-0.5,"y":-1.5,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-0.46}}]}}
# ```
# #### confirm ros message for delivery_robot_01 ( `/robot_bridge/turtlebot_01/cmd` )
# example)
#
# ```yaml
# id: "delivery_robot_01"
# type: "delivery_robot"
# time: "2019-11-15T11:18:38.350+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: -0.5
# y: -1.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -0.46
# ---
# ```
# #### confirm delivery robot entity (delivery_robot_01)
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# Query Orion for delivery_robot_01's command status and route attributes.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_01}/?attrs=send_cmd_status,current_routes,navigating_waypoints,remaining_waypoints_list" | jq .
# confirm below:
#
# * `send_cmd_status` is `PENDING`.
# * `current_routes`, `navigating_waypoints` and `remaining_waypoints_list` is filled.
# * `navigating_waypoints.waypoints` is the same of `waypoints` of MQTT message and ros message.
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion to inspect their lock/wait state.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_02`, but no robot is waiting for this token yet.
# * `token_b` and `token_c` is not locked.
# ### 7. notify that the command is received
# #### send a ros message
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute below command.
# ```
# rostopic pub -1 /robot_bridge/turtlebot_01/cmdexe uoa_poc2_msgs/r_result "
# id: 'delivery_robot_01'
# type: 'delivery_robot'
# time: '2019-11-13T15:01:14.279+09:00'
# received_time: '2019-11-13T15:01:14.179+09:00'
# received_cmd: 'navi'
# received_waypoints:
# -
# point:
# x: -0.5
# y: -1.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -0.46
# result: 'ack'
# errors: ['']
# "
# ```
# #### confirm the MQTT message
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmdexe', ... (283 bytes))
# {"send_cmd": {"received_time": "2019-11-13T15:01:14.179+09:00", "errors": [""], "received_cmd": "navi", "received_waypoints": [{"angle": {"yaw": -0.46, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.5, "x": -0.5, "z": 0.0}}], "result": "ack", "time": "2019-11-13T15:01:14.279+09:00"}}
# ```
# #### confirm delivery robot entity (delivery_robot_01)
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# Query Orion for delivery_robot_01's send_cmd_status / send_cmd_info.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_01}/?attrs=send_cmd_status,send_cmd_info" | jq .
# confirm below:
# * `send_cmd_status` turns to `OK` and `send_cmd_info` has the result message.
# ### 8. emulate the move of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 moving
# (`mode: navi` toward waypoint {-0.5, -1.5}). Execute manually on the ROS PC.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
  point: {x: 0.0, y: -0.2, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -0.2}
destination:
  point: {x: -0.5, y: -1.5, z: 0.0}
  angle_optional:
    valid: true
    angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (531 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.2, "roll": 0.0, "pitch": 0.0}, "point": {"y": -0.2, "x": 0.0, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -0.46, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.5, "x": -0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:21:51.073989+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (96 bytes))
# {"send_state":{"time":"2019-11-15T11:21:51.237+09:00","state":"moving","destination":"倉庫1"}}
# ```
# #### confirm log messages
# example)
# ```
# 2019/11/15 02:21:51 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode= , next_mode=navi
# 2019/11/15 02:21:51 [ INFO] src.api - publish new state to robot ui(robotui_01), current_state= , next_state=moving, destination=倉庫1
# ```
# #### confirm the `current_mode` and `current_state` of delivery robot entity (delivery_robot_01)
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# Query Orion for delivery_robot_01's current_mode / current_state attributes.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_01}/?attrs=current_mode,current_state" | jq .
# example)
#
# `current_mode` is `navi`, and `current_state` is `moving`
#
# ```json
# {
# "id": "delivery_robot_01",
# "type": "delivery_robot",
# "current_mode": {
# "type": "string",
# "value": "navi",
# "metadata": {
# "TimeInstant": {
# "type": "datetime",
# "value": "2019-11-13T15:05:00.209+09:00"
# }
# }
# },
# "current_state": {
# "type": "string",
# "value": "moving",
# "metadata": {
# "TimeInstant": {
# "type": "datetime",
# "value": "2019-11-13T15:05:00.221+09:00"
# }
# }
# }
# }
# ```
# #### send a `navi` ros message again
# Print the rostopic command that re-sends the same `navi` state for
# delivery_robot_01; `mode` is unchanged so no transition is expected.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
  point: {x: 0.0, y: -0.2, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -0.2}
destination:
  point: {x: -0.5, y: -1.5, z: 0.0}
  angle_optional:
    valid: true
    angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# confirm below:
#
# * a MQTT message to `/delivery_robot/delivery_robot_01/attrs` is sent.
# * no MQTT message to `/robot_ui/robotui_01/cmd` is sent.
# * no log message is shown.
# #### confirm the `current_mode` and `current_state` of delivery robot entity (delivery_robot_01)
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# Query Orion for delivery_robot_01's current_mode / current_state attributes.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${DELIVERY_ROBOT_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/${DELIVERY_ROBOT_01}/?attrs=current_mode,current_state" | jq .
# `current_mode` and `current_state` were not changed because the `mode` sent from delivery_robot_01 was not changed.
# ### 9. emulate the stop of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 stopping
# (`mode: standby` at its destination). Execute manually on the ROS PC.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
  point: {x: -0.5, y: -1.51, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
destination:
  point: {x: -0.5, y: -1.5, z: 0.0}
  angle_optional:
    valid: true
    angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (537 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.46, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.51, "x": -0.5, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -0.46, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.5, "x": -0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T11:23:07.141363+09:00"}
# ```
# #### confirm the MQTT and ROS message
#
# * no MQTT message is sent to `/delivery_robot/delivery_robot_01/cmd`
# * no ros message for delivery_robot_01 ( `/robot_bridge/turtlebot_01/cmd` ) is sent
# * a `suspend` MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically because `token_a` has been locked by `delivery_robot_02`
#
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (150 bytes))
# {"send_token_info":{"time":"2019-11-15T11:23:07.305+09:00","token":"token_a","mode":"suspend","lock_owner_id":"delivery_robot_02","prev_owner_id":""}}
# ```
# `token_a` has been locked by `delivery_robot_02`, so `delivery_robot_01` cannot move into the restricted area
# #### confirm log messages
# example)
# ```
# 2019/11/15 02:23:07 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode=navi, next_mode=standby
# 2019/11/15 02:23:07 [ INFO] src.token - wait token (token_a) by delivery_robot_01
# 2019/11/15 02:23:07 [ INFO] src.api - publish new token_info to robot ui(robotui_01), token=token_a, mode=suspend, lock_owner_id=delivery_robot_02, prev_owner_id=
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion to inspect their lock/wait state.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_02`, and `delivery_robot_01` is now waiting for this token.
# * `token_b` and `token_c` is not locked.
# #### send a `standby` ros message again
# Print the rostopic command that re-sends the same `standby` state for
# delivery_robot_01; `mode` is unchanged so no transition is expected.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
  point: {x: -0.5, y: -1.51, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
destination:
  point: {x: -0.5, y: -1.5, z: 0.0}
  angle_optional:
    valid: true
    angle: {roll: 0.0, pitch: 0.0, yaw: -0.46}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# confirm below:
#
# * a MQTT message to `/delivery_robot/delivery_robot_01/attrs` is sent.
# * no MQTT message to `/delivery_robot/delivery_robot_01/cmd` is sent.
# * no log message is shown.
# ### 10. emulate the move of `delivery_robot_02`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_02 moving
# (`mode: navi` toward waypoint {0.5, -2.0}). Execute manually on the ROS PC.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
  point: {x: 0.1, y: -0.8, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -0.8}
destination:
  point: {x: 0.5, y: -2.0, z: 0.0}
  angle_optional:
    valid: false
    angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (493 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.8, "roll": 0.0, "pitch": 0.0}, "point": {"y": -0.8, "x": 0.1, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -2.0, "x": 0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:31:27.429845+09:00"}
# ```
# ### 11. emulate the stop of `delivery_robot_02`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_02 stopping
# (`mode: standby` at {4.71, -1.99}). Execute manually on the ROS PC.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
  point: {x: 4.71, y: -1.99, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -1.57}
destination:
  point: {x: 4.71, y: -1.99, z: 0.0}
  angle_optional:
    valid: true
    angle: {roll: 0.0, pitch: 0.0, yaw: -1.57}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (538 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -1.57, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.99, "x": 4.71, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -1.57, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.99, "x": 4.71, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T11:31:50.660751+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (97 bytes))
# {"send_state":{"time":"2019-11-15T11:31:50.796+09:00","state":"picking","destination":"倉庫1"}}
# ```
# #### confirm log messages
# example)
# ```
# 2019/11/15 02:31:50 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=navi, next_mode=standby
# 2019/11/15 02:31:50 [ INFO] src.api - publish new state to robot ui(robotui_02), current_state=moving, next_state=picking, destination=倉庫1
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion to inspect their lock/wait state.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_02`, and `delivery_robot_01` is now waiting for this token.
# * `token_b` and `token_c` is not locked.
# ### 12. emulate the completion of picking operation ( `delivery_robot_02`)
# #### post a movenext data
# Select the bearer token whose allowed_paths permits access to /controller/
# (the WAREHOUSE system's token). Use ${CORE_ROOT} like every other step in
# this document instead of the hard-coded ~/core path.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens | map(select(.allowed_paths[] | contains ("^/controller/.*$"))) | .[0].token' -r)
# PATCH the controller's "nexts" endpoint to signal that picking is complete
# and the robot should move to its next waypoint set. Use the api.${DOMAIN}
# host variable for consistency with all other commands in this document.
curl -i -H "Authorization: bearer ${TOKEN}" -H "Content-Type: application/json" https://api.${DOMAIN}/controller/api/v1/robots/${DELIVERY_ROBOT_02}/nexts/ -X PATCH -d '{}'
# example)
#
# ```
# HTTP/1.1 200 OK
# server: envoy
# date: Fri, 15 Nov 2019 02:33:18 GMT
# content-type: application/json
# content-length: 21
# access-control-allow-origin: *
# x-envoy-upstream-service-time: 167
#
# {"result":"success"}
# ```
# #### confirm the MQTT message
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmd', ... (202 bytes))
# {"send_cmd":{"time":"2019-11-15T11:33:18.229+09:00","cmd":"navi","waypoints":[{"point":{"x":0.5,"y":-2,"z":0},"angle":null},{"point":{"x":-0.5,"y":-2.5,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-1.8}}]}}
# ```
# #### confirm ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` )
# example)
#
# ```yaml
# id: "delivery_robot_02"
# type: "delivery_robot"
# time: "2019-11-15T11:33:18.229+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 0.5
# y: -2.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: -0.5
# y: -2.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -1.8
# ---
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret file.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion to inspect their lock/wait state.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_02`, and `delivery_robot_01` is still waiting for this token.
# * `token_b` and `token_c` is not locked.
# ### 13. notify that the command is received
# #### send a ros message
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute below command.
# ```
# rostopic pub -1 /robot_bridge/megarover_01/cmdexe uoa_poc2_msgs/r_result "
# id: 'delivery_robot_02'
# type: 'delivery_robot'
# time: '2019-11-13T15:11:57.778+09:00'
# received_time: '2019-11-13T15:11:56.778+09:00'
# received_cmd: 'navi'
# received_waypoints:
# -
# point:
# x: 0.5
# y: -2.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: -0.5
# y: -2.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -1.8
# result: 'ack'
# errors: ['']
# "
# ```
# #### confirm the MQTT message
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmdexe', ... (341 bytes))
# {"send_cmd": {"received_time": "2019-11-13T15:11:56.778+09:00", "errors": [""], "received_cmd": "navi", "received_waypoints": [{"angle": null, "point": {"y": -2.0, "x": 0.5, "z": 0.0}}, {"angle": {"yaw": -1.8, "roll": 0.0, "pitch": 0.0}, "point": {"y": -2.5, "x": -0.5, "z": 0.0}}], "result": "ack", "time": "2019-11-13T15:11:57.778+09:00"}}
# ```
# ### 14. emulate the move of `delivery_robot_02`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_02 moving again
# (`mode: navi` toward waypoint {0.5, -2.0}). Execute manually on the ROS PC.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
  point: {x: 0.6, y: -1.9, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -0.01}
destination:
  point: {x: 0.5, y: -2.0, z: 0.0}
  angle_optional:
    valid: false
    angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (494 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.01, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.9, "x": 0.6, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -2.0, "x": 0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:34:59.123268+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (99 bytes))
# {"send_state":{"time":"2019-11-15T11:34:59.258+09:00","state":"moving","destination":"会議室1"}}
# ```
# #### confirm log messages
# example)
# ```
# 2019/11/15 02:34:59 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=standby, next_mode=navi
# 2019/11/15 02:34:59 [ INFO] src.api - publish new state to robot ui(robotui_02), current_state=picking, next_state=moving, destination=会議室1
# ```
# ### 15. emulate the stop of `delivery_robot_02`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_02 stopping
# (`mode: standby` at {-0.5, -2.5}). Execute manually on the ROS PC.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
  point: {x: -0.5, y: -2.5, z: 0.0}
  angle: {roll: 0.0, pitch: 0.0, yaw: -1.8}
destination:
  point: {x: -0.5, y: -2.5, z: 0.0}
  angle_optional:
    valid: true
    angle: {roll: 0.0, pitch: 0.0, yaw: -1.8}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
  voltage: 0.0
  current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (534 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -1.8, "roll": 0.0, "pitch": 0.0}, "point": {"y": -2.5, "x": -0.5, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -1.8, "roll": 0.0, "pitch": 0.0}, "point": {"y": -2.5, "x": -0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T11:35:49.879274+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmd', ... (206 bytes))
# {"send_cmd":{"time":"2019-11-15T11:35:50.052+09:00","cmd":"navi","waypoints":[{"point":{"x":-0.94,"y":-5.9,"z":0},"angle":null},{"point":{"x":0.7,"y":-7.4,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-0.15}}]}}
# ```
# #### confirm that a ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_02"
# type: "delivery_robot"
# time: "2019-11-15T11:35:50.052+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: -0.94
# y: -5.9
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 0.7
# y: -7.4
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -0.15
# ---
# ```
# #### confirm that a `release` MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (167 bytes))
# {"send_token_info":{"time":"2019-11-15T11:35:50.098+09:00","token":"token_a","mode":"release","lock_owner_id":"delivery_robot_01","prev_owner_id":"delivery_robot_02"}}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_01/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmd', ... (204 bytes))
# {"send_cmd":{"time":"2019-11-15T11:35:50.192+09:00","cmd":"navi","waypoints":[{"point":{"x":0.5,"y":-2,"z":0},"angle":null},{"point":{"x":6,"y":-1.99,"z":0},"angle":{"roll":0,"pitch":0,"yaw":3.14}}]}}
# ```
# #### confirm that a ros message for delivery_robot_01 ( `/robot_bridge/turtlebot_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_01"
# type: "delivery_robot"
# time: "2019-11-15T11:35:50.192+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 0.5
# y: -2.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 6.0
# y: -1.99
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 3.14
# ---
# ```
# #### confirm that `resume` and `lock` MQTT messages to `/robot_ui/robotui_01/cmd` are sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (166 bytes))
# {"send_token_info":{"time":"2019-11-15T11:35:50.243+09:00","token":"token_a","mode":"resume","lock_owner_id":"delivery_robot_01","prev_owner_id":"delivery_robot_02"}}
# ```
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (164 bytes))
# {"send_token_info":{"time":"2019-11-15T11:35:50.285+09:00","token":"token_a","mode":"lock","lock_owner_id":"delivery_robot_01","prev_owner_id":"delivery_robot_02"}}
# ```
# #### confirm log messages
# `delivery_robot_02` released `token_a`, so `delivery_robot_01`, which was waiting for this token, can now lock it and move into the restricted area.
#
# example)
#
# ```
# 2019/11/15 02:35:49 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=navi, next_mode=standby
# 2019/11/15 02:35:49 [ INFO] src.token - switch token (token_a) from delivery_robot_02 to delivery_robot_01
# 2019/11/15 02:35:50 [ INFO] src.api - move robot(delivery_robot_02) to "place_LB" (waypoints=[{'point': {'x': -0.94, 'y': -5.9, 'z': 0}, 'angle': None}, {'point': {'x': 0.7, 'y': -7.4, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': -0.15}}]
# 2019/11/15 02:35:50 [ INFO] src.api - publish new token_info to robot ui(robotui_02), token=token_a, mode=release, lock_owner_id=delivery_robot_01, prev_owner_id=delivery_robot_02
# 2019/11/15 02:35:50 [ INFO] src.api - move robot(delivery_robot_01) to "place_L" (waypoints=[{'point': {'x': 0.5, 'y': -2, 'z': 0}, 'angle': None}, {'point': {'x': 4.71, 'y': -1.99, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': -1.57}}]
# 2019/11/15 02:35:50 [ INFO] src.api - publish new token_info to robot ui(robotui_01), token=token_a, mode=resume, lock_owner_id=delivery_robot_01, prev_owner_id=delivery_robot_02
# 2019/11/15 02:35:50 [ INFO] src.api - publish new token_info to robot ui(robotui_01), token=token_a, mode=lock, lock_owner_id=delivery_robot_01, prev_owner_id=delivery_robot_02
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret (-r: raw string, no quotes).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion (NGSI v2) for the configured Fiware service/servicepath.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked, but its owner is switched from `delivery_robot_02` to `delivery_robot_01`, and `delivery_robot_01` is no longer waiting for this token.
# * `token_b` and `token_c` are not locked.
# ### 16. emulate the move of `delivery_robot_01`
# #### send a ros message
# Emulate the move of delivery_robot_01: print a rostopic command publishing a
# 'navi' r_state (en route to (0.5, -2.0)); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 0.1, y: -0.8, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -0.8}
destination:
point: {x: 0.5, y: -2.0, z: 0.0}
angle_optional:
valid: false
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (493 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.8, "roll": 0.0, "pitch": 0.0}, "point": {"y": -0.8, "x": 0.1, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -2.0, "x": 0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:40:34.011437+09:00"}
# ```
# ### 17. emulate the stop of `delivery_robot_01`
# #### send a ros message
# Emulate the stop of delivery_robot_01: print a rostopic command publishing a
# 'standby' r_state at (4.71, -1.99); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 4.71, y: -1.99, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -1.57}
destination:
point: {x: 4.71, y: -1.99, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: -1.57}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (538 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -1.57, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.99, "x": 4.71, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -1.57, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.99, "x": 4.71, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T11:41:03.269719+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (97 bytes))
# {"send_state":{"time":"2019-11-15T11:41:03.635+09:00","state":"picking","destination":"倉庫1"}}
# ```
# #### confirm log messages
# example)
# ```
# 2019/11/15 02:41:03 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode=navi, next_mode=standby
# 2019/11/15 02:41:03 [ INFO] src.api - publish new state to robot ui(robotui_01), current_state=moving, next_state=picking, destination=倉庫1
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret (-r: raw string, no quotes).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion (NGSI v2) for the configured Fiware service/servicepath.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_01`, and no robot is waiting for this token.
# * `token_b` and `token_c` are not locked.
# ### 18. emulate the move of `delivery_robot_02`
# #### send a ros message
# Emulate the move of delivery_robot_02: print a rostopic command publishing a
# 'navi' r_state (en route to (-0.94, -5.9)); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: -0.90, y: -5.9, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 1.01}
destination:
point: {x: -0.94, y: -5.9, z: 0.0}
angle_optional:
valid: false
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (496 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 1.01, "roll": 0.0, "pitch": 0.0}, "point": {"y": -5.9, "x": -0.9, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -5.9, "x": -0.94, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:43:03.972561+09:00"}
# ```
# ### 19. emulate the stop of `delivery_robot_02`
# #### send a ros message
# Emulate the stop of delivery_robot_02: print a rostopic command publishing a
# 'standby' r_state at (0.7, -7.4); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 0.7, y: -7.4, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -0.15}
destination:
point: {x: 0.7, y: -7.4, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: -0.15}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (534 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.15, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.4, "x": 0.7, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -0.15, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.4, "x": 0.7, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T11:43:27.652796+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmd', ... (201 bytes))
# {"send_cmd":{"time":"2019-11-15T11:43:27.822+09:00","cmd":"navi","waypoints":[{"point":{"x":1.7,"y":-8,"z":0},"angle":null},{"point":{"x":2.3,"y":-7.5,"z":0},"angle":{"roll":0,"pitch":0,"yaw":0.05}}]}}
# ```
# #### confirm that a `lock` MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (147 bytes))
# {"send_token_info":{"time":"2019-11-15T11:43:27.874+09:00","token":"token_b","mode":"lock","lock_owner_id":"delivery_robot_02","prev_owner_id":""}}
# ```
# #### confirm that a ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_02"
# type: "delivery_robot"
# time: "2019-11-15T11:43:27.822+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 1.7
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 2.3
# y: -7.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.05
# ---
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 02:43:27 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=navi, next_mode=standby
# 2019/11/15 02:43:27 [ INFO] src.token - lock token (token_b) by delivery_robot_02
# 2019/11/15 02:43:27 [ INFO] src.api - move robot(delivery_robot_02) to "place_RB" (waypoints=[{'point': {'x': 1.7, 'y': -8, 'z': 0}, 'angle': None}, {'point': {'x': 2.3, 'y': -7.5, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': 0.05}}]
# 2019/11/15 02:43:27 [ INFO] src.api - publish new token_info to robot ui(robotui_02), token=token_b, mode=lock, lock_owner_id=delivery_robot_02, prev_owner_id=
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret (-r: raw string, no quotes).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion (NGSI v2) for the configured Fiware service/servicepath.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_01`, and no robot is waiting for this token.
# * now, `token_b` is locked by `delivery_robot_02`.
# * `token_c` is not locked yet.
# ### 20. emulate the move of `delivery_robot_02`
# #### send a ros message
# Emulate the move of delivery_robot_02: print a rostopic command publishing a
# 'navi' r_state (en route to (2.3, -7.5)); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 2.1, y: -6.1, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -0.2}
destination:
point: {x: 2.3, y: -7.5, z: 0.0}
angle_optional:
valid: false
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (493 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.2, "roll": 0.0, "pitch": 0.0}, "point": {"y": -6.1, "x": 2.1, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -7.5, "x": 2.3, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:52:03.358945+09:00"}
# ```
# ### 21. emulate the stop of `delivery_robot_02`
# #### send a ros message
# Emulate the stop of delivery_robot_02: print a rostopic command publishing a
# 'standby' r_state at (2.3, -7.5); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 2.3, y: -7.5, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 0.05}
destination:
point: {x: 2.3, y: -7.5, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: 0.05}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (532 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 0.05, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.5, "x": 2.3, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": 0.05, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.5, "x": 2.3, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T11:52:39.742774+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmd', ... (152 bytes))
# {"send_cmd":{"time":"2019-11-15T11:52:39.920+09:00","cmd":"navi","waypoints":[{"point":{"x":4.9,"y":-7.3,"z":0},"angle":{"roll":0,"pitch":0,"yaw":0}}]}}
# ```
# #### confirm that a `release` MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (150 bytes))
# {"send_token_info":{"time":"2019-11-15T11:52:39.983+09:00","token":"token_b","mode":"release","lock_owner_id":"","prev_owner_id":"delivery_robot_02"}}
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmdexe', ... (256 bytes))
# ```
# #### confirm that a ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_02"
# type: "delivery_robot"
# time: "2019-11-15T11:52:39.920+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 4.9
# y: -7.3
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# ---
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 02:52:39 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=navi, next_mode=standby
# 2019/11/15 02:52:39 [ INFO] src.token - release token (token_b) by delivery_robot_02
# 2019/11/15 02:52:39 [ INFO] src.api - move robot(delivery_robot_02) to "place_LC" (waypoints=[{'point': {'x': 4.9, 'y': -7.3, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': 0}}]
# 2019/11/15 02:52:40 [ INFO] src.api - publish new token_info to robot ui(robotui_02), token=token_b, mode=release, lock_owner_id=, prev_owner_id=delivery_robot_02
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret (-r: raw string, no quotes).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion (NGSI v2) for the configured Fiware service/servicepath.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_01`, and no robot is waiting for this token.
# * `token_b` is released.
# * `token_c` is not locked yet.
# ### 22. emulate the move of `delivery_robot_02`
# #### send a ros message
# Emulate the move of delivery_robot_02: print a rostopic command publishing a
# 'navi' r_state (en route to (4.9, -7.3)); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 2.4, y: -7.1, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
destination:
point: {x: 4.9, y: -7.3, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (527 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.1, "x": 2.4, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.3, "x": 4.9, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:55:54.427042+09:00"}
# ```
# ### 23. emulate the stop of `delivery_robot_02`
# #### send a ros message
# Emulate the stop of delivery_robot_02: print a rostopic command publishing a
# 'standby' r_state at (4.9, -7.3); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 4.9, y: -7.3, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
destination:
point: {x: 4.9, y: -7.3, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (530 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.3, "x": 4.9, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.3, "x": 4.9, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T11:56:24.566158+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmd', ... (300 bytes))
# {"send_cmd":{"time":"2019-11-15T11:56:24.727+09:00","cmd":"navi","waypoints":[{"point":{"x":6.06,"y":-8,"z":0},"angle":null},{"point":{"x":9.56,"y":-8,"z":0},"angle":null},{"point":{"x":10.6,"y":-8,"z":0},"angle":null},{"point":{"x":10.73,"y":-9.08,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-1.57}}]}}
# ```
# #### confirm that a `lock` MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (147 bytes))
# {"send_token_info":{"time":"2019-11-15T11:56:24.779+09:00","token":"token_c","mode":"lock","lock_owner_id":"delivery_robot_02","prev_owner_id":""}}
# ```
# #### confirm that a ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_02"
# type: "delivery_robot"
# time: "2019-11-15T11:56:24.727+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 6.06
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 9.56
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 10.6
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 10.73
# y: -9.08
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -1.57
# ---
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 02:56:24 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=navi, next_mode=standby
# 2019/11/15 02:56:24 [ INFO] src.token - lock token (token_c) by delivery_robot_02
# 2019/11/15 02:56:24 [ INFO] src.api - move robot(delivery_robot_02) to "place_Jdash" (waypoints=[{'point': {'x': 6.06, 'y': -8, 'z': 0}, 'angle': None}, {'point': {'x': 9.56, 'y': -8, 'z': 0}, 'angle': None}, {'point': {'x': 10.6, 'y': -8, 'z': 0}, 'angle': None}, {'point': {'x': 10.73, 'y': -9.08, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': -1.57}}]
# 2019/11/15 02:56:24 [ INFO] src.api - publish new token_info to robot ui(robotui_02), token=token_c, mode=lock, lock_owner_id=delivery_robot_02, prev_owner_id=
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret (-r: raw string, no quotes).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion (NGSI v2) for the configured Fiware service/servicepath.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_01`, and no robot is waiting for this token.
# * `token_b` is not locked.
# * `token_c` is locked by `delivery_robot_02`.
# ### 24. emulate the move of `delivery_robot_02`
# #### send a ros message
# Emulate the move of delivery_robot_02: print a rostopic command publishing a
# 'navi' r_state (en route to (6.06, -8.0)); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 6.0, y: -7.1, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
destination:
point: {x: 6.06, y: -8.0, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (528 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.1, "x": 6.0, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -8.0, "x": 6.06, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T11:59:04.755947+09:00"}
# ```
# ### 25. emulate the stop of `delivery_robot_02`
# #### send a ros message
# Emulate the stop of delivery_robot_02: print a rostopic command publishing a
# 'standby' r_state near (10.73, -9.08); run the echoed output on the ROS PC.
# NOTE: $(now) is expanded by the local shell at echo time.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 10.70, y: -9.01, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -1.56}
destination:
point: {x: 10.73 , y: -9.08, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: -1.57}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinetic PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (539 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -1.56, "roll": 0.0, "pitch": 0.0}, "point": {"y": -9.01, "x": 10.7, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -1.57, "roll": 0.0, "pitch": 0.0}, "point": {"y": -9.08, "x": 10.73, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T11:59:28.943973+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (103 bytes))
# {"send_state":{"time":"2019-11-15T11:59:29.080+09:00","state":"delivering","destination":"会議室1"}}
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 02:59:29 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=navi, next_mode=standby
# 2019/11/15 02:59:29 [ INFO] src.api - publish new state to robot ui(robotui_02), current_state=moving, next_state=delivering, destination=会議室1
# ```
# #### confirm token entity
# Extract the first bearer token from the auth-tokens secret (-r: raw string, no quotes).
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
# List all token entities from Orion (NGSI v2) for the configured Fiware service/servicepath.
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_01`, and no robot is waiting for this token.
# * `token_b` is not locked.
# * `token_c` is still locked by `delivery_robot_02`.
# ### 26. emulate the completion of picking operation ( `delivery_robot_01`)
# #### post a movenext data
# Select the bearer token whose allowed_paths grants access to the controller API
# (^/controller/.*$), rather than blindly taking the first token.
# Consistency fix: use ${CORE_ROOT} like every other command in this document
# instead of the hard-coded "~/core".
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens | map(select(.allowed_paths[] | contains ("^/controller/.*$"))) | .[0].token' -r)
# PATCH the "movenext" endpoint for delivery_robot_01: notifies the controller
# that the picking operation finished, triggering the next navigation command.
# Consistency fix: use api.${DOMAIN} like the rest of this document instead of
# the hard-coded "api.tech-sketch.jp".
curl -i -H "Authorization: bearer ${TOKEN}" -H "Content-Type: application/json" https://api.${DOMAIN}/controller/api/v1/robots/${DELIVERY_ROBOT_01}/nexts/ -X PATCH -d '{}'
# example)
#
# ```
# HTTP/1.1 200 OK
# server: envoy
# date: Fri, 15 Nov 2019 03:00:57 GMT
# content-type: application/json
# content-length: 21
# access-control-allow-origin: *
# x-envoy-upstream-service-time: 94
#
# {"result":"success"}
# ```
# #### confirm the MQTT message
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmd', ... (202 bytes))
# {"send_cmd":{"time":"2019-11-15T12:00:57.929+09:00","cmd":"navi","waypoints":[{"point":{"x":0.5,"y":-2,"z":0},"angle":null},{"point":{"x":-0.5,"y":-2.5,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-1.8}}]}}
# ```
# #### confirm ros message for delivery_robot_01 ( `/robot_bridge/turtlebot_01/cmd` )
# example)
#
# ```yaml
# id: "delivery_robot_01"
# type: "delivery_robot"
# time: "2019-11-15T12:00:57.929+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 0.5
# y: -2.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: -0.5
# y: -2.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -1.8
# ---
# ```
# #### confirm token entity
# Read the first bearer token from the auth-tokens secret and list all Token
# entities from Orion to inspect their lock state.
# jq reads the file directly (no useless cat); the URL is quoted so the shell
# cannot glob-expand the '?' in it.
TOKEN=$(jq -r '.[0].settings.bearer_tokens[0].token' "${CORE_ROOT}/secrets/auth-tokens.json")
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE}" | jq .
# confirm below:
#
# * `token_a` is still locked by `delivery_robot_01`.
# * `token_b` is not locked.
# * `token_c` is still locked by `delivery_robot_02`.
# ### 27. notify that the command is received
# #### send a ros message
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute below command.
# ```
# rostopic pub -1 /robot_bridge/turtlebot_01/cmdexe uoa_poc2_msgs/r_result "
# id: 'delivery_robot_01'
# type: 'delivery_robot'
# time: '2019-11-13T15:33:05.593+09:00'
# received_time: '2019-11-13T15:33:05.493+09:00'
# received_cmd: 'navi'
# received_waypoints:
# -
# point:
# x: 0.5
# y: -2.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: -0.5
# y: -2.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -1.8
# result: 'ack'
# errors: ['']
# "
# ```
# #### confirm the MQTT message
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmdexe', ... (341 bytes))
# {"send_cmd": {"received_time": "2019-11-13T15:33:05.493+09:00", "errors": [""], "received_cmd": "navi", "received_waypoints": [{"angle": null, "point": {"y": -2.0, "x": 0.5, "z": 0.0}}, {"angle": {"yaw": -1.8, "roll": 0.0, "pitch": 0.0}, "point": {"y": -2.5, "x": -0.5, "z": 0.0}}], "result": "ack", "time": "2019-11-13T15:33:05.593+09:00"}}
# ```
# ### 28. emulate the move of `delivery_robot_01`
# #### send a ros message
# Print (not run) the rostopic command that emulates delivery_robot_01
# reporting a 'navi' (moving) state near (0.6, -1.9); '$(now)' is expanded
# when this echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 0.6, y: -1.9, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -0.01}
destination:
point: {x: 0.5, y: -2.0, z: 0.0}
angle_optional:
valid: false
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (494 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.01, "roll": 0.0, "pitch": 0.0}, "point": {"y": -1.9, "x": 0.6, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -2.0, "x": 0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T12:02:11.575926+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (96 bytes))
# {"send_state":{"time":"2019-11-15T12:02:11.713+09:00","state":"moving","destination":"倉庫2"}}
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 03:02:11 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode=standby, next_mode=navi
# 2019/11/15 03:02:11 [ INFO] src.api - publish new state to robot ui(robotui_01), current_state=picking, next_state=moving, destination=倉庫2
# ```
# ### 29. emulate the stop of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 stopping
# ('standby') at its destination (-0.5, -2.5); '$(now)' is expanded when this
# echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: -0.5, y: -2.5, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -1.8}
destination:
point: {x: -0.5, y: -2.5, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: -1.8}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (534 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -1.8, "roll": 0.0, "pitch": 0.0}, "point": {"y": -2.5, "x": -0.5, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -1.8, "roll": 0.0, "pitch": 0.0}, "point": {"y": -2.5, "x": -0.5, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T12:03:19.461786+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmd', ... (206 bytes))
# {"send_cmd":{"time":"2019-11-15T12:03:19.823+09:00","cmd":"navi","waypoints":[{"point":{"x":-0.94,"y":-5.9,"z":0},"angle":null},{"point":{"x":0.7,"y":-7.4,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-0.15}}]}}
# ```
# #### confirm that a `lock` MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (150 bytes))
# {"send_token_info":{"time":"2019-11-15T12:03:19.882+09:00","token":"token_a","mode":"release","lock_owner_id":"","prev_owner_id":"delivery_robot_01"}}
# ```
# #### confirm that a ros message for delivery_robot_01 ( `/robot_bridge/turtlebot_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_01"
# type: "delivery_robot"
# time: "2019-11-15T12:03:19.823+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: -0.94
# y: -5.9
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 0.7
# y: -7.4
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -0.15
# ---
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 03:03:19 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode=navi, next_mode=standby
# 2019/11/15 03:03:19 [ INFO] src.token - release token (token_a) by delivery_robot_01
# 2019/11/15 03:03:19 [ INFO] src.api - move robot(delivery_robot_01) to "place_LB" (waypoints=[{'point': {'x': -0.94, 'y': -5.9, 'z': 0}, 'angle': None}, {'point': {'x': 0.7, 'y': -7.4, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': -0.15}}]
# 2019/11/15 03:03:19 [ INFO] src.api - publish new token_info to robot ui(robotui_01), token=token_a, mode=release, lock_owner_id=, prev_owner_id=delivery_robot_01
# ```
# #### confirm token entity
# Read the first bearer token from the auth-tokens secret and list all Token
# entities from Orion to inspect their lock state.
# jq reads the file directly (no useless cat); the URL is quoted so the shell
# cannot glob-expand the '?' in it.
TOKEN=$(jq -r '.[0].settings.bearer_tokens[0].token' "${CORE_ROOT}/secrets/auth-tokens.json")
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE}" | jq .
# confirm below:
#
# * `token_a` has been released.
# * `token_b` is not locked.
# * `token_c` is still locked by `delivery_robot_02`.
# ### 30. emulate the move of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 moving ('navi')
# near (-0.90, -5.9) toward waypoint (-0.94, -5.9); '$(now)' is expanded when
# this echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: -0.90, y: -5.9, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 1.01}
destination:
point: {x: -0.94, y: -5.9, z: 0.0}
angle_optional:
valid: false
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (496 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 1.01, "roll": 0.0, "pitch": 0.0}, "point": {"y": -5.9, "x": -0.9, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -5.9, "x": -0.94, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T12:06:32.114819+09:00"}
# ```
# ### 31. emulate the stop of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 stopping
# ('standby') at its destination (0.7, -7.4); '$(now)' is expanded when this
# echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 0.7, y: -7.4, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -0.15}
destination:
point: {x: 0.7, y: -7.4, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: -0.15}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (534 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -0.15, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.4, "x": 0.7, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -0.15, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.4, "x": 0.7, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T12:07:21.081359+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmd', ... (203 bytes))
# {"send_cmd":{"time":"2019-11-15T12:07:21.229+09:00","cmd":"navi","waypoints":[{"point":{"x":1.7,"y":-8,"z":0},"angle":null},{"point":{"x":1.71,"y":-9.7,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-1.57}}]}}
# ```
# #### confirm that a `lock` MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (147 bytes))
# {"send_token_info":{"time":"2019-11-15T12:07:21.272+09:00","token":"token_b","mode":"lock","lock_owner_id":"delivery_robot_01","prev_owner_id":""}}
# ```
# #### confirm that a ros message for delivery_robot_01 ( `/robot_bridge/turtlebot_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_01"
# type: "delivery_robot"
# time: "2019-11-15T12:07:21.229+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 1.7
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 1.71
# y: -9.7
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -1.57
# ---
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 03:07:21 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode=navi, next_mode=standby
# 2019/11/15 03:07:21 [ INFO] src.token - lock token (token_b) by delivery_robot_01
# 2019/11/15 03:07:21 [ INFO] src.api - move robot(delivery_robot_01) to "place_G" (waypoints=[{'point': {'x': 1.7, 'y': -8, 'z': 0}, 'angle': None}, {'point': {'x': 1.71, 'y': -9.7, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': -1.57}}]
# 2019/11/15 03:07:21 [ INFO] src.api - publish new token_info to robot ui(robotui_01), token=token_b, mode=lock, lock_owner_id=delivery_robot_01, prev_owner_id=
# ```
# #### confirm token entity
# Read the first bearer token from the auth-tokens secret and list all Token
# entities from Orion to inspect their lock state.
# jq reads the file directly (no useless cat); the URL is quoted so the shell
# cannot glob-expand the '?' in it.
TOKEN=$(jq -r '.[0].settings.bearer_tokens[0].token' "${CORE_ROOT}/secrets/auth-tokens.json")
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE}" | jq .
# confirm below:
#
# * `token_a` is not locked.
# * now, `token_b` has been locked by `delivery_robot_01`.
# * `token_c` is locked by `delivery_robot_02`.
# ### 32. emulate the move of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 moving ('navi')
# at (1.4, -5.1) toward waypoint (1.7, -8.0); '$(now)' is expanded when this
# echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 1.4, y: -5.1, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 1.01}
destination:
point: {x: 1.7, y: -8.0, z: 0.0}
angle_optional:
valid: false
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (493 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 1.01, "roll": 0.0, "pitch": 0.0}, "point": {"y": -5.1, "x": 1.4, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -8.0, "x": 1.7, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T12:10:34.546799+09:00"}
# ```
# ### 33. emulate the stop of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 stopping
# ('standby') at its destination (1.71, -9.7); '$(now)' is expanded when this
# echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 1.71, y: -9.7, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: -1.57}
destination:
point: {x: 1.71, y: -9.7, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: -1.57}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (536 bytes))
# {"errors": [], "pose": {"angle": {"yaw": -1.57, "roll": 0.0, "pitch": 0.0}, "point": {"y": -9.7, "x": 1.71, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": -1.57, "roll": 0.0, "pitch": 0.0}, "point": {"y": -9.7, "x": 1.71, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T12:10:58.740764+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (97 bytes))
# {"send_state":{"time":"2019-11-15T12:10:58.978+09:00","state":"picking","destination":"倉庫2"}}
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 03:10:58 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode=navi, next_mode=standby
# 2019/11/15 03:10:59 [ INFO] src.api - publish new state to robot ui(robotui_01), current_state=moving, next_state=picking, destination=倉庫2
# ```
# #### confirm token entity
# Read the first bearer token from the auth-tokens secret and list all Token
# entities from Orion to inspect their lock state.
# jq reads the file directly (no useless cat); the URL is quoted so the shell
# cannot glob-expand the '?' in it.
TOKEN=$(jq -r '.[0].settings.bearer_tokens[0].token' "${CORE_ROOT}/secrets/auth-tokens.json")
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE}" | jq .
# confirm below:
#
# * `token_a` is not locked.
# * `token_b` is locked by `delivery_robot_01`.
# * `token_c` is locked by `delivery_robot_02`.
# ### 34. emulate the completion of picking operation ( `delivery_robot_01`)
# #### post a movenext data
# Pick the bearer token whose allowed_paths grants access to /controller/ and
# PATCH the robot's "nexts" endpoint to emulate completion of picking.
# NOTE(review): replaced the hard-coded ~/core path and tech-sketch.jp domain
# with ${CORE_ROOT} and ${DOMAIN}, matching every other command in this
# procedure — TODO confirm both resolve to the same values in this environment.
TOKEN=$(jq -r '.[0].settings.bearer_tokens | map(select(.allowed_paths[] | contains ("^/controller/.*$"))) | .[0].token' "${CORE_ROOT}/secrets/auth-tokens.json")
curl -i -H "Authorization: bearer ${TOKEN}" -H "Content-Type: application/json" "https://api.${DOMAIN}/controller/api/v1/robots/${DELIVERY_ROBOT_01}/nexts/" -X PATCH -d '{}'
# example)
#
# ```
# HTTP/1.1 200 OK
# server: envoy
# date: Fri, 15 Nov 2019 03:11:49 GMT
# content-type: application/json
# content-length: 21
# access-control-allow-origin: *
# x-envoy-upstream-service-time: 104
#
# {"result":"success"}
# ```
# #### confirm the MQTT message
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmd', ... (201 bytes))
# {"send_cmd":{"time":"2019-11-15T12:11:50.102+09:00","cmd":"navi","waypoints":[{"point":{"x":1.7,"y":-8,"z":0},"angle":null},{"point":{"x":2.3,"y":-7.5,"z":0},"angle":{"roll":0,"pitch":0,"yaw":0.05}}]}}
# ```
# #### confirm ros message for delivery_robot_01 ( `/robot_bridge/turtlebot_01/cmd` )
# example)
#
# ```yaml
# id: "delivery_robot_01"
# type: "delivery_robot"
# time: "2019-11-15T12:11:50.102+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 1.7
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 2.3
# y: -7.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.05
# ---
# ```
# #### confirm token entity
# Read the first bearer token from the auth-tokens secret and list all Token
# entities from Orion to inspect their lock state.
# jq reads the file directly (no useless cat); the URL is quoted so the shell
# cannot glob-expand the '?' in it.
TOKEN=$(jq -r '.[0].settings.bearer_tokens[0].token' "${CORE_ROOT}/secrets/auth-tokens.json")
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE}" | jq .
# confirm below:
#
# * `token_a` is not locked.
# * `token_b` is locked by `delivery_robot_01`.
# * `token_c` is locked by `delivery_robot_02`.
# ### 35. notify that the command is received
# #### send a ros message
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute below command.
# ```
# rostopic pub -1 /robot_bridge/turtlebot_01/cmdexe uoa_poc2_msgs/r_result "
# id: 'delivery_robot_01'
# type: 'delivery_robot'
# time: '2019-11-13T15:41:18.806+09:00'
# received_time: '2019-11-13T15:41:18.306+09:00'
# received_cmd: 'navi'
# received_waypoints:
# -
# point:
# x: 1.7
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 2.3
# y: -7.5
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.05
# result: 'ack'
# errors: ['']
# "
# ```
# #### confirm the MQTT message
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmdexe', ... (340 bytes))
# {"send_cmd": {"received_time": "2019-11-13T15:41:18.306+09:00", "errors": [""], "received_cmd": "navi", "received_waypoints": [{"angle": null, "point": {"y": -8.0, "x": 1.7, "z": 0.0}}, {"angle": {"yaw": 0.05, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.5, "x": 2.3, "z": 0.0}}], "result": "ack", "time": "2019-11-13T15:41:18.806+09:00"}}
# ```
# ### 36. emulate the move of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 moving ('navi')
# at (1.4, -6.1) toward waypoint (1.7, -8.0); '$(now)' is expanded when this
# echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 1.4, y: -6.1, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 1.01}
destination:
point: {x: 1.7, y: -8.0, z: 0.0}
angle_optional:
valid: false
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (493 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 1.01, "roll": 0.0, "pitch": 0.0}, "point": {"y": -6.1, "x": 1.4, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -8.0, "x": 1.7, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T12:12:56.621923+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (99 bytes))
# {"send_state":{"time":"2019-11-15T12:12:56.757+09:00","state":"moving","destination":"会議室2"}}
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 03:12:56 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode=standby, next_mode=navi
# 2019/11/15 03:12:56 [ INFO] src.api - publish new state to robot ui(robotui_01), current_state=picking, next_state=moving, destination=会議室2
# ```
# #### confirm token entity
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is not locked.
# * `token_b` is locked by `delivery_robot_01`.
# * `token_c` is locked by `delivery_robot_02`.
# ### 37. emulate the stop of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 stopping
# ('standby') at its destination (2.3, -7.5); '$(now)' is expanded when this
# echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 2.3, y: -7.5, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 0.05}
destination:
point: {x: 2.3, y: -7.5, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: 0.05}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (532 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 0.05, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.5, "x": 2.3, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": 0.05, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.5, "x": 2.3, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T12:13:55.012815+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmd', ... (152 bytes))
# {"send_cmd":{"time":"2019-11-15T12:13:55.185+09:00","cmd":"navi","waypoints":[{"point":{"x":4.9,"y":-7.3,"z":0},"angle":{"roll":0,"pitch":0,"yaw":0}}]}}
# ```
# #### confirm that a `lock` MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (150 bytes))
# {"send_token_info":{"time":"2019-11-15T12:13:55.228+09:00","token":"token_b","mode":"release","lock_owner_id":"","prev_owner_id":"delivery_robot_01"}}
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmdexe', ... (256 bytes))
# ```
# #### confirm that a ros message for delivery_robot_01 ( `/robot_bridge/turtlebot_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_01"
# type: "delivery_robot"
# time: "2019-11-15T12:13:55.185+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 4.9
# y: -7.3
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# ---
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 03:13:55 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode=navi, next_mode=standby
# 2019/11/15 03:13:55 [ INFO] src.token - release token (token_b) by delivery_robot_01
# 2019/11/15 03:13:55 [ INFO] src.api - move robot(delivery_robot_01) to "place_LC" (waypoints=[{'point': {'x': 4.9, 'y': -7.3, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': 0}}]
# 2019/11/15 03:13:55 [ INFO] src.api - publish new token_info to robot ui(robotui_01), token=token_b, mode=release, lock_owner_id=, prev_owner_id=delivery_robot_01
# ```
# #### confirm token entity
# Read the first bearer token from the auth-tokens secret and list all Token
# entities from Orion to inspect their lock state.
# jq reads the file directly (no useless cat); the URL is quoted so the shell
# cannot glob-expand the '?' in it.
TOKEN=$(jq -r '.[0].settings.bearer_tokens[0].token' "${CORE_ROOT}/secrets/auth-tokens.json")
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" "https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE}" | jq .
# confirm below:
#
# * `token_a` is not locked.
# * `token_b` has been released.
# * `token_c` is locked by `delivery_robot_02`.
# ### 38. emulate the move of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 moving ('navi')
# at (4.1, -7.0) toward waypoint (4.9, -7.3); '$(now)' is expanded when this
# echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 4.1, y: -7.0, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
destination:
point: {x: 4.9, y: -7.3, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (527 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.0, "x": 4.1, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.3, "x": 4.9, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T12:16:26.917647+09:00"}
# ```
# ### 39. emulate the stop of `delivery_robot_01`
# #### send a ros message
# Print the rostopic command that emulates delivery_robot_01 stopping
# ('standby') at its destination (4.9, -7.3); '$(now)' is expanded when this
# echo runs — presumably a timestamp helper defined earlier, verify.
echo "rostopic pub -1 /robot_bridge/turtlebot_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_01'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 4.9, y: -7.3, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
destination:
point: {x: 4.9, y: -7.3, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_01/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/attrs', ... (530 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.3, "x": 4.9, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": 0.0, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.3, "x": 4.9, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T12:16:52.491320+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_01/cmd` is sent automatically
# `delivery_robot_01` can not lock `token_c`, so `delivery_robot_01` has to take refuge in 'place_U'
#
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmd', ... (157 bytes))
# {"send_cmd":{"time":"2019-11-15T12:16:52.686+09:00","cmd":"navi","waypoints":[{"point":{"x":5.8,"y":-7.15,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-1.57}}]}}
# ```
# #### confirm that a `lock` MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (150 bytes))
# {"send_token_info":{"time":"2019-11-15T12:16:52.754+09:00","token":"token_c","mode":"suspend","lock_owner_id":"delivery_robot_02","prev_owner_id":""}}
# ```
# #### confirm that a ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_01"
# type: "delivery_robot"
# time: "2019-11-15T12:16:52.686+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 5.8
# y: -7.15
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -1.57
# ---
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 03:16:52 [ INFO] src.api - update robot state, robot_id=delivery_robot_01, current_mode=navi, next_mode=standby
# 2019/11/15 03:16:52 [ INFO] src.token - wait token (token_c) by delivery_robot_01
# 2019/11/15 03:16:52 [ INFO] src.api - take refuge a robot(delivery_robot_01) in "place_U"
# 2019/11/15 03:16:52 [ INFO] src.api - publish new token_info to robot ui(robotui_01), token=token_c, mode=suspend, lock_owner_id=delivery_robot_02, prev_owner_id=
# ```
# #### confirm token entity
# Read the first bearer token from the secrets file, then query Orion for all
# token entities so their lock/wait state can be inspected.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` is not locked.
# * `token_b` is not locked.
# * `token_c` is still locked by `delivery_robot_02`, and `delivery_robot_01` has been waiting for this token.
# ### 40. emulate the completion of delivering operation ( `delivery_robot_02`)
# #### post a movenext data
# Pick the bearer token that is allowed to call the controller API.
# CONSISTENCY FIX: use ${CORE_ROOT} and api.${DOMAIN} like every other command
# in this document instead of the hard-coded `~/core` path and
# `api.tech-sketch.jp` host, so the step works on any deployment.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens | map(select(.allowed_paths[] | contains ("^/controller/.*$"))) | .[0].token' -r)
curl -i -H "Authorization: bearer ${TOKEN}" -H "Content-Type: application/json" https://api.${DOMAIN}/controller/api/v1/robots/${DELIVERY_ROBOT_02}/nexts/ -X PATCH -d '{}'
# example)
#
# ```
# HTTP/1.1 200 OK
# server: envoy
# date: Fri, 08 Nov 2019 12:33:51 GMT
# content-type: application/json
# content-length: 21
# access-control-allow-origin: *
# x-envoy-upstream-service-time: 121
#
# {"result":"success"}
# ```
# #### confirm the MQTT message
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmd', ... (294 bytes))
# {"send_cmd":{"time":"2019-11-15T12:20:29.746+09:00","cmd":"navi","waypoints":[{"point":{"x":10.6,"y":-8,"z":0},"angle":null},{"point":{"x":9.56,"y":-8,"z":0},"angle":null},{"point":{"x":6.06,"y":-8,"z":0},"angle":null},{"point":{"x":5,"y":-8.0,"z":0},"angle":{"roll":0,"pitch":0,"yaw":3.14}}]}}
# ```
# #### confirm ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` )
# example)
#
# ```yaml
# id: "delivery_robot_02"
# type: "delivery_robot"
# time: "2019-11-15T12:20:29.746+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 10.6
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 9.56
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 6.06
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 5.0
# y: -8.0
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 3.14
# ---
# ```
# ### 41. notify that the command is received
# #### send a ros message
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute below command.
# ```
# rostopic pub -1 /robot_bridge/megarover_01/cmdexe uoa_poc2_msgs/r_result "
# id: 'delivery_robot_02'
# type: 'delivery_robot'
# time: '2019-11-13T15:48:23.940+09:00'
# received_time: '2019-11-13T15:48:23.740+09:00'
# received_cmd: 'navi'
# received_waypoints:
# -
# point:
# x: 10.6
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 9.56
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 6.06
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 5.0
# y: -8.1
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 3.14
# result: 'ack'
# errors: ['']
# "
# ```
# #### confirm the MQTT message
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmdexe', ... (461 bytes))
# {"send_cmd": {"received_time": "2019-11-13T15:48:23.740+09:00", "errors": [""], "received_cmd": "navi", "received_waypoints": [{"angle": null, "point": {"y": -8.0, "x": 10.6, "z": 0.0}}, {"angle": null, "point": {"y": -8.0, "x": 9.56, "z": 0.0}}, {"angle": null, "point": {"y": -8.0, "x": 6.06, "z": 0.0}}, {"angle": {"yaw": 3.14, "roll": 0.0, "pitch": 0.0}, "point": {"y": -8.1, "x": 5.0, "z": 0.0}}], "result": "ack", "time": "2019-11-13T15:48:23.940+09:00"}}
# ```
# ### 42. emulate the move of `delivery_robot_02`
# #### send a ros message
# Print a `rostopic pub` command that fakes a mid-route "navi" state report
# from delivery_robot_02 while it is heading to its first waypoint.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'navi'
errors: ['']
pose:
point: {x: 10.1, y: -7.1, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 1.5}
destination:
point: {x: 10.6, y: -8.0, z: 0.0}
angle_optional:
valid: false
angle: {roll: 0.0, pitch: 0.0, yaw: 0.0}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (494 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 1.5, "roll": 0.0, "pitch": 0.0}, "point": {"y": -7.1, "x": 10.1, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": null, "point": {"y": -8.0, "x": 10.6, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "navi", "time": "2019-11-15T12:21:53.907278+09:00"}
# ```
# #### confirm that a MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (104 bytes))
# {"send_state":{"time":"2019-11-15T12:21:54.100+09:00","state":"moving","destination":"待機場所2"}}
# ```
# #### confirm log messages
# example)
#
# ```
# 2019/11/15 03:21:53 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=standby, next_mode=navi
# 2019/11/15 03:21:54 [ INFO] src.api - publish new state to robot ui(robotui_02), current_state=delivering, next_state=moving, destination=待機場所2
# ```
# ### 43. emulate the stop of `delivery_robot_02`
# #### send a ros message
# Print a `rostopic pub` command that fakes a "standby" state report from
# delivery_robot_02 after it has arrived at its final waypoint.
echo "rostopic pub -1 /robot_bridge/megarover_01/state uoa_poc2_msgs/r_state \"
id: 'delivery_robot_02'
type: 'delivery_robot'
time: '$(now)'
mode: 'standby'
errors: ['']
pose:
point: {x: 5.0, y: -8.1, z: 0.0}
angle: {roll: 0.0, pitch: 0.0, yaw: 3.14}
destination:
point: {x: 5.0, y: -8.1, z: 0.0}
angle_optional:
valid: true
angle: {roll: 0.0, pitch: 0.0, yaw: 3.14}
covariance: [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
battery:
voltage: 0.0
current_optional: {valid: false, current: 0.0}
\""
# _On ROS Kinect PC_
# 1. start a terminal
# 1. execute above command.
# #### confirm the MQTT message to `/delivery_robot/delivery_robot_02/attrs`
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/attrs', ... (532 bytes))
# {"errors": [], "pose": {"angle": {"yaw": 3.14, "roll": 0.0, "pitch": 0.0}, "point": {"y": -8.1, "x": 5.0, "z": 0.0}}, "battery": {"current": null, "voltage": 0.0}, "destination": {"angle": {"yaw": 3.14, "roll": 0.0, "pitch": 0.0}, "point": {"y": -8.1, "x": 5.0, "z": 0.0}}, "covariance": [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], "mode": "standby", "time": "2019-11-15T12:22:38.714340+09:00"}
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_02/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_02/cmd', ... (155 bytes))
# {"send_cmd":{"time":"2019-11-15T12:22:38.935+09:00","cmd":"navi","waypoints":[{"point":{"x":2.6,"y":-8.2,"z":0},"angle":{"roll":0,"pitch":0,"yaw":3.14}}]}}
# ```
# #### confirm that a `lock` MQTT message to `/robot_ui/robotui_02/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_02/cmd', ... (167 bytes))
# {"send_token_info":{"time":"2019-11-15T12:22:38.983+09:00","token":"token_c","mode":"release","lock_owner_id":"delivery_robot_01","prev_owner_id":"delivery_robot_02"}}
# ```
# #### confirm that a ros message for delivery_robot_02 ( `/robot_bridge/megarover_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_02"
# type: "delivery_robot"
# time: "2019-11-15T12:22:38.935+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 2.6
# y: -8.2
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 3.14
# ---
# ```
# #### confirm that a MQTT message to `/delivery_robot/delivery_robot_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/delivery_robot/delivery_robot_01/cmd', ... (251 bytes))
# {"send_cmd":{"time":"2019-11-15T12:22:39.086+09:00","cmd":"navi","waypoints":[{"point":{"x":6.06,"y":-8,"z":0},"angle":null},{"point":{"x":9.56,"y":-8,"z":0},"angle":null},{"point":{"x":9.43,"y":-9.1,"z":0},"angle":{"roll":0,"pitch":0,"yaw":-1.57}}]}}
# ```
# #### confirm that a `lock` MQTT message to `/robot_ui/robotui_01/cmd` is sent automatically
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (166 bytes))
# {"send_token_info":{"time":"2019-11-15T12:22:39.137+09:00","token":"token_c","mode":"resume","lock_owner_id":"delivery_robot_01","prev_owner_id":"delivery_robot_02"}}
# ```
# example)
#
# ```
# Client mosq-TrZhDaCwOfCzPWvm9w received PUBLISH (d0, q0, r0, m0, '/robot_ui/robotui_01/cmd', ... (164 bytes))
# {"send_token_info":{"time":"2019-11-15T12:22:39.186+09:00","token":"token_c","mode":"lock","lock_owner_id":"delivery_robot_01","prev_owner_id":"delivery_robot_02"}}
# ```
# #### confirm that a ros message for `delivery_robot_01` ( `/robot_bridge/turtlebot_01/cmd` ) is sent automatically
# example)
#
# ```yaml
# id: "delivery_robot_01"
# type: "delivery_robot"
# time: "2019-11-15T12:22:39.086+09:00"
# cmd: "navi"
# waypoints:
# -
# point:
# x: 6.06
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 9.56
# y: -8.0
# z: 0.0
# angle_optional:
# valid: False
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: 0.0
# -
# point:
# x: 9.43
# y: -9.1
# z: 0.0
# angle_optional:
# valid: True
# angle:
# roll: 0.0
# pitch: 0.0
# yaw: -1.57
# ---
# ```
# #### confirm log messages
# `delivery_robot_02` released `token_c`, so `delivery_robot_01`, which is waiting for this token, can lock it and move into the restricted area.
#
# example)
#
# ```
# 2019/11/15 03:22:38 [ INFO] src.api - update robot state, robot_id=delivery_robot_02, current_mode=navi, next_mode=standby
# 2019/11/15 03:22:38 [ INFO] src.token - switch token (token_c) from delivery_robot_02 to delivery_robot_01
# 2019/11/15 03:22:38 [ INFO] src.api - move robot(delivery_robot_02) to "place_LBdash" (waypoints=[{'point': {'x': 2.6, 'y': -8.2, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': 3.14}}], order=None, caller=None
# 2019/11/15 03:22:39 [ INFO] src.api - publish new token_info to robot ui(robotui_02), token=token_c, mode=release, lock_owner_id=delivery_robot_01, prev_owner_id=delivery_robot_02
# 2019/11/15 03:22:39 [ INFO] src.api - move robot(delivery_robot_01) to "place_Idash" (waypoints=[{'point': {'x': 6.06, 'y': -8, 'z': 0}, 'angle': None}, {'point': {'x': 9.56, 'y': -8, 'z': 0}, 'angle': None}, {'point': {'x': 9.43, 'y': -9.1, 'z': 0}, 'angle': {'roll': 0, 'pitch': 0, 'yaw': -1.57}}]
# 2019/11/15 03:22:39 [ INFO] src.api - publish new token_info to robot ui(robotui_01), token=token_c, mode=resume, lock_owner_id=delivery_robot_01, prev_owner_id=delivery_robot_02
# 2019/11/15 03:22:39 [ INFO] src.api - publish new token_info to robot ui(robotui_01), token=token_c, mode=lock, lock_owner_id=delivery_robot_01, prev_owner_id=delivery_robot_02
# ```
# #### confirm token entity
# Re-query the token entities to confirm that ownership of token_c was handed
# over from delivery_robot_02 to delivery_robot_01.
TOKEN=$(cat ${CORE_ROOT}/secrets/auth-tokens.json | jq '.[0].settings.bearer_tokens[0].token' -r)
curl -sS -H "Authorization: bearer ${TOKEN}" -H "Fiware-Service: ${FIWARE_SERVICE}" -H "Fiware-Servicepath: ${TOKEN_SERVICEPATH}" https://api.${DOMAIN}/orion/v2/entities/?type=${TOKEN_TYPE} | jq .
# confirm below:
#
# * `token_a` and `token_b` is not locked.
# * `token_c` is still locked, but its owner is switched from `delivery_robot_02` to `delivery_robot_01`, and `delivery_robot_01` is no longer waiting for this token.
| docs/en-jupyter_notebook/azure_aks/05_simulate_scenario.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Gesture Sensor Example
#
# This example shows how to use the [gesture sensor](http://www.dfrobot.com.cn/goods-1191.html) on the board. The sensor can identify 8 gesture states.
#
# For this notebook, a PYNQ Arduino is required, and the pins of gesture sensor should be set in the A4-A5.
from pynq.overlays.base import BaseOverlay
# Program the FPGA with the robot bitstream; `base` exposes the board IO.
base = BaseOverlay("Robot.bit")
# ### 1. Instantiate gesture sensor
# Before using the gesture sensor, define the mapping of gesture names to sensor codes.
#
# This notebook also serves to test the capabilities of the gesture sensor.
# Gesture codes reported by the sensor: a name's index in this tuple is the
# integer code returned by read_gesture().
_GESTURES = ("NONE", "LEFT", "RIGHT", "UP", "DOWN", "NEAR", "FAR", "ALL")
DIR = {name: code for code, name in enumerate(_GESTURES)}
# +
from pynq.lib.arduino import Gesture_sen
# Instantiate the gesture sensor on the Arduino header (channel 0).
gesture = Gesture_sen(base.ARDUINO, channel = 0)
# NOTE(review): the comments below look copied from the line-tracker example;
# the intro above says the gesture sensor uses pins A4-A5 -- confirm.
# If you use the default pin configuration, the shorter form
# `gesture = Gesture_sen(base.ARDUINO)` is also legal.
# -
# ### 2. Test the capacity of gesture sensor
#
# run the code below, linetrackers will print the states
#
# while the code is running, give your gesture to the sensor and observe the states change
from time import sleep

# Poll the gesture sensor once per second and print the detected code
# (see DIR for the name of each code). Interrupt the kernel to stop.
# IDIOM FIX: `while True` replaces the C-style `while (1)`.
while True:
    print(gesture.read_gesture())
    sleep(1)
| notebooks/arduino_gesture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import george
from george import kernels
from scipy.optimize import minimize
from time import time
import argparse
import pickle
import matplotlib.pyplot as plt
# Optimizer trace of the GP hyperparameter fit (COBYLA) for source 3962.
conv=np.load("./etgrid/3962_cobyla_convergence.npy")
conv.shape
# +
# Names of the 15 physical model parameters, in hyperparameter order.
param_names = ["Tstar", "logL_star", "logM_disk", "logR_disk", "h_0", "logR_in",
               "log gamma", "bix", "log logM_env", "logR_env", "f_cav", "ksi",
               "loga_max", "p", "biy"]

np.set_printoptions(suppress=True)

# Hyperparameters are stored as logs; sqrt(exp(.)) recovers the scales.
# NOTE(review): assumes log-variance storage -- confirm against the fit code.
z = np.sqrt(np.exp(conv[0][0]))    # first optimizer iterate
n = np.sqrt(np.exp(conv[738][0]))  # last optimizer iterate
# Entry 0 is the amplitude term; entries 1..15 align with param_names.
for label, first, last in zip(param_names, z[1:], n[1:]):
    print(label, np.round(first, 3), np.round(last, 3))
# -
# Load the PCA-compressed SED grid products for source 3962.
coords = np.load("./etgrid/3962_coords.npy")        # training parameter coordinates
eigenseds = np.load("./etgrid/3962_eigenseds.npy")  # PCA eigen-SEDs
weights = np.load("./etgrid/3962_weights.npy")      # PCA weights per training SED
pcamean = np.load("./etgrid/3962_mean.npy")         # PCA mean SED
sedsflat = np.load("./etgrid/sedsflat.npy")

# Assume a flat 1% uncertainty on every weight (GP observation errors).
yerrs = [[0.01 * w for w in weights[i]] for i in range(16)]
# Anisotropic squared-exponential kernel over the 15-D parameter space:
# amplitude 16, length scale 15 on axis 0 and 1 on each remaining axis.
kernel = 16 * kernels.ExpSquaredKernel(15**2, ndim=15, axes=0)
for axis in range(1, 15):
    kernel = kernel * kernels.ExpSquaredKernel(1**2, ndim=15, axes=axis)

# GP template using the fast HODLR solver; hyperparameters are set per-weight
# later, in pred().
blankhodlr = george.GP(kernel, solver=george.HODLRSolver)
def pred(pred_coords,fit):
    """Predict SEDs at new parameter-space coordinates using the trained GPs.

    pred_coords : array-like, shape (n_points, 15) -- locations to predict at.
    fit : flat sequence of 256 GP hyperparameters (16 vectors of 16 values).

    Returns (reconst_SEDs, preds): reconstructed SEDs per point and the raw
    per-weight GP predictions.

    NOTE(review): relies on module-level globals `blankhodlr`, `name`,
    `coords`, `yerrs`, `weights`, `eigenseds`, `pcamean`; `name` is not
    defined anywhere in this chunk -- confirm it is set before calling
    (it selects the hyperparameter layout for COBYLA fits).
    """
    gp=blankhodlr
    preds=[]
    # COBYLA fits store hyperparameters transposed relative to other fits.
    if "cobyla" in name:
        hyperparams=np.transpose(np.array(fit).reshape(16,16))
    else:
        hyperparams=np.array(fit).reshape(16,16)
    for i in range(len(weights)): # same covfunc for each weight and the sample mean
        gp.set_parameter_vector(hyperparams[i])
        gp.compute(coords,yerrs[i])
        # NOTE(review): local `pred` shadows this function's name -- harmless
        # inside the body, but renaming it would be clearer.
        pred, pred_var = gp.predict(weights[i], pred_coords, return_var=True)
        preds.append(pred)
    reconst_SEDs=[]
    for i in range(len(pred_coords)):
        # Reconstruct: first 15 predicted weights combine the eigen-SEDs;
        # the 16th prediction is an offset added on top of the PCA mean.
        reconst=np.dot(np.array(preds)[:,i][0:15],eigenseds[0:15]) + pcamean + np.array(preds)[:,i][15]
        reconst_SEDs.append(reconst)
    return reconst_SEDs, preds
| convergence.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sqlite3
import pandas as pd

# Month(s) of reddit comment-pair databases to convert into parallel text files.
timeframes = ['2015-05']


def _append_pairs(from_path, to_path, frame):
    """Append one batch of parent/reply pairs to the given .from/.to files.

    `parent` texts go to *from_path* and `comment` texts to *to_path*, one per
    line; str() guards against non-string comment values (e.g. NaN).
    """
    with open(from_path, 'a', encoding='utf8') as f:
        for content in frame['parent'].values:
            f.write(content + '\n')
    with open(to_path, 'a', encoding='utf8') as f:
        for content in frame['comment'].values:
            f.write(str(content) + '\n')


for timeframe in timeframes:
    connection = sqlite3.connect('{}.db'.format(timeframe))
    try:
        limit = 5000        # rows fetched per page
        last_unix = 0       # pagination cursor: unix time of last row seen
        cur_length = limit  # loop continues while a full page came back
        counter = 0
        test_done = False   # first page -> test split, the rest -> train split
        while cur_length == limit:
            # `parent NOT NULL` is valid SQLite (postfix NOT NULL operator).
            df = pd.read_sql("SELECT * FROM parent_reply WHERE unix > {} and parent NOT NULL and score > 0 ORDER BY unix ASC LIMIT {}".format(last_unix, limit), connection)
            if df.empty:
                # ROBUSTNESS FIX: an empty page would crash the cursor update.
                break
            last_unix = df.tail(1)['unix'].values[0]
            cur_length = len(df)
            if not test_done:
                _append_pairs('test.from', 'test.to', df)
                test_done = True
            else:
                _append_pairs('train.from', 'train.to', df)
            counter += 1
            if counter % 20 == 0:
                print(counter * limit, 'rows completed so far')
    finally:
        # FIX: the original never closed the connection (and created an
        # unused cursor).
        connection.close()
| creating database.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
import statsmodels.api as sm
import statsmodels.formula.api as smf

# Read the measurements: time (s), velocity (m/s) and its 1-sigma uncertainty.
time, vel, vel_err = np.loadtxt('VelocityVsTime.txt', skiprows=3, unpack=True)

# Fit 1: unweighted linear regression with scipy.
slope, intercept, r_value, p_value, std_err = stats.linregress(time, vel)

# Fit 2: statsmodels OLS (add_constant supplies the intercept column).
X = sm.add_constant(time)
fit_ols = sm.OLS(vel, X).fit()

# Fit 3: statsmodels WLS.
# BUG FIX: statsmodels WLS expects weights proportional to 1/sigma**2; the
# original passed the errors themselves, which over-weights noisy points.
fit_wls = sm.WLS(vel, X, weights=1.0 / vel_err**2).fit()

# Plot the data with error bars and all three fitted lines.
plt.figure(1, figsize=(14, 10))
plt.errorbar(time,
             vel,
             fmt='ro',
             label="velocity",
             yerr=vel_err,
             markersize=5,
             ecolor='black')
plt.plot(time, intercept + slope * time, 'b-', label="linear fit scipy")
plt.plot(time, fit_ols.fittedvalues, 'y--', label="OLS")
plt.plot(time, fit_wls.fittedvalues, 'g--', label="WLS")
plt.ylabel('velocity (m/s)')
plt.xlabel('time (s)')
plt.legend(loc='upper right')

# BUG FIX: save BEFORE show() -- show() leaves the current figure empty in
# script mode, so the original trailing savefig wrote a blank file.
plt.savefig('VelocityVsTimeFit.eps')

# display plot on screen
plt.show()

print(
    '##############################################################################################'
)
print(' Linear regression with Scipy Results')
print('Slope:', slope)
print('Intercept:', intercept)
print('R:', r_value)
print('R2:', r_value**2)
print('P:', p_value)
print('Std err:', std_err)
print(
    '\n###############################################################################################'
)
print(fit_ols.summary())
print(
    '\n###############################################################################################'
)
print(fit_wls.summary())
# -
| chap7/chapter_7_exercise4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Widges and Interactions
# !conda install -y netcdf4
from netCDF4 import Dataset, num2date, date2num
from numpy import *
import matplotlib.pyplot as plt
# %matplotlib inline
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
# 100-point grid on [0, 1] shared by all the sine demos below.
x=linspace(0,1,100)
f=2
# Plot f periods of a sine over the unit interval.
plt.plot(x, sin(2*pi*x*f))
def pltsin(f):
    """Plot a sine wave with frequency *f* over the precomputed grid ``x``."""
    y = sin(2 * pi * x * f)
    plt.plot(x, y)

pltsin(5)
# + active=""
# The interaction slider starts at 1, goes up to 10, and can be moved back down to 1.
# This interactivity is only available in Jupyter, not in a plain terminal.
# -
interact(pltsin, f=(1,10,0.1))
# # Add to the funtion to allow amplitude to be varied and add in an additional slider to vary both f and a
#
# Hint you might want to limit the magnitude of 'y' in the plot - look up matplotlib
def pltsin(f, a):
    """Plot a sine with frequency *f* scaled by amplitude *a* over grid ``x``."""
    y = a * sin(2 * pi * x * f)
    plt.plot(x, y)

interact(pltsin, f=(1, 10, 0.1), a=(1, 10, 0.1))
# # Climate data
# Open the 2013 NCEP surface air-temperature file.
f=Dataset('ncep-data/air.sig995.2013.nc')
air=f.variables['air'] #get variable
plt.imshow(air[364,:,:]) #display the LAST timestep (index 364), not the first
def sh(time):
    # Display the air-temperature field at the given timestep index.
    plt.imshow(air[time,:,:])
# +
# Hook the timestep browser up to an interactive slider (0..364).
interact(sh, time=(0,364,1))
# +
# Browse variable
def sh(var='air', time=0):
    """Display one timestep of *var* from its 2013 NCEP file.

    var  : NCEP variable name, also the file stem (e.g. 'air', 'uwnd').
    time : timestep index into the file.
    """
    f = Dataset('ncep-data/' + var + '.sig995.2013.nc')
    try:
        vv = f.variables[var]
        # Slicing reads the data into memory before the file is closed.
        plt.imshow(vv[time, :, :])
    finally:
        # FIX: the original leaked one open netCDF file per slider move.
        f.close()
# +
# Give a list of variables to browse.
variabs= ['air','uwnd','vwnd','rhum']
# +
# Now interact with it: sliders/dropdowns for timestep and variable.
interact(sh, time=(0,355,1), var=variabs)
# +
# Browse variable
def sh(var='air', year="2013", time=0):
    """Display one timestep of *var* for the given *year*.

    var  : NCEP variable name, also the file stem (e.g. 'air', 'uwnd').
    year : year string selecting the file (e.g. '2013').
    time : timestep index into the file.
    """
    f = Dataset('ncep-data/' + var + '.sig995.' + year + '.nc')
    try:
        vv = f.variables[var]
        # Slicing reads the data into memory before the file is closed.
        plt.imshow(vv[time, :, :])
    finally:
        # FIX: the original leaked one open netCDF file per slider move.
        f.close()
# +
# Create a list of years.
# NOTE(review): the second assignment overwrites the comprehension with the
# same values -- one of the two lines is redundant.
years= [str(x) for x in range (2013,2016)]
years = ['2013', '2014', '2015']
# +
# Give a list of variables.
variabs= ['air','uwnd','vwnd','rhum']
# +
# Now interact with it: timestep, variable and year controls.
interact(sh, time=(0,355,1), var=variabs, year=years)
# -
| .ipynb_checkpoints/02-widgets-answers-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.0 64-bit (''torch'': conda)'
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings("ignore")
# Load the RSSI time series (hard-coded local path -- adjust per machine),
# parsing DateTime so it can serve as the index.
df = pd.read_csv('/Users/aarsh/Downloads/rssi.csv',usecols=['DateTime','A2_RSSI'],parse_dates=['DateTime'])
df.info()
# Keep the original row number as a column and index by timestamp.
df = df.reset_index().set_index('DateTime')
df.head()
# Quick look at the first 1000 samples.
plt.figure(figsize=(20,5))
plt.plot(df['A2_RSSI'][:1000],color='blue')
plt.show()
# # Exploring Stationarity Of The Time Series
# - Rolling Mean and Std
# +
# NOTE(review): resample() computes BINNED (downsampled) 3-second means/stds,
# not rolling-window statistics as the labels claim -- df.rolling(...) would
# give true rolling stats. Confirm which was intended.
rollmean = df[:1000].resample(rule='3S').mean()
rollstd =df[:1000].resample(rule='3S').std()
plt.figure(figsize=(20,5))
plt.plot(df['A2_RSSI'][:1000],color='blue',label='Original')
plt.plot(rollmean['A2_RSSI'],color='red',label='Rolling Mean')
plt.plot(rollstd['A2_RSSI'],color='black',label='Rolling Std')
plt.show()
# -
# # Augmented Dickey-Fuller Test
# +
from statsmodels.tsa.stattools import adfuller

# Augmented Dickey-Fuller test for stationarity on the first 20k samples.
series = df['A2_RSSI'][0:20000].values
result = adfuller(series)
adf_stat = result[0]
critical_values = result[4]

print(f'ADF Statistic:{adf_stat:f}')
print(f'p-value: {result[1]:f}')
print('Critical Values:')
for level, threshold in critical_values.items():
    print(f'\t{level}: {threshold:.3f}')

# Reject the unit-root null when the statistic is below the 5% critical value.
if adf_stat < critical_values["5%"]:
    print("Time Series is Stationary")
else:
    print("Time Series is Non - Stationary")
# -
# # Modelling (Isolation Forest)
# +
from sklearn.ensemble import IsolationForest

# Expected share of anomalous samples (tunes the decision threshold).
outliers_fraction = 0.06

# Fit on the first 1000 RSSI readings, reshaped to (n_samples, 1).
X1 = df['A2_RSSI'][:1000].values.reshape(-1, 1)
model = IsolationForest(contamination=outliers_fraction)
model.fit(X1)

# BUG FIX: the original `df['anomaly'][:1000] = pd.Series(model.predict(X1))`
# fails -- the 'anomaly' column does not exist yet (KeyError), and even with
# the column present, chained indexing plus index misalignment (RangeIndex vs
# DateTime index) would silently write NaN. Create the column, then assign
# the predictions positionally.
df['anomaly'] = 1  # IsolationForest convention: 1 = normal, -1 = anomaly
df.iloc[:1000, df.columns.get_loc('anomaly')] = model.predict(X1)

# Overlay the flagged anomalies on the raw signal.
a = df.loc[df['anomaly'] == -1]
plt.figure(figsize=(20, 5))
plt.plot(df['A2_RSSI'][:1000], color='blue', label='Data')
plt.plot(a['A2_RSSI'][:1000], linestyle='none', marker='X', color='red',
         markersize=12, label='Anomaly')
plt.show()
# -
| sample_testing/model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import cv2
import numpy as np
from matplotlib import pyplot as plt
from reprep.graphics import scale, posneg;
def show(x):
    """Print the image's shape and render it with nearest-neighbour sampling."""
    shape_text = 'Image shape: %s' % str(x.shape)
    print(shape_text)
    plt.imshow(x, interpolation='nearest')
    plt.show()
# -
filename = 'data/funky_rgb.png'
# OpenCV loads images in BGR channel order; convert to RGB for matplotlib.
im1_bgr = cv2.imread(filename)
im1_rgb_full = cv2.cvtColor(im1_bgr, cv2.COLOR_BGR2RGB)
show(im1_rgb_full)
# downsample to make operations obvious
H, W = 128,128
im1_rgb = cv2.resize(im1_rgb_full, (W, H))
show(im1_rgb)
# 3x3 box-blur (mean) kernel: all entries equal, summing to 1.
n = 3
kernel = np.ones((n, n), np.float32)/ (n*n)
print kernel
# Same kernel written out literally (display only, result unused).
np.ones((3,3)) / 9
smoothed = cv2.filter2D(im1_rgb, -1, kernel)
show(smoothed)
# Sobel horizontal-gradient kernel; its transpose gives the vertical one.
dx = np.array([
    [-1, 0, +1],
    [-2, 0, +2],
    [-1, 0, +1],
])
print dx
dy = dx.transpose()
print dy
# Convert to float so gradients can be negative without wrapping.
smoothed = smoothed.astype('float32')
gx = cv2.filter2D(smoothed, -1, dx)
gy = cv2.filter2D(smoothed, -1, dy)
print gx.shape
# Horizontal gradient of the red channel (index 2 after BGR2RGB? NOTE(review):
# in an RGB image index 2 is blue -- confirm which channel was intended).
gx_red = gx[:,:,2]
show(posneg(gx_red))
# Collapse the channels to a grayscale image and take its gradients.
grayscale = np.sum(smoothed, axis=2)
show(grayscale)
grayscale_gx = cv2.filter2D(grayscale, -1, dx)
grayscale_gy = cv2.filter2D(grayscale, -1, dy)
show( np.hstack((grayscale_gy, grayscale_gx)))
# Squared gradient magnitude highlights edges in any direction.
strength = grayscale_gx * grayscale_gx + grayscale_gy * grayscale_gy
show(strength)
strength_y = grayscale_gy * grayscale_gy
show(strength_y)
# Alternative: sum the per-channel gradients, then combine.
gx_squashed = np.sum(gx, axis=2)
gy_squashed = np.sum(gy, axis=2)
print gx_squashed.shape
show(posneg(np.hstack((gx_squashed, gy_squashed))))
strength = gx*gx + gy*gy
show(scale(strength))
| catkin_ws/src/75-notebooks/35 - Image fitering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="1WX0cn0c_dh0"
# This is an implementation of TF-IDF based on the following link
# https://towardsdatascience.com/natural-language-processing-feature-engineering-using-tf-idf-e8b9d00e7e76
# If you failed to access the web page, you might access the pdf file in the following link
# https://drive.google.com/file/d/1J7J6p8hZ3lDmncxq1f6HHNstaGbtREHj/view?usp=sharing
import pandas as pd
import numpy as np
import re
# + colab={"base_uri": "https://localhost:8080/"} id="l7TvIW_p_qtf" outputId="4e865262-0470-428d-988d-477e0b5520bc"
# Simple documents
# Change the content, so you can have some intuition about TF-IDF
# Two toy documents used throughout the TF-IDF walkthrough.
documentA = 'The man went out for a walk'
documentB = 'the children sat around the fire'
# split each document into its word tokens (naive whitespace tokenization)
bagOfWordsA = documentA.split(' ')
bagOfWordsB = documentB.split(' ')
print(bagOfWordsA)
print(bagOfWordsB)
# + colab={"base_uri": "https://localhost:8080/"} id="fZioW4lkAvPv" outputId="b455947b-41f3-43be-cf6d-1e57bcf1049b"
# Find the uniue set of words
# Vocabulary: every distinct word appearing in either document.
uniqueWords = set(bagOfWordsA).union(set(bagOfWordsB))
print(uniqueWords)
# + colab={"base_uri": "https://localhost:8080/"} id="oD5jBSckA79d" outputId="09073118-3326-4fa3-cfa7-31a93b97ec50"
# create dictionary for each document and calculate the word frequency in each document
# (every vocabulary word gets an entry, 0 when absent from the document)
numOfWordsA = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsA:
    numOfWordsA[word] += 1
print(numOfWordsA)
numOfWordsB = dict.fromkeys(uniqueWords, 0)
for word in bagOfWordsB:
    numOfWordsB[word] += 1
print(numOfWordsB)
# + colab={"base_uri": "https://localhost:8080/"} id="nfb2d1jLBXcp" outputId="d415c5fb-f20a-44d7-aacb-bbb5c98a9f2b"
#importing stopword
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
#stopwords.words('english')
# NOTE(review): this displays the corpus reader object itself; the commented
# line above would list the actual English stop words -- confirm the intent.
stopwords
# + colab={"base_uri": "https://localhost:8080/"} id="SoclVE6aCiTn" outputId="d51b5f79-de3e-419c-dc41-e30ad2f51ea7"
# Compute the Term Frequency
def computeTF(wordDict, bagOfWords):
    """Term frequency: each word's raw count divided by the document length."""
    total = float(len(bagOfWords))
    return {word: count / total for word, count in wordDict.items()}
# Term frequencies for both toy documents.
tfA = computeTF(numOfWordsA, bagOfWordsA)
tfB = computeTF(numOfWordsB, bagOfWordsB)
print(tfA)
print(tfB)
# + colab={"base_uri": "https://localhost:8080/"} id="PguhTdg-C3Nx" outputId="11a4322b-2a88-46e2-f477-b6e457bd8c7f"
# Compute the inverse document frequency
def computeIDF(documents):
    """Inverse document frequency: log(N / df(word)) for every vocabulary word.

    *documents* is a list of word->count dicts that all share the same keys
    (the vocabulary); df(word) is the number of documents where the count > 0.
    """
    import math
    doc_total = len(documents)
    appearances = dict.fromkeys(documents[0].keys(), 0)
    for document in documents:
        for word, count in document.items():
            if count > 0:
                appearances[word] += 1
    return {word: math.log(doc_total / float(df)) for word, df in appearances.items()}
# IDF over the two toy documents (words in both score log(2/2) = 0).
idfs = computeIDF([numOfWordsA, numOfWordsB])
print(idfs)
# + colab={"base_uri": "https://localhost:8080/", "height": 111} id="4_wcG1zeDFJO" outputId="b8815d71-4193-4f69-c390-a9c8ddc1e0e1"
# Compute the TFxIDF
def computeTFIDF(tfBagOfWords, idfs):
    """Combine a document's term frequencies with the corpus IDF weights."""
    return {word: tf * idfs[word] for word, tf in tfBagOfWords.items()}
# TF-IDF vectors for both documents, shown as one DataFrame row per document.
tfidfA = computeTFIDF(tfA, idfs)
tfidfB = computeTFIDF(tfB, idfs)
df = pd.DataFrame([tfidfA, tfidfB])
df
# + colab={"base_uri": "https://localhost:8080/", "height": 111} id="zfXg3z3BDVh0" outputId="3714fd75-b4bd-4fa5-d8f5-674db40f5410"
# Obtaining TF-IDF using sklearn library
# Same computation with scikit-learn (note: sklearn applies smoothing and
# L2 normalization by default, so values differ from the manual version).
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform([documentA, documentB])
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2; newer
# versions require get_feature_names_out() -- confirm the pinned version.
feature_names = vectorizer.get_feature_names()
dense = vectors.todense()
denselist = dense.tolist()
df = pd.DataFrame(denselist, columns=feature_names)
df
# + id="Q8BlgblvZJ0W"
# Tugas Besar 1 Implementasi Content Based Filtering dengan TF-IDF.
# a. Copy lirik "Reff" dari lagu berbahasa Indonesia (masing-masing 3 lagu),
# b. Paste ke link berikut (Pastikan 1 lagu 1 cell lirik_ref):
# https://docs.google.com/spreadsheets/d/1j98gjzwL-88GPiTGkRgEWhWWcKryvNAdlaDR_oBWgpY/edit?usp=sharing
# c. Cari informasi mengenai k-NN, dan yang diimplementasikan pada TF-IDF pada dataset di atas
# d. Buat satu prosedur yang menerima ID dari lagu, dan kembalikan 5 ID most-similar items
# + colab={"base_uri": "https://localhost:8080/", "height": 436} id="PpetkDTkHiAN" outputId="a1465627-6b98-47a0-bf1d-634390d3d3bf"
# Load data from google drive
from google.colab import drive
import os
# Mount Google Drive and read the crowd-sourced song-lyrics dataset.
drive.mount('/content/drive')
os.chdir("/content/drive/My Drive/Colab Notebooks/recsys") # me
fileNames = os.listdir()  # NOTE(review): listed but never used below
song_csv = r'DatasetLaguIF_GAB - IF-GAB-03.csv'
song_df = pd.read_csv(song_csv)
song_df
# + id="e5936XAzfOEu"
# Get Data: work on the loaded DataFrame; the reff lyrics live in this column.
songs_reff = song_df
reff_column_name = 'Bait_Reff'
# songs_reff = pd.DataFrame(songs_reff_list)
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="15bYPbCufOTk" outputId="a202189d-eb15-4c41-c8d3-6dca15c58edd"
# Preprocessing reff
def preprocess_reff(reff):
    """Normalize one chorus ("reff") string for TF-IDF.

    Each CR or LF becomes a single space (so "\r\n" becomes two spaces,
    exactly as the original per-character substitutions did), then every
    character that is not a letter, digit or space is dropped.

    Note: returns a new string — callers must assign the result.
    """
    # Fix: the original third substitution (r"\r\n") could never match after
    # "\n" and "\r" had already been replaced individually; one character
    # class covers both cases.
    reff = re.sub(r"[\r\n]", " ", reff)
    reff = re.sub(r'[^A-Za-z0-9 ]+', '', reff)
    return reff
# Drop rows with any missing values before vectorizing
songs_reff = songs_reff.dropna(axis=0, how="any")
# Bug fix: the cleaned strings were previously computed and discarded —
# assign the result back so the TF-IDF step sees the preprocessed text.
songs_reff[reff_column_name] = songs_reff[reff_column_name].apply(preprocess_reff)
songs_reff
# + colab={"base_uri": "https://localhost:8080/", "height": 439} id="aNifW4Ugpq-a" outputId="092a6ff6-8ee9-4d77-8de5-c849803bdb6a"
# TF IDF song reff: one row per song, one column per vocabulary term
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(songs_reff[reff_column_name].to_numpy())
# Fix: get_feature_names() was removed in scikit-learn 1.2 — use
# get_feature_names_out() instead.
feature_names = vectorizer.get_feature_names_out()
dense = vectors.todense()
denselist = dense.tolist()
tfidf_songs_reff = pd.DataFrame(denselist, columns=feature_names)
tfidf_songs_reff
# + colab={"base_uri": "https://localhost:8080/", "height": 439} id="xzrudAi3R9jL" outputId="6958ffb5-ee7e-431e-f4e3-1626bcd19f6d"
# Concat song id and tfidf results
# NOTE(review): this relies on positional alignment — the TF-IDF rows were
# built from songs_reff in order (after dropna), so row i of
# tfidf_songs_reff corresponds to row i of songs_reff; verify if the
# preprocessing changes.
tf_idf_with_id = tfidf_songs_reff.copy()
tf_idf_with_id.insert(loc=0, column='ID_Lagu', value=songs_reff['ID_Lagu'].values)
tf_idf_with_id
# + colab={"base_uri": "https://localhost:8080/"} id="OhU2-p_LogAJ" outputId="0b149365-8bb0-4c7d-f941-9b4921926c08"
# Train K-NN using TF-IDF Result (unsupervised nearest-neighbor index over
# the TF-IDF vectors; no labels involved)
from sklearn.neighbors import NearestNeighbors
k = 5
# NOTE(review): the name `nn` shadows the common `torch.nn` alias used
# elsewhere in this repository; kept because later cells reference it.
nn = NearestNeighbors(algorithm='auto', n_neighbors=k)
nn.fit(tfidf_songs_reff)
# + id="YPJ-R2pig9qp"
# Procedure to get the 5 nearest neighbors — i.e. the songs with the most similar TF-IDF vectors
# Task: write one procedure that takes a song ID and returns the 5 IDs of the most-similar items
def get_similar_items_from_nearest_neigbors_results(data, id, nearest_neigbors, total_similar_items=5, column_id_name="ID_Lagu"):
    """Return the IDs of the `total_similar_items` songs most similar to `id`.

    Parameters:
        data: DataFrame whose first column (`column_id_name`) holds song IDs
            and whose remaining columns are the TF-IDF features the
            neighbor index was fitted on.
        id: song ID to query (parameter name kept for caller compatibility,
            although it shadows the `id` builtin).
        nearest_neigbors: fitted object exposing `kneighbors`.
        total_similar_items: number of neighbor IDs to return.
        column_id_name: name of the ID column inside `data`.

    Returns an int numpy array of similar song IDs, or None when the ID is
    not found (a message is printed in that case).
    """
    idx_selected_id = data.index[data[column_id_name] == id].tolist()
    if not idx_selected_id:
        print("Song not found")
        return None
    # Fix: `data.drop(column_id_name, 1)` used the positional `axis`
    # argument, which was deprecated in pandas 1.0 and removed in 2.0.
    values = data.drop(columns=[column_id_name])
    # Ask for one extra neighbor because the query item itself comes back
    # as its own nearest neighbor.
    distances, indexes = nearest_neigbors.kneighbors(
        [values.iloc[idx_selected_id[0]]], total_similar_items + 1, return_distance=True)
    indexes = indexes[0][1:]  # Remove itself
    print(distances)
    print(indexes)
    if indexes is None:
        # Defensive branch preserved from the original implementation.
        print("Can't show any similar items")
        return None
    similar_items = [data.iloc[point_idx][column_id_name] for point_idx in indexes]
    return np.array(similar_items).astype(int)
# + colab={"base_uri": "https://localhost:8080/", "height": 255} id="XgqRuUegMCmF" outputId="2f20ded3-42df-4330-de45-868862dc26ce"
# Show 5 similar items for one query song
# Fix: renamed `id` -> `song_id` so the `id` builtin is no longer shadowed.
song_id = 104
similar_items = get_similar_items_from_nearest_neigbors_results(tf_idf_with_id, song_id, nn)
print("Similar Items")
songs_reff[songs_reff['ID_Lagu'].isin(similar_items)]
# + colab={"base_uri": "https://localhost:8080/", "height": 97} id="t0ruxMlkkQK6" outputId="01923624-a202-4390-e3bb-286f2528bec7"
# Show the query song itself for comparison
print("Selected Items")
songs_reff[songs_reff['ID_Lagu'].isin([song_id])]
| project/Movies/sc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/airctic/icevision/blob/master/notebooks/custom_parser.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Custom Parser - Simple
# ## Instaling icevision
# +
# # !pip install icevision[all]
# # !pip install icedata
# -
# ## Imports
# As always, let's import everything from `icevision`. Additionally, we will also need `pandas` (you might need to install it with `pip install pandas`).
# + nbsphinx="hidden"
from icevision.all import *
import pandas as pd
# -
# ## Download dataset
# We're going to be using a small sample of the chess dataset, the full dataset is offered by roboflow [here](https://public.roboflow.com/object-detection/chess-full)
# Download a small sample of the chess dataset (full set available on Roboflow)
data_url = "https://github.com/airctic/chess_sample/archive/master.zip"
# NOTE(review): `icedata` is not imported explicitly in the visible import
# cell — it presumably comes in via `from icevision.all import *`; confirm.
data_dir = icedata.load_data(data_url, 'chess_sample') / 'chess_sample-master'
# ## Understand the data format
# In this task we were given a `.csv` file with annotations, let's take a look at that.
# !!! danger "Important"
# Replace `source` with your own path for the dataset directory.
# Peek at the annotation CSV (one row per bounding box; repeated filename,
# width and height for boxes belonging to the same image)
df = pd.read_csv(data_dir / "annotations.csv")
df.head()
# At first glance, we can make the following assumptions:
#
# * Multiple rows with the **same** filename, width, height
# * A **label** for each row
# * A **bbox** [xmin, ymin, xmax, ymax] for each row
#
# Once we know what our data provides we can create our custom `Parser`.
# ## Create the Parser
# The first step is to create a template record for our specific type of dataset, in this case we're doing standard object detection:
# Template record for standard object detection
template_record = ObjectDetectionRecord()
# Now use the method `generate_template` that will print out all the necessary steps we have to implement.
# Prints the skeleton of methods a custom parser must implement
Parser.generate_template(template_record)
# We can copy the template and use it as our starting point. Let's go over each of the methods we have to define:
#
# - `__init__`: What happens here is completely up to you, normally we have to pass some reference to our data, `data_dir` in our case.
#
# - `__iter__`: This tells our parser how to iterate over our data, each item returned here will be passed to `parse_fields` as `o`. In our case we call `df.itertuples` to iterate over all `df` rows.
#
# - `__len__`: How many items will be iterating over.
#
# - `imageid`: Should return a `Hashable` (`int`, `str`, etc). In our case we want all the dataset items that have the same `filename` to be unified in the same record.
#
# - `parse_fields`: Here is where the attributes of the record are collected, the template will suggest what methods we need to call on the record and what parameters it expects. The parameter `o` it receives is the item returned by `__iter__`.
# !!! danger "Important"
# Be sure to pass the correct type on all record methods!
class ChessParser(Parser):
    """Parser for the chess sample dataset: `annotations.csv` holds one row
    per bounding box, rows sharing a filename are merged into one record."""

    def __init__(self, template_record, data_dir):
        super().__init__(template_record=template_record)
        self.data_dir = data_dir
        self.df = pd.read_csv(data_dir / "annotations.csv")
        # Class map built from the label values present in the CSV.
        self.class_map = ClassMap(list(self.df['label'].unique()))

    def __iter__(self) -> Any:
        # Each CSV row becomes one item handed to parse_fields as `o`.
        yield from self.df.itertuples()

    def __len__(self) -> int:
        return self.df.shape[0]

    def record_id(self, o) -> Hashable:
        # Rows with the same filename are unified into the same record.
        return o.filename

    def parse_fields(self, o, record, is_new):
        # Image-level fields only need to be set once per record.
        if is_new:
            record.set_filepath(self.data_dir / 'images' / o.filename)
            record.set_img_size(ImgSize(width=o.width, height=o.height))
            record.detection.set_class_map(self.class_map)
        box = BBox.from_xyxy(o.xmin, o.ymin, o.xmax, o.ymax)
        record.detection.add_bboxes([box])
        record.detection.add_labels([o.label])
# Let's randomly split the data and parser with `Parser.parse`:
# Build the parser and randomly split the parsed data into train/valid records
parser = ChessParser(template_record, data_dir)
train_records, valid_records = parser.parse()
# Let's take a look at one record:
show_record(train_records[0], display_label=False, figsize=(14, 10))
train_records[0]
# ## Next steps
# - This was just merged, come help us adjusting the documentation and fixing the bugs
# ## Conclusion
#
# And that's it! Now that you have your data in the standard library record format, you can use it to create a `Dataset`, visualize the image with the annotations and basically use all helper functions that IceVision provides!
# ## Happy Learning!
#
# If you need any assistance, feel free to join our [forum](https://discord.gg/JDBeZYK).
| notebooks/custom_parser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="ORsio2qVD3Rl"
# ## 5.5 계층별 고객 판매 전략 (클러스터링, 차원 축소)
#
# + [markdown] colab_type="text" id="060CpHNuD3Rm"
# ### 공통 전처리
# + colab={} colab_type="code" id="Lyt_vTxUD3Rq"
# Common setup
# Suppress noisy warning messages
import warnings
warnings.filterwarnings('ignore')
# Import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Configure a Korean-capable font depending on the platform
import platform
if platform.system() == 'Windows':
    plt.rc('font', family='Malgun Gothic')
elif platform.system() == 'Darwin':
    plt.rc('font', family='Apple Gothic')
# display() helper for DataFrames
from IPython.display import display
# Numeric output settings
# NumPy floating-point print precision
np.set_printoptions(suppress=True, precision=4)
# pandas floating-point display format
pd.options.display.float_format = '{:.4f}'.format
# Show every DataFrame column
pd.set_option("display.max_columns",None)
# Plot font size
plt.rcParams["font.size"] = 14
# Random seed used throughout this notebook
random_seed = 123
# + [markdown] colab_type="text" id="HQlDUVzRD3Rs"
# 데이터 집합 배포 웹 페이지
#
# https://archive.ics.uci.edu/ml/datasets/wholesale+customers
# + [markdown] colab_type="text" id="iB_ocZwOD3Rt"
# ### 5.5.4 데이터 읽어 들이기부터 데이터 확인까지
# + [markdown] colab_type="text" id="zNKorSZmD3Rt"
# #### 데이터 읽어 들이기
# + colab={} colab_type="code" id="TsPBG-ECD3Ru"
# Load the UCI "Wholesale customers" dataset
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00292/Wholesale%20customers%20data.csv'
df = pd.read_csv(url)
# Rename columns (Korean labels; runtime values, left untouched):
# sales_channel, region, fresh, milk, grocery, frozen,
# detergents_paper, delicassen
columns = [ '판매_채널', '지역', '신선식품', '유제품', '식료품',
            '냉동식품', '세제_종이제품', '부식']
df.columns = columns
# + [markdown] colab_type="text" id="oTLRFKx3D3Rw"
# #### Inspecting the data
# + colab={"base_uri": "https://localhost:8080/", "height": 213} colab_type="code" id="gKvGiyRXD3Rw" outputId="fcc57a18-e2f1-44f1-d238-47dc58e2c32a"
# Look at the first rows
display(df.head())
# Row/column counts
print(df.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 175} colab_type="code" id="Z1WIzw_tD3Rz" outputId="896dd8bc-e617-42c9-e9b8-85233b1dcc13"
# Check for missing values
print(df.isnull().sum())
# + colab={"base_uri": "https://localhost:8080/", "height": 158} colab_type="code" id="j-2hfTBnD3R1" outputId="ac0d6861-0ed7-406d-d5d5-f57bffeb4eaa"
# Value counts of the sales-channel label
print(df['판매_채널'].value_counts())
print()
# Value counts of the region label
print(df['지역'].value_counts())
# + [markdown] colab_type="text" id="zSMqakKJD3R3"
# #### '판매_채널' 레이블 값의 의미
# 1. Horeca(호텔, 레스토랑, 카페) 298
# 2. Retail(소매점) 142
# + [markdown] colab_type="text" id="aLARYhrsD3R4"
# #### '지역' 레이블 값의 의미
# 1. Lisbon(리스본) 77
# 2. Oporto(포르투) 47
# 3. Other Region(기타) 316
#
#
# + [markdown] colab_type="text" id="wS0QnunZD3R4"
# #### 히스토그램 그리기
# + colab={"base_uri": "https://localhost:8080/", "height": 577} colab_type="code" id="e7El541nD3R5" outputId="c2a62863-b9c4-4808-d124-bf8d480d3579"
# Exclude the label columns (sales channel, region) before clustering
df2 = df.drop(['판매_채널', '지역'], axis=1)
# Histogram of each remaining (spend) column
from pylab import rcParams
rcParams['figure.figsize'] = (8, 8)
df2.hist(bins=20)
plt.tight_layout()
plt.show()
# + [markdown] colab_type="text" id="qI5f31AtD3R7"
# ### 5.5.5 Clustering
# + colab={} colab_type="code" id="CfEPfY8cD3R7"
# No further preprocessing or train/test split is needed for clustering
# Choose the algorithm
from sklearn.cluster import KMeans
# Number of clusters
clusters=4
# Define the algorithm (seeded for reproducibility)
algorithm = KMeans(n_clusters=clusters,
                   random_state=random_seed)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="xfGbHcwoD3R9" outputId="eb987da4-502b-40a2-8b9a-e4a5a4d52dd2"
# Fit and predict cluster assignments
y_pred = algorithm.fit_predict(df2)
# Inspect the first few assignments
print(y_pred[:20])
# + [markdown] colab_type="text" id="mZxGwWUDD3R_"
# ### 5.5.6 클러스터링 결과 분석
# + [markdown] colab_type="text" id="vPbPUf85D3R_"
# #### 그룹별 평균 계산하기
# + colab={"base_uri": "https://localhost:8080/", "height": 166} colab_type="code" id="q24sMbpvD3SA" outputId="1cc190f4-6a8d-4b10-a826-1fcd42ae80ac"
# Per-cluster mean spend per product category
df_cluster = df2.groupby(y_pred).mean()
display(df_cluster)
# + [markdown] colab_type="text" id="eKzdYkL8D3SC"
# #### Plotting the per-cluster means
# + colab={"base_uri": "https://localhost:8080/", "height": 373} colab_type="code" id="wVb9iCt0D3SC" outputId="8058c1e3-92c7-4ec9-fb85-172ac4d14a41"
# Stacked bar chart of the per-cluster means
df_cluster.plot(kind='bar',stacked=True,
                figsize=(10, 6),colormap='jet')
plt.show()
# + [markdown] colab_type="text" id="eFg89g0wD3SE"
# #### Findings
#
# * **0**: mostly fresh food
# * **1**: mostly groceries
# * **2**: bulk buyers
# * **3**: small-volume buyers
# + [markdown] colab_type="text" id="QGmD9ipaD3SF"
# #### Relation between clusters and sales channel / region
# + colab={"base_uri": "https://localhost:8080/", "height": 887} colab_type="code" id="vzq4FBEHD3SF" outputId="0495656b-b77e-4285-a952-0adb33d9283e"
# Relation of the sales-channel / region columns to the clusters
# Extract the two label columns into df3
df3 = df[['판매_채널', '지역']]
# Figure size
rcParams['figure.figsize'] = (6,3)
# One pair of histograms per cluster
for i in range(clusters):
    fig, ax = plt.subplots()
    w = df3[y_pred==i]
    print(f'====== 그룹{i} ======')
    w.hist(ax=ax)
    plt.tight_layout()
    plt.show()
# + [markdown] colab_type="text" id="z3iYpMSZD3SH"
# #### 분석 결과
# 그룹0(신선 그룹)과 그룹3(소량구매 그룹)은 판매 채널 1(호텔/레스토랑/카페)과 관련이 깊다.
#
# 그룹1(식품 그룹)과 그룹2(대량구매 그룹)은 판매 채널 2(소매점)와 관련이 깊다.
#
# 지역과 그룹은 특별히 관련이 없어 보인다.
# + [markdown] colab_type="text" id="kPwgiwbtD3SI"
# ### 5.5.7 차원축소
# + colab={} colab_type="code" id="clpaya39D3SI"
# Choose the algorithm
from sklearn.decomposition import PCA
# Build the model
# Reduce to 2 components, since the goal is a 2-D scatter plot
pca = PCA(n_components=2)
# + colab={"base_uri": "https://localhost:8080/", "height": 105} colab_type="code" id="obiGfHi0D3SK" outputId="12142fe3-6ff5-4ae5-9841-4c7391d428f6"
# Fit and transform
d2 = pca.fit_transform(df2)
# Show a few of the transformed rows
print(d2[:5,:])
# + [markdown] colab_type="text" id="9O5StcBGD3SM"
# ### 5.5.8 Using the dimensionality reduction
# + [markdown] colab_type="text" id="ceborIy5D3SM"
# #### Scatter plot
# + colab={"base_uri": "https://localhost:8080/", "height": 486} colab_type="code" id="DFOj2JT-D3SN" outputId="56d1ed99-e0b2-4d8e-9150-4ed20d0dd64a"
# Scatter plot with one marker/color per cluster
# (legend labels, Korean runtime strings: fresh / grocery / bulk / small)
plt.figure(figsize=(8,8))
marks = ['.', 'x', '*', '+']
labels = ['신선', '식품', '대량', '소량']
colors = ['grey', 'lightblue', 'blue', 'black']
for i in range(clusters):
    plt.scatter(d2[y_pred==i][:,0], d2[y_pred==i][:,1],
                marker=marks[i], label=labels[i], s=100, c=colors[i])
plt.legend(fontsize=14)
plt.show()
# + [markdown] colab_type="text" id="xSroAT7PD3SP"
# #### 이상값 조사하기
# + colab={"base_uri": "https://localhost:8080/", "height": 77} colab_type="code" id="ygbDqd_lD3SQ" outputId="e75d96d3-337a-4f63-bf2c-834c604e1706"
# Inspect the outlier in the fresh-food cluster (extreme first component)
display(df[d2[:,0] > 100000])
# + [markdown] colab_type="text" id="S7SvVzFBD3SR"
# **Discussion**
#
# The customer at index 181 buys an unusually large amount of fresh food.
# + colab={"base_uri": "https://localhost:8080/", "height": 77} colab_type="code" id="1aNy2yJVD3SS" outputId="df28c201-d4bc-4db2-9387-bb7a645a9b26"
# Inspect the outlier in the bulk-buying cluster (extreme second component)
display(df[d2[:,1] > 80000])
# + [markdown] colab_type="text" id="J6jV6-4HD3SU"
# **Discussion**
# The customer at index 85 spends especially heavily on groceries and detergents/paper.
# + colab={"base_uri": "https://localhost:8080/", "height": 284} colab_type="code" id="zgOgOJfhD3SU" outputId="e5c7fafd-3915-47d9-ff08-db1aa6c338b5"
# Summary statistics of the spend columns
display(df2.describe())
# + colab={} colab_type="code" id="U6zUEK9PD3SW"
| notebooks/ch05_05_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # NYC Taxi Fare Prediction with RayDP and Pytorch
# +
import ray
import os
import pandas as pd, numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from pyspark.sql.functions import *
import raydp
from raydp.torch.estimator import TorchEstimator
from raydp.utils import random_split
# -
# ## Initialize or connect to existed Ray cluster
# Firstly, You need to init or connect to a ray cluster. Note that you should set include_java to True.
# For more config info in ray, please refer the ray doc. https://docs.ray.io/en/latest/package-ref.html
# Connect to an existing cluster instead with:
# ray.init(address="auto", redis_password="<PASSWORD>")
ray.init()
# After initialize ray cluster, you can use the raydp api to get a spark session
app_name = "NYC Taxi Fare Prediction with RayDP"
num_executors = 4
cores_per_executor = 1
memory_per_executor = "2GB"
spark = raydp.init_spark(app_name, num_executors, cores_per_executor, memory_per_executor)
# ## Distributed data preprocessing with pyspark
# +
# Then you can code as you are using spark
# The dataset can be downloaded from https://www.kaggle.com/c/new-york-city-taxi-fare-prediction/data
# Here we just use a subset of the training data
train = spark.read.format("csv").option("header", "true") \
    .option("inferSchema", "true") \
    .load("/mnt/DP_disk8/nyc_train.csv")
# Set spark timezone for processing datetime (pickup timestamps are UTC)
spark.conf.set("spark.sql.session.timeZone", "UTC")
# -
# Clean up the outlier
def clean_up(data):
    """Drop rows outside plausible NYC bounds / passenger / fare ranges.

    Also removes degenerate trips where pickup equals dropoff in either
    coordinate. Returns the filtered DataFrame.
    """
    conditions = [
        col('pickup_longitude') <= -72,
        col('pickup_longitude') >= -76,
        col('dropoff_longitude') <= -72,
        col('dropoff_longitude') >= -76,
        col('pickup_latitude') <= 42,
        col('pickup_latitude') >= 38,
        col('dropoff_latitude') <= 42,
        col('dropoff_latitude') >= 38,
        col('passenger_count') <= 6,
        col('passenger_count') >= 1,
        col('fare_amount') > 0,
        col('fare_amount') < 250,
        col('dropoff_longitude') != col('pickup_longitude'),
        col('dropoff_latitude') != col('pickup_latitude'),
    ]
    for cond in conditions:
        data = data.filter(cond)
    return data
# Add time related features
def add_time_features(data):
    """Derive calendar and time-of-day features from `pickup_datetime`.

    Adds: day, hour_of_day, day_of_week, week_of_year, month_of_year,
    quarter_of_year, year, plus two binary flags (night, late_night).
    """
    data = data.withColumn("day", dayofmonth(col("pickup_datetime")))
    data = data.withColumn("hour_of_day", hour(col("pickup_datetime")))
    # Spark's dayofweek is 1 (Sunday) .. 7 (Saturday); subtracting 2 maps
    # Monday -> 0 ... Saturday -> 5, Sunday -> -1.
    data = data.withColumn("day_of_week", dayofweek(col("pickup_datetime"))-2)
    data = data.withColumn("week_of_year", weekofyear(col("pickup_datetime")))
    data = data.withColumn("month_of_year", month(col("pickup_datetime")))
    data = data.withColumn("quarter_of_year", quarter(col("pickup_datetime")))
    data = data.withColumn("year", year(col("pickup_datetime")))

    @udf("int")
    def night(hour, weekday):
        # Weekday evening rush: 16:00-20:00, Monday-Friday.
        if ((hour <= 20) and (hour >= 16) and (weekday < 5)):
            return int(1)
        else:
            return int(0)

    @udf("int")
    def late_night(hour):
        # Bug fix: the original condition used `and` (hour <= 6 AND
        # hour >= 20), which is impossible, so the flag was always 0.
        # Late night means before 06:00 OR after 20:00.
        if ((hour <= 6) or (hour >= 20)):
            return int(1)
        else:
            return int(0)

    data = data.withColumn("night", night("hour_of_day", "day_of_week"))
    data = data.withColumn("late_night", late_night("hour_of_day"))
    return data
# Add distance related features
def add_distance_features(data):
    """Add Manhattan-distance features between pickup/dropoff and reference
    points (NYC downtown and the three airports), plus raw coordinate deltas.
    """
    @udf("float")
    def manhattan(lon1, lat1, lon2, lat2):
        # Fix: the original signature was (lat1, lon1, lat2, lon2) although
        # every call site passes longitude first — the L1 result was still
        # correct (only the pairing matters), but the names were misleading.
        return float(np.abs(lat2 - lat1) + np.abs(lon2 - lon1))

    # Location of NYC downtown
    ny = (-74.0063889, 40.7141667)
    # Location of the three airports in NYC: (longitude, latitude)
    jfk = (-73.7822222222, 40.6441666667)
    ewr = (-74.175, 40.69)
    lgr = (-73.87, 40.77)

    # Raw coordinate deltas and their Manhattan sum
    data = data.withColumn("abs_diff_longitude", abs(col("dropoff_longitude")-col("pickup_longitude"))) \
               .withColumn("abs_diff_latitude", abs(col("dropoff_latitude") - col("pickup_latitude")))
    data = data.withColumn("manhattan", col("abs_diff_latitude")+col("abs_diff_longitude"))
    # Features about the distance between pickup/dropoff and each airport
    data = data.withColumn("pickup_distance_jfk", manhattan("pickup_longitude", "pickup_latitude", lit(jfk[0]), lit(jfk[1])))
    data = data.withColumn("dropoff_distance_jfk", manhattan("dropoff_longitude", "dropoff_latitude", lit(jfk[0]), lit(jfk[1])))
    data = data.withColumn("pickup_distance_ewr", manhattan("pickup_longitude", "pickup_latitude", lit(ewr[0]), lit(ewr[1])))
    data = data.withColumn("dropoff_distance_ewr", manhattan("dropoff_longitude", "dropoff_latitude", lit(ewr[0]), lit(ewr[1])))
    data = data.withColumn("pickup_distance_lgr", manhattan("pickup_longitude", "pickup_latitude", lit(lgr[0]), lit(lgr[1])))
    data = data.withColumn("dropoff_distance_lgr", manhattan("dropoff_longitude", "dropoff_latitude", lit(lgr[0]), lit(lgr[1])))
    data = data.withColumn("pickup_distance_downtown", manhattan("pickup_longitude", "pickup_latitude", lit(ny[0]), lit(ny[1])))
    data = data.withColumn("dropoff_distance_downtown", manhattan("dropoff_longitude", "dropoff_latitude", lit(ny[0]), lit(ny[1])))
    return data
# Drop unused features
def drop_col(data):
    """Remove raw columns that are no longer needed after feature engineering."""
    unused = ("pickup_datetime", "pickup_longitude", "pickup_latitude",
              "dropoff_longitude", "dropoff_latitude", "passenger_count", "key")
    for name in unused:
        data = data.drop(name)
    return data
# +
# Run the full preprocessing pipeline on the raw Spark DataFrame
train_data = clean_up(train)
train_data = add_time_features(train_data)
train_data = add_distance_features(train_data)
train_data = drop_col(train_data)
# -
# ## Distributed model training and evaluation
# Split data into train_dataset and test_dataset (90% / 10%)
train_df, test_df = random_split(train_data, [0.9, 0.1])
# Every column except the target is treated as a model feature
features = [field.name for field in list(train_df.schema) if field.name != "fare_amount"]
# +
# Define the model, loss function and optimizer
# Define the model, loss function and optimizer
class NYC_Model(nn.Module):
    """Fare-regression MLP: cols -> 256 -> 128 -> 64 -> 16 -> 1, with ReLU
    followed by BatchNorm after every hidden layer."""

    def __init__(self, cols):
        super(NYC_Model, self).__init__()
        # Layer creation order matches the original so seeded parameter
        # initialization produces identical weights.
        self.fc1 = nn.Linear(cols, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, 64)
        self.fc4 = nn.Linear(64, 16)
        self.fc5 = nn.Linear(16, 1)

        self.bn1 = nn.BatchNorm1d(256)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(64)
        self.bn4 = nn.BatchNorm1d(16)

    def forward(self, *x):
        # Feature columns arrive as separate tensors; concatenate them.
        out = torch.cat(x, dim=1)
        hidden = ((self.fc1, self.bn1), (self.fc2, self.bn2),
                  (self.fc3, self.bn3), (self.fc4, self.bn4))
        for fc, bn in hidden:
            out = bn(F.relu(fc(out)))
        # Final linear head; squeeze to a 1-D fare prediction per row.
        return self.fc5(out).squeeze(1)
# Instantiate model, Huber-style loss and Adam optimizer
nyc_model = NYC_Model(len(features))
criterion = nn.SmoothL1Loss()
optimizer = torch.optim.Adam(nyc_model.parameters(), lr=0.001)
# -
# Create a distributed estimator based on the raydp api
estimator = TorchEstimator(num_workers=4, model=nyc_model, optimizer=optimizer, loss=criterion,
                           feature_columns=features, label_column="fare_amount", batch_size=256, num_epochs=30)
# Train the model (evaluates on test_df as well)
estimator.fit_on_spark(train_df, test_df)
# shutdown raydp and ray
estimator.shutdown()
raydp.stop_spark()
ray.shutdown()
| examples/pytorch_nyctaxi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from tqdm import tqdm
import scml
from scml.encoders import _deprecated_group_features
n_trials = 4
size = 10000000
dfs = []
statistics_1 = []
statistics_2 = []
# Build n_trials random frames and precompute group statistics for both a
# single-column ("b") and a two-column ("b", "c") grouping, so the timing
# cells below measure only the feature-join step.
for i in tqdm(range(n_trials)):
    df = pd.DataFrame(
        {
            "a": np.random.rand(size),
            "b": np.random.randint(100, size=size),
            "c": np.random.randint(100, size=size),
        },
    )
    dfs.append(df)
    st = scml.group_statistics(df, column="a", group_columns=["b"])
    statistics_1.append(st)
    st = scml.group_statistics(df, column="a", group_columns=["b", "c"])
    statistics_2.append(st)
# %%timeit
# Current implementation, single group column
for i in range(n_trials):
    scml.group_features(
        dfs[i],
        statistics=statistics_1[i],
        column="a",
        group_columns=["b"],
    )
# %%timeit
# Deprecated implementation, single group column (baseline)
for i in range(n_trials):
    _deprecated_group_features(
        dfs[i],
        statistics=statistics_1[i],
        column="a",
        group_columns=["b"],
    )
# %%timeit
# Current implementation, two group columns
for i in range(n_trials):
    scml.group_features(
        dfs[i],
        statistics=statistics_2[i],
        column="a",
        group_columns=["b", "c"],
    )
# %%timeit
# Deprecated implementation, two group columns (baseline)
for i in range(n_trials):
    _deprecated_group_features(
        dfs[i],
        statistics=statistics_2[i],
        column="a",
        group_columns=["b", "c"],
    )
| examples/speed_test_pd_merge.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Code Setup
# +
# All imports and setup code goes here
import numpy as np
from matplotlib import pyplot as plt
#If you have not installed seaborn uncomment this line
from mpl_toolkits.mplot3d import Axes3D, proj3d
from matplotlib.patches import FancyArrowPatch, Patch
from IPython.display import HTML
from matplotlib import animation
import copy
# Pretty plots: prefer seaborn styling, fall back to matplotlib's built-in
# "seaborn-talk" style when seaborn is not installed
try:
    import seaborn as sns
    sns.set_context("talk", font_scale=1.5, rc={"lines.linewidth": 2.5})
    sns.set_style("whitegrid")
except ImportError:
    import matplotlib
    matplotlib.style.use("seaborn-talk")
# %matplotlib inline
# -
# ## Frame transformations
# Frequently, in scientific applications (modeling, controls etc.), geometry and computer graphics/vision, we need to transform between a local frame (or local/object frame/coordinates, denoted by $ \mathbf{x}_\mathcal{L} $ ) and a laboratory frame (or global/world frame/coordinates, denoted by $\mathbf{x} $). Note that the local frame can be at a different location (or) have a different orientation with respect to the global frame coordinates. In this notebook, we will see different ways of achieving the same.
class Arrow3D(FancyArrowPatch):
    """ An arrow in 3 dimensions, that renders according to the view
    set in the global matplotlib 3D frame
    """
    def __init__(self, xs, ys, zs, *args, **kwargs):
        FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
        self._verts3d = xs, ys, zs
        self.update(xs, ys, zs)

    def __copy__(self):
        # Shallow copy: share attribute values with the original.
        obj = type(self).__new__(self.__class__)
        obj.__dict__.update(self.__dict__)
        return obj

    def __deepcopy__(self, memo):
        cls = self.__class__
        result = cls.__new__(cls)
        memo[id(self)] = result
        for k, v in self.__dict__.items():
            # Bug fix: the original called bare `deepcopy`, but only
            # `import copy` is in scope at the top of this notebook, so
            # deep-copying raised NameError. Qualify as copy.deepcopy.
            setattr(result, k, copy.deepcopy(v, memo))
        return result

    def draw(self, renderer):
        # Project the stored 3-D endpoints into 2-D using the current view
        # matrix, then let FancyArrowPatch draw the 2-D arrow.
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
        FancyArrowPatch.draw(self, renderer)

    def update(self, xs, ys, zs):
        # NOTE(review): shadows matplotlib.artist.Artist.update(props);
        # kept because frame_3D calls it with coordinate triples.
        self._verts3d = xs, ys, zs
# +
class frame_3D(object):
""" 3D frame class. Class for different rotation
and translation strategies. Implementation is kept generic
for OOP.
Default alignment to the global axes.
__Note__: Methods beginning with __, such as __refresh__ are
private-like in Python. This will be explained in class.
"""
def __init__(self, t_origin, *args, **kwargs):
""" Initialize members using an iterable
"""
# Instantaneous data structures
self.n_iter = -1
self.origin = np.zeros(3,)
self.frame_limits = np.eye(3)
self.last_rot_axis = None
self.color_dict = [{'color':'r'}, {'color':'g'}, {'color':'b'}]
self.axes = [None for i in np.arange(3)]
# History, to store and plot as ghosts
self.origin_dict = {}
self.frame_dict = {}
self.axes_dict = {}
# Origin has an update
self.set_origin(t_origin)
# Sets all initial properties before drawing
self.__prepare_draw(*args, **kwargs)
# To solve the origin problem. The dictionary is now numbered from
# 1 to n.
self.n_iter += 1
def __update_history(self):
""" Stores history of the path in dictionaries
Also updates old arrow properties for drawing
"""
# Append to history data structures
self.n_iter += 1
self.origin_dict.update({self.n_iter : self.origin.copy()})
self.frame_dict.update({self.n_iter: self.frame_limits.copy()})
# Copy old axes and update draw properties
temp_axes = [None for i in np.arange(3)]
# Can't list comprehend because of the damn copy thing
for i in np.arange(3):
# copy method on the entire list does not work because
# deepcopy fails for mpl objects
temp_axes[i] = copy.copy(self.axes[i])
# Update linestyles and alphas for the old arrow3Ds
temp_axes[i].set_linestyle('--')
temp_axes[i].set_alpha(0.5)
# Finally update the axes dict with the new arrow3Ds
self.axes_dict.update({self.n_iter: temp_axes})
# Weight alphas exponentially with base of 0.5
# to create the ghost effect
for iterno, iteraxis in self.axes_dict.items():
for i in np.arange(3):
iteraxis[i].set_alpha(0.5*np.exp(iterno-self.n_iter))
def __refresh(self):
""" For the current data, refresh the arrows, to later
redraw the canvas when needed
"""
# Update current axes from the origin and directions
data3D = self.__prepare_data(self.origin, self.frame_limits)
[self.axes[i].update(data3D[3*i], data3D[3*i+1], data3D[3*i+2]) for i in np.arange(3)]
def __prepare_data(self, t_origin, t_frame_limits, *args, **kwargs):
""" Prepare data to be draw on canvas """
# The arrow axes derived from matplotlib requires the data
# in a different format
# Hence i reshape and stack it accordingly
origin3D = t_origin.reshape(1, -1) - 0.0*t_origin.reshape(-1, 1)
data3D = np.dstack((origin3D, origin3D + t_frame_limits))
data3D = data3D.reshape(-1, 2)
return data3D
def __prepare_draw(self, *args, **kwargs):
""" Constructor-like class for drawing the first time on the canvas
New method, just to pass in the args and kwargs for setting the arrows
in mpl
"""
data3D = self.__prepare_data(self.origin, self.frame_limits)
for i in np.arange(3):
# Can't list comprehend because of this damn thing
kwargs.update(self.color_dict[i])
# Update axes now
self.axes[i] = Arrow3D(data3D[3*i], data3D[3*i+1], data3D[3*i+2], *args, **kwargs)
def clear(self):
""" Clear all histories and gives a `new` axes """
self.n_iter = 0
self.origin_dict.clear()
self.frame_dict.clear()
self.axes_dict.clear()
def set_origin(self, t_origin):
""" Sets origin of the frames. Does more checks to exactly do
what I need.
"""
t_origin = np.array(t_origin)
if len(t_origin) == 3:
if not np.allclose(self.origin, t_origin):
# Update only if not the first time setting it
if self.n_iter + 1:
self.__update_history()
self.origin = np.array(t_origin)
self.last_rot_axis = None
if self.n_iter + 1:
self.__refresh()
else:
from warnings import warn
warn("Origin retained because the new origin is the same as the old origin")
else:
raise RuntimeError("Cannot initialize frame3D object with more than 3 coordinates")
def process_origin(self, func, *func_args, **func_kwargs):
""" Takes in a function, and optional arguments
and makes it act on the origin
"""
try:
tmp = func(self.origin, *func_args, **func_kwargs)
except:
raise RuntimeError("Could not process function!")
return 1
# Once the function does not show an exception,
# update history and whatnot
self.__update_history()
self.origin = tmp
self.last_rot_axis = None
self.__refresh()
def process_frames(self, func, *func_args, **func_kwargs):
""" Takes in a function, and optional arguments
and makes it act on the frames
"""
try:
tmp_frame, tmp_rot_axis = func(self.frame_limits, *func_args, **func_kwargs)
except:
raise RuntimeError("Could not process function!")
return 1
# Once the function does not throw an exception,
# update history and whatnot
self.__update_history()
self.frame_limits = tmp_frame
self.last_rot_axis = tmp_rot_axis
self.__refresh()
def draw(self, renderer, clear_flag=True, text_flag=True):
"""Draws the axis on a given canvas
renderer is an axis-like element from mpl
"""
# Clear the renderer first
if clear_flag: renderer.clear()
# Draws the current arrows
[renderer.add_artist(ax) for ax in self.axes]
# Draws the current rotation axis, if not None
if np.any(self.last_rot_axis):
neg_tmp = self.origin - self.last_rot_axis
pos_tmp = self.origin + self.last_rot_axis
renderer.plot([neg_tmp[0], pos_tmp[0]],[neg_tmp[1], pos_tmp[1]],[neg_tmp[2], pos_tmp[2]], 'k.-', alpha=0.2)
# Draws all the previous ghost frames
for _, vals in self.axes_dict.items():
[renderer.add_artist(ax) for ax in vals]
# Draws the current origin
renderer.scatter(self.origin[0], self.origin[1], self.origin[2], s=30, c='k')
if text_flag : renderer.text(self.origin[0]-0.4, self.origin[1]-0.4, self.origin[2]-0.4, "{}".format(self.n_iter + 1), size=20)
# # Draws all the previous origins, but with some transparenct and connecting lines
# for key, vals in self.origin_dict.items():
# renderer.scatter(vals[0], vals[1], vals[2], s=30, c='k', alpha=0.5)
# renderer.text(vals[0]-0.4, vals[1]-0.4, vals[2]-0.4, "{}".format(key), size=20)
# renderer.plot([tmp[0], vals[0]],[tmp[1], vals[1]], [tmp[2], vals[2]], 'k--')
# Draws all the previous origins, but with some transparency and connecting lines
tmp = self.origin
min_dist = np.min(tmp)
max_dist = np.max(tmp)
# Do it this way to also draw the lines connecting them
# The above way is more efficient, but if we need to iterate in reverse,
# we lose more time.
for key in np.arange(self.n_iter, 0, -1):
vals = self.origin_dict[key]
renderer.scatter(vals[0], vals[1], vals[2], s=30, c='k', alpha=0.5)
if text_flag : renderer.text(vals[0]-0.4, vals[1]-0.4, vals[2]-0.4, "{}".format(key), size=20)
renderer.plot([tmp[0], vals[0]],[tmp[1], vals[1]], [tmp[2], vals[2]], 'k--', alpha=0.3)
tmp = vals
min_dist = min(min_dist, np.min(vals))
max_dist = max(max_dist, np.max(vals))
# Sets style in the plot
# Put it in another class maybe?
extension = 1.0
renderer.set_xlim(min(0.0, min_dist) - extension, max(0.0, max_dist) + extension)
renderer.set_ylim(min(0.0, min_dist) - extension, max(0.0, max_dist) + extension)
renderer.set_zlim(min(0.0, min_dist) - extension, max(0.0, max_dist) + extension)
renderer.set_xlabel(r'$x$')
renderer.set_ylabel(r'$y$')
renderer.set_zlabel(r'$z$')
# On recent version of matplotlib, this doesn't work, and is not needed
# renderer.set_aspect('equal')
    def animate(self, t_fig, renderer, func):
        """Animate this frame by repeatedly applying *func* over a time sweep.

        Parameters
        ----------
        t_fig : matplotlib.figure.Figure
            Figure handed to FuncAnimation.
        renderer : Axes3D
            3D axes the frame is redrawn into at every step.
        func : callable
            Frame-processing function forwarded to self.process_frames
            (e.g. rotate_rodrigues, as used later in this notebook).

        Returns
        -------
        matplotlib.animation.FuncAnimation
        """
        # print(func_args)
        # ang = 0.01*i
        # print(func_args[0][i])
        inplane_rate = 20 # in degrees
        axis_rate = 0.0 # in degrees
        # Pseudo-time samples passed to FuncAnimation as the frame values
        time_array = np.linspace(0.01, 18.0, 200)
        def rotate_inplane(frame):
            # Rotation angle (degrees) grows linearly with the frame value
            return inplane_rate * frame
        def rotate_axis(frame):
            # Rotation axis precesses on a cone inclined 30 degrees;
            # with axis_rate = 0 it stays fixed at [1, 0, 0]
            t = np.deg2rad(axis_rate * frame)
            return [np.cos(t), np.sin(t)*np.sin(np.pi/6.0), np.sin(t)*np.cos(np.pi/6.0)]
        def animate_in(i):
            # Apply the user-supplied transform, then redraw without labels
            self.process_frames(func, rotate_inplane(i), about=rotate_axis(i), rad=False)
            # rotate_inplane(i)
            self.draw(renderer, text_flag=False)
        # call the animator. blit=True means only re-draw the parts that have changed.
        anim = animation.FuncAnimation(t_fig, animate_in, frames=time_array)
        return anim
# +
# produce figure
fig = plt.figure(figsize=(5,5), dpi=200)
ax = fig.add_subplot(111, projection='3d')
# define origin
o = np.array([0,0,0])
# the first frame
a = frame_3D([0.0, 0.0, 0.0], mutation_scale=20, arrowstyle='-|>')
a.draw(ax)
# -
# Relocate the frame's origin and redraw; the previous position remains as a ghost
a.set_origin([0.0, 1.0, 2.0])
a.draw(ax)
fig
def set_and_display(t_frame, t_arg):
    # Convenience helper: move t_frame's origin to t_arg and redraw it on the
    # module-level axes `ax`
    t_frame.set_origin(t_arg)
    t_frame.draw(ax)
set_and_display(a, [0.0, 2.0 ,3.0])
print(a.n_iter)
fig
# ## Frame translation
# The first serious attempt at describing a frame with only displacements to the origin of the frame is translation. This is given by the following formula
#
# $$ \mathbf{x}_\mathcal{L} = \mathbf{x} + \mathbf{t} $$
#
# Here $ \mathbf{t} $ is the notation for the translation vector. We show an example of this below.
# Question
def translate(t_o, t_t):
    """Translates origin to different location

    Implements x_L = x + t from the formula above; works for any pair of
    objects that support element-wise addition.

    Parameters
    ----------
    t_o : frame/np.array
        If frame object, then t_t is given by the process function of
        the frame
        Else just a numpy array (vector/collection of vectors) which you
        want to translate
    t_t : list/np.array
        The vector of translation (t) in the formula above. Intended to be
        a numpy array (vector/collection of vectors)

    Returns
    -------
    trans_frame : np.array
        The translated frame
    """
    return t_o + t_t
# ### Using the process functions of the class
# Notice that the `translate` function defined above works for any list, numpy array or even our frame class! Ideally, we pass frames into this translate function like so:
# ```
# a = frame_3D([0.0, 0.0, 0.0], mutation_scale=20, arrowstyle='-|>')
# b = frame_3D([2.0, 1.0, 0.0], mutation_scale=20, arrowstyle='-|>')
# translate(a,b)
# ```
# and this can be done. But in our context, this makes less sense as we do not want to add frames together. Instead, the `frame3D` class exposes a function `process_origin` that takes the desired function along with arguments, like so:
# ```
# a = frame_3D([0.0, 0.0, 0.0], mutation_scale=20, arrowstyle='-|>')
# a.process_origin(translate, [3.0, 0.0, 0.0])
# ```
# where `[3.0, 0.0, 0.0]` in the example above is the second parameter (how much you want to move the origin by) to translate.
# Shift the frame's origin by [3, 0, 0] through the class's processing hook
a.process_origin(translate, [3.0, 0.0, 0.0])
a.draw(ax)
fig
# ## Frame rotation
# Let's do a simple rotation about a single axis (`x`,`y` or `z`) for the `frame3D` object. Alias rotations in this case give rise to the following coordinate transform matrices:
#
# $$ R_{x}(\theta)={\begin{bmatrix}1&0&0\\0&\cos \theta &\sin \theta \\0&-\sin \theta &\cos \theta \\\end{bmatrix}} $$
#
# $$ R_{y}(\theta)={\begin{bmatrix}\cos \theta & 0 & -\sin \theta\\ 0&1&0 \\ \sin\theta & 0 & \cos \theta \\\end{bmatrix}}$$
#
# $$R_{z}(\theta)={\begin{bmatrix}\cos \theta &\sin \theta &0\\-\sin \theta &\cos\theta &0\\0&0&1\\\end{bmatrix}} $$
#
# We can implement these using the `process_frame` function exposed by the `frame3D` object.
# +
# Question
def rotate_about_axis(t_frame, t_angle, about='x', rad=False):
    """Rotates about one of the base axes (alias/coordinate-transform sense)

    Parameters
    ----------
    t_frame : frame/np.array
        If frame object, then t_frame is given by the process function of
        the frame
        Else just a numpy array (vector/collection of vectors) which you
        want to rotate
    t_angle : float
        Angle of rotation, in degrees. Use `rad` to change behavior
    about : char/string
        Rotation axis, as either 'x', 'y' or 'z'. Defaults to 'x'
    rad : bool
        Defaults to False. False indicates that the t_angle is in degrees rather
        than in radians. True indicates you pass in radians.

    Returns
    -------
    rot_frame : np.array
        The rotated frame
    about : np.array
        The unit vector of the chosen base axis

    Raises
    ------
    ValueError
        If `about` is not one of 'x', 'y' or 'z'
    """
    angle = t_angle if rad else np.deg2rad(t_angle)
    if about == "x":
        R = np.array([[1.0, 0.0, 0.0],
                      [0.0, np.cos(angle), np.sin(angle)],
                      [0.0, -np.sin(angle), np.cos(angle)]])
        axis = np.array([1.0, 0.0, 0.0])
    elif about == "y":
        R = np.array([[np.cos(angle), 0.0, -np.sin(angle)],
                      [0.0, 1.0, 0.0],
                      [np.sin(angle), 0.0, np.cos(angle)]])
        axis = np.array([0.0, 1.0, 0.0])
    elif about == "z":
        R = np.array([[np.cos(angle), np.sin(angle), 0.0],
                      [-np.sin(angle), np.cos(angle), 0.0],
                      [0.0, 0.0, 1.0]])
        axis = np.array([0.0, 0.0, 1.0])
    else:
        # Previously an unknown axis fell through to an UnboundLocalError at
        # the return statement; fail loudly with a clear message instead.
        raise ValueError("about must be one of 'x', 'y' or 'z', got {!r}".format(about))
    return R @ t_frame, axis
# -
# ### Test it out
a = frame_3D([0.0, 0.0, 0.0], mutation_scale=20, arrowstyle='-|>')
a.process_origin(translate, [1.0, 0.0, 0.0])
# Rotate the frame's axes 45 degrees about the world z axis
a.process_frames(rotate_about_axis, 45.0, about='z', rad=False)
a.draw(ax)
fig
a.process_origin(translate, [1.0, 0.0, 0.0])
# Then a further 45 degrees about the world x axis
a.process_frames(rotate_about_axis, 45.0, about='x', rad=False)
a.draw(ax)
fig
# ## Rotation using the Rodrigues formula
# Let's do a rotation about any arbitrary axis for the `frame3D` object. If we denote the unit-axis vector as $\mathbf{k}$ about which our frames undergo a rotation of $\theta$, then the rotations in this case give rise to the following coordinate transform matrices:
#
# $$\mathbf {R} =\mathbf {I} +(\sin \theta )\mathbf {K} +(1-\cos \theta )\mathbf {K} ^{2}$$
#
# Once again, we implement these rotations using the `process_frame` function exposed by the `frame3D` object.
# Question
def rotate_rodrigues(t_frame, t_angle, about=[0.0,0.0,2.0], rad=False):
    """Rotates about an arbitrary axis using the Rodrigues formula

    Parameters
    ----------
    t_frame : frame/np.array
        If frame object, then t_frame is given by the process function of
        the frame
        Else just a numpy array (vector/collection of vectors) which you
        want to rotate
    t_angle : float
        Angle of rotation, in degrees. Use `rad` to change behavior
    about : list/np.array
        Rotation axis specified in the world coordinates. Note that the
        norm of this vector scales the rotation angle (so `about` behaves
        like a rotation vector); the returned axis is normalized.
    rad : bool
        Defaults to False. False indicates that t_angle is in degrees rather
        than in radians. True indicates you pass in radians.

    Returns
    -------
    rot_frame : np.array
        The rotated frame
    about : np.array
        The normalized vector about which rotate_rodrigues effects rotation
    """
    angle = t_angle if rad else np.deg2rad(t_angle)
    about = np.array(about)
    about_norm = np.linalg.norm(about)
    about = about / about_norm
    # The angle is scaled by the (pre-normalization) magnitude of the axis
    angle *= about_norm
    k1, k2, k3 = about
    # Skew-symmetric cross-product matrix K of the unit axis
    K = np.array([[0.0, -k3, k2], [k3, 0.0, -k1], [-k2, k1, 0.0]])
    # Rodrigues: R = I + sin(theta) K + (1 - cos(theta)) K^2
    R = np.eye(3) + np.sin(angle) * K + (1.0 - np.cos(angle)) * (K @ K)
    rot_frame = np.dot(R, t_frame)
    return rot_frame, about
# ### Test it out
a = frame_3D([0.0, 0.0, 0.0], mutation_scale=20, arrowstyle='-|>')
a.process_origin(translate, [1.0, 0.0, 0.0])
# Rodrigues rotation of -45 degrees about the world z axis
a.process_frames(rotate_rodrigues, -45.0, about=[0.0, 0.0, 1.0], rad=False)
a.draw(ax)
fig
# ## A mechanics application
# As we have seen, frame rotations are integral in mechanics. One simple real life application is when an elastic rod undergoes torsion.
# 
# (Credits: Wikimedia, under CC-3.0 license)
#
# If fixed at one end, we can analytically derive expressions for the twist at any cross section. In fact, if you apply a couple $T$ at one end of the bar, the twist at any cross section is
#
# $$\varphi = {\frac {T\ell }{J_{\text{T}}G}}$$
#
# where all symbols retain their usual meaning. It is not hard to visualize such frames in our code
# +
fig = plt.figure(figsize=(5,5), dpi=200)
ax = fig.add_subplot(111, projection='3d')
ax.clear()
# Where are the cross sections located?
origin = np.arange(0.0, 3.3, 1.1)*np.sqrt(1)
# collection of frames to plot
frame_collection = [None for i in range(origin.shape[0])]
# total displacement of the angles
# can be done in this case as the equation above is linear
angle = np.linspace(0.5, 120.0, origin.shape[0])
# loop over, rotate according to the formula and plot frames
for i in range(origin.shape[0]):
    # One frame per cross section along the rod's x axis, twisted about x
    frame_collection[i] = frame_3D([origin[i], 0.0, 0.0], mutation_scale=20, arrowstyle='-|>')
    frame_collection[i].process_frames(rotate_rodrigues, angle[i], about=[1.0,0.0,0.0], rad=False)
    frame_collection[i].draw(ax, clear_flag=False, text_flag=False)
# -
# The concept that you just discovered, that change of frames spatially can be expressed as a *rate*, was introduced earlier and called **curvature**.
# ## Identifying rotations
# In the soft filament code, there are a couple of locations where the inverse problem, for rotation, has to be solved. That is we need to find the arbitrary axis and angle around which a given `np.array/frame3D` object has been rotated. These can be done by using the following operators for
# - the angle
# $$ \theta = \arccos\left( \frac{\text{Tr}(\mathbf{R}) - 1}{2}\right) $$
# - the axis
# $$ \mathbf{K} = \left( \frac{\mathbf {R} - \mathbf {R}^T}{2 \sin \theta} \right) $$
#
# We seek to implement this in our framework.
def inverse_rotate(t_frameone, t_frametwo):
    """ Finds the angle and axes of rotation given any two frames

    Parameters
    ----------
    t_frameone : frame/np.array
        If frame object, then t_frame is given by the process function of
        the frame
        Else just a numpy array (vector/collection of vectors) which you
        want to find the angle of
    t_frametwo : frame/np.array
        If frame object, then t_frame is given by the process function of
        the frame
        Else just a numpy array (vector/collection of vectors) which you
        want to find the angle of
        Both obey t_frametwo = R @ t_frameone

    Returns
    -------
    angle : float
        Returns the angle about which the frame rotates
    axis : list/float like
        Returns the axis about which the frames differ
    """
    # Relative rotation: for orthonormal frames, t_frameone.T is its inverse,
    # so R = t_frametwo @ t_frameone^T
    rel_rot = np.dot(t_frametwo, t_frameone.T)
    # Tr(R) = 1 + 2 cos(theta)
    cos_theta = 0.5 * (np.trace(rel_rot) - 1.0)
    angle = np.arccos(cos_theta)
    # The skew-symmetric part of R encodes sin(theta) * K
    skew = (rel_rot - rel_rot.T) / (2.0 * np.sin(angle))
    axis = np.array([-skew[1, 2], skew[0, 2], -skew[0, 1]])
    return angle, axis
# ### Test it out
temp = np.load('frame_data.npz')
q_one = temp['q_one']
q_two = temp['q_two']
# Use fresh names for the results: the original assigned to `a` and `ax`,
# clobbering the frame `a` and the matplotlib Axes3D `ax` that the following
# cells keep using.
rot_angle, rot_axis = inverse_rotate(q_one, q_two)
# np.rad2deg(rot_angle)
# rot_axis * np.sqrt(38.0)
# ## A more complicated scenario involving temporal changes
# In your code, the frames will change with time and space. The following code attempts to do that
a = frame_3D([0.0, 0.0, 0.0], mutation_scale=20, arrowstyle='-|>')
a.process_origin(translate, [1.0, 0.0, 0.0])
a.draw(ax)
# Spin the frame over pseudo-time using frame_3D.animate (defined above)
anim = a.animate(fig, ax, rotate_rodrigues)
# anim.save('frame.mp4', fps=30,
#           extra_args=['-vcodec', 'h264',
#                       '-pix_fmt', 'yuv420p'])
# HTML(anim.to_jshtml())
| lectures/04_elastica/code/frame_transformations_simple_answers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# -*- coding: utf-8 -*-
### CLASS EXAMPLE:
### Date: 07/07/2020
### By: <NAME>
# +
import pandas as pd
base = pd.read_csv('../../00_datasets/credit_data.csv')
# -
## Replace the negative values in the age column with the mean age
## (computed without taking the negative ages into account):
media = base['age'][base['age'] > 0].mean()
base.loc[ base.age < 0, 'age' ] = media
## Split the predictors (features) from the class (target):
previsores = base.iloc[: , 1:4].values
classe = base.iloc[:, 4].values
# +
## Fill in the missing values of the age column with the column mean:
from sklearn.impute import SimpleImputer
import numpy as np
simple_imputer = SimpleImputer(missing_values= np.nan, strategy= 'mean')
simple_imputer = simple_imputer.fit( previsores[:, 0:3] )
previsores[:, 0:3] = simple_imputer.transform( previsores[:, 0:3] )
# -
# After some tests of my own, I found that this preprocessing done by the teacher was unnecessary.
## I also found that this preprocessing reduced the algorithm's accuracy by 0.2%
"""
## Padronizando os dados:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
previsores = scaler.fit_transform( previsores )
"""
# +
## Split the data into a training set and a test set (75% / 25%):
from sklearn.model_selection import train_test_split
previsores_treinamento, previsores_teste, classe_treinamento, classe_teste = train_test_split( previsores, classe,
                                                                                               test_size = 0.25,
                                                                                               random_state = 0)
# +
## Apply the random forest (an ensemble of decision trees):
from sklearn.ensemble import RandomForestClassifier
classificador = RandomForestClassifier( n_estimators=40 , criterion = 'entropy', random_state=0 )
classificador.fit(previsores_treinamento, classe_treinamento)
previsoes = classificador.predict(previsores_teste)
# +
## Evaluate the algorithm:
from sklearn.metrics import accuracy_score, confusion_matrix
precisao = accuracy_score(classe_teste, previsoes)
matriz = confusion_matrix(classe_teste, previsoes)
# -
precisao
matriz
| 03_aprendizagem-por-arvore-de-decisao-e-random-forest/Random_Forest/.ipynb_checkpoints/random_forest_credit_data-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] origin_pos=0 slideshow={"slide_type": "slide"}
# # Automatic Differentiation
# Differentiation is a crucial step in nearly all deep learning optimization algorithms.
#
# Deep learning frameworks speed up this work
# by **automatically calculating derivatives**, i.e., *automatic differentiation*.
# In practice,
# based on our designed model
# the system builds a **computational graph**,
# tracking which data combined through
# which operations to produce the output.
# Automatic differentiation enables the system to subsequently backpropagate gradients.
# + [markdown] slideshow={"slide_type": "slide"}
# ## A Simple Example
#
# **Differentiating the function
# $y = 2\mathbf{x}^{\top}\mathbf{x}$
# with respect to the column vector $\mathbf{x}$.**
#
# To start, let us create the variable `x` and assign it an initial value.
# + origin_pos=2 slideshow={"slide_type": "-"} tab=["pytorch"]
import torch
x = torch.arange(4.0)  # a float vector: [0., 1., 2., 3.]
x
# + [markdown] origin_pos=4 slideshow={"slide_type": "slide"}
# **Before we even calculate the gradient
# of $y$ with respect to $\mathbf{x}$,
# we will need a place to store it.**
# It is important that we do not allocate new memory
# every time we take a derivative with respect to a parameter
# because we will **often update the same parameters**
# thousands or millions of times
# and could quickly run out of memory.
# Note that a gradient of a scalar-valued function
# with respect to a vector $\mathbf{x}$
# is itself vector-valued and has the **same shape** as $\mathbf{x}$.
#
# + origin_pos=6 slideshow={"slide_type": "-"} tab=["pytorch"]
x.requires_grad_(True)  # Same as `x = torch.arange(4.0, requires_grad=True)`
x.grad  # The default value is None (no backward pass has run yet)
# + [markdown] origin_pos=8 slideshow={"slide_type": "slide"}
# **Now let us calculate** $y = 2\mathbf{x}^{\top}\mathbf{x}$
#
# + origin_pos=10 tab=["pytorch"]
y = 2 * torch.dot(x, x)
y
# + [markdown] origin_pos=12 slideshow={"slide_type": "slide"}
# Since `x` is a vector of length 4,
# a dot product of `x` and `x` is performed,
# yielding the scalar output that we assign to `y`.
#
# Next, **we can automatically calculate the gradient of `y`
# with respect to each component of `x`**
# by calling the function for backpropagation and printing the gradient.
# + origin_pos=14 tab=["pytorch"]
y.backward()  # populates x.grad with dy/dx
x.grad
# + [markdown] origin_pos=16 slideshow={"slide_type": "slide"}
# We know that **the gradient of the function $y = 2\mathbf{x}^{\top}\mathbf{x}$
# with respect to $\mathbf{x}$ should be $4\mathbf{x}$.**
#
# Let us quickly verify that our desired gradient was calculated correctly.
# + origin_pos=18 tab=["pytorch"]
x.grad == 4 * x
# + [markdown] origin_pos=20 slideshow={"slide_type": "slide"}
# **Now let us calculate another function of `x`.**
# Notice that `y` should be a scalar
# + origin_pos=22 tab=["pytorch"]
# PyTorch accumulates the gradient by default; we need to clear the previous values
x.grad.zero_()
y = x.sum()
y.backward()
x.grad  # gradient of sum(x) is a vector of ones
# + [markdown] slideshow={"slide_type": "slide"}
# ## Detach from the computation
# If you would like to analyze `x` (with grad), you need to detach it from the current graph.
# -
x
x.numpy()  # raises a RuntimeError: a tensor that requires grad must be detached first
# + [markdown] slideshow={"slide_type": "slide"}
# Call `.detach()`
# -
x.detach()  # same data, detached from the autograd graph
x.detach().numpy()
| Tutorial-02/TUT2-3-automatic-differentiation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# ### This notebook deals with the instability arising out using the frs BC for traces and tries to find out if there is a mismatch between the input T and S and that at our boundaries at the time when the model is initialised (IC state is equal to the BC at time t = 0)
#
# ## The second part of this notebook also deals with the remaking of votemper and vosaline BC for NEMO (to remove the overturning effect being created in the model)
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import netCDF4 as nc
import xarray as xr
import matplotlib.cm as cm
from scipy.interpolate import interp1d
from salishsea_tools import (nc_tools, gsw_calls, viz_tools)
# #### Let us load the IC for our NEMO model
IC_file = nc.Dataset('/ocean/ssahu/CANYONS/wcvi/initial_conditions/West_coast_NEMO_IC_temperature_salinity_nomask_JP.nc')
votem_IC = IC_file.variables['votemper'][:];
vosal_IC = IC_file.variables['vosaline'][:];
votem_IC.shape
# #### Now let us load the T and S for the BC file (Let's begin the exercise by loading only the BC file of the west boundary)
BC_west = nc.Dataset('/ocean/ssahu/CANYONS/bdy_files/3d_NEMO_west_m04.nc');
votem_west_BC = BC_west.variables['votemper'][:];
vosal_west_BC = BC_west.variables['vosaline'][:];
votem_west_BC.shape
# #### By principle the outer values at the very first time step should be equal to the value of the IC (let's test that)
# +
# First time step of each field
BC_check = votem_west_BC[0,:,:,:]; print(BC_check.shape)
IC_check = votem_IC[0,:,:]; print (IC_check.shape)
# -
# Bulk statistics: mean over the whole western BC rim ...
np.mean(BC_check)
# ... versus the mean over the westernmost 4 columns of the IC
np.mean(IC_check[:,:,:4])
# #### So they are not same and our fears just became real
BC_check[0,...]
np.where(IC_check[0,...] == np.max(IC_check[0,...]))
BC_check[0,0,0]
np.where(IC_check[0,...] == BC_check[0,...])
np.where(BC_check[0,...] == np.max(BC_check[0,...]))
IC_check[0,0,60]
BC_check[0,3,5]
np.where(BC_check == IC_check)
# ## The temperature and salinity doesnot seem to work in the BC file and we have a overturning effect of salinity in the south BC file;
#
# So we decide to make the BC files of temerature and salinity from the JP files straigh similar to the IC file but with a separate loop over time and and not from the numpy arrays (saved earlier)
# +
import numpy as np
import netCDF4 as nc
import xarray as xr
from scipy.interpolate import griddata, interp1d
from salishsea_tools import nc_tools,viz_tools
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from fancyimpute import KNN
# +
fname = '/home/ssahu/saurav/JP_BC/cat_42_days_T.nc';
gridT = xr.open_dataset(fname);  # 42-day concatenated T-grid fields from JP's run
# -
gridT.keys
# +
#### Load JP's mask
mask = nc.Dataset('/data/mdunphy/NEP036-N30-OUT/INV/mesh_mask.nc');
# t/u/v-point masks sliced to the WCVI sub-window (NEMO convention is
# presumably 1 = ocean, 0 = land -- confirm against the mesh_mask file)
tmask_JP = mask.variables['tmask'][0,:,55:400,446:701];
umask_JP = mask.variables['umask'][0,:,55:400,446:701];
vmask_JP = mask.variables['vmask'][0,:,55:400,446:701];
print(tmask_JP.shape, umask_JP.shape, vmask_JP.shape)
# +
#### Slice out our domain of WCVI from JP and mask it, marking the zero (land) values as NaN
votemper_JP = np.array(gridT['votemper'][:,:,55:400,446:701]);
vosaline_JP = np.array(gridT['vosaline'][:,:,55:400,446:701]);
# NOTE(review): assigning a masked array back into a plain ndarray keeps only
# the data and drops the mask -- verify this is intended; the NaN flagging of
# zeros below is what actually removes the land points.
for i in np.arange(votemper_JP.shape[0]):
    votemper_JP[i,...] = np.ma.masked_array(votemper_JP[i,...], mask = tmask_JP[...]);
    vosaline_JP[i,...] = np.ma.masked_array(vosaline_JP[i,...], mask = tmask_JP[:,...]);
# Use np.nan directly rather than the string list ['Nan'] (same result, but
# without relying on numpy's implicit str -> float coercion)
votemper_JP[votemper_JP == 0] = np.nan;
vosaline_JP[vosaline_JP == 0] = np.nan;
glamt_bc_JP = np.array(gridT['nav_lon'][55:400,446:701]);
gphit_bc_JP = np.array(gridT['nav_lat'][55:400,446:701]);
deptht_JP = np.array(gridT['deptht'][:]);
# -
votemper_JP.shape
votemper_JP[0,:,0,0]  # spot-check a single water column
# +
#### Load the WCVI points
fname_wcvi = '/ocean/ssahu/CANYONS/wcvi/grid/coordinates.nc'
with nc.Dataset(fname_wcvi, 'r') as coord:
    gphit_wcvi = coord.variables['gphit'][0,...];  # T-point latitudes
    glamt_wcvi = coord.variables['glamt'][0,...];  # T-point longitudes
# Sanity check: corner coordinates of JP's window vs the WCVI grid
print((glamt_bc_JP[0,0],gphit_bc_JP[0,0]), (glamt_wcvi[0,0],gphit_wcvi[0,0]))
# +
#Specify the rimwidth
N = 4
# Four N-cell rims of the WCVI grid: left/right are column strips,
# top/bottom are row strips
glamt_wcvi_bc_left = glamt_wcvi[:,:N]; gphit_wcvi_bc_left = gphit_wcvi[:,:N];
glamt_wcvi_bc_right = glamt_wcvi[:,-N:]; gphit_wcvi_bc_right = gphit_wcvi[:,-N:];
glamt_wcvi_top = glamt_wcvi[:N,:]; gphit_wcvi_top = gphit_wcvi[:N,:];
glamt_wcvi_bottom = glamt_wcvi[-N:,:]; gphit_wcvi_bottom = gphit_wcvi[-N:,:];
print(glamt_wcvi_bc_left.shape);
print(glamt_wcvi_bc_right.shape);
# BUG FIX: was glamt_wcvi_bc_top, a name never defined (NameError); the
# variable assigned above is glamt_wcvi_top
print(glamt_wcvi_top.shape);
print(glamt_wcvi_bottom.shape);
# +
#For the left boundary
X = glamt_bc_JP.flatten();
Y = gphit_bc_JP.flatten();
points = (X[:],Y[:]);  # scattered (lon, lat) source points on JP's grid
xi_left = (glamt_wcvi_bc_left.flatten(), gphit_wcvi_bc_left.flatten());  # target rim points
votemper_bc_left = np.zeros((votemper_JP.shape[0], votemper_JP.shape[1], glamt_wcvi_bc_left.shape[0], glamt_wcvi_bc_left.shape[1]));
vosaline_bc_left = np.zeros((vosaline_JP.shape[0], vosaline_JP.shape[1], glamt_wcvi_bc_left.shape[0], glamt_wcvi_bc_left.shape[1]));
# Horizontal linear interpolation, one (time, depth) slab at a time
for p in np.arange(votemper_JP.shape[0]):
    for i in np.arange(votemper_JP.shape[1]):
        votemper_bc_left[p,i,...] = np.reshape(griddata(points, votemper_JP[p,i,...].flatten(), xi_left, method= 'linear'), glamt_wcvi_bc_left.shape)
        vosaline_bc_left[p,i,...] = np.reshape(griddata(points, vosaline_JP[p,i,...].flatten(), xi_left, method= 'linear'), glamt_wcvi_bc_left.shape)
# -
vosaline_bc_left[0,:,0,0]
# NOTE(review): the names used below (vosaline_bc, votemper_bc, votemper_ic,
# vosaline_ic) are not defined anywhere in this notebook -- as written this
# cell raises a NameError. It looks like leftover code meant to operate on
# votemper_bc_left / vosaline_bc_left; confirm before running.
for i in np.arange(vosaline_bc.shape[0]):
    for j in np.arange(vosaline_bc.shape[1]):
        votemper_ic[i,j,...] = KNN(k=3).complete(votemper_bc[i,j,...]);
        vosaline_ic[i,j,...] = KNN(k=3).complete(vosaline_bc[i,j,...]);
vosaline_bc[0,:,0,0]
# +
vosaline_bc[np.where(np.isnan(vosaline_bc))]=0;
votemper_bc[np.where(np.isnan(votemper_bc))]=0;
# Replace any remaining zeros with the value at the previous index of axis 1
# (axis 1 appears to be depth here -- confirm)
for t in np.arange(votemper_bc.shape[0]):
    for i in np.arange(votemper_bc.shape[1]):
        for p in np.arange(votemper_bc.shape[2]):
            for l in np.arange(votemper_bc.shape[3]):
                if votemper_bc[t,i,p,l] == 0:
                    votemper_bc[t,i,p,l] = votemper_bc[t,i-1,p,l]
                else:
                    continue
for t in np.arange(vosaline_bc.shape[0]):
    for i in np.arange(vosaline_bc.shape[1]):
        for p in np.arange(vosaline_bc.shape[2]):
            for l in np.arange(vosaline_bc.shape[3]):
                if vosaline_bc[t,i,p,l] == 0:
                    vosaline_bc[t,i,p,l] = vosaline_bc[t,i-1,p,l]
                else:
                    continue
# -
# #### Making the south boundary from the start
import numpy as np
import numpy.ma as ma
import netCDF4 as nc
import matplotlib.pyplot as plt
import matplotlib as mpl
from salishsea_tools import viz_tools, geo_tools,nc_tools
from scipy.interpolate import griddata, interp1d
import matplotlib.cm as cm
import xarray as xr
from grid_alignment import calculate_initial_compass_bearing as cibc
path = '/home/ssahu/saurav/JP_BC/'
gridT = xr.open_dataset(path+'cat_42_days_T.nc')
gridT.coords
# Re-slice the raw (unsmoothed) fields and coordinates for the WCVI window
votemper_unfiltered = np.array(gridT['votemper'][:,:,55:400,446:701]);
vosaline_unfiltered = np.array(gridT['vosaline'][:,:,55:400,446:701]);
glamt_bc_JP = np.array(gridT['nav_lon'][55:400,446:701]);
gphit_bc_JP = np.array(gridT['nav_lat'][55:400,446:701]);
# +
print(votemper_unfiltered.shape)
# Flag the land/fill zeros as NaN before the temporal smoothing below.
# BUG FIX: the original quadruple loops used `==` (a comparison, i.e. a
# no-op) instead of `=`, so the zeros were never actually replaced. The
# vectorized assignments below perform the intended replacement -- and do it
# in one C-level pass instead of a four-deep Python loop.
votemper_unfiltered[votemper_unfiltered == 0] = np.nan
vosaline_unfiltered[vosaline_unfiltered == 0] = np.nan
# +
# 1-2-1 running average in time to filter high-frequency variability
votemper = np.empty(votemper_unfiltered.shape);
vosaline = np.empty(vosaline_unfiltered.shape);
# NOTE(review): at idx == 0 the negative indices wrap around, so the LAST
# time slot receives a blend of the last two snapshots and the FIRST one.
# The first slot is reset below, but the last one keeps the wrapped blend --
# confirm that is acceptable.
for idx, val in enumerate(votemper_unfiltered[:,...]):
    votemper[idx-1,...] = votemper_unfiltered[idx-2,...]*0.25 + votemper_unfiltered[idx-1,...]*0.5 + \
                          votemper_unfiltered[idx,...]*0.25;
    vosaline[idx-1,...] = vosaline_unfiltered[idx-2,...]*0.25 + vosaline_unfiltered[idx-1,...]*0.5 + \
                          vosaline_unfiltered[idx,...]*0.25;
votemper[0,...] = votemper_unfiltered[0,...];
vosaline[0,...] = vosaline_unfiltered[0,...];
print(votemper.shape, vosaline.shape)
# -
# +
fname_wcvi = '/ocean/ssahu/CANYONS/wcvi/grid/coordinates.nc'
with nc.Dataset(fname_wcvi, 'r') as coord:
    gphit_wcvi = coord.variables['gphit'][0,...];
    glamt_wcvi = coord.variables['glamt'][0,...];
# +
X = glamt_bc_JP.flatten()
Y = gphit_bc_JP.flatten()
points = (X[:],Y[:])
#Number of points to trim off the ends
N = 5
# First N-1 = 4 rows: the southern rim of the WCVI grid
glamt_wcvi_bc_bottom = glamt_wcvi[0:N-1,:]; gphit_wcvi_bc_bottom = gphit_wcvi[0:N-1,:];
xi_bottom = (glamt_wcvi_bc_bottom.flatten(), gphit_wcvi_bc_bottom.flatten());
#votemper_wcvi = np.zeros((43,50,Nx,Ny));
votemper_wcvi_bottom = np.empty((43,50,glamt_wcvi_bc_bottom.shape[0], glamt_wcvi_bc_bottom.shape[1]));
vosaline_wcvi_bottom = np.empty((43,50,glamt_wcvi_bc_bottom.shape[0], glamt_wcvi_bc_bottom.shape[1]));
# Interpolate every (time, depth) slab onto the southern-rim points
for p in np.arange(votemper_wcvi_bottom.shape[0]):
    for i in np.arange(votemper_wcvi_bottom.shape[1]):
        votemper_wcvi_bottom[p,i,...] = np.reshape(griddata(points, votemper[p,i,...].flatten(), xi_bottom, method= 'linear'), glamt_wcvi_bc_bottom.shape)
        vosaline_wcvi_bottom[p,i,...] = np.reshape(griddata(points, vosaline[p,i,...].flatten(), xi_bottom, method= 'linear'), glamt_wcvi_bc_bottom.shape)
# -
print(votemper_wcvi_bottom.shape)  # BUG FIX: was votemper_wcvi_bottomi_bottom (NameError typo)
| grid/Dealing_with_FRS_initial_T_and_S_remaking_temp_sal_BC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py37
# language: python
# name: py37
# ---
# +
import sys
sys.path.append('../..')
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import pytorch_lightning as pl
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.metrics.functional import accuracy
import torchdiffeq
from torchdyn.models import *; from torchdyn import *
# -
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# +
# MNIST at its native 28x28 resolution, converted to [0, 1] tensors
batch_size=64
size=28
path_to_data='../../data/mnist_data'
all_transforms = transforms.Compose([
    transforms.Resize(size),
    transforms.ToTensor(),
])
train_data = datasets.MNIST(path_to_data, train=True, download=True,
                            transform=all_transforms)
test_data = datasets.MNIST(path_to_data, train=False,
                           transform=all_transforms)
trainloader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
testloader = DataLoader(test_data, batch_size=batch_size, shuffle=True)
# -
# ## Train loop
class Learner(pl.LightningModule):
    """PyTorch-Lightning wrapper that trains and evaluates the neural-ODE model.

    Logs loss, accuracy and the ODE solver's number of function evaluations
    (NFE) to the attached logger at every step, keyed by a manual step counter.
    """
    def __init__(self, model:nn.Module):
        super().__init__()
        self.lr = 1e-3      # Adam learning rate
        self.model = model
        self.iters = 0      # manual global-step counter used for logging
    def forward(self, x):
        return self.model(x)
    def training_step(self, batch, batch_idx):
        self.iters += 1
        x, y = batch
        x, y = x.to(device), y.to(device)
        y_hat = self.model(x)
        loss = nn.CrossEntropyLoss()(y_hat, y)
        acc = accuracy(y_hat, y)
        # NOTE(review): reads the *global* `model` (index 2 is the NeuralDE in
        # the Sequential defined below), not self.model -- this only works
        # because both names refer to the same object. Read and reset the
        # number of function evaluations for logging.
        nfe = model[2].nfe ; model[2].nfe = 0
        tqdm_dict = {'train_loss': loss, 'accuracy': acc, 'NFE': nfe}
        self.logger.experiment.log(step=self.iters, row=tqdm_dict)
        return {'loss': loss}
    def test_step(self, batch, batch_nb):
        x, y = batch
        x, y = x.to(device), y.to(device)
        y_hat = self(x)
        test_acc = accuracy(y_hat, y)
        test_loss = nn.CrossEntropyLoss()(y_hat, y)
        metrics = {'test_loss': test_loss, 'test_accuracy': test_acc}
        self.logger.experiment.log(step=self.iters, row=metrics)
        return {'test_loss': test_loss}
    def test_epoch_end(self, outputs):
        # Average the per-batch test metrics over the whole test set
        avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
        avg_acc = torch.stack([x['test_accuracy'] for x in outputs]).mean()
        metrics = {'avg_test_loss': avg_loss, 'avg_test_accuracy': avg_acc}
        self.logger.experiment.log(step=self.iters, row=metrics)
        return {'avg_test_loss': avg_loss}
    def validation_step(self, batch, batch_nb):
        x, y = batch
        x, y = x.to(device), y.to(device)
        y_hat = self(x)
        val_acc = accuracy(y_hat, y)
        val_loss = nn.CrossEntropyLoss()(y_hat, y)
        metrics = {'val_loss': val_loss, 'val_accuracy': val_acc}
        self.logger.experiment.log(step=self.iters, row=metrics)
        return {'val_loss': val_loss, 'val_accuracy': val_acc}
    def validation_epoch_end(self, outputs):
        # Average the per-batch validation metrics
        avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
        avg_acc = torch.stack([x['val_accuracy'] for x in outputs]).mean()
        metrics = {'avg_val_loss': avg_loss, 'avg_val_accuracy': avg_acc}
        self.logger.experiment.log(step=self.iters, row=metrics)
        return {'avg_val_loss': avg_loss}
    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.lr, weight_decay=1e-4)
    def train_dataloader(self):
        self.trloader_len = len(trainloader)
        return trainloader
    def val_dataloader(self):
        self.vloader_len = len(testloader) # using test as val here
        return testloader
    def test_dataloader(self):
        self.test_tloader_len = len(testloader)
        return testloader
# ## Model
# +
# Vector field of the neural ODE. Conv2d(12, 6, ...) takes 12 input channels:
# presumably the 6 state channels plus 6 channels concatenated by
# DataControl -- confirm against torchdyn's DataControl documentation.
func = nn.Sequential(DataControl(),
                     nn.Conv2d(12, 6, 3, padding=1),
                     nn.Tanh(),
                     ).to(device)
neuralDE = NeuralDE(func,
                    solver='rk4',
                    sensitivity='adjoint',
                    s_span=torch.linspace(0, 1, 30)).to(device)
# Augmenter pads the 1-channel MNIST input with 5 extra channels (6 total).
# The NeuralDE must stay at index 2: Learner.training_step reads model[2].nfe.
model = nn.Sequential(Augmenter(augment_dims=5),
                      nn.BatchNorm2d(6),
                      neuralDE,
                      nn.Conv2d(6, 1, 3, padding=1),
                      nn.Flatten(),
                      nn.Dropout(p=0.5),
                      nn.Linear(28*28, 10)).to(device)
# -
logger = WandbLogger(project='torchdyn-mnist-bench', log_model=True) # feel free to comment out or use a different logging scheme :)
# +
learn = Learner(model)
trainer = pl.Trainer(max_epochs=20,
                     logger=logger,
                     benchmark=True,
                     #limit_test_batches=0.25,
                     val_check_interval=0.05,  # validate every 5% of a training epoch
                     gpus=1,
                     progress_bar_refresh_rate=1
                     )
trainer.fit(learn)
# -
trainer.test(learn)
| test/benchmark/mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from IPython.display import display
from pathlib import Path
import xlrd
input_list = [
    Path('sokul_1.xls'),
    Path('sokul_2.xls'),
]
result_df = None
for in_path in input_list:
    # Some of this supplier's .xls files are cp1251-encoded; fall back to an
    # explicit xlrd open with an encoding override when the default fails.
    try:
        xl = pd.ExcelFile(in_path)
    except UnicodeDecodeError:
        xlrd_book = xlrd.open_workbook(in_path, on_demand=True, encoding_override="cp1251")
        xl = pd.ExcelFile(xlrd_book)
    df = xl.parse()
    # Locate the header row by searching the second column for the
    # header marker cell, then re-parse with that row as the header.
    header_str = "Код"
    cols = list(df.columns)
    header_col = cols[1]
    header_row = df.index.get_loc(df.index[df[header_col] == header_str][0])
    df = xl.parse(header=header_row + 1)
    df = df.rename(columns={
        'Товар': 'Наименование',
        'Шт./упак': 'Упак.',
    })
    df.insert(loc=df.columns.get_loc("Наименование") + 1, column='Производитель', value=pd.Series(np.nan, dtype="string"))
    df.insert(loc=df.columns.get_loc("Цена с НДС") + 1, column='НДС', value=20)
    # Non-numeric price cells (section headers etc.) become NaN
    df['Цена с НДС'] = df['Цена с НДС'].apply(lambda x: x if isinstance(x, float) else np.nan)
    # Rows with neither a price nor a customs code are manufacturer section
    # headers; carry the manufacturer name down onto the product rows.
    supplier = np.nan
    for index, row in df.iterrows():
        if pd.isna(row['Цена с НДС']) and pd.isna(row['Код УКТ']):
            supplier = row['Наименование']
        else:
            df.at[index, 'Производитель'] = supplier
    df = df[(df['Цена с НДС'].notnull())]
    df = df[['Код', 'Наименование', 'Производитель', 'Цена с НДС', 'НДС', 'Упак.']]
    if result_df is None:
        result_df = df
    else:
        # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
        # pd.concat is the supported equivalent.
        result_df = pd.concat([result_df, df], ignore_index=True)
display(result_df)
# -
| plconv/convert/sokul.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: mrna_display_py
# language: python
# name: mrna_display_py
# ---
# +
import os, sys, inspect
# Make the repository root importable so `scripts_py` resolves regardless of
# the directory this notebook is launched from.
working_dir = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(working_dir)
sys.path.insert(0, parent_dir)
from scripts_py import utility_functions as uf
# +
# Alanine scan of the top peptide hit: each list entry substitutes one
# position of the parent sequence (the first entry is the unchanged parent).
top_peptide = 'VWDPRTFYLSRI'
ala_scan_peptides = [
    'VWDPRTFYLSRI', 'AWDPRTFYLSRI', 'VADPRTFYLSRI',
    'VWAPRTFYLSRI', 'VWDARTFYLSRI', 'VWDPATFYLSRI',
    'VWDPRAFYLSRI', 'VWDPRTAYLSRI', 'VWDPRTFALSRI',
    'VWDPRTFYASRI', 'VWDPRTFYLARI', 'VWDPRTFYLSAI',
    'VWDPRTFYLSRA']
# Highlight the mismatched (substituted) positions relative to the parent.
formated_ala_scan_peptides = [
    uf.format_sequence_based_on_mismatches(top_peptide, variant)
    for variant in ala_scan_peptides
]
for formatted in formated_ala_scan_peptides:
    print(formatted)
# +
# Same comparison at the DNA level: mark codon changes relative to the top
# sequence and report each variant's A/U count alongside it.
top_dna = 'AATCCTAGACAAATAAAACGTCCTCGGATCCCAAAC'
ala_scan_dna = [
    'AATCCTAGACAAATAAAACGTCCTCGGATCCCAAAC', 'AATGCGGCTCAGATAAAAGGTGCGCGGATCCCACAC',
    'AATGCGGCTCAGATAAAAGGTGCGCGGATCCCACGC', 'AATGCGGCTCAGATAAAAGGTGCGCGGATCCGCCAC',
    'AATGCGGCTCAGATAAAAGGTGCGCGGCGCCCACAC', 'AATGCGGCTCAGATAAAAGGTGCGCGCATCCCACAC',
    'AATGCGGCTCAGATAAAAGGTCGCCGGATCCCACAC', 'AATGCGGCTCAGATAAAACGCGCGCGGATCCCACAC',
    'AATGCGGCTCAGATACGCGGTGCGCGGATCCCACAC', 'AATGCGGCTCAGCGCAAAGGTGCGCGGATCCCACAC',
    'AATGCGGCTCGCATAAAAGGTGCGCGGATCCCACAC', 'AATGCGCGCCAGATAAAAGGTGCGCGGATCCCACAC',
    'AATCGCGCTCAGATAAAAGGTGCGCGGATCCCACAC', 'CGCGCGGCTCAGATAAAAGGTGCGCGGATCCCACAC']
formated_ala_scan_dna = []  # kept for parity with the peptide cell (unused)
for variant in ala_scan_dna:
    highlighted = uf.format_sequence_based_on_mismatches(top_dna, variant)
    at_count = uf.get_au_count(variant)
    print(f"{highlighted} {at_count}")
# -
| notebooks/ala_scan.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Data integration with ICESat-2 - Part II
#
# __Credits__
# * <NAME>
# * <NAME>
# * <NAME>
#
# ```{admonition} Learning Objectives
# **Goals**
# - Access NSIDC data sets and acquire IS-2 using icepyx
# - Analyze point and raster data together with IS-2
# - Advanced visualizations of multiple datasets
# ```
#
# For this tutorial, feel free to run the code along with us as we live code by downsizing the zoom window and splitting your screen (or using two screens). Or you can simply watch the zoom walkthrough. Don't worry if you fall behind on the code. The notebook is standalone and you can easily run the code at your own pace another time to catch anything you missed.
# ## Python environment
# ### GrIMP libraries
# This notebook makes use of two packages for working with data from the Greenland Ice Mapping Project (GrIMP) that are stored remotely at NSIDC. These packages are:
# - [grimpfunc](https://github.com/fastice/grimpfunc): Code for searching NISDC catalog for GrIMP data, subsetting the data, and working with flowlines.
# - [nisardev](https://github.com/fastice/nisardev): Classes for working with velocity and image data.
# +
import numpy as np
import nisardev as nisar
import os
import matplotlib.colors as mcolors
import grimpfunc as grimp
import matplotlib.pyplot as plt
import geopandas as gpd
import pandas as pd
from datetime import datetime
import numpy as np
import xarray as xr
import importlib
import requests
import pyproj
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import panel
from dask.diagnostics import ProgressBar
import h5py
import random
import ipyleaflet
from ipyleaflet import Map,GeoData,LegendControl,LayersControl,Rectangle,basemaps,basemap_to_tiles,TileLayer,SplitMapControl,Polygon,Polyline
import ipywidgets
import datetime
import re
# Show dask progress bars for all computations and load panel's notebook
# front-end extension.
ProgressBar().register()
panel.extension()
# -
# Sometimes the above cell will return an error about a missing module. If this happens, try restarting the kernel and re-running the above cells.
# ## NSIDC Login
# For remote access to the velocity data at NSIDC, run these cells to login with your NASA EarthData Login (see [NSIDCLoginNotebook](https://github.com/fastice/GRiMPNotebooks/blob/master/NSIDCLoginNotebook.ipynb) for further details). These cells can skipped if all data are being accessed locally. First define where the cookie files need for login are saved.
#
# These environment variables are used by GDAL for remote data access via [vsicurl](https://gdal.org/user/virtual_file_systems.html).
# GDAL fetches the remote GrIMP files via /vsicurl/; point its HTTP cookie
# file and jar at the jar produced by the NASA EarthData login below.
cookie_jar = os.path.expanduser('~/.grimp_download_cookiejar.txt')
env = dict(GDAL_HTTP_COOKIEFILE=cookie_jar,
           GDAL_HTTP_COOKIEJAR=cookie_jar)
os.environ.update(env)
# Now enter credentials, which will create the cookie files above as well as *.netrc* file with the credentials.
# #!rm ~/.netrc
# Interactive NASA EarthData login; creates the cookie jar configured above
# plus a ~/.netrc entry so subsequent vsicurl requests authenticate.
myLogin = grimp.NASALogin()
myLogin.view()
# ## Load glacier termini
# In this section we will read shapefiles stored remotely at NSIDC.
#
# The first step is to get the *urls* for the files in the NSDIC catalog.
# Search NASA's CMR catalog for the GrIMP terminus shapefile granules.
myTerminusUrls = grimp.cmrUrls(mode='terminus') # mode image restricts search to the image products
myTerminusUrls.initialSearch();
# Using the ```myTerminusUrls.getURLS()``` method to return the urls for the shape files, read in termini and store in a dict, `myTermini`, by year.
# + tags=[]
# Read each terminus shapefile straight from NSIDC over HTTPS and key the
# resulting GeoDataFrames by year.
myTermini = {}
for url in myTerminusUrls.getURLS():
    year = os.path.basename(url).split('_')[1]  # Extract year from name
    # GDAL's vsicurl file system expects the URL appended directly after the
    # prefix ('/vsicurl/https://...'); the previous '/vsicurl/&url={url}'
    # form is not valid vsicurl syntax and fails to open.
    myTermini[year] = gpd.read_file(f'/vsicurl/{url}')  # Add terminus to data frame
    print(f'/vsicurl/{url}')
# -
# Preview one terminus in its native projection, reproject every year to
# geographic lat/lon (EPSG:4326) in place, then preview again.
myTermini['2000'].plot()
for key in list(myTermini):
    myTermini[key] = myTermini[key].to_crs('EPSG:4326')
myTermini['2000'].plot()
# ## Flowlines
# In this section we will work with two collections of flowlines from [<NAME> al., 2020](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2020GL090112). The full set of flowlines for all of Greenland can be downloaded from [Zenodo](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2020GL090112).
#
# The data are stored in the subdirectory *shpfiles* and can be read as follows:
# Read the two local glacier flowline shapefiles and reproject to lat/lon.
glaciers = {
    f'000{gid}': gpd.read_file(f'shpfiles/glacier000{gid}.shp').to_crs('EPSG:4326')
    for gid in range(1, 3)
}
# Note this is the same procedure as for the termini, except we are using a filename instead of a url.
# ## ICESat-2 ATL06
# Now that we have the flowlines and termini, we are going to plot these alongside the ICESat-2 and ATM tracks. Remember the data we worked with yesterday? Here we are going to use that again for this mapping project.
# +
# Load the ICESat-2 data
is2_file = 'processed_ATL06_20190420093051_03380303_005_01_full.h5'
with h5py.File(is2_file, 'r') as f:
    # Keep only latitude, longitude and land-ice height for the two
    # central beams.
    is2_gt2r = pd.DataFrame(data={'lat': f['gt2r/land_ice_segments/latitude'][:],
                                  'lon': f['gt2r/land_ice_segments/longitude'][:],
                                  'elev': f['gt2r/land_ice_segments/h_li'][:]}) # Central weak beam
    is2_gt2l = pd.DataFrame(data={'lat': f['gt2l/land_ice_segments/latitude'][:],
                                  'lon': f['gt2l/land_ice_segments/longitude'][:],
                                  'elev': f['gt2l/land_ice_segments/h_li'][:]}) # Central strong beam
# Load the ATM data
atm_file = 'ILATM2_20190506_151600_smooth_nadir3seg_50pt.csv'
atm_l2 = pd.read_csv(atm_file)
# Look only at the nadir track
atm_l2 = atm_l2[atm_l2['Track_Identifier']==0]
# Change the longitudes to be consistent with ICESat-2
# (the -360 shift implies the ATM file stores 0..360 longitudes — TODO confirm)
atm_l2['Longitude(deg)'] -= 360
# -
# Next the data are subsetted to the range of ATM latitudes.
# Subset the ICESat-2 data to the ATM latitudes
# (strict bounds: end points of the ATM track are excluded).
lat_min = atm_l2['Latitude(deg)'].min()
lat_max = atm_l2['Latitude(deg)'].max()
is2_gt2r = is2_gt2r[(is2_gt2r['lat'] > lat_min) & (is2_gt2r['lat'] < lat_max)]
is2_gt2l = is2_gt2l[(is2_gt2l['lat'] > lat_min) & (is2_gt2l['lat'] < lat_max)]
# ## Plot lidar tracks, flowlines and termini
# Checking everything looks good before analysis by plotting the data over imagery rendered via ipyleaflet.
# +
# Interactive ipyleaflet map over the study area: MODIS true-color basemap
# with lidar ground tracks, yearly termini and flowlines layered on top.
center = [69.2, -50]
zoom = 8
mapdt1 = '2019-05-06'  # basemap date (same day as the ATM flight file)
m = Map(basemap=basemap_to_tiles(basemaps.NASAGIBS.ModisAquaTrueColorCR, mapdt1), center=center, zoom=zoom)
# Approximate each lidar track by a straight segment between its end points.
gt2r_line = Polyline(
    locations=[
        [is2_gt2r['lat'].min(), is2_gt2r['lon'].max()],
        [is2_gt2r['lat'].max(), is2_gt2r['lon'].min()]
    ],
    color="green",
    fill=False
)
m.add_layer(gt2r_line)
gt2l_line = Polyline(
    locations=[
        [is2_gt2l['lat'].min(), is2_gt2l['lon'].max()],
        [is2_gt2l['lat'].max(), is2_gt2l['lon'].min()]
    ],
    color="green",
    fill=False
)
m.add_layer(gt2l_line)
atm_line = Polyline(
    locations=[
        [atm_l2['Latitude(deg)'].min(), atm_l2['Longitude(deg)'].max()],
        [atm_l2['Latitude(deg)'].max(), atm_l2['Longitude(deg)'].min()]
    ],
    color="orange",
    fill=False
)
m.add_layer(atm_line)
legend = LegendControl({'ICESat-2':'green','ATM':'orange'}, name = 'Lidar', position="topleft")
m.add_control(legend)
tLegend = {}
# Draw termini for Glacier_ID 3 and 4, one random color per year.
for i in range(3, 5):
    for key in myTermini:
        # Create list of lat/lon pairs
        r = lambda: random.randint(0,255)
        cr = '#%02X%02X%02X' % (r(),r(),r())  # random hex color
        term_coords = [[[xy[1],xy[0]] for xy in geom.coords] for geom in myTermini[key].loc[myTermini[key]['Glacier_ID'] == i].geometry]
        term_data = Polyline(locations=term_coords, weight=2, color=cr, fill=False)
        m.add_layer(term_data)
        tLegend[key] = cr
legend = LegendControl(tLegend, name="Terminus", position="topright")
m.add_control(legend)
# Flowlines as black overlays, one toggleable layer per glacier.
for glacier in glaciers:
    gl_data = GeoData(geo_dataframe = glaciers[glacier],
                      style={'color': 'black', 'weight':1.0},
                      name = f'{glacier}')
    m.add_layer(gl_data)
m.add_control(LayersControl())
m
# -
# ## Plot Flowines Using Remote Greenland Ice Mapping Project Data
# ICESat measures thinning and thickening, which often is driven by changes in the flow of the glacier.
#
# Thus, to understand whether elevation change is driven by ice dynamics or changes in surface mass balance (net melting and snowfall), we need to look at how the flow velocity is evolving with time.
#
# This section demonstrates how Greenland Ice Mapping Project (GrIMP) [data](https://nsidc.org/data/measures/grimp) can be remotely accessed. As an example, will used flowlines from [Felikson et al., 2020](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2020GL090112) distributed via [Zenodo](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2020GL090112).
#
# Here we will use:
# - ```grimp.Flowlines``` to read, manipulate, and store the flowlines data;
# - ```grimp.cmrUrls``` to search the NISDC catalog; and
# - ```nisar.nisarVelSeries``` to build a time-dependent stack of velocity data, which can be plotted, interpolated etc.
# ### Read Shapefiles
# In the examples presented here we will use glaciers 1 & 2 in the Felikson data base, [Felikson et al., 2020](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2020GL090112), which were retrieved from [Zenodo](https://agupubs.onlinelibrary.wiley.com/doi/10.1029/2020GL090112).
#
# Each glacier's flowlines are used to create `grimp.Flowlines` instances, which are saved in a dictionary, `myFlowlines` with glacier id: '0001' and '0002'.
#
# Each `Flowlines` read a set of flowlines for each glacier and stores in a dictionary of `myFlowlines.flowlines`. The code to do this looks something like:
#
# ```
# flowlines = {}
# shapeTable = gpd.read_file(shapefile)
# for index, row in shapeTable.iterrows(): # loop over features
# fl = {} # New Flowline
# fl['x'], fl['y'] = np.array([c for c in row['geometry'].coords]).transpose()
# fl['d'] = computeDistance(fl['x'], fl['y'])
# flowlines[row['flowline']] = fl
# ```
# For further detail, see the full [class definition](https://github.com/fastice/grimpfunc/blob/master/grimpfunc/Flowlines.py)
# To limit the plots to the downstream regions, the flowlines are all truncated to a `length` of 50km.
#
# Within each myFlowines entry (a `grimp.Flowlines` instance), the individual flowlines are maintained as a dictionary `myFlowlines['glacierId'].flowlines`.
# Build grimp.Flowlines objects keyed by glacier id ('0001', '0002'); each is
# truncated to 50 km so later plots focus on the downstream reach.
myShapeFiles = [f'./shpfiles/glacier000{i}.shp' for i in range(1, 3)] # Build list of shape file names
myFlowlines = {x[-8:-4]: grimp.Flowlines(shapefile=x, name=x[-8:-4], length=50e3) for x in myShapeFiles}
myFlowlines
# Each flowline is indexed as shown here:
myFlowlines['0001'].flowlines.keys()
# The data for the flow line is simple, just `x`, `y` polar stereographic coordinates (EPSG=3413) and the distance, `d`, from the start of the flowline.
myFlowlines['0001'].flowlines['03'].keys()
# These coordinates for a given index can be returned as `myFlowlines['0001'].xym(index='03')` or `myFlowlines['0001'].xykm(index='03')` depending on whether m or km are preferred.
# <a id='bounds'></a>The area of interest can be defined as the union of the bounds for all of the flowlines computed as shown below along with the unique set of flowline IDs across all glaciers. We will use the bounding box [below](#subsettext) to subset the data.
# Union of the bounding boxes of all flowlines, plus the unique set of
# flowline IDs across both glaciers (used for legends later).
# Start from a deliberately inverted box so the first merge always wins.
myBounds = {'minx': 1e9, 'miny': 1e9, 'maxx': -1e9, 'maxy': -1e9}
flowlineIDs = []
for fl in myFlowlines.values():
    myBounds = fl.mergeBounds(myBounds, fl.bounds)
    flowlineIDs.append(fl.flowlineIDs())
# Collapse to the unique, sorted list of IDs.
flowlineIDs = np.unique(flowlineIDs)
print(myBounds)
print(flowlineIDs)
# ### Search Catalog for Velocity Data
# We now need to locate velocity data from the GrIMP data set. For this exercise, we will focus on the annual velocity maps of Greenland. To do this, we will use the [grimp.cmrUrls](https://github.com/fastice/grimpfunc/blob/master/grimpfunc/cmrUrls.py) tool, which will do a GUI based search of NASA's Common Metadata Repository ([CMR](https://earthdata.nasa.gov/eosdis/science-system-description/eosdis-components/cmr)). Search parameters can be passed directly to the `initialSearch` method to perform the search.
# GUI search of the CMR catalog for the NSIDC-0725 annual velocity mosaics.
myUrls = grimp.cmrUrls(mode='subsetter', verbose=True) # nisar mode excludes image and tsx products and allows only one product type at a time
myUrls.initialSearch(product='NSIDC-0725')
# The `verbose` flag causes the CMR search string to be printed. The search basically works by a) reading the parameters from the search panel (e.g., product, date, etc) and creating a search string, which returns the search result.
# The same query cmrUrls issues under the hood, sent directly to CMR:
# NSIDC-0725 v3 granules over Greenland, sorted by start date.
response = requests.get('https://cmr.earthdata.nasa.gov/search/granules.json?provider=NSIDC_ECS&sort_key[]=start_date&sort_key[]='
                        'producer_granule_id&scroll=false&page_size=2000&page_num=1&short_name=NSIDC-0725&version=3&temporal[]='
                        '2000-01-01T00:00:01Z,2022-03-10T00:23:59&bounding_box[]=-75.00,60.00,-5.00,82.00&producer_granule_id[]='
                        '*&options[producer_granule_id][pattern]=true')
search_results = response.json()
search_results;  # trailing ';' suppresses the (large) notebook output
# Under the hood, the `cmrUrls` code can filter the json to get a list of urls:
myUrls.getURLS()
# ### Load the Velocity Data
# GrIMP produces full Greenland velocity maps. Collectively, there are more than 400 full Greenland maps, totalling several hundred GB of data, which may be more than a user interested in a few glaciers wants to download and store on their laptop. Fortunately, using Cloud Optimized Geotiffs, only the data that are actually needed are downloaded. As a quick review, COGs have the following properties:
#
# - All the metadata is at the beginning of the file, allowing a single read to obtain the layout.
# - The data are tiled (i.e., stored as a series of blocks like a checkerboard) rather than as a line-by-line raster.
# - A consistent set of overview images (pyramids) are stored with the data.
# While the velocity data are stored as multiple files at NSIDC, they can all be combined into a single [nisarVelSeries](https://github.com/fastice/nisardev/blob/main/nisardev/nisarVelSeries.py) instance, which has the following properties:
#
# - Built on Xarray,
# - Dask (parallel operations),
# - Local and remote subsetting (Lazy Opens), and
# - Subsets can be saved for later use
# Before loading the data, we must setup the filename template for the multi-band velocity products.
#
# Specifically, we must put a '\*' where the band identifier would go and remove the trailing '.tif' extension.
# Turn each COG url into a band template: 'vv' -> '*' and drop '.tif', so
# the reader can substitute vx/vy/vv per band.
urlNames = [x.replace('vv','*').replace('.tif','') for x in myUrls.getCogs()] # getCogs filters to ensure tif products
urlNames[0:5]
# We can now create a `nisarVelocitySeries` object, which will create a large time series stack with all of the data.
myVelSeries = nisar.nisarVelSeries() # Create Series
myVelSeries.readSeriesFromTiff(urlNames, url=True, readSpeed=False) # readSpeed=False computes speed from vx, vy rather than downloading
myVelSeries.xr # Add semicolon after to suppress output
# For the annual data set, this step produces a ~7GB data sets, which expands to 370GB for the full 6-12-day data set.
#
# <a id='subsettext'></a>To avoid downloading unnecessary data, the data can be subsetted using the bounding box we created [above](#bounds) from the flowlines.
# Lazily subset the stack to the flowline bounding box computed earlier;
# nothing is downloaded until the data are actually used.
myVelSeries.subSetVel(myBounds) # Apply subset
myVelSeries.subset # Add semicolon after to suppress output
# The volume of the data set is now a far more manageable ~15MB, which is still located in the archive.
#
# With dask, operations can continue without downloading, until the data are finally needed to do something (e.g., create a plot).
#
# If lots of operations are going to occur, however, it is best to download the data upfront.
myVelSeries.loadRemote() # Load the data to memory
# ### Overview Images
# In the above, we picked out a small region for Greenland and downloaded a full-res data series. But in some cases, we may want the full image at reduced resolution (e.g., for an overview map).
#
# Here we can take advantage of the overviews to pull a single velocity map at reduced resolution (`overviewLevel=3`).
urlNames[-1]
# Single reduced-resolution map (pyramid overviewLevel=3) for the inset.
myOverview = nisar.nisarVelSeries() # Create Series
myOverview.readSeriesFromTiff([urlNames[-1]], url=True, readSpeed=False, overviewLevel=3) # readSpeed=False computes speed from vx, vy rather than downloading
myOverview.loadRemote()
myOverview.xr # Add semicolon after to suppress output
# ### Display Flowlines and Velocity
# <a id='mapplot'></a>In the next cell, we put the above pieces together:
# - Display speed with linear and log color bars,
# - Use the overview image for an inset.
# - Plot the flowline line locations.
# - For a later [plot](#points), extract a point 10-km along each flowline using `myFlowlines[glacierId].extractPoints(10, None, units='km')`
# +
# Two-panel velocity figure (linear and log color scales) with an overview
# inset, flowline overlays, and a 10-km point extracted per flowline for the
# later time-series plot.
# set up figure and axis
# #%matplotlib inline
fig, axes = plt.subplots(1, 2, figsize=(21, 12))
# Create a dictionary for accumulating glacier points
glacierPoints = {}
# generate a color dict that spans all flowline ids, using method from a flowline instance
flowlineColors = list(myFlowlines.values())[0].genColorDict(flowlineIDs=flowlineIDs)
# Plot velocity maps
# Saturate at 2000 m/yr to preserve slow detail
myVelSeries.displayVelForDate('2020-01-01', ax=axes[0], labelFontSize=12, plotFontSize=9, titleFontSize=14,
                              vmin=0, vmax=2000, units='km', scale='linear', colorBarSize='3%')
myVelSeries.displayVelForDate('2020-01-01', ax=axes[1], labelFontSize=12, plotFontSize=9, titleFontSize=14,
                              vmin=1, vmax=3000, units='m', scale='log', midDate=False, colorBarSize='3%')
# Plot location inset (preserve the overview's aspect ratio)
height = 3
axInset = inset_axes(axes[0], width=height * myOverview.sx/myOverview.sy, height=height, loc=1)
myOverview.displayVelForDate(None, ax=axInset, vmin=1, vmax=3000, colorBar=False, scale='log', title='')
axInset.plot(*myVelSeries.outline(), color='r')  # red box = subset region
axInset.axis('off')
#
# Loop over each glacier and plot the flowlines
for glacierId in myFlowlines:
    # Plot the flowline Match units to the map
    myFlowlines[glacierId].plotFlowlineLocations(ax=axes[0], units='km', colorDict=flowlineColors)
    myFlowlines[glacierId].plotFlowlineLocations(ax=axes[1], units='m', colorDict=flowlineColors)
    #
    myFlowlines[glacierId].plotGlacierName(ax=axes[0], units='km', color='w', fontsize=12, fontweight='bold', first=False)
    myFlowlines[glacierId].plotGlacierName(ax=axes[1], units='m', color='w', fontsize=12, fontweight='bold', first=False)
    # Generates points 10km from downstream end of each flowline
    points10km = myFlowlines[glacierId].extractPoints(10, None, units='km')
    glacierPoints[glacierId] = points10km
    for key in points10km:
        axes[0].plot(*points10km[key], 'r.')
#
# Add legend
for ax in axes:
    # Create a dict of unique labels for legend
    h, l = ax.get_legend_handles_labels()
    by_label = dict(zip(l, h)) # will overwrite identical entries to produce unique values
    ax.legend(by_label.values(), by_label.keys(), title='Flowline ID', ncol=2, loc='lower left', fontsize=14)
#fig.tight_layout()
# -
# ### Interpolation
# A common function with the velocity date is interpolating data for plotting points or profiles, which can be easily done with the `nisarVelSeries.interp` method.
# +
# Interpolate the velocity stack along a flowline in several equivalent ways.
# NOTE(review): relies on `glacierId` left over from the previous loop, so
# these examples sample the last glacier iterated — confirm that's intended.
# Using km
vx, vy, vv = myVelSeries.interp(*myFlowlines[glacierId].xykm(), units='km')
print(vx.shape, vx[0, 100], vy[0, 100], vv[0, 100])
# or units of meters
vx, vy, vv = myVelSeries.interp(*myFlowlines[glacierId].xym(), units='m')
print(vx.shape, vx[0, 100], vy[0, 100], vv[0, 100])
# or entirely different coordinate system
xytoll = pyproj.Transformer.from_crs(3413, 4326)  # polar stereographic -> lat/lon
lat, lon = xytoll.transform(*myFlowlines[glacierId].xym())
vx, vy, vv = myVelSeries.interp(lat, lon, sourceEPSG=4326)
print(vx.shape, vx[0, 100], vy[0, 100], vv[0, 100])
# Or would prefer an xarray rather than nparray
result = myVelSeries.interp(*myFlowlines[glacierId].xykm(), units='km', returnXR=True)
result
# -
# ### Plot Central Flowlines at Different Times
# This example will demonstrate plotting the nominally central flowline ('06') for each of the six years for which there are currently data. While we are using flow lines here, any profile data could be used (e.g., a flux gate).
# Speed-vs-distance profiles, one panel per glacier, one curve per year.
# NOTE(review): flowlineId is assigned but not referenced below — confirm
# whether the interpolation was meant to be restricted to flowline '06'.
flowlineId ='06' # Flowline id to plot
fig, axes = plt.subplots(np.ceil(len(myFlowlines)/4).astype(int), 2, figsize=(16, 8)) # Setup plot
# Loop over glaciers
for glacierId, ax in zip(myFlowlines, axes.flatten()):
    #
    # return interpolated values as vx(time index, distance index)
    vx, vy, vv = myVelSeries.interp(*myFlowlines[glacierId].xykm(), units='km')
    #
    # loop over each profile by time
    for speed, myDate in zip(vv, myVelSeries.time):
        ax.plot(myFlowlines[glacierId].distancekm(), speed, label=myDate.year)
    #
    # pretty up plot
    ax.legend(ncol=2, loc='upper right', fontsize=15)
    ax.set_xlabel('Distance (km)', fontsize=18)
    ax.set_ylabel('Speed (m/yr)', fontsize=18)
    ax.set_title(f'Glacier {glacierId}', fontsize=20)
#
# Resize tick labels
for ax in axes.flatten():
    ax.tick_params(axis='both', labelsize=15)
plt.tight_layout()
# ### Plot Points Through Time
# <a id='points'></a>When the map plots were generated [above](#mapplot), a set of points 10-k from the start of each flowline was extracted:
glacierPoints
# The time series for each set of. points can be plotted as:
# Speed through time at the point 10 km along each flowline, one panel per
# glacier, colored consistently with the map via flowlineColors.
# #%matplotlib inline
fig, axes = plt.subplots(1, 2, figsize=(16, 8))
# Loop over glaciers
for glacierId, ax in zip(glacierPoints, axes.flatten()):
    # Loop over flowlines
    for flowlineId in glacierPoints[glacierId]:
        #
        # interpolate to get results vx(time index) for each point
        vx, vy, v = myVelSeries.interp(*glacierPoints[glacierId][flowlineId], units='km')
        ax.plot(myVelSeries.time, v, marker='o', linestyle='-', color=flowlineColors[flowlineId], label=f'{flowlineId}')
    #
    # pretty up plot
    ax.legend(ncol=3, loc='upper right', title='Flowline ID')
    ax.set_xlabel('year', fontsize=18)
    ax.set_ylabel('Speed (m/yr)', fontsize=18)
    ax.set_title(f'Glacier {glacierId}', fontsize=20)
#
# Resize tick labels
for ax in axes.flatten():
    ax.tick_params(axis='both', labelsize=15)
plt.tight_layout()
# ### Save the Data
# While it is convenient to work with the data remotely, it's nice to be able to save the data for further processing.
#
# The downloaded subset can be saved in a netcdf and reloaded for to `velSeries` instance for later analysis.
#
# Note: make sure the data have been subsetted so only the subset will be saved (~15MB in this example). If not, the entire Greenland data set will be saved (370GB).
#
# Change `saveData` and `reloadData` below to test this capability.
saveData = True # Set to True to save data
if saveData:
    # Only the subsetted (~15MB) region is written; saving without the
    # earlier subSetVel call would dump the full Greenland stack.
    myVelSeries.toNetCDF('Glaciers1-2example.nc')
# Now open the file and redo a plot from above with the saved data.
reloadData = True # Set to True to reload the saved data
if reloadData:
    fig, axes = plt.subplots(np.ceil(len(myFlowlines)/2).astype(int), 2, figsize=(16, 8)) # Setup plot
    myVelCDF = nisar.nisarVelSeries() # Create Series
    myVelCDF.readSeriesFromNetCDF('Glaciers1-2example.nc')
    #
    for glacierId, ax in zip(myFlowlines, axes.flatten()):
        # return interpolated values as vx(time index, distance index)
        vx, vy, vv = myVelCDF.interp(*myFlowlines[glacierId].xykm(), units='km')
        # loop over each profile by time; iterate the reloaded series' own
        # time axis (previously used myVelSeries.time, which only worked
        # because the original stack happened to still be in scope)
        for speed, myDate in zip(vv, myVelCDF.time):
            ax.plot(myFlowlines[glacierId].distancekm(), speed, label=myDate.year)
        # pretty up plot
        ax.legend(ncol=2, loc='upper right', fontsize=15)
        ax.set_xlabel('Distance (km)', fontsize=18)
        ax.set_ylabel('Speed (m/yr)', fontsize=18)
        ax.set_title(f'Glacier {glacierId}', fontsize=20)
    # Resize tick labels
    for ax in axes.flatten():
        ax.tick_params(axis='both', labelsize=15)
    plt.tight_layout()
# ### Summary for GrIMP Data
# Using the [nisardev](https://github.com/fastice/nisardev) and [grimp](https://github.com/fastice/grimpfunc) packages we were easily able to perform many of the typical functions needed for the analysis of glaciers by accessing remote [GrIMP data](https://nsidc.org/data/measures/grimp) such as:
# - Accessing stacks of velocity data;
# - Display velocity maps; and
# - Interpolating data to points or lines;
#
# When working with larger data sets (e.g., the 300+ [6/12 day velocity maps](https://nsidc.org/data/measures/grimp) at NSIDC), downloads can take longer (several minutes), but are still 2 to 3 orders of magnitude faster than downloading the full data set.
#
# Once downloaded, the data are easily saved for later use.
#
# Other notebooks demonstrated the use of these tools are available through [GrIMPNotebooks](https://github.com/fastice/GrIMPNotebooks) repo at github.
#
# As mentioned above, velocity data can help provide context for elevation change measurements. Next we look at elevation change for the Jakobshavn region.
# ## Comparing ICESat-2 Data with Other Datasets
#
# Last time, we did a bit of work to add ICESat-2 and Operation Icebridge data to Pandas Dataframes. We only covered the basic operations that you can do with Pandas, so today we are going to do a more thorough analysis of the data here.
#
# Since we already downloaded the ICESat-2/ATM files of interest, we are not going to use icepyx just yet - we will go ahead and reload the data from yesterday.
#
# (Prompt) I forgot how to load the ICESat-2 data from a .h5 file. What do I need to do?
#
# (Prompt) I also forgot how to load the ATM data. How do I read the CSV?
# We established last time that ATM aligns best with the central ICESat-2 beams, particularly the central strong beam (GT2L). Let's see if that is reflected in the elevation profiles...
# +
# Reload the same ICESat-2 and ATM files used earlier in the tutorial.
# Load the ICESat-2 data
is2_file = 'processed_ATL06_20190420093051_03380303_005_01_full.h5'
with h5py.File(is2_file, 'r') as f:
    is2_gt2r = pd.DataFrame(data={'lat': f['gt2r/land_ice_segments/latitude'][:],
                                  'lon': f['gt2r/land_ice_segments/longitude'][:],
                                  'elev': f['gt2r/land_ice_segments/h_li'][:]}) # Central weak beam
    is2_gt2l = pd.DataFrame(data={'lat': f['gt2l/land_ice_segments/latitude'][:],
                                  'lon': f['gt2l/land_ice_segments/longitude'][:],
                                  'elev': f['gt2l/land_ice_segments/h_li'][:]}) # Central strong beam
# Load the ATM data
atm_file = 'ILATM2_20190506_151600_smooth_nadir3seg_50pt.csv'
atm_l2 = pd.read_csv(atm_file)
# Look only at the nadir track
atm_l2 = atm_l2[atm_l2['Track_Identifier']==0]
# Change the longitudes to be consistent with ICESat-2
atm_l2['Longitude(deg)'] -= 360
# -
# Subset the ICESat-2 data to the ATM latitudes (strict bounds).
atm_lat_min = atm_l2['Latitude(deg)'].min()
atm_lat_max = atm_l2['Latitude(deg)'].max()
is2_gt2r = is2_gt2r[(is2_gt2r['lat'] > atm_lat_min) & (is2_gt2r['lat'] < atm_lat_max)]
is2_gt2l = is2_gt2l[(is2_gt2l['lat'] > atm_lat_min) & (is2_gt2l['lat'] < atm_lat_max)]
# +
# Make a 2D plot of along-track surface height
import matplotlib.pyplot as plt
# #%matplotlib widget
fig, ax = plt.subplots(1, 1)
# Draw both central beams plus the ATM nadir profile on the same axes.
ax.plot(is2_gt2r['lat'], is2_gt2r['elev'], label='gt2r')
ax.plot(is2_gt2l['lat'], is2_gt2l['elev'], label='gt2l')
ax.plot(atm_l2['Latitude(deg)'], atm_l2['WGS84_Ellipsoid_Height(m)'], label='atm')
ax.set_xlabel('latitude')
ax.set_ylabel('elevation [m]')
ax.set_xlim([69.185, 69.275])
ax.set_ylim([100, 550])
ax.legend()
plt.show()
# -
# Sure enough, GT2L and ATM match very well! Since they are very close to each other, we can do a quick accuracy assessment between the two.
#
# The ATM DataFrame is larger than the ICESat-2 dataframe, so we're going to apply a simple spline interpolant to downscale the ICESat-2 data.
# +
from scipy.interpolate import splrep,splev
fig, ax = plt.subplots(1, 1)
# Apply a spline interpolant to the ICESat-2 data
# (resamples the gt2l profile onto the denser ATM latitude samples).
# NOTE(review): splrep expects increasing x — confirm gt2l latitudes ascend
# along this track.
spl = splrep(is2_gt2l['lat'], is2_gt2l['elev'], s=0)
is2_spl = splev(atm_l2['Latitude(deg)'], spl, der=0)
# Calculate GT2L bias and add it to the ATM DataFrame
atm_l2['bias'] = atm_l2['WGS84_Ellipsoid_Height(m)'] - is2_spl
# Plot the bias curve
plt.plot(atm_l2['Latitude(deg)'], atm_l2['bias'])
#plt.plot(atm_l2['Latitude(deg)'], atm_l2['WGS84_Ellipsoid_Height(m)'])
#plt.plot(atm_l2['Latitude(deg)'], is2_spl)
plt.xlabel('latitude')
plt.ylabel('bias [m]')
plt.xlim([69.2, 69.26])
plt.ylim([-20, 20])
plt.show()
print('Mean bias: %s m' %(atm_l2['bias'].mean()))
# -
# Through some relatively simple operations, we found that ATM and ICESat-2 differ by ~0.33 m on average. Between this plot and the elevation plot above, what do you think might be causing some of the differences?
#
# We will revisit ICESat-2 and ATM near the end of this tutorial. Now, we are going to look at ice velocities and flow lines from the GRIMP project.
# ### Visualization
# We are now going to revisit the GRIMP data one last time to visualize all of the data together. We have conducted a bias assessment between the two lidars, so now we are going to look at how the land ice heights change over time.
#
# First let's take a look at ATM data from previous years. The CSV file we are going to use is pre-processed L2 data for 2011-2018, much like the data from 2019. These flights are slightly east of the 2019 flight, which was adjusted to better align with ICESat-2.
# +
# Three panels: velocity map with ATM flight lines (left), ATM surface
# heights per year (middle), interpolated speed along the ATM track (right).
fig, axes = plt.subplots(1, 3, figsize=(15, 5))
# Read in the ATM CSV file
atm_2011_2018 = pd.read_csv('ILATM2_2011_2019_v3.csv')
lltoxy = pyproj.Transformer.from_crs(4326, 3413)  # lat/lon -> polar stereo
# #%matplotlib widget
# Loop through the valid years and plot surface height
years = ['2011', '2013', '2014', '2015', '2016', '2018']
for i,year in enumerate(years):
    lat = atm_2011_2018['Latitude_'+year]
    elev = atm_2011_2018['elev_'+year]
    axes[1].plot(lat, elev, label=year)
#
#
myVelSeries.displayVelForDate('2020-01-01', ax=axes[0], labelFontSize=12, plotFontSize=9, titleFontSize=14,
                              vmin=1, vmax=3000, units='m', scale='log', midDate=False, colorBarSize='3%', colorBarPosition='bottom')
axes[0].axis('off')
lltoxy = pyproj.Transformer.from_crs(4326, 3413)  # (duplicate of the transformer above)
# Only 2015-2018 flights are drawn on the map / speed panel.
for i, year in enumerate(years[3:]):
    lat, lon = atm_2011_2018['Latitude_'+year], atm_2011_2018['Longitude_'+year]
    x, y = lltoxy.transform(lat, lon)
    axes[0].plot(x, y, 'w', linewidth=2)
    # Nearest-in-time velocity map to June 1 of each flight year.
    v = myVelSeries.interp(lat, lon, sourceEPSG=4326, returnXR=True).sel(time=datetime.datetime(int(year), 6, 1),
                                                                         method='nearest').sel(band='vv')
    axes[2].plot(lat, v, label=year)
#
axes[1].set_xlabel('latitude')
axes[1].set_ylabel('elevation [m]')
axes[1].legend()
axes[2].set_xlabel('latitude')
axes[2].set_ylabel('Speed [m/yr]')
axes[2].legend()
fig.tight_layout()
# -
# Across these three figures, we can compare the ATM surface heights with ice velocities over the region. It's obvious that the greatest ice velocities are at the lower elevations, and vice-versa.
#
# We can also see a distinct decrease in ice velocity for 2018 - let's make a time series to see the ice height changes observed by ATM...
# +
# Mean surface-height change relative to 2011 over the trough; every year is
# resampled onto the 2013 latitude grid with a spline before differencing.
# Set the latitude bounds of the surface trough
lat_bounds = [69.1982, 69.2113]
# 2013 has the longest streak of data over this region. We are going to downscale the other years to its length.
lat_2013 = atm_2011_2018['Latitude_2013'][(atm_2011_2018['Latitude_2013']>lat_bounds[0]) & (atm_2011_2018['Latitude_2013']<lat_bounds[1])]
# First, downscale the 2011 data to 2013 resolution
lat = atm_2011_2018['Latitude_2011']
elev = atm_2011_2018['elev_2011'][(lat>lat_bounds[0]) & (lat<lat_bounds[1])].reset_index(drop=True)
lat = lat[(lat>lat_bounds[0]) & (lat<lat_bounds[1])].reset_index(drop=True)
spl = splrep(lat[::-1], elev[::-1], s=0)  # reverse so latitudes ascend for splrep
slp_2011 = splev(lat_2013, spl, der=0)
# Calculate ice loss relative to 2011
delta_h = [0]
std_h = [0]
for i, year in enumerate(years[1:]):  # Start loop at 2013 (2012 has no data)
    # `years` holds strings, so 2013 must be compared as '2013'. The original
    # compared against the int 2013, which was always unequal, so the else
    # branch was dead and 2013 went through the resampling path too.
    if year != '2013':  # Downscale other years to 2013 resolution
        lat = atm_2011_2018['Latitude_'+year]
        elev = atm_2011_2018['elev_'+year][(lat>lat_bounds[0]) & (lat<lat_bounds[1])].reset_index(drop=True)
        # Downscale the data with splines
        lat = lat[(lat>lat_bounds[0]) & (lat<lat_bounds[1])].reset_index(drop=True)
        spl = splrep(lat[::-1], elev[::-1], s=0)
        spl_year = splev(lat_2013, spl, der=0)
        # Now calculate the difference relative to 2011
        delta_h.append((spl_year - slp_2011).mean())
        std_h.append((spl_year - slp_2011).std())
    else:
        # 2013 is already on its own grid: difference directly.
        lat = atm_2011_2018['Latitude_'+year]
        elev = atm_2011_2018['elev_'+year][(lat>lat_bounds[0]) & (lat<lat_bounds[1])].reset_index(drop=True)
        # Calculate the difference relative to 2011
        delta_h.append((elev[::-1] - slp_2011).mean())
        # Was (spl_year - slp_2011).std() — spl_year would be a stale value
        # from a previous iteration; use this year's elevations instead.
        std_h.append((elev[::-1] - slp_2011).std())
# #%matplotlib widget
fig, ax = plt.subplots(1, 1)
plt.errorbar(years, delta_h, yerr=std_h, marker='.', markersize=12, capsize=4)
plt.xlabel('year')
plt.ylabel('$\Delta$ h [m]')
plt.show()
# -
# Ta-da!! Using a few operations, we were able to use ATM data to derive a rough time series of ice sheet elevation change over Jakobshavan. We can see that there is a significant loss in ice between 2011 and 2013, followed by a gradual decrease up through 2016. Interestingly, there is a non-negligible increase in ice height in 2018, which may explain the decrease in ice velocity for the same year.
#
# We're going to try and do the same thing, but for ICESat-2. Because it was launched in late-2018, we are going to try and grab interseasonal measurements from RGT 338 for 2019-2021.
# +
# Time to go through the icepyx routine again!
import icepyx as ipx
# Specifying the necessary icepyx parameters
short_name = 'ATL06'  # ICESat-2 land-ice elevation product
lat_bounds = [69.1982, 69.2113]
# spatial_extent is [lon_min, lat_min, lon_max, lat_max] — TODO confirm against icepyx docs
spatial_extent = [-50, 69.1982, -48.5, 69.2113] # KML polygon centered on Jakobshavan
date_range = ['2019-04-01', '2021-12-30']
rgts = ['338'] # IS-2 RGT of interest
# +
# Setup the Query object
region = ipx.Query(short_name, spatial_extent, date_range, tracks=rgts)
# Show the available granules
region.avail_granules(ids=True)
# +
# Set Earthdata credentials (interactive login; requires a valid Earthdata account)
uid = 'uwhackweek'
email = '<EMAIL>'
region.earthdata_login(uid, email)
# Order the granules
region.order_granules()
# -
# Download all ordered granules to a local scratch directory.
path = '/tmp/DataIntegration/'
region.download_granules(path)
# +
import h5py
fig, ax = plt.subplots(1, 1)
# Iterate through the files to grab elevation, and derive elevation differences relative to April 2019
files = ['processed_' + granule for granule in region.avail_granules(ids=True)[0]]
# Get the initial (reference) data from April 2019 — the first granule
with h5py.File(files[0]) as f:
    elev_42019 = f['gt2l/land_ice_segments/h_li'][:]
    lat_42019 = f['gt2l/land_ice_segments/latitude'][:]
plt.plot(lat_42019, elev_42019, label=files[0][16:24])
delta_h = [0]
std_h = [0]
for file in files[1:]:
    try:
        with h5py.File(file) as f:
            cloud_flag = np.mean(f['gt2l/land_ice_segments/geophysical/cloud_flg_asr'][:])
            # Filter out cloudy scenes
            if cloud_flag < 2:
                lat = f['gt2l/land_ice_segments/latitude'][:]
                elev = f['gt2l/land_ice_segments/h_li'][:]
                # Date of IS-2 overpass, sliced from the filename — assumes the
                # 'processed_ATL06_...' naming puts YYYYMMDD at chars 16:24.
                date = file[16:24]
                # Find the difference relative to April 2019
                delta_h.append(np.mean(elev - elev_42019))
                std_h.append(np.std(elev - elev_42019))
                # Plot the elevation data
                plt.plot(lat, elev, label=date)
            else:
                print('Cloudy scene - no data loaded')
    except (OSError, KeyError) as e:
        # BUGFIX: a bare `except:` previously reported any unreadable/incomplete
        # file as a "cloudy scene"; catch only file/dataset read errors and say
        # what actually happened.
        print(f'Could not read {file} ({e}) - no data loaded')
plt.xlabel('latitude')
plt.ylabel('elevation [m]')
plt.legend()
plt.show()
# -
# Plot the ice sheet change time series
# Overpass dates in YYYYMMDD. BUGFIX: the last entry was the 9-digit typo
# '202110115'; corrected to '20211015' — TODO confirm the exact day against the granule list.
dates = ['20190420', '20200117', '20200717', '20211015']
fig, ax = plt.subplots(1, 1)
plt.errorbar(dates, delta_h, yerr=std_h, marker='.', markersize=12, capsize=4)
plt.xlabel('date')
plt.ylabel(r'$\Delta$ h [m]')  # raw string: '\D' is an invalid escape in a plain string
plt.show()
# There we go! We lost some data due to cloud cover, and the mean change has some spread, but the ICESat-2 data continues to show a downward trend that was suggested by ATM. Note that these changes are relative to an ICESat-2 observation - if we plotted these on the previous figure, the trend would be even more pronounced!
# + [markdown] tags=[]
# ## Summary
#
# 🎉 Congratulations! You've completed this tutorial and have learned how to:
# * Access and plot glacier velocity using GrIMP data and python packages
# * Compared ICESat-2 elevations with ATM elevations, and
# * Integrated velocities and elevation changes for Jakobshavn into one plot;
#
# These are advanced methods for integrating, analyzing, and visualizing multiple kinds of data sets with ICESat-2, which can be adopted for other kinds of data and analyses.
| book/tutorials/DataIntegration/dataintegration-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
#export
from exp.nb_00 import *
import operator
def test(a,b,cmp,cname=None):
if cname is None: cname=cmp.__name__
assert cmp(a,b),f"{cname}:\n{a}\n{b}"
def test_eq(a,b): test(a,b,operator.eq,'==')
# -
test_eq(TEST,'test')
# +
#export
from pathlib import Path
from IPython.core.debugger import set_trace
from fastai import datasets
import pickle, gzip, math, torch, matplotlib as mpl
import matplotlib.pyplot as plt
from torch import tensor
MNIST_URL='http://deeplearning.net/data/mnist/mnist.pkl'
# -
# Download MNIST (cached by fastai) and unpack the pickled train/valid splits.
path = datasets.download_data(MNIST_URL, ext='.gz'); path
with gzip.open(path,'rb') as f:
    ((x_train, y_train),(x_valid, y_valid),_) = pickle.load(f,encoding='latin-1')
# Convert the numpy arrays to torch tensors.
x_train,y_train,x_valid,y_valid = map(tensor,(x_train,y_train,x_valid,y_valid))
n,c = x_train.shape
n,c
x_train,x_train.shape,y_train,y_train.shape, y_train.min(),y_train.max()
# Sanity checks: 50k training rows of flattened 28x28 images, labels 0-9.
assert n==y_train.shape[0]==50000
test_eq(c,28*28)
test_eq(y_train.min(),0)
test_eq(y_train.max(),9)
mpl.rcParams['image.cmap'] = 'gray'
img = x_train[0]
img.view(28,28).type()
img.view(28,28).shape
plt.imshow(img.view(28,28))
# input -> a single neuron needs weights of shape [input, 1];
# here [input, n]: n output nodes in the next layer
weights = torch.randn(784,10)
# every output node needs a bias
bias = torch.zeros(10)
def matmul(a, b):
    """Naive matrix product via three explicit scalar loops (O(ar*bc*ac))."""
    a_rows, a_cols = a.shape
    b_rows, b_cols = b.shape
    assert a_cols == b_rows  # inner dimensions must agree
    out = torch.zeros(a_rows, b_cols)
    for row in range(a_rows):
        for col in range(b_cols):
            # accumulate the dot product of a's row with b's column
            for k in range(a_cols):
                out[row, col] += a[row, k] * b[k, col]
    return out
# Grab a small validation batch and time the naive matmul on it.
m1 = x_valid[:5]
m1
m2 = weights
m1.shape,m2.shape
# %time t1 = matmul(m1,m2)
t1.shape
len(x_train)
# Elementwise tensor ops and reductions.
a = tensor([2,4,-1])
b = tensor([2.4,2,6])
a,b
a+b
(a<b).float().mean()
m = tensor([[1.,2,3],[4,5,6],[7,8,9]]);m
# Frobenius norm of m.
(m*m).sum().sqrt()
def matmul(a, b):
    """Matrix product with the innermost loop replaced by a vectorized dot product."""
    a_rows, a_cols = a.shape
    b_rows, b_cols = b.shape
    assert a_cols == b_rows  # inner dimensions must agree
    out = torch.zeros(a_rows, b_cols)
    for row in range(a_rows):
        for col in range(b_cols):
            # one elementwise multiply + sum instead of a Python loop over k
            out[row, col] = (a[row, :] * b[:, col]).sum()
    return out
# %timeit -n 10 _ = matmul(m1,m2)
def near(a,b): return torch.allclose(a,b,rtol=1e-3,atol=1e-5)
def test_near(a,b) : test(a,b,near)
test_near(t1,matmul(m1,m2))
# +
#Broadcasting
# -
# Scalars broadcast across every element of a tensor.
a
a>0
a+1
m
2*m
c = tensor([10,20,30])
c
m
m.shape
c.shape
# Rank-1 c broadcasts across each row of rank-2 m (trailing dims line up).
m+c
c+m
# +
#stride of 0
# -
# expand_as materializes the broadcast view: same storage, stride 0 on the new dim.
t = c.expand_as(m)
t
m+t
t.storage()
t.shape
m.storage()
t.stride()
m.stride()
c
# unsqueeze / None-indexing insert a size-1 axis (row vs. column vector).
c.unsqueeze(0)
c.shape
c.unsqueeze(1)
m
c.shape
c[:,None].shape
c.shape
c[None].shape
c
# As a column vector, c broadcasts across columns instead of rows.
c[:,None].expand_as(m)
m+c[:,None]
def matmul(a, b):
    """Matrix product computed one output row at a time via broadcasting."""
    a_rows, a_cols = a.shape
    b_rows, b_cols = b.shape
    assert a_cols == b_rows  # inner dimensions must agree
    out = torch.zeros(a_rows, b_cols)
    for row in range(a_rows):
        # a[row] as a column vector multiplies every column of b at once;
        # summing over dim 0 reduces the shared inner dimension.
        out[row] = (a[row][:, None] * b).sum(dim=0)
    return out
# %timeit -n 10 _=matmul(m1,m2)
test_near(t1,matmul(m1,m2))
c
c[None] > c[:,None]
def matmul(a, b):
    """Matrix product as an Einstein summation: out[i,j] = sum_k a[i,k] * b[k,j]."""
    return torch.einsum('ik,kj->ij', a, b)
# %timeit -n 10 _=matmul(m1,m2)
test_near(t1,matmul(m1,m2))
# %timeit -n 10 t2 = m1.matmul(m2)
t2 = m1@m2
test_near(t1,t2)
m1.shape,m2.shape
# !python notebook2script.py 01_matmul.ipynb
| 01_matmul.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Writing Your Own Graph Algorithms
# The analytical engine in GraphScope derives from [GRAPE](https://dl.acm.org/doi/10.1145/3282488), a graph processing system proposed on SIGMOD-2017. GRAPE differs from prior systems in its ability to parallelize sequential graph algorithms as a whole. In GRAPE, sequential algorithms can be easily **plugged into** with only minor changes and get parallelized to handle large graphs efficiently.
#
# In this tutorial, we will show how to define and run your own algorithm in PIE and Pregel models.
#
# Sounds like fun? Excellent, here we go!
# +
# Install graphscope package if you are NOT in the Playground
# !pip3 install graphscope
# -
# ## Writing algorithm in PIE model
# GraphScope enables users to write algorithms in the [PIE](https://dl.acm.org/doi/10.1145/3282488) programming model in a pure Python mode, first of all, you should import **graphscope** package and the **pie** decorator.
# +
# Import the graphscope module.
import graphscope
from graphscope.framework.app import AppAssets
from graphscope.analytical.udf.decorators import pie
graphscope.set_option(show_log=True) # enable logging
# -
# We use the single source shortest path ([SSSP](https://en.wikipedia.org/wiki/Shortest_path_problem)) algorithm as an example. To implement the PIE model, you just need to **fulfill this class**
# Skeleton of SSSP in the PIE model; the three callbacks are filled in
# incrementally in the cells below. vd_type/md_type: vertex-data and message
# types used by the engine.
@pie(vd_type="double", md_type="double")
class SSSP_PIE(AppAssets):
    @staticmethod
    def Init(frag, context):
        pass
    @staticmethod
    def PEval(frag, context):
        pass
    @staticmethod
    def IncEval(frag, context):
        pass
# The **pie** decorator contains two params named `vd_type` and `md_type` , which represent the vertex data type and message type respectively.
#
# You may specify types for your own algorithms, optional values are `int`, `double`, and `string`.
# In our **SSSP** case, we compute the shortest distance to the source for all nodes, so we use `double` value for `vd_type` and `md_type` both.
#
# In `Init`, `PEval`, and `IncEval`, it has **frag** and **context** as parameters. You can use these two parameters to access the fragment data and intermediate results. Detail usage please refer to [Cython SDK API](https://graphscope.io/docs/reference/cython_sdk.html).
#
# ### Fulfill Init Function
# SSSP with Init filled in: initialize every node's distance to "infinity",
# choose min-aggregation for incoming messages, and register message sync.
@pie(vd_type="double", md_type="double")
class SSSP_PIE(AppAssets):
    @staticmethod
    def Init(frag, context):
        v_label_num = frag.vertex_label_num()
        for v_label_id in range(v_label_num):
            nodes = frag.nodes(v_label_id)
            # 1e9 acts as +infinity; kMinAggregate keeps the smallest message.
            context.init_value(
                nodes, v_label_id, 1000000000.0, PIEAggregateType.kMinAggregate
            )
            context.register_sync_buffer(v_label_id, MessageStrategy.kSyncOnOuterVertex)
    @staticmethod
    def PEval(frag, context):
        pass
    @staticmethod
    def IncEval(frag, context):
        pass
# The `Init` function is responsible for 1) setting the initial value for each node; 2) defining the strategy of message passing; and 3) specifying the aggregator for handling received messages in each round.
#
# Note that the algorithm you defined will run on a property graph. So we should get the vertex label first by `v_label_num = frag.vertex_label_num()`, then we can traverse all nodes with the same label
# and set the initial value by `nodes = frag.nodes(v_label_id)` and `context.init_value(nodes, v_label_id, 1000000000.0, PIEAggregateType.kMinAggregate)`.
#
# Since we are computing the shortest path between the source node and other nodes, we use `PIEAggregateType.kMinAggregate` as the aggregator for message aggregation, which means it will
# perform a `min` operation upon all received messages. Other available aggregators are `kMaxAggregate`, `kSumAggregate`, `kProductAggregate`, and `kOverwriteAggregate`.
#
# At the end of `Init` function, we register the sync buffer for each node with `MessageStrategy.kSyncOnOuterVertex`, which tells the engine how to pass the message.
# ### Fulfill PEval Function
# SSSP with PEval filled in: only the fragment owning the source node relaxes
# the source's outgoing edges; other fragments return immediately.
@pie(vd_type="double", md_type="double")
class SSSP_PIE(AppAssets):
    @staticmethod
    def Init(frag, context):
        v_label_num = frag.vertex_label_num()
        for v_label_id in range(v_label_num):
            nodes = frag.nodes(v_label_id)
            context.init_value(
                nodes, v_label_id, 1000000000.0, PIEAggregateType.kMinAggregate
            )
            context.register_sync_buffer(v_label_id, MessageStrategy.kSyncOnOuterVertex)
    @staticmethod
    def PEval(frag, context):
        # Queried source vertex id, passed in as a query parameter.
        src = int(context.get_config(b"src"))
        graphscope.declare(graphscope.Vertex, source)
        native_source = False
        v_label_num = frag.vertex_label_num()
        for v_label_id in range(v_label_num):
            # Sets `source` when this fragment owns the vertex with id `src`.
            if frag.get_inner_node(v_label_id, src, source):
                native_source = True
                break
        if native_source:
            context.set_node_value(source, 0)
        else:
            return
        # Relax every outgoing edge of the source (edge weight read from
        # property index 2).
        e_label_num = frag.edge_label_num()
        for e_label_id in range(e_label_num):
            edges = frag.get_outgoing_edges(source, e_label_id)
            for e in edges:
                dst = e.neighbor()
                distv = e.get_int(2)
                if context.get_node_value(dst) > distv:
                    context.set_node_value(dst, distv)
    @staticmethod
    def IncEval(frag, context):
        pass
# In `PEval` of **SSSP**, it gets the queried source node by `context.get_config(b"src")`.
#
# `PEval` checks each fragment whether it contains source node by `frag.get_inner_node(v_label_id, src, source)`. Note that the `get_inner_node` method needs a `source` parameter in type `Vertex`, which you can declare by `graphscope.declare(graphscope.Vertex, source)`
#
# If a fragment contains the source node, it will traverse the outgoing edges of the source with `frag.get_outgoing_edges(source, e_label_id)`. For each vertex, it computes the distance from the source, and updates the value if it is less than the initial value.
# ### Fulfill IncEval Function
# Complete SSSP in the PIE model: Init seeds distances and aggregators, PEval
# relaxes the source's out-edges, and IncEval repeatedly relaxes every inner
# node's out-edges until no shorter distances are produced.
@pie(vd_type="double", md_type="double")
class SSSP_PIE(AppAssets):
    @staticmethod
    def Init(frag, context):
        v_label_num = frag.vertex_label_num()
        for v_label_id in range(v_label_num):
            nodes = frag.nodes(v_label_id)
            # 1e9 acts as +infinity; kMinAggregate keeps the smallest message.
            context.init_value(
                nodes, v_label_id, 1000000000.0, PIEAggregateType.kMinAggregate
            )
            context.register_sync_buffer(v_label_id, MessageStrategy.kSyncOnOuterVertex)
    @staticmethod
    def PEval(frag, context):
        src = int(context.get_config(b"src"))
        graphscope.declare(graphscope.Vertex, source)
        native_source = False
        v_label_num = frag.vertex_label_num()
        for v_label_id in range(v_label_num):
            # Sets `source` when this fragment owns the vertex with id `src`.
            if frag.get_inner_node(v_label_id, src, source):
                native_source = True
                break
        if native_source:
            context.set_node_value(source, 0)
        else:
            return
        # Relax the source's outgoing edges (weight in property index 2).
        e_label_num = frag.edge_label_num()
        for e_label_id in range(e_label_num):
            edges = frag.get_outgoing_edges(source, e_label_id)
            for e in edges:
                dst = e.neighbor()
                distv = e.get_int(2)
                if context.get_node_value(dst) > distv:
                    context.set_node_value(dst, distv)
    @staticmethod
    def IncEval(frag, context):
        # Runs on every fragment each round until no messages are exchanged.
        v_label_num = frag.vertex_label_num()
        e_label_num = frag.edge_label_num()
        for v_label_id in range(v_label_num):
            iv = frag.inner_nodes(v_label_id)
            for v in iv:
                v_dist = context.get_node_value(v)
                for e_label_id in range(e_label_num):
                    es = frag.get_outgoing_edges(v, e_label_id)
                    for e in es:
                        u = e.neighbor()
                        u_dist = v_dist + e.get_int(2)
                        if context.get_node_value(u) > u_dist:
                            context.set_node_value(u, u_dist)
# The only difference between `IncEval` and `PEval` of **SSSP** algorithm is that `IncEval` are invoked
# on each fragment, rather than only the fragment with source node. A fragment will repeat the `IncEval` until there is no messages received. When all the fragments are finished computation, the algorithm is terminated.
# ### Run Your Algorithm on the p2p network Graph.
# +
# Load p2p network dataset
from graphscope.dataset import load_p2p_network
graph = load_p2p_network(directed=False)
# -
# Then initialize your algorithm and query the shortest path from vertex `6` over the graph.
sssp = SSSP_PIE()
ctx = sssp(graph, src=6)
# Running this cell, your algorithm should evaluate successfully. The results are stored in vineyard on the distributed machines. Let's fetch and check the results.
r1 = (
ctx.to_dataframe({"node": "v:host.id", "r": "r:host"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r1
# ### Dump and Reload Your Algorithm
#
# You can dump and save your define algorithm for future use.
# +
import os
# specify the path you want to dump
dump_path = os.path.expanduser("~/sssp_pie.gar")
# dump
SSSP_PIE.to_gar(dump_path)
# -
# Now, you can find a package named `sssp_pie.gar` in your `~/`. Reload this algorithm with following code.
# +
from graphscope.framework.app import load_app
# specify the path you want to dump
dump_path = os.path.expanduser("~/sssp_pie.gar")
sssp2 = load_app("SSSP_PIE", dump_path)
# -
# ## Write Algorithm in Pregel Model
# In addition to the sub-graph based PIE model, GraphScope supports vertex-centric Pregel model. To define a Pregel algorithm, you should import **pregel** decorator and fulfil the functions defined on vertex.
import graphscope
from graphscope.framework.app import AppAssets
from graphscope.analytical.udf.decorators import pregel
# Skeleton of SSSP in the vertex-centric Pregel model; Init and Compute are
# filled in below.
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel(AppAssets):
    @staticmethod
    def Init(v, context):
        pass
    @staticmethod
    def Compute(messages, v, context):
        pass
# The **pregel** decorator has two parameters named `vd_type` and `md_type`, which represent the vertex data type and message type respectively.
#
# You can specify the types for your algorithm, options are `int`, `double`, and `string`. For **SSSP**, we set both to `double`.
#
# Since Pregel model are defined on vertex, the `Init` and `Compute` functions has a parameter `v` to access the vertex data. See more details in [Cython SDK API](https://graphscope.io/docs/reference/cython_sdk.html).
# ### Fulfill Init Function¶
# SSSP Pregel with Init filled in: every vertex starts at "infinite" distance.
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel(AppAssets):
    @staticmethod
    def Init(v, context):
        v.set_value(1000000000.0)  # 1e9 acts as +infinity
    @staticmethod
    def Compute(messages, v, context):
        pass
# The `Init` function sets the initial value for each node by `v.set_value(1000000000.0)`
# ### Fulfill Compute function¶
# Complete SSSP in the Pregel model: each superstep a vertex takes the minimum
# of its incoming messages and, if its distance improved, propagates updated
# distances along its outgoing edges.
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel(AppAssets):
    @staticmethod
    def Init(v, context):
        v.set_value(1000000000.0)  # 1e9 acts as +infinity
    @staticmethod
    def Compute(messages, v, context):
        src_id = context.get_config(b"src")
        cur_dist = v.value()
        new_dist = 1000000000.0
        if v.id() == src_id:
            new_dist = 0
        for message in messages:
            new_dist = min(message, new_dist)
        if new_dist < cur_dist:
            # Distance improved: record it and relax outgoing edges
            # (edge weight read from property index 2).
            v.set_value(new_dist)
            for e_label_id in range(context.edge_label_num()):
                edges = v.outgoing_edges(e_label_id)
                for e in edges:
                    v.send(e.vertex(), new_dist + e.get_int(2))
        # Halt until reactivated by an incoming message.
        v.vote_to_halt()
# The `Compute` function for **SSSP** computes the new distance for each node by the following steps:
#
# 1) Initialize the new value with value 1000000000
# 2) If the vertex is source node, set its distance to 0.
# 3) Compute the `min` value of messages received, and set the value if it less than the current value.
#
# Repeat these, until no more new messages(shorter distance) are generated.
# ### Optional Combiner
# Optionally, we can define a combiner to reduce the message communication overhead.
# SSSP Pregel with an optional Combine step: messages destined for the same
# vertex are pre-reduced with min, cutting communication volume.
@pregel(vd_type="double", md_type="double")
class SSSP_Pregel(AppAssets):
    @staticmethod
    def Init(v, context):
        v.set_value(1000000000.0)  # 1e9 acts as +infinity
    @staticmethod
    def Compute(messages, v, context):
        src_id = context.get_config(b"src")
        cur_dist = v.value()
        new_dist = 1000000000.0
        if v.id() == src_id:
            new_dist = 0
        for message in messages:
            new_dist = min(message, new_dist)
        if new_dist < cur_dist:
            v.set_value(new_dist)
            for e_label_id in range(context.edge_label_num()):
                edges = v.outgoing_edges(e_label_id)
                for e in edges:
                    v.send(e.vertex(), new_dist + e.get_int(2))
        v.vote_to_halt()
    @staticmethod
    def Combine(messages):
        # Reduce all pending messages for a vertex to their minimum.
        ret = 1000000000.0
        for m in messages:
            ret = min(ret, m)
        return ret
# ### Run Your Pregel Algorithm on Graph.
# Next, let's run your Pregel algorithm on the graph, and check the results.
sssp_pregel = SSSP_Pregel()
ctx = sssp_pregel(graph, src=6)
r2 = (
ctx.to_dataframe({"node": "v:host.id", "r": "r:host"})
.sort_values(by=["node"])
.to_numpy(dtype=float)
)
r2
# ### Aggregator in Pregel
# Pregel aggregators are a mechanism for global communication, monitoring, and counting. Each vertex can provide a value to an aggregator in superstep `S`, the system combines these
# values using a reducing operator, and the resulting value is made available to all vertices in superstep `S+1`. GraphScope provides a number of predefined aggregators for Pregel algorithms, such as `min`, `max`, or `sum` operations on data types.
#
# Here is a example for use a builtin aggregator, more details can be found in [Cython SDK API](https://graphscope.io/docs/reference/cython_sdk.html)
# Demonstrates GraphScope's built-in Pregel aggregators: values contributed in
# superstep 0 are reduced globally and read back (and asserted) in superstep 1.
@pregel(vd_type="double", md_type="double")
class Aggregators_Pregel_Test(AppAssets):
    @staticmethod
    def Init(v, context):
        # int
        context.register_aggregator(
            b"int_sum_aggregator", PregelAggregatorType.kInt64SumAggregator
        )
        context.register_aggregator(
            b"int_max_aggregator", PregelAggregatorType.kInt64MaxAggregator
        )
        context.register_aggregator(
            b"int_min_aggregator", PregelAggregatorType.kInt64MinAggregator
        )
        # double
        context.register_aggregator(
            b"double_product_aggregator", PregelAggregatorType.kDoubleProductAggregator
        )
        context.register_aggregator(
            b"double_overwrite_aggregator",
            PregelAggregatorType.kDoubleOverwriteAggregator,
        )
        # bool
        context.register_aggregator(
            b"bool_and_aggregator", PregelAggregatorType.kBoolAndAggregator
        )
        context.register_aggregator(
            b"bool_or_aggregator", PregelAggregatorType.kBoolOrAggregator
        )
        context.register_aggregator(
            b"bool_overwrite_aggregator", PregelAggregatorType.kBoolOverwriteAggregator
        )
        # text
        context.register_aggregator(
            b"text_append_aggregator", PregelAggregatorType.kTextAppendAggregator
        )
    @staticmethod
    def Compute(messages, v, context):
        if context.superstep() == 0:
            # Every vertex contributes one value to each aggregator.
            context.aggregate(b"int_sum_aggregator", 1)
            context.aggregate(b"int_max_aggregator", int(v.id()))
            context.aggregate(b"int_min_aggregator", int(v.id()))
            context.aggregate(b"double_product_aggregator", 1.0)
            context.aggregate(b"double_overwrite_aggregator", 1.0)
            context.aggregate(b"bool_and_aggregator", True)
            context.aggregate(b"bool_or_aggregator", False)
            context.aggregate(b"bool_overwrite_aggregator", True)
            context.aggregate(b"text_append_aggregator", v.id() + b",")
        else:
            # Check the reduced values on a single vertex. The expected numbers
            # assume the p2p-network dataset (62586 vertices) — TODO confirm.
            if v.id() == b"1":
                assert context.get_aggregated_value(b"int_sum_aggregator") == 62586
                assert context.get_aggregated_value(b"int_max_aggregator") == 62586
                assert context.get_aggregated_value(b"int_min_aggregator") == 1
                assert context.get_aggregated_value(b"double_product_aggregator") == 1.0
                assert (
                    context.get_aggregated_value(b"double_overwrite_aggregator") == 1.0
                )
                assert context.get_aggregated_value(b"bool_and_aggregator") == True
                assert context.get_aggregated_value(b"bool_or_aggregator") == False
                assert (
                    context.get_aggregated_value(b"bool_overwrite_aggregator") == True
                )
                context.get_aggregated_value(b"text_append_aggregator")
            v.vote_to_halt()
| tutorials/5_writing_your_own_algorithms.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Step 2
#
# In this notebook we trigger a one-off export of data from MySQL to Neptune via S3. We use an AWS Glue job to export product, product category and supplier data to CSV files, which the job copies to S3. We then use the Neptune bulk load API to load the data from S3 into Neptune.
#
# For details on converting from a relational data model to a graph data model, see [Converting a Relational Data Model to a Graph Model](https://github.com/aws-samples/aws-dbs-refarch-graph/tree/master/src/converting-to-graph#converting-a-relational-data-model-to-a-graph-model).
# <img src="https://s3.amazonaws.com/aws-neptune-customer-samples/neptune-sagemaker/images/mysql-2-neptune-02.png"/>
# # Export from MySQL to S3
#
# First, we'll export some 'static' dimension data (product, product_category, supplier) from MySQL to S3 using a Glue job, `export_from_mysql_to_s3`, the script for which can be found [here](https://github.com/aws-samples/amazon-neptune-samples/tree/master/gremlin/glue-neptune/glue-jobs/mysql-neptune/export-from-mysql-to-s3.py). The script uses the [neptune-python-utils](https://github.com/awslabs/amazon-neptune-tools/tree/master/neptune-python-utils) Python library.
#
# The export creates CSV files formatted according to the Amazon Neptune [bulk load format](https://docs.aws.amazon.com/neptune/latest/userguide/bulk-load-tutorial-format-gremlin.html).
# %run './glue_utils.py'
# %run '../util/neptune.py'
# We'll clear the database before running the import to simulate this comprising a first step in a migration to a fresh Neptune database.
neptune.clear()
neptune.close()
# Now we'll run the export job.
# glue_resource/run_job are defined by glue_utils.py above; presumably run_job
# blocks until the Glue job completes — verify against glue_utils.py.
job_name = glue_resource('export_from_mysql_to_s3')
run_job(job_name)
# # Import from S3
#
# Next, we'll trigger Neptune's bulk load API, and load the graph using the CSV data in S3.
#
# The import creates Product and Supplier vertices and connects them with SUPPLIER edges. (A supplier supplies many products.)
# +
from neptune_python_utils.bulkload import BulkLoad
# Trigger a Neptune bulk load from the CSVs the Glue job wrote to S3.
# NOTE(review): `os` is not imported in this notebook's visible cells —
# presumably brought into scope by the %run'd utility scripts; verify.
bulkload = BulkLoad(source=os.environ['S3_EXPORT_PATH'])
bulkload.load()
# -
# ## Query the graph
#
# Let's query the graph to review the results of the import.
# +
from neptune_python_utils.gremlin_utils import GremlinUtils
# Open a remote Gremlin connection and build a traversal source over it.
gremlin_utils = GremlinUtils()
conn = gremlin_utils.remote_connection()
g = gremlin_utils.traversal_source(connection=conn)
# -
# ### Count the number of Product and Supplier vertices in the graph
# Count vertices by label to confirm the bulk load succeeded.
print('# Products : {}'.format(g.V().hasLabel('Product').count().next()))
print('# Suppliers: {}'.format(g.V().hasLabel('Supplier').count().next()))
# ### Show the products supplied by a supplier
#
# Product vertices are connected to Supplier vertices using an outgoing SUPPLIER edge.
# +
# For one supplier, project its name together with the names of the products
# connected to it via incoming SUPPLIER edges.
results = (g.V().hasLabel('Supplier').limit(1).
           project('supplier', 'products').
           by('name').
           by(in_('SUPPLIER').values('name').fold()).next())
print('Supplier: {}\nProducts: {}'.format(results['supplier'], results['products']))
# -
conn.close()
| gremlin/glue-neptune/notebooks/step-2--export-to-neptune-via-s3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Structural Estimation
# 1. This notebook shows how to **estimate** the consumption model in **ConsumptionSaving.pdf** using **Simulated Minimum Distance (SMD)**
# 2. It also shows how to calculate **standard errors** and **sensitivity measures**
# ## Simulated Minimum Distance
# **Data:** We assume that we have data available for $N$ households over $T$ periods, collected in $\{w_i\}_i^N$.
#
# **Goal:** We wish to estimate the true, unknown, parameter vector $\theta_0$. We assume our model is correctly specified in the sense that the observed data stems from the model.
# **Overview:**
#
# 1. We focus on matching certain (well-chosen) **empirical moments** in the data to **simulated moments** from the model.
#
# 2. We calculate a $J\times1$ vector of moments in the data, $\Lambda_{data} = \frac{1}{N}\sum_{i=1}^N m(\theta_0|w_i)$. This could e.g. be average consumption over the life-cycle, the income variance or regressions coefficients from some statistical model.
#
# 3. To estimate $\theta$ we chose $\theta$ as to **minimize the (squared) distance** between the moments in the data and the same moments calculated from simulated data. Let $\Lambda_{sim}(\theta) = \frac{1}{N_{sim}}\sum_{s=1}^{N_{sim}} m(\theta|w_s)$ be the same moments calculated on simulated data for $N_{sim}=S\times N$ observations for $T_{sim}$ periods from the model for a given value of $\theta$. As we change $\theta$, the simulated outomes will change and the moments will too.
# The **Simulated Minimum Distance (SMD)** estimator then is
#
# $$
# \hat{\theta} = \arg\min_{\theta} g(\theta)'Wg(\theta)
# $$
#
# where $W$ is a $J\times J$ positive semidefinite **weighting matrix** and
#
# $$
# g(\theta)=\Lambda_{data}-\Lambda_{sim}(\theta)
# $$
#
# is the distance between $J\times1$ vectors of moments calculated in the data and the simulated data, respectively. Concretely,
#
# $$
# \Lambda_{data} = \frac{1}{N}\sum_{i=1}^N m(\theta_0|w_i) \\
# \Lambda_{sim}(\theta) = \frac{1}{N_{sim}}\sum_{s=1}^{N_{sim}} m(\theta|w_s)
# $$
#
# are $J\times1$ vectors of moments calculated in the data and the simulated data, respectively.
# **Settings:** In our baseline setup, we will have $N=5,000$ observations for $T=40$ periods, and simulate $N_{sim}=100,000$ synthetic consumers for $T_{sim} = 40$ periods when estimating the model.
#
# **Solution of consumption-saving model:** This estimator requires the solution (and simulation) of the model each trial guess of $\theta$ as we search for the one that minimizes the objective function. Therefore, structural estimation can in general be quite time-consuming. We will use the EGM to solve the consumption model quite fast and thus be able to estimate parameters within a couple of minutes. Estimation of more complex models might take significantly longer.
# > **Note I:** When regressions coefficients are used as moments, they are sometimes referred to as **auxiliary parameters** (APs) and the estimator using these APs as an **Indirect Inference (II)** estimator ([<NAME>, 1993](https://doi.org/10.1002/jae.3950080507)).
#
# > **Note II:** The estimator used is also called a **simulated method of moments (SMM)** estimator. I.e. a simulated General Method of Moments (GMM) estimator.
# # Setup
# +
# %load_ext autoreload
# %autoreload 2
import time
import numpy as np
import scipy.optimize as optimize
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']
import figs
from ConsumptionSavingModel import ConsumptionSavingModelClass
from SimulatedMinimumDistance import SimulatedMinimumDistanceClass
# -
# # Estimation choices
# +
# a. model settings
N = 5_000
N_sim = 100_000
par = {'simlifecycle':True,
'sim_mini':1.0 ,
'simT':40,
'simN':N_sim,
'Nxi':4,
'Npsi':4,
'Na':100}
par_true = par.copy()
par_true['simN'] = N
# b. parameters to estimate
est_par = {
'rho': {'guess':2.0,'lower':0.5,'upper':5.0,},
'beta': {'guess':0.97,'lower':0.90,'upper':0.999},
}
est_par_names = [key for key in est_par.keys()]
# c. moment function used in estimation.
def mom_func(data, ids=None):
    """Estimation moments: mean wealth by age, dropping the first period.

    data: object with an (N, T) wealth array in attribute ``A``.
    ids:  optional row indices used when bootstrap-resampling households.
    """
    wealth = data.A if ids is None else data.A[ids]
    return np.mean(wealth[:, 1:], axis=0)
# d. choose weighting matrix
weighting_matrix = 0
# 0: identity (equal weight),
# 1: inverse of variance on the diagonal (removes scale),
# 2: inverse of covaraince matrix between estimation moments (optimal weighting matrix)
# -
# # Data and estimator
# Construct **data**.
# +
# a. setup model to simulate data: solve the "true" model and simulate the
# observed sample from it.
true = ConsumptionSavingModelClass(name='true',par=par_true)
true.solve()
true.simulate(seed=2019) # this seed is different from the default
# b. data moments: mean wealth by age, plus labels for plotting
datamoms = mom_func(true.sim)
moment_names = [i for i in range(true.par.age_min+1,true.par.age_min+true.par.simT)]
# -
# **Bootstrap** variance of estimation moments used when later calculting standard errors below (and potentially for weighting matrix).
num_boot = 200
num_moms = datamoms.size
smd = SimulatedMinimumDistanceClass(est_par,mom_func,datamoms=datamoms)
# Omega = bootstrapped covariance matrix of the data moments
smd.Omega = smd.bootstrap_mom_var(true.sim,N,num_boot,num_moms)
# **Setup estimator**.
smd.plot({'data':moment_names},{'data':datamoms},xlabel='age',ylabel='wealth',hide_legend=True)
# # Estimate the model
model = ConsumptionSavingModelClass(name='estimated',par=par)
# Choose **weighting matrix**:
# Build the weighting matrix W used in the SMD objective g(theta)' W g(theta).
if weighting_matrix == 0:
    W = np.eye(smd.datamoms.size) # identity
elif weighting_matrix == 1:
    W = np.diag(1.0/np.diag(smd.Omega)) # inverse of variance on the diagonal
else:
    W = np.linalg.inv(smd.Omega) # optimal weighting matrix
# ## Estimation results
# +
# a. estimate the model (can take several minutes)
# %time est = smd.estimate(model,W)
# b. print estimation results
print(f'\n True Est. ')
for key in est_par.keys():
print(f'{key:5s} {getattr(true.par,key):2.3f} {est[key]:2.3f}')
# -
# Show **model-fit**:
plot_data_x = {'data':moment_names,'simulated':moment_names}
plot_data_y = {'data':datamoms,'simulated':mom_func(model.sim)}
smd.plot(plot_data_x,plot_data_y,xlabel='age',ylabel='wealth')
# ## Standard errors
# The SMD estimator is **asymptotic Normal** and standard errors have the same form as standard GMM estimators scaled with the adjustment factor $(1+S^{-1})$ due to the fact that we use $S$ simulations of the model.
#
# The **standard errors** are thus
#
# $$
# \begin{align}
# \text{Var}(\hat{\theta})&=(1+S^{-1})\Gamma\Omega\Gamma'/N \\
# \Gamma &= -(G'WG)^{-1}G'W \\
# \Omega & = \text{Var}(m(\theta_0|w_i))
# \end{align}
# $$
#
# where $G=\frac{\partial g(\theta)}{\partial \theta}$ is the $J\times K$ **Jacobian** with respect to $\theta$. $\Gamma$ is related to what is sometimes called the "influence function".
#
# **Calculating $\Omega$**:
#
# 1. Can sometimes be done **analytically**
# 2. Can always be done using a **bootstrap** as done above
#
# **Calculating the Jacobian, $G$:** This is done using numerical finite differences.
# +
# a. number of datasets simulated per individual in original data
S = model.par.simN/N
# b. find standard errors: Var(theta) = (1 + 1/S) * Gamma Omega Gamma' / N
Gamma, grad_theta = smd.calc_influence_function(est['theta'],model,W)
Var_theta = (1.0+1.0/S) * Gamma @ smd.Omega @ Gamma.T /N
se = np.sqrt(np.diag(Var_theta))
# c. print estimation results
print(f'        True  Est.  (se)')
for i,(key,val) in enumerate(est_par.items()):
    print(f'{key:5s} {getattr(true.par,key):2.3f} {est[key]:2.3f} ({se[i]:2.3f})')
# -
# # Sensitivity Analysis
# We now look into a **sensitivity analysis** of our estimation. Concretely, we implement the **informativeness measure** from [Honoré, Jørgensen and de Paula (2019)](https://doi.org/10.1002/jae.2779) and the **sensitivity to calibrated parameters** in [Jørgensen (2020)](https://www.ifs.org.uk/uploads/CWP1620-Sensitivity-to-Calibrated-Parameters.pdf). Further details can be found in these papers.
# ## The informativeness of estimation moments
# The measures are motivated by those proposed in [Honoré, Jørgensen and de Paula (2019)](https://doi.org/10.1002/jae.2779). All the measures proposed in that paper are calculated, but we will focus on their measure 4, which asks **"what is the change in the asymptotic variance from completely excluding the k'th moment?"**. If the *k*th moment is very informative about a parameter, the asymptotic variance of that parameter should increase significantly if we leave out the *k*th moment.
# compute all informativeness measures from Honoré, Jørgensen and de Paula (2019)
info = smd.informativeness_moments(grad_theta,smd.Omega,W)
# heat map of measure 4e: change in asymptotic variance from excluding each moment
smd.plot_heat(info['M4e'],est_par_names,moment_names,annot=False)
# **Conclusion:** We can see that especially the wealth level for younger households are very informative regarding both $\rho$ and $\beta$. This is likely due to the fact that for low level of resources (which is the case at younger ages), the value of both these parameters affect consumption and saving decisions a lot. Thus, the level of saving especially in young ages are very informative and help to identify the two parameters.
# ## Sensitivity to calibrated parameters
# The measure is motivated by the one proposed in [Jørgensen (2020)](https://www.ifs.org.uk/uploads/CWP1620-Sensitivity-to-Calibrated-Parameters.pdf). Note that the estimation moments are all functions of the $L$ calibrated parameters, which we will denote $\gamma$, $g(\theta|\gamma)$.
#
# The **sensitivity measure** is defined as
#
# $$
# \begin{align}
# S &= \Gamma D
# \end{align}
# $$
#
# where $D=\frac{\partial g(\theta|\gamma)}{\partial \gamma}$ is the $J\times L$ **Jacobian** with respect to $\gamma$.
#
# *We only need to calculate $D$* since we have already calculated $\Gamma$ when we calculated standard errors above. We use numerical finite differences to calculate this object.
# **Chosen calibrated parameters:** $R$, $G$, $\sigma_{\psi}$, $\sigma_{\xi}$.
cali_par_names = ('R','G','sigma_psi','sigma_xi')
cali_par = np.array([getattr(model.par,name) for name in cali_par_names])
# **Calculate the sensitivity measure:**
# D: Jacobian of the moment function wrt the calibrated parameters (finite differences)
grad_gamma = smd.num_grad(cali_par,model,cali_par_names)
# S = Gamma @ D: sensitivity of the estimates to the calibrated parameters
sens_cali = Gamma @ grad_gamma
# **Plot sensitivity measure**
smd.plot_heat(sens_cali,est_par_names,cali_par_names)
# **Check:** We can compare this to a brute-force approach in which we re-estimate the model for marginal changes in the calibrated parameters. This takes considerable time, however. The results are almost identical.
sens_cali_brute = smd.sens_cali_brute_force(model,est['theta'],W,cali_par_names)
smd.plot_heat(sens_cali_brute,est_par_names,cali_par_names)
# **Arbitrary changes in $\gamma$**: We can also investigate larger simultaneous changes in $\gamma$.
# +
# a. set new calibrated parameters
cali_par_new = {'G':1.05}
# b. update calibrated parameters in new version of the model
model_new = model.copy()
for key,val in cali_par_new.items():
setattr(model_new.par,key,val)
# c. calculate new objective function
obj_vec = smd.diff_vec_func(est['theta'],model,est_par_names)
obj_vec_new = smd.diff_vec_func(est['theta'],model_new,est_par_names)
# d. approximate change in theta
Gamma_new,_ = smd.calc_influence_function(est['theta'],model_new,W)
theta_delta = Gamma_new @ obj_vec_new - Gamma @ obj_vec
# e. extrapolate the gradient
theta_delta_extrap = np.zeros(theta_delta.size)
for j,key in enumerate(cali_par_new):
theta_delta_extrap += sens_cali[:,j]*(cali_par_new[key]-getattr(model.par,key))
print(theta_delta_extrap)
# -
# **Check:** Again, we can compare this approximation to a brute-force re-estimation of the model for the changed $\gamma$.
est_new = smd.estimate(model_new,W)
theta_delta_brute = est_new['theta'] - est['theta']
print(theta_delta_brute)
| 00. DynamicProgramming/04. Structural Estimation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] papermill={} tags=[]
# <img width="10%" alt="Naas" src="https://landen.imgix.net/jtci2pxwjczr/assets/5ice39g4.png?w=160"/>
# + [markdown] papermill={} tags=[]
# # SAP-HANA - Query data
# <a href="https://app.naas.ai/user-redirect/naas/downloader?url=https://raw.githubusercontent.com/jupyter-naas/awesome-notebooks/master/SAP-HANA/SAP-HANA_Query_data.ipynb" target="_parent">
# <img src="https://img.shields.io/badge/-Open%20in%20Naas-success?labelColor=000000&logo=data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPHN2ZyB3aWR0aD0iMTAyNHB4IiBoZWlnaHQ9IjEwMjRweCIgdmlld0JveD0iMCAwIDEwMjQgMTAyNCIgeG1sbnM9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiB4bWxuczp4bGluaz0iaHR0cDovL3d3dy53My5vcmcvMTk5OS94bGluayIgdmVyc2lvbj0iMS4xIj4KIDwhLS0gR2VuZXJhdGVkIGJ5IFBpeGVsbWF0b3IgUHJvIDIuMC41IC0tPgogPGRlZnM+CiAgPHRleHQgaWQ9InN0cmluZyIgdHJhbnNmb3JtPSJtYXRyaXgoMS4wIDAuMCAwLjAgMS4wIDIyOC4wIDU0LjUpIiBmb250LWZhbWlseT0iQ29tZm9ydGFhLVJlZ3VsYXIsIENvbWZvcnRhYSIgZm9udC1zaXplPSI4MDAiIHRleHQtZGVjb3JhdGlvbj0ibm9uZSIgZmlsbD0iI2ZmZmZmZiIgeD0iMS4xOTk5OTk5OTk5OTk5ODg2IiB5PSI3MDUuMCI+bjwvdGV4dD4KIDwvZGVmcz4KIDx1c2UgaWQ9Im4iIHhsaW5rOmhyZWY9IiNzdHJpbmciLz4KPC9zdmc+Cg=="/>
# </a>
# + papermill={} tags=[]
# !pip install hdbcli
# + [markdown] papermill={} tags=[]
# ## Step 1 : Import libraries
# + papermill={} tags=[]
import sap_hana_connector
# + [markdown] papermill={} tags=[]
# ## Step 2 : Declare variables
# + papermill={} tags=[]
# Connection parameters for the SAP HANA instance.
# NOTE: the original template wrote these as bare annotations (e.g. `port: 30015`),
# which never binds the names at runtime and makes the later `df = query` cell fail
# with a NameError; plain assignments are required.
type = 'SapHana'        # connector type (shadows the builtin `type`; name kept for template compatibility)
name = 'JPAK_LIVE'      # connection / schema name
user = 'USER'           # database user -- replace with real credentials
password = 'PASSWORD'   # database password -- replace with real credentials
port = 30015            # SQL port of the HANA instance
host = 'HOST'           # hostname or IP of the HANA server
# + [markdown] papermill={} tags=[]
# ## Step 3 : Query to get specific table in SAP HANA
# + papermill={} tags=[]
# SQL query to run against SAP HANA; assigned (not just annotated, as in the
# original template) so the name is actually bound at runtime.
query = 'SELECT * FROM JPAK_LIVE.OINV T0'
# + [markdown] papermill={} tags=[]
# ## Step 4 : Return Data Frame
# + papermill={} tags=[]
# NOTE(review): this only aliases the SQL string; presumably the SAP HANA
# connector imported above is meant to execute `query` and return a DataFrame —
# confirm against the connector's API.
df = query
| SAP-HANA/SAP-HANA_Query_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# # Parsing the output of AWS Textract
#
# I tried to parse via tesseract. It didn't work. While I was successful in identifying specific cells in the tables the OCR was not refined enough to be useful. I couldn't get a well-fitting product.
#
# ## I now try using AWS Textract
#
# Preliminary runs seem to suggest it is more accurate. Still not error-free, but much closer. However, it outputs individual words, not lines or columns or something so there's still some work to be done to transform the sequence of words into a table.
#
#
# +
# Import packages and some setup.
import csv
import os
import json
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
chartDirs = ['chart19', 'chart20', 'chart21', 'chart22', 'chart23']
# -
# Convert a specific page of the pdf to png.
import pdf2image
def getPngPage(fname, page=0, dpi=600, threshold=128) -> Image:
    """Render one page of a PDF to a binarized (pure black/white) PIL image.

    Parameters
    ----------
    fname : str
        Path to the PDF file.
    page : int
        Zero-based index of the page to render (was hard-coded to the first page).
    dpi : int
        Rendering resolution.
    threshold : int
        Grayscale cutoff: pixels brighter than this become white, the rest black.
    """
    gray = pdf2image.convert_from_path(
        fname,
        dpi=dpi,
        grayscale=True
    )[page]
    data = np.asarray(gray)
    # Binarize to remove gray anti-aliasing before downstream processing.
    return Image.fromarray((data > threshold).astype(np.uint8) * 255)
class BlockTable:
    """Reconstructs a table from AWS Textract WORD blocks.

    Column headers are located near the top of the page; every subsequent word
    is assigned to the header column it horizontally overlaps.
    """

    class Block:
        """A single Textract WORD with its confidence and bounding box.

        Page coordinates are based on the top left corner of the page, with
        values increasing to the right and downward (fractions of page size).
        """

        def __init__(self, text, confidence, boundingBox, padding=None):
            self.text = text
            self.confidence = confidence
            self.boundingBox = boundingBox
            # padding widens the effective box (x, y factors) for overlap tests
            self.padding = padding or [1.0, 1.0]

        @property
        def cx(self):
            # horizontal center of the bounding box
            return (2 * self.boundingBox['Left'] + self.boundingBox['Width']) / 2

        @property
        def cy(self):
            # vertical center of the bounding box
            return (2 * self.boundingBox['Top'] + self.boundingBox['Height']) / 2

        @property
        def width(self):
            return self.boundingBox['Width']

        @property
        def height(self):
            return self.boundingBox['Height']

        @property
        def left(self):
            # left edge of the (padded) box
            return self.cx - (self.width * self.padding[0]) / 2

        @property
        def right(self):
            return self.cx + (self.width * self.padding[0]) / 2

        @property
        def top(self):
            return self.cy - (self.height * self.padding[1]) / 2

        @property
        def bottom(self):
            return self.cy + (self.height * self.padding[1]) / 2

        def overlapsColumns(self, other):
            """True if the two (padded) boxes overlap horizontally."""
            return (self.left < other.left and self.right > other.right) or \
                   (other.left < self.left and other.right > self.right) or \
                   (self.left < other.left and self.right > other.left) or \
                   (self.left < other.right and self.right > other.right)

        @property
        def asNumber(self):
            """Parse the text as an integer, keeping only digit characters.

            Returns -1 when no digits are present (and for the literal token
            '(10)', a special case observed in the source documents).
            """
            if self.text == '(10)':
                return -1
            digits = "".join(c for c in self.text if c in '0123456789')
            return int(digits) if digits else -1

        def inspect(self, img):
            """Show the cell's image region and let the user re-type its value."""
            x, y = img.size
            # crop with a small margin (2% of the page) for context
            box = ((self.left-0.02)*x, (self.top-0.02)*y, (self.right+0.02)*x, (self.bottom+0.02)*y)
            cut = img.crop(box)
            cut.show()
            self.text = input("Please enter the number shown \n(%s) > " % self.text)
            print("Reset text to %s" % self.text)

        def __repr__(self, *args, **kwargs):
            return self.text

    def __init__(self, headers, js, sourceImage):
        """Build the table.

        headers : list[str] -- expected column header texts
        js : dict -- raw Textract API response (uses js['Blocks'])
        sourceImage : PIL.Image -- page image, used for manual inspection
        """
        self.headers = headers
        self.img = sourceImage
        word_blocks = [blk for blk in js['Blocks'] if blk['BlockType'] == 'WORD']
        blocks = [BlockTable.Block(b['Text'],
                                   b['Confidence'],
                                   b['Geometry']['BoundingBox'],
                                   [1, 1])
                  for b in word_blocks]
        # 1) locate the header row: headers must appear in the top quarter of the page
        self.blockHeaders = {}
        for block in blocks:
            if list(map(lambda b: b.text, self.blockHeaders.keys())) == self.headers:
                break  # all headers found, in order
            if block.text in headers and block.cy < 0.25:
                # widen header boxes horizontally so their columns capture cells
                block.padding = [3, 1]
                self.blockHeaders.update({block: []})
        # 2) assign every word below a header to the first column it overlaps
        for block in blocks:
            for k in self.blockHeaders.keys():
                if k.cy < block.cy and block.overlapsColumns(k):
                    self.blockHeaders[k].append(block)
                    break
        # 3) order each column top-to-bottom
        for header, column in self.blockHeaders.items():
            column.sort(key=lambda e: e.cy)

    @property
    def numpyArray(self):
        """Table contents as an int32 array; unparsed/missing cells are -1."""
        columns = [[header] + cells for header, cells in self.blockHeaders.items()]
        columns.sort(key=lambda col: col[0].cx)  # order columns left-to-right
        numRows = max(len(col) - 1 for col in columns)
        # np.full initializes every cell deterministically; the original
        # multiplied an *uninitialized* np.ndarray by zero, which happened to
        # work but relied on reading uninitialized memory.
        ret = np.full((numRows, len(columns)), -1, dtype=np.int32)
        for i, col in enumerate(columns):
            for row, cell in enumerate(col[1:]):
                ret[row, i] = cell.asNumber
        return ret

    def inspectMistakes(self, threshold):
        """
        Given a confidence threshold, visually inspect and correct any boxes
        with a confidence interval lower than the threshold.
        """
        for k, v in self.blockHeaders.items():
            for block in v:
                # the len() guard prevents an IndexError on tokens shorter than
                # four characters (the original indexed text[-4] unconditionally);
                # the '1' heuristic flags likely OCR misreads in non-Year columns
                suspicious = (k.text != 'Year'
                              and len(block.text) >= 4
                              and block.text[-4] == '1')
                if block.confidence < threshold or suspicious:
                    block.inspect(self.img)
# +
# Function for getting the table from a singular page.
def getTable(colHeaders, js, tableImg):
    """Parse one page's Textract response into an int table (numpy array).

    NOTE: parameter names corrected -- call sites pass (headers, json, image),
    but the original signature was (js, colHeaders, tableImg), so the names
    were silently swapped. The arguments were still forwarded to BlockTable in
    the correct positional order, so behavior is unchanged.

    colHeaders : list[str] -- expected column header texts
    js : dict -- raw Textract API response
    tableImg : PIL.Image -- page image for manual inspection of dubious cells
    """
    table = BlockTable(colHeaders, js, tableImg)
    threshold = 85  # confidence (%) below which a cell is manually verified
    table.inspectMistakes(threshold)
    return table.numpyArray
# -
# Parse chart 19: first column is the year, remaining columns are series 89-101.
# Open the JSON with a context manager so the file handle is closed promptly
# (the original `json.load(open(...))` leaked it).
with open('/home/jordan/OpenJustice/resources/colonial-1970-migration/chart19/apiResponse.json') as f:
    api_response = json.load(f)
table19 = getTable(
    ['Year', *[str(i) for i in range(89, 102)]],
    api_response,
    getPngPage('/home/jordan/OpenJustice/resources/colonial-1970-migration/chart19/chart19.pdf'))
# +
# print(table19.shape)
plt.plot(table19[:, 0])
# fmt='%d' keeps the integer table readable (the default would write scientific
# float notation for an int32 array)
np.savetxt('table19.csv', table19, delimiter=',', fmt='%d')
| scripts/parse_textract.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
sys.path.append('../..')
import pyotc
pyotc.plt.ion()
# -
# ### Generate HeightCalibration object and get PSD data from files
# +
hc = pyotc.HeightCalibration()
basename = 'B01_'
directory = '../exampleData/height_calibration_single_psds/'
hc.get_psd_files(basename, directory=directory);
# frequency bounds can alternatively be given per axis as a dict:
# bounds = {'x': (10, 20e3), 'y': (10, 20e3), 'z': (10, 4e3)}
bounds = (1, 45e3)
# bounds = None
# frequencies to exclude (e.g. noise peaks), per axis:
#f_ex = {'x': [12, 88], 'y': [8, 48, 52], 'z': [48]}
f_ex = None
hc.gen_psd_fits(bounds=bounds, f_exclude=f_ex)
# -
# ### Fit the PSDs
# +
''' If dynamic is set to True, the set height is used to determine if a
fit is run with a Lorentzian PSD or a hydrodynamically correct PSD. '''
# configure the PSD model: Lorentzian with a fixed low-pass filter correction
hc.setup_psd_fits(model='lorentzian', lp_filter=True, lp_fixed=True, f3dB=8800, alpha=0.3)
# per-axis fitting bounds in Hz
bounds = {'x': (1, 20e3), 'y': (1, 20e3), 'z': (1, 10e3)}
#bounds = (100, 45e3)
#bounds = None
#f_ex = {'x': [12, 88], 'y': [8, 48, 52], 'z': [48]}
f_ex = 8.0 # exclude crosstalk from excitation
# kwargs = {'f_c': 4e3} # initial guesses
kwargs = {}
hc.fit_psds(bounds=bounds, f_exclude=f_ex,
            fitreport=0, plot_fits=True,
            use_heights=False, plot_kws={'showLegend': False}, **kwargs);
# -
# ### determine focal shift
# fit only a slice of the height data to avoid boundary artifacts
hc.determine_focal_shift(idx_slice=slice(22, -4))
# #### set the focal shift
hc.focal_shift = 0.63
hc.plot_pc_results()
# ### exclude some data
# keep only heights between 0.2 and 10 (units as used by pyotc — confirm)
hc.exclude_heights_outside(0.2, 10, reset=True)
hc.fit_rel_drag(method='radius', plot_fit=True, h0=-0.6)
# #### find out which surface height was determined
hc.rel_drag_fit.params['h0']
# ### fit all data
hc.fit_height_data(method='radius', fit_dissens_osci=True, plot_fit=True, fit_drag_first=False)
hc.save_hc_data()
# #### adjust heights
hc.add_height_offset(0.5809)
hc.reset_recalc()
hc.plot_pc_results()
# ### re-fit psds (if necessary)
# +
hc.setup_psd_fits(model='lorentzian', lp_filter=True, lp_fixed=True, f3dB=8800, alpha=0.3)
# heights = [1.00, ]
heights = None # fit all available psds
# names = ['x', 'y'] # fit only particular axes
names = None # fit all available axes
bounds = {'x': (1, 20e3), 'y': (1, 20e3), 'z': (1, 10e3)}
#bounds = (100, 45e3)
#bounds = None
#f_ex = {'x': [12, 88], 'y': [8, 48, 52], 'z': [48]}
f_ex = 8.0  # exclude crosstalk from excitation (as above)
# kwargs = {'f_c': 4e3} # initial guesses
kwargs = {}
# this time use_heights=True: the set heights select the PSD model per fit
hc.fit_psds(heights=heights, names=names, bounds=bounds, f_exclude=f_ex,
            fitreport=0, plot_fits=True, use_heights=True,
            plot_kws={'showLegend': False}, **kwargs);
# -
hc.plot_pc_results()
hc.exclude_heights_outside(0.8, 10, reset=True)
hc.fit_rel_drag(plot_fit=True)
hc.rel_drag_fit.params['h0']
hc.fit_height_data(fit_drag_first=False, fit_dissens_osci=True, plot_fit=True)
# save height-dependent data
hc.save_hc_data(basename, directory)
# save height-fit results
hc.write_results_to_file()
| examples/notebooks/height_calibration_single_psds.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Multivariate / Regression Imputation
#
# The IterativeImputer class models each feature with missing values as a function of other features, and uses that estimate for imputation.
#
# It does so in an iterated round-robin fashion: at each step, a feature column is designated as output y and the other feature columns are treated as inputs x.
#
# A regressor is fit on (x, y) for known y. Then, the regressor is used to predict the missing values of y.
#
# This is done for each feature in an iterative fashion, and then is repeated for max_iter imputation rounds. The results of the final imputation round are returned.
#
# Apply Iterative imputer with 10 iterations on the provided array x. Round the imputed values to integers
#
# Then apply your model on another array x_test (containing some more MV) without fitting again
# +
import numpy as np

# Note: this estimator is still experimental for now: default parameters or
# details of behaviour might change without any deprecation cycle.
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer

# Small demo matrix with one missing value in each of the last two rows.
x = np.array([[10, 21], [32, 61], [40, 81], [np.nan, 32], [71, np.nan]])
print("Original x: \n", x)

# Fit an iterative (round-robin regression) imputer with 10 rounds and
# round the imputed entries to whole numbers.
imputer = IterativeImputer(max_iter=10)
transformed_x = np.round(imputer.fit_transform(x))
print("Regression imputed x: \n", transformed_x)

# Reuse the already-fitted model on fresh data: transform only, no re-fit.
print("\nUsing the same imputation model again...")
x_test = np.array([[np.nan, 2], [6, np.nan], [np.nan, 6]])
print("Test data with a lot of missing values: \n", x_test)
print("The model learned that the second feature is about double the first: \n", np.round(imputer.transform(x_test)))
# -
# # Deterministic vs. Stochastic Regression Imputation
#
# In Deterministic Regression Imputation, predicted values out of regression model are (directly) used to impute MV
# This approach is simple, but distorts distributions (variances, correlations etc.)
#
# In Stochastic Regression Imputation a residual component is added to the pure regression results.
# The residual is calculated as a random value based on the given distribution
#
# To have a baseline for comparison, start by applying deterministic regression imputation on the provided array x, similar to the previous task.
#
# Now, applying stochastic regression imputation on the same data. Hint: the sample_posterior attribute will be needed.
#
# Experiment a bit with this kind of imputation by assigning different values to the random_state variable and checking the results!
#
# +
import numpy as np

# Note: this estimator is still experimental for now: default parameters or
# details of behaviour might change without any deprecation cycle.
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt

x = np.array([[1, 2, 3, 15, 17], [3, 6.5, 8, 14, 17], [4, 7.5, 13, 15, 19], [np.nan, 3, 5, 12, 16], [7, np.nan, 22, 11, 17]])
print("Original x: \n", x)

# Deterministic regression imputation: sample_posterior=False means the plain
# regression prediction fills the gaps (distorts variances/correlations).
deterministic_imputer = IterativeImputer(max_iter=10, sample_posterior=False)
print("Deterministic Regression Imputation of x: \n", np.round(deterministic_imputer.fit_transform(x), 1))

# Stochastic regression imputation: sample_posterior=True adds a random
# residual drawn from the posterior, better preserving the data's spread.
seed = 0
stochastic_imputer = IterativeImputer(max_iter=10, random_state=seed, sample_posterior=True)
print("Stochastic Regression Imputation of x (round 1): \n", np.round(stochastic_imputer.fit_transform(x), 1))

# A different random seed yields a different (but equally valid) randomization.
seed = 1
stochastic_imputer = IterativeImputer(max_iter=10, random_state=seed, sample_posterior=True)
print("Stochastic Regression Imputation of x (round 2): \n", np.round(stochastic_imputer.fit_transform(x), 1))
| notebooks/Solutions/DATAPREP_03c_MV_Handling_Multivariate_Imputation_Lab_Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: gis
# language: python
# name: gis
# ---
# + [markdown] toc-hr-collapsed=true
# # Introduction to Python
# -
# Today we will cover the basics of Python and get familiar with interactive computing in a *Jupyter notebook*.
#
# Notebooks allow text and graphics to be combined with code that can be run interactively, with the results appearing inline.
#
# This is a notebook that you are looking at right now.
#
# This tutorial was adapted from Geoff Boeing's [Coding Bootcamp](https://github.com/gboeing/ppd430/blob/main/modules/03-coding-bootcamp-i/lecture.ipynb) lecture as part of his Urban Informatics course at USC. Thank you to Geoff for making this valuable educational content available, in addition to numerous tools like [OSMNX](https://osmnx.readthedocs.io/en/stable/).
# ## First Line of Code
# The first bit of code that is usually demonstrated when you are learning a programming language is usually to make the computer print "Hello world!"
#
# In Python, we can do this using the `print` command. Click into the cell bellow and hit `Shift + Enter` to run the cell.
print("Hello world!")
# Congrats, you just wrote your first line of Python code!
# Note how a `[1]` appears to the left of the code cell. This number indicates the order in which cells have been executed.
#
# Try running the above cell again and see what happens.
# ## Writing Comments
# It is a good practice to write comments in your code documenting what the code does. This is helpful so that someone else can more easily follow your code, and also for yourself to remember what you were doing (this can be surprisingly helpful!).
#
# In Python, comments are preceded by the `#` symbol.
# This is a comment
# It is readable by humans
# But ignored by the computer
print("Hello again!")
# Before we learn any more Python, let's get familiar with the Jupyter Notebook that we are currently using.
# + [markdown] toc-hr-collapsed=true
# ## Jupyter Notebook Interface
# -
# ### Cells
#
# A cell is a container for text to be displayed in the notebook or code to be executed by the notebook’s kernel.
#
# There are two types of cells:
#
# 1) **Code cell**: contains code to be executed in the kernel. When the code is run, the notebook displays the output below the code cell that generated it.
#
# 2) **Markdown cell**: contains text formatted using Markdown and displays its output in-place when the Markdown cell is run
#
# There is always one **active** cell highlighted by a blue bar on the left. This indicates your current location in the notebook.
#
# Any selected cell can exist in two modes:
#
# 1) **Edit mode**: when the cell is ready for us to type something into the cell. This is indicated by the cursor blinking.
#
# 2) **Command mode**: when the cell is ready for us to perform a command, like running the cell or inserting a new one.
#
# In order to enter edit mode, simply click into a cell or hit `Enter` on the currently selected cell.
#
# In order to enter command mode, hit the `esc` key. You will see that the cursor stops blinking upon entering command mode.
# ### Jupyter Keyboard Shortcuts
#
# When you are in command mode, you can use keyboard shortcuts to run commands. Below is a list of some of Jupyter's keyboard shortcuts. You don't need to memorize them immediately, but this should give you a good idea of what's possible.
#
# - Toggle between edit and command mode with `Esc` and `Enter`, respectively
# - Once in command mode:
# - Scroll up and down your cells with your `Up` and `Down` arrows
# - `Shift + Enter` to run a cell and move to the next cell
# - `Control + Enter` to run a cell and remain on that cell
# - Press `A` or `B` to insert a new cell above or below the active cell
# - Press `M` to transform the active cell to a Markdown cell
# - Press `Y` to transform the active cell to a code cell
# - Press `D + D` (`D` twice) to delete the active cell
# - Press `Z` to undo cell deletion
# - Hold `Shift` and press `Up` or `Down` to select multiple cells at once
# - With multiple cells selected, `Shift + M` will merge your selection
# - `Ctrl + Shift + -` will split the active cell at the cursor
# ### Practice
# Let's do some exercises to get used to the keyboard shortcuts in Jupyter.
# +
# Practice adding a cell above this one using the A shortcut
# +
# Practice adding a cell below this one using the B shortcut
# +
# Practice converting this cell to a Markdown cell
# -
# Practice converting this cell to a code cell
# +
# Practice deleting this cell with D + D
# +
# Practice undoing a cell deletion with Z
# +
# Practice selecting multiple cells at once with Shift + Up or Shift + Down
# +
# Practice merging multiple selected cells with Shift + M
# -
# Practice splitting the below code into two cells with Ctrl + Shift + -
x = 1
y = 2
# + [markdown] toc-hr-collapsed=true
# ## Markdown
#
# Markdown is a lightweight, easy to learn language for formatting plain text.
#
# -
# Below is some example markdown text. Select the cell and enter into edit mode to see the raw markdown.
# This is some plain text that forms a paragraph. You can add emphasis via **bold** and __bold__, or *italic* and _italic_.
#
# Paragraphs must be separated by an empty line.
#
# * Sometimes we want to include lists.
# * Which can be bulleted using asterisks.
#
# 1. Lists can also be numbered.
# 2. If we want an ordered list.
#
# We can embed hyperlinks like [this](https://google.com/).
# + [markdown] toc-hr-collapsed=true
# # Variables
# -
# Variables are containers for storing data values. A variable is created the moment you first assign a value to it.
# Create a variable called x and populate it with the number 5:
x = 5
x * 2
# You can store text in a variable, too. In Python, text (or `strings`) must be enclosed by quotation marks (single and double are both fine).
# Overwrite that variable x and populate it with a string.
x = 'hello'
x
x * 2
# Notice how the multiplication operator performs a different function when used on a string, versus a number.
# Clearly, variables can store data of different types. And different data types do different things.
# ## Numeric Variables
# variables contain values and those values can vary
x = 5
# what is the value of x?
x
# you can perform operations on variables, just like you can with numbers
x + 3
# what is the value of x, now?
x
# create a new variable y from an operation on x
y = x * 2
# what is the value of y?
y
# outputting values only displays the last thing output
# this is different from printing! it is kinda confusing!
x
y
# use print to write multiple value(s) to the "console"
print(x)
print(y)
# you can comma-separate values to print multiple values to the console on one line
print(x, y)
# you can also print the result of an expression
print(x * y)
# now it's your turn
# in a single line, create a new variable z and set it equal to x divided the sum of x plus y
# + [markdown] tags=[] toc-hr-collapsed=true
# ## String Variables
# -
# We can also do operations on text. Pieces of text are called `strings` in Python (and in programming generally).
#
# Strings must be contained within quotation marks (single `'` or double`"` both work).
# Create a variable called `name` and populate it with your name. Be sure to wrap your name in quotation marks.
name = "Will"
print("Hello world! My name is", name)
# We can use some numeric functions on strings.
# +
first_name = "Will"
last_name = "Geary"
# you can "concatenate" strings with the + operator
print("Hello world! My name is", first_name + last_name)
# -
# Let's format this output by add a space between the first and last name.
print("Hello world! My name is", first_name + " " + last_name)
# We can even multiply a string by a number, though we may never need to do this.
# Multiply a string by a number
name * 100
# We cannot divide a string by a number though, and doing so will throw a `TypeError`. We will discuss `types` and `Error`s later in this tutorial.
# Try to divide a string by a number
# Throws an error
name / 2
# +
# let's try concatenating multiple strings
city = "Brooklyn"
sep = ", "
state = "NY"
zip_code = "11225"
# you can "concatenate" strings with the + operator
location = city + sep + state + " " + zip_code
print(location)
# -
# Let's investigate some more things we can do with lists.
# create a new string to work with
sentence = "This is INFO 615."
sentence
# ### `len()` function
# `len` is a built-in Python function that tells us the length of an object.
#
# When you use `len` with a `string`, it tells you how many characters are in that string.
# how many characters are in your name? use len function
len(name)
# what is the length of the string?
len(sentence)
# ### `strip()` method
# Use the strip method to remove characters from the beginning and end of a string.
sentence.strip(".")
sentence.strip("This is ")
# Notice that `strip()` only works for removing characters at the beginning or end of a string. It does not work with characters in the middle.
sentence.strip("is")
sentence
# create a new string to contain the stripped version of our string
new_sentence = sentence.strip("This is")
new_sentence
# you can create a string variable and pass it into the strip method as an argument
to_strip = "INFO 615."
sentence.strip(to_strip)
# ### `replace()` method
# `replace()` returns a string where a specified value is replaced with a specified value
sentence.replace("is", "XX")
# If there are multple matching values, they will all be replaced.
sentence.replace("s", "$$")
# ### `split()` method
# Use `split()` to break a string into chunks (sometimes called "tokens").
sentence.split()
# By default, `split()` breaks up a string based on spaces, but you can pass other substrings to split on.
sentence.split("i")
sentence.split("-")
# ### `join()` method
# Use the string join method to turn a list into a string.
sentence.split()
join_string = ' ^-_-^ '
join_string.join(sentence.split())
# ### `find()` method
# Use the find method to return the index of the first instance of some substring within another string.
sentence.find("INFO")
# + [markdown] toc-hr-collapsed=true
# # Data Types
# -
# There are four basic types of data in Python:
#
# - Numeric
# - Integers (`int`)
# - Decimals (`float`)
# - Text
# - Strings (`str`)
# - Must be surrounded by either single quotes or double quotes, i.e. “hello”
# - Boolean (`bool`)
# - Booleans represent one of two values: True or False
#
# There are other data types in Python, but we will focus on these three first.
# ## `Type()` Function
# You can see what data type a variable is by using the `type()` function.
type(3)
type(3.0)
type("3.0")
type(True)
# ## Casting
# Sometimes you will need to convert a variable from one type to another. This is called ✨ casting ✨
# Casting in python is done using constructor functions:
#
# - `int()` constructs an integer number from an integer, a float (by removing all decimals), or a string (providing the string represents an integer number)
# - `float()` constructs a float number from an integer, a float, or a string (if the string represents a float or an integer)
# - `str()` constructs a string from a variety of data types, including strings, integers and floats
#
#
# Cast a `float` to an `int`.
x = int(2.8)
x
# Cast a `str` to an `int`.
x = int("3")
x
# Cast an `int` to a `float`.
x = float(3)
x
# + [markdown] toc-hr-collapsed=true
# # Arithmetic Operators
#
# Let's review the basic mathematical operations that we can perform with Python.
# -
# Add two integers
2 + 2
# Multiply two integers
2 * 3
# Divide two integers
10 / 5
# Raise 2 to the 4th power
2 ** 4
# take the square root of 9 (by raising it to the power of 1/2)
9 ** (1 / 2)
# ## Incrementation
# We can increment a variable using the + operator. Increment means to increase a number by another number, usually 1 but not necessarily 1.
x = 4
x = x + 1
x
x = x + 1
x
# We can accomplish the same thing (incrementation) using `+=` operator, which is a bit cleaner.
x = 4
x += 1
x
x += 1
x
# ## Modulo Operator
# The `%` symbol in Python is called the Modulo Operator. The Modulo returns the remainder of a division problem
# Let's try it. 12 is evenly divisible by 3, so 12 % 3 equals zero.
12 % 3
# 10, on the other hand, is not evenly divisible by 3. 9 is evenly divisible by 3, and then there is 1 remaining to get to 10. This is why 10 % 3 returns a value of 1.
10 % 3
# As expected, 11 % 3 returns a remainder of 2.
11 % 3
# Thus, the `%` operator can be used to determine if one number evenly divides into another. When the remainder of a division is zero, that implies one number must be evenly divisible by the other.
# ## `Floor()` Function
from math import floor
# The floor of a number is the largest integer less than or equal to the number. This can be thought of as “rounding down”.
floor(3.4)
floor(3.9)
floor(3)
# ## Floor Division
# Floor division is an operation in Python that divides two numbers and rounds the result down to the nearest integer
# We can perform floor division in Python with the double `//` operator.
101 // 4
# Note how floor division relates to the modulo operator: 101 divided by 25 leaves a remainder of 1, which is exactly what 101 % 25 returns.
101 % 25
# ## Practice
# +
# Now you try
# In a single line of code, divide the sum of ninety plus seventy by the product of twelve and eleven
# Order of operations (PEMDAS) matters
# Result should be: 1.21212121...
# + [markdown] toc-hr-collapsed=true
# # Comparison Operators
# -
# ## Equality (`==`)
# You can check if two variables are equal using the double equal sign `==`.
# +
a = 5
b = 10
a == b
# -
# ## Inequality (`!=`)
# Conversely, you can check if two variables are *not* equal using the `!=` operator.
# +
a = 5
b = 10
a != b
# -
# ## Greater Than (>)
# +
a = 5
b = 10
a > b
# -
# ## Less Than (<)
# +
a = 5
b = 10
a < b
# -
# ## Greater Than or Equal To (>=)
# +
a = 5
b = 10
a >= b
# -
# ## Less Than or Equal To (<=)
# +
a = 5
b = 10
a <= b
# + [markdown] tags=[] toc-hr-collapsed=true
# # Logical Operators
# -
# Logical operators can be used to combine conditional statements.
# ## `and`
#
# `and` returns `True` if both statements are `True`.
# +
x = 3
x < 5 and x < 10
# -
# ## `or`
# `or` returns `True` if at least one statement is `True`.
x < 2 or x < 5
# `not` reverses the result, turning `True` to `False` or `False` to `True`.
not(x < 5 and x < 10)
not(x < 2 or x < 5)
# # Membership Operators
# Membership operators are used to test if a variable is contained within another variable.
'h' in 'hello'
'h' not in 'hello'
# As we will see in the next section, membership operators are particularly relevant with data structures like `lists` and `dictionaries`.
# + [markdown] toc-hr-collapsed=true
# # Data Structures
# + [markdown] toc-hr-collapsed=true
# ## Lists
# -
# Lists are used to store multiple items in a single variable.
#
# Lists are created using square brackets.
# ### Properties of Lists
#
# - Ordered
# - Lists have a defined order, and that order will not change (unless we change it)
# - If you add new items to a list, the new items will be placed at the end of the list
# - Indexed
# - The first item has index [0], the second item has index [1], etc.
# - Changeable
# - We can change, add, and remove items in a list after it has been created
# - Allow duplicates
# - Since lists are indexed, lists can have multiple items with the same value
#
# ### Create a list
# Create a list named `food`.
food = ['eggs', 'bananas', 'spinach', 'milk', 'bread']
type(food)
# ### Accessing items in list
# Select the first item from the list (in position zero).
food[0]
# Select the second item from the list.
food[1]
# Select the third item from the list.
food[2]
# Select the last item from the list.
# ### Using negative indexing
# Select the last item in the list using `-1`
food[-1]
# Select the second to last item from the list.
food[-2]
# ### List Slicing
#
# You can return a subset or “slice” of a list by specifying where to start and where to end the range
# Select the first three items from the list.
food[:3]
# Select the last two items from the list.
food[-2:]
# Select the middle of the list, excluding the first and last item.
food[1:-1]
# ### Changing Items in a List
# To change the value of a specific item, refer to the index number.
#
# Replace the first list item with something else:
food[0] = 'onion'
food
# ### Adding Items to a List
# You can add an item to the end of a list with `.append()`
food.append("cheese")
food
# You can insert an item into a specific position in a list with `.insert()`
food.insert(1, "orange")
food
# ### Removing Items from a list
# There are a few options for removing items from a list.
# The `remove()` method removes an item by its specified content.
food.remove('onion')
food
# The `pop()` method removes an item by its index position. It also returns the item that was removed.
food.pop(2)
food
# If you don't specify the index position, `pop()` will remove the last item by default.
food.pop()
food
# Lastly, you can remove an item by its index position using `del` function. Unlike `pop()`, `del` does not return the item that was removed.
del food[0]
food
# ### Sorting a list
# You can sort a list with `sort()`.
# This will sort the list alphanumerically, ascending, by default.
food.sort()
food
# To sort descending, use the keyword argument `reverse = True`.
food.sort(reverse = True)
food
# ### Copying a list
# You can make a copy of a list with `.copy()`
food_copy = food.copy()
food_copy
# Note that these are now two separate variables. Changes made to one will not impact the other.
food.sort()
food
food_copy
# ### Joining lists
#
# You can join (or "concatenate") two lists with the `+` operator.
more_food = ['kiwi', 'orange', 'lemon', 'grapefruit']
food + more_food
# Note that the list join above does not impact the contents of either list.
food
# You can join lists and update the original list to reflect the new contents after joining like this:
food = food + more_food
food
# ### Clear all items from a list
# The `clear()` method removes all the items from a list.
food.clear()
# The list is now empty
food
# + [markdown] tags=[] toc-hr-collapsed=true
# ## Dictionaries
# -
# Dictionaries are used to store data values in key : value pairs
#
# Dictionaries are written with curly brackets, and have keys and values
prices = {
'eggs': 3.50,
'bananas': 0.80,
'spinach': 4.00,
'milk': 2.00,
'bread': 2.00
}
type(prices)
# ### Accessing items in a dictionary
# Retrieve the value associated with the key `eggs`:
prices['eggs']
# Retrieve the value associated with the key `bananas`:
prices['bananas']
# View all of the keys in this dictionary.
prices.keys()
# View all of the values in this dictionary.
prices.values()
# ### Checking for membership within a dictionary
# Is `eggs` in this dict?
'eggs' in prices
# Is `tomatoes` in this dict?
'tomatoes' in prices
# ### Changing items in a dictionary
# You can change the value of a specific item by referring to its key name.
prices
prices['bananas'] = 1.0
prices
# You can also use the `update()` method to change items in a dictionary with the items from the given argument.
prices.update({"bread": 4.0})
prices
# ### Adding items to a dictionary
# You can add an item to a dictionary by referring to its key name.
prices
prices['onion'] = 1.20
prices
# ### Removing items from a dictionary
# There are several methods to remove items from a dictionary.
#
# The `pop()` method removes the item with the specified key name. Note that `pop()` returns the value that is removed.
prices.pop('eggs')
prices
# Alternately, you can use the `del` keyword to delete an item by specified key name. Unlike `pop()`, `del` does not return the value.
del prices['spinach']
prices
# ### Make a copy of a dictionary
# Similar to lists, you can make a copy of a dictionary with `.copy()`
prices_copy = prices.copy()
prices_copy
# ### `zip()` function
# You may need to merge two lists into a dictionary. This can be done with the `zip()` function.
#
# `zip()` takes a list of keys, and a list of values, and merges them together to form a dictionary.
# +
keys = ['a', 'b', 'c', 'd']
values = [0, 1, 2, 3]
# zip the two lists together
# and construct a dictionary
new_dict = dict(zip(keys, values))
# -
new_dict
# + [markdown] toc-hr-collapsed=true
# ## Sets
# -
# Sets are used to store unique items in a single variable. When using a set, all of its elements must be unique.
# Duplicates will be dropped from a set
set1 = {"a", "b", "c", "c", "c"}
set1
# A set is a collection which is:
# - unordered
# - unchangeable
# - unindexed
#
# Set items are unchangeable, but you can remove items and add new items.
# You *cannot* access items in a set by referring to an index or a key. Trying to do so throws an error.
# Throws an error
set1["a"]
# ### Checking for membership within a set
# You can, however, check if an item is present in the set:
"a" in set1
# You can check the length of a set with `len()`.
len(set1)
# Sets can include any data type, and can even include a mix of data types.
set2 = {"abc", 34, True, 40, "male"}
set2
# ### Adding items to a set
# You can add items to a set with `add()`
set1 = {"apple", "banana", "cherry"}
set1.add("orange")
set1
# ### Adding one set to another set
# You can add items from one set to another with `update()`.
# +
set1 = {"apple", "banana", "cherry"}
set2 = {"pineapple", "mango", "papaya"}
set1.update(set2)
# -
set1
# The second object in the `update()` method does not have to be a set, it can be any iterable object (tuples, lists, dictionaries etc.)
# +
set1 = {"apple", "banana", "cherry"}
list1 = ["kiwi", "orange"]
set1.update(list1)
# -
set1
# ### Joining two sets with `union()`
# You can use the `union()` method to return a new set containing all items from both sets.
set1 = {"a", "b" , "c"}
set2 = {1, 2, 3}
set3 = set1.union(set2)
set3
# ### Joining two sets with `intersection()`
# The `intersection()` method will return a new set that only contains the items that are present in both sets.
# +
x = {"apple", "banana", "cherry"}
y = {"google", "microsoft", "apple"}
z = x.intersection(y)
# -
z
# ### Joining two sets with `symmetric_difference()`
# +
x = {"apple", "banana", "cherry"}
y = {"google", "microsoft", "apple"}
z = x.symmetric_difference(y)
# -
z
# + [markdown] tags=[] toc-hr-collapsed=true
# # If / Else Statements
# -
# In computer science, control flow is the order in which individual statements, instructions or function calls of an imperative program are executed. A set of statements executed as a group is often called a block.
# The first Python control structure we will learn is the "if statement".
#
# The "if statement" is a decision-making statement that guides a program to make decisions based on whether a provided condition is True or False.
#
# An "if statement" is written by using the `if` keyword.
# ## Indentation
# Python is unique in that it relies on indentation (whitespace at the beginning of a line) to define scope in the code.
# ## The `if` keyword
# Use an if statement to execute indented code only if some condition is true.
x = 9
if x < 10:
# Code block following the if must be indented
print(str(x) + " is less than 10")
# You can chain conditions together with `and`/`or`, and group conditions with parentheses for readability.
x = 3.5
if (x >= 3) and (x <= 6):
print("x is between 3 and 6")
# ## The `else` keyword
# If / Else statements allow us to write branching conditional statements of the form: “If X then do Y, otherwise do Z”.
# if/else statement to handle different branches of execution
sentence = "Today is Wednesday."
if "Wed" in sentence:
print("Yes")
else:
print("No")
# ## The `elif` keyword
#
# The `elif` keyword is Python's way of saying "if the previous conditions were not true, then try this condition".
# If the first if statement evaluates to false, `elif` (i.e., "else if") executes a code block if its condition is true.
#
# `else` executes a code block if no preceding block evaluated to true.
x = 10
if x < 10:
print("x is less than 10")
elif x == 10:
print("x equals 10")
else:
print("x is greater than 10")
# + [markdown] toc-hr-collapsed=true
# # For Loops
# -
# Loops let us iterate over a container of elements, handling each element in sequence, one at a time.
# ## Loop through a list
sentence = "This is INFO 615"
# loop through list of words in string
for word in sentence.split():
print(word)
for word in sentence.split():
print("s" in word)
# ## Count with `enumerate()`
# enumerate lets you loop through a list and count along
# this function returns a tuple
for count, letter in enumerate(sentence):
print(count, letter)
# ## The `range()` function
# `range()` produces a range of integer values.
range(9)
# Convert it to list to explicitly see what's in the range.
list(range(9))
# You can loop through a range.
for x in range(9):
print(x, x ** 2)
# Because range goes up to but does **not** include the ending number, you must add 1 to include it.
n = 10
list(range(n + 1))
# The `range()` function optionally takes start, end, and step arguments.
#
# Step sets the size of the increment used in range (default is a step value of 1).
# See what happens when you set the step value to 2.
list(range(10, 20, 2))
# ## Practicing for loops
# Now it's your turn.
#
# 1) Loop through the numbers 1 through 15, using modulo to print 'even' if each is evenly divisible by 2, and 'odd' if not
# Your code below
# 2. Print out only the integers in the following list
# +
my_list = [3.3, 19.75, 6, 3.3, 8]
# Your code below
# -
# # While Loops
# With the while loop we can execute a set of statements as long as a condition is True.
#
# **Warning**: If a condition continues to be True, the while loop will run forever in an infinite loop
#
# a while loop repeats as long as some condition is True
# beware infinite loops!
x = 5
while x > 0:
print(x)
x = x - 1
print("blast off!")
# add the numbers 1 through 9 to a list
my_list = []
x = 1
while x <= 9:
my_list.append(x)
x = x + 1
my_list
# + [markdown] toc-hr-collapsed=true
# # Functions
# -
# A function is a reusable block of code which only runs when it is called.
#
# You can pass data, known as parameters, into a function.
#
# A function can return data as a result.
#
# Like with for-loops, indentation matters. The block of code contained within a function must be indented.
def hello_world():
    """Print a friendly greeting to the console."""
    greeting = "Hello, world!"
    print(greeting)
hello_world()
# ## Function arguments
# Information can be passed into functions as arguments.
#
# Arguments are specified after the function name, inside the parentheses.
#
# You can add as many arguments as you want, just separate them with a comma.
#
# Let's create a simple example function.
def full_name(first_name, last_name):
    """Print the given first and last name separated by a single space."""
    # Concatenate with `+` (mirrors the arithmetic section of the lesson).
    combined = first_name + " " + last_name
    print(combined)
full_name("Will", "Geary")
# Let's create a more complicated function involving comparison operators and if/else statements.
def my_function(value):
    """Print how `value` compares to 10: less than, equal to, or greater."""
    # The three conditions are mutually exclusive, so their order is free.
    if value == 10:
        print(value, "equals 10")
    elif value < 10:
        print(value, "is less than 10")
    else:
        print(value, "is greater than 10")
my_function(7)
# ## `print()` versus `return`
# As we have seen, we can use `print()` to print something to the console.
def my_function(x):
    """Print five times `x` (demonstrates print vs. return)."""
    result = 5 * x
    print(result)
my_function(10)
# `print()` is useful as we are developing and debugging Python code. However, `print()` doesn't actually give us the value itself.
#
# For example:
result = my_function(10)
# See what's in the result variable:
result
# The result variable doesn't contain anything!
type(result)
# In order to actually get the result of a function, use `return` rather than `print`.
#
# `return` causes the function to exit, so it must be the very last line of a function.
#
# Note that `return` does not use parenthesis like `print()` does.
def another_function(x):
    """Return five times `x`, so callers can store and reuse the result."""
    return x * 5
result = another_function(10)
# Now the result variable actually stores the value.
result
# ## Call one function inside another
# We can use one function inside another.
def square(x):
    """Return `x` raised to the second power."""
    squared = x ** 2
    return squared


def square_and_divide_by(x, n):
    """Square `x`, then divide the result by `n` (demonstrates composing functions)."""
    numerator = square(x)
    return numerator / n
square_and_divide_by(9, 2)
# + [markdown] toc-hr-collapsed=true
# # Exercises
#
# ## Decompose an Address
# -
# Let's say we have a bunch of street address locations and the data is a bit messy. Some addresses contain zip codes within the text, some don't. Let's write a function that will search for a zip code in a piece of text.
# We know that a U.S. ZIP Code is always five digits long. If we are working with addresses within the U.S., we can use this fact to search for a five-digit string at the end of an address. We need to be careful though: some addresses may have five-digit or even longer numbers at the beginning of the address. So we only want to look to the right of the last character of the address that is not numeric.
# In practice, we would use a geocoding tool to geocode an address. Just to give you a preview of how handy this is:
from geopy.geocoders import Nominatim
geolocator = Nominatim(user_agent="my_app")
address = "175 W 25th St New York, NY 10001"
result = geolocator.geocode(address)
result
result.raw
result.raw['display_name'].split(",")[-2].strip()
# However, let's ignore that for the time being as we acquire these skills.
# Use `split()` to break up the address on each space, and grab the last chunk.
last_chunk = address.split(" ")[-1]
last_chunk
len(last_chunk) == 5
last_chunk.isnumeric()
# First, let's write a small function which returns True if a string is a five digit number, False otherwise:
def could_be_a_zip_code(x):
    """Return True when `x` is a string of exactly five numeric characters.

    Note: `len()` / `str.isnumeric()` fail on non-string input — the
    notebook demonstrates that failure mode on purpose a few cells below.
    """
    return len(x) == 5 and x.isnumeric()
could_be_a_zip_code('11225')
# Try running the zip code on a integer:
could_be_a_zip_code(11225)
# Why does this fail? How can we fix this issue in the function?
# +
# Optional
# Write an improved version of the function above
# Which handles different data types
# -
# Let's write an address cleaner function which plucks out the zip code and returns both the cleaned address and the zip code separately.
def address_cleaner(address):
    """Split a raw address into a zip code and a cleaned address.

    Returns a dict with keys 'zip_code', 'clean_address' and 'raw_address'.
    When no five-digit zip code is found at the end of the address,
    'zip_code' and 'clean_address' are None.
    """
    # The candidate zip code is the last space-separated chunk of text.
    last_chunk = address.split(" ")[-1]
    if not could_be_a_zip_code(last_chunk):
        # No recognizable zip code: report only the raw address.
        return {
            'zip_code': None,
            'clean_address': None,
            'raw_address': address,
        }
    # Drop the zip-code text from the address and tidy leftover whitespace.
    cleaned = address.replace(last_chunk, "").strip()
    return {
        'zip_code': last_chunk,
        'clean_address': cleaned,
        'raw_address': address,
    }
results = address_cleaner(address)
results
| modules/02-learn-python/lecture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# General imports
import numpy as np
from deepmod_l1.analytical import theta_analytical
#Plotting imports
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# -
# # Making library
# +
x_sample = np.linspace(-5, 5, 50, dtype=np.float32)
t_sample = np.linspace(0, 5, 50, dtype=np.float32)
x_grid, t_grid = np.meshgrid(x_sample, t_sample, indexing='ij')
# Analytical
time_deriv, theta_noiseless = theta_analytical(x_grid, t_grid, 0.5, 0.25)
# -
# # V1; no convergence and selection
noise = np.var(time_deriv) * 1e-3
normalization = np.linalg.norm(theta_noiseless, axis=0)
theta = theta_noiseless / normalization
t = time_deriv + np.random.normal(scale= np.sqrt(noise), size=time_deriv.shape)
# +
def initialize(theta, t):
    """Choose the single best basis vector to seed the sparse model.

    Parameters
    ----------
    theta : (n_samples, n_terms) library/design matrix.
    t : (n_samples, 1) target vector.

    Returns
    -------
    Phi : (n_samples, 1) design matrix holding only the starting basis.
    alfa : (n_terms, 1) precisions; np.inf marks terms excluded from the model.
    beta : scalar noise precision estimate (beta = 1 / sigma^2).
    """
    # Initial guess: 10% of the target variance is noise.
    beta = 1 / (np.var(t) * 0.1)
    # Normalized squared projection of t onto each basis vector;
    # the largest one explains t best on its own.
    projection = np.concatenate(
        [((phi_i[:, None].T @ t).T @ (phi_i[:, None].T @ t))
         / (phi_i[:, None].T @ phi_i[:, None]) for phi_i in theta.T])
    start_idx = np.argmax(projection)
    # Start with every basis excluded (alpha = inf) ...
    alfa = np.ones((theta.shape[1], 1)) * np.inf
    # ... then activate the best column with its analytically optimal alpha.
    alfa[start_idx] = (theta[:, start_idx:start_idx + 1].T
                       @ theta[:, start_idx:start_idx + 1]
                       / (projection[start_idx] - 1 / beta))
    Phi = theta[:, [start_idx]]
    return Phi, alfa, beta


def posterior(Phi, t, alfa, beta):
    """Return posterior covariance and mean of the weights for the active bases."""
    # Only finite alphas correspond to bases actually present in Phi.
    Sigma = np.linalg.inv(alfa[alfa != np.inf] * np.eye(Phi.shape[1])
                          + beta * Phi.T @ Phi)
    mu = beta * Sigma @ Phi.T @ t
    return Sigma, mu


def sparse_quality_factor(theta, Phi, Sigma, alfa, beta, t=None):
    """Compute sparsity (sm) and quality (qm) factors for every candidate basis.

    Bug fix: the original implementation silently read the target vector
    `t` from the global namespace.  It is now an explicit (optional)
    argument; omitting it preserves the old global-reading behaviour.
    """
    if t is None:
        t = globals()["t"]  # backward-compatible fallback to the notebook global
    B = beta * np.eye(Phi.shape[0])
    precalc = B @ Phi @ Sigma @ Phi.T @ B
    Sm = np.concatenate(
        [phi_i[:, None].T @ B @ phi_i[:, None]
         - phi_i[:, None].T @ precalc @ phi_i[:, None] for phi_i in theta.T])
    Qm = np.concatenate(
        [phi_i[:, None].T @ B @ t
         - phi_i[:, None].T @ precalc @ t for phi_i in theta.T])
    # Convert the "all-inclusive" S/Q factors to leave-one-out s/q factors.
    sm = Sm / (1 - Sm / alfa)
    qm = Qm / (1 - Sm / alfa)
    return sm, qm


def update_design_matrix(theta, sm, qm, alfa):
    """Pick a random basis and add / re-estimate / delete it in the model."""
    idx = np.random.choice(theta.shape[1])
    # theta_i > 0 means including basis `idx` raises the marginal likelihood.
    theta_i = qm[idx, 0] ** 2 - sm[idx, 0]
    # Deciding what to do
    if (theta_i > 0) & (alfa[idx, 0] != np.inf):
        alfa[idx, 0] = sm[idx, 0] ** 2 / theta_i  # re-estimate alpha
    elif (theta_i > 0) & (alfa[idx, 0] == np.inf):
        alfa[idx, 0] = sm[idx, 0] ** 2 / theta_i  # add the basis
    elif (theta_i < 0) & (alfa[idx, 0] != np.inf):
        alfa[idx, 0] = np.inf  # remove the basis
    Phi = theta[:, alfa[:, 0] != np.inf]  # rebuild Phi from the active set
    return Phi, alfa


def update_noise(Phi, t, mu, Sigma, alfa):
    """Re-estimate the noise precision beta from the current fit residual.

    Standard RVM update: beta = (N - M + sum(alpha_i * Sigma_ii)) / ||t - Phi mu||^2.
    Note the result is a (1, 1) array, which broadcasts fine downstream.
    """
    beta = ((Phi.shape[0] - Phi.shape[1]
             + np.sum(alfa[alfa != np.inf] * np.diag(Sigma)))
            / ((t - Phi @ mu).T @ (t - Phi @ mu)))
    return beta
# -
# Initializing
Phi, alfa, beta = initialize(theta, t)
Sigma, mu = posterior(Phi, t, alfa, beta)
sm, qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
# V1 driver: run a fixed number of update sweeps (no convergence check yet).
for it in np.arange(150):
    Phi, alfa = update_design_matrix(theta, sm, qm, alfa)
    Sigma, mu = posterior(Phi, t, alfa, beta)
    sm, qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
    beta = update_noise(Phi, t, mu, Sigma, alfa)
    # Print the current precisions every 50 iterations to monitor progress.
    if it % 50 == 0:
        print(alfa)
1/beta
noise
A = alfa[alfa != np.inf] * np.eye(Phi.shape[1]) + beta * Phi.T @ Phi
np.linalg.cond(A)
alfa
mu.T / normalization[alfa[:, 0] != np.inf]
qm**2 - sm
# # Adding convergence
noise = np.var(time_deriv) * 1e-3
normalization = np.linalg.norm(theta_noiseless, axis=0)
theta = theta_noiseless / normalization
t = time_deriv + np.random.normal(scale= np.sqrt(noise), size=time_deriv.shape)
# +
def initialize(theta, t):
    """Choose the single best basis vector to seed the sparse model.

    Returns (Phi, alfa, beta): the one-column design matrix, the
    per-term precisions (np.inf = excluded), and the initial noise
    precision beta = 1 / sigma^2.
    """
    # Initial guess: 10% of the target variance is noise.
    beta = 1 / (np.var(t) * 0.1)
    # Normalized squared projection of t onto each basis vector;
    # the largest one explains t best on its own.
    projection = np.concatenate(
        [((phi_i[:, None].T @ t).T @ (phi_i[:, None].T @ t))
         / (phi_i[:, None].T @ phi_i[:, None]) for phi_i in theta.T])
    start_idx = np.argmax(projection)
    # Start with every basis excluded (alpha = inf) ...
    alfa = np.ones((theta.shape[1], 1)) * np.inf
    # ... then activate the best column with its analytically optimal alpha.
    alfa[start_idx] = (theta[:, start_idx:start_idx + 1].T
                       @ theta[:, start_idx:start_idx + 1]
                       / (projection[start_idx] - 1 / beta))
    Phi = theta[:, [start_idx]]
    return Phi, alfa, beta


def posterior(Phi, t, alfa, beta):
    """Return posterior covariance and mean of the weights for the active bases."""
    # Only finite alphas correspond to bases actually present in Phi.
    Sigma = np.linalg.inv(alfa[alfa != np.inf] * np.eye(Phi.shape[1])
                          + beta * Phi.T @ Phi)
    mu = beta * Sigma @ Phi.T @ t
    return Sigma, mu


def sparse_quality_factor(theta, Phi, Sigma, alfa, beta, t=None):
    """Compute sparsity (sm) and quality (qm) factors for every candidate basis.

    Bug fix: the original implementation silently read the target vector
    `t` from the global namespace.  It is now an explicit (optional)
    argument; omitting it preserves the old global-reading behaviour.
    """
    if t is None:
        t = globals()["t"]  # backward-compatible fallback to the notebook global
    B = beta * np.eye(Phi.shape[0])
    precalc = B @ Phi @ Sigma @ Phi.T @ B
    Sm = np.concatenate(
        [phi_i[:, None].T @ B @ phi_i[:, None]
         - phi_i[:, None].T @ precalc @ phi_i[:, None] for phi_i in theta.T])
    Qm = np.concatenate(
        [phi_i[:, None].T @ B @ t
         - phi_i[:, None].T @ precalc @ t for phi_i in theta.T])
    # Convert the "all-inclusive" S/Q factors to leave-one-out s/q factors.
    sm = Sm / (1 - Sm / alfa)
    qm = Qm / (1 - Sm / alfa)
    return sm, qm


def update_design_matrix(theta, sm, qm, alfa):
    """Pick a random basis and add / re-estimate / delete it in the model."""
    idx = np.random.choice(theta.shape[1])
    # theta_i > 0 means including basis `idx` raises the marginal likelihood.
    theta_i = qm[idx, 0] ** 2 - sm[idx, 0]
    # Deciding what to do
    if (theta_i > 0) & (alfa[idx, 0] != np.inf):
        alfa[idx, 0] = sm[idx, 0] ** 2 / theta_i  # re-estimate alpha
    elif (theta_i > 0) & (alfa[idx, 0] == np.inf):
        alfa[idx, 0] = sm[idx, 0] ** 2 / theta_i  # add the basis
    elif (theta_i < 0) & (alfa[idx, 0] != np.inf):
        alfa[idx, 0] = np.inf  # remove the basis
    Phi = theta[:, alfa[:, 0] != np.inf]  # rebuild Phi from the active set
    return Phi, alfa


def update_noise(Phi, t, mu, Sigma, alfa):
    """Re-estimate the noise precision beta from the current fit residual.

    Standard RVM update: beta = (N - M + sum(alpha_i * Sigma_ii)) / ||t - Phi mu||^2.
    """
    beta = ((Phi.shape[0] - Phi.shape[1]
             + np.sum(alfa[alfa != np.inf] * np.diag(Sigma)))
            / ((t - Phi @ mu).T @ (t - Phi @ mu)))
    return beta


def convergence(sm, qm, alfa):
    """Return True once the alpha re-estimates have effectively stopped changing.

    Converged when the largest |alpha_new - alpha| among candidate bases
    with positive likelihood gain (dt > 0) drops below 1e-6.
    NOTE(review): np.max over an empty selection (no dt > 0) would raise —
    presumably never happens once the model holds at least one basis; verify.
    """
    dt = qm ** 2 - sm
    delta_alfa = sm ** 2 / dt - alfa  # proposed alpha minus current alpha
    converged = np.max(np.abs(delta_alfa[dt > 0])) < 10 ** -6
    return converged
# +
# Initializing
Phi, alfa, beta = initialize(theta, t)
Sigma, mu = posterior(Phi, t, alfa, beta)
sm, qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
converged = False
while converged == False:
Phi, alfa = update_design_matrix(theta, sm, qm, alfa)
Sigma, mu = posterior(Phi, t, alfa, beta)
sm, qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
beta = update_noise(Phi, t, mu, Sigma, alfa)
converged = convergence(sm, qm, alfa)
# -
alfa
mu
qm**2 - sm
# # Including choice
noise = np.var(time_deriv) * 0.1
normalization = np.linalg.norm(theta_noiseless, axis=0)
theta = theta_noiseless / normalization
t = time_deriv + np.random.normal(scale= np.sqrt(noise), size=time_deriv.shape)
# +
def initialize(theta, t):
    """Choose the single best basis vector to seed the sparse model.

    Returns (Phi, alfa, beta): the one-column design matrix, the
    per-term precisions (np.inf = excluded), and the initial noise
    precision beta = 1 / sigma^2.
    """
    # Initial guess: 10% of the target variance is noise.
    beta = 1 / (np.var(t) * 0.1)
    # Normalized squared projection of t onto each basis vector;
    # the largest one explains t best on its own.
    projection = np.concatenate(
        [((phi_i[:, None].T @ t).T @ (phi_i[:, None].T @ t))
         / (phi_i[:, None].T @ phi_i[:, None]) for phi_i in theta.T])
    start_idx = np.argmax(projection)
    # Start with every basis excluded (alpha = inf) ...
    alfa = np.ones((theta.shape[1], 1)) * np.inf
    # ... then activate the best column with its analytically optimal alpha.
    alfa[start_idx] = (theta[:, start_idx:start_idx + 1].T
                       @ theta[:, start_idx:start_idx + 1]
                       / (projection[start_idx] - 1 / beta))
    Phi = theta[:, [start_idx]]
    return Phi, alfa, beta


def posterior(Phi, t, alfa, beta):
    """Return posterior covariance and mean of the weights for the active bases."""
    # Only finite alphas correspond to bases actually present in Phi.
    Sigma = np.linalg.inv(alfa[alfa != np.inf] * np.eye(Phi.shape[1])
                          + beta * Phi.T @ Phi)
    mu = beta * Sigma @ Phi.T @ t
    return Sigma, mu


def sparse_quality_factor(theta, Phi, Sigma, alfa, beta, t=None):
    """Compute sparsity/quality factors; returns (sm, qm, Sm, Qm).

    sm/qm are the leave-one-out factors, Sm/Qm the all-inclusive ones
    (needed by `optimal_vec`).

    Bug fix: the original implementation silently read the target vector
    `t` from the global namespace.  It is now an explicit (optional)
    argument; omitting it preserves the old global-reading behaviour.
    """
    if t is None:
        t = globals()["t"]  # backward-compatible fallback to the notebook global
    B = beta * np.eye(Phi.shape[0])
    precalc = B @ Phi @ Sigma @ Phi.T @ B
    Sm = np.concatenate(
        [phi_i[:, None].T @ B @ phi_i[:, None]
         - phi_i[:, None].T @ precalc @ phi_i[:, None] for phi_i in theta.T])
    Qm = np.concatenate(
        [phi_i[:, None].T @ B @ t
         - phi_i[:, None].T @ precalc @ t for phi_i in theta.T])
    sm = Sm / (1 - Sm / alfa)
    qm = Qm / (1 - Sm / alfa)
    return sm, qm, Sm, Qm


def update_design_matrix(theta, sm, qm, alfa, Sm, Qm):
    """Greedily update the basis whose action most improves the likelihood."""
    idx = optimal_vec(sm, qm, Sm, Qm, alfa)
    # theta_i > 0 means including basis `idx` raises the marginal likelihood.
    theta_i = qm[idx, 0] ** 2 - sm[idx, 0]
    # Deciding what to do
    if (theta_i > 0) & (alfa[idx, 0] != np.inf):
        alfa[idx, 0] = sm[idx, 0] ** 2 / theta_i  # re-estimate alpha
    elif (theta_i > 0) & (alfa[idx, 0] == np.inf):
        alfa[idx, 0] = sm[idx, 0] ** 2 / theta_i  # add the basis
    elif (theta_i < 0) & (alfa[idx, 0] != np.inf):
        alfa[idx, 0] = np.inf  # remove the basis
    Phi = theta[:, alfa[:, 0] != np.inf]  # rebuild Phi from the active set
    return Phi, alfa


def update_noise(Phi, t, mu, Sigma, alfa):
    """Re-estimate the noise precision beta from the current fit residual.

    Standard RVM update: beta = (N - M + sum(alpha_i * Sigma_ii)) / ||t - Phi mu||^2.
    """
    beta = ((Phi.shape[0] - Phi.shape[1]
             + np.sum(alfa[alfa != np.inf] * np.diag(Sigma)))
            / ((t - Phi @ mu).T @ (t - Phi @ mu)))
    return beta


def convergence(sm, qm, alfa):
    """Return True once the alpha re-estimates have effectively stopped changing.

    Converged when the largest |alpha_new - alpha| among candidate bases
    with positive likelihood gain (dt > 0) drops below 1e-6.
    """
    dt = qm ** 2 - sm
    delta_alfa = sm ** 2 / dt - alfa  # proposed alpha minus current alpha
    converged = np.max(np.abs(delta_alfa[dt > 0])) < 10 ** -6
    return converged


def optimal_vec(sm, qm, Sm, Qm, alfa):
    """Return the index of the basis whose best action (add / re-estimate /
    delete) gives the largest marginal-likelihood improvement."""
    basis_idx = alfa != np.inf  # bases currently in the model
    set_idx = alfa == np.inf  # bases currently excluded
    # Likelihood change for each of the three possible actions.
    add_basis = (Qm ** 2 - Sm) / Sm + np.log(Sm / Qm ** 2)
    del_basis = Qm ** 2 / (Sm - alfa) - np.log(1 - Sm / alfa)
    alfa_new = sm ** 2 / (qm ** 2 - sm)
    redo_basis = (Qm ** 2 / (Sm + (1 / alfa_new - 1 / alfa) ** -1)
                  - np.log(1 + Sm * (1 / alfa_new - 1 / alfa)))
    # Mask invalid actions with NaN so nanargmax ignores them.
    add_basis[basis_idx] = np.nan  # cannot add a basis that is already in
    dt = qm ** 2 - sm
    add_basis[dt <= 0] = np.nan  # the add expression assumes dt > 0
    del_basis[set_idx] = np.nan  # cannot delete an absent basis
    redo_basis[set_idx] = np.nan  # cannot re-estimate an absent basis
    # Deciding update: columns are (add, re-estimate, delete); pick the row
    # (= basis index) holding the overall best action.
    possible_update = np.concatenate((add_basis, redo_basis, del_basis), axis=1)
    idx = np.unravel_index(np.nanargmax(possible_update), possible_update.shape)[0]
    return idx
# +
# Initializing
Phi, alfa, beta = initialize(theta, t)
Sigma, mu = posterior(Phi, t, alfa, beta)
sm, qm, Sm, Qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
converged = False
while converged == False:
Phi, alfa = update_design_matrix(theta, sm, qm, alfa, Sm, Qm)
Sigma, mu = posterior(Phi, t, alfa, beta)
sm, qm, Sm, Qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
beta = update_noise(Phi, t, mu, Sigma, alfa)
converged = convergence(sm, qm, alfa)
# -
mu / normalization[alfa[:, 0] != np.inf][:, None]
Sigma
alfa
1/beta
noise
for idx in np.arange(10):
noise = np.var(time_deriv) * 0.1
normalization = np.linalg.norm(theta_noiseless, axis=0)
theta = theta_noiseless / normalization
t = time_deriv + np.random.normal(scale= np.sqrt(noise), size=time_deriv.shape)
# Initializing
Phi, alfa, beta = initialize(theta, t)
Sigma, mu = posterior(Phi, t, alfa, beta)
sm, qm, Sm, Qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
converged = False
while converged == False:
Phi, alfa = update_design_matrix(theta, sm, qm, alfa, Sm, Qm)
Sigma, mu = posterior(Phi, t, alfa, beta)
sm, qm, Sm, Qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
beta = update_noise(Phi, t, mu, Sigma, alfa)
converged = convergence(sm, qm, alfa)
print(mu.shape)
# +
noise = np.var(time_deriv) * 0.05
normalization = np.linalg.norm(theta_noiseless, axis=0)
theta = theta_noiseless / normalization
t = time_deriv + np.random.normal(scale= np.sqrt(noise), size=time_deriv.shape)
for idx in np.arange(10):
# Initializing
Phi, alfa, beta = initialize(theta, t)
Sigma, mu = posterior(Phi, t, alfa, beta)
sm, qm, Sm, Qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
converged = False
while converged == False:
Phi, alfa = update_design_matrix(theta, sm, qm, alfa, Sm, Qm)
Sigma, mu = posterior(Phi, t, alfa, beta)
sm, qm, Sm, Qm = sparse_quality_factor(theta, Phi, Sigma, alfa, beta)
beta = update_noise(Phi, t, mu, Sigma, alfa)
converged = convergence(sm, qm, alfa)
print(mu)
# -
Sigma
normalization @ normalization.T
# # Function for training
# +
def initialize(theta, t):
# Noise level
beta = 1 / (np.var(t) * 0.1) # beta = 1/sigma^2
# Finding best initial vector
projection = np.concatenate([((phi_i[:, None].T @ t).T @ (phi_i[:, None].T @ t)) / (phi_i[:, None].T @ phi_i[:, None]) for phi_i in theta.T])
start_idx = np.argmax(projection)
# Initializing alphas
alfa = np.ones((theta.shape[1], 1)) * np.inf
alfa[start_idx] = theta[:, start_idx:start_idx+1].T @ theta[:, start_idx:start_idx+1] / (projection[start_idx] - 1/beta)
Phi = theta[:, [start_idx]]
return Phi, alfa, beta
def posterior(Phi, t, alfa, beta):
Sigma = np.linalg.inv(alfa[alfa != np.inf] * np.eye(Phi.shape[1]) + beta * Phi.T @ Phi) # posterior covariance
mu = beta * Sigma @ Phi.T @ t # posterior mean
return Sigma, mu
def sparse_quality_factor(theta, Phi, Sigma, alfa, beta):
B = beta * np.eye(Phi.shape[0])
precalc = B @ Phi @ Sigma @ Phi.T @ B
Sm = np.concatenate([phi_i[:, None].T @ B @ phi_i[:, None] - phi_i[:, None].T @ precalc @ phi_i[:, None] for phi_i in theta.T])
Qm = np.concatenate([phi_i[:, None].T @ B @ t - phi_i[:, None].T @ precalc @ t for phi_i in theta.T])
sm = Sm/(1 - Sm/alfa)
qm = Qm/(1 - Sm/alfa)
return sm, qm, Sm, Qm
def update_design_matrix(theta, sm, qm, alfa, Sm, Qm):
idx = optimal_vec(sm, qm, Sm, Qm, alfa)
phi_i = theta[:, idx:idx+1]
theta_i = qm[idx, 0]**2 - sm[idx, 0]
# Decididing what to do
if (theta_i > 0) & (alfa[idx, 0] != np.inf):
alfa[idx, 0] = sm[idx, 0]**2 / theta_i #reestimating
elif (theta_i > 0) & (alfa[idx, 0] == np.inf):
alfa[idx, 0] = sm[idx, 0]**2 / theta_i # adding alpha
elif (theta_i< 0) & (alfa[idx, 0] != np.inf):
alfa[idx, 0] = np.inf #removing alpha
Phi = theta[:, alfa[:, 0] != np.inf] #rebuilding phi
return Phi, alfa
def update_noise(Phi, t, mu, Sigma, alfa):
beta = (Phi.shape[0] - Phi.shape[1] + np.sum(alfa[alfa != np.inf] * np.diag(Sigma))) / ((t - Phi @ mu).T @ (t - Phi @ mu))
return beta
def convergence(sm, qm, alfa):
dt = qm**2 - sm
delta_alfa= sm**2 / dt - alfa # check a_new - a
converged = np.max(np.abs(delta_alfa[dt > 0])) < 10**-6 # if max delta_a < 10^-6 and all other dt < 0, it has converged
return converged
def optimal_vec(sm, qm, Sm, Qm, alfa):
    """Pick the basis whose add / re-estimate / delete move gives the largest
    marginal-likelihood change.

    Builds one score column per move type, masks moves that do not apply to a
    basis with NaN, and returns the row index of the best-scoring entry.
    """
    basis_idx = alfa != np.inf # idx of bases in model
    set_idx = alfa == np.inf # idx of bases not in model
    add_basis = (Qm**2 - Sm)/Sm + np.log(Sm/Qm**2)  # gain from adding a pruned basis
    del_basis = Qm**2/(Sm - alfa) - np.log(1-Sm/alfa)  # gain from deleting an active basis
    alfa_new = sm**2/(qm**2 - sm)  # candidate re-estimated alpha
    redo_basis = Qm**2/(Sm + (1/alfa_new-1/alfa)**-1) - np.log(1 + Sm * (1/alfa_new-1/alfa))  # gain from re-estimating alpha
    #Making everything into nice matrix
    add_basis[basis_idx] = np.nan  # cannot add a basis that is already active
    dt = qm**2 - sm
    add_basis[dt <= 0] = np.nan #stuff above assumes dt > 0
    del_basis[set_idx] = np.nan  # cannot delete a basis that is not in the model
    redo_basis[set_idx] = np.nan  # cannot re-estimate a pruned basis
    # Deciding update: columns are [add, redo, delete]; nanargmax ignores masked moves
    possible_update = np.concatenate((add_basis, redo_basis, del_basis), axis=1)
    idx = np.unravel_index(np.nanargmax(possible_update), possible_update.shape)[0]
    return idx
def SBL(theta, t):
    """Run sparse Bayesian learning on the dictionary ``theta`` and target ``t``.

    Normalizes the dictionary columns, iterates add/re-estimate/prune moves
    until convergence, then rescales the posterior back to the original
    column scales.  Returns (alfa, mu, Sigma, noise_variance = 1/beta).

    NOTE(review): sparse_quality_factor reads the module-level ``t``; this
    works here only because the notebook assigns the same ``t`` globally
    before calling SBL — confirm before reusing elsewhere.
    """
    # Normalizing (unit-norm columns)
    normalization = np.linalg.norm(theta, axis=0)
    theta_normalized = theta / normalization
    # Initializing
    Phi, alfa, beta = initialize(theta_normalized, t)
    Sigma, mu = posterior(Phi, t, alfa, beta)
    sm, qm, Sm, Qm = sparse_quality_factor(theta_normalized, Phi, Sigma, alfa, beta)
    # Running
    converged = False
    while converged == False:
        Phi, alfa = update_design_matrix(theta_normalized, sm, qm, alfa, Sm, Qm)
        Sigma, mu = posterior(Phi, t, alfa, beta)
        sm, qm, Sm, Qm = sparse_quality_factor(theta_normalized, Phi, Sigma, alfa, beta)
        beta = update_noise(Phi, t, mu, Sigma, alfa)
        converged = convergence(sm, qm, alfa)
    # Rescaling posterior mean/covariance to the unnormalized dictionary
    factor = normalization[alfa[:, 0] != np.inf][:, None]
    mu = mu / factor
    Sigma = Sigma / (factor @ factor.T)
    return alfa, mu, Sigma, 1/beta
# +
# Demo run: noise level is 10% of the signal variance.
# time_deriv and theta_noiseless come from earlier notebook cells (not shown).
noise = np.var(time_deriv) * 0.1
normalization = np.linalg.norm(theta_noiseless, axis=0)  # NOTE(review): unused here; SBL normalizes internally
t = time_deriv + np.random.normal(scale= np.sqrt(noise), size=time_deriv.shape)
alfa, mu, Sigma, inferred_noise = SBL(theta_noiseless, t)
# -
# Display the inferred quantities (notebook cell outputs)
alfa
mu
Sigma
| notebooks/Bayesian/Implementing_SBL_v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="N9QuzdI9gIR4"
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
# + [markdown] id="6Bs9z7Oy9iDY"
# ### Load Data
# + colab={"base_uri": "https://localhost:8080/", "height": 195} id="K_3ODMzs--vs" outputId="323b57ff-6e25-46e6-cd3d-1bfb7ce96848"
train = pd.read_csv("twitter_train.csv")
# test = pd.read_csv("twitter_test.csv") # no label, so split train into train and valid set
# print(train.shape) # 31962 x 3
train.head() # label: 1 = racist/sexist; 0 = not racist/sexist
# + id="ev5Nb1_5bJpX"
# sample tweets from both labels
np.random.seed(22)
idx0 = np.where(train['label'] == 0)
idx1 = np.where(train['label'] == 1)
train['tweet'][np.random.choice(idx0[0],10)]
# + id="ev5Nb1_5bJpX"
train['tweet'][np.random.choice(idx1[0],10)]
# + [markdown] id="tZigcbvZbL3x"
# ## Format Data
# -
# NOTE(review): this binds (not copies) the imported STOPWORDS set, so the
# update() below mutates the wordcloud library's global stopword set too.
stopwords = STOPWORDS
stopwords.update(["’", "“", "”", 'ªð', 'ºï', '¼ð', 'ó¾', "000", "000 000", "amp", 'urð', "https", "co", "user",'aren', 'couldn', 'didn', 'doesn', 'don', 'hadn', 'hasn', 'haven', 'isn', 'let', 'll', 'mustn', 're', 'shan', 'shouldn', 've', 'wasn', 'weren', 'won', 'wouldn'])
# + id="HBXggr-cHpeP"
vec=CountVectorizer(stop_words=stopwords, min_df = 2)
vectors = vec.fit_transform(train["tweet"]) # create word-count matrix from full train data
idx_to_word = np.array(vec.get_feature_names())
# -
# words appearing more than 100 times overall
idx_to_word[np.asarray((np.sum(vectors, axis=0) >100))[0]]
# + [markdown] id="gyeHb11ubWkL"
# #### y to one-hot
# -
def list2onehot(y, list_classes):
    """
    One-hot encode a sequence of class labels.

    y = list (or Series) of class labels of length n
    list_classes = ordered list of the k possible classes
    output = n x k int array, i th row = one-hot encoding of y[i] (e.g., [0,0,1,0,0])
    """
    # Vectorized replacement of the original O(n*k) Python double loop:
    # broadcast an (n,1) label column against a (1,k) class row.
    labels = np.asarray(y).reshape(-1, 1)
    classes = np.asarray(list_classes).reshape(1, -1)
    return (labels == classes).astype(int)
# + id="Ox5DOrBxbUS4"
# X = train['tweet'] # word form (31962 tweets,)
X = np.asarray(vectors.todense(), dtype = np.float32) # freq form (31962 tweets, 41104 word frequencies)
y = train['label']
# fix: the function defined above is list2onehot, not list_to_onehot (NameError)
y_oh = list2onehot(y,[0, 1])
# y_test_oh = list2onehot(y_test,[0, 1])
# + [markdown] id="lxYCx7hWPaQX"
# ### Split train and valid
# + id="XZ6vgK50h26w"
# Manual Bernoulli(0.8) split; uses the np.random.seed(22) set earlier.
X_train =[]
X_test = []
y_test = []
y_train = []
for i in np.arange(X.shape[0]):
    # for each example i, make it into train set with probabiliy 0.8 and into test set otherwise
    U = np.random.rand() # Uniform([0,1]) variable
    if U < 0.8:
        # X_train = np.concatenate(X_train,X[i,:])
        X_train.append(X[i,:])
        y_train.append(y_oh[i,:].copy())
    else:
        # X_test = np.concatenate(X_test,X[i,:])
        X_test.append(X[i,:])
        y_test.append(y_oh[i,:].copy())
X_train = np.asarray(X_train)
X_test = np.asarray(X_test)
y_train = np.asarray(y_train)
y_test = np.asarray(y_test)
# + id="LyA65MJpbnwT"
print(X_train.shape, X_test.shape)
print(y_train.shape, y_test.shape)
# -
# fix: do NOT delete `train` here — fit_LR() defined below reads the global
# train["tweet"], so deleting it makes every later fit_LR cell fail.
# del train
# + [markdown] id="asW0RcciDZyq"
# ### Wordcloud for each Label
# + colab={"base_uri": "https://localhost:8080/", "height": 631} id="k4fZYd4u_NN1" outputId="3ed227ea-4a97-4548-f356-32f4ba66d1ce"
# word cloud for different labels
def plot_wordcloud(Y, H, idx_to_word, categories):
    """Plot each class-conditional word PMF as a wordcloud.

    Y = (n x k) one-hot labels, H = (p x n) word-count matrix (\Phi in lecture note)
    idx_to_word = list of words used in the vectorization of documents
    categories = list of class labels
    Saves the figure to class_conditional_PMF_wordcloud_twitter.pdf.
    """
    # class-conditional for class i = [ # word j in class i examples / # words in class i examples]
    class_conditional_PMF = []
    for i in np.arange(Y.shape[1]):
        idx = np.where(Y[:, i] == 1)
        sub_H = H[:, idx[0]]
        word_count_per_class = np.sum(sub_H, axis=1)
        class_conditional_PMF.append(word_count_per_class / np.sum(word_count_per_class))
    num_words_to_sample = 10000
    fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(14, 7), subplot_kw={'xticks': [], 'yticks': []})
    # fix: iterate over the k classes (Y.shape[1]); the original used
    # Y.shape[0] (the sample count), which only worked because zip stops at
    # the number of axes.
    for ax, i in zip(axs.flat, np.arange(Y.shape[1])):
        dist = class_conditional_PMF[i]
        # sample words proportionately to the class-conditional PMF
        list_words = []
        for _ in range(num_words_to_sample):
            word_idx = np.random.choice(np.arange(H.shape[0]), p=dist)
            list_words.append(idx_to_word[word_idx])
        # fix: use a local name instead of shadowing the argument Y
        text = " ".join(list_words)
        wordcloud = WordCloud(background_color="black",
                              relative_scaling=0.1,
                              width=400,
                              height=400).generate(text)
        ax.imshow(wordcloud, interpolation='bilinear')
        ax.set_xlabel(categories[i], fontsize='20')
        # ax.axis("off")
    plt.tight_layout()
    plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.08)
    plt.savefig('class_conditional_PMF_wordcloud_twitter.pdf', bbox_inches='tight')
# + colab={"base_uri": "https://localhost:8080/", "height": 631} id="k4fZYd4u_NN1" outputId="3ed227ea-4a97-4548-f356-32f4ba66d1ce"
# Render the per-class wordclouds for the training split.
categories = ['non-racist/sexist','racist/sexist']
plot_wordcloud(Y=np.asarray(y_train), H=X_train.T, idx_to_word=idx_to_word, categories=categories)
# + [markdown] id="c-Xsny8L_F5a"
# ## Naive Bayes Classifier
# + id="XQfhBCwB7OZ5"
# subset from 23800 tweets (for memory reasons:
# MemoryError: Unable to allocate 7.28 GiB for an array with shape (23759, 41104) and data type float64)
X_train_sm = []
y_train_sm = []
for i in np.arange(X_train.shape[0]):
    U = np.random.rand() # Uniform([0,1]) variable
    if U < 0.3:
        # fix: sample from the training split itself; the original indexed the
        # full X / y_oh arrays with train-set indices, leaking test rows into
        # the "training" subset.
        X_train_sm.append(X_train[i,:])
        y_train_sm.append(y_train[i,:].copy())
# + id="NrwGheMOb3E_"
X_train_sm = np.array(X_train_sm)
y_train_sm = np.array(y_train_sm)
X_train_sm.shape
# + id="bahXbtKIb5Jt"
def fit_NB(Y, H,pseudocount = 10**-5):
    '''
    Fit a Multinomial Naive Bayes classifier with maximum-likelihood estimates.

    Y = (n x k) one-hot labels, H = (p x n) word-count matrix (\Phi in lecture note)
    prior on class labels = empirical PMF = [ # class i examples / total ]
    class-conditional for class i = [ # word j in class i examples / # words in class i examples]
    The pseudocount keeps zero-count words from producing zero probabilities.
    Output = prior (k, ), class_conditional_PMF = (k, p)
    '''
    class_counts = np.sum(Y, axis=0)
    prior = class_counts / class_counts.sum()
    class_conditional_PMF = []
    for cls in np.arange(Y.shape[1]):
        members = np.where(Y[:, cls] == 1)[0]  # docs belonging to this class
        smoothed = H[:, members] + pseudocount
        per_word_counts = smoothed.sum(axis=1)
        class_conditional_PMF.append(per_word_counts / per_word_counts.sum())
    return prior, np.asarray(class_conditional_PMF)
#test: fit NB on the sub-sampled training data and inspect the estimates
prior, class_conditional_PMF = fit_NB(Y=y_train_sm, H=X_train_sm.T)
print(prior)
print(class_conditional_PMF)
# + id="Sc3fXo5Wb6-Y"
class_conditional_PMF.shape
X_train_sm.shape
# + id="3d7GhTP_b9QS"
# Plot the two class-conditional word distributions over the vocabulary.
fig, ax = plt.subplots(nrows=1, ncols=1, figsize=[15, 5])
for i in [0,1]:
    ax.plot(np.arange(X_train_sm.shape[1]), class_conditional_PMF[i], label=categories[i])
ax.legend(fontsize='15')
plt.suptitle("Class conditional PMFs", fontsize='15')
plt.savefig('class_conditional_PMF_dist_twitter.pdf', bbox_inches='tight')
# + id="gkzT9_xHb-uZ"
def predict_MNB(X_test, prior, class_conditional_PMF):
    '''
    Compute the posterior class PMF for each test document via Bayes' theorem.

    X_test = (n x p) document-term counts (docs x words); note this is the
             docs-by-words orientation actually used below (the original
             docstring said (p x n)).
    prior = (k, ) class prior
    class_conditional_PMF = (k x p) per-class word PMFs
    Returns (n x k) row-normalized posterior probabilities.
    '''
    # fix: removed leftover debug print() calls.
    # normalize so that log(P) is not too small
    P = class_conditional_PMF / np.min(class_conditional_PMF)
    Q = np.exp(X_test @ np.log(P).T)
    Q = Q * np.repeat(prior[:, np.newaxis], repeats=Q.shape[0], axis=1).T
    sum_of_rows = Q.sum(axis=1)
    return Q / sum_of_rows[:, np.newaxis]
# test: posterior class probabilities on the held-out split
predictive_PMF = predict_MNB(X_test, prior, class_conditional_PMF)
plt.plot(predictive_PMF[4])
# + id="tuN_mdBGcA3i"
def compute_accuracy_metrics(Y_test, P_pred, use_opt_threshold=False, verbose=True):
    """Compute binary classification accuracy metrics.

    Y_test = binary labels
    P_pred = predicted probability (or hard label) for Y_test
    use_opt_threshold: threshold at the ROC-optimal (Youden's J) point
    instead of 0.5.
    Returns a dict with AUC, thresholded predictions, and the usual
    confusion-matrix statistics.
    """
    fpr, tpr, thresholds = metrics.roc_curve(Y_test, P_pred, pos_label=None)
    mythre = thresholds[np.argmax(tpr - fpr)]  # threshold maximizing tpr - fpr
    myauc = metrics.auc(fpr, tpr)
    # Compute classification statistics
    threshold = 0.5
    if use_opt_threshold:
        threshold = mythre
    Y_pred = P_pred.copy()
    Y_pred[Y_pred < threshold] = 0
    Y_pred[Y_pred >= threshold] = 1
    mcm = confusion_matrix(Y_test, Y_pred)
    tn = mcm[0, 0]
    tp = mcm[1, 1]
    fn = mcm[1, 0]
    fp = mcm[0, 1]
    accuracy = (tp + tn) / (tp + tn + fp + fn)
    # fix: sensitivity (recall) = tp/(tp+fn) and specificity = tn/(tn+fp);
    # the original had the two definitions swapped (its own miss_rate/fall_out
    # below agree with the corrected versions: miss_rate = 1 - sensitivity,
    # fall_out = 1 - specificity).
    sensitivity = tp / (tp + fn)
    specificity = tn / (tn + fp)
    precision = tp / (tp + fp)
    fall_out = fp / (fp + tn)
    miss_rate = fn / (fn + tp)
    # Save results
    results_dict = {}
    results_dict.update({'Y_test': Y_test})
    results_dict.update({'Y_pred': Y_pred})
    results_dict.update({'AUC': myauc})
    results_dict.update({'Opt_threshold': mythre})
    results_dict.update({'Accuracy': accuracy})
    results_dict.update({'Sensitivity': sensitivity})
    results_dict.update({'Specificity': specificity})
    results_dict.update({'Precision': precision})
    results_dict.update({'Fall_out': fall_out})
    results_dict.update({'Miss_rate': miss_rate})
    results_dict.update({'Confusion_mx': mcm})
    if verbose:
        for key in [key for key in results_dict.keys()]:
            if key not in ['Y_test', 'Y_pred', 'Confusion_mx']:
                print('% s ===> %.3f' % (key, results_dict.get(key)))
        print('Confusion matrix \n ===>\n', mcm)
    return results_dict
# + id="cEqPWwPOcDCk"
# fix: onehot_to_list was never defined anywhere in this notebook; recover the
# integer labels from the one-hot matrix with np.argmax (same idiom as the
# logistic-regression cells).
results_dict = compute_accuracy_metrics(Y_test=np.argmax(y_test, axis=1), P_pred=predictive_PMF[:,1], verbose=True)
# # LR
# +
# def fit_LR_GD(Y, H, W0=None, sub_iter=100, stopping_diff=0.01):
# '''
# Convex optimization algorithm for Logistic Regression using Gradient Descent
# Y = (n x 1), H = (p x n) (\Phi in lecture note), W = (p x 1)
# Logistic Regression: Y ~ Bernoulli(Q), Q = sigmoid(H.T @ W)
# MLE -->
# Find \hat{W} = argmin_W ( sum_j ( log(1+exp(H_j.T @ W) ) - Y.T @ H.T @ W ) )
# '''
# if W0 is None:
# W0 = np.random.rand(H.shape[0],1) #If initial coefficients W0 is None, randomly initialize
# W1 = W0.copy()
# i = 0
# grad = np.ones(W0.shape)
# while (i < sub_iter) and (np.linalg.norm(grad) > stopping_diff):
# Q = 1/(1+np.exp(-H.T @ W1)) # probability matrix, same shape as Y
# # grad = H @ (Q - Y).T + alpha * np.ones(W0.shape[1])
# grad = H @ (Q - Y)
# W1 = W1 - (np.log(i+1) / (((i + 1) ** (0.5)))) * grad
# i = i + 1
# # print('iter %i, grad_norm %f' %(i, np.linalg.norm(grad)))
# return W1
# H_train = np.vstack((np.ones(X_train.shape[0]), X_train.T)) # add first row of 1's for bias features
#W = fit_LR_GD(Y=y_train, H=H_train)
# H_test = np.vstack((np.ones(X_test.shape[0]), X_test.T))
# Q = 1 / (1 + np.exp(-H_test.T @ W))
# compute_accuracy_metrics(Y_test=np.argmax(y_test,axis=1), P_pred = np.argmax(Q,axis=1))
# -
LR = LogisticRegression()
LR.fit(X_train, np.argmax(y_train,axis=1))
LRpreds = LR.predict(X_test)
compute_accuracy_metrics(Y_test=np.argmax(y_test,axis=1), P_pred = LRpreds)
def fit_LR(vectorizer=CountVectorizer, min_df = 0, train_size=0.8, penalty = 'l2', solver = 'lbfgs'):
    """Vectorize the tweets, fit logistic regression, and report accuracy.

    Reads the module-level ``train`` DataFrame and ``stopwords`` set.
    Returns a dict with 'Accuracy', the ten most positive coefficient words
    ('top'), the ten most negative ('bottom'), and the feature count 'n_cols'.
    """
    vectorizer_inst = vectorizer(stop_words=stopwords, min_df=min_df)
    doc_term = vectorizer_inst.fit_transform(train["tweet"])
    vocab = np.array(vectorizer_inst.get_feature_names())
    features = np.asarray(doc_term.todense(), dtype=np.float32)
    labels = train['label']
    tr_X, te_X, tr_y, te_y = train_test_split(features, labels, train_size=train_size, random_state=1)
    model = LogisticRegression(penalty=penalty, solver=solver)
    model.fit(tr_X, tr_y)
    predictions = model.predict(te_X)
    accuracy = compute_accuracy_metrics(Y_test=te_y, P_pred=predictions)
    coefs = np.ravel(model.coef_)
    sorted_coefs = np.sort(coefs)
    top_cut = sorted_coefs[-11]     # 11th largest: keeps the top ten strictly above
    bottom_cut = sorted_coefs[10]   # 11th smallest: keeps the bottom ten strictly below
    top_df = pd.DataFrame({'words': vocab[coefs > top_cut], 'coefs': coefs[coefs > top_cut]})
    top_df = top_df.sort_values(by=['coefs'], ascending=False, ignore_index=True)
    bottom_df = pd.DataFrame({'words': vocab[coefs < bottom_cut], 'coefs': coefs[coefs < bottom_cut]})
    bottom_df = bottom_df.sort_values(by=['coefs'], ascending=True, ignore_index=True)
    return {'Accuracy': accuracy.get('Accuracy'), 'top': top_df, 'bottom': bottom_df, 'n_cols': tr_X.shape[1]}
accuracies = []
sizes = []  # fix: was appended to below without ever being initialized (NameError)
for i in range(1,11):
    accuracies.append(fit_LR(min_df = i, train_size = 0.5).get('Accuracy'))
    sizes.append(i)
plt.plot(range(1,11),accuracies)
# Vocabulary size as a function of the min_df document-frequency cutoff.
n_columns = []
for i in range(1,11):
    n_columns.append(fit_LR(min_df = i, train_size = 0.3).get('n_cols'))
plt.plot(range(1,11),n_columns)
accuracies = []
sizes = []
for i in np.arange(0.1,1,0.1):
    # fix: fit_LR returns a dict; plot needs the scalar accuracy, not the dict
    accuracies.append(fit_LR(min_df = 2, train_size = i).get('Accuracy'))
    sizes.append(i)
plt.plot(sizes, accuracies)
fit_LR(min_df = 2, train_size = 0.8, vectorizer=TfidfVectorizer)
fit_LR(min_df = 2, train_size = 0.8, vectorizer=CountVectorizer)
# fix: fit once and reuse the result; the original refit the identical model
# twice just to read 'top' and 'bottom' separately.
lr_result = fit_LR(min_df = 2, train_size = 0.8, vectorizer=CountVectorizer)
top_df = lr_result.get('top')
bottom_df = lr_result.get('bottom')
top_df
bottom_df
| notebooks/lr_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pandas
# https://pandas.pydata.org/
#
# 10 Minutes to pandas
#
# http://pandas.pydata.org/pandas-docs/stable/10min.html
#
# Tutorials
#
# http://pandas.pydata.org/pandas-docs/stable/tutorials.html
import numpy as np
import pandas as pd
# **Series**
# +
# pd.Series( data = [ ] , index = [ ] )
ser1 = pd.Series( [10, 20, 30] )
ser1
# -
ser1.index = ['a', 'b', 'c']
ser1
ser2 = pd.Series( {'a':100, 'b':200, 'c':300} )
ser2
ser3 = pd.Series( [1, 2, 3, 4], index = ['USA', 'Germany', 'France', 'Japan'] )
ser3
ser4 = pd.Series( [1, 2, 5, 4], index = ['USA', 'Germany', 'Italy', 'Japan'] )
ser4
# addition aligns on index labels; non-matching labels produce NaN
ser3 + ser4
# **DataFrame**
# pd.DataFrame( data = [ ], index = [ ], columns = [ ] )
df = pd.read_csv( 'gapminder.csv', index_col = 'Unnamed: 0' )
df.head()
df['income'][:5]
df['gross_income'] = df['income'] * df['population']
df.head()
df.drop( labels = 'gross_income', axis = 1, inplace = True )
df.head()
# Selecting rows and columns
df.loc[3]
df.loc[ 5, 'country' ]
df.loc[ [ 10, 100, 1000 ], ['continent', 'country'] ]
# Conditional Selection
df[ df['income'] > 50000 ]
# multiple conditions : & (and) , | (or)
df[ ( df['income'] > 50000 ) & ( df['life_exp'] > 80 ) ]
# Setting and resetting indices
# (these return new frames and are not assigned back -- display-only demos)
df.set_index('year')
df.dropna().reset_index()
# Missing Data
df['income'].fillna( value = 0 )
# Groupby
by_year = df.groupby('year')
by_year
by_year.mean().tail()
by_year.describe().tail()
by_year.describe()[ 'income' ]
# Multi-Index and Index Hierarchy
outside = df['year']
inside = df['continent']
asia_df = df[ df['continent'] == 'asia' ]
amer_df = df[ df['continent'] == 'americas' ]
eur_df = df[ df['continent'] == 'europe' ]
afr_df = df[ df['continent'] == 'africa' ]
asia_df.groupby( by = 'year' ).mean()
| python/pandas/1-pandas-Series-DataFrame.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Exercise 1 Part C**
#
# | trace | elementary function | current function value | elementary function derivative | $\nabla_{x}$ | $\nabla_{y}$ |
# | :---: | :---: | :---------: | :---: |:---: |:---: |
# | $x_1$ | $x_1$ | $x$ | $\dot{x_1}$ | $1$ | $0$ |
# | $x_2$ | $x_2$ | $y$ | $\dot{x_2}$ | $0$ | $1$ |
# | $x_3$ | $w_{11}x_1$ | $w_{11}x$ | $w_{11}\dot{x_1}$ | $w_{11}$ | $0$ |
# | $x_4$ | $w_{21}x_1$ | $w_{21}x$ | $w_{21}\dot{x_1}$ | $w_{21}$ | $0$ |
# | $x_5$ | $w_{12}x_2$| $w_{12}y$ | $w_{12}\dot{x_2}$| $0$ | $w_{12}$ |
# | $x_6$ | $w_{22}x_2$| $w_{22}y$ | $w_{22}\dot{x_2}$ | $0$ | $w_{22}$ |
# | $x_7$ | $x_3+x_5$ | $w_{11}x+w_{12}y$ | $\dot{x_3}+\dot{x_5}$ | $w_{11}$ | $w_{12}$ |
# | $x_8$ | $x_4+x_6$ | $w_{21}x+w_{22}y$ | $\dot{x_4}+\dot{x_6}$ | $w_{21}$ | $w_{22}$ |
# | $x_9$ | $z(x_7)$ | $z(w_{11}x+w_{12}y)$ | $\dot{x_7}z'(x_7)$ | $w_{11}z'(w_{11}x+w_{12}y)$ | $w_{12}z'(w_{11}x+w_{12}y)$ |
# | $x_{10}$ | $z(x_8)$ | $z(w_{21}x+w_{22}y)$ | $\dot{x_8}z'(x_8)$ | $w_{21}z'(w_{21}x+w_{22}y)$ |$w_{22}z'(w_{21}x+w_{22}y)$ |
# | $x_{11}$ | $w_{1,out}x_9$ | $w_{1,out}z(w_{11}x+w_{12}y)$ | $w_{1,out}\dot{x_9}$ | $w_{1,out}w_{11}z'(w_{11}x+w_{12}y)$ | $w_{1,out}w_{12}z'(w_{11}x+w_{12}y)$ |
# | $x_{12}$ | $w_{2,out}x_{10}$ | $w_{2,out}z(w_{21}x+w_{22}y)$ | $w_{2,out}\dot{x_{10}}$ | $w_{2,out}w_{21}z'(w_{21}x+w_{22}y)$ | $w_{2,out}w_{22}z'(w_{21}x+w_{22}y)$ |
# | $f$ | $x_{11}+x_{12}$| $w_{1,out}z(w_{11}x+w_{12}y)+w_{2,out}z(w_{21}x+w_{22}y)$ | $\dot{x_{11}}+\dot{x_{12}}$ | $w_{1,out}w_{11}z'(w_{11}x+w_{12}y)+w_{2,out}w_{21}z'(w_{21}x+w_{22}y)$ | $w_{1,out}w_{12}z'(w_{11}x+w_{12}y)+w_{2,out}w_{22}z'(w_{21}x+w_{22}y)$ |
#
| homework/HW4/.ipynb_checkpoints/Trace Table-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
books = pd.read_csv('./data/books.csv')
books
genres = pd.read_csv('./data/genres.csv')
genres
# We only keep books rated above 4 that were published after 1500
books = books[(books.rating > 4) & (books.publication_year >= 1500)]
books['decade'] = books['publication_year'] // 10 * 10
decade_count = books.groupby('decade').count()
decade_count = decade_count['title']
decade_count = decade_count.sort_values(ascending=False)
# Decades with the most bestsellers
decade_count.head(10)
# Comment: the hypothesis that the most-read books are contemporary is confirmed
best_authors = books.groupby('author').count()
best_authors = best_authors[['title']].rename({'title':'count'}, axis=1)
# The twenty most successful authors
best_authors.sort_values('count', ascending=False).head(20)
# Comment: the hypothesis that the most successful authors are mostly men is confirmed
# Correlation between year and rating
books.plot(kind = 'scatter', x = 'decade', y = 'rating')
# Comment: contrary to expectations, not only the number of popular books but also their rating grows over the years
romances = books[books.title.isin(genres[genres.genre == 'Romance'].title)]
romances
# NOTE(review): assigning a column on a filtered slice may trigger
# pandas' SettingWithCopyWarning — consider .copy() after the filter.
romances['century'] = romances['decade'] // 100 * 100
romances.head()
romances = romances.groupby('century').count()[['title']].rename({'title':'count'}, axis=1)
romances
romances.plot(kind = 'bar')
# Comment: most romance novels are contemporary, but that may simply reflect that most books on the list are contemporary. Let's look at which period's romance novels are, in relative terms, the biggest hits.
books['century'] = books['decade'] // 100 * 100
books.head()
century_count = books.groupby('century').count()
century_count = century_count['title']
century_count = century_count.sort_values(ascending=False)
century_count.head()
century_count = century_count.reset_index()
century_count.columns = ['century', 'count']
century_count.head()
romances.reset_index(inplace=True)
romances.head()
romances_percentage = romances.merge(century_count, on='century')
romances_percentage.columns = ['century', 'romances', 'total']
romances_percentage['percentage_of_romances'] = romances_percentage['romances'] / romances_percentage['total']
romances_percentage.sort_values('percentage_of_romances')
romances_percentage
# Relative share of romance books per century
romances_percentage.plot(kind='bar', x='century', y='percentage_of_romances')
# Comment: contrary to expectations, the largest share of romance books is from the 20th century
| analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Notebook for generating and saving SBM CLUSTER graphs
# +
import numpy as np
import torch
import pickle
import time
# %matplotlib inline
import matplotlib.pyplot as plt
import scipy.sparse
# -
# # Generate SBM CLUSTER graphs
# +
def schuffle(W,c):
    """Relabel the graph vertices with one random permutation.

    (Name kept as-is — 'schuffle' — for compatibility with existing callers.)
    Returns the permuted adjacency matrix, the permuted labels, and the
    permutation itself.
    """
    perm = np.random.permutation(W.shape[0])
    #idx2=np.argsort(perm) # for index ordering wrt classes
    permuted_W = W[perm, :][:, perm]
    permuted_c = c[perm]
    return permuted_W, permuted_c, perm
def block_model(c,p,q):
    """Sample a symmetric SBM adjacency matrix.

    Each pair (i, j) with i < j is linked with probability p when
    c[i] == c[j] (same community) and q otherwise.
    """
    n = len(c)
    W = np.zeros((n, n))
    for i in range(n):
        for j in range(i + 1, n):
            edge_prob = p if c[i] == c[j] else q
            if np.random.binomial(1, edge_prob) == 1:
                W[i, j] = 1
                W[j, i] = 1
    return W
def unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q):
    """Draw per-cluster sizes in [clust_size_min, clust_size_max] and sample
    the corresponding SBM adjacency matrix.  Returns (W, c)."""
    label_chunks = []
    for r in range(nb_of_clust):
        if clust_size_max==clust_size_min:
            size_r = clust_size_max  # fixed-size clusters: no RNG draw
        else:
            size_r = np.random.randint(clust_size_min,clust_size_max,size=1)[0]
        label_chunks.append(np.repeat(r, size_r, axis=0))
    c = np.concatenate(label_chunks)
    return block_model(c, p, q), c
class generate_SBM_graph():
    """Sample one SBM graph and expose it as torch tensors.

    Despite the lowercase name this is a class used as a one-shot factory:
    construction does all the work and the result lives in the attributes
    nb_nodes, W (int8 adjacency), rand_idx (permutation), node_feat (u),
    node_label (community targets).
    """
    def __init__(self, SBM_parameters):
        # parameters
        nb_of_clust = SBM_parameters['nb_clusters']
        clust_size_min = SBM_parameters['size_min']
        clust_size_max = SBM_parameters['size_max']
        p = SBM_parameters['p']
        q = SBM_parameters['q']
        # block model
        W, c = unbalanced_block_model(nb_of_clust, clust_size_min, clust_size_max, p, q)
        # shuffle
        W, c, idx = schuffle(W,c)
        # signal on block model: mark one random seed node per cluster with
        # the value r+1 (all other nodes stay 0)
        u = np.zeros(c.shape[0])
        for r in range(nb_of_clust):
            cluster = np.where(c==r)[0]
            s = cluster[np.random.randint(cluster.shape[0])]
            u[s] = r+1
        # target
        target = c
        # convert to pytorch (small int dtypes keep the pickled dataset compact)
        W = torch.from_numpy(W)
        W = W.to(torch.int8)
        idx = torch.from_numpy(idx)
        idx = idx.to(torch.int16)
        u = torch.from_numpy(u)
        u = u.to(torch.int16)
        target = torch.from_numpy(target)
        target = target.to(torch.int16)
        # attributes
        self.nb_nodes = W.size(0)
        self.W = W
        self.rand_idx = idx
        self.node_feat = u
        self.node_label = target
# configuration for the CLUSTER dataset graphs
SBM_parameters = {}
SBM_parameters['nb_clusters'] = 6
SBM_parameters['size_min'] = 5
SBM_parameters['size_max'] = 35
SBM_parameters['p'] = 0.55
SBM_parameters['q'] = 0.25
print(SBM_parameters)
data = generate_SBM_graph(SBM_parameters)
print(data)
#print(data.nb_nodes)
#print(data.W)
#print(data.rand_idx)
#print(data.node_feat)
#print(data.node_label)
# +
#Plot Adj matrix (shuffled vertex order)
W = data.W
plt.spy(W,precision=0.01, markersize=1)
plt.show()
# Undo the permutation to reveal the block-diagonal community structure
idx = np.argsort(data.rand_idx)
W = data.W
W2 = W[idx,:]
W2 = W2[:,idx]
plt.spy(W2,precision=0.01, markersize=1)
plt.show()
# -
# +
# Generate and save SBM graphs
class DotDict(dict):
    """Dictionary whose entries are also accessible as attributes."""
    def __init__(self, **kwds):
        super().__init__()
        self.update(kwds)
        # aliasing __dict__ to the dict itself makes d.key and d['key'] equivalent
        self.__dict__ = self
def generate_semisuperclust_dataset(nb_graphs):
    """Generate nb_graphs SBM graphs (using the global SBM_parameters) and
    pack each one into a DotDict record."""
    dataset = []
    for graph_idx in range(nb_graphs):
        if graph_idx % 250 == 0:
            print(graph_idx)  # progress indicator
        data = generate_SBM_graph(SBM_parameters)
        record = DotDict()
        record.nb_nodes = data.nb_nodes
        record.W = data.W
        record.rand_idx = data.rand_idx
        record.node_feat = data.node_feat
        record.node_label = data.node_label
        dataset.append(record)
    return dataset
def plot_histo_graphs(dataset, title):
    """Plot a histogram of the graph sizes in the dataset."""
    sizes = [graph.nb_nodes for graph in dataset]
    plt.figure(1)
    plt.hist(sizes, bins=50)
    plt.title(title)
    plt.show()
def SBMs_CLUSTER(nb_graphs, name):
    """Generate a CLUSTER split, pickle it to <name>.pkl, and plot the
    graph-size histogram."""
    dataset = generate_semisuperclust_dataset(nb_graphs)
    print(len(dataset))
    with open(name + '.pkl', "wb") as handle:
        pickle.dump(dataset, handle)
    plot_histo_graphs(dataset, name)
# Generate the train/val/test splits (commented sizes are smaller debug runs).
start = time.time()
nb_graphs = 10000 # train
#nb_graphs = 3333 # train
#nb_graphs = 500 # train
#nb_graphs = 20 # train
SBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_train')
nb_graphs = 1000 # val
#nb_graphs = 333 # val
#nb_graphs = 100 # val
#nb_graphs = 5 # val
SBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_val')
nb_graphs = 1000 # test
#nb_graphs = 333 # test
#nb_graphs = 100 # test
#nb_graphs = 5 # test
SBMs_CLUSTER(nb_graphs, 'SBM_CLUSTER_test')
print('Time (sec):',time.time() - start) # 190s
# -
| data/SBMs/generate_SBM_CLUSTER.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Add style to a Layer
#
# In this example, a simple style is added to the Layer using [CARTO VL](https://carto.com/developers/carto-vl/) syntax.
#
# For more information, run `help(Layer)`.
# +
from cartoframes.auth import set_default_credentials
from cartoframes.viz import Map, Layer
set_default_credentials('cartoframes')
# -
Map(
Layer(
"SELECT * FROM global_power_plants WHERE country IN ('Brazil')",
'''
color: blue
width: 5
strokeColor: rgb(255,255,255)
strokeWidth: 1
'''
)
)
| examples/layers/add_style_layer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
#Packages for visualisation
#Scikit-Learn Packages
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
#Visualisation and Other packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('ggplot')
# -
iris = datasets.load_iris()
print (iris.keys())
print (type(iris.data), type(iris.target))
iris.data.shape
iris.target_names
X = iris.data
y = iris.target
#Splitting the dataset into train and test (stratified to keep class balance)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .3, random_state=21, stratify=y)
df = pd.DataFrame(X_train, columns=iris.feature_names)
print(df.head())
print (df.info())
#Visual EDA
plt.figure()
pd.plotting.scatter_matrix(df, c = y_train, figsize = [8, 8], s=150, marker = 'D')
plt.show()
#Building Classifier on the Iris Dataset
knn = KNeighborsClassifier(n_neighbors=8)
knn.fit(X_train, y_train)
y_prediction = knn.predict(X_test)
print ("Prediction on Test Sample : \n {}".format(y_prediction))
#KNN Score Computation for Unlabelled Data
knn.score(X_test, y_test)
# +
#Nearest Neighbors v/s Accuracy: refit for k = 1..8 and record both scores
neighbors = np.arange(1, 9)
train_accuracy = np.empty(len(neighbors))
test_accuracy = np.empty(len(neighbors))
for i, k in enumerate(neighbors):
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    train_accuracy[i] = knn.score(X_train, y_train)
    test_accuracy[i] = knn.score(X_test, y_test)
# -
# Compute plot
plt.title('k-NN: Varying Number of Neighbors')
plt.plot(neighbors, test_accuracy, label = 'Testing Accuracy')
plt.plot(neighbors, train_accuracy, label = 'Training Accuracy')
plt.legend()
plt.xlabel('Number of Neighbors')
plt.ylabel('Classifier Accuracy')
plt.show()
| Classification/Classifier-Iris.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="uOtwCpbb226Z"
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="o6JF3Q2j28il" outputId="7a9c5a03-fbef-4327-877a-8d4a36a072ed"
retail = pd.read_csv('https://raw.githubusercontent.com/PacktWorkshops/The-Data-Analysis-Workshop/master/Chapter08/Datasets/online_retail_II.csv')
retail.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="wd6smOJG3DkT" outputId="455b70bb-908f-41c6-9de7-797e7e48f3d1"
# Normalize column names to snake_case
retail.rename(index = str, columns = {
    'Invoice' : 'invoice',
    'StockCode' : 'stock_code',
    'Quantity' : 'quantity',
    'InvoiceDate' : 'date',
    'Price' : 'unit_price',
    'Country' : 'country',
    'Description' : 'desc',
    'Customer ID' : 'cust_id'
}, inplace = True)
retail.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" id="4Ty38CBI3Khb" outputId="704d6eb8-9b64-4a0d-f2e0-f0a5da4faaf7"
retail.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" id="Ftdl0yR83OAS" outputId="57f410fd-6cbe-4bb4-84f8-7cd04e844380"
retail.isnull().sum().sort_values(ascending = False)
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" id="7mLoj6Kc3Qxc" outputId="ac8757fc-d22d-4d4f-f70a-9980fe08375c"
retail.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 111} colab_type="code" id="_ebHJxaO3WPL" outputId="9efc98fe-9a42-454e-b564-d379c1e8f21e"
# Inspect the extreme unit_price rows spotted in describe()
retail.loc[retail['unit_price'] == 25111.090000]
# + colab={"base_uri": "https://localhost:8080/", "height": 80} colab_type="code" id="mSM6ufhJ3YE7" outputId="95a0e116-c893-44f0-82dd-0f6d5ff9a233"
retail.loc[retail['unit_price'] == -53594.360000]
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="-xsbA5GN3hK4" outputId="d077450f-9462-4b52-9bf6-532e5d1bd275"
(retail['unit_price'] <= 0).sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="VCZTw7GA3jEx" outputId="de12ddf7-eb69-4e93-e114-02e988471322"
(retail['quantity'] <= 0).sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="KqpWFgpk3kWA" outputId="84add5ab-916c-4b40-f1a7-3b5334b3144b"
((retail['unit_price'] <= 0) & (retail['quantity'] <= 0) & (retail['cust_id'].isnull())).sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="Zcn2wNYo3s-v" outputId="2daad40e-f193-4cfa-e8a6-dc4d21cb6e21"
null_retail = retail[retail.isnull().any(axis=1)]
null_retail.head()
# + colab={} colab_type="code" id="cpeKtu4W3uUG"
# Drop rows with any missing value, then keep only positive prices/quantities
new_retail = retail.dropna()
# + colab={} colab_type="code" id="D-r6Ha4A31lx"
new_retail = new_retail[(new_retail['unit_price'] > 0) & (new_retail['quantity'] > 0)]
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" id="6ZtwuBiE33P-" outputId="4bb86d02-3b5a-4dc0-c994-133127e3e91d"
new_retail.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 389} colab_type="code" id="-Dzq-Mbl35e2" outputId="126a90db-7c53-487e-a7f0-e36c7df130ca"
plt.subplots(figsize = (12, 6))
up = sns.boxplot(new_retail.unit_price)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="S0GU-c7S4ANN" outputId="f08d1f09-957e-4769-8e5b-4a42826d3db0"
# Trim price outliers seen in the boxplot
new_retail = new_retail[new_retail.unit_price < 6000]
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" id="eOyZ38ii4CA2" outputId="8668f69f-367b-4621-b01a-1a32bc1d5894"
new_retail.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 280} colab_type="code" id="lqndq7ep4MlW" outputId="bcc99744-2eb5-4124-9010-c20a32beb72b"
up_new = sns.boxplot(new_retail.unit_price)
# + colab={"base_uri": "https://localhost:8080/", "height": 388} colab_type="code" id="q0EDlr-W4TOF" outputId="3ee703bf-ce67-45b0-a3ae-f85c0fedadc6"
plt.subplots(figsize = (12, 6))
q = sns.boxplot(new_retail.quantity)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="gMnvTANe4VdM" outputId="22369dd2-37a3-4863-9688-3a4d93b963c6"
# Trim quantity outliers seen in the boxplot
new_retail = new_retail[new_retail.quantity < 15000]
# + colab={"base_uri": "https://localhost:8080/", "height": 297} colab_type="code" id="xNsH2Wy94iPm" outputId="8656264f-f4d9-47fa-a8b8-a6cabc293a4f"
new_retail.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 279} colab_type="code" id="SS04zP2K4opr" outputId="128b2d42-e9b3-4f3d-ec9b-a1e096fdf8d5"
q_new = sns.boxplot(new_retail.quantity)
# + colab={"base_uri": "https://localhost:8080/", "height": 238} colab_type="code" id="-9EuuHow46MR" outputId="4ca522da-054b-4433-928d-539f5dd4f3be"
new_retail[(new_retail.desc.isnull()) & (new_retail.cust_id.isnull())]
new_retail.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="WMG5rAHV5AAa" outputId="9a037bf2-3ef8-4f47-f313-621bdc97f1c4"
# Rebind the cleaned frame under the original name
retail = new_retail
retail.head()
# -
# -
| Chapter08/Exercise8.01/Exercise8.01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Initial snow depth in SnowDegreeDay and SnowEnergyBalance
# **Problem:** Show that setting initial snow depth `h0_snow` has no effect in `SnowDegreeDay` and `SnowEnergyBalance`.
# Import the Babel-wrapped `SnowEnergyBalance` and `SnowDegreeDay` components and create instances:
# NOTE: this notebook targets a Python 2 kernel (print statements below).
from cmt.components import SnowEnergyBalance, SnowDegreeDay
seb, sdd = SnowEnergyBalance(), SnowDegreeDay()
# Initialize the components with cfg files that, for simplicity, use the same time step and run duration:
seb.initialize('./input/snow_energy_balance-1.cfg')
sdd.initialize('./input/snow_degree_day-1.cfg')
# Store initial values of model time and maximum snowpack depth from the two components:
# +
# `time` shadows any imported time module in this notebook's namespace.
time = [sdd.get_current_time()]
sdd_snow_depth = [sdd.get_value('snowpack__depth').max()]
seb_snow_depth = [seb.get_value('snowpack__depth').max()]
print time, sdd_snow_depth, seb_snow_depth
# -
# Advance both models to the end, saving the model time and maximum snowpack depth values at each step:
# Both components share the same step/duration, so stepping them together is safe.
while sdd.get_current_time() < sdd.get_end_time():
    seb.update()
    sdd.update()
    time.append(sdd.get_current_time())
    seb_snow_depth.append(seb.get_value('snowpack__depth').max())
    sdd_snow_depth.append(sdd.get_value('snowpack__depth').max())
# Check the values:
print time
print seb_snow_depth
print sdd_snow_depth
# **Here's the key point:** the snow depth in each model after the first update is set by the equation on line 506 of **snow_base.py**. After the first update, the snow depth evolves according to the physics of the component. See:
# +
# Reproduce the depth each component computes: h = SWE * (rho_water / rho_snow).
rho_H2O = 1000.0
h_swe = sdd.get_value('snowpack__liquid-equivalent_depth').max()
rho_snow = sdd.get_value('snowpack__z_mean_of_mass-per-volume_density').max()
print h_swe * (rho_H2O / rho_snow)  # sdd
h_swe = seb.get_value('snowpack__liquid-equivalent_depth').max()
rho_snow = seb.get_value('snowpack__z_mean_of_mass-per-volume_density').max()
print h_swe * (rho_H2O / rho_snow)  # seb
# -
# Plot the snow depth time series output from each component:
# %matplotlib inline
import matplotlib.pyplot as plt
# +
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(time, seb_snow_depth, 'r', time, sdd_snow_depth, 'b')
ax.set_xlabel('Time (s)')
ax.set_ylabel('Snowpack depth (m)')
ax.set_title('Snowpack depth vs. time')
ax.set_xlim((time[0], time[-1]))
# Narrow y-range: both depths hover near 0.5 m, so zoom in to see the difference.
ax.set_ylim((0.49, 0.51))
# -
# Finalize the components:
seb.finalize(), sdd.finalize()
| initial_snow_depth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# Scraped rental listings for Querétaro; the "-sc" variants presumably exclude
# the El Campanario neighborhood (see the prints below) — TODO confirm.
# dtype="string" keeps prices as raw text for the cleaning steps that follow.
df_dep = pd.read_csv('departamentos-nuevo.csv', dtype="string")
df_dep_sc = pd.read_csv('departamentos-nuevo-sc.csv', dtype="string")
df_casa = pd.read_csv('casas-nuevo.csv', dtype="string")
df_casa_sc = pd.read_csv('casas-nuevo-sc.csv', dtype="string")
def convertir(precios):
    """Convert 'USD <amount>' price strings in *precios* to MXN strings.

    USD entries are multiplied by a hard-coded 20.58 MXN/USD rate, rounded to
    one decimal and then truncated to an integer (original behaviour kept);
    every other entry passes through unchanged.

    NOTE(review): the exchange rate is a snapshot — confirm it is still valid.

    Parameters
    ----------
    precios : iterable of str
        Price strings, either plain numbers or prefixed with 'USD '.

    Returns
    -------
    list of str
    """
    lista = []
    for i in precios:
        if i[0] == 'U':
            # Strip the currency tag, convert to pesos.  (The original code
            # kept a dead local `dato` and a long chain of rebinds; this is
            # the same round-to-1-decimal-then-truncate computation.)
            lista.append(str(int(round(float(i.replace("USD ", "")) * 20.58, 1))))
        else:
            lista.append(i)
    return lista
# -
# Normalize each scraped price column into a numeric 'nprecio' column:
#   1) strip the "MN " prefix, the "$" sign and thousands separators,
#   2) convert any 'USD ...' amounts to pesos with convertir() (apartments only),
#   3) coerce to numbers and treat 0 as missing.
# Uses np.nan instead of the np.NaN alias, which was removed in NumPy 2.0.
df_dep['nprecio']=df_dep['precio'].str.replace("MN ", "", regex=True).str.replace("$","",regex=False).str.replace(",","",regex=False)
df_dep['nprecio']=convertir(df_dep['nprecio'])
df_dep['nprecio']=pd.to_numeric(df_dep["nprecio"], errors="coerce")
df_dep['nprecio']=df_dep['nprecio'].replace(0,np.nan)
print('Precio promedio de renta de departamentos en Querétaro: ')
print("$",round(df_dep['nprecio'].mean(),3))
df_dep_sc['nprecio']=df_dep_sc['precio'].str.replace("MN ", "", regex=True).str.replace("$","",regex=False).str.replace(",","",regex=False)
df_dep_sc['nprecio']=convertir(df_dep_sc['nprecio'])
df_dep_sc['nprecio']=pd.to_numeric(df_dep_sc["nprecio"], errors="coerce")
df_dep_sc['nprecio']=df_dep_sc['nprecio'].replace(0,np.nan)
print('Precios sin El Campanario')
print('Precio promedio de renta de departamentos en Querétaro: ')
print("$",round(df_dep_sc['nprecio'].mean(),3))
df_casa['nprecio']=df_casa['precio'].str.replace("MN ", "", regex=True).str.replace("$","",regex=False).str.replace(",","",regex=False)
# convertir() deliberately skipped for houses (kept from the original analysis):
#df_casa['nprecio']=convertir(df_casa['nprecio'])
df_casa['nprecio']=pd.to_numeric(df_casa["nprecio"], errors="coerce")
df_casa['nprecio']=df_casa['nprecio'].replace(0,np.nan)
print('Precio promedio de renta de casas en Querétaro: ')
print("$",round(df_casa['nprecio'].mean(),3))
print()
df_casa_sc['nprecio']=df_casa_sc['precio'].str.replace("MN ", "", regex=True).str.replace("$","",regex=False).str.replace(",","",regex=False)
#df_casa_sc['nprecio']=convertir(df_casa_sc['nprecio'])
df_casa_sc['nprecio']=pd.to_numeric(df_casa_sc["nprecio"], errors="coerce")
df_casa_sc['nprecio']=df_casa_sc['nprecio'].replace(0,np.nan)
print('Precio sin El Campanario')
print('Precio promedio de renta de casas en Querétaro: ')
print("$",round(df_casa_sc['nprecio'].mean(),3))
print()
| Real Estate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import time
import numpy as np
import pandas as pd
import pickle
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
# +
import pickle
# Load the master modelling dataframe from a previous notebook.
f = open('/home/henry/Insight/Yogee/Datasets/Model_dataset/ModelDf.pckl', 'rb')
ModelDf = pickle.load(f)
f.close()
# +
#Add columns for tranist dataset
# Append an all-NaN 'Transit' column to be filled in by the scraper below.
NanDfValues = np.zeros([np.shape(ModelDf)[0],1])
NanDfValues[:] = np.nan
NanDf = pd.DataFrame(NanDfValues,columns=['Transit'])
ModelDf = pd.concat([ModelDf, NanDf], axis=1, sort=False)
# -
# Scrape only the 2011 rows (one transit score per zip code is enough).
TransitDf = ModelDf[ModelDf['year']==2011]
TransitDf = TransitDf.reset_index(drop=True)
# +
# NOTE: this overwrites the frame just built — it resumes from a checkpoint
# saved by an earlier (partial) scraping run.  Skip this cell for a fresh run.
import pickle
f = open('/home/henry/Insight/Yogee/Datasets/Transit_dataset/TransitDf.pckl', 'rb')
TransitDf = pickle.load(f)
f.close()
# -
# Scrape the AllTransit score for every populated zip code and store it in
# TransitDf['Transit'].  One headless-ish Chrome session per zip code.
#for i in range(0,TransitDf.shape[0]):
for i in range(1034,TransitDf.shape[0]):  # resume index from a previous partial run
    # AllTransit expects a 5-digit, zero-padded ZIP in the URL.
    # (int()/float() replace np.int/np.float, removed in NumPy 1.24.)
    zipcode = '{0:0{width}}'.format(int(TransitDf.loc[i,'zip']), width=5)
    population = TransitDf.loc[i,'population']
    if population>100:  # skip essentially unpopulated ZIPs
        option = webdriver.ChromeOptions()
        #option.add_argument(" — incognito")
        browser = webdriver.Chrome(executable_path='/home/henry/Downloads/chromedriver', chrome_options=option)
        browser.get("https://alltransit.cnt.org/metrics/?addr="+zipcode)
        # Wait up to 20 seconds for the JS-rendered score element to appear.
        timeout = 20
        time.sleep(10)
        try:
            WebDriverWait(browser, timeout).until(EC.visibility_of_element_located((By.XPATH, "//h5[@class='theme-value-L-248']")))
        except TimeoutException:
            print("Timed out waiting for page to load")
            browser.quit()
            # BUG FIX: the original fell through and kept using the quit
            # driver; skip this zip code instead.
            continue
        # find_elements_by_xpath returns an array of selenium objects.
        titles_element = browser.find_elements_by_xpath("//h5[@class='theme-value-L-248']")
        # use list comprehension to get the actual text and not the selenium objects.
        titles = [x.text for x in titles_element]
        browser.quit()
        # Guard against an empty result list before parsing the score.
        if titles and titles[0].replace('.', '').isdigit():
            TransitDf.loc[i,'Transit'] = float(titles[0])
# Spot-check a slice of the scraped rows.
TransitDf.iloc[1750:1794]
# +
# Checkpoint the partially-scraped frame so the loop above can resume later.
import pickle
f = open('/home/henry/Insight/Yogee/Datasets/Transit_dataset/TransitDf.pckl', 'wb')
pickle.dump(TransitDf, f)
f.close()
# -
| Scrape Transit Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CHAPTER 4 - FUNCTIONS
# ### 4.2 Built-in functions
# The max and min functions give us the largest and smallest values in a list, respectively:
# On a string these compare characters, so max is 'w' and min is ' ' here.
max('Hello world')
min('Hello world')
len('Hello world')
dir(str) # Built-in functions for a string
# ### 4.3 Type conversion functions
int('32')
# NOTE: the next call raises ValueError on purpose — it demonstrates that
# int() only accepts strings that look like integers.
int('Hello')
# int can convert floating-point values to integers, but it doesn’t round off; it chops
# off the fraction part:
int(3.99999)
int(-2.3)
# float converts integers and strings to floating-point numbers:
float(32)
float('3.14159')
# Finally, str converts its argument to a string:
str(32)
str(3.14159)
# ### 4.4 Random numbers
# +
import random
# random.random() returns a float in [0.0, 1.0).
for i in range(10):
    x = random.random()
    print(x)
# -
# The function randint takes the parameters low and high, and returns an integer
# between low and high (including both).
random.randint(5, 10)
random.randint(5, 10)
# To choose an element from a sequence at random, you can use choice:
t = [1, 2, 3]
random.choice(t)
random.choice(t)
# ### 4.5 Math functions
import math
print(math)  # shows the module object and where it was loaded from
degrees = 45
radians = degrees / 360.0 * 2 * math.pi
math.sin(radians)
# sin(45°) equals sqrt(2)/2 — the next line confirms it.
math.sqrt(2) / 2.0
# ### 4.6 Adding new functions
def print_lyrics():
    """Print the two lines of the lumberjack song."""
    for line in ("I'm a lumberjack, and I'm okay.",
                 'I sleep all night and I work all day.'):
        print(line)
print(print_lyrics)  # prints the function object itself, not its output
print(type(print_lyrics))  # <class 'function'>
print_lyrics()  # calling the function actually prints the lyrics
def repeat_lyrics():
    """Print the lumberjack song twice by delegating to print_lyrics()."""
    for _ in range(2):
        print_lyrics()
repeat_lyrics()  # prints the song twice
# ### 4.9 Parameters and arguments
def print_twice(bruce):
    """Print the value bound to *bruce* on two consecutive lines."""
    for _ in range(2):
        print(bruce)
print_twice('Spam')
print_twice(17)
import math
print_twice(math.pi)
print_twice('Spam '*4)  # the argument is evaluated once, then printed twice
# ### 4.10 Fruitful functions and void functions
result = print_twice('Bing')  # print_twice is a void function: it returns None
print(result)
print(type(None))
# +
def addtwo(a, b):
    """Return the sum of a and b (a 'fruitful' function)."""
    total = a + b
    return total
x = addtwo(3, 5)  # a fruitful function's result can be stored and reused
print(x)
| CHAPTER 4.ipynb |
# <a href="https://colab.research.google.com/github/mottaquikarim/PYTH2/blob/master/src/PSETS/nb/dict_psets.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # DICTS
#
# ## DICT_BASICS
#
# ### P1.PY
#
#
#
# +
"""
Intro to Dict Concepts
"""
# A) Declare an empty dict as d1.
# B) Create a dict called d2 containing the first and last names below:
# <NAME>, <NAME>, <NAME>, <NAME>
# C) Add Allison Zhang person to d2.
# D) How many people are now in d2? Print out all their *first* names in the vars below.
### num_people =
### first_names =
# E) Delete a random person from d2 and print his/her name in the var below.
### x =
# F) Re-add the name you deleted to the end of d2.
# -
#
#
# ### P2.PY
#
#
#
# +
"""
Predators & Prey
"""
# A) Create a dict called "pred_prey", containing:
### 3 carnivorous marine animals
### For each carnivore, 3 examples of its prey
pred_prey = None  # exercise placeholder — replace with the dict described above
# B) Print out the 2nd predator and its prey in this format:
#### predator2: prey1, prey2, & prey3
# C) Print a unique collection of all the prey in a variable called "prey".
prey = None  # exercise placeholder
# -
#
#
# ### P3.PY
#
#
#
# +
"""
Merging Dicts
"""
# Merge these two dicts without creating a new one.
# Fixture dicts for the merge exercise ("without creating a new one" => d1.update(d2)).
d1 = {'a': 100, 'b': 200}
d2 = {'c': 300, 'd': 400, 'e': 500}
# -
#
#
# ### P4.PY
#
#
#
# +
"""
Lists to Dicts
"""
# Turn these two lists into a dict called grades.
# Fixture lists; note 'Valerie' appears twice, so a plain dict(zip(...)) loses a row.
names = ['Taq', 'Zola', 'Valerie', 'Valerie']
scores = [[98, 89, 92, 94], [86, 45, 98, 100], [100, 100, 100, 100], [76, 79, 80, 82]]
### grades =
# -
#
#
# ## DICT_MANIP_OPS
#
# ### P1.PY
#
#
#
# +
"""
Basic Login
"""
# Imagine you work for a movie streaming service. You're in charge of safeguarding user privacy by ensuring the login feature remains secure. For the sake of example only, below is the dict of user login info. Normally, you wouldn't have access to see this unencrypted of course!
# Fixture "database" of email -> password pairs for the login exercise.
users = {
    '<EMAIL>': 'PassWord',
    '<EMAIL>': '<PASSWORD>',
    '<EMAIL>': '<PASSWORD>',
    '<EMAIL>': '<PASSWORD>',
    '<EMAIL>': '<PASSWORD>'
    # etc
}
# A user enters the below login info (email and password) for your app. Search your database of user logins to see if this account exists and if the password matches what you have on file. If the login credentials are correct, print "Successful login!". Otherwise, print "The login info you entered does not match any of our records."
current_user = { '<EMAIL>': '<PASSWORD>' }
# -
#
#
# ### P3.PY
#
#
#
# +
"""
Math with Girl Scout Cookies
"""
# Print out the number of boxes of girl scout cookies that each girl in the troop sold in the below format:
# Wendy: _____
# Connie: _____
# Francesca: _____
# Fixture dicts: boxes sold per cookie type, one dict per girl.
Wendy = {'tagalongs': 5, 'thin mints': 12, 'samoas': 8}
Connie = {'tagalongs': 10, 'thin mints': 4, 'samoas': 12}
Francesca = {'tagalongs': 18, 'thin mints': 14, 'samoas': 10}
### salesW =
### salesC =
### salesF =
# For each type of girl scout cookie, print out the total number of boxes sold in the below format:
# tagalongs: _____
# thin mints: _____
# samoas: _____
### total_tagalongs =
### total_thinmints =
### total_samoas =
# For each type of girl scout cookie, print out the average number of boxes sold in the below format:
# tagalongs: _____
# thin mints: _____
# samoas: _____
### avg_tagalongs =
### avg_thinmints =
### avg_samoas =
# Print out total the number of boxes of cookies the girls sold collectively as follows:
# "This year we sold ______ boxes!"
### boxes_sold =
# -
#
#
# ### P4.PY
#
#
#
# +
"""
Inverting Keys & Values
"""
# Invert dict1 - make the current keys into values and the current values into keys.
dict1 = { "k1" : "v1", "k2" : "v2", "k3" : "v1" }  # note: "v1" is duplicated, so a naive inversion drops a key
# -
#
#
# ## COUNTERS_CHALLENGE
#
# ### P1.PY
#
#
#
# +
"""
Word Frequency
"""
# Print out the number of words in this movie quote. Find and print out the most common word in the quote and how many times it was used.
### Hint: You do not need a loop for this. Look up the Counter docs in python3.
from collections import Counter
# Fixture word list; Counter(princess_bride).most_common(1) solves the exercise.
princess_bride = [
    'Hello', 'my', 'name', 'is', 'Inigo', 'Montoya',
    'You', 'killed', 'my', 'father',
    'Prepare', 'to', 'die'
]
### fave_word =
# p.s. You might use this to help analyze the most common topic in comments or reviews from your users to help understand the best places to improve you product.
# -
#
#
# ### P2.PY
#
#
#
# +
"""
Summing Dict Values
"""
# Two Kindergarten teachers poll their classes for what fruit they want to eat for snacktime tomorrow. Only one of them is going shopping, so she needs to know how many of each fruit she needs to buy in total. Tally these up and assign them to the "shopping_list" dict.
# Fixture polls; summing per-key totals (e.g. Counter(poll1) + Counter(poll2)) solves the exercise.
poll1 = {'apples': 8, 'bananas': 12}
poll2 = {'apples': 6, 'bananas': 6, 'clementines': 8}
### shopping_list =
# -
#
#
| src/PSETS/nb/dict_psets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import glob
import sys
import os
sys.path.insert(0, '../src/')
# %matplotlib inline
from mytools import *
import groupdynamics_logs_lib
# -
# # Helper functions
# +
import importlib  # `imp` is deprecated since Python 3.4; importlib.reload replaces imp.reload
def reload():
    """Re-import groupdynamics_logs_lib so in-notebook edits to it take effect."""
    importlib.reload(groupdynamics_logs_lib)
# -
# # Parameters
base_directory = '~/Datasets/GroupDynamicsData/Session_*'  # glob pattern; '~' is expanded later with os.path.expanduser
# +
# with Timer():
# folder_names = glob.glob(os.path.expanduser(base_directory))
# teams = {}
# for directory in folder_names:
# team_log = groupdynamics_logs_lib.TeamLogsLoader(directory=directory)
# team_log.messages.to_csv('~/Datasets/GroupDynamicsData/msgs/' + directory.split('_')[-1]+'.csv')
# -
# # Main
# Load every session directory into a TeamLogsLoader, keyed by its team id
# (a string like 'gds101' — see the manual fix-ups below).
with Timer():
    folder_names = sorted(glob.glob(os.path.expanduser(base_directory)))
    teams = {}
    for index, directory in enumerate(folder_names):
        # NOTE(review): debug guard that silently drops sessions 31+ — the
        # inline marker says it should be removed before a full run.
        if index < 31:  # << CHECK HERE >> REMOVE THIS LINE.
            print(directory, '...')
            team_log = groupdynamics_logs_lib.TeamLogsLoader(directory=directory)
            teams[team_log.team_id] = team_log
len(teams)
# ## Manuallying adding reported missing data by subjects right after their experiment (given to me by <NAME> (<EMAIL>))
# +
# Append survey rows that subjects reported as missing right after their
# experiment (values relayed by the experimenter), then restore timestamp
# order.  Row layout matches the loaders' frames:
#   answers:    [person, question, event, value, timestamp]
#   influences: [person, question, target, value, timestamp]
_reported_answers = [
    ('gds130.1', 'GD_solo_asbestos2', 'confidence', '90%', '2020-02-11 17:15:39'),
    ('gds130.2', 'GD_solo_disaster0', 'answer', '$1,000,000', '2020-02-11 17:20:24'),
    ('gds130.2', 'GD_solo_disaster0', 'confidence', '50', '2020-02-11 17:20:26'),
    ('gds130.2', 'GD_solo_disaster1', 'answer', '$1,000,000', '2020-02-11 17:22:28'),
    ('gds130.2', 'GD_solo_disaster1', 'confidence', '80', '2020-02-11 17:22:30'),
    ('gds130.2', 'GD_solo_surgery1', 'confidence', '80', '2020-02-11 17:40:56'),
    ('gds130.2', 'GD_solo_school1', 'answer', '9/10', '2020-02-11 17:52:53'),
    ('gds130.2', 'GD_solo_school1', 'confidence', '80', '2020-02-11 17:52:55'),
]
for row in _reported_answers:
    teams['gds130'].answers.loc[len(teams['gds130'].answers)] = list(row)
teams['gds130'].answers.sort_values(by='timestamp', inplace=True)

_reported_influences = {
    'gds130': [
        ('gds130.1', 'GD_influence_asbestos1', 'self', '30', '2020-02-11 17:13:00'),
        ('gds130.1', 'GD_influence_asbestos1', 'other', '70', '2020-02-11 17:13:01'),
    ],
    'gds129': [
        ('gds129.1', 'GD_influence_asbestos1', 'self', '40', '2020-02-10 17:11:58'),
        ('gds129.1', 'GD_influence_asbestos1', 'other', '60', '2020-02-10 17:11:59'),
    ],
}
for _team_id, _rows in _reported_influences.items():
    for row in _rows:
        teams[_team_id].influences.loc[len(teams[_team_id].influences)] = list(row)
    teams[_team_id].influences.sort_values(by='timestamp', inplace=True)

# teams['gds138']
# -
# ## Manually adding by extracted answers from msgs
# +
# Solo answers recovered manually from the chat transcripts (subjects typed
# them in chat instead of the survey form).  Every recovered row is an
# 'answer' event with the same placeholder timestamp; the owning team id is
# the part of the person id before the dot (e.g. 'gds103.2' -> 'gds103').
_answers_from_msgs = [
    ('gds103.2', 'GD_solo_sports1', '0.5'),
    ('gds104.1', 'GD_solo_disaster0', '1000000'),
    ('gds106.1', 'GD_solo_asbestos1', '18.50'),
    ('gds106.1', 'GD_solo_disaster0', '20000'),
    ('gds106.1', 'GD_solo_disaster1', '100000'),
    ('gds110.1', 'GD_solo_disaster1', '1000000'),
    ('gds120.1', 'GD_solo_asbestos3', '20'),
    ('gds121.2', 'GD_solo_asbestos2', '25'),
    ('gds121.2', 'GD_solo_disaster1', '1000000'),
    ('gds122.2', 'GD_solo_surgery0', '1'),
    ('gds123.2', 'GD_solo_disaster0', '1000000'),
    ('gds124.2', 'GD_solo_surgery0', '0.99'),
    ('gds125.1', 'GD_solo_disaster0', '13125'),
    ('gds125.1', 'GD_solo_disaster1', '13125'),
    ('gds125.1', 'GD_solo_disaster2', '30000'),
    ('gds126.2', 'GD_solo_disaster1', '500000'),
    ('gds127.1', 'GD_solo_disaster0', '1000000'),
    ('gds130.1', 'GD_solo_disaster0', '1000000'),
    ('gds130.2', 'GD_solo_surgery1', '0.85'),
    ('gds131.1', 'GD_solo_school1', '0.5'),
    ('gds131.2', 'GD_solo_sports0', '0'),
    ('gds118.2', 'GD_solo_disaster1', '2'),
    ('gds106.1', 'GD_solo_school1', '0.95'),
    ('gds106.1', 'GD_solo_school2', '0.95'),
    ('gds106.1', 'GD_solo_school3', '0.95'),
    ('gds108.1', 'GD_solo_disaster3', '1000000'),
    ('gds116.1', 'GD_solo_asbestos3', '25'),
    ('gds116.1', 'GD_solo_school0', '0.9'),
    ('gds116.1', 'GD_solo_school2', '0.9'),
    ('gds116.1', 'GD_solo_school3', '0.9'),
    ('gds116.1', 'GD_solo_surgery2', '0.95'),
    ('gds128.1', 'GD_solo_surgery0', '0.6'),
    ('gds128.1', 'GD_solo_surgery1', '0.6'),
    ('gds129.2', 'GD_solo_school2', '0.5'),
    ('gds131.1', 'GD_solo_disaster1', '250000'),
    ('gds131.2', 'GD_solo_disaster0', '10000'),
    ('gds131.2', 'GD_solo_disaster2', '150000'),
    ('gds132.1', 'GD_solo_surgery0', '0.3'),
    ('gds132.1', 'GD_solo_surgery1', '0.3'),
    ('gds117.1', 'GD_solo_disaster1', '50000'),
    ('gds117.2', 'GD_solo_sports2', '0.25'),
]
for _person, _question, _value in _answers_from_msgs:
    _team = _person.split('.')[0]
    teams[_team].answers.loc[len(teams[_team].answers)] = [
        _person, _question, 'answer', _value, '2021-01-01 12:00:00']
# +
# data[data['Group'] == '106']
# -
# ## Generating the full file
# Pick up any edits to groupdynamics_logs_lib before generating the final table.
reload()
with Timer():
    data = groupdynamics_logs_lib.get_all_groups_info_in_one_dataframe(teams)
data.sort_values(by=['Group', 'Person', 'Issue'], inplace=True)
# Print the ids of groups that have no empty cells at all (fully complete data).
for group in np.unique(data.Group):
    d = data[data['Group'] == group]
    # nn = len(np.where(d[['Initial opinion', 'Period1 opinion', 'Period2 opinion', 'Period3 opinion']] == '')[0])
    nn = len(np.where(d == '')[0])
    if nn == 0:
        print(group)
data.to_csv('DyadData.csv', index=False)
data.head()
# ##### Separate details about each team log
teams['gds101'].messages
# NOTE(review): `teams` is keyed by string team ids (e.g. 'gds101'), so the
# integer lookups below (teams[i], teams[0]) raise KeyError as written —
# this cell appears to be leftover scratch from when teams was a list.
for i in range(len(teams)):
    answers_shape = teams[i].get_answers_in_simple_format().shape
    if answers_shape != (20, 5):
        print('Team {} was not completed since its shape was: {}'.format(i, answers_shape))
teams['gds116'].influences
teams[0].answers
answers_in_simple_format = teams[0].get_answers_in_simple_format()
influence_orders, influence_matrices = teams[0].get_influence_matrices2x2()
answers_in_simple_format
for index in range(15):
    print(influence_orders[index])
    print(influence_matrices[index])
    print('\n')
teams[0].get_frustrations_in_simple_format()
| notebooks/.ipynb_checkpoints/1- Logs analysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python2
# ---
# %load_ext autoreload
# %autoreload 2
from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path
import tensorflow as tf
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
# %matplotlib inline
import seaborn as sns
sns.set()
# The files include:
#
# cosmology_camb.dat : the input training cosmology, only 5 parameters: Om, Ob, sigma_8, h, n_s. w and N_eff are not used here because the analytic method is only for LCDM.
#
# HOD_design_m_4_n_400_tinker.dat : 400 HOD designs for the training set.
#
# EH_test_200COSMO_tinker.dat : the 200 test cosmologies from Tinker.
#
# EH_test_200COSMO_tinker.dat : the 1000 test HODs, just use the first 200.
#
# Cosmo_err.dat : the fractional error of wp estimated from the test boxes.
#
# wp_clustering_emu: folder contains the wp data for training, the columns are rp, wp
#
# test_200COSMO_tinker_wp_clustering_emu: folder contains the wp data for test, same format as training set.
#
# example.py: is an example script for my GP modeling. you should fill out the missing places. My comment on line 31 may not be right: because 0-49 for line 1 and 50-99 for line 2 etc will result repeated HOD sampling for different cosmologies, 400/50=8<40, so a better choice might be just randomly choose 50 HOD for each cosmology. (edited)
# NOTE(review): `dir` shadows the builtin dir() for the rest of this notebook.
dir = '/home/sean/Downloads/Zhongxu_data/for_Sean/'
cosmo_data_fname = 'EH_test_200COSMO_tinker.dat'
hod_data_fname = 'GP_test_HOD_1000.dat'
from os.path import join
# Column layouts documented in the notes above (5 cosmology / 4 HOD params).
cosmo_colnames = ['Om', 'Ob', 'sigma_8', 'h', 'n_s']
cosmo_data = np.loadtxt(join(dir, cosmo_data_fname), delimiter=' ')
hod_colnames = ['M1', 'alpha', 'Mmin', 'sigma_logM']
hod_data = np.loadtxt(join(dir, hod_data_fname), delimiter = ' ')
training_file = '/home/sean/PearceRedMagicXiCosmoFixedNd.hdf5'
#test_file = '/home/sean/PearceRedMagicXiCosmoTest.hdf5'
# + active=""
# training_file = '/home/sean/PearceRedMagicXiCosmo.hdf5'
# test_file = '/home/sean/PearceRedMagicXiCosmoTest.hdf5'
# -
em_method = 'nn'
split_method = 'random'
# Scale factor a=1 -> redshift z=0.
a = 1.0
z = 1.0/a - 1.0
# + active=""
# emu.scale_bin_centers
# -
fixed_params = {'z':z}#, 'r':17.0389993 }
# + active=""
# n_leaves, n_overlap = 50, 1
# emu = ExtraCrispy(training_file, n_leaves, n_overlap, split_method, method = em_method, fixed_params=fixed_params,
#                  custom_mean_function = None, downsample_factor = 1.0)
# -
# NOTE(review): 'hidden_layer_sizes': (10) is the int 10, not a one-element
# tuple — if a tuple is intended (sklearn-style), it should be (10,). Confirm
# against what the 'nn' emulator backend expects.
emu = OriginalRecipe(training_file, method = em_method, fixed_params=fixed_params,
                    hyperparams = {'hidden_layer_sizes': (10),
                                   'activation': 'relu', 'verbose': True,
                                   'tol': 1e-8, 'learning_rate_init':1e-4,\
                                   'max_iter':10, 'alpha':0, 'early_stopping':False, 'validation_fraction':0.3})
# + active=""
# #convert zhongxu's data to my format
# emu.get_param_names()
# + active=""
# my_cosmo_data = np.zeros((cosmo_data.shape[0], 7))
# my_hod_data = np.zeros((200, 4))
#
# my_cosmo_data[:,0] = cosmo_data[:,1]*(cosmo_data[:,3])**2
# my_cosmo_data[:,1] = cosmo_data[:,0]*(cosmo_data[:,3])**2 - my_cosmo_data[:,0]
# my_cosmo_data[:,2] = -1.0
# my_cosmo_data[:,3] = cosmo_data[:,4]
# #my_cosmo_data[:,4]
# my_cosmo_data[:, 5] = cosmo_data[:,3]*100
# my_cosmo_data[:, 6] = 3.046
# + active=""
# from classy import Class
# cosmo = Class()
#
# for i, row in enumerate(cosmo_data):
# Om, Ob, sigma_8, h, n_s = row
# params = {
# 'output': 'mPk',
# 'sigma8': sigma_8,
# 'n_s': n_s,
# 'h': h,
# 'non linear': 'halofit',
# 'omega_b': Ob*h*h,
# 'omega_cdm': (Om-Ob)*h*h,
# 'z_pk': 0.0}
#
#
# cosmo.set(params)
# cosmo.compute()
# #print cosmo.pm
# val = cosmo.get_current_derived_parameters(['ln10^{10}A_s'])['ln10^{10}A_s']
# my_cosmo_data[i,4] = val
# + active=""
# my_hod_data[:,0] = hod_data[:200,3]
# my_hod_data[:,1] = np.log10(hod_data[:200,2])
# my_hod_data[:,2] = np.log10(hod_data[:200,0])
# my_hod_data[:,3] = hod_data[:200,1]
# +
clustering_dir = 'test_200COSMO_tinker_wp_clustering_emu/'
from glob import glob
# One wp(rp) file per (cosmology, HOD) pair; sorted for a deterministic order.
clustering_files = sorted(glob(join(dir, clustering_dir) + '*') )
# -
# 9 radial bins per file; zx holds [7 cosmo params | 4 HOD params | log10 rp].
nbins = 9
zx = np.zeros((len(clustering_files)*nbins, 12))
zy = np.zeros((len(clustering_files)*nbins,))
# +
# Fill zx/zy from every clustering file (Python 2 prints — this notebook's
# kernel is python2).  NOTE(review): my_cosmo_data and my_hod_data are only
# defined in the inactive (active="") raw cells above, so this loop raises
# NameError unless those cells are run first — confirm intent.
for i, cf in enumerate(clustering_files):
    if i%1000==0:
        print i
    data = np.loadtxt(cf, delimiter = ' ')
    rs = np.log10(data[:,0])
    wp = np.log10(data[:,1])
    # File names look like 'wp_<cosmo>_..._<hod>_...'; pull both indices out.
    fbase = cf.split('/')[-1]
    split_fbase = fbase.split('_')
    cosmo, hod = int(split_fbase[1]), int(split_fbase[3])
    zx[i*nbins:(i+1)*nbins, :7] = my_cosmo_data[cosmo]
    zx[i*nbins:(i+1)*nbins, 7:-1] = my_hod_data[hod]
    zx[i*nbins:(i+1)*nbins, -1] = rs
    zy[i*nbins:(i+1)*nbins] = wp
# -
# NOTE(review): np.savetxt writes plain text despite the .npy extension;
# np.save/np.load would be the binary equivalent.
np.savetxt('zx.npy', zx)
np.savetxt('zy.npy', zy)
zx = np.loadtxt('zx.npy')
zy = np.loadtxt('zy.npy')
# +
# Shuffle all training rows (size factor 1.0 => a permutation of everything)
# and undo the emulator's internal y standardization.
idxs = np.random.choice(emu.x.shape[0], size = int(emu.x.shape[0]*1.0), replace=False)
x_train, y_train,yerr_train = emu.x[idxs, :],emu.y[idxs],emu.yerr[idxs]
y_train = y_train*(emu._y_std + 1e-5) + emu._y_mean
yerr_train = yerr_train*(emu._y_std+1e-5)
# -
idxs
len(emu.get_param_names())
# Leave-one-cosmology-out split: the first 7 columns of x identify the cosmology.
unique_cosmos = np.unique(x_train[:, :7], axis =0)#*(emu._x_std[:7]+1e-5) + emu._x_mean[:7]
unique_cosmos.shape
left_out_cosmo = unique_cosmos[0]
is_loc = np.all(x_train[:,:7] == left_out_cosmo, axis = 1)
x_test = x_train[is_loc]
x_train = x_train[~is_loc]
y_test = y_train[is_loc]
y_train = y_train[~is_loc]
yerr_test = yerr_train[is_loc]
yerr_train = yerr_train[~is_loc]
# + active=""
# x_test, y_test, ycov_test, _ = emu.get_data(test_file, fixed_params, None, False)
# x_test = (x_test - emu._x_mean)/(emu._x_std+1e-5)
#
# #split_ycov = np.dsplit(ycov_test, ycov_test.shape[-1])
# #fullcov = block_diag(*[yc[:,:,0] for yc in split_ycov])
# #yerr_test = np.sqrt(np.hstack(np.diag(syc[:,:,0]) for syc in split_ycov))
# + active=""
# from sklearn.model_selection import train_test_split
# x_train, x_test, y_train, y_test, yerr_train, _ = train_test_split(x_train, y_train,yerr_train, test_size = 0.3, shuffle = True)
# + active=""
# pnames = emu.get_param_names()
# for i in xrange(x_train.shape[1]):
# for j in xrange(i+1,x_train.shape[1]):
# plt.scatter(x_train[:,i], x_train[:,j])
# plt.scatter(x_test[:,i], x_test[:,j])
# plt.title('%s vs %s'%(pnames[i], pnames[j]))
# plt.show();
# + active=""
# plt.plot(x_np[:emu.n_bins, -1:], y_np[:emu.n_bins])
# -
def n_layer_fc(x, hidden_sizes, training=False, l = 1e-8):
    """Plain fully-connected ReLU network.

    Builds one ReLU dense layer per entry of `hidden_sizes`, followed by a
    linear layer producing one scalar per sample.  Every dense layer shares
    He-style (variance-scaling) initialization and L2 regularization of
    strength `l`.  `training` is accepted for API symmetry but unused.
    """
    he_init = tf.variance_scaling_initializer(scale=2.0)
    l2_reg = tf.contrib.layers.l2_regularizer(l)
    hidden = x
    for width in hidden_sizes:
        hidden = tf.layers.dense(hidden, width, activation=tf.nn.relu,
                                 kernel_initializer=he_init,
                                 kernel_regularizer=l2_reg)
    # Final linear layer; [:, 0] squeezes (batch, 1) down to (batch,).
    return tf.layers.dense(hidden, 1, kernel_initializer=he_init,
                           kernel_regularizer=l2_reg)[:, 0]
def novel_fc(x, hidden_sizes, training=False, l = (1e-6, 1e-6, 1e-6), p = (0.5, 0.5, 0.5),\
             n_cosmo_params = 7, n_hod_params = 4):
    """Two-branch network: separate towers for the cosmology and HOD columns
    of `x`, whose outputs are concatenated for a later "cap" network.

    hidden_sizes is a 3-tuple (cosmo_sizes, hod_sizes, cap_sizes); only the
    first two are used here (cap_sizes is consumed by pretrain_cap/final_cap).
    `l` and `p` are per-branch regularization strengths and dropout rates,
    either scalars (shared) or 3-tuples.  Each branch layer is
    dense -> dropout -> batch-norm -> ReLU.
    """
    cosmo_sizes, hod_sizes, cap_sizes = hidden_sizes
    # Broadcast scalar regularization / dropout settings across the branches.
    if type(l) is float:
        cosmo_l, hod_l, cap_l = l, l, l
    else:
        cosmo_l, hod_l, cap_l = l
    if type(p) is float:
        cosmo_p, hod_p, cap_p = p,p,p
    else:
        cosmo_p, hod_p, cap_p = p
    initializer = tf.variance_scaling_initializer(scale=2.0)
    #onlly for duplicating r
    # The last input column (the r bin) is appended to the cosmology branch
    # as well, so both towers see the scale being predicted.
    n_params = n_cosmo_params+n_hod_params
    cosmo_x = tf.slice(x, [0,0], [-1, n_cosmo_params])
    cosmo_x = tf.concat(values=[cosmo_x, tf.slice(x, [0, n_params-1], [-1, -1]) ], axis = 1)
    #print tf.shape(cosmo_x)
    #print tf.shape(tf.slice(x, [0, n_params-1], [-1, -1]))
    # HOD branch input: everything after the cosmology columns (includes r).
    hod_x = tf.slice(x, [0, n_cosmo_params], [-1, -1])
    # Cosmology tower: L2-regularized dense -> dropout -> batch-norm -> ReLU.
    cosmo_regularizer = tf.contrib.layers.l2_regularizer(cosmo_l)
    cosmo_out = cosmo_x
    for size in cosmo_sizes:
        fc_output = tf.layers.dense(cosmo_out, size,
                            kernel_initializer = initializer,\
                               kernel_regularizer = cosmo_regularizer)
        bd_out = tf.layers.dropout(fc_output, cosmo_p, training = training)
        bn_out = tf.layers.batch_normalization(bd_out, axis = -1, training=training)
        cosmo_out = tf.nn.relu(bn_out)#tf.nn.leaky_relu(bn_out, alpha=0.01)
    # HOD tower: same structure but with L1 regularization.
    hod_regularizer = tf.contrib.layers.l1_regularizer(hod_l)
    hod_out = hod_x
    for size in hod_sizes:
        fc_output = tf.layers.dense(hod_out, size,
                            kernel_initializer = initializer,\
                               kernel_regularizer = hod_regularizer)
        bd_out = tf.layers.dropout(fc_output, hod_p, training = training)
        bn_out = tf.layers.batch_normalization(bd_out, axis = -1, training=training)
        hod_out = tf.nn.relu(bn_out)#tf.nn.leaky_relu(bn_out, alpha=0.01)
    # Concatenate the branch outputs; the cap network reduces this to a scalar.
    cap_out=tf.concat(values=[cosmo_out, hod_out], axis = 1)
    return cap_out
def pretrain_cap(cap_input, hidden_sizes, training=False, l = (1e-6, 1e-6, 1e-6), p = (0.5, 0.5, 0.5)):
    """Prediction head ("cap") stacked on the merged branch output during
    the pretraining phase.

    cap_input    -- concatenated cosmo+HOD features (e.g. from novel_fc)
    hidden_sizes -- 3-tuple (cosmo_sizes, hod_sizes, cap_sizes); only
                    cap_sizes is used here
    l, p         -- L2 strength / dropout rate; shared floats or 3-tuples
                    of which only the cap entry is used
    Returns a (batch,) tensor of scalar predictions.
    """
    initializer = tf.variance_scaling_initializer(scale=2.0)
    _cosmo_sizes, _hod_sizes, cap_sizes = hidden_sizes
    if type(l) is float:
        cap_l = l
    else:
        _cosmo_l, _hod_l, cap_l = l
    if type(p) is float:
        cap_p = p
    else:
        _cosmo_p, _hod_p, cap_p = p
    regularizer = tf.contrib.layers.l2_regularizer(cap_l)
    out = cap_input
    for width in cap_sizes:
        dense = tf.layers.dense(out, width,
                                kernel_initializer=initializer,
                                kernel_regularizer=regularizer)
        dropped = tf.layers.dropout(dense, cap_p, training=training)
        normed = tf.layers.batch_normalization(dropped, axis=-1, training=training)
        out = tf.nn.relu(normed)
    # Single linear unit; [:, 0] squeezes (batch, 1) down to (batch,)
    pred = tf.layers.dense(out, 1, kernel_initializer=initializer,
                           kernel_regularizer=regularizer)[:, 0]
    return pred
def final_cap(cap_input, hidden_sizes, training=False, l = (1e-6, 1e-6, 1e-6), p = (0.5, 0.5, 0.5)):
    """Prediction head for the fine-tuning phase; structurally identical to
    pretrain_cap (kept separate so the two phases can get distinct variables).

    cap_input    -- concatenated cosmo+HOD features (e.g. from novel_fc)
    hidden_sizes -- 3-tuple (cosmo_sizes, hod_sizes, cap_sizes); only
                    cap_sizes is used here
    l, p         -- L2 strength / dropout rate; shared floats or 3-tuples
                    of which only the cap entry is used
    Returns a (batch,) tensor of scalar predictions.
    """
    initializer = tf.variance_scaling_initializer(scale=2.0)
    _cosmo_sizes, _hod_sizes, cap_sizes = hidden_sizes
    if type(l) is float:
        cap_l = l
    else:
        _cosmo_l, _hod_l, cap_l = l
    if type(p) is float:
        cap_p = p
    else:
        _cosmo_p, _hod_p, cap_p = p
    regularizer = tf.contrib.layers.l2_regularizer(cap_l)
    out = cap_input
    for width in cap_sizes:
        dense = tf.layers.dense(out, width,
                                kernel_initializer=initializer,
                                kernel_regularizer=regularizer)
        dropped = tf.layers.dropout(dense, cap_p, training=training)
        normed = tf.layers.batch_normalization(dropped, axis=-1, training=training)
        out = tf.nn.relu(normed)
    # Single linear unit; [:, 0] squeezes (batch, 1) down to (batch,)
    pred = tf.layers.dense(out, 1, kernel_initializer=initializer,
                           kernel_regularizer=regularizer)[:, 0]
    return pred
def optimizer_init_fn(learning_rate = 1e-7):
    """Factory for the Adam optimizer used by train(); default betas/epsilon."""
    return tf.train.AdamOptimizer(learning_rate=learning_rate)
from sklearn.metrics import r2_score, mean_squared_error
def check_accuracy(sess, val_data, batch_size, x, weights, preds, is_training=None):
    """Evaluate the current network on val_data; print the mean per-batch R^2
    and the mean fractional error in de-standardized units (via the global
    `emu`'s _y_mean/_y_std).

    sess        -- live tf.Session with the trained graph
    val_data    -- (val_x, val_y) arrays
    x, weights, preds, is_training -- graph placeholders/tensors from train()
    """
    val_x, val_y = val_data
    perc_acc, scores = [], []
    for idx in range(0, val_x.shape[0], batch_size):
        feed_dict = {x: val_x[idx:idx+batch_size],
                     is_training: 0}
        y_pred = sess.run(preds, feed_dict=feed_dict)
        scores.append(r2_score(val_y[idx:idx+batch_size], y_pred))
        # BUG FIX: this used to *assign* perc_acc each iteration, so only the
        # last batch contributed to the printed percentage; now we accumulate
        # every batch and average below.
        perc_acc.append(np.mean(emu._y_std*np.abs(val_y[idx:idx+batch_size]-y_pred)/np.abs(emu._y_std*val_y[idx:idx+batch_size] + emu._y_mean)))
    # print(...) with a single argument behaves the same under Python 2 and 3.
    print('Val score: %.6f, %.2f %% Loss'%(np.mean(np.array(scores)), 100*np.mean(np.array(perc_acc))))
# TensorFlow device placement used for graph construction and training below.
device = '/device:GPU:0'
#device = '/cpu:0'
def train(model_init_fn, optimizer_init_fn,num_params, pretrain_data, train_data, val_data, hidden_sizes,\
          num_pretrain_epochs = 500, num_epochs=1000, batch_size = 200, l = 1e-6, p = 0.5, print_every=10):
    """Build the two-branch network and train it in two phases.

    Phase 1 ("pretrain") fits on pretrain_data with unit sample weights;
    phase 2 trains on train_data, jittering each target by Gaussian noise
    scaled by its error bar and weighting samples by 1/yerr.  Validation
    accuracy is printed every `print_every` epochs.

    model_init_fn     -- e.g. novel_fc; builds the shared trunk
    optimizer_init_fn -- factory returning a tf.train Optimizer
    num_params        -- number of input features (columns of x)
    pretrain_data     -- (x, y) tuple for phase 1
    train_data        -- (x, y, yerr) tuple for phase 2
    val_data          -- (x, y) tuple consumed by check_accuracy
    hidden_sizes      -- (cosmo_sizes, hod_sizes, cap_sizes)
    """
    tf.reset_default_graph()
    pretrain = True
    with tf.device(device):
        # Construct the computational graph we will use to train the model. We
        # use the model_init_fn to construct the model, declare placeholders for
        # the data and labels
        x = tf.placeholder(tf.float32, [None,num_params])
        y = tf.placeholder(tf.float32, [None])
        weights = tf.placeholder(tf.float32, [None])
        is_training = tf.placeholder(tf.bool, name='is_training')
        cap_input = model_init_fn(x, hidden_sizes, is_training, l = l, p=p)
        # NOTE(review): `pretrain` is always True at this point, so the graph
        # is built with pretrain_cap only; setting `pretrain = False` further
        # down cannot affect the already-built graph — confirm this is intended.
        if pretrain:
            preds = pretrain_cap(cap_input, hidden_sizes, is_training, l=l, p=p)
        else:
            preds = final_cap(cap_input, hidden_sizes, is_training, l=l, p=p)
    # Weighted mean-squared-error loss, pinned to the CPU.
    with tf.device('/cpu:0'):
        loss = tf.losses.mean_squared_error(labels=y,\
                                            predictions=preds, weights = weights)
        pass
    with tf.device(device):
        optimizer = optimizer_init_fn()
        # Run batch-norm moving-average updates together with each train step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = optimizer.minimize(loss)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        pretrain_x, pretrain_y = pretrain_data
        # NOTE(review): shuffling range() in place only works under Python 2,
        # where range() returns a list; this notebook uses Python 2 (xrange).
        rand_idxs = range(pretrain_x.shape[0])
        for epoch in range(num_pretrain_epochs):
            np.random.shuffle(rand_idxs)
            losses = []
            for idx in xrange(0, pretrain_x.shape[0], batch_size):
                # Phase 1: clean targets, unit weights.
                feed_dict = {x: pretrain_x[rand_idxs[idx:idx+batch_size]],\
                             y: pretrain_y[rand_idxs[idx:idx+batch_size]],\
                             weights: np.ones_like(pretrain_y[rand_idxs[idx:idx+batch_size]]),\
                             is_training:1}
                loss_np, _ = sess.run([loss, train_op], feed_dict=feed_dict)
                losses.append(loss_np)
            if epoch % print_every == 0:
                loss_avg = np.mean(np.array(losses))
                print('Epoch %d, loss = %e' % (epoch, loss_avg))
                check_accuracy(sess, val_data, batch_size, x, weights, preds, is_training=is_training)
        pretrain = False
        train_x, train_y, train_yerr = train_data
        rand_idxs = range(train_x.shape[0])
        for epoch in range(num_epochs):
            np.random.shuffle(rand_idxs)
            losses = []
            for idx in xrange(0, train_x.shape[0], batch_size):
                yerrbatch = train_yerr[rand_idxs[idx:idx+batch_size]]
                _bs = yerrbatch.shape[0]
                # Phase 2: jitter targets by their error bars and down-weight
                # noisy points by 1/yerr.
                feed_dict = {x: train_x[rand_idxs[idx:idx+batch_size]],\
                             y: train_y[rand_idxs[idx:idx+batch_size]] + np.random.randn(_bs)*yerrbatch,\
                             weights: 1/yerrbatch,\
                             is_training:1}
                loss_np, _ = sess.run([loss, train_op,], feed_dict=feed_dict)
                losses.append(loss_np)
            if epoch % print_every == 0:
                loss_avg = np.mean(np.array(losses))
                print('Epoch %d, loss = %e' % (epoch, loss_avg))
                check_accuracy(sess, val_data, batch_size, x, weights, preds, is_training=is_training)
# Kick off training: pretrain on (zx, zy), then fine-tune on the noisy
# training set; the list gives the (cosmo, hod, cap) tower widths.
train(novel_fc, optimizer_init_fn, x_train.shape[1],\
      (zx, zy), (x_train, y_train, yerr_train), (x_test, y_test),\
      [(100,100), (200,100,200), (500,100)], num_pretrain_epochs = 500, num_epochs= int(1e3),\
      batch_size = 200, l = (1e-6, 1e-8, 1e-8), p = (0.333, 0.1, 0.1),\
      print_every = 100)
# Mean goodness-of-fit of the emulator on the training set, in log-fractional
# and fractional units.
np.abs(emu.goodness_of_fit(training_file, statistic = 'log_frac')).mean()
np.abs(emu.goodness_of_fit(training_file, statistic = 'frac')).mean()
# NOTE(review): `gof` is only assigned in a later cell; this cell relies on
# out-of-order notebook execution — confirm before running top to bottom.
fit_idxs = np.argsort(gof.mean(axis = 1))
emu.goodness_of_fit(training_file).mean()#, statistic = 'log_frac')).mean()
model = emu._emulator
ypred = model.predict(emu.x)
# Distribution and mean of de-standardized fractional emulator errors.
plt.hist( np.log10( (emu._y_std+1e-5)*np.abs(ypred-emu.y)/np.abs((emu._y_std+1e-5)*emu.y+emu._y_mean) ))
( (emu._y_std+1e-5)*np.abs(ypred-emu.y)/np.abs((emu._y_std+1e-5)*emu.y+emu._y_mean) ).mean()
emu._y_mean, emu._y_std
# Inspect the ten best-fit points: per-bin residuals plus emu-vs-truth curves
# (Python 2 print statements, as in the rest of this notebook).
for idx in fit_idxs[:10]:
    print gof[idx].mean()
    print (ypred[idx*emu.n_bins:(idx+1)*emu.n_bins]-emu.y[idx*emu.n_bins:(idx+1)*emu.n_bins])/emu.y[idx*emu.n_bins:(idx+1)*emu.n_bins]
    plt.plot(emu.scale_bin_centers, ypred[idx*emu.n_bins:(idx+1)*emu.n_bins], label = 'Emu')
    plt.plot(emu.scale_bin_centers, emu.y[idx*emu.n_bins:(idx+1)*emu.n_bins], label = 'True')
    plt.legend(loc='best')
    plt.xscale('log')
    plt.show()
# Un-standardize one parameter vector for inspection.
print dict(zip(emu.get_param_names(), emu.x[8*emu.n_bins, :]*emu._x_std+emu._x_mean))
emu.get_param_names()
# + active=""
# #print emu.x.shape
# #print emu.downsample_x.shape
# if hasattr(emu, "_emulators"):
# print emu._emulators[0]._x.shape
# else:
# print emu._emulator._x.shape
# -
emu._ordered_params
# + active=""
# x, y, y_pred = emu.goodness_of_fit(training_file, statistic = 'log_frac')
# + active=""
# x, y, y_pred
# + active=""
# N = 25
# for _y, yp in zip(y[:N], y_pred[:N]):
# #plt.plot(emu.scale_bin_centers , (_y - yp)/yp ,alpha = 0.3, color = 'b')
#
# plt.plot(emu.scale_bin_centers, _y, alpha = 0.3, color = 'b')
# plt.plot(emu.scale_bin_centers, yp, alpha = 0.3, color = 'r')
#
# plt.loglog();
# + active=""
# %%timeit
# #truth_file = '/u/ki/swmclau2/des/PearceRedMagicWpCosmoTest.hdf5'
# gof = emu.goodness_of_fit(training_file, N = 100, statistic = 'log_frac')
# -
# Fractional goodness-of-fit per training point / radial bin.
gof = emu.goodness_of_fit(training_file, statistic = 'frac')
print gof.mean()
for row in gof:
    print row
gof = emu.goodness_of_fit(training_file, statistic = 'frac')
print gof.mean()
# R^2 of the underlying emulator on its own training inputs.
model = emu._emulator
model.score(emu.x, emu.y)
ypred = model.predict(emu.x)
np.mean(np.abs(ypred-emu.y)/emu.y)
# Mean |fractional error| per radial bin against 1%/5%/10% reference lines.
plt.plot(emu.scale_bin_centers, np.abs(gof.mean(axis = 0)) )
plt.plot(emu.scale_bin_centers, np.ones_like(emu.scale_bin_centers)*0.01)
plt.plot(emu.scale_bin_centers, np.ones_like(emu.scale_bin_centers)*0.05)
plt.plot(emu.scale_bin_centers, np.ones_like(emu.scale_bin_centers)*0.1)
plt.loglog();
# All individual error curves, with a 1% reference line.
plt.plot(emu.scale_bin_centers, np.abs(gof.T),alpha = 0.1, color = 'b')
plt.plot(emu.scale_bin_centers, np.ones_like(emu.scale_bin_centers)*0.01, lw = 2, color = 'k')
plt.loglog();
# NOTE(review): `i` here is left over from a previous run of the loop below —
# this line fails on a fresh kernel.
gof[:,i].shape
# Histogram of log10 errors for each radial bin.
for i in xrange(gof.shape[1]):
    plt.hist(np.log10(gof[:, i]), label = str(i), alpha = 0.2);
plt.legend(loc='best')
plt.show()
# -
# --- end of notebook: notebooks/Test Tf NN Emu Zhongxu Data.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VMware EXSi Visualisations 2
import requests
import pandas as pd
import json
import matplotlib.pyplot as plt
from matplotlib.colors import rgb2hex
from itertools import cycle
from IPython.core.display import display, HTML
from modules.Network import *
# Widen the notebook container and add a button that toggles visibility of
# all input cells (pure front-end JavaScript; no Python state is changed).
display(HTML("<style>.container { width:70% !important; }</style>"))
HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide();
} else {
$('div.input').show();
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<form action="javascript:code_toggle()"><input type="submit" value="Click here to toggle on/off the raw code."></form>''')
# We get the json file with the information from: https://wmwaredata.s3.us-east-2.amazonaws.com/machines.json
# Download the VM inventory JSON and load it into a DataFrame.
url = 'https://wmwaredata.s3.us-east-2.amazonaws.com/machines.json'
r = requests.get(url, allow_redirects=True)
# Use a context manager so the file handle is closed (and data flushed)
# before pandas reads the file back in; the old one-liner leaked the handle.
with open('data/machines.json', 'wb') as fh:
    fh.write(r.content)
json_file = 'data/machines.json'
df = pd.read_json(json_file)
df
# ## Option 1: A subnetwork for each VM
# +
# Build a vis.js node/edge description: one star-shaped subnetwork per VM,
# with the VM name node in the centre and one leaf node per attribute.
network = Network()
network.groups = ['Name', 'GuestId', 'NumCpu', 'MemoryMB', 'PowerState', 'HardwareVersion', 'Version', 'ConnectionState']
# Pastel palette: 9 colours from Pastel1 followed by 8 from Pastel2.
pastels = [rgb2hex(plt.cm.Pastel1(k)) for k in range(9)]
pastels += [rgb2hex(plt.cm.Pastel2(k)) for k in range(8)]
# One colour per VM row, cycling through the palette as needed.
colors = [color for _, color in zip(range(len(df)), cycle(pastels))]
for i, color in enumerate(colors):
    row = df.iloc[i]
    # Centre node: VM name + Id, drawn as a box.
    hub = row['Name'] + "\n" + row['Id']
    network.add_node(hub, color=color, shape='box', repeat_nodes=True)
    # Leaf nodes: one per remaining attribute group.
    leaves = []
    for group in network.groups[1:]:
        leaf = group + ':\n' + str(row[group])
        leaves.append(leaf)
        network.add_node(leaf, color=color, repeat_nodes=True)
    # Connect each leaf to the hub; labels may repeat across VMs, so use the
    # most recently assigned node id for every label.
    for leaf in leaves:
        network.add_edge(network.nodesIds[hub][-1], network.nodesIds[leaf][-1])
network.save_to_json('data/nodes_edges_3.json')
# + language="html"
# <div id="mynetwork"></div>
# + language="javascript"
# requirejs.config({
# paths: {
# vis: 'vis'
# }
# });
#
#
# require(['vis'], function(vis){
#
# var json = $.getJSON("data/nodes_edges_3.json")
# .done(function(data){
# var data = {
# nodes: data.nodes,
# edges: data.edges
# };
# var network = new vis.Network(container, data, options);
# });
#
# var options = {
# width: '1200px',
# height: '1000px',
# locale: 'en',
# physics: true,
# interaction: {
# hover:true,
# tooltipDelay: 300
# },
# layout: {
# randomSeed: 1,
# improvedLayout: true,
# }
# };
#
# var container = document.getElementById("mynetwork");
# });
# -
# ## Option 2: Connect all subnetworks to VMHost node
# +
# Same per-VM subnetworks as Option 1, plus a single VMHost node connected
# to every VM's name node (reuses `colors` from the cell above).
network = Network()
network.groups = ['Name', 'GuestId', 'NumCpu', 'MemoryMB', 'PowerState', 'HardwareVersion', 'Version', 'ConnectionState']
# Remember each VM's name-node label so the host can be linked to it later.
name_labels = []
for i, color in zip(range(len(df)), colors):
    # NODES
    labels = []
    label = df.iloc[i]['Name'] + "\n" + df.iloc[i]['Id']
    labels.append(label)
    name_labels.append(label)
    network.add_node(label, color=color, shape='box', repeat_nodes=True)
    for group in network.groups[1:]:
        label = group + ':\n' + str(df.iloc[i][group])
        labels.append(label)
        network.add_node(label, color=color, repeat_nodes=True)
    # EDGES: connect every attribute node to this VM's name node.
    for label in labels[1:]:
        network.add_edge(network.nodesIds[labels[0]][-1], network.nodesIds[label][-1])
# is_unique presumably comes from the `modules.Network` star import and checks
# that all rows share one host — TODO confirm.
if is_unique(df['VMHost']):
    # Host node in a distinct colour.
    host_label = 'VMHost:\n' + df.iloc[0]['VMHost']
    network.add_node(label=host_label, color="#5CD6D6", shape='box')
    # Connect the host node to all VM name nodes with undirected edges.
    for label in name_labels:
        network.add_edge(network.nodesIds[host_label][0], network.nodesIds[label][0], color="#5CD6D6", arrows="")
network.save_to_json('data/nodes_edges_4.json')
# + language="html"
# <div id="mynetwork2"></div>
# + language="javascript"
# requirejs.config({
# paths: {
# vis: 'vis'
# }
# });
#
#
# require(['vis'], function(vis){
#
# var json = $.getJSON("data/nodes_edges_4.json")
# .done(function(data){
# var data = {
# nodes: data.nodes,
# edges: data.edges
# };
# var network = new vis.Network(container, data, options);
# });
#
# var options = {
# width: '1200px',
# height: '1000px',
# locale: 'en',
# physics: true,
# interaction: {
# hover:true,
# tooltipDelay: 300
# },
# layout: {
# randomSeed: 1,
# improvedLayout: true,
# }
# };
#
# var container = document.getElementById("mynetwork2");
# });
# -
# --- end of notebook: upwork-devs/Pappaterra-Lucia/VMs-visualizations-2.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="p8kxqpQu_GIt" executionInfo={"status": "ok", "timestamp": 1604657433156, "user_tz": -330, "elapsed": 1381, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}}
import torch
from torchvision import transforms
import torch.nn as nn
# + id="ZvaYAvnP_JVC" executionInfo={"status": "ok", "timestamp": 1604657435782, "user_tz": -330, "elapsed": 1456, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}}
import torchvision.datasets as datasets
# MNIST train/test splits; downloaded on first run, images as float tensors
# in [0, 1] via ToTensor.
train_dataset = datasets.MNIST(root='../../data/',
                               train=True,
                               transform=transforms.ToTensor(),
                               download=True)
test_dataset = datasets.MNIST(root='../../data/',
                              train=False,
                              transform=transforms.ToTensor())
# + id="f2tHhyRC_KsE" executionInfo={"status": "ok", "timestamp": 1604657439383, "user_tz": -330, "elapsed": 1381, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}}
# Shuffle only the training loader; evaluation order doesn't matter.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=32,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=32,
                                          shuffle=False)
# + id="XKarMpqH_POc" executionInfo={"status": "ok", "timestamp": 1604657443530, "user_tz": -330, "elapsed": 1917, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}}
class RNN(nn.Module):
    """Many-to-one LSTM classifier: consumes a (batch, seq_len, input_size)
    sequence and classifies from the final time step's hidden state."""

    def __init__(self, input_size, hidden_size, num_layers,
                 num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> inputs/outputs are (batch, seq, feature).
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Return (batch, num_classes) logits for input x of shape
        (batch, seq_len, input_size)."""
        # BUG FIX: the initial hidden/cell states were allocated on a
        # notebook-global `device`, coupling the class to external state and
        # breaking if the module lives elsewhere. Derive the device from x.
        h = torch.zeros(self.num_layers, x.size(0),
                        self.hidden_size, device=x.device)
        c = torch.zeros(self.num_layers, x.size(0),
                        self.hidden_size, device=x.device)
        out, _ = self.lstm(x, (h, c))
        # Classify from the last time step only (many-to-one).
        out = self.fc(out[:, -1, :])
        return out
# + id="t342I2Re_g7u" executionInfo={"status": "ok", "timestamp": 1604657449933, "user_tz": -330, "elapsed": 1333, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}}
def get_default_device():
    """Pick GPU if available, else CPU."""
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def to_device(data, device):
    """Move a tensor — or a (possibly nested) list/tuple of tensors — to the
    chosen device; nested containers come back as lists."""
    if isinstance(data, (list, tuple)):
        moved = []
        for item in data:
            moved.append(to_device(item, device))
        return moved
    return data.to(device, non_blocking=True)
class DeviceDataLoader():
    """Wrap a dataloader so every batch it yields is moved to a device."""

    def __init__(self, dl, device):
        self.dl = dl          # underlying iterable of batches
        self.device = device  # target device for every batch

    def __iter__(self):
        """Yield each batch after moving it to the device."""
        return (to_device(batch, self.device) for batch in self.dl)

    def __len__(self):
        """Number of batches in the wrapped loader."""
        return len(self.dl)
# + id="9evhoriP_mqn" executionInfo={"status": "ok", "timestamp": 1604657454858, "user_tz": -330, "elapsed": 1515, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}}
# Resolve the device once at notebook scope; reused by the cells below.
device = get_default_device()
# + id="1tq9LHxv_n05" executionInfo={"status": "ok", "timestamp": 1604657469641, "user_tz": -330, "elapsed": 11896, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}} outputId="3e9ef55e-6ca5-44f2-aa8c-036fc806f17b" colab={"base_uri": "https://localhost:8080/"}
# Hyper parameters
learning_rate = 0.001
sequence_length = 28   # each 28x28 MNIST image is read as 28 rows...
hidden_size = 128
num_classes = 10
# NOTE(review): the DataLoaders above were built with batch_size=32; this 64
# is never used by them — confirm which value is intended.
batch_size = 64
input_size = 28        # ...of 28 pixels each
num_layers = 2
num_epochs = 3
model = RNN(input_size, hidden_size, num_layers, num_classes)
# Move the model's parameters to the selected device.
to_device(model, device)
# + id="PPBJb-iY_sDy" executionInfo={"status": "ok", "timestamp": 1604657477919, "user_tz": -330, "elapsed": 1303, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}}
# Cross-entropy over the 10 digit classes; Adam with the lr set above.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# + id="VZBXWvAr_vWG" executionInfo={"status": "ok", "timestamp": 1604657524272, "user_tz": -330, "elapsed": 42992, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}} outputId="66ba77dc-4bb6-4a05-c990-6ed3caef9287" colab={"base_uri": "https://localhost:8080/"}
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Reshape (batch, 1, 28, 28) images into (batch, 28, 28) sequences.
        images = images.reshape(-1, sequence_length, input_size).to(device)
        labels = labels.to(device)
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward pass and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# + id="apZeOSoV_ym_" executionInfo={"status": "ok", "timestamp": 1604657531643, "user_tz": -330, "elapsed": 1900, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gic_7Zi8AHs21rojHGmhGem0zBic5vbDU2Oz8ghUA=s64", "userId": "07021581168947286599"}} outputId="d5fdce81-f512-47af-cf59-9e3fafe48af6" colab={"base_uri": "https://localhost:8080/"}
# Evaluate the model (eval mode + no_grad: no dropout/BN updates, no autograd).
model.eval()
with torch.no_grad():
    right = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, sequence_length,
                                input_size).to(device)
        labels = labels.to(device)
        outputs = model(images)
        # Predicted class = argmax over the logits.
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        right += (predicted == labels).sum().item()
print('Test Accuracy of the model on the 10000 test images: {} %'.format(100 * right / total))
# --- end of notebook: SimpleRNN.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data collection & Munging
#
# In dit notebook zal the ruwe datasets omgezet worden naar makkelijk bewerkbare data in een Pandas dataframe.
#
# #### De XML dataset
# In de XML dataset staat wel ontwikkeling zich afspelen op het veld. Je kunt denken aan bal winst of een goal. Ook wordt er bijghouden waar de ontwikkeling plaats vind. Bijvoorbeeld L2 of R1 (Zie afbeelding).
# <img src="./voetbalveld-indeling.png" width=300>
#
# ##### XML structuur
# De XML is als volgt op gebouwd:
# ```XML
# <ALL_INSTANCES>
# <instance>
# <ID>1</ID>
# <start>4.3110428970</start>
# <end>40.9711876731</end>
# <code>TEG</code>
# <label>
# <group>Naar zone</group>
# <text>M2 TEG</text>
# </label>
# </instance>
# </ALL_INSTANCES>
# ```
# Er komen in ```<ALL_INSTANCES>``` meerdere instances voor.
# Om een xml intelezen hebben van de XML library ElementTree nodig. Hiermee kunnen we makkelijk alle data uit de XML halen. We hebben ook pandas nodig omdat we pandas willen gebruiken voor de analyse.
import xml.etree.ElementTree as ET
import pandas as pd
# De eerste stap zal zijn om de XML dataset om te zetten naar een pandas dataframe. Hiervoor maken we een functie waarin we de root van de dataset kunnen meegeven. We maken er een functie van omdat we dit voor meerdere datasets willen doen.
# <br />
# <br />
# In de functie zullen door alle childeren van de root gaan (we zitten dan in ```<instance>```) en daarvan de subchilderen. voor elke subchild checken we of het een label tag is. Dit doen we omdat de label tag meer subchilderen heeft. Als dat niet zo is dan voegen we de tekst van de tag toe aan de record en als de tag naam niet in de header list zit voegen we die ook toe. Als de tag wel een label loopen we door de childeren van die label tag. Een instance tag kan een verschillende hoeveelheid label tags bevatten. Daarom moeten we meerdere headers toevoegen voor de label tags.
# <br />
# <br />
# Zoals eerder gezegd heeft niet elke instance een gelijke hoeveelheid labels. Bijvoorbeeld de instance met id twee heeft maar 1 label terwijl instance met id 1 een meer dan 10 labels heeft. We zullen dus null waarde krijgen. Dit is niet erg we zullen gewoon de null waarde invullen.
# <br />
# <br />
# Als dat allemaal gedaan is krijgen we een mooie pandas dataset waar we mee kunnen werken.
# +
def xmlRootToDataframe(root):
    """Flatten a SportsCode-style XML export into a pandas DataFrame.

    Expects `root` to contain an <ALL_INSTANCES> element as either its first
    or second child.  Each <instance> child becomes one row; simple tags
    (ID, start, end, code, ...) become scalar string columns, and each
    <label> occurrence becomes a list-valued column named 'label.<n>'.

    Instances carry varying numbers of labels, so shorter rows are padded
    with NaN by the DataFrame constructor.
    """
    # Locate ALL_INSTANCES (fixed: the old code redundantly pre-assigned
    # root[1] before the if/else).
    if root[1].tag == 'ALL_INSTANCES':
        all_instances = root[1]
    else:
        all_instances = root[0]
    all_records = []
    headers = []
    for child in all_instances:
        current_label = 0
        record = []
        for subchild in child:
            if subchild.tag == 'label':
                # Labels hold sub-elements (e.g. group/text); collect their
                # texts as a list and number the column per occurrence.
                label_name = subchild.tag + '.' + str(current_label)
                if label_name not in headers:
                    headers.append(label_name)
                record.append([subsubchild.text for subsubchild in subchild])
                current_label += 1
            else:
                record.append(subchild.text)
                # Fixed: was `subchild.tag not in 'label'`, a substring test
                # that would silently skip tags such as 'a', 'l' or 'be'.
                if subchild.tag not in headers and subchild.tag != 'label':
                    headers.append(subchild.tag)
        all_records.append(record)
    return pd.DataFrame(all_records, columns=headers)
# Parse the four match exports and flatten each one; paths are relative to
# the notebook's working directory.
tree = ET.parse('datasets/new-xml/20160410 FC Utrecht - NEC-new.xml')
root = tree.getroot()
utrechtNec = xmlRootToDataframe(root)
tree= ET.parse('datasets/new-xml/20160417 NEC - Cambuur-new.xml')
root = tree.getroot()
necCambuur = xmlRootToDataframe(root)
tree= ET.parse('datasets/new-xml/20160420 PEC - NEC-new.xml')
root = tree.getroot()
pecNec = xmlRootToDataframe(root)
tree= ET.parse('datasets/new-xml/20160501 NEC - Roda JC-new.xml')
root = tree.getroot()
NecRoda = xmlRootToDataframe(root)
necCambuur.head()
# -
utrechtNec[['ID', 'start', 'end', 'code', 'label.0']].dtypes
# De datatypes van alle kolommen zijn nu objecten. Dit is niet idiaal en zal later beperkingen opleveren. laten we start en end numeric maken. Ook willen we categorical data maken van de code kolom. We maken de id kolom ook gelijk de index.
# <br />
# <br />
# We beginnen de index veranderen daarna zetten we de datatypes om.
# +
def changeDataTypes(df):
    """Use the ID column as a nameless index and convert the start/end
    columns from strings to numbers; returns the modified frame."""
    df = df.set_index('ID')
    df.index.name = None
    # Convert the two time columns in place; other columns stay as objects.
    for col in ('start', 'end'):
        df[col] = pd.to_numeric(df[col])
    #df.code = df.code.astype('category')
    return df
# Apply the dtype normalisation to all four matches and show the result.
utrechtNec = changeDataTypes(utrechtNec)
necCambuur = changeDataTypes(necCambuur)
pecNec = changeDataTypes(pecNec)
NecRoda = changeDataTypes(NecRoda)
print(utrechtNec[['start','end', 'code', 'label.0']].dtypes)
# -
utrechtNec.head()
# We hebben nu een nog best wel ruwe dataset waar we niet veel mee kunnen. Laten we een dataframe maken die de positie van de bal en wie de bal bezit maken. In de code kolom staat waar de bal zich bevind. We zullen dit extraheren en toevoegen aan een eigen dataset met de x en y als balpostitie.
# +
def balBezitDF(df):
    """Derive a ball-possession table from the raw event frame.

    The `code` column encodes pitch zones like 'L2', 'M3 TEG', 'R1': the
    letter maps to a numeric x coordinate (L->1, M->2, R->3), the digit is
    the y coordinate, and a trailing 'TEG' marks opponent possession.
    Rows whose code contains 'Goal' are kept as events; other codes become
    'Nothing'.  Returns columns: start, end, x, y, bezit, code.
    """
    # Replace the zone letter with a digit, then split into named groups
    # x / y / bezit; rows that match nothing at all are dropped.
    balbezit = df.code.replace({'^L': '1', '^M':'2', '^R':'3'}, regex=True).str.extract('(?P<x>^[0-3])(?P<y>[0-4])(?P<bezit>.TEG)?', expand=True).dropna(how='all')
    tijd = df[['start', 'end']]
    # Keep goal events alongside the zone rows.
    balbezit = pd.concat([balbezit, df[df['code'].str.contains("Goal")]['code']], axis=1)
    balbezit[['x', 'y']] = balbezit[['x', 'y']].fillna(0)
    # Re-attach the event start/end times by index.
    balbezit = pd.merge(tijd,balbezit, left_index = True, right_index=True)
    # No 'TEG' suffix means NEC had possession.
    balbezit.bezit = balbezit.bezit.fillna('NEC')
    balbezit.code = balbezit.code.fillna('Nothing')
    balbezit[['x', 'y']] = balbezit[['x', 'y']].astype('int')
    balbezit.index = balbezit.index.astype('int')
    balbezit.sort_index(inplace=True)
    balbezit = balbezit.reset_index(drop=True)
    return balbezit

# Build the possession tables for all four matches.
utrechtNecBB = balBezitDF(utrechtNec)
necCambuurBB = balBezitDF(necCambuur)
pecNecBB = balBezitDF(pecNec)
NecRodaBB = balBezitDF(NecRoda)
# -
# Nu we de data hebben omgezet naar een dataset waar we mee kunnen werken is het tijd om de data te exporteren naar een csv om het later te kunnen gebruiken in een ander notebook. We maken net zoals de vorige keren een functie aan die dit voor ons doet.
# +
def exportDataframe(df, naam):
    """Write df to datasets/bewerkte-datasets/<naam>.csv with its headers."""
    path = 'datasets/bewerkte-datasets/{}.csv'.format(naam)
    df.to_csv(path_or_buf=path, header=list(df))
# Persist the possession tables for use by the analysis notebooks.
exportDataframe(utrechtNecBB, 'Balbezit Utrecht-NEC')
exportDataframe(necCambuurBB, 'Balbezit NEC-Cambuur')
exportDataframe(pecNecBB, 'Balbezit PEC-NEC')
exportDataframe(NecRodaBB , 'Balbezit NEC-Roda')
# -
# --- end of notebook: Data collection & munging.ipynb ---
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sampling RDDs
# [Introduction to Spark with Python, by <NAME>](https://github.com/jadianes/spark-py-notebooks)
# So far we have introduced RDD creation together with some basic transformations such as `map` and `filter` and some actions such as `count`, `take`, and `collect`.
# This notebook will show how to sample RDDs. Regarding transformations, `sample` will be introduced since it will be useful in many statistical learning scenarios. Then we will compare results with the `takeSample` action.
# +
import findspark
findspark.init()
import pyspark
# One SparkContext per notebook; re-running this cell fails unless the
# previous context is stopped first.
sc = pyspark.SparkContext()
# -
# ## Getting the data and creating the RDD
# In this case we will use the complete dataset provided for the KDD Cup 1999, containing nearly half million network interactions. The file is provided as a Gzip file that we will download locally.
# BUG FIX: in Python 3 `import urllib` does not import the `request`
# submodule, so `urllib.request.urlretrieve` raised AttributeError;
# import the submodule explicitly.
import urllib.request
f = urllib.request.urlretrieve("http://kdd.ics.uci.edu/databases/kddcup99/kddcup.data.gz", "kddcup.data.gz")
# Now we can use this file to create our RDD.
# Spark reads gzipped text transparently; one RDD element per line.
data_file = "./kddcup.data.gz"
raw_data = sc.textFile(data_file)
# ## Sampling RDDs
# In Spark, there are two sampling operations, the transformation `sample` and the action `takeSample`. By using a transformation we can tell Spark to apply successive transformation on a sample of a given RDD. By using an action we retrieve a given sample and we can have it in local memory to be used by any other standard library (e.g. Scikit-learn).
# ### The `sample` transformation
# The `sample` transformation takes up to three parameters. First is whether the sampling is done with replacement or not. Second is the sample size as a fraction. Finally we can optionally provide a *random seed*.
# sample(withReplacement, fraction, seed): ~10% sample without replacement,
# seeded for reproducibility.
raw_data_sample = raw_data.sample(False, 0.1, 1234)
sample_size = raw_data_sample.count()
total_size = raw_data.count()
print("Sample size is {} of {}".format(sample_size, total_size))
# But the power of sampling as a transformation comes from doing it as part of a sequence of additional transformations. This will show more powerful once we start doing aggregations and key-value pairs operations, and will be specially useful when using Spark's machine learning library MLlib.
# In the meantime, imagine we want to have an approximation of the proportion of `normal.` interactions in our dataset. We could do this by counting the total number of tags as we did in previous notebooks. However we want a quicker response and we don't need the exact answer but just an approximation. We can do it as follows.
# +
from time import time
# transformations to be applied (lazy -- nothing executes until the action below)
raw_data_sample_items = raw_data_sample.map(lambda x: x.split(","))
sample_normal_tags = raw_data_sample_items.filter(lambda x: "normal." in x)
# actions + time
t0 = time()
sample_normal_tags_count = sample_normal_tags.count()  # action: runs map+filter over the ~10% sample only
tt = time() - t0
# Ratio of 'normal.' interactions within the sample, approximating the full-data ratio.
sample_normal_ratio = sample_normal_tags_count / float(sample_size)
print("The ratio of 'normal' interactions is {}".format(round(sample_normal_ratio,3)))
print("Count done in {} seconds".format(round(tt,3)))
# -
# Let's compare this with calculating the ratio without sampling.
# +
# transformations to be applied (same pipeline as above, but over the complete RDD)
raw_data_items = raw_data.map(lambda x: x.split(","))
normal_tags = raw_data_items.filter(lambda x: "normal." in x)
# actions + time
t0 = time()
normal_tags_count = normal_tags.count()  # action: processes every record, hence slower than the sampled run
tt = time() - t0
normal_ratio = normal_tags_count / float(total_size)
print("The ratio of 'normal' interactions is {}".format(round(normal_ratio,3)))
print("Count done in {} seconds".format(round(tt,3)))
# -
# We can see a gain in time. The more transformations we apply after the sampling the bigger this gain. This is because without sampling all the transformations are applied to the complete set of data.
# ### The `takeSample` action
# If what we need is to grab a sample of raw data from our RDD into local memory in order to be used by other non-Spark libraries, `takeSample` can be used.
# The syntax is very similar, but in this case we specify the number of items instead of the sample size as a fraction of the complete data size.
# +
t0 = time()
# takeSample is an action: the 400,000 sampled lines are collected into driver memory.
raw_data_sample = raw_data.takeSample(False, 400000, 1234)
# Filtering and splitting now happen locally (plain Python), not distributed.
normal_data_sample = [x.split(",") for x in raw_data_sample if "normal." in x]
tt = time() - t0
normal_sample_size = len(normal_data_sample)
normal_ratio = normal_sample_size / 400000.0  # takeSample uses an absolute count, not a fraction
print("The ratio of 'normal' interactions is {}".format(normal_ratio))
print("Count done in {} seconds".format(round(tt,3)))
# -
# The process was very similar as before. We obtained a sample of about 10 percent of the data, and then filter and split.
#
# However, it took longer, even with a slightly smaller sample. The reason is that Spark just distributed the execution of the sampling process. The filtering and splitting of the results were done locally in a single node.
| nb3-rdd-sampling/nb3-rdd-sampling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploration: NumPy and Pandas
#
# A fundamental component of mastering data science concepts is applying and practicing them. This exploratory notebook is designed to provide you with a semi-directed space to do just that with the Python, NumPy, and Pandas skills that you either covered in an in-person workshop or through Microsoft Learn. The specific examples in this notebook apply NumPy and Pandas concepts in a life-sciences context, but they are applicable across disciplines and industry verticals.
#
# This notebook is divided into different stages of exploration. Initial suggestions for exploration are more structured than later ones and can provide some additional concepts and skills for tackling data-science challenges with real-world data. However, this notebook is designed to provide you with a launchpad for your personal experimentation with data science, so feel free to add cells and run your own experiments beyond those suggested here. That is the power and the purpose of a platform like Jupyter notebooks!
# ## Setup and Refresher on Notebooks
#
# Before we begin, you will need to import the principal libraries used to explore and manipulate data in Python: NumPy and Pandas. The cell below also imports Matplotlib, the main visualization library in Python. For simplicity and consistency with prior instruction, industry-standard aliases are applied to these imported libraries. The cell below also runs the `%matplotlib inline` magic command, which instructs Jupyter to display Matplotlib output directly in the notebook.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# As it might have been a while since you last worked with Jupyter notebooks, here is a quick refresher on efficiently using them.
#
# ### Notebook Cells
#
# Notebook cells are divided into Markdown text cells and interactive code cells. You can easily recognize code cells by the `In [ ]:` to the left of them.
#
# Code in a code cell has only been executed -- and is thus available for use in other code cells in the notebook -- if there is a number beside the code cell (for example, `In [1]:`).
#
# To run the code in a cell, you can click the **Run** icon at the top left of a code cell or press **`Ctrl` + `Enter`**.
#
# Cells in Jupyter notebooks can be in one of two modes: edit or command. The active cell (the cell that you have clicked on or navigated to) is in edit mode if it has a green border around it. Code cells are in edit mode by default when you click on them. (If, however, you navigate to a code cell by other means, it will be in command mode; simply press **`Enter`** to change it to edit mode.)
#
# An active cell with a blue border is in command mode. You can change from edit mode to command mode in a cell by pressing **`Esc`** or by running the contents of a cell. Running a Markdown cell will render the Markdown.
#
# To add cells to a notebook, select **Insert cell** from the menu at the top of the screen or in the margin to the left of a cell in edit mode. You can delete unneeded cells by clicking the **Delete cell** icon at the top right of the cell.
#
# Entering command mode enables you to use keyboard shortcuts to quickly work with your notebook. Here are some of the most useful ones:
# - Add cell above: **`A`**
# - Add cell below: **`B`**
# - Delete cell: **`D`**,**`D`** (press **`D`** twice)
#
# Cheatography has a good compendium of additional keyboard shortcuts [here](https://cheatography.com/weidadeyue/cheat-sheets/jupyter-notebook/).
#
# You can run code cells from edit mode or command mode.
#
# ### Documentation and Help
#
# Documentation for Python objects and functions is available directly in Jupyter notebooks. In order to access the documentation, simply put a question mark in front of the object or function in a code cell and execute the cell (for example, `?print`). A window containing the documentation will then open at the bottom of the notebook.
#
# On to exploration!
# ## Section 1: Guided Exploration
#
# The dataset provided with this notebook is entitled `genome.txt`. It contains close to 1 million single-nucleotide polymorphisms (SNPs) from build 36 of the human genome. SNPs are variations of a single nucleotide that occurs at specific positions in a genome and thus serve as useful sign posts for pinpointing susceptibility to diseases and other genetic conditions. As a result, they are particularly useful in the field of bioinformatics.
#
# This particular set of SNPs was originally generated by the genomics company 23andME in February 2011 and is drawn from the National Center for Biotechnology Information within the United States National Institutes of Health and is used here as data in the public domain.
# ### Import and Investigate the Data
#
# Recall that `pd.read_csv()` is the most common way to pull external data into a Pandas DataFrame. Despite its name, this function works on a variety of file formats containing delimited data. Go ahead and try it on `genome.txt`.
#
# (If you need a refresher on the syntax for using this function, refer back to the Reactor Pandas materials, to the build-in Jupyter documentation with `?pd.read_csv`, or to the [Pandas documentation](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.read_csv.html).)
# First attempt: read_csv defaults to comma separators, which collapses each
# tab-separated record into a single unnamed column.
genome = pd.read_csv('Data/genome.txt')
genome.head()
# The file is tab-delimited, so pass the separator explicitly.
genome = pd.read_csv('Data/genome.txt', sep='\t')
genome.head()
# Is the DataFrame now in a more useful form? If so, what initial information can gather about the dataset, such as its features or number of observations? (The [`head()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.head.html) and [`info()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.info.html) methods are some of the tools that you can use for your preliminary investigation of the dataset.)
genome.info()
# ### Interpreting the Dataset
#
# **`rsid`** is short for Reference SNP cluster ID; it is the identification number used by researchers and in databases to refer to specific SNPs. (For more information, see the [23andMe](https://customercare.23andme.com/hc/en-us/articles/212196908-What-Are-RS-Numbers-Rsid-) reference page on RS numbers.
#
# **`chromosome`** refers to which of the 23 human chromosomes (22 numbered and one X or Y chromosome) or the mitochondrial chromosome on which an SNP appears.
#
# **`position`** refers to the specific location of a given SNP on the reference human genome. (For more information on the reference human genome, see the [human genome overview page](https://www.ncbi.nlm.nih.gov/grc/human) on the Genome Reference Consortium web site.)
#
# **`genotype`** refers to the pair of nucleotides that form an SNP inherited at a given chromosomal position (one coming from the father and one coming from the mother). (For more information on genotypes -- as well as a list of notable genotypes -- visit the [genotype page](https://www.snpedia.com/index.php/Genotype) at SNPedia.)
# ## Section 2: Intermediate Exploration
#
# Now that you know more about the dataset, the next step is to start asking questions of it. A common next attribute of this particular dataset to investigate is how many SNPs occur per chromosome. Use the `value_counts()` method to determine this.
#
# (**Note:** You might also find it helpful to refer back to the Reactors Pandas material to work through this section.)
# Frequency of SNPs per chromosome, sorted from most to fewest occurrences.
genome['chromosome'].value_counts()
# Numbers are good for precision, but often a visualization can provide a more succinct (and digestible) presentation of data. The `plot()` method is a convenient way to invoke Matplotlib directly from your DataFrame. Use the `plot()` method to create a bar chart of the SNP value counts per chromosome. (**Hint:** You can do this either by specifying the `kind` argument in the `plot()` method or by using the `plot.bar()` method.)
genome['chromosome'].value_counts().plot(kind='bar')
# The `value_counts()` method automatically sorts the chromosomes highest occurrence of SNPs to the lowest. What if we want to view the data sorted by the order in which chromosomes appear in the dataset? Try to see if you can figure out how to do so. (The `sort_index()` method is one possibility. Another possibility gets raised in the comments in this Stack Overflow [article](https://stackoverflow.com/questions/43855474/changing-sort-in-value-counts).)
# Re-index the counts by first-appearance order of each chromosome in the file.
genome['chromosome'].value_counts()[genome['chromosome'].unique()]
# Now try plotting this output. (One of the joys of Pandas is that you can continue to string methods to objects in order to generate powerful, compact code.)
genome['chromosome'].value_counts()[genome['chromosome'].unique()].plot(kind='bar')
# ### Grouping Data
#
# If you haven't tried it already, the `groupby()` method can be useful in situations like this to turn the values of one of the columns in a DataFrame into the index for the DataFrame. Try using that method using the `chromosome` column coupled with the `count()` method.
# Group on chromosome: every remaining column reports its non-null count per group.
genome.groupby('chromosome').count()
# You can also plot your now grouped DataFrame. (**Note:** Your bar graph might not turn out the way you expected. If so, discuss with your partner or group what might solve this problem.)
# Selecting a single column first yields one bar per chromosome instead of one per column.
genome.groupby('chromosome')['genotype'].count().plot(kind='bar');
# Changing what you group by is another means of asking questions about your data. Now try grouping by `genotype`. What does this new grouping tell you?
genome.groupby('genotype')['chromosome'].count().plot.bar();
# **Note:** The **D** and **DD** are not nucleotides themselves; DNA nucleotides can only be adenine (A), thymine (T), guanine (G) and cytosine (C). Rather, the **D** stands for genotypes in which one or more base pairs (or even an entire part of a chromosome) has been **deleted** during DNA replication.
#
# Similarly, **I** and **II** represent genotypes of [wobble base pairs](https://en.wikipedia.org/wiki/Wobble_base_pair) that do not follow the conventional A-T and C-G pairing in DNA. Such genotypes are responsible for ailments such as [sickle-cell disease](https://en.wikipedia.org/wiki/Sickle_cell_disease).
# ### Pivoting Data
#
# You can summarize your DataFrame in a fashion similar to pivot tables in spreadsheets by use of the [`pivot()`](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.pivot.html) function. Try using this function on your grouped data, but be advised that the `index`, `columns`, and `values` arguments for this function all require `str` or `object` inputs, so it might be easiest to first capture the output from the `groupby()` in a new DataFrame. (This Stack Overflow [answer](https://stackoverflow.com/questions/10373660/converting-a-pandas-groupby-output-from-series-to-dataframe) given by <NAME>, the creator of Pandas, might prove useful in doing this.)
# Count SNPs for every (genotype, chromosome) pair and give the aggregate an
# explicit name so it can be pivoted by column name below.
grouped_genome = (
    genome.groupby(['genotype', 'chromosome'])['rsid']
    .count()
    .reset_index()
    .rename(columns={'rsid': 'count'})
)
grouped_genome.head()
# Spreadsheet-style pivot: genotypes as rows, chromosomes as columns, counts as values.
pivot_genome = grouped_genome.pivot(
    index='genotype', columns='chromosome', values='count'
)
pivot_genome.head()
# ### Stacked Bar Graph
#
# With the data pivoted, you can try somewhat more advanced visualization of your data such as a stacked bar graph in which you can see not just how many instances of different genotypes there are in the dataset, but also how many of those instances occur on each chromosome. Build upon what you know about visualizing with Matplotlib and refer to the documentation for more hints about how to execute this.
# Stacked bars: each genotype bar is segmented by chromosome; NaN cells (genotype
# absent on a chromosome) are simply not drawn.
pivot_genome.plot.bar(stacked=True, figsize=(10,7));
# ?pivot_genome.plot.bar
# ## Section 3: Individual Exploration
#
# There is still a lot you can do with this dataset or related ones. Ideas include:
# - Moving the legend for the stacked bar graph off to one side.
#
# Students can play with settings like bbox_to_anchor to arrive at an appearance they like.
# DataFrame.plot.bar creates its own figure, so the original's separate
# `fig = plt.figure()` only produced a spurious empty figure; drop it.
ax = pivot_genome.plot.bar(stacked=True, figsize=(10,7))
# bbox_to_anchor > 1 on x pushes the legend just outside the right edge of the axes.
ax.legend(loc='upper center', bbox_to_anchor=(1.09, 1.014), ncol=1)
plt.show()
# - Researching the average number of base pairs per human chromosome to see (and visualize) what proportion of each chromosome is represented in this dataset.
# Create a new DataFrame to house the aggregated genotype totals by chromosome.
chrom_num = genome.groupby('chromosome')['genotype'].count().reset_index()
chrom_num.tail(10)
# It is subtle, but there are actually two rows for Chromosome 21.
# Combine the genotype totals for both rows and delete the surplus row.
# NOTE(review): positions 20/21 are hard-coded; confirm they still point at the
# duplicate '21' rows if the input file ever changes.
chrom_num.iloc[20, 1] = chrom_num.iloc[20, 1] + chrom_num.iloc[21, 1]
chrom_num.drop([21], axis=0, inplace=True)
chrom_num.reset_index(inplace=True, drop=True)
chrom_num.tail(10)
# Now make the chromosome number the index for the DataFrame.
chrom_num.set_index('chromosome', inplace=True)
chrom_num.tail()
# Looked up number of base pairs per chromosome from https://ghr.nlm.nih.gov/chromosome
# and https://www.nature.com/scitable/topicpage/mtdna-and-mitochondrial-diseases-903/.
# Add these numbers as a new column to the DataFrame (units: millions of base pairs;
# one entry per row of chrom_num, in its current index order).
mil_base_pairs = [249, 243, 198, 191, 181, 171, 159, 146, 141, 133, 135, 134, 115, 107, 102, 90, 83, 78, 59, 63, 48, 51, 0.017, 155, 59]
chrom_num['million base pairs'] = mil_base_pairs
chrom_num.head()
# Find the proportion that the number of genotypes represents for each chromosome
# (genotype count / total base pairs; 'million base pairs' * 1e6 gives base pairs).
chrom_num['proportion'] = chrom_num['genotype'].divide(chrom_num['million base pairs']).divide(1000000)
chrom_num.tail()
# Now normalize the proportions of genotypes and basepairs based on the column totals to graph.
chrom_num['gt_proportion'] = chrom_num['genotype'].divide(chrom_num['genotype'].sum())
chrom_num['bp_proportion'] = chrom_num['proportion'].divide(chrom_num['proportion'].sum())
chrom_num.tail()
# Graph just the 'gt_proportion' and 'bp_proportion'.
chrom_num[['gt_proportion', 'bp_proportion']].plot(kind='bar');
# Mitochondrial genotypes are ridiculously overrepresented in the data.
# Remove that row and recompute to see how the other chromosomes stacked up.
chrom_num.drop(['MT'], axis=0, inplace=True)
chrom_num['proportion'] = chrom_num['genotype'].divide(chrom_num['million base pairs']).divide(1000000)
chrom_num['gt_proportion'] = chrom_num['genotype'].divide(chrom_num['genotype'].sum())
chrom_num['bp_proportion'] = chrom_num['proportion'].divide(chrom_num['proportion'].sum())
chrom_num.tail()
# Now regraph the two columns of interest from the DataFrame.
# Genotypes on the lower-numbered chromosomes are far overrepresented in proportion to the size of those chromosomes.
chrom_num[['gt_proportion', 'bp_proportion']].plot(kind='bar');
# - Use the NCBI [Genome Data Viewer](https://www.ncbi.nlm.nih.gov/genome/gdv/) to locate more datasets to investigate (or download via the NCBI [FTP site](https://ftp.ncbi.nlm.nih.gov/)).
| online-event-resources/data-science-and-machine-learning/machine-learning-with-genomics/genome.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import os
# Pin GPU enumeration to PCI bus order so device indices are stable across runs,
# then expose only physical GPU 3 to this process.
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="3"
from fastai.vision.all import *
from fastai.distributed import *
import torch
import torch.nn as nn
import torchvision
from torchvision import models
import torchvision.transforms as transforms
# +
# Training augmentation: pad+random-crop and horizontal flip, then normalize.
# NOTE(review): these normalization stats are the commonly used CIFAR-10 channel
# means/stds; CIFAR-100's own statistics differ slightly -- confirm intended.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
# Test-time pipeline: no augmentation, same normalization as training.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
trainset = torchvision.datasets.CIFAR100(
    root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(
    trainset, batch_size=256, shuffle=True, num_workers=8)
testset = torchvision.datasets.CIFAR100(
    root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(
    testset, batch_size=256, shuffle=False, num_workers=8)
# Wrap the plain PyTorch loaders so fastai's Learner can drive them.
dls = DataLoaders(trainloader, testloader)
# -
# Build a ResNet-34 from scratch (no ImageNet weights) and swap its 1000-way
# ImageNet head for a 100-way head to match CIFAR-100.
model = models.resnet34(pretrained=False)
# Derive the head width from the layer itself instead of hard-coding 512, so the
# replacement stays correct if the backbone is ever swapped for a wider one.
model.fc = nn.Linear(in_features=model.fc.in_features, out_features=100)
model = model.cuda()
# Label smoothing + best-checkpoint saving; CudaCallback keeps batches on the GPU.
learn = Learner(dls, model, loss_func=LabelSmoothingCrossEntropy(), metrics=accuracy, cbs=[CudaCallback, SaveModelCallback()], path=Path.cwd()/'resnet_tmp_cifar100')
learn.model = learn.model.cuda()
learn.lr_find()  # LR range test to sanity-check the learning rate below
learn.fit_one_cycle(240, 5.7e-4)
learn.validate()
| notebooks/resnet_cifar100_fastai.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## NBA Career Prediction Experiment
experiment_label = 'logreg03f_rev'  # tag used in all saved scaler/model/submission filenames below
# ### Aim:
# * To apply my improved undestanding of how to interpret validation results to improve model tuning.
# * Week 1 result of this model was test AUC = 0.71032
#
# ### Findings:
#
# Lower cv number gives more robust model. At cv=6, train and val scores are v close.
# Specified to use roc_auc metric in cv.
# Exploring L1 ratios
#
# AUC results train, val:
# * l1_ratio = 0 : 0.71, 0.71
# * l1_ratio = 0.5 : same, although some cv runs failed to converge
# * l1_ratio = 1 : same again.
#
# Let's go with l1_ratio = 0, ie l2 penalty.
#
# Revisting the features:
# * exclude that original set 'MIN', 'FGM', 'FGA', 'TOV', '3PA', 'FTM', 'FTA', 'REB' : slight recall (val 0,1) improvement from 0.65, 0.62 up to 0.67, 0.62 . AUC still 0.71.
#
# Submit
# * Let's submit to test: result = 0.71125.
# * That is an improvement on Week 1, where I was using too many cv iterations!
#
# ## Set up
import pandas as pd
import numpy as np
from joblib import dump, load # simpler than pickle!
import matplotlib.pyplot as plt
import seaborn as sns
# ## Data
# +
#load data
data_path = '../data/raw/uts-advdsi-nba-career-prediction'
train_raw = pd.read_csv(data_path + '/train.csv')
test_raw = pd.read_csv(data_path + '/test.csv')
# +
#shapes & head: quick sanity check of row/column counts on both splits
print(train_raw.shape)
print(test_raw.shape)
train_raw.head()
# -
test_raw.head()
# info: dtypes and null counts
train_raw.info()
#variable descriptions (summary statistics for each numeric column)
train_raw.describe()
test_raw.describe()
# +
#correlation of potential features
corr = train_raw.corr('pearson')
#hide the top triangle, set fig and axes, choose colours
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
cmap = sns.color_palette("RdBu", 20)
sns.heatmap(corr, mask=mask, cmap=cmap, vmax=1, vmin=-1,
square=True, linewidths=.05, cbar_kws={"shrink": .3})
# -
corr
# ### Observations
#
# Strong correlation between
# * MIN minutes played
# * PTS points per game ***
# * FGM field goals made
# * FGA field goal attempts
# * TOV turnovers
#
# Strong correlation between
# * 3pt attempts
# * 3pt made ***
#
# Strong correlation between
# * free throw attempts
# * free throw made
# * PTS points per game ***
#
# Strong correlation between
# * rebounds
# * (off and def rebounds) ***
# ## Decisions
#
# Exclude features listed above that are not marked with stars, in favour of those marked with stars.
# Also include features not listed above.
#
# and TARGET_5Yrs is our target.
# ## Cleaning
# Work on copies so the raw frames stay untouched for later inspection.
train = train_raw.copy()
test = test_raw.copy()
# Drop IDs plus the highly correlated features identified in the heatmap above.
cols_drop = ['Id_old', 'Id', 'MIN', 'FGM', 'FGA', 'TOV', '3PA', 'FTM', 'FTA', 'REB']
train.drop(cols_drop, axis=1, inplace=True)
test.drop(cols_drop, axis=1, inplace=True)
train.head()
test.head()
# pop() removes the target from the feature frame and returns it separately.
train_target = train.pop('TARGET_5Yrs')
# # Modelling
# +
#transformations
# fit scaler to training data
# NOTE(review): the scaler is fit on ALL training rows before the train/validation
# split below, so validation rows leak into the scaling statistics -- confirm intended.
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train = scaler.fit_transform(train)
dump(scaler, '../models/aj_' + experiment_label + '_scaler.joblib')  # persist for scoring-time reuse
# transform test data (reuse the training fit; never re-fit on test)
test = scaler.transform(test)
# +
#examine shapes
print('train:' + str(train.shape))
print('test:' + str(test.shape))
# +
# split training into train & validation (fixed seed for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(train, train_target, test_size=0.2, random_state=8)
# -
#import models
from sklearn.linear_model import LogisticRegressionCV
# +
# Define model
# elasticnet with l1_ratios=[0] is equivalent to a pure L2 penalty (per the
# findings above); 6-fold CV selects among 20 C values using AUC.
model = LogisticRegressionCV(class_weight='balanced',
                             Cs=20,
                             max_iter=300,
                             penalty='elasticnet',
                             solver='saga',
                             l1_ratios=[0],
                             scoring='roc_auc',
                             cv=6,
                             random_state=8)
# -
#fit model to training data (the 80% split, not the full training set)
model.fit(X_train, y_train)
# +
#save model to file so the exact fitted estimator can be reloaded later
dump(model, '../models/aj_' + experiment_label + '.joblib')
# +
#predictions for train and validation sets (hard class labels)
y_train_preds = model.predict(X_train)
y_val_preds = model.predict(X_val)
# -
# ## Evaluation
from sklearn import metrics
# %load_ext autoreload
# %autoreload 2
import sys
import os
sys.path.append(os.path.abspath('..'))  # make the project root importable
# Project-local labelled confusion matrix (same name as sklearn's function).
from src.models.aj_metrics import confusion_matrix
# +
# Show confusion matrix with labels for both splits to spot over/under-fitting.
print("Training:")
print(confusion_matrix(y_train, y_train_preds))
print("Validation:")
print(confusion_matrix(y_val, y_val_preds))
# -
print(metrics.classification_report(y_train, y_train_preds))
print(metrics.classification_report(y_val, y_val_preds))
import matplotlib.pyplot as plt
from sklearn import metrics
# plot_roc_curve was deprecated in scikit-learn 1.0 and removed in 1.2;
# RocCurveDisplay.from_estimator is the supported drop-in replacement.
metrics.RocCurveDisplay.from_estimator(model, X_train, y_train)
plt.show()
metrics.RocCurveDisplay.from_estimator(model, X_val, y_val)
plt.show()
# # Apply to test data for submission
# Score the held-out test set: hard labels for inspection, probabilities for submission.
y_test_preds = model.predict(test)
y_test_preds
y_test_probs = model.predict_proba(test)
y_test_probs
len(y_test_probs)
test_raw.shape
test_raw['Id'].shape
# Size the Id column from the predictions themselves rather than the hard-coded
# 3799, so the cell keeps working if the test set changes; p[1] is the
# positive-class (TARGET_5Yrs == 1) probability.
submission = pd.DataFrame({'Id': range(len(y_test_probs)),
                           'TARGET_5Yrs': [p[1] for p in y_test_probs]})
submission.head()
# NOTE(review): the filename concatenates the label and 'submission.csv' with no
# separator ('..._revsubmission.csv') -- confirm intentional before changing.
submission.to_csv('../reports/aj_' + experiment_label + 'submission.csv',
                  index=False,
                 )
| notebooks/duncanson_aj-12823819-week2_logreg03f_revisit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Background
#
# Links:
# - [Wikipedia](https://en.wikipedia.org/wiki/Frequentist_inference)
# +
import pandas as pd
import numpy as np
from scipy import stats
import seaborn as sns
from matplotlib import pyplot as plt
plt.style.use("../style.mplstyle")
# +
np.random.seed(123)  # fixed seed so every figure below is reproducible
n1 = np.random.normal(80, 9, 300)  # group 1: mean 80, sd 9, 300 draws
n2 = np.random.normal(78, 7, 300)  # group 2: mean 78, sd 7, 300 draws
n3 = np.random.normal(65, 5, 500)  # group 3: mean 65, sd 5, 500 draws
sns.histplot([n1, n2, n3], kde=True)
# -
# # Confidence Intervals
#
# Standard Deviation:
# $
# \hat{\sigma}=\frac{1}{N-1}\sum_{i=0}^N (x-\bar{x})^2
# $
#
# Standard Error:
# $
# s_{e} = \frac {\hat{\sigma}}{\sqrt{n}}
# $
n = n3  # use the largest sample
se = n.std() / np.sqrt(len(n))  # standard error of the mean
mu = n.mean()
# Sampling distribution of the mean: normal centred on mu with sd = se.
x = np.linspace(mu - 5 * se, mu + 5 * se, 100)
y = stats.norm.pdf(x, mu, se)
z = stats.norm.ppf(0.975) # 95% CI
ci = (mu - z * se, mu + z * se)
plt.plot(x, y)
plt.vlines(ci[1], ymin=0, ymax=1, color="k")
plt.vlines(ci[0], ymin=0, ymax=1, color="k", label="95% CI")
plt.legend()
plt.show()
# # Hypothesis Testing
#
# $
# N(\mu_1, \sigma_1^2) - N(\mu_2, \sigma_2^2) = N(\mu_1 - \mu_2, \sigma_1^2 + \sigma_2^2)
# $
#
# $
# N(\mu_1, \sigma_1^2) + N(\mu_2, \sigma_2^2) = N(\mu_1 + \mu_2, \sigma_1^2 + \sigma_2^2)
# $
# +
ndiff = n1 - n2  # element-wise differences; their spread reflects the summed variances
sns.histplot([n1, n2, ndiff], kde=True)
# -
#
# $
# \mu_{diff} = \mu_1 - \mu_2
# $
#
# $
# SE_{diff} = \sqrt{SE_1^2 + SE_2^2} = \sqrt{\sigma_1^2/n_1 + \sigma_2^2/n_2}
# $
#
mu_diff = n1.mean() - n2.mean()
# SE of a difference of independent means: sqrt(var1/n1 + var2/n2).
se_diff = np.sqrt(n1.var() / len(n1) + n2.var() / len(n2))
ci = (mu_diff - 1.96 * se_diff, mu_diff + 1.96 * se_diff)
ci # with 95% confidence, the difference between the means of n1 and n2 lies within this range
# Z statistic:
# $
# z = \dfrac{\mu_{diff} - H_{0}}{SE_{diff}} = \dfrac{(\mu_1 - \mu_2) - H_{0}}{\sqrt{\sigma_1^2/n_1 + \sigma_2^2/n_2}}
# $
#
# A measure of how extreme the observed difference is, compared to the value we want to test against, $H_{0}$.
z = mu_diff / se_diff # H0 = 0, i.e. testing against "no difference between means"
print(f"{z:.3f}")
x = np.linspace(-5, 5, 100)
y = stats.norm.pdf(x, 0, 1)  # standard normal, the null distribution of z
plt.plot(x, y, label="Standard Normal")
plt.vlines(z, ymin=0, ymax=0.05, label="Z statistic")
plt.legend()
plt.show()
# Hypothesis tests are less conservative than checking if the 95% CI of two groups overlap - it's possible for CIs to overlap but to still have a statistically significiant difference.
# +
# Control has a tight interval, test a wide one; compare the two individual CIs
# with the CI of their difference.
cont_mu, cont_se = (71, 1)
test_mu, test_se = (74, 7)
diff_mu = test_mu - cont_mu
# SE of the difference of two independent estimates is the root of the sum of
# SQUARED standard errors. The original `np.sqrt(cont_se + cont_se)` dropped the
# squares and ignored test_se entirely, drastically understating diff_se.
diff_se = np.sqrt(cont_se**2 + test_se**2)
print(
    f"Control 95% CI: ({cont_mu - 1.96 * cont_se:.2f}, {cont_mu + 1.96 * cont_se:.2f})"
)
print(
    f"Test 95% CI : ({test_mu - 1.96 * test_se:.2f}, {test_mu + 1.96 * test_se:.2f})"
)
print(
    f"Diff 95% CI : ({diff_mu - 1.96 * diff_se:.3f}, {diff_mu + 1.96 * diff_se:.3f})"
)
# -
# # P-values
#
# "The p-value is the probability of obtaining results at least as extreme as the results actually observed during the test, assuming the null hypothesis is correct."
# NOTE(review): cdf gives P(Z <= z); a right-tailed p-value would use stats.norm.sf(z) -- confirm intent.
print(f"P-value: {stats.norm.cdf(z):.4f}")
diff_mu = n1.mean() - n2.mean()
diff_se = np.sqrt(n1.var() / len(n1) + n2.var() / len(n2))
# Higher confidence level -> larger critical z -> wider interval.
print(
    f"95% CI : ({diff_mu - stats.norm.ppf(.975)*diff_se:.2f}, {diff_mu + stats.norm.ppf(.975)*diff_se:.2f})"
)
print(
    f"99% CI : ({diff_mu - stats.norm.ppf(.995)*diff_se:.2f}, {diff_mu + stats.norm.ppf(.995)*diff_se:.2f})"
)
print(
    f"99.9% CI: ({diff_mu - stats.norm.ppf(.9995)*diff_se:.2f}, {diff_mu + stats.norm.ppf(.9995)*diff_se:.2f})"
)
# # Combined
# +
def AB_test(test: pd.Series, control: pd.Series, confidence=0.95, h0=0):
    """Print per-group CIs, the CI of their difference, and a z-test against h0.

    Reports at the requested two-sided confidence level; returns None.
    """
    test_mean, control_mean = test.mean(), control.mean()
    test_se = test.std() / np.sqrt(len(test))
    control_se = control.std() / np.sqrt(len(control))
    mean_gap = test_mean - control_mean
    gap_se = np.sqrt(test.var() / len(test) + control.var() / len(control))
    z_score = (mean_gap - h0) / gap_se
    prob = stats.norm.cdf(z_score)  # lower-tail probability, matching the cells above

    def half_width(se):
        # Half-width of a two-sided interval at the requested confidence level.
        return -se * stats.norm.ppf((1 - confidence) / 2)

    print(f"Test {confidence*100:.1f}% CI : {test_mean:.2f} +- {half_width(test_se):.2f}")
    print(f"Control {confidence*100:.1f}% CI : {control_mean:.2f} +- {half_width(control_se):.2f}")
    print(f"Test-Control {confidence*100:.1f}% CI: {mean_gap:.2f} +- {half_width(gap_se):.2f}")
    print(f"Z Statistic : {z_score:.4f}")
    print(f"P-Value : {prob:.4f}")
# Run the comparison on the two synthetic samples generated earlier.
AB_test(n1, n2)
# -
| notebooks/frequentist_inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # vis_getting_started_network_javascript
#
# The vis.js website provides a 'getting started' example for its Network tool:
#
# http://visjs.org/docs/network/
#
# This notebook looks at recreating this in Jupyter notebooks.
# ## Importing vis.js
#
# In other examples on the web, vis.js is imported into a Jupyter notebook using the require.config command.
#
# The vis package could be loaded using the local copy `require.config({paths: {vis: '../vis-4.21.0/dist/vis'}});`. However this then doesn't seem to render on nbviewer.
#
# SO instead the package is loaded from the cdnjs website.
#
from IPython.display import Javascript
# Render the vis.js "getting started" network from Python: require.js loads vis
# from the CDN, the script builds node/edge DataSets, and the graph is drawn into
# a <div> appended to this cell's output area. The string below is JavaScript
# executed in the browser and must be kept verbatim.
Javascript(
"""
require.config({paths: {vis: '//cdnjs.cloudflare.com/ajax/libs/vis/4.21.0/vis.min'}});
require(['vis'], function(vis){
// create an array with nodes
var nodes = new vis.DataSet([
{id: 1, label: 'Node 1'},
{id: 2, label: 'Node 2'},
{id: 3, label: 'Node 3'},
{id: 4, label: 'Node 4'},
{id: 5, label: 'Node 5'}
]);
// create an array with edges
var edges = new vis.DataSet([
{from: 1, to: 3},
{from: 1, to: 2},
{from: 2, to: 4},
{from: 2, to: 5}
]);
// create a network
var container = document.getElementById('mynetwork10');
// provide the data in the vis format
var data = {
nodes: nodes,
edges: edges
};
var options = {width: '600px',
height: '400px'};
// initialize your network!
var network = new vis.Network(container, data, options);
});
element.append("<div id='mynetwork10'></div>")
"""
)
# This renders as a html document saved in the same folder as this notebook. [./vis_getting_started_network_notebook_javascript.html](./vis_getting_started_network_notebook_javascript.html)
#
#
# Can't load this on GitHub at present, so don't know how it is rendering.
# https://github.com/stevenkfirth/Jupyter_and_vis.js/blob/master/vis_getting_started_network/vis_getting_started_network_notebook_javascript.ipynb
#
# However this does seem to render on nbviewer. https://nbviewer.jupyter.org/github/stevenkfirth/Jupyter_and_vis.js/blob/master/vis_getting_started_network/vis_getting_started_network_notebook_javascript.ipynb?flush_cache=true
#
#
#
'test'
| vis_getting_started_network/vis_getting_started_network_notebook_javascript.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exercises
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Exercise 1: Stabilizing the Crank-Nicolson method by Rannacher time stepping
# <div id="diffu:exer:CN:Rannacher"></div>
#
# It is well known that the Crank-Nicolson method may give rise to
# non-physical oscillations in the solution of diffusion equations
# if the initial data exhibit jumps (see the section [diffu:pde1:analysis:CN](#diffu:pde1:analysis:CN)).
# Rannacher [[Rannacher_1984]](#Rannacher_1984) suggested a stabilizing technique
# consisting of using the Backward Euler scheme for the first two
# time steps with step length $\frac{1}{2}\Delta t$. One can generalize
# this idea to taking $2m$ time steps of size $\frac{1}{2}\Delta t$ with
# the Backward Euler method and then continuing with the
# Crank-Nicolson method, which is of second-order in time.
# The idea is that the high frequencies of the initial solution are
# quickly damped out, and the Backward Euler scheme treats these
# high frequencies correctly. Thereafter, the high frequency content of
# the solution is gone and the Crank-Nicolson method will do well.
#
# Test this idea for $m=1,2,3$ on a diffusion problem with a
# discontinuous initial condition. Measure the convergence rate using
# the solution ([diffu:analysis:pde1:step:erf:sol](#diffu:analysis:pde1:step:erf:sol)) with the boundary
# conditions
# ([diffu:analysis:pde1:p1:erf:uL](#diffu:analysis:pde1:p1:erf:uL))-([diffu:analysis:pde1:p1:erf:uR](#diffu:analysis:pde1:p1:erf:uR))
# for $t$ values such that the conditions are in the vicinity of $\pm 1$.
# For example, $t < 1.6\cdot 10^{-2}$ makes the solution diffuse from
# a step to almost a straight line. The
# program `diffu_erf_sol.py` shows how to compute the analytical
# solution.
#
# <!-- --- end exercise --- -->
#
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Project 2: Energy estimates for diffusion problems
# <div id="diffu:exer:energy:estimates"></div>
#
#
# This project concerns so-called *energy estimates* for diffusion problems
# that can be used for qualitative analytical insight and for
# verification of implementations.
#
#
# **a)**
# We start with a 1D homogeneous diffusion equation with zero Dirichlet
# conditions:
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p1:pdf"></div>
#
# $$
# \begin{equation}
# u_t = \alpha u_{xx}, x\in \Omega =(0,L),\ t\in (0,T],
# \label{diffu:exer:estimates:p1:pdf} \tag{1}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p1:bc"></div>
#
# $$
# \begin{equation}
# u(0,t) = u(L,t) = 0, t\in (0,T],
# \label{diffu:exer:estimates:p1:bc} \tag{2}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p1:ic"></div>
#
# $$
# \begin{equation}
# u(x,0) = I(x), x\in [0,L]
# \label{diffu:exer:estimates:p1:ic} \tag{3}
# \thinspace .
# \end{equation}
# $$
# The energy estimate for this problem reads
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p1:result"></div>
#
# $$
# \begin{equation}
# ||u||_{L^2} \leq ||I||_{L^2},
# \label{diffu:exer:estimates:p1:result} \tag{4}
# \end{equation}
# $$
# where the $||\cdot ||_{L^2}$ norm is defined by
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:L2"></div>
#
# $$
# \begin{equation}
# ||g||_{L^2} = \sqrt{\int_0^L g^2dx}\thinspace .
# \label{diffu:exer:estimates:L2} \tag{5}
# \end{equation}
# $$
# The quantity $||u||_{L^2}$ or $\frac{1}{2} ||u||_{L^2}$ is known
# as the *energy* of the solution, although it is not the physical
# energy of the system. A mathematical tradition has introduced the
# notion *energy* in this context.
#
# The estimate ([4](#diffu:exer:estimates:p1:result)) says that the
# "size of $u$" never exceeds that of the initial condition,
# or more precisely, it says that the area under the $u$ curve decreases
# with time.
#
# To show ([4](#diffu:exer:estimates:p1:result)), multiply the PDE
# by $u$ and integrate from $0$ to $L$. Use that $uu_t$ can be
# expressed as the time derivative of $u^2$ and that $u_{xx}u$ can
# be integrated by parts to form an integrand $u_x^2$. Show that
# the time derivative of $||u||_{L^2}^2$ must be less than or equal
# to zero. Integrate this expression and derive
# ([4](#diffu:exer:estimates:p1:result)).
#
# <!-- <http://www.ann.jussieu.fr/~frey/cours/UdC/ma691/ma691_ch6.pdf> -->
#
# **b)**
# Now we address a slightly different problem,
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p2:pdf"></div>
#
# $$
# \begin{equation}
# u_t = \alpha u_{xx} + f(x,t), x\in \Omega =(0,L),\ t\in (0,T],
# \label{diffu:exer:estimates:p2:pdf} \tag{6}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p2:bc"></div>
#
# $$
# \begin{equation}
# u(0,t) = u(L,t) = 0, t\in (0,T],
# \label{diffu:exer:estimates:p2:bc} \tag{7}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p2:ic"></div>
#
# $$
# \begin{equation}
# u(x,0) = 0, x\in [0,L]
# \label{diffu:exer:estimates:p2:ic} \tag{8}
# \thinspace .
# \end{equation}
# $$
# The associated energy estimate is
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p2:result"></div>
#
# $$
# \begin{equation}
# ||u||_{L^2} \leq ||f||_{L^2}\thinspace .
# \label{diffu:exer:estimates:p2:result} \tag{9}
# \end{equation}
# $$
# (This result is more difficult to derive.)
#
# Now consider the compound problem with an initial condition $I(x)$ and
# a right-hand side $f(x,t)$:
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p3:pdf"></div>
#
# $$
# \begin{equation}
# u_t = \alpha u_{xx} + f(x,t), x\in \Omega =(0,L),\ t\in (0,T],
# \label{diffu:exer:estimates:p3:pdf} \tag{10}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p3:bc"></div>
#
# $$
# \begin{equation}
# u(0,t) = u(L,t) = 0, t\in (0,T],
# \label{diffu:exer:estimates:p3:bc} \tag{11}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p3:ic"></div>
#
# $$
# \begin{equation}
# u(x,0) = I(x), x\in [0,L]
# \label{diffu:exer:estimates:p3:ic} \tag{12}
# \thinspace .
# \end{equation}
# $$
# Show that if $w_1$ fulfills
# ([1](#diffu:exer:estimates:p1:pdf))-([3](#diffu:exer:estimates:p1:ic))
# and $w_2$ fulfills
# ([6](#diffu:exer:estimates:p2:pdf))-([8](#diffu:exer:estimates:p2:ic)),
# then $u=w_1 + w_2$ is the solution of
# ([10](#diffu:exer:estimates:p3:pdf))-([12](#diffu:exer:estimates:p3:ic)).
# Using the triangle inequality for norms,
# $$
# ||a + b|| \leq ||a|| + ||b||,
# $$
# show that the energy estimate for
# ([10](#diffu:exer:estimates:p3:pdf))-([12](#diffu:exer:estimates:p3:ic))
# becomes
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p3:result"></div>
#
# $$
# \begin{equation}
# ||u||_{L^2} \leq ||I||_{L^2} + ||f||_{L^2}\thinspace .
# \label{diffu:exer:estimates:p3:result} \tag{13}
# \end{equation}
# $$
# **c)**
# One application of ([13](#diffu:exer:estimates:p3:result)) is to prove uniqueness
# of the solution.
# Suppose $u_1$ and $u_2$ both fulfill
# ([10](#diffu:exer:estimates:p3:pdf))-([12](#diffu:exer:estimates:p3:ic)).
# Show that $u=u_1 - u_2$ then fulfills
# ([10](#diffu:exer:estimates:p3:pdf))-([12](#diffu:exer:estimates:p3:ic))
# with $f=0$ and $I=0$. Use ([13](#diffu:exer:estimates:p3:result))
# to deduce that the energy must be zero for all times and therefore
# that $u_1=u_2$, which proves that the solution is unique.
#
# **d)**
# Generalize ([13](#diffu:exer:estimates:p3:result)) to a 2D/3D
# diffusion equation $u_t = \nabla\cdot (\alpha \nabla u)$ for $x\in\Omega$.
#
# <!-- --- begin hint in exercise --- -->
#
# **Hint.**
# Use integration by parts in multi dimensions:
# $$
# \int_\Omega u \nabla\cdot (\alpha\nabla u)\dx =
# - \int_\Omega \alpha \nabla u\cdot\nabla u\dx
# + \int_{\partial\Omega} u \alpha\frac{\partial u}{\partial n},
# $$
# where $\frac{\partial u}{\partial n} = \boldsymbol{n}\cdot\nabla u$,
# $\boldsymbol{n}$ being the outward unit normal to the boundary $\partial\Omega$
# of the domain $\Omega$.
#
# <!-- --- end hint in exercise --- -->
#
# **e)**
# Now we also consider the multi-dimensional PDE $u_t =
# \nabla\cdot (\alpha \nabla u)$. Integrate both sides over $\Omega$
# and use Gauss' divergence theorem, $\int_\Omega \nabla\cdot\boldsymbol{q}\dx
# = \int_{\partial\Omega}\boldsymbol{q}\cdot\boldsymbol{n}\ds$ for a vector field
# $\boldsymbol{q}$. Show that if we have homogeneous Neumann conditions
# on the boundary, $\partial u/\partial n=0$, the area under the
# $u$ surface remains constant in time and
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:estimates:p4:result"></div>
#
# $$
# \begin{equation}
# \int_{\Omega} u\dx = \int_{\Omega} I\dx
# \thinspace .
# \label{diffu:exer:estimates:p4:result} \tag{14}
# \end{equation}
# $$
# **f)**
# Establish a code in 1D, 2D, or 3D that can solve a diffusion equation with a
# source term $f$, initial condition $I$, and zero Dirichlet or
# Neumann conditions on the whole boundary.
#
# We can use ([13](#diffu:exer:estimates:p3:result))
# and ([14](#diffu:exer:estimates:p4:result)) as a partial verification
# of the code. Choose some functions $f$ and $I$ and
# check that ([13](#diffu:exer:estimates:p3:result)) is obeyed at any
# time when zero Dirichlet conditions are used.
# Iterate over the same $I$ functions and check that
# ([14](#diffu:exer:estimates:p4:result)) is fulfilled
# when using zero Neumann conditions.
#
# **g)**
# Make a list of some possible bugs in the code, such as indexing errors
# in arrays, failure to set the correct boundary conditions,
# evaluation of a term at a wrong time level, and similar.
# For each of the bugs, see if the verification tests from the previous
# subexercise pass or fail. This investigation shows how strong
# the energy estimates and the estimate ([14](#diffu:exer:estimates:p4:result))
# are for pointing out errors in the implementation.
#
# Filename: `diffu_energy`.
#
# <!-- --- end exercise --- -->
#
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Exercise 3: Splitting methods and preconditioning
# <div id="diffu:exer:splitting_prec"></div>
#
#
# In the section [diffu:2D:direct_vs_iter](#diffu:2D:direct_vs_iter), we outlined a class of
# iterative methods for $Au=b$ based on splitting $A$ into $A=M-N$
# and introducing the iteration
# $$
# Mu^{k} = Nu^{k-1} + b\thinspace .
# $$
# The very simplest splitting is $M=I$, where $I$ is the identity
# matrix. Show that this choice corresponds to the iteration
# <!-- Equation labels as ordinary links -->
# <div id="diffu:exer:splitting_prec:simplest"></div>
#
# $$
# \begin{equation}
# u^k = u^{k-1} + r^{k-1},\quad r^{k-1} = b - Au^{k-1},
# \label{diffu:exer:splitting_prec:simplest} \tag{15}
# \end{equation}
# $$
# where $r^{k-1}$ is the residual in the linear system in iteration
# $k-1$. The formula ([15](#diffu:exer:splitting_prec:simplest)) is known
# as Richardson's iteration.
# Show that if we apply the simple iteration method
# ([15](#diffu:exer:splitting_prec:simplest)) to the *preconditioned*
# system $M^{-1}Au=M^{-1}b$, we arrive at the Jacobi method by choosing
# $M=D$ (the diagonal of $A$) as preconditioner and the SOR method by
# choosing $M=\omega^{-1}D + L$ ($L$ being the lower triangular part of
# $A$). This equivalence shows that we can apply one iteration of the
# Jacobi or SOR method as preconditioner.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# Inserting $M=I$ and $N=I-A$ in the iterative method leads to
# $$
# u^{k} = (I-A)u^{k-1} + b = u^{k-1} + (b - Au^{k-1}),
# $$
# which is ([15](#diffu:exer:splitting_prec:simplest)).
# Replacing $A$ by $M^{-1}A$ and $b$ by $M^{-1}b$ in this equation
# gives
# $$
# u^k = u^{k-1} + M^{-1}r^{k-1},\quad r^{k-1}=b-Au^{k-1},
# $$
# which we after multiplication by $M$ and reordering can write
# as
# $$
# Mu^k = (M-A)u^{k-1} + b = Nu^{k-1} + b,
# $$
# which is the standard form for the Jacobi and SOR methods. Choosing $M=D$
# gives Jacobi and $M=\omega^{-1}D+L$ gives SOR. We have shown that we may
# view $M$ as a preconditioner of a simplest possible iteration method.
#
# <!-- --- end solution of exercise --- -->
#
# <!-- --- end exercise --- -->
#
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Problem 4: Oscillating surface temperature of the earth
# <div id="diffu:exer:earthosc"></div>
#
# Consider a day-and-night or seasonal variation in temperature at
# the surface of the earth. How deep down in the ground will the
# surface oscillations reach? For simplicity, we model only the
# vertical variation along a coordinate $x$, where $x=0$ at the
# surface, and $x$ increases as we go down in the ground.
# The temperature is governed by the heat equation
# $$
# \varrho c_v\frac{\partial T}{\partial t} = \nabla\cdot(k\nabla T),
# $$
# in some spatial domain $x\in [0,L]$, where $L$ is chosen large enough such
# that we can assume that $T$ is approximately constant, independent of the surface
# oscillations, for $x>L$. The parameters $\varrho$, $c_v$, and $k$ are the
# density, the specific heat capacity at constant volume, and the
# heat conduction coefficient, respectively.
#
#
# **a)**
# Derive the mathematical model for computing $T(x,t)$.
# Assume the surface oscillations to be sinusoidal around some mean
# temperature $T_m$. Let $T=T_m$ initially. At $x=L$, assume $T\approx T_m$.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# The surface temperature is set as
# $$
# T(0,t) = T_m + A\sin(\omega t)\thinspace .
# $$
# With only one "active" spatial coordinate we get the initial-boundary
# value problem
# $$
# \begin{alignat*}{2}
# \varrho c_v \frac{\partial T}{\partial t} &= \frac{\partial}{\partial x}
# \left(k(x)\frac{\partial T}{\partial x}\right), & x\in (0,L),\ t\in (0,T],\\
# T(x,0)&= T_m, & x\in [0,L],\\
# T(0,t)&= T_m + A\sin(\omega t), & t\in (0,T],\\
# T(L,t) &= T_m, & t\in (0,T].
# \end{alignat*}
# $$
# <!-- --- end solution of exercise --- -->
#
# **b)**
# Scale the model in a) assuming $k$ is constant. Use a time scale
# $t_c = \omega^{-1}$ and a length scale $x_c = \sqrt{2\dfc/\omega}$,
# where $\dfc = k/(\varrho c_v)$. The primary unknown can be scaled
# as $\frac{T-T_m}{2A}$.
#
# Show that the scaled PDE is
# $$
# \frac{\partial u}{\partial \bar t} =
# \frac{1}{2}\frac{\partial^2 u}{\partial x^2},
# $$
# with initial condition $u(\bar x,0) = 0$,
# left boundary condition
# $u(0,\bar t) = \sin(\bar t)$,
# and right boundary condition
# $u(\bar L,\bar t) = 0$. The bar indicates a dimensionless quantity.
#
# Show that $u(\bar x, \bar t)=e^{-\bar x}\sin (\bar t - \bar x)$ is a
# solution that fulfills the PDE and the boundary condition at $\bar x
# =0$ (this is the solution we will experience as $\bar
# t\rightarrow\infty$ and $L\rightarrow\infty$). Conclude that an
# appropriate domain for $x$ is $[0,4]$ if a damping $e^{-4}\approx
# 0.18$ is appropriate for implementing $\bar u\approx\hbox{const}$;
# increasing to $[0,6]$ damps $\bar u$ to 0.0025.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# Chapter 3.2.4 in the book [[Langtangen_scaling]](#Langtangen_scaling) describes the
# scaling of this problem in detail.
# Inserting dimensionless variables $\bar t = \omega t$, $\bar x =
# \sqrt{\omega/(2\dfc)} x$, and
# $$
# u = \frac{T-T_m}{2A},
# $$
# leads to
# $$
# \begin{alignat*}{2}
# \frac{\partial u}{\partial \bar t} &=
# \frac{1}{2}\frac{\partial^2 u}{\partial x^2},
# \quad & \bar x\in (0,\bar L),\ \bar t\in (0,\bar T],
# \\
# u(\bar x,0) &= 0,
# \quad &\bar x\in [0,1],
# \\
# u(0,\bar t) & = \sin(\bar t),
# \quad &\bar t\in (0,\bar T],
# \\
# u(\bar L,\bar t) & = 0,
# \quad &\bar t\in (0,\bar T].
# \end{alignat*}
# $$
# The domain lengths $\bar L$ and $\bar T$ follows from straightforward
# scaling of $L$ and $T$.
#
# Inserting $u(\bar x, \bar t)=e^{-\bar x}\sin (\bar t - \bar x)$ in the
# PDE shows that this is a solution. It also obeys
# the boundary condition $\bar u(0,\bar t)=\sin(\bar t)$. As
# $\bar t\rightarrow\infty$, the initial condition has no longer impact
# on the solution and is "forgotten" and of no interest.
# The boundary condition at $\bar x=\bar L$ is never compatible with the
# given solution unless $\bar u$ is damped to zero, which happens
# mathematically as $\bar L\rightarrow\infty$. For a numerical solution,
# however, we may use a small finite value such as $\bar L=4$.
#
# <!-- --- end solution of exercise --- -->
#
# **c)**
# Compute the scaled temperature and make animations comparing two solutions
# with $\bar L=4$ and $\bar L=8$, respectively (keep $\Delta x$ the same).
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# We can use the `viz` function in `diff1D_vc.py` to do the number
# crunching. Appropriate calls and visualization go here:
# +
# %matplotlib inline
import sys, os
sys.path.insert(0, os.path.join(os.pardir, 'src-diffu'))
from diffu1D_vc import viz

# Solve the scaled surface-oscillation problem on two domains,
# [0,4] and [0,8], with the same mesh spacing dx, then animate both.
sol = []  # store solutions
for Nx, L in [[20, 4], [40, 8]]:
    dt = 0.1
    dx = float(L)/Nx
    D = dt/dx**2
    from math import pi, sin
    T = 2*pi*6          # six full periods of the surface oscillation
    from numpy import zeros
    a = zeros(Nx+1) + 0.5   # constant diffusion coefficient 1/2 (scaled PDE)
    cpu, u_ = viz(
        I=lambda x: 0, a=a, L=L, Nx=Nx, D=D, T=T,
        umin=-1.1, umax=1.1, theta=0.5,
        u_L=lambda t: sin(t),
        u_R=0,
        animate=False, store_u=True)
    sol.append(u_)
    print('computed solution for Nx=%d in [0,%g]' % (Nx, L))

# Bug fix: these were Python 2 print statements (a syntax error in
# Python 3, and inconsistent with the print() call used above).
print(sol[0].shape)
print(sol[1].shape)

import scitools.std as plt
counter = 0
# sol[i][0] holds the mesh, sol[i][2:] the stored time levels.
for u0, u1 in zip(sol[0][2:], sol[1][2:]):
    x0 = sol[0][0]
    x1 = sol[1][0]
    plt.plot(x0, u0, 'r-', x1, u1, 'b-',
             legend=['short', 'long'],
             savefig='tmp_%04d.png' % counter,
             axis=[x1[0], x1[-1], -1.1, 1.1])
    counter += 1
# -
# <!-- dom:MOVIE: [https://github.com/hplgit/fdm-book/raw/master/doc/pub/book/html/mov-diffu/surface_osc/movie.mp4] -->
# <!-- begin movie -->
from IPython.display import HTML
# HTML snippet embedding the surface-oscillation movie, with mp4/webm/ogg
# sources hosted in the fdm-book repository, plus a Javascript warning for
# Safari, where none of the provided movie formats play.
_s = """
<div>
<video loop controls width='640' height='365' preload='none'>
<source src='https://github.com/hplgit/fdm-book/raw/master/doc/pub/book/html/mov-diffu/surface_osc/movie.mp4' type='video/mp4; codecs="avc1.42E01E, mp4a.40.2"'>
<source src='https://github.com/hplgit/fdm-book/raw/master/doc/pub/book/html/mov-diffu/surface_osc/movie.webm' type='video/webm; codecs="vp8, vorbis"'>
<source src='https://github.com/hplgit/fdm-book/raw/master/doc/pub/book/html/mov-diffu/surface_osc/movie.ogg' type='video/ogg; codecs="theora, vorbis"'>
</video>
</div>
<p><em></em></p>
<!-- Issue warning if in a Safari browser -->
<script language="javascript">
if (!!(window.safari)) {
document.write("<div style=\"width: 95%%; padding: 10px; border: 1px solid #100; border-radius: 4px;\"><p><font color=\"red\">The above movie will not play in Safari - use Chrome, Firefox, or Opera.</font></p></div>")}
</script>
"""
HTML(_s)
# <!-- end movie -->
#
# <!-- --- end solution of exercise --- -->
#
#
#
# <!-- --- end exercise --- -->
#
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Problem 5: Oscillating and pulsating flow in tubes
# <div id="diffu:exer:bloodflow"></div>
#
# We consider flow in a straight tube with radius $R$ and straight walls.
# The flow is driven by a pressure gradient $\beta(t)$. The effect of
# gravity can be neglected. The mathematical problem reads
# <!-- Equation labels as ordinary links -->
# <div id="_auto1"></div>
#
# $$
# \begin{equation}
# \varrho\frac{\partial u}{\partial t} =
# \mu\frac{1}{r}\frac{\partial}{\partial r}\left(
# r\frac{\partial u}{\partial r}\right) + \beta(t),\quad
# r\in [0,R],\ t\in (0,T],
# \label{_auto1} \tag{16}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto2"></div>
#
# $$
# \begin{equation}
# u(r,0) = I(r),\quad r\in [0,R],
# \label{_auto2} \tag{17}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto3"></div>
#
# $$
# \begin{equation}
# u(R,t) = 0,\quad t\in (0,T],
# \label{_auto3} \tag{18}
# \end{equation}
# $$
# <!-- Equation labels as ordinary links -->
# <div id="_auto4"></div>
#
# $$
# \begin{equation}
# \frac{\partial u}{\partial r}(0,t) = 0,\quad t\in (0,T].
# \label{_auto4} \tag{19}
# \end{equation}
# $$
# We consider two models for $\beta(t)$. One plain, sinusoidal oscillation:
# <!-- Equation labels as ordinary links -->
# <div id="_auto5"></div>
#
# $$
# \begin{equation}
# \beta = A\sin(\omega t),
# \label{_auto5} \tag{20}
# \end{equation}
# $$
# and one with periodic pulses,
# <!-- Equation labels as ordinary links -->
# <div id="_auto6"></div>
#
# $$
# \begin{equation}
# \beta = A\sin^{16}(\omega t),
# \label{_auto6} \tag{21}
# \end{equation}
# $$
# Note that both models can be written as $\beta = A\sin^m(\omega t)$, with
# $m=1$ and $m=16$, respectively.
#
#
# **a)**
# Scale the mathematical model, using the viscous time scale $\varrho R^2/\mu$.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# We can introduce
# $$
# \bar r = \frac{r}{R}, \quad \bar t = \frac{t}{\varrho R^2/\mu},\quad u = \frac{u}{u_c}\thinspace .
# $$
# Inserted in the PDE, we get
# $$
# \frac{\partial\bar u}{\partial\bar t} =
# \frac{1}{\bar r}\frac{\partial}{\partial\bar r}\left(
# \bar r\frac{\partial\bar u}{\partial\bar r}\right) +
# \frac{R^2 A}{u_c \mu}\sin^m (\alpha\bar t)
# $$
# where $\alpha$ is a dimensionless number
# $$
# \alpha = \frac{\omega\varrho R^2}{\mu} = \frac{\varrho R^2/\mu}{1/\omega},
# $$
# reflecting the ratio of the viscous diffusion time scale and the
# time scale of the oscillating pressure gradient.
# We may choose $u_c$ such that the coefficient in the pressure gradient
# term equals unity:
# $$
# u_c = \frac{R^2 A}{\mu}\thinspace .
# $$
# The governing PDE, dropping the bars, then reads
# $$
# \frac{\partial u}{\partial t} =
# \frac{1}{r}\frac{\partial}{\partial r}\left(
# r\frac{\partial u}{\partial r}\right) +
# \sin^m (\alpha\bar t),\quad r\in (0,1),\ t\in (0,T]\thinspace .
# $$
# <!-- --- end solution of exercise --- -->
#
# **b)**
# Implement the scaled model from a), using the unifying $\theta$ scheme
# in time and centered differences in space.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# We need to take into account extensions below: a coefficient in front of
# the viscous term, and an extra source term.
#
# A preliminary and unfinished code:
# +
"""
Solve the diffusion equation for axi-symmetric case:
u_t = 1/r * (r*a(r)*u_r)_r + f(r,t)
on (0,R) with boundary conditions u(0,t)_r = 0 and u(R,t) = 0,
for t in (0,T]. Initial condition: u(r,0) = I(r).
Pressure gradient f.
The following naming convention of variables are used.
===== ==========================================================
Name Description
===== ==========================================================
Nx The total number of mesh cells; mesh points are numbered
from 0 to Nx.
T The stop time for the simulation.
I Initial condition (Python function of x).
a Variable coefficient (constant).
R Length of the domain ([0,R]).
r Mesh points in space.
t Mesh points in time.
n Index counter in time.
u Unknown at current/new time level.
u_1 u at the previous time level.
dr Constant mesh spacing in r.
dt Constant mesh spacing in t.
===== ==========================================================
``user_action`` is a function of ``(u, r, t, n)``, ``u[i]`` is the
solution at spatial mesh point ``r[i]`` at time ``t[n]``, where the
calling code can add visualization, error computations, data analysis,
store solutions, etc.
"""
import scipy.sparse
import scipy.sparse.linalg
from numpy import linspace, zeros, random, array, ones, sum, log, sqrt
import time, sys
import sympy as sym
def solver_theta(I, a, R, Nr, D, T, theta=0.5, u_L=None, u_R=0,
                 user_action=None, f=0):
    """
    Solve the axi-symmetric diffusion equation

        u_t = (1/r)*(r*a(r)*u_r)_r + f(r,t)

    on (0, R) with the (implicit) theta-rule in time and centered
    differences in space.

    The array a has length Nr+1 and holds the values of
    a(x) at the mesh points.

    Nr is the total number of mesh cells; mesh points are numbered
    from 0 to Nr.
    D = dt/dr**2 and implicitly specifies the time step.
    T is the stop time for the simulation.
    I is a function of r (initial condition).
    u_L = None implies du/dr = 0, i.e. a symmetry condition at r=0;
    otherwise u_L gives the boundary value at r=0.
    u_R gives the boundary value at r=R (constant or function of t).
    f(r,t) is the pressure gradient (source term).
    user_action is a function of (u, r, t, n) where the calling code
    can add visualization, error computations, data analysis,
    store solutions, etc.

    Returns (u, t, cpu) with the solution at t=T, the time mesh,
    and the wall-clock time spent.

    r*alpha is needed midway between spatial mesh points - use
    arithmetic mean of successive mesh values (i.e. of r_i*alpha_i).
    """
    import time
    t0 = time.perf_counter()

    r = linspace(0, R, Nr+1)   # mesh points in space
    dr = r[1] - r[0]
    dt = D*dr**2
    Nt = int(round(T/float(dt)))
    t = linspace(0, T, Nt+1)   # mesh points in time

    # Accept constants for u_L, u_R and f; wrap them as functions so
    # the time loop can treat all cases uniformly.
    if isinstance(u_L, (float, int)):
        u_L_ = float(u_L)  # must take copy of u_L number
        u_L = lambda t: u_L_
    if isinstance(u_R, (float, int)):
        u_R_ = float(u_R)  # must take copy of u_R number
        u_R = lambda t: u_R_
    if isinstance(f, (float, int)):
        f_ = float(f)      # must take copy of f number
        f = lambda r, t: f_

    ra = r*a                   # help array in scheme
    inv_r = zeros(len(r)-2)    # needed for inner mesh points
    inv_r = 1.0/r[1:-1]

    u = zeros(Nr+1)     # solution array at t[n+1]
    u_1 = zeros(Nr+1)   # solution at t[n]

    Dl = 0.5*D*theta       # weight of the new (implicit) time level
    Dr = 0.5*D*(1-theta)   # weight of the old (explicit) time level

    # Representation of sparse matrix and right-hand side
    diagonal = zeros(Nr+1)
    lower = zeros(Nr)
    upper = zeros(Nr)
    b = zeros(Nr+1)

    # Precompute sparse matrix (scipy format)
    diagonal[1:-1] = 1 + Dl*(ra[2:] + 2*ra[1:-1] + ra[:-2])*inv_r
    lower[:-1] = -Dl*(ra[1:-1] + ra[:-2])*inv_r
    upper[1:] = -Dl*(ra[2:] + ra[1:-1])*inv_r
    # Insert boundary conditions.
    # Idiom fix: use `is None` (identity) rather than `== None`.
    if u_L is None:   # symmetry axis, du/dr = 0
        diagonal[0] = 1 + 8*a[0]*Dl
        upper[0] = -8*a[0]*Dl
    else:             # Dirichlet value at r=0
        diagonal[0] = 1
        upper[0] = 0
    diagonal[Nr] = 1  # Dirichlet value at r=R
    lower[-1] = 0

    A = scipy.sparse.diags(
        diagonals=[diagonal, lower, upper],
        offsets=[0, -1, 1],
        shape=(Nr+1, Nr+1),
        format='csr')
    #print A.todense()

    # Set initial condition
    for i in range(0, Nr+1):
        u_1[i] = I(r[i])

    if user_action is not None:
        user_action(u_1, r, t, 0)

    # Time loop
    for n in range(0, Nt):
        b[1:-1] = u_1[1:-1] + Dr*(
            (ra[2:] + ra[1:-1])*(u_1[2:] - u_1[1:-1]) -
            (ra[1:-1] + ra[0:-2])*(u_1[1:-1] - u_1[:-2]))*inv_r + \
            dt*theta*f(r[1:-1], t[n+1]) + \
            dt*(1-theta)*f(r[1:-1], t[n])
        # Boundary conditions
        if u_L is None:   # symmetry axis, du/dr = 0
            b[0] = u_1[0] + 8*a[0]*Dr*(u_1[1] - u_1[0]) + \
                   dt*theta*f(0, (n+1)*dt) + \
                   dt*(1 - theta)*f(0, n*dt)
        else:
            b[0] = u_L(t[n+1])
        b[-1] = u_R(t[n+1])
        #print b
        # Solve
        u[:] = scipy.sparse.linalg.spsolve(A, b)
        if user_action is not None:
            user_action(u, r, t, n+1)
        # Switch variables before next step
        u_1, u = u, u_1

    t1 = time.perf_counter()
    # return u_1, since u and u_1 are switched
    return u_1, t, t1-t0
def compute_rates(h_values, E_values):
    """Return observed convergence rates from successive (h, E) pairs.

    Each rate is log(E_{i+1}/E_i)/log(h_{i+1}/h_i), rounded to two
    decimals; an input with fewer than two points yields an empty list.
    """
    rates = []
    for i in range(len(h_values) - 1):
        rate = log(E_values[i+1]/E_values[i]) / \
               log(h_values[i+1]/h_values[i])
        rates.append(round(rate, 2))
    return rates
def make_a(alpha, r):
    """Evaluate the diffusion coefficient alpha on the mesh r.

    alpha is a function, generally of r, but may return a constant;
    multiplying by an array of ones guarantees a full mesh-aligned
    array either way. Note: when the solution is to be axi-symmetric,
    alpha must be so too.
    """
    return alpha(r) * ones(len(r))
def tests_with_alpha_and_u_exact():
    '''
    Test solver performance when alpha is either const or
    a fu of r, combined with a manufactured sol u_exact
    that is either a fu of r only, or a fu of both r and t.
    Note: alpha and u_e are defined as symb expr here, since
    test_solver_symmetric needs to automatically generate
    the source term f. After that, test_solver_symmetric
    redefines alpha, u_e and f as num functions.
    '''
    # Symbols: R = tube radius, r = radial coordinate, t = time.
    R, r, t = sym.symbols('R r t')
    # alpha const ...
    # ue = const
    print('Testing with alpha = 1.5 and u_e = R**2 - r**2...')
    test_solver_symmetric(alpha=1.5, u_exact=R**2 - r**2)
    # ue = ue(t)
    print('Testing with alpha = 1.5 and u_e = 5*t*(R**2 - r**2)...')
    test_solver_symmetric(alpha=1.5, u_exact=5*t*(R**2 - r**2))
    # alpha function of r ...
    # ue = const
    print('Testing with alpha = 1 + r**2 and u_e = R**2 - r**2...')
    test_solver_symmetric(alpha=1+r**2, u_exact=R**2 - r**2)
    # ue = ue(t)
    print('Testing with alpha = 1+r**2 and u_e = 5*t*(R**2 - r**2)...')
    test_solver_symmetric(alpha=1+r**2, u_exact=5*t*(R**2 - r**2))
def test_solver_symmetric(alpha, u_exact):
    '''
    Test solver performance for manufactured solution
    given in the function u_exact. Parameter alpha is
    either a const or a function of r. In the latter
    case, an "exact" sol can not be achieved, so then
    testing switches to conv. rates.
    R is tube radius and T is duration of simulation.
    alpha constant:
        Compares the manufactured solution with the
        solution from the solver at each time step.
    alpha function of r:
        convergence rates are tested (using the sol
        at the final point in time only).
    '''
    def compare(u, r, t, n):   # user_action function
        """Compare exact and computed solution."""
        u_e = u_exact(r, t[n])
        diff = abs(u_e - u).max()
        #print diff
        tol = 1E-12
        assert diff < tol, 'max diff: %g' % diff

    def pde_source_term(a, u):
        '''Return the terms in the PDE that the source term
        must balance, here du/dt - (1/r) * d/dr(r*a*du/dr).
        a, i.e. alpha, is either const or a fu of r.
        u is a symbolic Python function of r and t.'''
        return sym.diff(u, t) - \
               (1.0/r)*sym.diff(r*a*sym.diff(u, r), r)

    R, r, t = sym.symbols('R r t')
    # fit source term
    f = sym.simplify(pde_source_term(alpha, u_exact))

    R = 1.0   # radius of tube
    T = 2.0   # duration of simulation
    # Constant alpha admits an exact discrete check; variable alpha
    # is verified via convergence rates instead.
    if sym.diff(alpha, r) == 0:
        alpha_is_const = True
    else:
        alpha_is_const = False

    # make alpha, f and u_exact numerical functions
    alpha = sym.lambdify([r], alpha, modules='numpy')
    f = sym.lambdify([r, t], f.subs('R', R), modules='numpy')
    u_exact = sym.lambdify(
        [r, t], u_exact.subs('R', R), modules='numpy')
    I = lambda r: u_exact(r, 0)

    # some help variables
    FE = 0     # Forward Euler method
    BE = 1     # Backward Euler method
    CN = 0.5   # Crank-Nicolson method

    # test all three schemes
    for theta in (FE, BE, CN):
        print('theta: ', theta)
        E_values = []
        dt_values = []
        for Nr in (2, 4, 8, 16, 32, 64):
            print('Nr:', Nr)
            r = linspace(0, R, Nr+1)   # mesh points in space
            dr = r[1] - r[0]
            a_values = make_a(alpha, r)
            if theta == CN:
                dt = dr
            else:   # either FE or BE
                # use most conservative dt as decided by FE
                K = 1.0/(4*a_values.max())
                dt = K*dr**2
            D = dt/dr**2
            if alpha_is_const:
                u, t, cpu = solver_theta(
                    I, a_values, R, Nr, D, T,
                    theta, u_L=None, u_R=0,
                    user_action=compare, f=f)
            else:   # alpha depends on r
                u, t, cpu = solver_theta(
                    I, a_values, R, Nr, D, T,
                    theta, u_L=None, u_R=0,
                    user_action=None, f=f)
            # compute L2 error at t = T
            # NOTE(review): source indentation is ambiguous here; the error
            # is computed for every Nr (it is only *used* when alpha varies
            # with r) — confirm against the original notebook.
            u_e = u_exact(r, t[-1])
            e = u_e - u
            E = sqrt(dr*sum(e**2))
            E_values.append(E)
            dt_values.append(dt)
        if alpha_is_const is False:
            q = compute_rates(dt_values, E_values)
            print('theta=%g, q: %s' % (theta, q))
            expected_rate = 2 if theta == CN else 1
            tol = 0.1
            diff = abs(expected_rate - q[-1])
            print('diff:', diff)
            assert diff < tol
if __name__ == '__main__':
    # Run the verification suite when executed as a script.
    tests_with_alpha_and_u_exact()
    print('This is just a start. More remaining for this Exerc.')
# -
# <!-- --- end solution of exercise --- -->
#
# **c)**
# Verify the implementation in b) using a manufactured solution that is
# quadratic in $r$ and linear in $t$. Make a corresponding test function.
#
# <!-- --- begin hint in exercise --- -->
#
# **Hint.**
# You need to include an extra source term
# in the equation to allow for such tests. Let the spatial variation be
# $1-r^2$ such that the boundary condition is fulfilled.
#
# <!-- --- end hint in exercise --- -->
#
# **d)**
# Make animations for $m=1,16$ and $\alpha=1,0.1$. Choose $T$ such that
# the motion has reached a steady state (non-visible changes from period to
# period in $u$).
#
# **e)**
# For $\alpha\gg 1$, the scaling in a) is not good, because the
# characteristic time for changes (due to the pressure) is much smaller
# than the viscous diffusion time scale ($\alpha$ becomes large).
# We should in this case base
# the short time scale on $1/\omega$. Scale the model again, and
# make an animation for $m=1,16$ and $\alpha = 10$.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# Now the governing PDE becomes
# $$
# \frac{\partial u}{\partial t} =
# \alpha^{-1}\frac{1}{r}\frac{\partial}{\partial r}\left(
# r\frac{\partial u}{\partial r}\right) +
# \sin^m t,\quad r\in (0,1),\ t\in (0,T]\thinspace .
# $$
# In this case,
# $$
# u_c = \frac{A}{\varrho\omega}\thinspace .
# $$
# We see that for $\alpha\gg 1$, we can neglect the viscous term, and we
# basically have a balance between the acceleration and the driving pressure
# gradient:
# $$
# \frac{\partial u}{\partial t} = \sin^m t\thinspace .
# $$
# [hpl 1: This may be a great challenge numerically, since we have a plug
# independent of r that oscillates back and forth. CN is probably very
# unstable. Can make a point out of this. Try $\alpha=1$ and increase
# gently.]
#
# <!-- --- end solution of exercise --- -->
#
# Filename: `axisymm_flow`.
#
# <!-- --- end exercise --- -->
#
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Problem 6: Scaling a welding problem
# <div id="diffu:exer:welding"></div>
#
# Welding equipment makes a very localized heat source that moves in
# time. We shall investigate the heating due to welding and choose, for
# maximum simplicity, a one-dimensional heat equation with a fixed
# temperature at the ends, and we neglect melting. We shall scale the
# problem, and besides solving such a problem numerically, the aim is to
# investigate the appropriateness of alternative scalings.
#
# The governing PDE problem reads
# $$
# \begin{alignat*}{2}
# \varrho c\frac{\partial u}{\partial t} &= k\frac{\partial^2 u}{\partial x^2}
# + f, & x\in (0,L),\ t\in (0,T),\\
# u(x,0) &= U_s, & x\in [0,L],\\
# u(0,t) = u(L,t) &= 0, & t\in (0,T].
# \end{alignat*}
# $$
# Here, $u$ is the temperature, $\varrho$ the density of the material,
# $c$ a heat capacity, $k$ the heat conduction coefficient, $f$ is
# the heat source from the welding equipment, and $U_s$ is the
# initial constant (room) temperature in the material.
#
# A possible model for the heat source is a moving Gaussian function:
# $$
# f = A\exp{\left(-\frac{1}{2}\left(\frac{x-vt}{\sigma}\right)^2\right)},
# $$
# where $A$ is the strength, $\sigma$ is a parameter governing how
# peak-shaped (or localized in space) the heat source is, and
# $v$ is the velocity (in positive $x$ direction) of the source.
#
#
# **a)**
# Let $x_c$, $t_c$, $u_c$, and $f_c$ be scales, i.e., characteristic
# sizes, of $x$, $t$, $u$, and $f$, respectively. The natural choice of
# $x_c$ and $f_c$ is $L$ and $A$, since these make the scaled $x$ and
# $f$ in the interval $[0,1]$. If each of the three terms in the PDE
# are equally important, we can find $t_c$ and $u_c$ by demanding that
# the coefficients in the scaled PDE are all equal to unity. Perform
# this scaling. Use scaled quantities in the arguments for the
# exponential function in $f$ too and show that
# $$
# \bar f= e^{-\frac{1}{2}\beta^2(\bar x -\gamma \bar t)^2},
# $$
# where $\beta$ and $\gamma$ are dimensionless numbers. Give an
# interpretation of $\beta$ and $\gamma$.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# We introduce
# $$
# \bar x=\frac{x}{L},\quad \bar t = \frac{t}{t_c},\quad \bar u = \frac{u-U_s}{u_c},
# \quad \bar f=\frac{f}{A}\thinspace .
# $$
# Inserted in the PDE and dividing by $\varrho c u_c/t_c$ such that the
# coefficient in front of $\partial\bar u/\partial\bar t$ becomes unity,
# and thereby all terms become dimensionless, we get
# $$
# \frac{\partial\bar u}{\partial\bar t} =
# \frac{k t_c}{\varrho c L^2}\frac{\partial^2\bar u}{\partial\bar x^2}
# + \frac{A t_c}{\varrho c u_c}\bar f\thinspace .
# $$
# Demanding that all three terms are equally important, it follows that
# $$
# \frac{k t_c}{\varrho c L^2} = 1,\quad \frac{A t_c}{\varrho c u_c}=1\thinspace .
# $$
# These constraints imply the *diffusion time scale*
# $$
# t_c = \frac{\varrho cL^2}{k},
# $$
# and a scale for $u_c$,
# $$
# u_c = \frac{AL^2}{k}\thinspace .
# $$
# The scaled PDE reads
# $$
# \frac{\partial\bar u}{\partial\bar t} =
# \frac{\partial^2\bar u}{\partial\bar x^2}
# + \bar f\thinspace .
# $$
# Scaling $f$ results in
# $$
# \begin{align*}
# \bar f &= \exp{\left(-\frac{1}{2}\left(\frac{x-vt}{\sigma}\right)^2\right)}\\
# &= \exp{\left(-\frac{1}{2}\frac{L^2}{\sigma^2}
# \left(\bar x- \frac{vt_c}{L}t\right)^2\right)}\\
# &= \exp{\left(-\frac{1}{2}\beta^2\left(\bar x-\gamma \bar t\right)^2\right)},
# \end{align*}
# $$
# where $\beta$ and $\gamma$ are dimensionless numbers:
# $$
# \beta = \frac{L}{\sigma},\quad
# \gamma = \frac{vt_c}{L} = \frac{v\varrho cL}{k}\thinspace .
# $$
# The $\sigma$ parameter measures the width of the Gaussian peak, so
# $\beta$ is the ratio of the domain and the width of the heat source (large
# $\beta$ implies a very peak-formed heat source). The $\gamma$
# parameter arises from $t_c/(L/v)$, which is the ratio of the diffusion
# time scale and the time it takes for the heat source to travel through
# the domain. Equivalently, we can multiply by $t_c/t_c$ to get $\gamma
# = v/(t_cL)$ as the ratio between the velocity of the heat source and
# the diffusion velocity.
#
# <!-- --- end solution of exercise --- -->
#
# **b)**
# Argue that for large $\gamma$ we should base the time scale on the
# movement of the heat source. Show that this gives rise to the scaled
# PDE
# $$
# \frac{\partial\bar u}{\partial\bar t} =
# \gamma^{-1}\frac{\partial^2\bar u}{\partial\bar x^2}
# + \bar f,
# $$
# and
# $$
# \bar f = \exp{(-\frac{1}{2}\beta^2(\bar x - \bar t)^2)}\thinspace .
# $$
# Discuss when the scalings in a) and b) are appropriate.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# We perform the scaling as in a), but this time we determine $t_c$ such
# that the heat source moves with unit velocity. This means that
# $$
# \frac{vt_c}{L} = 1\quad\Rightarrow\quad t_c = \frac{L}{v}\thinspace .
# $$
# Scaling of the PDE gives, as before,
# $$
# \frac{\partial\bar u}{\partial\bar t} =
# \frac{k t_c}{\varrho c L^2}\frac{\partial^2\bar u}{\partial\bar x^2}
# + \frac{A t_c}{\varrho c u_c}\bar f\thinspace .
# $$
# Inserting the expression for $t_c$, we have
# $$
# \frac{\partial\bar u}{\partial\bar t} =
# \frac{k L}{\varrho c L^2v}\frac{\partial^2\bar u}{\partial\bar x^2}
# + \frac{A L}{v\varrho c u_c}\bar f\thinspace .
# $$
# We recognize the first coefficient as $\gamma^{-1}$, while $u_c$ can
# be determined from demanding the second coefficient to be unity:
# $$
# u_c = \frac{AL}{v\varrho c}\thinspace .
# $$
# The scaled PDE is therefore
# $$
# \frac{\partial\bar u}{\partial\bar t} =
# \gamma^{-1}\frac{\partial^2\bar u}{\partial\bar x^2}
# + \bar f\thinspace .
# $$
# If the heat source moves very fast, there is little time for the
# diffusion to transport the heat away from the source, and the heat
# conduction term becomes insignificant. This is reflected in the
# coefficient $\gamma^{-1}$, which is small when $\gamma$, the ratio of
# the heat source velocity and the diffusion velocity, is large.
#
# The scaling in a) is therefore appropriate if diffusion is a
# significant process, i.e., the welding equipment moves at a slow speed
# so heat can efficiently spread out by diffusion. For large $\gamma$,
# the scaling in b) is appropriate, and $t=1$ corresponds to having the
# heat source traveled through the domain (with the scaling in a), the
# heat source will leave the domain in short time).
#
# <!-- --- end solution of exercise --- -->
#
# **c)**
# One aim with scaling is to get a solution that lies in the interval
# $[-1,1]$. This is not always the case when $u_c$ is based on a scale
# involving a source term, as we do in a) and b). However, from the
# scaled PDE we realize that if we replace $\bar f$ with $\delta\bar f$,
# where $\delta$ is a dimensionless factor, this corresponds to
# replacing $u_c$ by $u_c/\delta$. So, if we observe that $\bar
# u\sim1/\delta$ in simulations, we can just replace $\bar f$ by $\delta
# \bar f$ in the scaled PDE.
#
# Use this trick and implement the two scaled models. Reuse software for
# the diffusion equation (e.g., the `solver` function in
# `diffu1D_vc.py`). Make a function `run(gamma, beta=10, delta=40,
# scaling=1, animate=False)` that runs the model with the given
# $\gamma$, $\beta$, and $\delta$ parameters as well as an indicator
# `scaling` that is 1 for the scaling in a) and 2 for the scaling in
# b). The last argument can be used to turn screen animations on or off.
#
# Experiments show that with $\gamma=1$ and $\beta=10$, $\delta =20$
# is appropriate. Then $\max |\bar u|$ will be larger than 4 for $\gamma
# =40$, but that is acceptable.
#
# Equip the `run` function with visualization, both animation of $\bar u$
# and $\bar f$, and plots with $\bar u$ and $\bar f$ for $t=0.2$ and $t=0.5$.
#
# <!-- --- begin hint in exercise --- -->
#
# **Hint.**
# Since the amplitudes of $\bar u$ and $\bar f$ differs by a factor $\delta$,
# it is attractive to plot $\bar f/\delta$ together with $\bar u$.
#
# <!-- --- end hint in exercise --- -->
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# Here is a possible `run` function:
# +
# from .diffu1D_vc import solver
import numpy as np
def run(gamma, beta=10, delta=40, scaling=1, animate=False):
    """Run the scaled model for welding.

    Parameters
    ----------
    gamma : float
        Ratio of the heat-source velocity to the diffusion velocity.
    beta : float
        Ratio of the domain length to the width of the Gaussian source.
    delta : float
        Dimensionless amplification of the source term (corresponds
        to replacing u_c by u_c/delta in the scaling).
    scaling : int
        1: diffusion time scale (exercise a),
        2: source-movement time scale (exercise b).
    animate : bool
        If True, animate u and f/delta on screen during the run.

    Returns the CPU time reported by the solver.
    Raises ValueError for an invalid `scaling` value (previously this
    fell through silently and crashed later with a NameError).
    """
    if scaling == 1:
        v = gamma        # source speed on the diffusion time scale
        a = 1
    elif scaling == 2:
        v = 1            # unit source speed on the source time scale
        a = 1.0/gamma    # diffusion coefficient becomes 1/gamma
    else:
        raise ValueError('scaling must be 1 or 2, got %s' % scaling)
    b = 0.5*beta**2
    L = 1.0
    ymin = 0
    # Need global to be able to change ymax in closure process_u
    global ymax
    ymax = 1.2
    I = lambda x: 0
    f = lambda x, t: delta*np.exp(-b*(x - v*t)**2)
    import time
    import scitools.std as plt
    plot_arrays = []
    def process_u(u, x, t, n):
        """user_action callback: animate, and store u at t=0.2 and t=0.5."""
        global ymax
        if animate:
            plt.plot(x, u, 'r-',
                     x, f(x, t[n])/delta, 'b-',
                     axis=[0, L, ymin, ymax], title='t=%f' % t[n],
                     xlabel='x', ylabel='u and f/%g' % delta)
        if t[n] == 0:
            time.sleep(1)
            plot_arrays.append(x)
        dt = t[1] - t[0]
        tol = dt/10.0
        if abs(t[n] - 0.2) < tol or abs(t[n] - 0.5) < tol:
            plot_arrays.append((u.copy(), f(x, t[n])/delta))
            if u.max() > ymax:
                ymax = u.max()
    Nx = 100
    D = 10
    T = 0.5
    u_L = u_R = 0
    theta = 1.0  # Backward Euler
    cpu = solver(
        I, a, f, L, Nx, D, T, theta, u_L, u_R, user_action=process_u)
    x = plot_arrays[0]
    plt.figure()
    for u, f in plot_arrays[1:]:
        plt.plot(x, u, 'r-', x, f, 'b--', axis=[x[0], x[-1], 0, ymax],
                 xlabel='$x$', ylabel=r'$u, \ f/%g$' % delta)
        plt.hold('on')
    plt.legend(['$u,\\ t=0.2$', '$f/%g,\\ t=0.2$' % delta,
                '$u,\\ t=0.5$', '$f/%g,\\ t=0.5$' % delta])
    filename = 'tmp1_gamma%g_s%d' % (gamma, scaling)
    s = 'diffusion' if scaling == 1 else 'source'
    plt.title(r'$\beta = %g,\ \gamma = %g,\ $' % (beta, gamma)
              + 'scaling=%s' % s)
    plt.savefig(filename + '.pdf'); plt.savefig(filename + '.png')
    return cpu
# -
# Note that we have dropped the bar notation in the plots. It is common
# to drop the bars as soon as the scaled problem is established.
#
# <!-- --- end solution of exercise --- -->
#
# **d)**
# Use the software in c) to investigate $\gamma=0.2,1,5,40$ for the
# two scalings. Discuss the results.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# For these investigations, we compare the two scalings for each of
# the different $\gamma$ values. An appropriate function for automating
# the tasks is
def investigate():
    """Do scientific experiments with the run function above.

    Runs both scalings for a range of gamma values, then combines
    the resulting image pairs side by side with doconce.
    """
    import glob
    import os  # was missing: os.remove/os.system/os.rename are used below
    # Clean up old files from previous runs
    for filename in glob.glob('tmp1_gamma*') + \
            glob.glob('welding_gamma*'):
        os.remove(filename)
    gamma_values = 1, 40, 5, 0.2, 0.025
    for gamma in gamma_values:
        for scaling in 1, 2:
            run(gamma=gamma, beta=10, delta=20, scaling=scaling)
    # Combine images: scaling a) on the left, scaling b) on the right
    for gamma in gamma_values:
        for ext in 'pdf', 'png':
            cmd = 'doconce combine_images -2 '\
                  'tmp1_gamma%(gamma)g_s1.%(ext)s '\
                  'tmp1_gamma%(gamma)g_s2.%(ext)s '\
                  'welding_gamma%(gamma)g.%(ext)s' % vars()
            os.system(cmd)
            # pdflatex doesn't like 0.2 in filenames...
            if '.' in str(gamma):
                os.rename(
                    'welding_gamma%(gamma)g.%(ext)s' % vars(),
                    ('welding_gamma%(gamma)g' % vars()).replace('.', '_')
                    + '.' + ext)
# We run here a Backward Euler scheme with $N_x=100$ and quite long
# time steps.
#
# Running the `investigate` function, we get the following plots:
#
# <!-- dom:FIGURE: [fig-diffu/welding_gamma0_025.png, width=800 frac=1] -->
# <!-- begin figure -->
#
# <p></p>
# <img src="fig-diffu/welding_gamma0_025.png" width=800>
#
# <!-- end figure -->
#
#
# <!-- dom:FIGURE: [fig-diffu/welding_gamma0_2.png, width=800 frac=1] -->
# <!-- begin figure -->
#
# <p></p>
# <img src="fig-diffu/welding_gamma0_2.png" width=800>
#
# <!-- end figure -->
#
#
# <!-- dom:FIGURE: [fig-diffu/welding_gamma1.png, width=800 frac=1] -->
# <!-- begin figure -->
#
# <p></p>
# <img src="fig-diffu/welding_gamma1.png" width=800>
#
# <!-- end figure -->
#
#
# <!-- dom:FIGURE: [fig-diffu/welding_gamma5.png, width=800 frac=1] -->
# <!-- begin figure -->
#
# <p></p>
# <img src="fig-diffu/welding_gamma5.png" width=800>
#
# <!-- end figure -->
#
#
# <!-- dom:FIGURE: [fig-diffu/welding_gamma40.png, width=800 frac=1] -->
# <!-- begin figure -->
#
# <p></p>
# <img src="fig-diffu/welding_gamma40.png" width=800>
#
# <!-- end figure -->
#
#
# For $\gamma\ll 1$ as in $\gamma = 0.025$, the heat source moves very
# slowly on the diffusion time scale and has hardly entered the medium,
# while the scaling in b) is not inappropriate, but a larger $\delta$ is
# needed to bring $\bar u$ around unity. We see that for $\gamma=0.2$,
# each of the scalings work, but with the diffusion time scale, the heat
# source has not moved much into the domain. For $\gamma=1$, the
# mathematical problems are identical and hence the plots too. For
# $\gamma=5$, the time scale based on the source is clearly the best
# choice, and for $\gamma=40$, only this scale is appropriate.
#
# A conclusion is that the scaling in b) works well for a range of $\gamma$
# values, also in the case $\gamma\ll 1$.
#
# <!-- --- end solution of exercise --- -->
#
# <!-- ===== Exercise: Radial heat conduction out of offshore pipelines ===== -->
#
# <!-- Easy to make something out of the ideas/5620/apps/offshore... mekit -->
# <!-- paper where one has a multi-walled radial heat conduction equation. -->
# <!-- Can, as in the paper, use one cell per material. Coupling to soil -->
# <!-- outside with many parameters given. The discussion of the Fourier -->
# <!-- number is interesting - I guess time changes here relates to -->
# <!-- BCs on the inner wall because the gas suddenly has a different -->
# <!-- temperature? Could be a good project perhaps; anyway, the theory -->
# <!-- can be written up. -->
#
# Filename: `welding`.
#
# <!-- --- end exercise --- -->
#
#
#
#
# <!-- --- begin exercise --- -->
#
# ## Exercise 7: Implement a Forward Euler scheme for axi-symmetric diffusion
# <div id="diffu:exer:axisymm"></div>
#
# Based on the discussion in the section [diffu:fd2:radial](#diffu:fd2:radial), derive in detail
# the discrete equations for a Forward Euler in time, centered in space,
# finite difference method for axi-symmetric diffusion. The
# diffusion coefficient may be a function of the radial coordinate.
# At the outer boundary $r=R$, we may have either a Dirichlet or Robin
# condition.
# Implement this scheme. Construct appropriate test problems.
#
#
# <!-- --- begin solution of exercise --- -->
# **Solution.**
# We start with the equation at $r=0$. According to the section [diffu:fd2:radial](#diffu:fd2:radial),
# we get
# $$
# \frac{u^{n+1}_0-u^n_0}{\Delta t} = 4\dfc(0)\frac{u_1^n - u^n_0}{\Delta r^2}
# + f_0^n\thinspace .
# $$
# For $i>0$, we have
# $$
# \begin{align*}
# \frac{u^{n+1}_i-u^n_i}{\Delta t} &= \frac{1}{r_i\Delta r^2}(
# \frac{1}{2}(r_i + r_{i+1})\frac{1}{2}(\dfc_i + \dfc_{i+1})(u^n_{i+1} - u^n_i) -\\
# &\qquad\frac{1}{2}(r_{i-1} + r_{i})\frac{1}{2}(\dfc_{i-1} + \dfc_{i})(u^n_{i} - u^n_{i-1}))
# + f_i^n
# \end{align*}
# $$
# Solving with respect to $u^{n+1}_i$ and introducing $D=\Delta t/\Delta r^2$
# results in
# $$
# \begin{align*}
# u^{n+1}_0 &= u^n_0 + 4D\dfc(0)(u_1^n - u^n_0)
# + f_0^n,\\
# u^{n+1}_i &= u^n_i + D\frac{1}{r_i}(
# \frac{1}{2}(r_i + r_{i+1})\frac{1}{2}(\dfc_i + \dfc_{i+1})(u^n_{i+1} - u^n_i) -\\
# &\qquad\frac{1}{2}(r_{i-1} + r_{i})\frac{1}{2}(\dfc_{i-1} + \dfc_{i})(u^n_{i} - u^n_{i-1}))
# + \Delta t f_i^n,\\
# &\qquad i = 1,\ldots,N_r-1,
# \end{align*}
# $$
# and $u^{n+1}_i$ at the end point $i=N_r$ is assumed known in case of
# a Dirichlet condition. A Robin condition
# $$
# -\dfc\frac{\partial u}{\partial n} = h_T(u-U_s),
# $$
# can be discretized at $i=N_r$ by
# $$
# -\alpha_i\frac{u_{i+1}^n-u_{i-1}^n}{2\Delta r} = h_T(u_i^n - U_s)\thinspace .
# $$
# Solving with respect to the value at the fictitious point $i+1$ gives
# $$
# u_{i+1}^n = u_{i-1}^n - 2\Delta r \frac{h_T}{\alpha_i}(u_i^n - U_s)\thinspace .
# $$
# This value is then inserted for $u_{i+1}^n$ in the discrete PDE at $i=N_r$.
#
# <!-- --- end solution of exercise --- -->
# Filename: `FE_axisym`.
#
# <!-- --- end exercise --- -->
| fdm-devito-notebooks/03_diffu/diffu_exer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demo of the Adaptive Ground Point Filtering Library
# This notebook gives a tour of the capabilities of `filteradapt`. It is continuously updated while we develop the library. You can execute the code in each cell by pressing `Ctrl+Enter`. To start your own project based on `filteradapt`, you can either edit this notebook or start a new one.
# The first thing to do in a Jupyter notebook that uses `filteradapt` is to import the library:
import filteradapt
# The next step you will typically do is to construct a dataset object by reading a LAS/LAZ file:
# + tags=[]
# Construct a DataSet by reading a LAS/LAZ point-cloud file from disk.
ds = filteradapt.DataSet("data/500k_NZ20_Westport.laz")
# -
# The data set can be visualized in the notebook through its `show()` method:
ds.show()
| jupyter/demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %%file sumArraysOnGPU.cu
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
#include <cuda_runtime.h>
// Return wall-clock time in seconds with microsecond resolution.
double cpuSecond(){
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + 1.e-6*(double)now.tv_usec;
}
// CHECK wraps a CUDA runtime call and aborts the program with
// file/line context if the call returns anything but cudaSuccess.
#define CHECK(call) \
{ \
    const cudaError_t error = call; \
    if (error != cudaSuccess) \
    { \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
        fprintf(stderr, "code: %d, reason: %s\n", error, \
                cudaGetErrorString(error)); \
        exit(1); \
    } \
}
// CUDA kernel: element-wise C = A + B, one thread per element.
__global__ void sumArraysOnDevice(float *A, float *B, float *C, const int N){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // global thread index
    if (idx < N) C[idx] = A[idx] + B[idx];  // guard: last block may overshoot N
}
// Fill ip[0..size-1] with pseudo-random floats in [0.0, 25.5].
void initialData(float *ip, int size){
    // Seed the generator only once per process. Re-seeding with
    // time() on every call (the original code) makes two calls within
    // the same second produce identical "random" arrays, so h_A and
    // h_B in main() were usually equal.
    static int seeded = 0;
    if (!seeded){
        seeded = 1;
        srand((unsigned int) time(NULL));
    }
    for (int i=0; i<size; i++){
        ip[i] = (float)(rand() & 0xFF) / 10.0f;  // 0..255 -> 0.0..25.5
    }
}
// CPU reference implementation: C = A + B, element by element.
void sumArraysOnHost(float *A, float *B, float *C, const int N){
    const float *pa = A;
    const float *pb = B;
    float *pc = C;
    for (int remaining = N; remaining > 0; remaining--){
        *pc++ = *pa++ + *pb++;
    }
}
// Compare host and GPU results element-wise within a tolerance and
// print whether the arrays match (first mismatch is reported).
void checkResult(float *hostRef, float *gpuRef, const int N){
    double epsilon = 1.0E-8;
    int match = 1;
    for (int i = 0; i < N; i++){
        // fabs, not abs: abs() takes an int, so the float difference
        // was truncated toward zero and any real mismatch smaller
        // than 1.0 could never be detected.
        if (fabs(hostRef[i] - gpuRef[i]) > epsilon){
            match = 0;
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n",
                   hostRef[i], gpuRef[i], i);
            break;
        }
    }
    if (match) printf("Arrays match. \n\n");
}
// Host driver: allocate and initialize host/device arrays, launch the
// kernel, time it, verify the GPU result against a CPU reference,
// and release all resources.
int main(int argc, char **argv){
    printf("%s Starting...\n", argv[0]);
    // malloc host memory
    int nElem = 1 <<24;  // 2^24 elements (64 MB per float array)
    size_t nBytes = nElem * sizeof(float);
    // initialize data at host side
    float *h_A, *h_B, *hostRef, *gpuRef;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);  // CPU reference result
    gpuRef = (float *)malloc(nBytes);   // result copied back from the GPU
    // initialize data at host side
    initialData(h_A, nElem);
    initialData(h_B, nElem);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);
    // malloc device global memory
    float *d_A, *d_B, *d_C;
    cudaMalloc((float**)&d_A, nBytes);
    cudaMalloc((float**)&d_B, nBytes);
    cudaMalloc((float**)&d_C, nBytes);
    // Use cudaMemcpy to transfer the data from the host memory to the GPU global memory with the
    // parameter cudaMemcpyHostToDevice specifying the transfer direction.
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
    // invoke kernel at host side
    int iLen = 128;  // threads per block
    dim3 block(iLen);
    dim3 grid((nElem+block.x-1)/block.x);  // ceil(nElem/block.x) blocks
    double iStart = cpuSecond();
    sumArraysOnDevice<<<grid, block>>>(d_A, d_B, d_C, nElem);
    // Kernel launches are asynchronous: synchronize before stopping the timer.
    CHECK(cudaDeviceSynchronize());
    double iElaps = cpuSecond() - iStart;
    printf("sumArraysOnGPU <<<%d,%d>>> Time elapsed %f sec\n", grid.x, block.x, iElaps);
    //printf("Execution configuration <<<%d, %d>>>\n", grid.x, block.x);
    // copy kernel result back to host side
    cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
    // add vector at host side for result checks
    sumArraysOnHost(h_A, h_B, hostRef, nElem);
    for (int i=0; i<10; i++){
        printf("%f + %f = %f \n", h_A[i], h_B[i], hostRef[i]);
    }
    // check device results
    checkResult(hostRef, gpuRef, nElem);
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
    // use cudaFree to release the memory used on the GPU
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaDeviceReset();
    return (0);
}
# + language="bash"
# nvcc sumArraysOnGPU.cu -o addvector
# ./addvector
# -
# ## Timing with nvprof
# !nvprof --unified-memory-profiling off ./addvector
# !nvprof --help
| cuda-c/src/cuda-programming-model/04-timing-kernel.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="iPiqbmznkBAa"
# loading R in Google Colab
# %load_ext rpy2.ipython
# + id="_OPKtgUak0Fv" language="R"
# # installing the 'Palmer Penguins' dataset in the environment
# install.packages("palmerpenguins")
# library(palmerpenguins)
# + colab={"base_uri": "https://localhost:8080/"} id="lQUXRBOVlQfP" outputId="30cbd3c4-23b6-406b-818e-c73c876ed54a" language="R"
# # head() function to view the first 6 rows of the dataset
# head(penguins)
# + colab={"base_uri": "https://localhost:8080/"} id="ffS-g81Jjb7A" outputId="03f79f19-ffb6-4316-e405-facd30278ee1" language="R"
# # head(..,n) to view the first n rows of the dataset
# head(penguins, n=10)
# + colab={"base_uri": "https://localhost:8080/"} id="DKJnJlYxjq3g" outputId="d22683c1-9dfa-4bcc-e42c-e3f39dcb53f1" language="R"
# # tail() to view the last 6 rows of the dataset
# tail(penguins)
# + colab={"base_uri": "https://localhost:8080/"} id="tFC2JTpKjx9F" outputId="1403c373-1afd-4f2f-9088-a50c3e41f92f" language="R"
# # tail(..,n) to view the last n rows of the dataset
# tail(penguins, n=10)
# + colab={"base_uri": "https://localhost:8080/"} id="KajX6WFipPAU" outputId="8e4a4fae-2f92-4ea5-822f-fb9d1c31b03c" language="R"
# # obtaining the dimensions of the dataset
# dim(penguins)
# + colab={"base_uri": "https://localhost:8080/"} id="eCw75_d_ltsd" outputId="27e31fdd-9a0a-4e48-d6b0-e36d2cdebc00" language="R"
# # str() function to view the structure of the dataset
# str(penguins)
# + id="nGjkgO9Vnbwu" language="R"
# # installing the package 'tidyverse' in the environment
# install.packages('tidyverse')
# library(tidyverse)
# + colab={"base_uri": "https://localhost:8080/"} id="ys1CJ8TFofdE" outputId="5e6bddba-713a-4af5-f7fb-bec982f5400c" language="R"
# # count() function to get count of penguins by species
# penguins %>%
# count(species)
# + id="lt5oIUUfpuNk" colab={"base_uri": "https://localhost:8080/"} outputId="ba05f76f-2d4f-41bf-f460-00968c789b7a" language="R"
# # summarize() function to get the average body mass of penguins
# penguins %>%
# summarize(avg_body_mass = mean(body_mass_g, na.rm=TRUE))
# + id="ABytjZFBoZRK" colab={"base_uri": "https://localhost:8080/"} outputId="b66c1d61-439c-4b1a-d9cb-f86f2192a603" language="R"
# # group_by() function to get the average body mass of penguins by species
# penguins %>%
# group_by(species) %>%
# summarize(avg_body_mass = mean(body_mass_g, na.rm=TRUE))
# + id="h-QjgNsjHq_j" colab={"base_uri": "https://localhost:8080/"} outputId="2b797837-9bca-43d5-ea3d-d85792612ef2" language="R"
# # filter() function to filter out penguins having body mass less than the average body mass of their species
# penguins %>%
# group_by(species) %>%
# filter(body_mass_g < mean(body_mass_g, na.rm=TRUE))
# + id="TOM_cqBvIT2g" colab={"base_uri": "https://localhost:8080/"} outputId="e2b5949e-1848-4202-d675-614be275c1d9" language="R"
# # arrange() function to sort body masses of penguins having body mass less than the average body mass of their species in descending order
# penguins %>%
# group_by(species) %>%
# filter(body_mass_g < mean(body_mass_g, na.rm=TRUE)) %>%
# arrange(desc(body_mass_g))
# + id="lKYTahCXJ_3u" language="R"
# # installing the package 'ggplot2' in the environment
# install.packages("ggplot2")
# library(ggplot2)
# + id="wS0kA3ERLYTk" colab={"base_uri": "https://localhost:8080/", "height": 497} outputId="e4d7fbba-71af-4b94-db87-dee2edd70631" language="R"
# # basic scatter plot of flipper length vs body mass
# ggplot(penguins, aes(x=body_mass_g, y=flipper_length_mm)) +
# geom_point ()
# + id="wavbmiGsNbN3" colab={"base_uri": "https://localhost:8080/", "height": 497} outputId="d5fe0422-9e2a-436c-af07-e974023d3f71" language="R"
# # basic line graph of flipper length vs body mass
# ggplot(penguins, aes(x=body_mass_g, y=flipper_length_mm)) +
# geom_line ()
# + id="OJv3dP9DNBSl" colab={"base_uri": "https://localhost:8080/", "height": 497} outputId="0977bb4e-868e-46d9-8a08-29a0f82f0243" language="R"
# # basic bar plot of body mass
# ggplot(penguins, aes(x=body_mass_g)) +
# geom_bar ()
# + id="aJkE1AdWNlQn" colab={"base_uri": "https://localhost:8080/", "height": 530} outputId="444a8df8-d627-4cbd-fe9b-38e7f9ae49be" language="R"
# # basic histogram of body mass
# ggplot(penguins, aes(x=body_mass_g)) +
# geom_histogram ()
# + id="np_4bDd6NxCs" colab={"base_uri": "https://localhost:8080/", "height": 497} outputId="0de27e8a-ef84-452c-eadb-0db8dc65b0f8" language="R"
# # scatter plot with colour aesthetic
# ggplot(penguins, aes(x=body_mass_g, y = flipper_length_mm, colour=species)) +
# geom_point ()
# + id="FmQLFEc5N7xv" colab={"base_uri": "https://localhost:8080/", "height": 497} outputId="5c27420d-3ab4-46cf-f141-652c9be51fdb" language="R"
# # scatter plot with colour and size aesthetic
# ggplot(penguins, aes(x=body_mass_g, y = flipper_length_mm, colour=species, size=species)) +
# geom_point ()
# + id="2V3o39vnNzja" colab={"base_uri": "https://localhost:8080/", "height": 497} outputId="7292c7df-5a88-4b75-cfd6-0538b2633446" language="R"
# # scatter plot with colour, size and shape aesthetic
# ggplot(penguins, aes(x=body_mass_g, y = flipper_length_mm, colour=species, size=species, shape=species)) +
# geom_point ()
# + id="PeNbnTNNOHyl" colab={"base_uri": "https://localhost:8080/", "height": 497} outputId="993544fc-f3fe-4b2d-a0d9-855da58e41a2" language="R"
# # scatter plot with colour, size and shape aesthetic and labels
# ggplot(penguins, aes(x=body_mass_g, y = flipper_length_mm, colour=species, size=species, shape=species)) +
# geom_point () +
# labs(title='Palmer Penguins', subtitle = 'To demonstrate the use of aesthetics', caption = 'Note how the species have different colours, shapes and sizes',
# x = 'Body Mass', y = 'Flipper Length')
# + colab={"base_uri": "https://localhost:8080/"} id="wBdLJTA2pNgG" outputId="e1d6396f-e312-4fdc-de6e-fdd632c276ce" language="R"
# # selecting only the numeric columns
# penguins = select_if(penguins, is.numeric)
# penguins
# + colab={"base_uri": "https://localhost:8080/"} id="sfp3N_qltyiv" outputId="c70925d3-2017-4a83-e937-0dd9f58400e2" language="R"
# # dropping rows with missing values
# penguins = na.omit(penguins)
# sum(is.na(penguins))
# + id="gp9-0kFZuwCV" language="R"
# # calculating the correlations
# corr = cor(penguins)
# + id="k5q6p1mUvKOw" language="R"
# # installing the package 'corrplot' in the environment
# install.packages("corrplot")
# library(corrplot)
# + colab={"base_uri": "https://localhost:8080/", "height": 497} id="6oY1dTJUvc8X" outputId="1158124e-f60c-4f1d-8ae7-1d769e4419cb" language="R"
# # plotting heatmap
# corrplot(corr)
| eda on palmer penguins dataset/EDA-PalmerPenguins.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="hf6P0mwYVayb" colab_type="text"
# **Importing PyTorch into the environment**
# + id="JIY7QJba6RLj" colab_type="code" colab={}
import torch
# + [markdown] id="IEMcSvHKVfbR" colab_type="text"
# **Using higher - level abstraction. First, we need to set the size of our random training data.**
# + id="ZArIwktcRTuv" colab_type="code" colab={}
batch_size = 32    # number of samples per training batch
input_shape = 5    # number of input features per sample
output_shape = 10  # number of target values per sample
# + [markdown] id="kua6MFaMVpOE" colab_type="text"
# **To make use of GPUs, cast the tensors as follows.**
# **This ensures that all computations will use the attached GPU**
# + id="V3OjBFUTR8ld" colab_type="code" colab={}
torch.set_default_tensor_type('torch.cuda.FloatTensor')
# + [markdown] id="at-TiU8cV9Y0" colab_type="text"
# **Generating random training data:**
# + id="BGEFkThtSDoJ" colab_type="code" colab={}
from torch.autograd import Variable
# Random inputs X (batch_size x input_shape) and targets y (batch_size x output_shape).
# NOTE(review): Variable has been a no-op wrapper around Tensor since PyTorch 0.4;
# plain torch.randn(...) would behave identically on modern PyTorch — confirm the
# target torch version before modernizing.
X = Variable(torch.randn(batch_size, input_shape))
y = Variable(torch.randn(batch_size, output_shape),
             requires_grad = False)
# + [markdown] id="TNML_uWRWG_v" colab_type="text"
# **Using a simple neural network having 1 hidden layer with 32 units and an output layer. We use the .cuda() extension to make sure the model runs on the GPU**
# + id="ylDT0vD4Sq-6" colab_type="code" colab={}
# Feed-forward network: one hidden layer with 32 units, then the output layer.
# .cuda() moves all parameters to the GPU (matching the default tensor type set above).
model = torch.nn.Sequential(
    torch.nn.Linear(input_shape, 32),
    torch.nn.Linear(32, output_shape),
).cuda()
# + [markdown] id="Eg7LQY_sX9dP" colab_type="text"
# **Defining the MSE loss function**
# + id="wfhUJEYBS8sC" colab_type="code" colab={}
loss_function = torch.nn.MSELoss()
# + [markdown] id="o1EAe5k4YCAu" colab_type="text"
# **Training the model for 10 epochs:**
# + id="zzrLq_OATFXp" colab_type="code" outputId="c96290c6-2e3c-47a7-f3db-8296629106f9" colab={"base_uri": "https://localhost:8080/", "height": 196}
# Train for a fixed number of epochs with a hand-rolled SGD update.
learning_rate = 0.001
n_epochs = 10
for epoch in range(n_epochs):
    # Forward pass: predictions and MSE loss against the targets.
    loss = loss_function(model(X), y)
    print(loss)
    # Clear any accumulated gradients, then backpropagate.
    model.zero_grad()
    loss.backward()
    # Gradient-descent step on every parameter.
    for param in model.parameters():
        param.data -= param.grad.data * learning_rate
| Chapter 1/3_Using PyTorch's dynamic computation graphs for RNNs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Нейронные сети: зависимость ошибки и обучающей способности от числа нейронов
# В этом задании вы будете настраивать двуслойную нейронную сеть для решения задачи многоклассовой классификации. Предлагается выполнить процедуры загрузки и разбиения входных данных, обучения сети и подсчета ошибки классификации. Предлагается определить оптимальное количество нейронов в скрытом слое сети. Нужно так подобрать число нейронов, чтобы модель была с одной стороны несложной, а с другой стороны давала бы достаточно точный прогноз и не переобучалась. Цель задания -- показать, как зависит точность и обучающая способность сети от ее сложности.
# Для решения задачи многоклассовой классификации предлагается воспользоваться библиотекой построения нейронных сетей [pybrain](http://pybrain.org/). Библиотека содержит основные модули инициализации двуслойной нейронной сети прямого распространения, оценки ее параметров с помощью метода обратного распространения ошибки (backpropagation) и подсчета ошибки.
#
# Установить библиотеку pybrain можно с помощью стандартной системы управления пакетами pip:
#
# ```
# pip install pybrain
# ```
# Кроме того, для установки библиотеки можно использовать и другие способы, приведенные в [документации](https://github.com/pybrain/pybrain/wiki/installation).
# ### Используемые данные
#
# Рассматривается задача оценки качества вина по его физико-химическим свойствам [1]. Данные размещены в [открытом доступе](https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv) в репозитории UCI и содержат 1599 образцов красного вина, описанных 11 признаками, среди которых -- кислотность, процентное содержание сахара, алкоголя и пр. Кроме того, каждому объекту поставлена в соответствие оценка качества по шкале от 0 до 10. Требуется восстановить оценку качества вина по исходному признаковому описанию.
#
# [1] P. Cortez, A. Cerdeira, F. Almeida, T. Matos and J. Reis. Modeling wine preferences by data mining from physicochemical properties. In Decision Support Systems, Elsevier, 47(4):547-553, 2009.
# +
# Выполним инициализацию основных используемых модулей
# %matplotlib inline
import random
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize
import numpy as np
# -
# Выполним загрузку данных
# Load the wine-quality dataset; fields are ';'-separated.
with open('winequality-red.csv') as f:
    f.readline()  # skip the header line with column names
    data = np.loadtxt(f, delimiter=';')
# В качестве альтернативного варианта, можно выполнить загрузку данных напрямую из репозитория UCI, воспользовавшись библиотекой urllib.
# BUG FIX: urllib.urlopen was removed in Python 3 (this notebook runs on a
# Python 3 kernel) — the download now lives in urllib.request.urlopen.
import urllib.request
# URL for the Wine Quality Data Set (UCI Machine Learning Repository)
url = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
# Download the file
f = urllib.request.urlopen(url)
f.readline()  # skip the header line with column names
data = np.loadtxt(f, delimiter=';')
# Выделим из данных целевую переменную. Классы в задаче являются несбалинсированными: основной доле объектов поставлена оценка качества от 5 до 7. Приведем задачу к трехклассовой: объектам с оценкой качества меньше пяти поставим оценку 5, а объектам с оценкой качества больше семи поставим 7.
# +
TRAIN_SIZE = 0.7  # split the data into train/test parts in a 70/30% proportion

# BUG FIX: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

y = data[:, -1]
# Collapse the quality scores to three classes: clip everything below 5 up to 5
# and everything above 7 down to 7, then shift so the labels start at 0.
np.place(y, y < 5, 5)
np.place(y, y > 7, 7)
y -= min(y)
X = data[:, :-1]
X = normalize(X)  # scale each sample to unit norm
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=TRAIN_SIZE, random_state=0)
# -
# ### Двуслойная нейронная сеть
#
# # Двуслойная нейронная сеть представляет собой функцию распознавания, которая может быть записана в виде следующей суперпозиции:
#
# $f(x,W)=h^{(2)}\left(\sum\limits_{i=1}^D w_i^{(2)}h^{(1)}\left(\sum\limits_{j=1}^n w_{ji}^{(1)}x_j+b_i^{(1)}\right)+b^{(2)}\right)$, где
#
# $x$ -- исходный объект (сорт вина, описанный 11 признаками), $x_j$ -- соответствующий признак,
#
# $n$ -- количество нейронов во входном слое сети, совпадающее с количеством признаков,
#
# $D$ -- количество нейронов в скрытом слое сети,
#
# $w_i^{(2)}, w_{ji}^{(1)}, b_i^{(1)}, b^{(2)}$ -- параметры сети, соответствующие весам нейронов,
#
# $h^{(1)}, h^{(2)}$ -- функции активации.
#
# В качестве функции активации на скрытом слое сети используется линейная функция. На выходном слое сети используется функция активации softmax, являющаяся обобщением сигмоидной функции на многоклассовый случай:
#
# $y_k=\text{softmax}_k(a_1,...,a_k)=\frac{\exp(a_k)}{\sum_{k=1}^K\exp(a_k)}.$
#
#
# ### Настройка параметров сети
#
# Оптимальные параметры сети $W_{opt}$ определяются путем минимизации функции ошибки:
#
# $W_{opt}=\arg\min\limits_{W}L(W)+\lambda\|W\|^2$.
#
# Здесь $L(W)$ является функцией ошибки многоклассовой классификации,
#
# $L(W)=- \sum^N_{n=1}\sum^K_{k=1} t_{kn} log(y_{kn}),$
#
# $t_{kn}$ -- бинарно закодированные метки классов, $K$ -- количество меток, $N$ -- количество объектов,
#
# а $\lambda\|W\|^2$ является регуляризующим слагаемым, контролирующим суммарный вес параметров сети и предотвращающий эффект переобучения.
#
# Оптимизация параметров выполняется методом обратного распространения ошибки (backpropagation).
# Выполним загрузку основных модулей: ClassificationDataSet -- структура данных pybrain, buildNetwork -- инициализация нейронной сети, BackpropTrainer -- оптимизация параметров сети методом backpropagation, SoftmaxLayer -- функция softmax, соответствующая выходному слою сети, percentError -- функцию подсчета ошибки классификации (доля неправильных ответов).
from pybrain.datasets import ClassificationDataSet # Структура данных pybrain
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer
from pybrain.structure.modules import SoftmaxLayer
from pybrain.utilities import percentError
# Инициализируем основные параметры задачи: HIDDEN_NEURONS_NUM -- количество нейронов скрытого слоя, MAX_EPOCHS -- максимальное количество итераций алгоритма оптимизации
# Определение основных констант
HIDDEN_NEURONS_NUM = 700 # number of neurons in the hidden layer of the network
MAX_EPOCHS = 100 # maximum number of iterations of the network-parameter optimisation
# Инициализируем структуру данных ClassificationDataSet, используемую библиотекой pybrain. Для инициализации структура принимает два аргумента: количество признаков *np.shape(X)[1]* и количество различных меток классов *len(np.unique(y))*.
#
# Кроме того, произведем бинаризацию целевой переменной с помощью функции *_convertToOneOfMany( )* и разбиение данных на обучающую и контрольную части.
# Convert the data into pybrain's ClassificationDataSet structure
# Training part
ds_train = ClassificationDataSet(np.shape(X)[1], nb_classes=len(np.unique(y_train)))
# First argument: the number of features np.shape(X)[1];
# second argument: the number of class labels len(np.unique(y_train))
ds_train.setField('input', X_train) # set the objects
ds_train.setField('target', y_train[:, np.newaxis]) # set the answers; np.newaxis makes a column vector
ds_train._convertToOneOfMany( ) # one-hot (binarise) the answer vector
# Test part, built the same way as the training part
ds_test = ClassificationDataSet(np.shape(X)[1], nb_classes=len(np.unique(y_train)))
ds_test.setField('input', X_test)
ds_test.setField('target', y_test[:, np.newaxis])
ds_test._convertToOneOfMany( )
# Инициализируем двуслойную сеть и произведем оптимизацию ее параметров. Аргументами для инициализации являются:
#
# ds.indim -- количество нейронов на входном слое сети, совпадает с количеством признаков (в нашем случае 11),
#
# HIDDEN_NEURONS_NUM -- количество нейронов в скрытом слое сети,
#
# ds.outdim -- количество нейронов на выходном слое сети, совпадает с количеством различных меток классов (в нашем случае 3),
#
# SoftmaxLayer -- функция softmax, используемая на выходном слое для решения задачи многоклассовой классификации.
# +
np.random.seed(0) # fix the seed so the result is reproducible
# Build a feed-forward network
net = buildNetwork(ds_train.indim, HIDDEN_NEURONS_NUM, ds_train.outdim, outclass=SoftmaxLayer)
# ds.indim  -- number of input-layer neurons, equal to the number of features
# ds.outdim -- number of output-layer neurons, equal to the number of class labels
# SoftmaxLayer -- output activation suitable for multi-class classification
init_params = np.random.random((len(net.params))) # random initial weights, reproducible thanks to the seed
net._setParameters(init_params)
# -
# Выполним оптимизацию параметров сети. График ниже показывает сходимость функции ошибки на обучающей/контрольной части.
random.seed(0)
# pybrain's parameter-tuning module uses the random module internally;
# fix the seed so the optimisation is reproducible
trainer = BackpropTrainer(net, dataset=ds_train) # backpropagation optimiser
err_train, err_val = trainer.trainUntilConvergence(maxEpochs=MAX_EPOCHS)
line_train = plt.plot(err_train, 'b', err_val, 'r') # training (blue) vs validation (red) error curves
xlab = plt.xlabel('Iterations')
ylab = plt.ylabel('Error')
# Misclassification rate (percent) on the training and test sets.
res_train = net.activateOnDataset(ds_train).argmax(axis=1) # predicted classes on the training set
print('Error on train: ', percentError(res_train, ds_train['target'].argmax(axis=1)), '%') # error rate
res_test = net.activateOnDataset(ds_test).argmax(axis=1) # predicted classes on the test set
print('Error on test: ', percentError(res_test, ds_test['target'].argmax(axis=1)), '%') # error rate
# ### Задание. Определение оптимального числа нейронов.
# В задании требуется исследовать зависимость ошибки на контрольной выборке в зависимости от числа нейронов в скрытом слое сети. Количество нейронов, по которому предполагается провести перебор, записано в векторе
# ```
# hidden_neurons_num = [50, 100, 200, 500, 700, 1000]
# ```
#
# 1. Для фиксированного разбиения на обучающую и контрольную части подсчитайте долю неправильных ответов (ошибок) классификации на обучении/контроле в зависимости от количества нейронов в скрытом слое сети. Запишите результаты в массивы ```res_train_vec``` и ```res_test_vec```, соответственно. С помощью функции ```plot_classification_error``` постройте график зависимости ошибок на обучении/контроле от количества нейронов. Являются ли графики ошибок возрастающими/убывающими? При каком количестве нейронов достигается минимум ошибок классификации?
#
# 2. С помощью функции ```write_answer_nn``` запишите в выходной файл число: количество нейронов в скрытом слое сети, для которого достигается минимум ошибки классификации на контрольной выборке.
# +
random.seed(0) # fix both RNGs so the experiment below is reproducible
np.random.seed(0)
def plot_classification_error(hidden_neurons_num, res_train_vec, res_test_vec):
    """Plot train (blue) and test (red) classification error vs. hidden-layer size.

    hidden_neurons_num -- array of size h with the neuron counts to try,
        hidden_neurons_num = [50, 100, 200, 500, 700, 1000];
    res_train_vec -- array of size h with misclassification rates on the training set;
    res_test_vec -- array of size h with misclassification rates on the test set
        (the original comment mislabelled this as res_train_vec).
    """
    plt.figure()
    plt.plot(hidden_neurons_num, res_train_vec)
    plt.plot(hidden_neurons_num, res_test_vec, '-r')
def write_answer_nn(optimal_neurons_num):
    """Persist the optimal hidden-layer size to the answer file."""
    answer_file = open("nnets_answer1.txt", "w")
    try:
        answer_file.write(str(optimal_neurons_num))
    finally:
        answer_file.close()
hidden_neurons_num = [50, 100, 200, 500, 700, 1000] # hidden-layer sizes to try
res_train_vec = list() # misclassification rate on the training set, per size
res_test_vec = list() # misclassification rate on the test set, per size
for nnum in hidden_neurons_num:
    # Re-seed before every run so each network size starts from the same
    # random state and the comparison is fair.
    random.seed(0)
    np.random.seed(0)
    # Build and initialise a fresh network with nnum hidden neurons.
    net = buildNetwork(ds_train.indim, nnum, ds_train.outdim, outclass=SoftmaxLayer)
    init_params = np.random.random((len(net.params)))
    net._setParameters(init_params)
    # Train with backpropagation and record the train/test error rates.
    trainer = BackpropTrainer(net, dataset=ds_train)
    trainer.trainUntilConvergence(maxEpochs=MAX_EPOCHS)
    res_train = net.activateOnDataset(ds_train).argmax(axis=1)
    res_test = net.activateOnDataset(ds_test).argmax(axis=1)
    res_train_vec.append(percentError(res_train, ds_train['target'].argmax(axis=1)))
    res_test_vec.append(percentError(res_test, ds_test['target'].argmax(axis=1)))
# Plot train/test error as a function of the number of hidden neurons
plot_classification_error(hidden_neurons_num, res_train_vec, res_test_vec)
# Write out the neuron count with the minimum error on the test set
write_answer_nn(hidden_neurons_num[res_test_vec.index(min(res_test_vec))])
# -
# !cat nnets_answer1.txt
write_answer_nn(700)
| Course_2/Week_5/task_nn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Predictive Maintenance using Machine Learning on Sagemaker
# *Part 1 - Data preparation*
# ## Initialization
# ---
# Directory structure to run this notebook:
# ```
# sagemaker-predictive-maintenance
# |
# +--- data
# | |
# | +--- interim: intermediate data we can manipulate and process
# | |
# | \--- raw: *immutable* data downloaded from the source website
# |
# +--- notebooks: all the notebooks are positionned here
# ```
import os
import pandas as pd
import sagemaker
import boto3
# ## Introduction
# ---
# Imagine you are the manager at a manufacturing company responsible for monitoring assembly lines. Each assembly line contains multiple kinds of machines that must work continuously and reliably to produce ready-to-ship products as can be seen in the image below. IoT sensors placed on these machines monitor electricity consumption, noise generated, vibration, temperature and various other measurable quantities that are used to monitor the health of each machine. Sudden breakdown of any of these machines across multiple assembly lines will lead to:
#
# * Unscheduled downtime and resulting delays in delivering your product to market
# * Cost incurred due to delays, and hiring maintenance workers to repair and possibly replace parts of the machine that caused the breakdown
#
# You have been tasked with researching a technique called “predictive maintenance”, especially after your competitors Advantech, Inc. have published a report (http://www.advantech.com/industrial-automation/industry4.0/pms#my_cen). Additionally, you are intrigued to see if Machine Learning can help with this problem. Your team's collective research notes regarding a potential proof-of-concept that you will be building is included here.
#
# ## Reactive, Predictive or Preventive Maintenance
# ---
# Maintenance schedules in typical manufacturing and energy companies that involve large number of machines performing tasks are typically a result of “reactive” or “preventive” maintenance. A reactive maintenance task is scheduled if a machine breaks down (or fails), or is operating in a known degrade state of operation. Preventive maintenance is triggered by usage or time or a fixed schedule. As an example, for car owners, reactive maintenance occurs after there is an failure of a component (stalled engine, punctured tire, etc.), whereas preventive maintenance occurs on a fixed schedule (for example, tire rotation or oil change every 10000 miles) even though there may not be a need for doing so.
#
# “Predictive” maintenance implies that maintenance is scheduled when a system predicts the possible occurrence of a failure even in the future. This solves problems that are common in reactive and preventive maintenance - 1. reactive maintenance adds unnecessary time and cost to project schedules, since maintenance workers are deployed only after discovery of a failure event; and 2. preventive maintenance adds unnecessarily frequent maintenance tasks, therefore increasing wait times and costs for the end user. Currently, predictive maintenance techniques involve simple monitoring-and-thresholding, or statistical techniques to identify anomalies from sensor data. However, these techniques are limited to use by Subject Matter Experts (SMEs) and depend on human-generated thresholds. With Machine Learning (ML), it is possible to train models to detect abnormal patterns from sensor data. The trained ML model does not require rules or pre-programmed thresholds, and vast amounts of data can be analyzed repeatably with no need of human involvement.
#
# ## NASA Turbofan Engine Fault Dataset
# ---
# ### Background
#
# NASA's Prognostic Center of Excellence established a repository with datasets to be used for benchmarking prognostics and predictive maintenance related algorithms. Among these datasets involves data from a turbofan engine simulation model C-MAPPS (or Commercial Modular Aero Propulsion System Simulation). The references section contains details about the over 100 publications using this dataset. C-MAPPS is a tool used to generate health, control and engine parameters from a simulated turbofan engine. A custom code wrapper was used to inject synthetic faults and continuous degradation trends into a time series of sensor data.
#
# <img src="../../common-assets/pictures/turbofan.png" width="640" />
#
# Some high level characteristics of this dataset are as follows:
#
# 1. The data obtained is from a **high fidelity simulation** of a turbofan engine, but closely models the sensor values of an actual engine.
# 2. **Synthetic noise** was added to the dataset to replicate real-world scenarios.
# 3. The effects of faults are masked due to operational conditions, which is a common trait of most real world systems.
#
# Dataset can be downloaded from here: https://ti.arc.nasa.gov/tech/dash/groups/pcoe/prognostic-data-repository/#turbofan.
# +
import shutil

# Check whether the working datasets are already in place.
ok = True
ok = ok and os.path.exists('../data/interim/train_FD001.txt')
ok = ok and os.path.exists('../data/interim/test_FD001.txt')
ok = ok and os.path.exists('../data/interim/RUL_FD001.txt')
ok = ok and os.path.exists('../data/raw')

if (ok):
    print('Working directories and datasets already exist.')
else:
    print('Some datasets are missing, create working directories and download original dataset from the NASA repository.')

# Making sure the raw data targets already exists:
os.makedirs('../data/raw', exist_ok=True)
os.makedirs('../data/interim', exist_ok=True)

# Download the dataset from the NASA repository, unzip it and set
# aside the first training file to work on:
# !wget https://ti.arc.nasa.gov/c/6/ --output-document=../data/raw/CMAPSSData.zip
# !unzip ../data/raw/CMAPSSData.zip -d ../data/raw

# Each training / test set is associated to different operating conditions.
# BUG FIX: the original cell used `!cp` shell magics as the loop body; outside
# Jupyter those are comments, leaving the `for` statement with an empty body
# (IndentationError). shutil.copyfile is the portable equivalent.
for i in range(1, 5):
    for prefix in ('train', 'test', 'RUL'):
        src = '../data/raw/{}_FD00{}.txt'.format(prefix, i)
        dst = '../data/interim/{}_FD00{}.txt'.format(prefix, i)
        shutil.copyfile(src, dst)
# -
# ### More details
# Datasets consist of multiple **multivariate** time series. Each data set is further divided into training and test subsets. Each time series is from a different engine (`unit number`) i.e., the data can be considered to be from a fleet of engines of the same type. Each engine starts with different degrees of initial wear and manufacturing variation which is unknown to the user. This wear and variation is considered normal, i.e., it is not considered a fault condition. There are three operational settings that have a substantial effect on engine performance. These settings are also included in the data (`operational setting 1`, `operational setting 2` and `operational setting 3`). The data (`sensor measurement 1` to `sensor measurement 21`) is contaminated with **sensor noise**.
#
# The engine is operating normally at the start of each time series, and develops a fault at some point during the series. In the training set, the fault grows in magnitude until system failure. In the test set, the time series ends some time prior to system failure. The objective of the competition is to **predict the number of remaining operational cycles before failure in the test set**, i.e., the number of operational cycles after the last cycle that the engine will continue to operate. Also provided a vector of true Remaining Useful Life (RUL) values for the test data.
#
# The data are provided as a zip-compressed text file with 26 columns of numbers, separated by spaces. Each row is a snapshot of data taken during a single operational cycle, each column is a different variable. The columns correspond to:
#
# 1. `unit number`
# 2. `time, in cycles`
# 3. `operational setting 1`
# 4. `operational setting 2`
# 5. `operational setting 3`
# 6. `sensor measurement 1`
# 7. `sensor measurement 2`
# 8. ...
# 9. `sensor measurement 21`
# ### Data ingestion
# Path to the first training subset (operating condition FD001).
dataset_path = '../data/interim/train_FD001.txt'
data = pd.read_csv(dataset_path, header=None, sep=' ')
# Drop columns that parsed as all-NaN (presumably produced by trailing
# separators in the raw file -- TODO confirm against the source data).
data.dropna(axis='columns', how='all', inplace=True)
print('Shape:', data.shape)
data.head(5)
# Setting readable column names:
columns = [
    'unit_number',
    'time',
    'operational_setting_1',
    'operational_setting_2',
    'operational_setting_3',
] + ['sensor_measurement_{}'.format(s) for s in range(1,22)]
data.columns = columns
# ### Defining RUL target function
# The traditional way of assigning target values for RUL estimation applies a linear decrease along with time. This definition implies that the health of a system degrades linearly along with time. In practical applications, degradation of a component is negligible at the beginning of use and increases when the component approaches end-of-life. To better model the RUL changes along with time, a piece-wise linear RUL target function is often proposed: for this experiment, we will set the maximum limit to 130 time cycles (value used in several papers: [here](https://www.researchgate.net/publication/224358896_Recurrent_neural_networks_for_remaining_useful_life_estimation) and [there](https://www.researchgate.net/publication/314933361_Deep_Convolutional_Neural_Network_Based_Regression_Approach_for_Estimation_of_Remaining_Useful_Life) for instance).
# +
# Add a RUL column and group the data by unit_number:
data['true_rul'] = 0
data['piecewise_rul'] = 0
grouped_data = data.groupby(by='unit_number')
rul_limit = 130  # cap (in cycles) used by the piece-wise linear RUL target

# Loops through each unit number to get the lifecycle counts:
for unit, max_rul in enumerate(grouped_data.count()['time']):
    current_df = data[data['unit_number'] == (unit+1)].copy()
    # True RUL decreases linearly down to 0 at the unit's last observed cycle.
    current_df['true_rul'] = max_rul - current_df['time']
    # Piece-wise RUL: constant at rul_limit early in life, then linear.
    # CONSISTENCY FIX: use rul_limit instead of the hard-coded 130 that left
    # the variable defined above unused.
    current_df.loc[current_df['true_rul'] > rul_limit, 'piecewise_rul'] = rul_limit
    current_df.loc[current_df['true_rul'] <= rul_limit, 'piecewise_rul'] = current_df['true_rul']
    data[data['unit_number'] == (unit+1)] = current_df
# -
# We now have a piecewise RUL:
# +
# BUG FIX: matplotlib.pyplot was never imported in this notebook (only os,
# pandas, sagemaker and boto3 are), so `plt` would raise a NameError.
import matplotlib.pyplot as plt

# Plot true vs. piece-wise RUL for the first engine unit.
current_df = data[data['unit_number'] == 1]
true_rul = current_df['true_rul']
piecewise_rul = current_df['piecewise_rul']

fig = plt.figure(figsize=(10,4))
ax = true_rul.plot(linestyle='-', linewidth=5, alpha=0.3, label='True RUL')
piecewise_rul.plot(linestyle='--', color='#CC0000', ax=ax, label='Piece-wise RUL')
ax.set_ylabel('Remaining useful life (RUL)', fontsize=12)
ax.set_xlabel('Time cycle', fontsize=12)
plt.legend();
# -
data.head()
# Adding a column specifying that the engine is near the end of its lifetime: we will use this column as the label for the classification part of the training later on.
low_cycle = 30  # an engine with RUL <= 30 cycles is considered near end-of-life
# BUG FIX: the dataframe has no 'rul' column -- the columns created above are
# 'true_rul' and 'piecewise_rul'. Label on true RUL (identical to piecewise
# RUL for values <= 130, so either column yields the same labels here).
data['label'] = data['true_rul'].apply(lambda x: 1 if x <= low_cycle else 0)
data.head()
# ## Cleanup
# ### Storing data for the next notebook
# %store -z
# %store data
# %store columns
# ### Persisting these data to disk
# This is useful in case you want to be able to execute each notebook independently (from one session to another) and don't want to re-execute every notebook whenever you want to focus on a particular step.
# +
# Create the local path for our artifacts:
local_path = '../data/interim'
os.makedirs(local_path, exist_ok=True)
# Write the dataframe:
data.to_csv(os.path.join(local_path, 'data.csv'), index=False)
# Write the columns names:
with open(os.path.join(local_path, 'columns.txt'), 'w') as f:
f.write(','.join(columns))
| nasa-turbofan-rul-xgboost/notebooks/1 - Data preparation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Implementation of a Quantum Approximate Optimization Algorithm (QAOA).
#
# This example is based on the example provided by [Rigetti](https://github.com/rigetticomputing/grove/blob/master/examples/IsingSolver.ipynb).
#
#
# This code finds the global minima of an Ising model with external fields of the form
# $$f(x)= \Sigma_i h_i x_i + \Sigma_{i,j} J_{i,j} x_i x_j.$$
# Two adjacent sites $i,j$ have an interaction equal to $J_{i,j}$. There is also an external magnetic field $h_i$ that affects each individual spin. The discrete variables take the values $x_i \in \{+1,-1\}$.
#
# The reference hamiltonian will be
#
# $$H_{b}=\sum_{i=0}^{N-1}\sigma^X_i$$
#
# with N the number of qubits for the problem (in this small case, 4). This Hamiltonian has as its ground state the Walsh-Hadamard state
#
# $$|\Psi(0)\rangle = |+>_{N-1}\otimes|+>_{N-2}\otimes \dots \otimes |+>_{0} = \frac{1}{\sqrt{2^N}}\sum_{i=0}^{2^N-1}|i\rangle$$
#
# So, the time evolution will be applied starting from this ground state.
#
# You will find the minima of the following Ising model
# $$f(x)=x_0+x_1-x_2+x_3-2 x_0 x_1 +3 x_2 x_3.$$
# Which corresponds to $x_{min}=[-1, -1, 1, -1]$ in numerical order, with a minimum value of $f(x_{min})=-9$.
# Remember that, as Variational Quantum Eigensolver (VQE), this is a hybrid algorithm. Part of the code is executed in the CPU (the optimisation) and part in the QPU (the calculus of the expected values of the Hamiltonian).
#
# ## 1. Import the needed packages
#
# Import:
#
# 1. ProjecQ Simulator
# 2. Operations to be used. Because this is mainly a time evolution, the most important is TimeEvolution
# 3. The optimization function from Scipy.
import projectq
from projectq.backends import Simulator
from projectq.ops import All, Measure, QubitOperator, TimeEvolution,H
from scipy.optimize import minimize
# ## 2. Define the functions for the optimization
# This functions will calculate the expectation value for a hamiltonian *H* after applying the Time Evolution of the Hamiltonian composed by the reference and cost hamiltonians, for a selected number of times. However, the time evolution of each hamiltonian is executed for a different *time* $\theta_i$ which are the optimisation parameters.
def Expectation_H(theta,nqubits, steps,base_ham,cost_ham,eval_ham):
    """Return the expectation value of eval_ham on the QAOA ansatz state.

    The ansatz is prepared by starting from the Walsh-Hadamard state and
    alternating time evolution under cost_ham and base_ham `steps` times.

    Args:
        theta (array): 2*steps variational parameters; the first `steps`
            entries are the mixer angles (alfa), the remaining entries
            the cost angles (gamma)
        nqubits (int): number of qubits to allocate for this Hamiltonian
        steps (int): number of alternating time-evolution layers (QAOA depth)
        base_ham (QubitOperator): reference (mixer) Hamiltonian
        cost_ham (QubitOperator): cost Hamiltonian encoding the problem
        eval_ham (QubitOperator): Hamiltonian term whose expectation value
            is evaluated on the prepared state
    Returns:
        energy of the wavefunction for the given parameters
    """
    # Create a ProjectQ compiler with a simulator as a backend
    eng = projectq.MainEngine(backend=Simulator(gate_fusion=True, rnd_seed=1000))
    wavefunction = eng.allocate_qureg(nqubits)
    # Initialize to Walsh-Hadamard state
    All(H) | wavefunction
    # Split the flat parameter vector into mixer / cost angles
    alfa=theta[:steps]
    gamma=theta[steps:]
    # Apply the alternating "time evolution" a number of times (steps)
    for i in range(steps):
        TimeEvolution(gamma[i], cost_ham) | wavefunction
        TimeEvolution(alfa[i], base_ham) | wavefunction
    # flush all gates
    eng.flush()
    # Calculate the energy.
    # The simulator can directly return expectation values, while on a
    # real quantum devices one would have to measure each term of the
    # Hamiltonian.
    energy = eng.backend.get_expectation_value(eval_ham, wavefunction)
    # Measure in order to return to a classical state
    # (as otherwise the simulator will give an error)
    All(Measure) | wavefunction
    del eng
    return energy
#
#
# Helper function to compose the real hamiltonians from their terms.
#
#
def compose_ham(Base, hamiltonian):
    """Fold lists of Hamiltonian terms into two summed Hamiltonians.

    Args:
        Base: iterable of terms (e.g. QubitOperator) of the mixer Hamiltonian.
        hamiltonian: iterable of terms of the cost Hamiltonian.
    Returns:
        (H_b, H_o): the summed mixer and cost Hamiltonians, in that order.
        An empty term list yields None, matching the original behaviour.
    """
    def _sum_terms(terms):
        # Accumulate with `+` starting from the first term; no neutral
        # element is assumed, so any type defining `+` works.
        total = None
        for term in terms:
            total = term if total is None else total + term
        return total

    # The original duplicated this fold inline for each argument; the helper
    # removes the repetition without changing the result.
    return _sum_terms(Base), _sum_terms(hamiltonian)
# This function returns the most probable state (which is is solution)
def State_H(theta,nqubits, steps,base_ham,cost_ham):
    """Return the most probable computational-basis state of the QAOA ansatz.

    Args:
        theta (array): 2*steps variational parameters (mixer angles first,
            then cost angles), typically the optimiser's solution
        nqubits (int): number of qubits
        steps (int): number of alternating time-evolution layers
        base_ham (list): terms of the reference (mixer) Hamiltonian
        cost_ham (list): terms of the cost Hamiltonian
    Returns:
        (maxstate, maxp): the most probable bitstring (qubit 0 first) and
        its probability
    """
    import numpy as np
    # Sum the term lists into the two working Hamiltonians
    H_b,H_o=compose_ham(base_ham,cost_ham)
    # Create a ProjectQ compiler with a simulator as a backend
    from projectq.backends import Simulator
    eng = projectq.MainEngine(backend=Simulator(gate_fusion=True, rnd_seed=1000))
    wavefunction = eng.allocate_qureg(nqubits)
    # Initialize to Walsh-Hadamard state
    All(H) | wavefunction
    #print("Theta:",theta)
    # Split the flat parameter vector into mixer / cost angles
    alfa=theta[:steps]
    gamma=theta[steps:]
    #print(steps)
    for i in range(steps):
        TimeEvolution(gamma[i], H_o) | wavefunction
        TimeEvolution(alfa[i], H_b) | wavefunction
    # flush all gates
    eng.flush()
    # Scan all 2**nqubits basis states for the highest probability.
    maxp=0.0
    maxstate=None
    for i in range(2**nqubits):
        bits=np.binary_repr(i,width=len(wavefunction))
        # binary_repr is most-significant-bit first; reverse so index 0
        # corresponds to qubit 0, as get_probability expects
        statep=eng.backend.get_probability(bits[-1::-1],wavefunction)
        if (maxp < statep):
            maxstate=bits[-1::-1]
            maxp=statep
    # Measure to collapse to a classical state before deallocation
    All(Measure) | wavefunction
    eng.flush()
    del eng
    return maxstate,maxp
# Function to calculate the expectation values of each term of the hamiltonian. This step can be executed in parallel
def variational_quantum_eigensolver(theta, nqubits, steps, Base, hamiltonian):
    """Evaluate the full QAOA energy for the parameters `theta`: the sum of
    the expectation values of every cost-Hamiltonian term on the ansatz
    state. Each term's expectation could be computed in parallel."""
    H_b, H_o = compose_ham(Base, hamiltonian)
    # Start from 0. so the result stays a float even for an empty term list.
    vqe = sum(
        (Expectation_H(theta, nqubits, steps, H_b, H_o, term) for term in hamiltonian),
        0.,
    )
    print("VQE:", vqe)
    return vqe
# ## 4. Optimize
#
# This is the main part. Starting from a defined Ising Hamiltonian, find the result using an optimizer
# The input for the code in the default mode corresponds simply to the parameters $h_i$ and $J_{i,j}$, that we specify as a list in numerical order and a dictionary. The code returns the bitstring of the minima, the minimum value, and the QAOA quantum circuit used to obtain that result.
# Ising problem definition: pairwise couplings J and external fields h.
J = {(0, 1): -2, (2, 3): 3}  # J[(i, j)] is the coupling between spins i and j
h = [1, 1, -1, 1]  # h[i] is the external field acting on spin i
num_steps=10  # QAOA depth (number of alternating evolution layers)
# +
import numpy as np
#
# if the number os steps is 0, select them as twice the number of qubits
if num_steps == 0:
num_steps = 2 * len(h)
nqubits = len(h)
hamiltonian_o=[]
hamiltonian_b=[]
for i, j in J.keys():
hamiltonian_o.append(QubitOperator("Z%d Z%d"%(i,j),J[(i, j)]))
for i in range(nqubits):
hamiltonian_o.append(QubitOperator("Z%d"%i,h[i]))
for i in range(nqubits):
hamiltonian_b.append(QubitOperator("X%d"%i,-1.0))
betas = np.random.uniform(0, np.pi, num_steps)[::-1]
gammas = np.random.uniform(0, 2*np.pi, num_steps)
theta_0=np.zeros(2*num_steps)
theta_0[0:num_steps]=betas
theta_0[num_steps:]=gammas
minimum = minimize(variational_quantum_eigensolver,theta_0,args=(nqubits,num_steps,hamiltonian_b,hamiltonian_o),
method='Nelder-Mead',options= {'disp': True,'ftol': 1.0e-2,'xtol': 1.0e-2,'maxiter':20})
# -
# And calculate now the most probable state
maxstate,maxp=State_H(minimum.x,nqubits,num_steps,hamiltonian_b,hamiltonian_o)
# Ok. This is the end. Show the results
print([(-1 if int(i)==1 else 1) for i in maxstate], " with probability %.2f"%maxp)
| Notebooks/QAOA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# * This notebook was made to explore salinity variation along Horseshoe Bay to Nanaimo ferry route
# +
from __future__ import division, print_function
from cStringIO import StringIO
from IPython.core.display import HTML
from salishsea_tools.nowcast import figures
from glob import glob
import datetime
import glob
import os
import arrow
from dateutil import tz
from datetime import datetime, timedelta
from sklearn import linear_model
from pylab import *
from matplotlib.backends import backend_agg as backend
import matplotlib.cm as cm
import matplotlib.dates as mdates
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import netCDF4 as nc
import numpy as np
import pandas as pd
import requests
import math
from scipy import interpolate as interp
import scipy.io as sio
from salishsea_tools import (
nc_tools,
viz_tools,
stormtools,
tidetools,
)
# Font format
title_font = {
'fontname': 'Bitstream Vera Sans', 'size': '15', 'color': 'black',
'weight': 'medium'
}
axis_font = {'fontname': 'Bitstream Vera Sans', 'size': '13'}
# %matplotlib inline
# -
def results_dataset(period, grid, results_dir):
"""Return the results dataset for period (e.g. 1h or 1d)
and grid (e.g. grid_T, grid_U) from results_dir.
"""
filename_pattern = 'SalishSea_{period}_*_{grid}.nc'
filepaths = glob(os.path.join(results_dir, filename_pattern.format(period=period, grid=grid)))
return nc.Dataset(filepaths[0])
# ``from datetime import datetime`` above rebinds the name ``datetime`` to
# the class, so ``datetime.datetime(...)`` would raise AttributeError;
# call the class directly.
run_date = datetime(2015, 8, 14)
# Results dataset location: one subdirectory per day, named e.g. '14aug15'.
results_home = '/data/dlatorne/MEOPAR/SalishSea/nowcast/'
results_dir = os.path.join(results_home, run_date.strftime('%d%b%y').lower())
def date(year, month, day_start, day_end, period, grid):
    """Return the nowcast result file path for each day in the range.

    :arg year: year of the run dates
    :arg month: month of the run dates
    :arg day_start: first day of the range (inclusive)
    :arg day_end: last day of the range (inclusive)
    :arg period: averaging period of the results file, e.g. '1h' or '1d'
    :arg grid: results grid, e.g. 'grid_T' or 'grid_U'
    :returns: list of absolute file paths, one per day
    """
    results_home = '/data/dlatorne/MEOPAR/SalishSea/nowcast/'
    files_all = []
    for day in range(day_start, day_end + 1):
        # ``datetime`` is the class here (from datetime import datetime),
        # so it is called directly, not as ``datetime.datetime``.
        run_date = datetime(year, month, day)
        results_dir = os.path.join(results_home, run_date.strftime('%d%b%y').lower())
        # Nowcast files repeat the date twice: start and end of the run day.
        date_str = run_date.strftime('%Y%m%d')
        filename = 'SalishSea_{0}_{1}_{1}_{2}.nc'.format(period, date_str, grid)
        files_all.append(os.path.join(results_dir, filename))
    return files_all
from glob import glob
# Load the hourly tracer results for the run day.
grid_T_hr = results_dataset('1h', 'grid_T', results_dir)
# Model bathymetry and Pacific Northwest coastline for the map panel.
bathy = nc.Dataset('/data/nsoontie/MEOPAR/NEMO-forcing/grid/bathy_meter_SalishSea2.nc')
PNW_coastline = sio.loadmat('/ocean/rich/more/mmapbase/bcgeo/PNW.mat')
filepath_name = date(run_date.year,run_date.month,run_date.day,run_date.day,'1h','grid_T')
latitude=grid_T_hr.variables['nav_lat']
longitude=grid_T_hr.variables['nav_lon']
sal_hr = grid_T_hr.variables['vosaline']
# Slice hour t=3, depth level z=1 and mask land points (salinity == 0).
t, z = 3, 1
sal_hr = np.ma.masked_values(sal_hr[t, z], 0)
# # Prepare salinity data
saline=sio.loadmat('/ocean/jieliu/research/meopar/salinity_comparison/data/HBDB/HBDB_TSG20150813.mat')
def find_dist (q, lon11, lat11, X, Y, bathy, longitude, latitude, saline_nemo_3rd, saline_nemo_4rd):
    """Inverse-distance-weighted model salinity around track point ``q``.

    Finds the model grid point closest to observation ``q``, then
    accumulates 1/distance-weighted salinities over that point's 3x3
    grid neighbourhood for the hour-3 and hour-4 model fields.

    :returns: (weighted sum for hour 3, weighted sum for hour 4,
        the 9 weights) -- callers divide the sums by ``sum(weights)``
        to obtain the weighted means.
    """
    k=0
    values =0
    valuess=0
    # One slot per cell of the 3x3 neighbourhood, filled row-major via k.
    dist = np.zeros(9)
    weights = np.zeros(9)
    value_3rd=np.zeros(9)
    value_4rd=np.zeros(9)
    #regr =linear_model.LinearRegression()
    #regr.fit(lon11,lat11);
    #regr.coef_
    [x1, j1] = tidetools.find_closest_model_point(lon11[q],lat11[q],\
    X,Y,bathy,lon_tol=0.0052,lat_tol=0.00210,allow_land=False)
    for i in np.arange(x1-1,x1+2):
        for j in np.arange(j1-1,j1+2):
            # Great-circle distance from the observation to this grid cell.
            dist[k]=tidetools.haversine(lon11[q],lat11[q],longitude[i,j],latitude[i,j])
            # NOTE(review): dist[k] == 0 (observation exactly on a grid
            # point) would divide by zero here — confirm the tolerances in
            # find_closest_model_point make that impossible.
            weights[k]=1.0/dist[k]
            value_3rd[k]=saline_nemo_3rd[i,j]*weights[k]
            value_4rd[k]=saline_nemo_4rd[i,j]*weights[k]
            values=values+value_3rd[k]
            valuess=valuess+value_4rd[k]
            k+=1
    return values, valuess, weights
def salinity_fxn(saline):
    """Extract ferry-track salinity observations and matching model values.

    Reads the HBDB thermosalinograph data out of the loaded .mat structure,
    keeps the observations in the 02:40-04:20 UTC window of the module-level
    ``run_date``, thins the track to every 20th point, and interpolates the
    hour-3 and hour-4 model surface salinity onto the thinned track by
    inverse-distance weighting (via ``find_dist``).

    :arg saline: .mat structure loaded from the HBDB TSG file
    :returns: (lon11, lat11, lon1_2_4, lat1_2_4, value_mean_3rd_hour,
        value_mean_4rd_hour, salinity11, salinity1_2_4, date_str)
    """
    struct= (((saline['HBDB_TSG'])['output'])[0,0])['Practical_Salinity'][0,0]
    salinity = struct['data'][0,0]
    time = struct['matlabTime'][0,0]
    lonn = struct['longitude'][0,0]
    latt = struct['latitude'][0,0]
    a=len(time)
    lon1=np.zeros([a,1])
    lat1=np.zeros([a,1])
    salinity1=np.zeros([a,1])
    # Sampling window on the run day: 02:40 to 04:20 UTC.
    run_lower = run_date.replace(hour = 2, minute = 40)
    run_upper = run_date.replace(hour= 4, minute = 20)
    for i in np.arange(0,a):
        # np.float was removed in NumPy 1.20; the builtin float is equivalent.
        matlab_datenum = float(time[i])
        # MATLAB datenums count days from year 0, Python ordinals from
        # year 1, hence the 366-day offset; the fractional part carries the
        # time of day.  ``datetime`` is the class here (from datetime
        # import datetime), so fromordinal is called on it directly.
        python_datetime = datetime.fromordinal(int(matlab_datenum))\
        + timedelta(days=matlab_datenum%1) - timedelta(days = 366)
        if (python_datetime >= run_lower) &(python_datetime <= run_upper):
            lon1[i]=lonn[i]
            lat1[i]=latt[i]
            salinity1[i]=salinity[i]
    # Rows outside the time window were left at 0; drop them.
    mask=lon1[:,0]!=0
    lon1_2_4=lon1[mask]
    lat1_2_4=lat1[mask]
    salinity1_2_4=salinity1[mask]
    # Thin the track to every 20th observation for the model comparison.
    lon11=lon1_2_4[0:-1:20]
    lat11=lat1_2_4[0:-1:20]
    salinity11=salinity1_2_4[0:-1:20]
    bathy, X, Y = tidetools.get_SS2_bathy_data()
    aa=date(run_date.year,run_date.month,run_date.day,run_date.day,'1h','grid_T')
    date_str = run_date.strftime('%d-%b-%Y')  # e.g. '14-Aug-2015', used in plot titles
    tracers=nc.Dataset(aa[0])
    latitude=tracers.variables['nav_lat'][:]
    longitude=tracers.variables['nav_lon'][:]
    saline_nemo = tracers.variables['vosaline']
    # Surface layer (depth index 1) at hours 3 and 4 of the run day.
    saline_nemo_3rd = saline_nemo[3,1, 0:898, 0:398]
    saline_nemo_4rd = saline_nemo[4,1, 0:898, 0:398]
    matrix=np.zeros([len(lon11),9])
    values=np.zeros([len(lon11),1])
    valuess=np.zeros([len(lon11),1])
    value_mean_3rd_hour=np.zeros([len(lon11),1])
    value_mean_4rd_hour=np.zeros([len(lon11),1])
    # Inverse-distance weighted mean over the 3x3 model neighbourhood of
    # each thinned track point.
    for q in np.arange(0,len(lon11)):
        values[q], valuess[q], matrix[q,:]=find_dist(q, lon11, lat11, X, Y,\
        bathy, longitude, latitude, saline_nemo_3rd, saline_nemo_4rd)
        value_mean_3rd_hour[q]=values[q]/sum(matrix[q])
        value_mean_4rd_hour[q]=valuess[q]/sum(matrix[q])
    return lon11, lat11, lon1_2_4, lat1_2_4,\
    value_mean_3rd_hour, value_mean_4rd_hour,\
    salinity11, salinity1_2_4,date_str
# Hides Deprecation warning - needs fixing
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# Dictionary of ferry stations - new
# Terminal coordinates (decimal degrees) used to annotate the route map.
ferry_stations = {'Horseshoe Bay': {'lat': 49.3742,'lon': -123.2728},
                  'Nanaimo': {'lat': 49.1632,'lon': -123.8909},
                  'Vancouver': {'lat': 49.2827,'lon': -123.1207}}
def salinity_ferry_route(grid_T, grid_B, PNW_coastline, ferry_sal):
    """Plot daily salinity comparisons between ferry observations and model
    results (left panel) plus the ferry route over the model salinity
    distribution (right panel).

    :arg grid_T: Hourly tracer results dataset.
    :type grid_T: :class:`netCDF4.Dataset`
    :arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
    :type grid_B: :class:`netCDF4.Dataset`
    :arg PNW_coastline: Coastline dataset.
    :type PNW_coastline: :class:`mat.Dataset`
    :arg ferry_sal: Loaded TSG .mat structure with ferry salinity data.
    :returns: fig

    NOTE(review): ``grid_T`` and ``ferry_sal`` are never read — the body
    uses the module-level ``sal_hr``/``longitude``/``latitude`` and passes
    the global ``saline`` to salinity_fxn.  Confirm and wire the parameters
    through.
    """
    fig, axs = plt.subplots(1, 2, figsize=(15, 8))
    figures.plot_map(axs[1], grid_B, PNW_coastline)
    axs[1].set_xlim(-124.5, -122.5)
    axs[1].set_ylim(48.2, 49.5)
    viz_tools.set_aspect(axs[1],coords='map',lats=latitude)
    # NOTE(review): the 'spectral' colormap was removed in Matplotlib 2.2
    # (renamed 'nipy_spectral') — confirm against the pinned version.
    cmap=plt.get_cmap('spectral')
    cmap.set_bad('burlywood')
    mesh=axs[1].pcolormesh(longitude[:],latitude[:],sal_hr[:],cmap=cmap)
    cbar=fig.colorbar(mesh)
    plt.setp(plt.getp(cbar.ax.axes, 'yticklabels'), color='w')
    # NOTE(review): "Pratical" typo in the displayed label — left as-is here.
    cbar.set_label('Pratical Salinity', color='white')
    axs[1].set_title('Ferry Route: 3am[UTC] 1.5m model result ', **title_font)
    bbox_args = dict(boxstyle='square', facecolor='white', alpha=0.7)
    # Mark and label the three terminals on the map panel.
    stations=['Horseshoe Bay','Nanaimo','Vancouver']
    for stn in stations:
        axs[1].plot(ferry_stations[stn]['lon'], ferry_stations[stn]['lat'], marker='D', \
        color='white',\
        markersize=10, markeredgewidth=2)
    axs[1].annotate ('Horseshoe Bay',(ferry_stations['Horseshoe Bay']['lon'] + 0.022,\
    ferry_stations['Horseshoe Bay']['lat'] + 0.052), fontsize=15, color='black', bbox=bbox_args )
    axs[1].annotate ('Nanaimo',(ferry_stations['Nanaimo']['lon'] - 0.45,\
    ferry_stations['Nanaimo']['lat'] - 0.1 ),fontsize=15, color='black', bbox=bbox_args )
    axs[1].annotate ('Vancouver',(ferry_stations['Vancouver']['lon'] + 0.02,\
    ferry_stations['Vancouver']['lat']- 0.10 ),fontsize=15, color='black', bbox=bbox_args )
    figures.axis_colors(axs[1], 'white')
    # Observed and interpolated model salinities along the ferry track.
    lon11, lat11, lon1_2_4, lat1_2_4,\
    value_mean_3rd_hour, value_mean_4rd_hour,\
    salinity11,salinity1_2_4, date_str = salinity_fxn(saline)
    axs[1].plot(lon11,lat11,'black', linewidth = 4)
    model_salinity_3rd_hour=axs[0].plot(lon11,value_mean_3rd_hour,'DodgerBlue',\
    linewidth=2, label='3 am [UTC]')
    model_salinity_4rd_hour=axs[0].plot(lon11,value_mean_4rd_hour,'MediumBlue',\
    linewidth=2, label="4 am [UTC]" )
    observation_salinity=axs[0].plot(lon1_2_4,salinity1_2_4,'DarkGreen', linewidth=2, label="Observed")
    axs[0].text(0.25, -0.1,'Observations from Ocean Networks Canada', \
    transform=axs[0].transAxes, color='white')
    axs[0].set_xlim(-124, -123)
    axs[0].set_ylim(0, 30)
    axs[0].set_title('Surface Salinity: ' + date_str, **title_font)
    axs[0].set_xlabel('Longitude', **axis_font)
    axs[0].set_ylabel('Practical Salinity', **axis_font)
    axs[0].legend()
    axs[0].grid()
    # Dark slide-style background with light axis labels.
    fig.patch.set_facecolor('#2B3E50')
    figures.axis_colors(axs[0], 'gray')
    return fig
## without linear regression route
fig = salinity_ferry_route(grid_T_hr, bathy, PNW_coastline, saline)
# # Plot
## with linear regression route
fig = salinity_ferry_route(grid_T_hr, bathy, PNW_coastline, saline)
| jie/HBDB_FerryRouteSalinityTemplate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # JupyterLab's Package Installer Extension
# Meet JupyterLab's new package installer extension! This extension allows you to install missing Python packages (using `pip`) into the *current running* kernel without needing to restart the kernel.
#
# **Try it out!**
#
# First, run the cell below to show you have not yet installed `names` here. An error will occur.
# +
import names
names.get_full_name()
# -
# Click the wrench in the left panel to open the package installer. Enter `names` as the package name then click Install. Once it installs, re-run the cell above to show that it is installed!
| notebooks/Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python3
# language: python
# name: python3
# ---
# ## Parallel, Multi-Objective BO in BoTorch with qEHVI and qParEGO
#
# In this tutorial, we illustrate how to implement a simple multi-objective (MO) Bayesian Optimization (BO) closed loop in BoTorch.
#
# We use the parallel ParEGO ($q$ParEGO) [1] and parallel Expected Hypervolume Improvement ($q$EHVI) [1] acquisition functions to optimize a synthetic Branin-Currin test function. The two objectives are
#
# $$f^{(1)}(x_1\text{'}, x_2\text{'}) = (x_2\text{'} - \frac{5.1}{4 \pi^ 2} (x_1\text{'})^2 + \frac{5}{\pi} x_1\text{'} - r)^2 + 10 (1-\frac{1}{8 \pi}) \cos(x_1\text{'}) + 10$$
#
# $$f^{(2)}(x_1, x_2) = \bigg[1 - \exp\bigg(-\frac{1} {(2x_2)}\bigg)\bigg] \frac{2300 x_1^3 + 1900x_1^2 + 2092 x_1 + 60}{100 x_1^3 + 500x_1^2 + 4x_1 + 20}$$
#
# where $x_1, x_2 \in [0,1]$, $x_1\text{'} = 15x_1 - 5$, and $x_2\text{'} = 15x_2$ (parameter values can be found in `botorch/test_functions/multi_objective.py`).
#
# Since botorch assumes a maximization of all objectives, we seek to find the pareto frontier, the set of optimal trade-offs where improving one metric means deteriorating another.
#
# [1] [<NAME>, <NAME>, and <NAME>. Differentiable Expected Hypervolume Improvement for Parallel Multi-Objective Bayesian Optimization. Advances in Neural Information Processing Systems 33, 2020.](https://arxiv.org/abs/2006.05078)
# ### Set dtype and device
# Note: $q$EHVI aggressively exploits parallel hardware and is much faster when run on a GPU. See [1] for details.
import torch
# Shared tensor options: double precision, GPU when available.  Passed as
# **tkwargs wherever tensors are constructed below.
tkwargs = {
    "dtype": torch.double,
    "device": torch.device("cuda" if torch.cuda.is_available() else "cpu"),
}
# ### Problem setup
#
# +
from botorch.test_functions.multi_objective import BraninCurrin
problem = BraninCurrin(negate=True).to(**tkwargs)
# -
# #### Model initialization
#
# We use a multi-output `SingleTaskGP` to model the two objectives with a homoskedastic Gaussian likelihood with an inferred noise level.
#
# The models are initialized with $2(d+1)=6$ points drawn randomly from $[0,1]^2$.
# +
from botorch.models.gp_regression import SingleTaskGP
from botorch.models.transforms.outcome import Standardize
from gpytorch.mlls.exact_marginal_log_likelihood import ExactMarginalLogLikelihood
from botorch.utils.transforms import unnormalize
from botorch.utils.sampling import draw_sobol_samples
def generate_initial_data(n=6):
    """Draw ``n`` quasi-random Sobol points in the problem bounds and
    evaluate both objectives on them.

    Returns the (n x d) inputs and the (n x m) objective values.
    """
    seed = torch.randint(1000000, (1,)).item()
    sobol_batch = draw_sobol_samples(bounds=problem.bounds, n=1, q=n, seed=seed)
    train_x = sobol_batch.squeeze(0)
    train_obj = problem(train_x)
    return train_x, train_obj
def initialize_model(train_x, train_obj):
    """Build a multi-output SingleTaskGP over the training data (outcomes
    standardized) and return its marginal log-likelihood and the model.
    """
    num_outputs = train_obj.shape[-1]
    model = SingleTaskGP(
        train_x,
        train_obj,
        outcome_transform=Standardize(m=num_outputs),
    )
    mll = ExactMarginalLogLikelihood(model.likelihood, model)
    return mll, model
# -
# #### Define a helper function that performs the essential BO step for $q$EHVI
# The helper function below initializes the $q$EHVI acquisition function, optimizes it, and returns the batch $\{x_1, x_2, \ldots x_q\}$ along with the observed function values.
#
# For this example, we'll use a small batch of $q=4$. Passing the keyword argument `sequential=True` to the function `optimize_acqf` specifies that candidates should be optimized in a sequential greedy fashion (see [1] for details why this is important). A simple initialization heuristic is used to select the 20 restart initial locations from a set of 1024 random points. Multi-start optimization of the acquisition function is performed using LBFGS-B with exact gradients computed via auto-differentiation.
#
# **Reference Point**
#
# $q$EHVI requires specifying a reference point, which is the lower bound on the objectives used for computing hypervolume. In this tutorial, we assume the reference point is known. In practice the reference point can be set 1) using domain knowledge to be slightly worse than the lower bound of objective values, where the lower bound is the minimum acceptable value of interest for each objective, or 2) using a dynamic reference point selection strategy.
#
# **Partitioning the Non-dominated Space into disjoint rectangles**
#
# $q$EHVI requires partitioning the non-dominated space into disjoint rectangles (see [1] for details).
#
# *Note:* `NondominatedPartitioning` *will be very slow when 1) there are a lot of points on the pareto frontier and 2) there are >3 objectives.*
# +
from botorch.optim.optimize import optimize_acqf, optimize_acqf_list
from botorch.acquisition.objective import GenericMCObjective
from botorch.utils.multi_objective.scalarization import get_chebyshev_scalarization
from botorch.utils.multi_objective.box_decomposition import NondominatedPartitioning
from botorch.acquisition.multi_objective.monte_carlo import qExpectedHypervolumeImprovement
from botorch.utils.sampling import sample_simplex
# Number of candidates generated per BO iteration.
BATCH_SIZE = 4
# Acquisition optimization happens in the normalized [0, 1]^d cube; row 0
# holds the lower bounds, row 1 the upper bounds.
standard_bounds = torch.zeros(2, problem.dim, **tkwargs)
standard_bounds[1] = 1
def optimize_qehvi_and_get_observation(model, train_obj, sampler):
    """Optimize the qEHVI acquisition function and return the new candidate
    batch and its (evaluated) objective values.

    :param model: fitted multi-output GP surrogate
    :param train_obj: observed objective values used to build the partitioning
    :param sampler: MC sampler for the acquisition function
    :returns: (new_x, new_obj) — BATCH_SIZE candidates in the original
        problem space and their objective values
    """
    # partition non-dominated space into disjoint rectangles
    partitioning = NondominatedPartitioning(num_outcomes=problem.num_objectives, Y=train_obj)
    acq_func = qExpectedHypervolumeImprovement(
        model=model,
        ref_point=problem.ref_point.tolist(),  # use known reference point
        partitioning=partitioning,
        sampler=sampler,
    )
    # optimize over the normalized cube; sequential greedy selection per [1]
    candidates, _ = optimize_acqf(
        acq_function=acq_func,
        bounds=standard_bounds,
        q=BATCH_SIZE,
        num_restarts=20,
        raw_samples=1024,  # used for initialization heuristic
        options={"batch_limit": 5, "maxiter": 200, "nonnegative": True},
        sequential=True,
    )
    # map candidates back to the problem bounds and observe new values
    new_x = unnormalize(candidates.detach(), bounds=problem.bounds)
    new_obj = problem(new_x)
    return new_x, new_obj
# -
# #### Define a helper function that performs the essential BO step for $q$ParEGO
# The helper function below similarly initializes $q$ParEGO, optimizes it, and returns the batch $\{x_1, x_2, \ldots x_q\}$ along with the observed function values.
#
# $q$ParEGO uses random augmented chebyshev scalarization with the `qExpectedImprovement` acquisition function. In the parallel setting ($q>1$), each candidate is optimized in sequential greedy fashion using a different random scalarization (see [1] for details).
#
# To do this, we create a list of `qExpectedImprovement` acquisition functions, each with different random scalarization weights. The `optimize_acqf_list` method sequentially generates one candidate per acquisition function and conditions the next candidate (and acquisition function) on the previously selected pending candidates.
def optimize_qparego_and_get_observation(model, train_obj, sampler):
    """Optimize the qParEGO acquisition strategy and return the new
    candidate batch and its (evaluated) objective values.

    Samples a random Chebyshev scalarization per candidate, wraps each in a
    qExpectedImprovement, and optimizes the list sequentially (each
    candidate conditions on the previously selected pending points).
    """
    acq_func_list = []
    for _ in range(BATCH_SIZE):
        # Random simplex weights give each candidate its own scalarization.
        weights = sample_simplex(problem.num_objectives, **tkwargs).squeeze()
        objective = GenericMCObjective(get_chebyshev_scalarization(weights=weights, Y=train_obj))
        acq_func = qExpectedImprovement(  # pyre-ignore: [28]
            model=model,
            objective=objective,
            best_f=objective(train_obj).max().item(),
            sampler=sampler,
        )
        acq_func_list.append(acq_func)
    # optimize all scalarized acquisition functions sequentially
    candidates, _ = optimize_acqf_list(
        acq_function_list=acq_func_list,
        bounds=standard_bounds,
        num_restarts=20,
        raw_samples=1024,  # used for initialization heuristic
        options={"batch_limit": 5, "maxiter": 200},
    )
    # map candidates back to the problem bounds and observe new values
    new_x = unnormalize(candidates.detach(), bounds=problem.bounds)
    new_obj = problem(new_x)
    return new_x, new_obj
# ### Perform Bayesian Optimization loop with $q$EHVI and $q$ParEGO
# The Bayesian optimization "loop" for a batch size of $q$ simply iterates the following steps:
# 1. given a surrogate model, choose a batch of points $\{x_1, x_2, \ldots x_q\}$
# 2. observe $f(x)$ for each $x$ in the batch
# 3. update the surrogate model.
#
#
# Just for illustration purposes, we run three trials each of which do `N_BATCH=25` rounds of optimization. The acquisition function is approximated using `MC_SAMPLES=128` samples.
#
# *Note*: Running this may take a little while.
# +
from botorch import fit_gpytorch_model
from botorch.acquisition.monte_carlo import qExpectedImprovement, qNoisyExpectedImprovement
from botorch.sampling.samplers import SobolQMCNormalSampler
from botorch.exceptions import BadInitialCandidatesWarning
from botorch.utils.multi_objective.pareto import is_non_dominated
from botorch.utils.multi_objective.hypervolume import Hypervolume
import time
import warnings
warnings.filterwarnings('ignore', category=BadInitialCandidatesWarning)
warnings.filterwarnings('ignore', category=RuntimeWarning)
# Experiment configuration: N_TRIALS independent repetitions, each running
# N_BATCH BO rounds; acquisition functions approximated with MC_SAMPLES draws.
N_TRIALS = 3
N_BATCH = 25
MC_SAMPLES = 128
verbose = False
hvs_qparego_all, hvs_qehvi_all, hvs_random_all = [], [], []
hv = Hypervolume(ref_point=problem.ref_point)
# average over multiple trials
for trial in range(1, N_TRIALS + 1):
    torch.manual_seed(trial)
    print(f"\nTrial {trial:>2} of {N_TRIALS} ", end="")
    hvs_qparego, hvs_qehvi, hvs_random = [], [], []
    # call helper functions to generate initial training data and initialize model
    # All three methods (qParEGO, qEHVI, random/Sobol) share the same
    # initial design so their hypervolume curves start from the same point.
    train_x_qparego, train_obj_qparego = generate_initial_data(n=6)
    mll_qparego, model_qparego = initialize_model(train_x_qparego, train_obj_qparego)
    train_x_qehvi, train_obj_qehvi = train_x_qparego, train_obj_qparego
    train_x_random, train_obj_random = train_x_qparego, train_obj_qparego
    # compute hypervolume
    mll_qehvi, model_qehvi = initialize_model(train_x_qehvi, train_obj_qehvi)
    # compute pareto front
    pareto_mask = is_non_dominated(train_obj_qparego)
    pareto_y = train_obj_qparego[pareto_mask]
    # compute hypervolume of the shared initial design
    volume = hv.compute(pareto_y)
    hvs_qparego.append(volume)
    hvs_qehvi.append(volume)
    hvs_random.append(volume)
    # run N_BATCH rounds of BayesOpt after the initial random batch
    for iteration in range(1, N_BATCH + 1):
        t0 = time.time()
        # fit the models
        fit_gpytorch_model(mll_qparego)
        fit_gpytorch_model(mll_qehvi)
        # define the qEI and qNEI acquisition modules using a QMC sampler
        qparego_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)
        qehvi_sampler = SobolQMCNormalSampler(num_samples=MC_SAMPLES)
        # optimize acquisition functions and get new observations;
        # the random baseline just draws a fresh Sobol batch
        new_x_qparego, new_obj_qparego = optimize_qparego_and_get_observation(
            model_qparego, train_obj_qparego, qparego_sampler
        )
        new_x_qehvi, new_obj_qehvi = optimize_qehvi_and_get_observation(
            model_qehvi, train_obj_qehvi, qehvi_sampler
        )
        new_x_random, new_obj_random = generate_initial_data(n=BATCH_SIZE)
        # update training points
        train_x_qparego = torch.cat([train_x_qparego, new_x_qparego])
        train_obj_qparego = torch.cat([train_obj_qparego, new_obj_qparego])
        train_x_qehvi = torch.cat([train_x_qehvi, new_x_qehvi])
        train_obj_qehvi = torch.cat([train_obj_qehvi, new_obj_qehvi])
        train_x_random = torch.cat([train_x_random, new_x_random])
        train_obj_random = torch.cat([train_obj_random, new_obj_random])
        # update progress: recompute the hypervolume attained by each method
        for hvs_list, train_obj in zip(
            (hvs_random, hvs_qparego, hvs_qehvi),
            (train_obj_random, train_obj_qparego, train_obj_qehvi),
        ):
            # compute pareto front
            pareto_mask = is_non_dominated(train_obj)
            pareto_y = train_obj[pareto_mask]
            # compute hypervolume
            volume = hv.compute(pareto_y)
            hvs_list.append(volume)
        # reinitialize the models so they are ready for fitting on next iteration
        # Note: we find improved performance from not warm starting the model hyperparameters
        # using the hyperparameters from the previous iteration
        mll_qparego, model_qparego = initialize_model(train_x_qparego, train_obj_qparego)
        mll_qehvi, model_qehvi = initialize_model(train_x_qehvi, train_obj_qehvi)
        t1 = time.time()
        if verbose:
            print(
                f"\nBatch {iteration:>2}: Hypervolume (random, qParEGO, qEHVI) = "
                f"({hvs_random[-1]:>4.2f}, {hvs_qparego[-1]:>4.2f}, {hvs_qehvi[-1]:>4.2f}), "
                f"time = {t1-t0:>4.2f}.", end=""
            )
        else:
            print(".", end="")
    hvs_qparego_all.append(hvs_qparego)
    hvs_qehvi_all.append(hvs_qehvi)
    hvs_random_all.append(hvs_random)
# -
# #### Plot the results
# The plot below shows a common metric of multi-objective optimization performance, the log hypervolume difference: the log difference between the hypervolume of the true pareto front and the hypervolume of the approximate pareto front identified by each algorithm. The log hypervolume difference is plotted at each step of the optimization for each of the algorithms. The confidence intervals represent the variance at that step in the optimization across the trial runs. The variance across optimization runs is quite high, so in order to get a better estimate of the average performance one would have to run a much larger number of trials `N_TRIALS` (we avoid this here to limit the runtime of this tutorial).
#
# The plot shows that $q$EHVI vastly outperforms the $q$ParEGO and Sobol baselines and has very low variance.
# +
import numpy as np
from matplotlib import pyplot as plt
# %matplotlib inline
def ci(y):
    """95% normal-approximation confidence half-width of the mean over trials."""
    sem = y.std(axis=0) / np.sqrt(N_TRIALS)
    return 1.96 * sem
# x-axis: cumulative number of observations beyond the initial design.
iters = np.arange(N_BATCH + 1) * BATCH_SIZE
# Log hypervolume regret relative to the problem's known maximum.
log_hv_difference_qparego = np.log10(problem.max_hv - np.asarray(hvs_qparego_all))
log_hv_difference_qehvi = np.log10(problem.max_hv - np.asarray(hvs_qehvi_all))
log_hv_difference_rnd = np.log10(problem.max_hv - np.asarray(hvs_random_all))
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
# Mean curve per method with 95% CI error bars across trials.
ax.errorbar(iters, log_hv_difference_rnd.mean(axis=0), yerr=ci(log_hv_difference_rnd), label="Sobol", linewidth=1.5)
ax.errorbar(iters, log_hv_difference_qparego.mean(axis=0), yerr=ci(log_hv_difference_qparego), label="qParEGO", linewidth=1.5)
ax.errorbar(iters, log_hv_difference_qehvi.mean(axis=0), yerr=ci(log_hv_difference_qehvi), label="qEHVI", linewidth=1.5)
ax.set(xlabel='number of observations (beyond initial points)', ylabel='Log Hypervolume Difference')
ax.legend(loc="lower right")
# -
# #### plot the observations colored by iteration
#
# To examine the optimization process from another perspective, we plot the collected observations under each algorithm where the color corresponds to the BO iteration at which the point was collected. The plot on the right for $q$EHVI shows that the $q$EHVI quickly identifies the pareto front and most of its evaluations are very close to the pareto front. $q$ParEGO also has many observations close to the pareto front, but relies on optimizing random scalarizations, which is a less principled way of optimizing the pareto front compared to $q$EHVI, which explicitly focuses on improving the pareto front. Sobol generates random points and has few points close to the pareto front.
# +
from matplotlib.cm import ScalarMappable
fig, axes = plt.subplots(1, 3, figsize=(17, 5))
algos = ["Sobol", "qParEGO", "qEHVI"]
cm = plt.cm.get_cmap('viridis')
batch_number = torch.cat([torch.zeros(6), torch.arange(1, N_BATCH+1).repeat(BATCH_SIZE, 1).t().reshape(-1)]).numpy()
for i, train_obj in enumerate((train_obj_random, train_obj_qparego, train_obj_qehvi)):
sc = axes[i].scatter(train_obj[:, 0].cpu().numpy(), train_obj[:,1].cpu().numpy(), c=batch_number, alpha=0.8)
axes[i].set_title(algos[i])
axes[i].set_xlabel("Objective 1")
axes[i].set_xlim(-260, 5)
axes[i].set_ylim(-15, 0)
axes[0].set_ylabel("Objective 2")
norm = plt.Normalize(batch_number.min(), batch_number.max())
sm = ScalarMappable(norm=norm, cmap=cm)
sm.set_array([])
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.93, 0.15, 0.01, 0.7])
cbar = fig.colorbar(sm, cax=cbar_ax)
cbar.ax.set_title("Iteration")
| tutorials/multi_objective_bo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
import requests
import pandas as pd
import csv
import nltk
# Source article for the debate transcript.
# NOTE(review): url (and the nltk import above) are unused in this chunk —
# presumably used further down or left over; confirm.
url = 'https://www.washingtonpost.com/news/the-fix/wp/2016/09/26/the-first-trump-clinton-presidential-debate-transcript-annotated/'
# Import the data
debate=pd.read_csv('/Users/carol/personal-notebooks/debate.csv')
# Print overview
print("Number of rows: ",debate.shape[0])
# Bare expressions below rely on notebook auto-display of the last value.
debate.head()
print(debate)
debate.tail()
debate.index
debate.columns
debate.values
debate.describe()
| debate/Simple debate notebook.ipynb |