text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` !git clone https://github.com/NVlabs/stylegan3.git !pip install --upgrade psutil # based on https://github.com/Sxela/stylegan3_blending/blob/main/stylegan3_blending_public.ipynb import os import sys sys.path.append(os.path.join(os.path.abspath(""), "stylegan3")) import copy import math import pickle from glob import glob import numpy as np import cv2 import PIL from IPython.display import display from tqdm.notebook import trange import torch from torchvision.transforms.functional import to_tensor, to_pil_image init_model = "stylegan3-t-ffhqu-256x256.pkl" trainData = "arcaneFilteredData.zip" finetuned_path = "model_stylegan3_finetuned" createdDataPath = "stylegan3_data_arcane/" createdDataPhotoPath = createdDataPath + "photo" createdDataAnimePath = createdDataPath + "anime" for i in [finetuned_path, createdDataPhotoPath, createdDataAnimePath]: os.makedirs(i, exist_ok=True) # Download pretrained checkpoint !wget -nc https://api.ngc.nvidia.com/v2/models/nvidia/research/stylegan3/versions/1/files/{init_model} #train stylegan !cd stylegan3 && python train.py --outdir="../$finetuned_path" --data="../$trainData" \ --cfg=stylegan3-t --gpus=1 --batch=32 --gamma=2 --batch-gpu=16 --snap=10 \ --mirror=1 --freezed=10 --cbase=16384 --resume="../$init_model" #get recent finetuned model weight path def getRecentPkl(path): pklList=glob(finetuned_path+"/**/*.pkl", recursive=True) pklList.sort() return pklList[-1] finetuned_model=getRecentPkl(finetuned_path) finetuned_model # load model def get_model(path): with open(path, 'rb') as f: _G = pickle.load(f)['G_ema'].cuda() return _G G_raw = get_model(init_model) G_tuned = get_model(finetuned_model) G_blend = copy.deepcopy(G_raw) #blend model weight and create data pair import matplotlib.pyplot as plt def doBlend(blendRatio): # Not blending affine layers gives us colors closer to the original gen, without affecting the geometry much. 
W_new = G_raw.synthesis.state_dict().copy() W_tuned = G_tuned.synthesis.state_dict() for key in W_new: if "input" in key or 'affine' in key: continue l = blendRatio[int(key.split('_')[0][1:])] W_new[key] = W_tuned[key]*l + W_new[key]*(1-l) G_blend.synthesis.load_state_dict(W_new) return G_blend def getImage(model,seed=42): torch.manual_seed(seed) z = torch.randn(1,model.z_dim).cuda() w = model.mapping(z, None, truncation_psi=.5, truncation_cutoff=8) image = model.synthesis(w, noise_mode='const', force_fp32=True).cpu()[0] image = to_pil_image((image * 0.5 + 0.5).clip(0, 1)) return image blendRatio1 = [0,0,0,0,0,0,0,.2,.5,.7,.9,1,1,1,1] blendRatio2 = [0,0,0,0,0,0,0,.2,.5,.7,.8,.8,.8,.8,.8] blendRatio3 = [0,0,0,0,0,.2,.2,.2,.5,.7,.8,.8,.8,.8,1] blendRatio4 = [0]*7+[0.8]*(15-7 G_blend=doBlend(blendRatio3) imageNum=10000 for i in trange(imageNum): im1 = getImage(G_raw,seed=i) im3 = getImage(G_blend,seed=i) im1.save(f'{createdDataPhotoPath}/s{i}_.jpg', quality=100, subsampling=0) im3.save(f'{createdDataAnimePath}/s{i}_.jpg', quality=100, subsampling=0) #display created image for i in trange(0,10): im1 = getImage(G_raw,seed=i) im2 = getImage(G_tuned,seed=i) im3 = getImage(G_blend,seed=i) display(im1) display(im2) display(im3) ```
github_jupyter
<table> <tr><td align="right" style="background-color:#ffffff;"> <img src="../images/logo.jpg" width="20%" align="right"> </td></tr> <tr><td align="right" style="color:#777777;background-color:#ffffff;font-size:12px;"> Abuzer Yakaryilmaz | April 04, 2019 (updated) </td></tr> <tr><td align="right" style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;"> This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr> </table> $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 
& 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ <h2>Quantum Coin Flipping</h2> We will do a series of experiments, and try to understand the behaviors of "particles". <h3> The first experiment</h3> We will trace the behavior of a photon. For quantum coin-flipping, we use a beam splitter. For measurements, we use two photon detectors. <ul> <li> Photon is our coin. </li> <li> Beam splitter flips the photon. </li> <li> Photon detectors are our eyes.</li> </ul> <h4> The setup </h4> We send photons to a beam splitter as shown below. We expect two behaviors: the beam splitter either transmits or reflects the photon. <img src="../images/photon1.jpg" width="50%"> <h4> Experimental results </h4> After many experiments, we observe the photons in each photon detector almost evenly ($ \approx \% 50 $ and $ \approx \% 50 $). <img src="../images/photon2.jpg" width="50%"> <h4> The first interpretation </h4> So, a beam splitter behaves similarly to a fair coin. <ul> <li> Head (state 0): Transmitted </li> <li> Tail (state 1): Reflected </li> </ul> <h4> Modeling </h4> We describe our first experiment by a single (probabilistic) bit. We start in state 0. With half probability, the photon transmits, and the state does not change. With half probability, the photon is reflected, and the state is flipped. <img src="../images/photon3.jpg" width="50%"> <h3> The second experiment </h3> We extend our experiment with two mirrors and another beam splitter. Then, we try to validate our <u>interpretation</u> and <u>model</u>. <img src="../images/photon4.jpg" width="60%"> In this setup, we have three photon detectors. By using our model described above, we expect to observe a photon <ul> <li> in $ A $ with probability $ 0.5 $, </li> <li> and in $ B1 $ and $ B2 $ with probabilities $ 0.25 $. 
</li> </ul> Thus, our prediction for the frequencies of observing the photons in $ A $, $ B1 $, and $ B2 $ are respectively $$ \approx \% 50, \approx \% 25, \mbox{ and } \approx \% 25. $$ <h4> Experimental results </h4> Experiments confirm our predictions. Our model explains the second experiment. <img src="../images/photon5.jpg" width="65%"> <h3> The third experiment </h3> In the third experiment, we remove the photon detector $ A $. So we have only the detectors $ B1 $ and $ B2 $. <img src="../images/photon6.jpg" width="65%"> <h4> Our prediction </h4> The third setup is similar to flipping a fair coin twice. Our prediciton is to observe the photons in $ B1 $ and $ B2 $ almost evenly ($ \approx \% 50 $ and $ \approx \% 50 $) <h4>Let's do the math of our prediction</h4> 0) At the initial step, we are in state $ 0 $. If we use our vector representation, it is $$ v_0 = \myvector{1 \\ 0}. $$ 1) We flip a fair coin. The new probabilistic state is expected to be in both states ($0$ and $1$) with half probability ($ \frac{1}{2} = 0.5 $). $$ v_1 = \myvector{\frac{1}{2} \\ \frac{1}{2}} = \mymatrix{cc}{ \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} } \myvector{1 \\ 0}. $$ Here the transitions of a fair coin can be represented by the matrix (table): $ \mymatrix{cc}{ \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} } $ . 2) Then, we flip a fair coin again. The new probabilistic state will be the same: $$ v_2 = \myvector{\frac{1}{2} \\ \frac{1}{2}} = \mymatrix{cc}{ \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} } \myvector{\frac{1}{2} \\ \frac{1}{2}}. $$ <b><i> Our predicition is fine with mathematical calculation. 
</i></b> <img src="../images/prediction1.jpg" width="50%"> <h4> Experimental results </h4> <b style="color:red;">However, the experimental results do not confirm our prediction.</b> <img src="../images/photon7.jpg" width="65%"> We observe the photons <b>only</b> in the detector $ B1 $, and we <b>never</b> observe any photon in the detector $ B2 $. <b> How could this be possible?</b> We can conclude that the "classical" (Newtonian) mechanics fail to explain the behaviors of particles. We need a new (mathematical) model. We can explain our experiments by using <u>quantum mechanics</u>.
github_jupyter
# Policy Gradient (PG) Referências: - [Schulman, John. _Optimizing Expectations_: From Deep Reinforcement Learning to Stochastic Computation Graphs](https://www2.eecs.berkeley.edu/Pubs/TechRpts/2016/EECS-2016-217.html). - [Spinning Up](https://spinningup.openai.com/en/latest/spinningup/rl_intro3.html) # Conceito Em todos os métodos que vimos até agora (Monte Carlo, TD/Q-Learning, ...), o agente aprende uma função de valor $V(s | \theta)$ ou $Q(s,a | \theta)$, onde $\theta$ são os parâmetros/pesos do modelo. O agente então segue uma política ($\varepsilon$-)gulosa, (quase-)deterministica, derivada da função de valor. Esses métodos são todos aproximações de programação dinâmica e encontram a política ótima de maneira indireta. Um método alternativo é estimar as políticas ótimas diretamente, ou seja, estimar os parâmetros ótimos $\theta$ para a política $\pi(a | s, \theta)$. Os métodos que utilizam gradientes para realizar essa tarefa são chamados de Policy Gradient. No caso de DQN, nós estimávamos a qualidade de uma ação usando bootstrap e minizávamos o erro entre o agente e esse $Q_{\mathrm{bootstrap}}$. Em PG, a situação é um pouco diferente, porque não é tão simples estimar diretamente algum "$\pi_{\mathrm{bootstrap}}$". Ao invés disso, utilizamos _gradient ascent_ para maximizar alguma função objetivo, como: - $J_0(\theta) = V^{\pi_\theta}(s_0)$ (valor do estado inicial) - $J_{\mathrm{mean}V}(\theta) = E_{s|\theta}\left[V^{\pi_\theta}(s)\right]$ (valor médio) - $J_{\mathrm{mean}\mathcal{R}}(\theta) = E_{s,a|\theta}\left[\mathcal{R}_s^a\right]$ (recompensa média) - $J_{\mathrm{mean}G}(\theta) = E_{\tau|\theta}\left[G_\tau\right]$ (retorno médio por episódio) O algoritmo de PG então se reduz a: $$\theta_{k+1} = \theta_k + \alpha \nabla_\theta J(\theta_k),$$ onde $\alpha$ é a taxa de aprendizado. Só falta um detalhe bem importante nessa equação: como calcular o gradiente de $J$. 
Obs: O resto dessa explicação, assim como a tese de referência, assume que a função objetivo é $J(\theta) = J_{\mathrm{mean}G}(\theta)$, ou seja, queremos maximizar o retorno médio por episódio. ## Teorema de Policy Gradient Definida a nossa função objetivo $J$, precisamos encontrar seu gradiente para então aplicar o gradiente ascendente. Para qualquer uma das funções objetivo especificadas acima, o gradiente de $J$ é dado por: $$\nabla_\theta J(\theta) = E_{\tau|\theta}\left[\sum_{t=0}^\infty Q(s_t,a_t|\theta) \nabla_\theta \log\pi(a_t|s_t,\theta)\right].$$ A demonstração do teorema encontra-se no [Apêndice](#apendice) deste notebook. ## REINFORCE **REINFORCE**, o algoritmo mais simples de PG, é obtido ao utilizar a função objetivo do retorno médio por episódio ($J_{\mathrm{mean}G}(\theta) = E_{\tau|\theta}\left[G_\tau\right]$) para avaliar nosso agente. Neste caso, o gradiente da nossa função objetivo poderia ser estimado por: \begin{align*} \nabla_\theta J(\theta) &= E_{\tau|\theta}\left[\sum_{t=0}^\infty Q(s_t,a_t|\theta) \nabla_\theta \log\pi(a_t|s_t,\theta)\right] \\ &\approx \sum_{t=0}^T G_t \nabla_\theta \log\pi(a_t|s_t,\theta) \end{align*} Dessa forma, seu algoritmo é dado por: ![REINFORCE](imgs/reinforce.svg) Note que esse algoritmo é on-policy, pois o cálculo do gradiente depende da distribuição de estados e ações e é válido apenas para a política que gerou essa distribuição. ## REINFORCE com Baseline Uma extensão dessa ideia é utilizar reinforce com baselines. Nesse método, ao invés de $G_t$, utilizamos a função vantagem (_advantage_) $A = G_t - V(s_t)$, que indica a qualidade de uma ação-estado em relação à qualidade média daquele estado. Para isso, é necessário treinar uma função de valor $V(s)$. O uso da vantagem reduz a variância do modelo e melhora significantemente sua convergência. 
O algoritmo fica: ![Baseline](imgs/baseline.svg) ## Imports ``` import gym import math import numpy as np import matplotlib.pyplot as plt import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torch.distributions.categorical import Categorical ``` ## Rede Neural ``` def logits_net(in_dim, out_dim): return nn.Sequential(nn.Linear(in_dim, 64), nn.ReLU(), nn.Linear(64, 64), nn.ReLU(), nn.Linear(64, out_dim)) def value_net(in_dim): return nn.Sequential(nn.Linear(in_dim, 32), nn.ReLU(), nn.Linear(32, 16), nn.ReLU(), nn.Linear(16, 1)) ``` ## Buffer PG ``` class PGBuffer: """ Armazena as experiências que serão utilizadas para treinar o agente de PG. """ def __init__(self, observation_space, max_length, gamma=1): self.gamma = gamma self.max_length = max_length self.states = np.zeros((max_length, *observation_space.shape), dtype=np.float32) self.actions = np.zeros(max_length, dtype=np.int32) self.rewards = np.zeros(max_length, dtype=np.float32) self.size = 0 def update(self, state, action, reward): self.states[self.size] = state self.actions[self.size] = action self.rewards[self.size] = reward self.size += 1 def clear(self): self.states[:] = 0 self.actions[:] = 0 self.rewards[:] = 0 self.size = 0 def get_returns(self): discounted_rewards = self.gamma**np.arange(self.max_length) * self.rewards return discounted_rewards[::-1].cumsum()[::-1].copy() def __len__(self): return self.size ``` ## Agente PG ``` class PGAgent: """ Uma classe que cria um agente PG. 
""" def __init__(self, observation_space, action_space, max_length, baseline=True, gamma=0.99, policy_lr=3e-4, baseline_lr=3e-4): """ Inicializa o agente com os parâmetros dados """ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") self.gamma = gamma self.action_space = action_space self.memory = PGBuffer(observation_space, max_length, gamma=gamma) self.policy_logit = logits_net(observation_space.shape[0], action_space.n).to(self.device) self.policy_optimizer = optim.Adam(self.policy_logit.parameters(), lr=policy_lr) if baseline: self.baseline = value_net(observation_space.shape[0]).to(self.device) self.baseline_optimizer = optim.Adam(self.baseline.parameters(), lr=baseline_lr) else: self.baseline = None def policy(self, state): if not torch.is_tensor(state): state = torch.FloatTensor(state).to(self.device) p = Categorical(logits=self.policy_logit(state)) return p def act(self, state): return self.policy(state).sample().item() def logp(self, state, action): action = torch.IntTensor(action).to(self.device) return self.policy(state).log_prob(action) def remember(self, state, action, reward): self.memory.update(state, action, reward) def is_full(self): return len(self.memory) == self.memory.max_length def train(self): size = len(self.memory) returns = self.memory.get_returns()[:size] states = torch.FloatTensor(self.memory.states[:size]).to(self.device) actions = self.memory.actions[:size] logps = self.logp(states, actions) advantages = torch.FloatTensor(returns).to(self.device) if self.baseline: v = self.baseline(states).flatten() advantages -= v.detach() baseline_loss = F.smooth_l1_loss(v, torch.FloatTensor(returns).to(self.device)) self.baseline_optimizer.zero_grad() baseline_loss.backward(retain_graph=True) torch.nn.utils.clip_grad_norm_(self.baseline.parameters(), 1) self.baseline_optimizer.step() policy_loss = -(advantages * logps).sum() self.policy_optimizer.zero_grad() policy_loss.backward() self.policy_optimizer.step() 
self.memory.clear() return policy_loss.item(), (baseline_loss.item() if self.baseline else 0) ``` ### Definição de parâmetros ``` env_name = 'CartPole-v1' env = gym.make(env_name) GAMMA = .999 MAX_LENGTH = 1000 POLICY_LR = 1e-3 BASELINE = True BASELINE_LR = 4e-4 TRAIN_TIME = 100_000 ``` ### Criando o agente ``` agent = PGAgent(env.observation_space, env.action_space, max_length=MAX_LENGTH, baseline=BASELINE, policy_lr=POLICY_LR, baseline_lr=BASELINE_LR, gamma=GAMMA) ``` ## Treinamento ``` def train(agent, env, total_timesteps): total_reward = 0 episode_returns = [] avg_returns = [] state = env.reset() timestep = 0 episode = 0 pl = 0 bl = 0 while timestep < total_timesteps: action = agent.act(state) next_state, reward, done, _ = env.step(action) agent.remember(state, action, reward) timestep += 1 total_reward += reward if done: episode_returns.append(total_reward) avg_returns.append(np.mean(episode_returns[-10:])) episode += 1 next_state = env.reset() pl, bl = agent.train() total_reward *= 1 - done state = next_state ratio = np.ceil(100 * timestep / total_timesteps) avg_return = avg_returns[-1] if avg_returns else np.nan print(f'\r[{ratio:3.0f}%] ' f'timestep = {timestep}/{total_timesteps}, ' f'episode = {episode:3d}, ' f'avg_return = {avg_returns[-1] if avg_returns else 0 :10.4f}, ' f'policy_loss={pl:9.4f}, ' f'baseline_loss={bl:9.4f}', end='') print() if len(agent.memory) > 0: agent.train() return episode_returns, avg_returns returns, avg_returns = train(agent, env, TRAIN_TIME) plt.plot(returns, label='Retorno') plt.plot(avg_returns, label='Retorno médio') plt.xlabel('episode') plt.ylabel('return') plt.legend() plt.show() ``` ## Testando nosso Agente ``` def evaluate(agent, env, episodes=10): total_reward = 0 episode_returns = [] episode = 0 state = env.reset() while episode < episodes: action = agent.act(state) next_state, reward, done, _ = env.step(action) total_reward += reward if done: episode_returns.append(total_reward) episode += 1 next_state = env.reset() 
total_reward *= 1 - done state = next_state ratio = np.ceil(100 * episode / episodes) print(f"\r[{ratio:3.0f}%] episode = {episode:3d}, avg_return = {np.mean(episode_returns) if episode_returns else 0:10.4f}", end="") return np.mean(episode_returns) evaluate(agent, env, 10) ``` ## Variância ``` episodes = [] returns = [] avg_returns = [] for _ in range(5): agent_ = PGAgent(env.observation_space, env.action_space, max_length=MAX_LENGTH, baseline=True, policy_lr=POLICY_LR, baseline_lr=BASELINE_LR, gamma=GAMMA) x, y = train(agent_, env, TRAIN_TIME) returns += x avg_returns += y episodes += list(range(len(x))) import pandas as pd import seaborn as sns df = pd.DataFrame({'episodes': episodes, 'returns': returns, 'avg_returns': avg_returns}) melted = df.melt(id_vars=['episodes'], value_vars=['returns', 'avg_returns']) sns.lineplot(x='episodes', y='value', hue='variable', data=melted, ci='sd') episodes = [] returns = [] avg_returns = [] for _ in range(5): agent_ = PGAgent(env.observation_space, env.action_space, max_length=MAX_LENGTH, baseline=False, policy_lr=POLICY_LR, baseline_lr=BASELINE_LR, gamma=GAMMA) x, y = train(agent_, env, TRAIN_TIME) returns += x avg_returns += y episodes += list(range(len(x))) import pandas as pd import seaborn as sns df = pd.DataFrame({'episodes': episodes, 'returns': returns, 'avg_returns': avg_returns}) melted = df.melt(id_vars=['episodes'], value_vars=['returns', 'avg_returns']) sns.lineplot(x='episodes', y='value', hue='variable', data=melted, ci='sd') ``` <a id="apendice"></a> # Apêndice ## A probabilidade de uma trajetória Algo que será bem útil é o cálculo da probabilidade de uma trajetória $\tau = (s_0,a_0,s_1,a_1,\dots)$. Se a distribuição inicial de estados é dada por $\mu(s) = $ _prob. 
do estado inicial ser_ $s$, temos: $$p(\tau|\theta) = \mu(s_0) \pi(a_0|s_0,\theta) p(s_1|s_0,a_0) \pi(a_1|s_1,\theta)\cdots.$$ Tomando o log dessa expressão, obtemos: \begin{align*} \log p(\tau|\theta) &= \log \mu(s_0) + \log\pi(a_0|s_0,\theta) + \log p(s_1|s_0,a_0) + \log \pi(a_1|s_1,\theta) + \cdots = \\ &= \log \mu(s_0) + \sum_{t=0}^\infty \left[\log \pi(a_t|s_t,\theta) + \log p(s_{t+1} | s_t, a_t)\right] \end{align*} Como os únicos termos que dependem de $\theta$ na última expressão são os termos da forma $\log \pi(a_t|s_t,\theta)$, temos por fim: $$\nabla \log p(\tau|\theta) = \sum_{t=0}^\infty \nabla \log \pi(a_t|s_t,\theta)$$ ## O gradiente de _J_ Do cálculo, sabemos que: $$\frac{d}{dx} \log x = \frac1x \implies \frac{d}{dx} \log g(x) = \frac{1}{g(x)} g'(x).$$ Em cálculo multivariável, vale analogamente: $$\nabla \log g(\theta) = \frac{1}{g(\theta)} \nabla g(\theta), \quad \text{ou seja}, \quad \nabla g(\theta) = g(\theta) \nabla \log g(\theta).$$ A função objetivo pode ser escrita em forma integral como: $$J(\theta) = E_{\tau|\theta}\left[G_\tau\right] = \int_\tau p(\tau|\theta) G_\tau d\tau$$ O gradiente de $J$ fica então: \begin{align*} \nabla J(\theta) &= \nabla_\theta \int_\tau p(\tau|\theta) \cdot G_\tau d\tau \\ &= \int G_\tau \cdot \nabla_\theta p(\tau|\theta) d\tau \\ &= \int G_\tau \cdot p(\tau|\theta) \nabla_\theta \log p(\tau|\theta) d\tau \\ &= \int p(\tau|\theta) \cdot G_\tau \nabla_\theta \log p(\tau|\theta) d\tau \\ &= E_{\tau|\theta}\left[G_\tau \nabla_\theta \log p(\tau|\theta)\right] \\ &= E_{\tau|\theta}\left[G_\tau \sum_{t=0}^\infty \nabla_\theta \log \pi(a_t|s_t,\theta)\right] \end{align*} ### Demonstração do Teorema de Policy Gradient A demonstração completa e rigorosa pode ser vista no material de referência e, em particular, [nesse material extra](https://spinningup.openai.com/en/latest/spinningup/extra_pg_proof1.html) do Spinning Up. Aqui será passada apenas a ideia básica. 
Primeiramente, podemos reescrever o gradiente de $J$ como: $$\nabla_\theta J(\theta) = E_{\tau|\theta}\left[\sum_{t=0}^\infty G_\tau \nabla_\theta \log \pi(a_t|s_t,\theta)\right].$$ Note que para qualquer instante $t=t_i$, essa fórmula considera o retorno total a partir do instante $t=0$, o que é um pouco contra-intuitivo. Afinal, o agente deveria considerar apenas as recompensas futuras ($t \ge t_i$) ao decidir qual ação tomar. Essa intuição pode ser confirmada matematicamente, de forma que: \begin{align*} \nabla_\theta J(\theta) &= E_{\tau|\theta}\left[\sum_{t=0}^T G_{\tau}^{t:\infty} \nabla_\theta \log \pi(a_t|s_t,\theta)\right] \\ &= E_{\tau|\theta}\left[\sum_{t=0}^T Q(s_t,a_t|\theta) \nabla_\theta \log \pi(a_t|s_t,\theta)\right] \end{align*} Note que assumimos que o episódio tem uma duração máxima $T$ e que a distribuição de estados é estacionária (i.e. $s_t,a_t$ têm a mesma distribuição que $s,a$).
github_jupyter
# Introduction to Programming with Python # Unit 4: Loops Our task of generating a problem book with quadratic equations will only be useful, if we can generate many equations of the same type, not just one. Computer is very good at repeating the same computations, so the ability for us to express the idea of repeating something over and over is very important. This concept is called **a loop**. The best way to create a loop is by using **for** operator: ``` for color in ["red","green","blue"]: print("Roses are "+color) ``` In this example, we have a list of colors (written in square brackets `[]`), and **for loop** repeats the `print` operator several times, one time for each color in the list. In the code block below **for**, variable `color` takes the value of each color in turn. Very often we want to repeat something several times. In order for us not to write a list, we can use the special function `range(n)` to specify the range of numbers from 0 up to (but not including) the given number. For example, to print all the numbers from 1 to 10, we can write: ``` for i in range(10): print(i+1, end=' ') ``` Notice two things here: * We print `i+1` and not `i`, because `i` changes from 0 to 9. It is important to remember that in Python numbering starts with 0. * We use `end=' '` construction to indicate what `print` should do after printing the specified value. By default `print` goes to the new line, and by specifying space as an end-character we can print all numbers in one line ## Drawing a Street Remember that we defined a function to draw a house earlier? Let's now explore our notion of loops and draw a street of houses! But first of all, let's also simplify our function of drawing a square using for loop: ``` import jturtle as turtle def square(x): for t in range(4): turtle.forward(x) turtle.right(90) square(10) turtle.done() ``` Now the function to draw the house itself. 
We will add a few more lines to the end of the function in order to return the turtle to the original position in the lower left corner of the drawing: ``` def house(size): square(size) turtle.forward(size) turtle.right(30) turtle.forward(size) turtle.right(120) turtle.forward(size) turtle.right(30) turtle.penup() turtle.forward(2*size/3) turtle.right(90) turtle.forward(size/3) turtle.pendown() square(size/3) turtle.penup() turtle.forward(2*size/3) turtle.left(90) turtle.forward(size/3) turtle.left(180) turtle.pendown() house(10) turtle.done() ``` To draw 5 houses in a street, we basically need to call the `house` function 5 times in a loop. However, if we just do that, all houses will be rendered on top of each other, and we will not actually see them. ``` for i in range(5): house(10) turtle.done() ``` To draw all houses in a row, we need to add some code to move the turtle in between houses, i.e. to shift to the right each time: ``` for i in range(5): house(10) turtle.penup() turtle.right(90) turtle.forward(11) turtle.left(90) turtle.pendown() turtle.done() ``` Congratulations, now you have mastered the **for**-loop! However, let us have a little bit more practice with loops. ## Creating a Problem Book Not that we know loops, we can easily produce a problem book with many different problems. In order not to duplicate functions defined in previous modules here, we collected them into an external module, and just write the `import` statement here to use them. If you have doubts how a function is defined - you can either have a look into the previous module, or into [pycourse.py](pycourse.py) file that contains all definitions. 
``` import random,math from IPython.display import display, Math from pycourse import coef, equation, solve def random_equation(): a = random.randint(1,5)*random.choice([-1,1]) b = random.randint(-10,10) c = random.randint(-20,20) return (a,b,c) def print_random_equation(): a,b,c = random_equation() if solve(a,b,c) == None: display(Math(equation(a,b,c)+"\quad (\mbox{no solutions})")) else: x1,x2 = solve(a,b,c) display(Math(equation(a,b,c)+f"\quad (x_1={x1:.2f},x_2={x2:.2f})")) ``` This code mostly duplicates the code from last unit, but there are a couple of new things here as well. First of all, there are weird things in what we print out, such as `\quad` and `\mbox`. Do not worry about them, those are used to make equations look a bit better. They are not part of Python language, but rather they are used inside Azure Notebooks to render math formulae. The second new feature that we can see here is called **string interpolation** and **formatting**. Often we have the situation when we need to print the value of some variable `x` inside the string, for example: ``` x = 10/3 # Suppose I have spent 2/3 of my daily pocket money of $10 print('I have',x,'dollars left') ``` If I want for some reason to store the whole message in a variable to print it later, I would need to put in some manual type conversion, like this: ``` message = 'I have '+str(x)+' dollars left.' print(message) ``` The same effect can be accomplished using special Python syntax for string interpolation: ``` message = f'I have {x} dollars left' print(message) ``` String interpolation starts with `f'`, and any variable or expression that appears inside curly braces `{..}` will be substituted by its value. One more thing that can be used with interpolation is **formatting**. You may have noticed that in our case the number `3.333333` does not look nice. It would be better to round it up to, say, 2 decimal places. 
While we can achieve it with the built-in `round` function, we can also specify **format** for printing the number, i.e. how many decimal places we want: ``` print(f'I have {x:0.2f} dollars left') ``` Now that we have explained all interesting places in the code, let's use `print_random_equation` function to generate a number of random equations! ``` for i in range(10): print_random_equation() ``` This concludes our module. Please complete some exercises before going to the next one! ## Exercise: Factorial A factorial of number $n$ (denoted by $n!$) is a number that is obtained by multiplying all natural numbers from $1$ to $n$, i.e. $$n! = 1\cdot2\cdot3\cdot\dots\cdot n$$ You need to: 1. Write a function `fact` that will calculate the factorial of any number `n` 2. Print a table of factorials for $n$ from 1 to 7
github_jupyter
# Aerospike Java Client – Advanced Collection Data Types *Last updated: June 22, 2021* The goal of this tutorial is to highlight the power of working with [collection data types (CDTs)]("https://docs.aerospike.com/docs/guide/cdt.html") in Aerospike. It covers the following topics: 1. Setting [contexts (CTXs)]("https://docs.aerospike.com/docs/guide/cdt-context.html") to apply operations to nested Maps and Lists. 2. Showing the return type options provided by CDT get/read operations. 3. Highlighting how policies shape application transactions. This [Jupyter Notebook](https://jupyter-notebook.readthedocs.io/en/stable/notebook.html) requires the Aerospike Database running locally with Java kernel and Aerospike Java Client. To create a Docker container that satisfies the requirements and holds a copy of these notebooks, visit the [Aerospike Notebooks Repo](https://github.com/aerospike-examples/interactive-notebooks). ## Prerequisites This Notebook builds on the material in the following notebooks: 1. [Working with Lists]("./java-working_with_lists.ipynb") 2. [Working with Maps]("./java-working_with_lists.ipynb") 3. [Introduction to Transactions]("./java-intro_to_transactions.ipynb") It uses examples based on those from [Modeling Using Lists](./java-modeling_using_lists.ipynb) and Working with Maps. If any of the following is confusing, please refer to a relevant notebook. # Notebook Setup ### Import Jupyter Java Integration Make it easier to work with Java in Jupyter. ``` import io.github.spencerpark.ijava.IJava; import io.github.spencerpark.jupyter.kernel.magic.common.Shell; IJava.getKernelInstance().getMagics().registerMagics(Shell.class); ``` ### Start Aerospike Ensure Aerospike Database is running locally. ``` %sh asd ``` ### Download the Aerospike Java Client Ask Maven to download and install the project object model (POM) of the Aerospike Java Client. 
``` %%loadFromPOM <dependencies> <dependency> <groupId>com.aerospike</groupId> <artifactId>aerospike-client</artifactId> <version>5.0.0</version> </dependency> </dependencies> ``` ### Start the Aerospike Java Client and Connect Create an instance of the Aerospike Java Client, and connect to the demo cluster. The default cluster location for the Docker container is *localhost* port *3000*. If your cluster is not running on your local machine, modify *localhost* and *3000* to the values for your Aerospike cluster. ``` import com.aerospike.client.AerospikeClient; AerospikeClient client = new AerospikeClient("localhost", 3000); System.out.println("Initialized the client and connected to the cluster."); ``` # Create CDT Data, Put into Aerospike, and Print It ``` import com.aerospike.client.Key; import com.aerospike.client.Bin; import com.aerospike.client.policy.ClientPolicy; import com.aerospike.client.Record; import com.aerospike.client.Operation; import com.aerospike.client.Value; import com.aerospike.client.cdt.ListOperation; import com.aerospike.client.cdt.ListPolicy; import com.aerospike.client.cdt.ListOrder; import com.aerospike.client.cdt.ListWriteFlags; import com.aerospike.client.cdt.MapOperation; import com.aerospike.client.cdt.MapPolicy; import com.aerospike.client.cdt.MapOrder; import com.aerospike.client.cdt.MapWriteFlags; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; // Create whale migration list of tuples. 
ArrayList<Value> whaleMigration0 = new ArrayList<Value>(); whaleMigration0.add(Value.get(1420)); whaleMigration0.add(Value.get("beluga whale")); whaleMigration0.add(Value.get("Beaufort Sea")); whaleMigration0.add(Value.get("Bering Sea")); ArrayList<Value> whaleMigration1 = new ArrayList<Value>(); whaleMigration1.add(Value.get(13988)); whaleMigration1.add(Value.get("gray whale")); whaleMigration1.add(Value.get("Baja California")); whaleMigration1.add(Value.get("Chukchi Sea")); ArrayList<Value> whaleMigration2 = new ArrayList<Value>(); whaleMigration2.add(Value.get(1278)); whaleMigration2.add(Value.get("north pacific right whale")); whaleMigration2.add(Value.get("Japan")); whaleMigration2.add(Value.get("Sea of Okhotsk")); ArrayList<Value> whaleMigration3 = new ArrayList<Value>(); whaleMigration3.add(Value.get(5100)); whaleMigration3.add(Value.get("humpback whale")); whaleMigration3.add(Value.get("Columbia")); whaleMigration3.add(Value.get("Antarctic Peninsula")); ArrayList<Value> whaleMigration4 = new ArrayList<Value>(); whaleMigration4.add(Value.get(3100)); whaleMigration4.add(Value.get("southern hemisphere blue whale")); whaleMigration4.add(Value.get("Corcovado Gulf")); whaleMigration4.add(Value.get("The Galapagos")); ArrayList<Value> whaleMigration = new ArrayList<Value>(); whaleMigration.add(Value.get(whaleMigration0)); whaleMigration.add(Value.get(whaleMigration1)); whaleMigration.add(Value.get(whaleMigration2)); whaleMigration.add(Value.get(whaleMigration3)); whaleMigration.add(Value.get(whaleMigration4)); // Create Map of Whale Observations HashMap <Value, Value> mapObs = new HashMap <Value, Value>(); HashMap <String, Integer> mapCoords0 = new HashMap <String, Integer>(); mapCoords0.put("lat", -85); mapCoords0.put("long", -130); HashMap <String, Integer> mapCoords1 = new HashMap <String, Integer>(); mapCoords1.put("lat", -25); mapCoords1.put("long", -50); HashMap <String, Integer> mapCoords2 = new HashMap <String, Integer>(); mapCoords2.put("lat", 35); 
mapCoords2.put("long", 30); mapObs.put(Value.get(13456), Value.get(mapCoords1)); mapObs.put(Value.get(14567), Value.get(mapCoords2)); mapObs.put(Value.get(12345), Value.get(mapCoords0)); // Put data in Aerospike, get the data, and print it String nestedCDTSetName = "nestedset1"; String nestedCDTNamespaceName = "test"; Integer whaleMigrationWriteFlags = ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL; ListPolicy whaleMigrationPolicy = new ListPolicy(ListOrder.UNORDERED, whaleMigrationWriteFlags); MapPolicy mapObsPolicy = new MapPolicy(MapOrder.KEY_ORDERED, MapWriteFlags.DEFAULT); Integer whaleKeyName = 2; String listWhaleBinName = "listwhalebin"; String mapObsBinName = "mapobsbin"; Bin bin1 = new Bin(listWhaleBinName, whaleMigration); Key whaleKey = new Key(nestedCDTNamespaceName, nestedCDTSetName, whaleKeyName); Record putDataIn = client.operate(client.writePolicyDefault, whaleKey, Operation.put(bin1), MapOperation.putItems(mapObsPolicy, mapObsBinName, mapObs) ); System.out.println(listWhaleBinName + ": " + whaleMigration + "\n\n" + mapObsBinName + ": " + mapObs ); ``` # Using Contexts (CTXs) to work with Nested CDTs What are Nested CDTs and CTXs? ## What is a Nested CDT? The primary use case of Key-Value Stores, like Aerospike Database, is to store document-oriented data, like a JSON map. As document-oriented data grows organically, it is common for one CDT (list or map) to contain another CDT. Does the application need a list in a map in a list in a map? Aerospike fully supports nesting CDTs, so that’s no problem. ## What is a Context? A Context (CTX) is a reference to a nested CDT, a List or Map that is stored in a List or Map somewhere in an Aerospike Bin. All [List](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/ListOperation.html) and [Map Operations](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapOperation.html) accept an optional CTX argument. 
Any CTX argument must refer to data of the type supported by the operation. The most common ways to access a CTX are to look up a Map CTX directly by its key within the Bin and to drill down within a List or Map by index, rank or value. A CTX can also be created within a List or Map. For more details, see the [CTX APIs](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/CDT.html). ## Look up a Map CTX in a Bin by Mapkey Use the `mapKey` method to look up a CTX in a Map directly by mapkey. This works for a Map anywhere in a Bin. The following is an example of finding a Map CTX in a Bin by Mapkey: ``` import com.aerospike.client.cdt.CTX; import com.aerospike.client.cdt.MapReturnType; Integer lookupMapKey = 14567; String latKeyName = "lat"; Record whaleSightings = client.operate(client.writePolicyDefault, whaleKey, MapOperation.getByKey(mapObsBinName, Value.get(latKeyName), MapReturnType.VALUE, CTX.mapKey(Value.get(lookupMapKey))) ); System.out.println(mapObsBinName + ": " + mapObs ); System.out.println("The " + latKeyName + " of sighting at timestamp " + lookupMapKey + ": " + whaleSightings.getValue(mapObsBinName)); ``` ## Drill down into a List or Map Here are the options to drill down into a CDT. Drilling down to a CTX in a List: * `listIndex`: Lookup list by index offset. * `listRank`: Lookup list by rank. * `listValue`: Lookup list by value. Drilling down to a CTX in a Map: * `mapIndex`: Lookup map by index offset. * `mapRank`: Lookup map by rank. * `mapValue`: Lookup map by value. The following is an example of drilling down within a List and Map CTX: ``` import com.aerospike.client.cdt.ListReturnType; // CDT Drilldown Values Integer drilldownIndex = 2; Integer drilldownRank = 1; Value listDrilldownValue = Value.get(whaleMigration1); Value mapDrilldownValue = Value.get(mapCoords0); // Variables to access parts of the selected CDT. 
Integer getIndex = 1; Record theRecord = client.get(null, whaleKey); Record drilldown = client.operate(client.writePolicyDefault, whaleKey, ListOperation.getByIndex(listWhaleBinName, getIndex, MapReturnType.VALUE, CTX.listIndex(drilldownIndex)), ListOperation.getByIndex(listWhaleBinName, getIndex, MapReturnType.VALUE, CTX.listRank(drilldownRank)), ListOperation.getByIndex(listWhaleBinName, getIndex, MapReturnType.VALUE, CTX.listValue(listDrilldownValue)), MapOperation.getByIndex(mapObsBinName, getIndex, MapReturnType.VALUE, CTX.mapIndex(drilldownIndex)), MapOperation.getByIndex(mapObsBinName, getIndex, MapReturnType.VALUE, CTX.mapRank(drilldownRank)), MapOperation.getByIndex(mapObsBinName, getIndex, MapReturnType.VALUE, CTX.mapValue(mapDrilldownValue)) ); List<?> returnWhaleList = drilldown.getList(listWhaleBinName); List<?> returnObsList = drilldown.getList(mapObsBinName); System.out.println("The whale migration list is: " + theRecord.getValue(listWhaleBinName) + "\n"); System.out.println("The whale name from the CTX selected by index " + drilldownIndex + ": " + returnWhaleList.get(0)); System.out.println("The whale name from the CTX selected by rank " + drilldownRank + ": " + returnWhaleList.get(1)); System.out.println("The whale name from the CTX selected by value " + listDrilldownValue + ": " + returnWhaleList.get(2) + "\n\n"); System.out.println("The observation map is: " + theRecord.getValue(mapObsBinName) + "\n"); System.out.println("The longitude of the observation from the CTX selected by index " + drilldownIndex + ": " + returnObsList.get(0)); System.out.println("The longitude of the observation from the CTX selected by rank " + drilldownRank + ": " + returnObsList.get(1)); System.out.println("The longitude of the observation from the CTX selected by value " + mapDrilldownValue + ": " + returnObsList.get(2)); ``` ## Create a CTX Example If the context for the operation does not yet exist, it can be created using the following methods. 
Creating a CTX in a List or Map: * `listIndexCreate`: Create list by base list's index offset. * `mapKeyCreate`: Create map by base map's key. The following are examples of creating a list and map CTX and then writing data to the new CTX. ``` ArrayList<Value> newWhaleMigration = new ArrayList<Value>(); newWhaleMigration.add(Value.get(1449)); newWhaleMigration.add(Value.get("sei whale")); newWhaleMigration.add(Value.get("Greenland")); newWhaleMigration.add(Value.get("Gulf of Maine")); Integer whaleIndex = 5; HashMap <Value, Value> mapCoords3 = new HashMap <Value, Value>(); mapCoords3.put(Value.get("lat"), Value.get(95)); mapCoords3.put(Value.get("long"), Value.get(110)); Integer newObsKey = 15678; Record createCTX = client.operate(client.writePolicyDefault, whaleKey, ListOperation.insertItems(listWhaleBinName, 0, newWhaleMigration, CTX.listIndexCreate(whaleIndex, ListOrder.UNORDERED, true)), MapOperation.putItems(mapObsPolicy, mapObsBinName, mapCoords3, CTX.mapKeyCreate(Value.get(newObsKey), MapOrder.KEY_ORDERED)) ); Record postCreate = client.get(null, whaleKey); System.out.println("Before, the whale migration list was: " + theRecord.getValue(listWhaleBinName) + "\n"); System.out.println("After the addition, it is:" + postCreate.getValue(listWhaleBinName) + "\n\n"); System.out.println("Before, the observation map was: " + theRecord.getValue(mapObsBinName) + "\n"); System.out.println("After the addition, it is: " + postCreate.getValue(mapObsBinName)); ``` # Choosing the Return Type Options for CDTs Operations on CDTs can return different types of data, depending on the return type value specified. A return type can be combined with the INVERTED flag to return all data from the CDT that was not selected by the operation. The following are the [Return Types for Lists](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/ListReturnType.html) and [Maps](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapReturnType.html). 
## Standard Return Type Options for CDTs Aerospike Lists and Maps both provide the following return type options. * `COUNT`: Return count of items selected. * `INDEX`: Return index offset order. * `NONE`: Do not return a result. * `RANK`: Return value order. If the list/map is not ordered, Aerospike will JIT-sort the list/map. * `REVERSE_INDEX`: Return reverse index offset order. * `REVERSE_RANK`: Return value order from a version of the list sorted from maximum to minimum value. If the list is not ordered, Aerospike will JIT-sort the list. * `VALUE`: Return value for single item read and list of values from a range read. All indexes are 0-based, with the last element accessible by index -1. The following is an example demonstrating each possible return type from the same operation. ``` ArrayList<Value> lowTuple = new ArrayList<Value>(); lowTuple.add(Value.get(1400)); lowTuple.add(Value.NULL); ArrayList<Value> highTuple = new ArrayList<Value>(); highTuple.add(Value.get(3500)); highTuple.add(Value.NULL); Record between1400and3500 = client.operate(client.writePolicyDefault, whaleKey, ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.COUNT), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.INDEX), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.NONE), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.RANK), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.REVERSE_INDEX), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.REVERSE_RANK), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.VALUE) ); List<?> returnWhaleRange = between1400and3500.getList(listWhaleBinName); 
System.out.println("The current whale migration list is: " + postCreate.getValue(listWhaleBinName) + "\n"); System.out.println("For the whales who migrate between 1400 and 3500 miles..."); System.out.println("Return COUNT: " + returnWhaleRange.get(0)); System.out.println("Return INDEX: " + returnWhaleRange.get(1)); System.out.println("Return NONE: has no return value."); System.out.println("Return RANK: " + returnWhaleRange.get(2)); System.out.println("Return REVERSE_INDEX: " + returnWhaleRange.get(3)); System.out.println("Return REVERSE_RANK: " + returnWhaleRange.get(4)); System.out.println("Return Values: " + returnWhaleRange.get(5)); ``` ## Additional Return Type Options for Maps Because Maps have a replicable key/value structure, Aerospike provides options to return mapkeys or key/value pairs, in addition to value. * `KEY`: Return key for single key read and key list for range read. * `KEY_VALUE`: Return key/value pairs for items. The following is an example demonstrating returning a key or key/value pair. ``` Integer latestObsRank = -1; Record latestWhaleObs = client.operate(client.writePolicyDefault, whaleKey, MapOperation.getByRank(mapObsBinName, latestObsRank, MapReturnType.KEY), MapOperation.getByRank(mapObsBinName, latestObsRank, MapReturnType.KEY_VALUE) ); List<?> latestObs = latestWhaleObs.getList(mapObsBinName); System.out.println("The current whale observations map is: " + postCreate.getValue(mapObsBinName) + "\n"); System.out.println("For the most recent observation..."); System.out.println("Return the key: " + latestObs.get(0)); System.out.println("Return key/value pair: " + latestObs.get(1)); ``` ## Invert the Operation Results for CDT Operations Aerospike also provides the `INVERTED` flag for CDT operations. When `INVERTED` is “logical or”-ed to the return type, the flag instructs a list or map operation to return the return type data for list or Map elements that were not selected by the operation. 
This flag instructs an operation to act as though a logical NOT operator was applied to the entire operation. The following is an example demonstrating inverted return values. ``` ArrayList<Value> lowTuple = new ArrayList<Value>(); lowTuple.add(Value.get(1400)); lowTuple.add(Value.NULL); ArrayList<Value> highTuple = new ArrayList<Value>(); highTuple.add(Value.get(3500)); highTuple.add(Value.NULL); Record between1400and3500 = client.operate(client.writePolicyDefault, whaleKey, ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.COUNT | ListReturnType.INVERTED), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.INDEX | ListReturnType.INVERTED), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.NONE | ListReturnType.INVERTED), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.RANK | ListReturnType.INVERTED), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.REVERSE_INDEX | ListReturnType.INVERTED), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.REVERSE_RANK | ListReturnType.INVERTED), ListOperation.getByValueRange(listWhaleBinName, Value.get(lowTuple), Value.get(highTuple), ListReturnType.VALUE | ListReturnType.INVERTED) ); List<?> returnWhaleRange = between1400and3500.getList(listWhaleBinName); System.out.println("The current whale migration list is: " + postCreate.getValue(listWhaleBinName) + "\n"); System.out.println("For the whales who migrate between 1400 and 3500 miles..."); System.out.println("Return INVERTED COUNT: " + returnWhaleRange.get(0)); System.out.println("Return INVERTED INDEX: " + returnWhaleRange.get(1)); System.out.println("Return INVERTED NONE: has no return value."); System.out.println("Return INVERTED RANK: " + 
returnWhaleRange.get(2)); System.out.println("Return INVERTED REVERSE_INDEX: " + returnWhaleRange.get(3)); System.out.println("Return INVERTED REVERSE_RANK: " + returnWhaleRange.get(4)); System.out.println("Return INVERTED Values: " + returnWhaleRange.get(5)); ``` # Highlighting how policies shape application transactions Each data type operation has a write policy which can be set per CDT write/put operation to optionally: * Just-in-time sort the data being operated on. * Apply flags that instruct Aerospike’s transaction write behavior. Create and set a MapPolicy or ListPolicy with the proper sort and write flags to change how Aerospike processes a transaction. ## MapOrder and ListOrder, Just-in-time Sorting for an Operation By default, Maps and Lists are stored unordered. There are explicit techniques to store a list or map in order. The Map data in this notebook is key sorted. Please refer to the code snippet creating the map data (above) for an example of this. There are examples of ordering lists in the notebook [Modeling Using Lists](./java-modeling_using_lists.ipynb). Applying a MapOrder or ListOrder has performance implications on operation performance. This can be a reason to apply a MapOrder or ListOrder when working with data. To understand the relative worst-case time complexity of Aerospike operations go [here for lists](https://docs.aerospike.com/docs/guide/cdt-list-performance.html) and [here for maps](https://docs.aerospike.com/docs/guide/cdt-map-performance.html). Whether to allow duplicates in a list is a function of ListOrder. **Note:** Aerospike finds that worst-case performance can be helpful in determining how to prioritize application use-cases against one another, but do not set realistic performance expectations for Aerospike Database. 
An example where they help is asking tough questions, like, “the worst case time complexity for operation A is X, is operation A important enough to do daily or just monthly in light of the other workloads that are more time sensitive?” ## Write Flags The following are lists of [write flags for Lists](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/ListWriteFlags.html) and [Maps](https://docs.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapWriteFlags.html). Beneath each are example transactions. A powerful use case for Aerospike is to group operations together into single-record atomic transactions using the `Operate` method. This technique is used above in this notebook. When applying transactions to data, there are common circumstances where: * All possible operations should be executed in a fault tolerant manner * Specific operation failure should cause all operations to fail Write flags can be used in any combination, as appropriate to the application and Aerospike operation being applied. ### Write Flags for all CDTs * `DEFAULT` * For Lists, allow duplicate values and insertions at any index. * For Maps, allow map create or updates. * `NO_FAIL`: Do not raise an error if a CDT item is denied due to write flag constraints. * `PARTIAL`: Allow other valid CDT items to be committed if a CDT item is denied due to write flag constraints. These flags provide fault tolerance to transactions. Apply some combination of the above three flags–`DEFAULT`, `NO_FAIL`, and `PARTIAL`–to operations by using “logical or” as demonstrated below. All other write flags set conditions for operations. **Note:** Without `NO_FAIL`, operations that fail due to the below policies will throw [either error code 24 or 26](https://docs.aerospike.com/docs/dev_reference/error_codes.html). #### Default Examples All of the above code snippets use a Default write flag policy. These operations are unrestricted by write policies. 
#### No Fail Examples All of the examples in the following sections show both an exception caused by a write flag, and then pair the demonstrated write flag with No Fail to show how the same operation can fail silently. #### Partial Flag Example Partial is generally used only in a transaction containing operations using the No Fail write flag. Otherwise, the transaction would contain no failures to overlook. The following example are a list and map transaction combining both failing and successful map and list operations. ``` // create policy to apply and data to trigger operation failure Integer inBoundsIndex = 0; Integer outOfBoundsIndex = 20; HashMap <Value, Value> mapCoords4 = new HashMap <Value, Value>(); mapCoords4.put(Value.get("lat"), Value.get(0)); mapCoords4.put(Value.get("long"), Value.get(0)); Integer existingObsKey = 13456; Integer listPartialWriteFlags = ListWriteFlags.INSERT_BOUNDED | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL; ListPolicy listPartialWritePolicy = new ListPolicy(ListOrder.UNORDERED, listPartialWriteFlags); Integer mapPartialWriteFlags = MapWriteFlags.CREATE_ONLY | MapWriteFlags.NO_FAIL | MapWriteFlags.PARTIAL; MapPolicy mapPartialWritePolicy = new MapPolicy(MapOrder.KEY_ORDERED, mapPartialWriteFlags); // create fresh record Integer partialFlagKeyName = 6; Key partialFlagKey = new Key(nestedCDTNamespaceName, nestedCDTSetName, partialFlagKeyName); Bin bin1 = new Bin(listWhaleBinName, whaleMigration); Record putDataIn = client.operate(null, partialFlagKey, Operation.put(bin1), MapOperation.putItems(mapObsPolicy, mapObsBinName, mapObs) ); Record partialDataPutIn = client.get(client.writePolicyDefault, partialFlagKey); // one failed and one successful operation for both list and map Record partialSuccessOp = client.operate(null, partialFlagKey, ListOperation.insert(listPartialWritePolicy, listWhaleBinName, outOfBoundsIndex, Value.get(newWhaleMigration)), ListOperation.set(listPartialWritePolicy, listWhaleBinName, inBoundsIndex, 
Value.get(newWhaleMigration)), MapOperation.put(mapPartialWritePolicy, mapObsBinName, Value.get(existingObsKey), Value.get(mapCoords4)), MapOperation.put(mapPartialWritePolicy, mapObsBinName, Value.get(newObsKey), Value.get(mapCoords3)) ); Record partialSuccessData = client.get(client.writePolicyDefault, partialFlagKey); System.out.println ("Failed to add a 5th item.\nSucceeded at changing the first item.\n"); System.out.println ("Original List: " + partialDataPutIn.getValue(listWhaleBinName) + "\n"); System.out.println ("Updated List: " + partialSuccessData.getValue(listWhaleBinName) + "\n\n"); System.out.println ("Failed to modify an exiting observation.\nSucceeded at adding a new observation.\n"); System.out.println ("Original Map: " + partialDataPutIn.getValue(mapObsBinName) + "\n"); System.out.println ("Updated Map: " + partialSuccessData.getValue(mapObsBinName) + "\n\nFor more about the failed operations, see the examples below."); Boolean partialExampleRecordDeleted=client.delete(null, partialFlagKey); ``` ### Write Flags for Lists Only: * `INSERT_BOUNDED`: Enforce list boundaries when inserting. Do not allow values to be inserted at index outside current list boundaries. * `ADD_UNIQUE`: Only add unique values. 
#### Insert Bounded Example ``` // create policy to apply and data to break policy Integer outOfBoundsIndex = 20; ListPolicy listInsertBoundedPolicy = new ListPolicy(ListOrder.UNORDERED, ListWriteFlags.INSERT_BOUNDED); ListPolicy listBoundedNoFailPolicy = new ListPolicy(ListOrder.UNORDERED, ListWriteFlags.INSERT_BOUNDED | ListWriteFlags.NO_FAIL); // create fresh record Integer whaleBoundedKeyName = 7; Bin bin1 = new Bin(listWhaleBinName, whaleMigration); Key whaleBoundedKey = new Key(nestedCDTNamespaceName, nestedCDTSetName, whaleBoundedKeyName); client.put(client.writePolicyDefault, whaleBoundedKey, bin1); Record ibDataPutIn = client.get(null, whaleBoundedKey); System.out.println("Data in the record: " + ibDataPutIn.getValue(listWhaleBinName) + "\n"); // fail for INSERT_BOUNDED try { Record ibFail = client.operate(client.writePolicyDefault, whaleBoundedKey, ListOperation.insert(listInsertBoundedPolicy, listWhaleBinName, outOfBoundsIndex, Value.get(newWhaleMigration)) ); System.out.println("The code does not get here."); } catch(Exception e) { System.out.println("Out of Bounds Attempt 1: Exception caught."); Record ibNoFail = client.operate(client.writePolicyDefault, whaleBoundedKey, ListOperation.insert(listBoundedNoFailPolicy, listWhaleBinName, outOfBoundsIndex, Value.get(newWhaleMigration)) ); Record ibNoFailData = client.get(client.writePolicyDefault, whaleBoundedKey); if(ibNoFailData.getValue(listWhaleBinName).equals(ibDataPutIn.getValue(listWhaleBinName))) { System.out.println("Out of Bounds Attempt 2: No operation was executed. 
Error was suppressed by NO_FAIL.\n"); } } Record noIB = client.operate(client.writePolicyDefault, whaleBoundedKey, ListOperation.insert(listWhaleBinName, outOfBoundsIndex, Value.get(newWhaleMigration)) ); Record noIBData = client.get(null, whaleBoundedKey); System.out.println("Without Insert Bounded, a series of nulls is insein the Bin: " + noIBData.getValue(listWhaleBinName)); ``` #### Add Unique Example ``` // create policy to apply ListPolicy listAddUniquePolicy = new ListPolicy(ListOrder.UNORDERED, ListWriteFlags.ADD_UNIQUE); ListPolicy listAddUniqueNoFailPolicy = new ListPolicy(ListOrder.UNORDERED, ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL); // create fresh record Integer whaleAddUniqueKeyName = 8; Bin bin1 = new Bin(listWhaleBinName, whaleMigration); Key whaleAddUniqueKey = new Key(nestedCDTNamespaceName, nestedCDTSetName, whaleAddUniqueKeyName); client.put(client.writePolicyDefault, whaleAddUniqueKey, bin1); Record auDataPutIn = client.get(null, whaleAddUniqueKey); // successful ADD_UNIQUE operation Record auSuccess = client.operate(client.writePolicyDefault, whaleAddUniqueKey, ListOperation.append(listAddUniquePolicy, listWhaleBinName, Value.get(newWhaleMigration)) ); Record auSuccessData = client.get(null, whaleAddUniqueKey); System.out.println("Data after the unique add of " + newWhaleMigration + ": " + auSuccessData.getValue(listWhaleBinName) + "\n"); // fail for 2nd ADD_UNIQUE try { Record auFail = client.operate(client.writePolicyDefault, whaleAddUniqueKey, ListOperation.append(listAddUniquePolicy, listWhaleBinName, Value.get(newWhaleMigration)) ); System.out.println("The code does not get here."); } catch(Exception e) { System.out.println("Non-Unique Add 1: Exception caught."); Record auNoFail = client.operate(client.writePolicyDefault, whaleAddUniqueKey, ListOperation.append(listAddUniqueNoFailPolicy, listWhaleBinName, Value.get(newWhaleMigration)) ); Record auNoFailData = client.get(null, whaleAddUniqueKey); 
if(auNoFailData.getValue(listWhaleBinName).equals(auSuccessData.getValue(listWhaleBinName))) { System.out.println("Non-Unique Add 2: No operation was executed. Error was suppressed by NO_FAIL.\n"); } } Record noAU = client.operate(client.writePolicyDefault, whaleAddUniqueKey, ListOperation.append(listWhaleBinName, Value.get(newWhaleMigration)) ); Record noAUData = client.get(null, whaleAddUniqueKey); System.out.println("Without Add Unique here, the tuple for a sei whale is there 2x: " + noAUData.getValue(listWhaleBinName)); ``` ### Write Flags for Maps Only: * `CREATE_ONLY`: If the key already exists, the item will be denied. * `UPDATE_ONLY`: If the key already exists, the item will be overwritten. If the key does not exist, the item will be denied. #### Create Only Example ``` // create modify data and policy to apply HashMap <Value, Value> mapCoords4 = new HashMap <Value, Value>(); mapCoords4.put(Value.get("lat"), Value.get(0)); mapCoords4.put(Value.get("long"), Value.get(0)); MapPolicy mapCreateOnlyPolicy = new MapPolicy(MapOrder.KEY_ORDERED, MapWriteFlags.CREATE_ONLY); MapPolicy mapCreateOnlyNoFailPolicy = new MapPolicy(MapOrder.KEY_ORDERED, MapWriteFlags.CREATE_ONLY | MapWriteFlags.NO_FAIL); // create fresh record Integer obsCreateOnlyKeyName = 9; Key obsCreateOnlyKey = new Key(nestedCDTNamespaceName, nestedCDTSetName, obsCreateOnlyKeyName); Record putDataIn = client.operate(client.writePolicyDefault, obsCreateOnlyKey, MapOperation.putItems(mapObsPolicy, mapObsBinName, mapObs) ); Record coDataPutIn = client.get(null, obsCreateOnlyKey); // success for CREATE_ONLY Record coSuccess = client.operate(client.writePolicyDefault, obsCreateOnlyKey, MapOperation.put(mapCreateOnlyPolicy, mapObsBinName, Value.get(newObsKey), Value.get(mapCoords3)) ); Record coSuccessData = client.get(null, obsCreateOnlyKey); System.out.println("Created record and new key " + newObsKey + ". 
The data is now: " + coSuccessData.getValue(mapObsBinName) + "\n"); // fail for CREATE_ONLY try { Record coFail = client.operate(client.writePolicyDefault, obsCreateOnlyKey, MapOperation.put(mapCreateOnlyPolicy, mapObsBinName, Value.get(newObsKey), Value.get(mapCoords4)) ); System.out.println("The code does not get here."); } catch(Exception e) { System.out.println("Update attempt 1: Exception caught."); Record coNoFail = client.operate(client.writePolicyDefault, obsCreateOnlyKey, MapOperation.put(mapCreateOnlyNoFailPolicy, mapObsBinName, Value.get(newObsKey), Value.get(mapCoords4)) ); Record coNoFailData = client.get(null, obsCreateOnlyKey); if(coNoFailData.getValue(mapObsBinName).equals(coSuccessData.getValue(mapObsBinName))) { System.out.println("Update attempt 2: No operation was executed. Error was suppressed by NO_FAIL.\n"); } } Record noCO = client.operate(client.writePolicyDefault, obsCreateOnlyKey, MapOperation.put(mapObsPolicy, mapObsBinName, Value.get(newObsKey), Value.get(mapCoords4)) ); Record noCOData = client.get(null, obsCreateOnlyKey); System.out.println("Without Create Only, the observation at 15678 is overwritten: " + noCOData.getValue(mapObsBinName)); Boolean createOnlyExampleRecordDeleted=client.delete(null, obsCreateOnlyKey); ``` #### Update Only Example ``` // create policy to apply MapPolicy mapUpdateOnlyPolicy = new MapPolicy(MapOrder.KEY_ORDERED, MapWriteFlags.UPDATE_ONLY); MapPolicy mapUpdateOnlyNoFailPolicy = new MapPolicy(MapOrder.KEY_ORDERED, MapWriteFlags.UPDATE_ONLY | MapWriteFlags.NO_FAIL); // create Aerospike data elements for a fresh record Integer obsUpdateOnlyKeyName = 10; Key obsUpdateOnlyKey = new Key(nestedCDTNamespaceName, nestedCDTSetName, obsUpdateOnlyKeyName); Record uoPutDataIn = client.operate(client.writePolicyDefault, obsUpdateOnlyKey, MapOperation.putItems(mapObsPolicy, mapObsBinName, mapObs) ); Record uoDataPutIn = client.get(null, obsUpdateOnlyKey); System.out.println("Created record: " + 
uoDataPutIn.getValue(mapObsBinName) + "\n"); // fail for UPDATE_ONLY try { Record uoFail = client.operate(client.writePolicyDefault, obsUpdateOnlyKey, MapOperation.put(mapUpdateOnlyPolicy, mapObsBinName, Value.get(newObsKey), Value.get(mapCoords3)) ); System.out.println("The code does not get here."); } catch(Exception e) { System.out.println("Create Attempt 1: Exception caught."); Record uoNoFail = client.operate(client.writePolicyDefault, obsUpdateOnlyKey, MapOperation.put(mapUpdateOnlyNoFailPolicy, mapObsBinName, Value.get(newObsKey), Value.get(mapCoords3)) ); Record uoNoFailData = client.get(null, obsUpdateOnlyKey); if(uoNoFailData.getValue(mapObsBinName).equals(uoDataPutIn.getValue(mapObsBinName))){ System.out.println("Create Attempt 2: No operation was executed. Error was suppressed by NO_FAIL.\n"); } } Record noUO = client.operate(client.writePolicyDefault, obsUpdateOnlyKey, MapOperation.put(mapObsPolicy, mapObsBinName, Value.get(newObsKey), Value.get(mapCoords3)) ); Record noUOData = client.get(null, obsUpdateOnlyKey); // success for UPDATE_ONLY Record uoSuccess = client.operate(client.writePolicyDefault, obsUpdateOnlyKey, MapOperation.put(mapUpdateOnlyPolicy, mapObsBinName, Value.get(existingObsKey), Value.get(mapCoords4)) ); Record uoSuccessData = client.get(null, obsUpdateOnlyKey); System.out.println("Using update only, the value of an existing key " + existingObsKey + " can be updated: " + uoSuccessData.getValue(mapObsBinName) + "\n"); Boolean uoExampleRecordDeleted=client.delete(null, obsUpdateOnlyKey); ``` # Notebook Cleanup ### Truncate the Set Truncate the set from the Aerospike Database. 
``` import com.aerospike.client.policy.InfoPolicy; InfoPolicy infoPolicy = new InfoPolicy(); client.truncate(infoPolicy, nestedCDTNamespaceName, nestedCDTSetName, null); System.out.println("Set Truncated."); ``` ### Close the Client connections to Aerospike ``` client.close(); System.out.println("Server connection(s) closed."); ``` # Takeaways – CDTs Provide Flexible Document-Oriented Data Power Aerospike Collection Data Types... 1. facilitate complex data structures by supporting nesting through the use of contexts (CTXs) 2. provide intuitive and flexible return types options from operations 3. support policies that empower efficient and flexible transaction processing # What's Next? ## Next Steps Have questions? Don't hesitate to reach out if you have additional questions about data modeling at https://discuss.aerospike.com/c/how-developers-are-using-aerospike/data-modeling/143. Want to check out other Java notebooks? 1. [Intro to Transactions](./java-intro_to_transactions.ipynb) 2. [Modeling Using Lists](./java-modeling_using_lists.ipynb) 3. [Working with Maps](./java-working_with_maps.ipynb) 4. [Aerospike Query and UDF](query_udf.ipynb) Are you running this from Binder? [Download the Aerospike Notebook Repo](https://github.com/aerospike-examples/interactive-notebooks) and work with Aerospike Database and Jupyter locally using a Docker container. ## Additional Resources * Want to get started with Java? [Download](https://www.aerospike.com/download/client/) or [install](https://github.com/aerospike/aerospike-client-java) the Aerospike Java Client. (https://www.aerospike.com/apidocs/java/com/aerospike/client/cdt/MapOperation.html). * What are Namespaces, Sets, and Bins? Check out the [Aerospike Data Model](https://www.aerospike.com/docs/architecture/data-model.html). * How robust is the Aerospike Database? Browses the [Aerospike Database Architecture](https://www.aerospike.com/docs/architecture/index.html).
github_jupyter
# Assignment 3 - Practical Deep Learning Workshop #### In this task we will work with the dataset of the Home Depot product search relevance competition. #### Some background: In this competition, Home Depot is asking to help them improve their customers' shopping experience by developing a model that can accurately predict the relevance of search results. Search relevancy is an implicit measure Home Depot uses to gauge how quickly they can get customers to the right products. This data set contains a number of products and real customer search terms from Home Depot's website. The challenge is to predict a relevance score for the provided combinations of search terms and products. To create the ground truth labels, Home Depot has crowdsourced the search/product pairs to multiple human raters. The relevance is a number between 1 (not relevant) and 3 (highly relevant). For example, a search for "AA battery" would be considered highly relevant to a pack of size AA batteries (relevance = 3), mildly relevant to a cordless drill battery (relevance = 2), and not relevant to a snow shovel (relevance = 1). Each pair was evaluated by at least three human raters. The provided relevance scores are the average value of the ratings. There are three additional things to know about the ratings: • The specific instructions given to the raters are provided in relevance_instructions.docx. • Raters did not have access to the attributes. • Raters had access to product images, while the competition does not include images. #### Our task here is to predict the relevance for each pair listed in the test set. The test set contains both seen and unseen search terms.
``` from sklearn.feature_extraction.text import CountVectorizer from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.models import Model, Sequential from keras.layers import * # Dense, Embedding, LSTM from sklearn.model_selection import train_test_split from keras.utils.np_utils import to_categorical from keras.regularizers import l2 import re import pandas as pd import numpy as np import datetime import seaborn as sns import matplotlib.pyplot as plt import matplotlib.dates as mdates %matplotlib inline from google.colab import drive drive.mount('/content/gdrive') ``` #### First of all, we'll take a look at the data in each dataset of the input: train.csv is the training set, contains products, searches, and relevance scores. ``` train = pd.read_csv('/content/gdrive/My Drive/Colab Notebooks/input/train.csv',encoding='latin1') train.head() ``` test.csv is the test set, contains products and searches. We will need to predict the relevance for these pairs. ``` test = pd.read_csv('/content/gdrive/My Drive/Colab Notebooks/input/test.csv',encoding='latin1') test.head() ``` product_descriptions.csv contains a text description of each product. We may join this table to the training or test set via the product_uid. ``` product_descriptions = pd.read_csv('/content/gdrive/My Drive/Colab Notebooks/input/product_descriptions.csv',encoding='latin1') product_descriptions.head() ``` attributes.csv provides extended information about a subset of the products (typically representing detailed technical specifications). Not every product will have attributes. 
``` attributes = pd.read_csv('/content/gdrive/My Drive/Colab Notebooks/input/attributes.csv',encoding='latin1') attributes.head() ``` Data fields: - id - a unique Id field which represents a (search_term, product_uid) pair - product_uid - an id for the products - product_title - the product title - product_description - the text description of the product (may contain HTML content) - search_term - the search query - relevance - the average of the relevance ratings for a given id - name - an attribute name - value - the attribute's value ## Preprocessing the data We would like to have the products' corresponding product description, so we will merge the train and test datasets with the product_description table. Note: in order to decrease the dimensionality of the text, we will lower the characters. ``` mergedTrain = pd.merge(train, product_descriptions, how='inner', on='product_uid') mergedTrain.search_term = mergedTrain.search_term.apply(lambda x: x.lower()) mergedTrain.product_description = mergedTrain.product_description.apply(lambda x: x.lower()) mergedTrain.head() mergedTest= pd.merge(test, product_descriptions, how='inner', on='product_uid') mergedTest.search_term = mergedTest.search_term.apply(lambda x: x.lower()) mergedTest.product_description = mergedTest.product_description.apply(lambda x: x.lower()) mergedTest.head() ``` We convert the product_description and search_term attributes' values to lists of characters. ``` search_term_chars = [] product_description_chars = [] search_term_chars = mergedTrain.search_term.apply(lambda x: search_term_chars + list(x)) product_description_chars = mergedTrain.product_description.apply(lambda x: product_description_chars + list(x)) search_term_chars = [item for sublist in search_term_chars for item in sublist] product_description_chars = [item for sublist in product_description_chars for item in sublist] ``` And then, translate the characters to a unique integer values. 
We create two dictionaries (one for search_term and another for product_description), containing the pairs of characters and their unique values. ``` search_term_char_set = sorted(set(search_term_chars)) product_description_char_set = sorted(set(product_description_chars)) # translate from character to number, it's enumerator search_term_char_to_int = dict((c, i) for i, c in enumerate(search_term_char_set)) search_term_int_to_char = dict((i, c) for i, c in enumerate(search_term_char_set)) product_description_char_to_int = dict((c, i) for i, c in enumerate(product_description_char_set)) product_description_int_to_char = dict((i, c) for i, c in enumerate(product_description_char_set)) # summarize the loaded data n_chars = len(search_term_chars) n_vocab = len(search_term_char_set) print("search_term Total Characters: ", n_chars) print("search_term Total Vocab: ", n_vocab) n_chars2 = len(product_description_chars) n_vocab2 = len(product_description_char_set) print("product_description Total Characters: ", n_chars2) print("product_description Total Vocab: ", n_vocab2) mergedTrain.search_term = mergedTrain.search_term.apply(lambda x: list(x)) mergedTrain.product_description = mergedTrain.product_description.apply(lambda x: list(x)) mergedTrain.head() ``` We would like to turn the search_term and the product_description into sequences of unique integers. ``` def createData(char_to_int, char_arr): #seq_length = 100 dataX = [] for i in range(0,len(char_arr)): dataX.append(char_to_int[char_arr[i]]) return np.asarray(dataX) mergedTrain.search_term = mergedTrain.search_term.apply(lambda x: createData(search_term_char_to_int, x)) mergedTrain.product_description = mergedTrain.product_description.apply(lambda x: createData(product_description_char_to_int, x)) mergedTrain.head() ``` ## The target value - relevance Each pair was evaluated by at least three human raters. The provided relevance scores are the average value of the ratings.
Thus, we would like to see the number of unique values, between 1 and 3. There are 13 unique relevance values in the data sample. We could address the problem as a classification problem, but we want to address the distance from the maximum relevance value, so we will treat this as a regression problem. ``` plt.hist(np.unique(mergedTrain.relevance.values),density=True, histtype='bar') plt.show() np.unique(mergedTrain.relevance.values).size ``` In order to predict the relevance values we need to preprocess the values, and change the range from 1 - 3, to 0 - 1. We want to see the maximum length of each column - the search_term and product_description. We try to limit the char sequences to 75 chars; the lengths of the sequences must be the same in order to combine the data in the shared part of the network and enable predictions based on both of these inputs. We also want to see the max sizes of sequences in each column to find the optimal value that will get enough data from both of the columns. ``` from sklearn import preprocessing target = mergedTrain['relevance'].values min_max_scaler = preprocessing.MinMaxScaler() Y = min_max_scaler.fit_transform(target.reshape(-1, 1)) Y[:5] X1 = mergedTrain['search_term'].values X2 = mergedTrain['product_description'].values search_terms_lens = [] for element in mergedTrain['search_term'].values: search_terms_lens.append(len(element)) product_description_lens = [] for element in mergedTrain['product_description'].values: product_description_lens.append(len(element)) max_length1 = max(search_terms_lens) max_length2 = max(product_description_lens) ``` After trying a few options, we choose the maximum length of the sequences to be 75 integers, in order to yield better results. Sequences that are shorter will be padded in order to meet this length.
``` max_length = 75 def padding(seq, length): ans = [] for i in range(0,min(len(seq),length)): ans.append(seq[i]) if len(seq) <= length: for i in range(0,length-len(seq)): ans.append(0) return ans X1 = np.asarray([padding(x,max_length) for x in X1]) X2 = np.asarray([padding(x,max_length) for x in X2]) X1 = X1.reshape(X1.shape[0],X1.shape[1],1) X2 = X2.reshape(X2.shape[0],X2.shape[1],1) X1 = X1.astype(np.float32) X2 = X2.astype(np.float32) print(X1.shape) print(X2.shape) ``` This is the input that we insert into the model. ## Building the model We create a siamese model: two identical LSTM branches (one per input) whose outputs are compared by a distance-based prediction layer. ``` st_input = Input(shape=(max_length,1), name='st_input',dtype='float32') pd_input = Input(shape=(max_length,1), name='pd_input',dtype='float32') def createModel(): model = Sequential() model.add(LSTM(40)) model.add(Dense(64, activation='relu')) return model from keras.optimizers import Adadelta st_model = createModel() pd_model = createModel() def createSiameseModel(model1,model2,customLoss): out = Lambda(function=lambda x: K.exp(-K.sum(K.abs(x[0]-x[1]), axis=1, keepdims=True)), output_shape=lambda x: (x[0][0], 1), name='prediction')([model1(st_input), model2(pd_input)]) siamese_net = Model(input=[st_input,pd_input],output=[out]) siamese_net.compile(loss=customLoss,optimizer=Adadelta(lr=1.0, rho=0.95,clipnorm=1.20)) return siamese_net siamese_net1 = createSiameseModel(st_model,pd_model,'mse') siamese_net2 = createSiameseModel(st_model,pd_model,'mae') st_model.summary() siamese_net1.summary() ``` We have a good amount of trainable parameters. ``` X1_train,X1_val,X2_train,X2_val,Y_train, Y_val = train_test_split(X1,X2,Y,test_size = 0.2) ``` We split the data into train and validation/test sets. We choose the validation to be 20% of the entire data. We save the model weights that are best, in order to use them later for feature extraction without the need to train the model again.
``` from keras.callbacks import * path = 'gdrive/My Drive/Colab Notebooks' def set_callbacks(description='run1',patience=15,tb_base_logdir='./logs/'): cp = ModelCheckpoint(path + '/best_model_weights_char.h5'.format(description),save_best_only=True) rlop = ReduceLROnPlateau(patience=5) cb = [cp,rlop] return cb ``` ### Here we train the model: ``` start = time.time() history = siamese_net1.fit([X1_train,X2_train],Y_train,batch_size=1024, epochs=5, verbose=1, validation_data=([X1_val,X2_val],Y_val), callbacks=set_callbacks()) end = time.time() total_time = end - start plt.plot(history.history['loss'], label='train') plt.plot(history.history['val_loss'], label='test') plt.legend() plt.show() val_preds = siamese_net1.predict([X1_val,X2_val]) train_preds = siamese_net1.predict([X1_train,X2_train]) plt.hist(val_preds,density=True, histtype='bar') plt.show() plt.hist(Y_val,density=True, histtype='bar') plt.show() ``` We can see that the model predicted values around the average mark. ``` resultsTable = pd.DataFrame(columns=['model','runtime','TrainRMSE','ValRMSE','TestRMSE','TrainMAE','ValMAE','TestMAE']) def addToTable(modelName,runtime,train_rmse,val_rmse,test_rmse,train_mae,val_mae,test_mae): return resultsTable.append({'model': modelName,'runtime': runtime,'TrainRMSE': train_rmse,'ValRMSE': val_rmse, 'TrainMAE': test_rmse,'TrainMAE': train_mae,'ValMAE' :val_mae,'TestMAE': test_mae},ignore_index=True) ``` Lets run the model on the test samples, in order to do that we need to repeat the preprocessing and normalization process on the test data set. 
``` search_term_chars2 = [] product_description_chars2 = [] search_term_chars2 = mergedTest.search_term.apply(lambda x: search_term_chars2 + list(x)) product_description_chars2 = mergedTest.product_description.apply(lambda x: product_description_chars2 + list(x)) search_term_chars2 = [item for sublist in search_term_chars2 for item in sublist] product_description_chars2 = [item for sublist in product_description_chars2 for item in sublist] search_term_char_set2 = sorted(set(search_term_chars2)) product_description_char_set2 = sorted(set(product_description_chars2)) # translate from character to number, it's enumerator search_term_char_to_int2 = dict((c, i) for i, c in enumerate(search_term_char_set2)) search_term_int_to_char2 = dict((i, c) for i, c in enumerate(search_term_char_set2)) product_description_char_to_int2 = dict((c, i) for i, c in enumerate(product_description_char_set2)) product_description_int_to_char2 = dict((i, c) for i, c in enumerate(product_description_char_set2)) mergedTest.search_term = mergedTest.search_term.apply(lambda x: list(x)) mergedTest.product_description = mergedTest.product_description.apply(lambda x: list(x)) mergedTest.search_term = mergedTest.search_term.apply(lambda x: createData(search_term_char_to_int2, x)) mergedTest.product_description = mergedTest.product_description.apply(lambda x: createData(product_description_char_to_int2, x)) mergedTest.head() X1_test = mergedTest.search_term.values X2_test = mergedTest.product_description.values X1_test = np.asarray([padding(x,max_length) for x in X1_test]) X2_test = np.asarray([padding(x,max_length) for x in X2_test]) X1_test = X1_test.reshape(X1_test.shape[0],X1_test.shape[1],1) X2_test = X2_test.reshape(X2_test.shape[0],X2_test.shape[1],1) test_preds = siamese_net1.predict([X1_test,X2_test]) from sklearn.metrics import mean_absolute_error as mae from sklearn.metrics import mean_squared_error as mse resultsTable = 
addToTable('CHAR_SiameseNetwork',total_time,mse(train_preds,Y_train),mse(val_preds,Y_val),'-',mae(train_preds,Y_train),mae(val_preds,Y_val),'-') resultsTable ``` We calculated the RMSE and the MAE between the prediction and the true value in each one of the training, validation and test parts of the dataset. * Note: We could not find the true results of the test data samples, so we could not calculate the MAE and RMSE on these samples. ## ML Benchmark Lets create a benchmark model to compare the results of our model and the benchmark. We do a similiar character embedding process like in our model, but this time we will use the sklearn Vectorizer to do this process. The benchmark model that we will use is the Random Forest Regressor. ``` mergedTrain2 = pd.merge(train, product_descriptions, how='inner', on='product_uid') mergedTrain2.head() from sklearn.feature_extraction.text import CountVectorizer vectorizer = CountVectorizer(encoding='latin-1', analyzer='char') vectorizer.fit(mergedTrain2['search_term']) mltrain_x, mlval_x, mltrain_y, mlval_y = train_test_split(mergedTrain2['search_term'].values,mergedTrain2['relevance'].values, test_size = 0.2) train_x_count = vectorizer.transform(mltrain_x) val_x_count = vectorizer.transform(mlval_x) from sklearn import model_selection, preprocessing, linear_model, naive_bayes, metrics, svm,ensemble ml = ensemble.RandomForestRegressor() start_time = time.time() ml.fit(train_x_count, mltrain_y) end_time = time.time() total_time = end_time - start_time ml_train_preds = ml.predict(train_x_count) ml_val_preds = ml.predict(val_x_count) print(ml_val_preds.shape) resultsTable = addToTable('CHAR_RandomForestBenchmark',total_time,mse(ml_train_preds,mltrain_y),mse(ml_val_preds,mlval_y),'-',mae(ml_train_preds,mltrain_y),mae(ml_val_preds,mlval_y),'-') resultsTable plt.hist(ml_val_preds,density=True, histtype='bar') plt.show() plt.hist(mlval_y,density=True, histtype='bar') plt.show() ``` The benchmark model performed better than our siamese 
model. This shows us that our model is not achieving the desired scores to see good enough results. Here are some possible ways to improve the model: * The model is still not tuned with the correct parameters in the intermediate layers. Finding the most precise values for the LSTM node number or the number of outputs in the Dense layer. We tried higher values but it looked like the model was overfitted when it handled a large number of LSTM nodes. * There might be some imbalance in the data when we pad the search_term and the product_description, the search_term sequences are a lot shorter than the product_description, so we need to choose the right amount of characters in each sequence or change the value that we pad with in the padding function, currently it's 0. ## Feature Extraction We want to check how the feature extraction abilities of the model compare by taking out the last layers' outputs - the processed search_term and product_description inputs - and concatenating them to feed to an ML model, to see the RMSE and MAE of the ML models with the features from our network. The machine learning models we use are the Random Forest model and the Linear Regression model from sklearn.
``` fe_st_input = Input(shape=(max_length,1), name='st_input',dtype='float32') fe_pd_input = Input(shape=(max_length,1), name='pd_input',dtype='float32') input_layer1 = siamese_net1.layers[0].input[0] input_layer2 = siamese_net1.layers[1].input[0] fe_st_model = createModel() fe_pd_model = createModel() output_layer1 = siamese_net1.layers[3].get_output_at(0) output_layer2 = siamese_net1.layers[3].get_output_at(1) output_fn = K.function([st_input, pd_input], [output_layer1, output_layer2]) def extractFeatures(model1,model2,customLoss): out = concatenate([model1(fe_st_input), model2(fe_pd_input)]) siamese_net = Model(input=[fe_st_input,fe_pd_input],output=[out]) siamese_net.load_weights(path + '/best_model_weights_char.h5') siamese_net.compile(loss=customLoss,optimizer=Adadelta(lr=1.0, rho=0.95,clipnorm=1.20)) return siamese_net fe_model = extractFeatures(fe_st_model,fe_pd_model,'mse') fe_train_features = fe_model.predict([X1_train,X2_train]) fe_val_features = fe_model.predict([X1_val,X2_val]) fe_test_features = fe_model.predict([X1_test,X2_test]) randomForest = ensemble.RandomForestRegressor() start_time = time.time() randomForest.fit(fe_train_features, Y_train) end_time = time.time() total_time = end_time - start_time fe_train_preds = randomForest.predict(fe_train_features) fe_val_preds = randomForest.predict(fe_val_features) resultsTable = addToTable('FE_RandomForest_CHAR',total_time,mse(fe_train_preds,Y_train),mse(fe_val_preds,Y_val),'-',mae(fe_train_preds,Y_train),mae(fe_val_preds,Y_val),'-') linear = linear_model.LinearRegression() start_time = time.time() linear.fit(fe_train_features, Y_train) end_time = time.time() total_time = end_time - start_time fe_train_preds2= linear.predict(fe_train_features) fe_val_preds2 = linear.predict(fe_val_features) resultsTable = addToTable('FE_LinearRegression_CHAR',total_time,mse(fe_train_preds2,Y_train),mse(fe_val_preds2,Y_val),'-',mae(fe_train_preds2,Y_train),mae(fe_val_preds2,Y_val),'-') resultsTable ``` We see that the 
feature extraction ML models had pretty much the same performance as the siamese network. This means that the inaccuracy of our model is in the feature extraction phase; perhaps by making the improvements listed above we might achieve a better score. # Word Level Embedding We want to repeat the process but this time we want to do the embedding on a word level: every word instead of every character (as in the last part) will get a unique value in a sequence of values that will be fed to a similar siamese network and will be checked in the same manner as the character embedding. ## Data Preprocessing Similarly, we find the number of unique words that are available for each of the search_term and product_description samples and create dictionaries for each one of them to convert the texts to unique value sequences for the model to train and predict on. ``` mergedTrain = pd.merge(train, product_descriptions, how='inner', on='product_uid') mergedTrain.search_term = mergedTrain.search_term.apply(lambda x: x.lower()) mergedTrain.product_description = mergedTrain.product_description.apply(lambda x: x.lower()) mergedTrain.head() mergedTest= pd.merge(test, product_descriptions, how='inner', on='product_uid') mergedTest.search_term = mergedTest.search_term.apply(lambda x: x.lower()) mergedTest.product_description = mergedTest.product_description.apply(lambda x: x.lower()) mergedTest.head() import nltk nltk.download('punkt') from nltk.tokenize import word_tokenize st_words = [] for term in mergedTrain.search_term.values: for word in word_tokenize(term): st_words.append(word) st_word_set = sorted(set(st_words)) st_dict = dict((c, i) for i, c in enumerate(st_word_set)) pd_words = [] for term in mergedTrain.product_description.values: for word in word_tokenize(term): pd_words.append(word) pd_word_set = sorted(set(pd_words)) pd_dict = dict((c, i) for i, c in enumerate(pd_word_set)) st_words2 = [] for term in mergedTest.search_term.values: for word in word_tokenize(term):
st_words2.append(word) st_word_set2 = sorted(set(st_words2)) st_dict2 = dict((c, i) for i, c in enumerate(st_word_set2)) pd_words2 = [] for term in mergedTest.product_description.values: for word in word_tokenize(term): pd_words2.append(word) pd_word_set2 = sorted(set(pd_words2)) pd_dict2 = dict((c, i) for i, c in enumerate(pd_word_set2)) mergedTrain.search_term = mergedTrain.search_term.apply(lambda x: createData(st_dict, word_tokenize(x))) mergedTrain.product_description = mergedTrain.product_description.apply(lambda x: createData(pd_dict, word_tokenize(x))) mergedTrain.head() mergedTest.search_term = mergedTest.search_term.apply(lambda x: createData(st_dict2, word_tokenize(x))) mergedTest.product_description = mergedTest.product_description.apply(lambda x: createData(pd_dict2, word_tokenize(x))) mergedTest.head() ``` ## Data Normalization We normalize the target relevance to be in the 0 - 1 range, like in the first part. We try to limit the word sequences to 50 words as in the same manner as characters. 
``` target = mergedTrain['relevance'].values min_max_scaler = preprocessing.MinMaxScaler() Y = min_max_scaler.fit_transform(target.reshape(-1, 1)) Y[:5] X1 = mergedTrain['search_term'].values X2 = mergedTrain['product_description'].values search_terms_lens = [] for element in mergedTrain['search_term'].values: search_terms_lens.append(len(element)) product_description_lens = [] for element in mergedTrain['product_description'].values: product_description_lens.append(len(element)) max_length1 = max(search_terms_lens) max_length2 = max(product_description_lens) max_length = 50 def padding(seq, length): ans = [] for i in range(0,min(len(seq),length)): ans.append(seq[i]) if len(seq) <= length: for i in range(0,length-len(seq)): ans.append(0) return ans X1 = np.asarray([padding(x,max_length) for x in X1]) X2 = np.asarray([padding(x,max_length) for x in X2]) X1 = X1.reshape(X1.shape[0],X1.shape[1],1) X2 = X2.reshape(X2.shape[0],X2.shape[1],1) X1_test = mergedTest.search_term.values X2_test = mergedTest.product_description.values X1_test = np.asarray([padding(x,max_length) for x in X1_test]) X2_test = np.asarray([padding(x,max_length) for x in X2_test]) X1_test = X1_test.reshape(X1_test.shape[0],X1_test.shape[1],1) X2_test = X2_test.reshape(X2_test.shape[0],X2_test.shape[1],1) ``` ## Model Fitting + Predictions The model is created in the same manner as in the first part. the only difference is that the input now are embedded word sequences of the data samples. 
``` def set_callbacks2(description='run1',patience=15,tb_base_logdir='./logs/'): cp = ModelCheckpoint(path + '/best_model_weights_word.h5'.format(description),save_best_only=True) rlop = ReduceLROnPlateau(patience=5) cb = [cp,rlop] return cb st_input = Input(shape=(max_length,1), name='st_input') pd_input = Input(shape=(max_length,1), name='pd_input') def createModel(): model = Sequential() model.add(LSTM(60)) model.add(Dense(140, activation='relu')) return model st_model3 = createModel() pd_model3 = createModel() def createSiameseModel(model1,model2,customLoss): out = Lambda(function=lambda x: K.exp(-K.sum(K.abs(x[0]-x[1]), axis=1, keepdims=True)), output_shape=lambda x: (x[0][0], 1), name='prediction')([model1(st_input), model2(pd_input)]) siamese_net = Model(input=[st_input,pd_input],output=[out]) siamese_net.compile(loss=customLoss,optimizer=Adadelta(lr=1.0, rho=0.95,clipnorm=1.20)) return siamese_net siamese_net3 = createSiameseModel(st_model3,pd_model3,'mse') siamese_net4 = createSiameseModel(st_model3,pd_model3,'mae') siamese_net3.summary() X1_train,X1_val,X2_train,X2_val,Y_train, Y_val = train_test_split(X1,X2,Y,test_size = 0.2) start = time.time() history3 = siamese_net3.fit([X1_train,X2_train],Y_train,batch_size=1024, epochs=5, verbose=1, validation_data=([X1_val,X2_val],Y_val), callbacks=set_callbacks2()) end = time.time() total_time = end - start val_preds = siamese_net3.predict([X1_val,X2_val]) train_preds = siamese_net3.predict([X1_train,X2_train]) test_preds = siamese_net3.predict([X1_test,X2_test]) plt.plot(history3.history['loss'], label='train') plt.plot(history3.history['val_loss'], label='test') plt.legend() plt.show() plt.hist(val_preds,density=True, histtype='bar') plt.show() plt.hist(Y_val,density=True, histtype='bar') plt.show() resultsTable = addToTable('WORD_SiameseNetwork',total_time,mse(train_preds,Y_train),mse(val_preds,Y_val),'-',mae(train_preds,Y_train),mae(val_preds,Y_val),'-') resultsTable ``` The word model outperformed the char 
model only by a little bit. ## Feature Extraction - Word Level Again, we want to check our feature extraction capabilites of the word model by feeding the features that the model finds to classic ML models to see their performance with the processed data that our model creates during the learning phase. ``` fe_st_input = Input(shape=(max_length,1), name='st_input',dtype='float32') fe_pd_input = Input(shape=(max_length,1), name='pd_input',dtype='float32') input_layer1 = siamese_net1.layers[0].input[0] input_layer2 = siamese_net1.layers[1].input[0] fe_st_model = createModel() fe_pd_model = createModel() output_layer1 = siamese_net1.layers[3].get_output_at(0) output_layer2 = siamese_net1.layers[3].get_output_at(1) output_fn = K.function([st_input, pd_input], [output_layer1, output_layer2]) def extractFeatures(model1,model2,customLoss): out = concatenate([model1(fe_st_input), model2(fe_pd_input)]) siamese_net = Model(input=[fe_st_input,fe_pd_input],output=[out]) siamese_net.load_weights(path + '/best_model_weights_word.h5') siamese_net.compile(loss=customLoss,optimizer=Adadelta(lr=1.0, rho=0.95,clipnorm=1.20)) return siamese_net fe_model = extractFeatures(fe_st_model,fe_pd_model,'mse') fe_train_features = fe_model.predict([X1_train,X2_train]) fe_val_features = fe_model.predict([X1_val,X2_val]) fe_test_features = fe_model.predict([X1_test,X2_test]) randomForest2 = ensemble.RandomForestRegressor() start_time = time.time() randomForest2.fit(fe_train_features, Y_train) end_time = time.time() total_time = end_time - start_time fe_train_preds = randomForest2.predict(fe_train_features) fe_val_preds = randomForest2.predict(fe_val_features) resultsTable = addToTable('FE_RandomForest_WORD',total_time,mse(fe_train_preds,Y_train),mse(fe_val_preds,Y_val),'-',mae(fe_train_preds,Y_train),mae(fe_val_preds,Y_val),'-') linear2 = linear_model.LinearRegression() start_time = time.time() linear2.fit(fe_train_features, Y_train) end_time = time.time() total_time = end_time - start_time 
fe_train_preds2= linear2.predict(fe_train_features) fe_val_preds2 = linear2.predict(fe_val_features) resultsTable = addToTable('FE_LinearRegression_WORD',total_time,mse(fe_train_preds2,Y_train),mse(fe_val_preds2,Y_val),'-',mae(fe_train_preds2,Y_train),mae(fe_val_preds2,Y_val),'-') resultsTable ``` ## Test results submission ``` subans = test_preds.reshape(test_preds.shape[0]) subans = min_max_scaler.inverse_transform(subans.reshape(1, -1)) subans = subans.reshape(subans.shape[1]) subans sub = pd.read_csv("/content/gdrive/My Drive/Colab Notebooks/input/sample_submission.csv") sub.reset_index(drop = True) print(sub.relevance.values.shape) sub['relevance'] = subans sub.to_csv(path + '/sub.csv') ``` # Summary
github_jupyter
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'UA-59152712-8'); </script> # $\texttt{GiRaFFE}$: General Relativistic Force-Free Electrodynamics ## Authors: Patrick Nelson, Zachariah Etienne, George Vopal & Maria Babiuc-Hamilton ### Formatting improvements courtesy Brandon Clark ## GiRaFFE_HO_v2 is an experiment that omits the analytic derivatives of $\partial_j T^j_{{\rm EM} i}$ **Notebook Status:** <font color='red'><b> In progress </b></font> **Validation Notes:** This module is under active development -- do ***not*** use the resulting C code output for doing science. ### NRPy+ Source Code for this module: [GiRaFFE_HO/GiRaFFE_Higher_Order_v2.py](../edit/GiRaFFE_HO/GiRaFFE_Higher_Order_v2.py) ## Introduction: The original $\texttt{GiRaFFE}$ code, as presented in [the original paper](https://arxiv.org/pdf/1704.00599.pdf), exists as a significant modification to $\texttt{IllinoisGRMHD}$. As such, it used a third-order reconstruction algorithm with a slope limiter (Colella et al's piecewise parabolic method, or PPM) to handle spatial derivatives in the general relativistic force-free electrodynamics (GRFFE) equations. However the GRFFE equations do not generally permit shocks, so a more optimal approach would involve finite differencing all derivatives in the GRFFE equations. As it happens, NRPy+ was designed to generate C codes involving complex tensorial expressions and finite difference spatial derivatives, with finite-differencing order a freely-specifiable parameter. The purpose of this notebook is to rewrite the equations of GRFFE as used in the original $\texttt{GiRaFFE}$ code so that all derivatives that appear are represented numerically as finite difference derivatives. The GRFFE evolution equations (from eq. 
13 of the [original paper](https://arxiv.org/pdf/1704.00599.pdf)) we wish to encode in the NRPy+ version of $\texttt{GiRaFFE}$ are as follows: * $\partial_t \tilde{S}_i = - \partial_j \left( \alpha \sqrt{\gamma} T^j_{{\rm EM} i} \right) + \frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu}$: [Link to Step 3.0](#step7) * $\partial_t A_i = \epsilon_{ijk} v^j B^k - \partial_i (\alpha \Phi - \beta^j A_j)$: [Link to Step 4.0](#step8) * $\partial_t [\sqrt{\gamma} \Phi] = -\partial_j (\alpha\sqrt{\gamma}A^j - \beta^j [\sqrt{\gamma} \Phi]) - \xi \alpha [\sqrt{\gamma} \Phi]$: [Link to Step 4.0](#step8) Here, the densitized spatial Poynting flux one-form $\tilde{S}_i = \sqrt{\gamma} S_i$ (and $S_i$ comes from $S_{\mu} -n_{\nu} T^{\nu}_{{\rm EM} \mu}$), and $(\Phi, A_i)$ is the vector potential. We will solve these PDEs using the method of lines, where the right-hand sides of these equations (involving no explicit time derivatives) will be constructed using NRPy+. ### A Note on Notation: As is standard in NRPy+, * Greek indices refer to four-dimensional quantities where the zeroth component indicates temporal (time) component. * Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction. 
For instance, in calculating the first term of [$T_{\rm EM}^{\mu\nu}$](#em_tensor) (specifically, ${\rm Term\ 1} = b^2 u^\mu u^\nu$), we use Greek indices: ``` T4EMUU = ixp.zerorank2(DIM=4) for mu in range(4): for nu in range(4): # Term 1: b^2 u^{\mu} u^{\nu} T4EMUU[mu][nu] = smallb2*u4U[mu]*u4U[nu]\ ``` When we calculate [$\beta_i = \gamma_{ij} \beta^j$](#4metric), we use Latin indices: ``` betaD = ixp.zerorank1() for i in range(DIM): for j in range(DIM): betaD[i] += gammaDD[i][j] * betaU[j] ``` As a corollary, any expressions involving mixed Greek and Latin indices will need to offset one set of indices by one: A Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook). This can be seen when we handle the second term of [$\partial_t \tilde{S}_i$](#construct_si) (or, more specifically, the second term thereof: $\frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu}$): ``` # The second term: \alpha \sqrt{\\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} / 2 for i in range(DIM): for mu in range(4): for nu in range(4): Stilde_rhsD[i] += alpsqrtgam * T4EMUU[mu][nu] * g4DDdD[mu][nu][i+1] / 2 ``` <a id='toc'></a> # Table of Contents: $$\label{toc}$$ This notebook is organized as follows 1. [Step 1](#initializenrpy): Set up the needed NRPy+ infrastructure and declare core gridfunctions used by $\texttt{GiRaFFE}$ 1. [Step 2](#4metric): Build the four metric $g_{\mu\nu}$, its inverse $g^{\mu\nu}$ and spatial derivatives $g_{\mu\nu,i}$ from ADM 3+1 quantities $\gamma_{ij}$, $\beta^i$, and $\alpha$ 1. [Step 3](#t_derivatives): $T^{\mu\nu}_{\rm EM}$ and its derivatives 1. [Step 3.a](#uibi): $u^i$ and $b^i$ and related quantities 1. [Step 3.b](#em_tensor): Construct all components of the electromagnetic stress-energy tensor $T^{\mu \nu}_{\rm EM}$ 1. [Step 4](#construct_si): Construct the evolution equation for $\tilde{S}_i$ 1. 
[Step 5](#construct_ai): Construct the evolution equations for $A_i$ and $[\sqrt{\gamma}\Phi]$ 1. [Step 5.a](#aux): Construct some useful auxiliary gridfunctions for the other evolution equations 1. [Step 5.b](#complete_construct_ai): Complete the construction of the evolution equations for $A_i$ and $[\sqrt{\gamma}\Phi]$ 1. [Step 6](#code_validation): Code Validation against `GiRaFFE_HO.GiRaFFE_Higher_Order_v2` NRPy+ Module 1. [Step 7](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file <a id='initializenrpy'></a> # Step 1: Set up the needed NRPy+ infrastructure and declare core gridfunctions used by $\texttt{GiRaFFE}$ \[Back to [top](#toc)\] $$\label{initializenrpy}$$ 1. Set some basic NRPy+ parameters. E.g., set the spatial dimension parameter to 3 and the finite differencing order to 4. 1. Next, declare some gridfunctions that are provided as input to the equations: 1. $\alpha$, $\beta^i$, and $\gamma_{ij}$: These ADM 3+1 metric quantities are declared in the ADMBase Einstein Toolkit thorn, and are assumed to be made available to $\texttt{GiRaFFE}$ at this stage. 1. The Valencia 3-velocity $v^i_{(n)}$ and vector potential $A_i$: Declared by $\texttt{GiRaFFE}$, and will have their initial values set in the separate thorn **GiRaFFEfood_HO**. 1. The magnetic field as measured by a normal observer $B^i$: The quantities evolved forward in time in $\texttt{GiRaFFE}$ do not include the Valencia 3-velocity, so this quantity is not automatically updated. Instead, we compute it based on the evolved quantity $\tilde{S}_i$ and $B^i = \epsilon^{ijk} \partial_j A_k$ (where $A_k$ is another evolved quantity and $\epsilon^{ijk}$ is the Levi-Civita tensor). $B^i$ is evaluated using finite differences of $A_k$ in a separate function, though it can only be evaluated consistently on the interior of the grid. In the ghost zones, we will have to use lower-order derivatives. 
``` # Step 0: Add NRPy's directory to the path # https://stackoverflow.com/questions/16780014/import-file-from-parent-directory import os,sys nrpy_dir_path = os.path.join("..") if nrpy_dir_path not in sys.path: sys.path.append(nrpy_dir_path) import NRPy_param_funcs as par import indexedexp as ixp import grid as gri import finite_difference as fin from outputC import * #Step 1.0: Set the spatial dimension parameter to 3. par.set_parval_from_str("grid::DIM", 3) DIM = par.parval_from_str("grid::DIM") # Step 1.1: Set the finite differencing order to 4. par.set_parval_from_str("finite_difference::FD_CENTDERIVS_ORDER", 4) thismodule = "GiRaFFE_NRPy" # M_PI will allow the C code to substitute the correct value M_PI = par.Cparameters("#define",thismodule,"M_PI","") # ADMBase defines the 4-metric in terms of the 3+1 spacetime metric quantities gamma_{ij}, beta^i, and alpha gammaDD = ixp.register_gridfunctions_for_single_rank2("AUX","gammaDD", "sym01",DIM=3) betaU = ixp.register_gridfunctions_for_single_rank1("AUX","betaU",DIM=3) alpha = gri.register_gridfunctions("AUX","alpha") # GiRaFFE uses the Valencia 3-velocity and A_i, which are defined in the initial data module(GiRaFFEfood) ValenciavU = ixp.register_gridfunctions_for_single_rank1("AUX","ValenciavU",DIM=3) AD = ixp.register_gridfunctions_for_single_rank1("EVOL","AD",DIM=3) # B^i must be computed at each timestep within GiRaFFE so that the Valencia 3-velocity can be evaluated BU = ixp.register_gridfunctions_for_single_rank1("AUX","BU",DIM=3) ``` <a id='4metric'></a> # Step 2: Build the four metric $g_{\mu\nu}$, its inverse $g^{\mu\nu}$ and spatial derivatives $g_{\mu\nu,i}$ from ADM 3+1 quantities $\gamma_{ij}$, $\beta^i$, and $\alpha$ \[Back to [top](#toc)\] $$\label{4metric}$$ Notice that the time evolution equation for $\tilde{S}_i$ $$ \partial_t \tilde{S}_i = - \partial_j \left( \alpha \sqrt{\gamma} T^j_{{\rm EM} i} \right) + \frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} $$ contains 
$\partial_i g_{\mu \nu} = g_{\mu\nu,i}$. We will now focus on evaluating this term. The four-metric $g_{\mu\nu}$ is related to the three-metric $\gamma_{ij}$, index-lowered shift $\beta_i$, and lapse $\alpha$ by $$ g_{\mu\nu} = \begin{pmatrix} -\alpha^2 + \beta^k \beta_k & \beta_j \\ \beta_i & \gamma_{ij} \end{pmatrix}. $$ This tensor and its inverse have already been built by the u0_smallb_Poynting__Cartesian.py module ([documented here](Tutorial-u0_smallb_Poynting-Cartesian.ipynb)), so we can simply load the module and import the variables. ``` # Step 1.2: import u0_smallb_Poynting__Cartesian.py to set # the four metric and its inverse. This module also sets b^2 and u^0. import u0_smallb_Poynting__Cartesian.u0_smallb_Poynting__Cartesian as u0b u0b.compute_u0_smallb_Poynting__Cartesian(gammaDD,betaU,alpha,ValenciavU,BU) betaD = ixp.zerorank1() for i in range(DIM): for j in range(DIM): betaD[i] += gammaDD[i][j] * betaU[j] # Error check: fixed = to += # We will now pull in the four metric and its inverse. import BSSN.ADMBSSN_tofrom_4metric as AB4m gammaDD,betaU,alpha = AB4m.setup_ADM_quantities("ADM") AB4m.g4DD_ito_BSSN_or_ADM("ADM",gammaDD,betaU,alpha) g4DD = AB4m.g4DD AB4m.g4UU_ito_BSSN_or_ADM("ADM",gammaDD,betaU,alpha) g4UU = AB4m.g4UU ``` Next we compute spatial derivatives of the metric, $\partial_i g_{\mu\nu} = g_{\mu\nu,i}$, written in terms of the three-metric, shift, and lapse. Simply taking the derivative of the expression for $g_{\mu\nu}$ above, we find $$ g_{\mu\nu,l} = \begin{pmatrix} -2\alpha \alpha_{,l} + \beta^k_{\ ,l} \beta_k + \beta^k \beta_{k,l} & \beta_{j,l} \\ \beta_{i,l} & \gamma_{ij,l} \end{pmatrix}. $$ Notice the derivatives of the shift vector with its indexed lowered, $\beta_{i,j} = \partial_j \beta_i$. 
This can be easily computed in terms of the given ADMBase quantities $\beta^i$ and $\gamma_{ij}$ via: \begin{align} \beta_{i,j} &= \partial_j \beta_i \\ &= \partial_j (\gamma_{ik} \beta^k) \\ &= \gamma_{ik} \partial_j\beta^k + \beta^k \partial_j \gamma_{ik} \\ \beta_{i,j} &= \gamma_{ik} \beta^k_{\ ,j} + \beta^k \gamma_{ik,j}. \end{align} Since this expression mixes Greek and Latin indices, we will declare this as a 4 dimensional quantity, but only set the three spatial components of its last index (that is, leaving $l=0$ unset). So, we will first set $$ g_{00,l} = \underbrace{-2\alpha \alpha_{,l}}_{\rm Term\ 1} + \underbrace{\beta^k_{\ ,l} \beta_k}_{\rm Term\ 2} + \underbrace{\beta^k \beta_{k,l}}_{\rm Term\ 3} $$ ``` # Step 1.2, cont'd: Build spatial derivatives of the four metric # Step 1.2.a: Declare derivatives of grid functions. These will be handled by FD_outputC alpha_dD = ixp.declarerank1("alpha_dD") betaU_dD = ixp.declarerank2("betaU_dD","nosym") gammaDD_dD = ixp.declarerank3("gammaDD_dD","sym01") # Step 1.2.b: These derivatives will be constructed analytically. betaDdD = ixp.zerorank2() g4DDdD = ixp.zerorank3(DIM=4) for i in range(DIM): for j in range(DIM): for k in range(DIM): # \gamma_{ik} \beta^k_{,j} + \beta^k \gamma_{ik,j} betaDdD[i][j] += gammaDD[i][k] * betaU_dD[k][j] + betaU[k] * gammaDD_dD[i][k][j] #Error check: changed = to += above # Step 1.2.c: Set the 00 components # Step 1.2.c.i: Term 1: -2\alpha \alpha_{,l} for l in range(DIM): g4DDdD[0][0][l+1] = -2*alpha*alpha_dD[l] # Step 1.2.c.ii: Term 2: \beta^k_{\ ,l} \beta_k for l in range(DIM): for k in range(DIM): g4DDdD[0][0][l+1] += betaU_dD[k][l] * betaD[k] # Step 1.2.c.iii: Term 3: \beta^k \beta_{k,l} for l in range(DIM): for k in range(DIM): g4DDdD[0][0][l+1] += betaU[k] * betaDdD[k][l] ``` Now we will contruct the other components of $g_{\mu\nu,l}$. 
We will first construct $$ g_{i0,l} = g_{0i,l} = \beta_{i,l}, $$ then $$ g_{ij,l} = \gamma_{ij,l} $$ ``` # Step 1.2.d: Set the i0 and 0j components for l in range(DIM): for i in range(DIM): # \beta_{i,l} g4DDdD[i+1][0][l+1] = g4DDdD[0][i+1][l+1] = betaDdD[i][l] #Step 1.2.e: Set the ij components for l in range(DIM): for i in range(DIM): for j in range(DIM): # \gamma_{ij,l} g4DDdD[i+1][j+1][l+1] = gammaDD_dD[i][j][l] ``` <a id='t_derivatives'></a> # Step 3: $T^{\mu\nu}_{\rm EM}$ and its derivatives \[Back to [top](#toc)\] $$\label{t_derivatives}$$ Now that the metric and its derivatives are out of the way, let's return to the evolution equation for $\tilde{S}_i$, $$ \partial_t \tilde{S}_i = - \partial_j \left( \alpha \sqrt{\gamma} T^j_{{\rm EM} i} \right) + \frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu}. $$ We turn our focus now to $T^j_{{\rm EM} i}$ and its derivatives. To this end, we start by computing $T^{\mu \nu}_{\rm EM}$ (from eq. 27 of [Paschalidis & Shapiro's paper on their GRFFE code](https://arxiv.org/pdf/1310.3274.pdf)): $$\boxed{T^{\mu \nu}_{\rm EM} = b^2 u^{\mu} u^{\nu} + \frac{b^2}{2} g^{\mu \nu} - b^{\mu} b^{\nu}.}$$ Notice that $T^{\mu\nu}_{\rm EM}$ is written in terms of * $b^\mu$, the 4-component magnetic field vector, related to the comoving magnetic field vector $B^i_{(u)}$ * $u^\mu$, the 4-velocity * $g^{\mu \nu}$, the inverse 4-metric However, $\texttt{GiRaFFE}$ has access to only the following quantities, requiring in the following sections that we write the above quantities in terms of the following ones: * $\gamma_{ij}$, the 3-metric * $\alpha$, the lapse * $\beta^i$, the shift * $A_i$, the vector potential * $B^i$, the magnetic field (we assume only in the grid interior, not the ghost zones) * $\left[\sqrt{\gamma}\Phi\right]$, the zero-component of the vector potential $A_\mu$, times the square root of the determinant of the 3-metric * $v_{(n)}^i$, the Valencia 3-velocity * $u^0$, the zero-component of the 
4-velocity <a id='uibi'></a> ## Step 3.a: $u^i$ and $b^i$ and related quantities \[Back to [top](#toc)\] $$\label{uibi}$$ We begin by importing what we can from [u0_smallb_Poynting__Cartesian.py](../edit/u0_smallb_Poynting__Cartesian/u0_smallb_Poynting__Cartesian.py). We will need the four-velocity $u^\mu$, which is related to the Valencia 3-velocity $v^i_{(n)}$ used directly by $\texttt{GiRaFFE}$ (see also [Duez, et al, eqs. 53 and 56](https://arxiv.org/pdf/astro-ph/0503420.pdf)) \begin{align} u^i &= u^0 (\alpha v^i_{(n)} - \beta^i), \\ u_j &= \alpha u^0 \gamma_{ij} v^i_{(n)}, \end{align} where $v^i_{(n)}$ is the Valencia three-velocity. These have already been constructed in terms of the Valencia 3-velocity and other 3+1 ADM quantities by the [u0_smallb_Poynting__Cartesian.py](../edit/u0_smallb_Poynting__Cartesian/u0_smallb_Poynting__Cartesian.py) module, so we can simply import these variables: ``` # Step 2.0: u^i, b^i, and related quantities # Step 2.0.a: import the four-velocity, as written in terms of the Valencia 3-velocity uD = ixp.zerorank1() uU = ixp.zerorank1() u4upperZero = gri.register_gridfunctions("AUX","u4upperZero") for i in range(DIM): uD[i] = u0b.uD[i].subs(u0b.u0,u4upperZero) uU[i] = u0b.uU[i].subs(u0b.u0,u4upperZero) ``` We also need the magnetic field 4-vector $b^{\mu}$, which is related to the magnetic field by [eqs. 23, 24, and 31 in Duez, et al](https://arxiv.org/pdf/astro-ph/0503420.pdf): \begin{align} b^0 &= \frac{1}{\sqrt{4\pi}} B^0_{\rm (u)} = \frac{u_j B^j}{\sqrt{4\pi}\alpha}, \\ b^i &= \frac{1}{\sqrt{4\pi}} B^i_{\rm (u)} = \frac{B^i + (u_j B^j) u^i}{\sqrt{4\pi}\alpha u^0}, \\ \end{align} where $B^i$ is the variable tracked by the HydroBase thorn in the Einstein Toolkit. Again, these have already been built by the [u0_smallb_Poynting__Cartesian.py](../edit/u0_smallb_Poynting__Cartesian/u0_smallb_Poynting__Cartesian.py), so we can simply import the variables. 
``` # Step 2.0.b: import the small b terms smallb4U = ixp.zerorank1(DIM=4) smallb4D = ixp.zerorank1(DIM=4) for mu in range(4): smallb4U[mu] = u0b.smallb4U[mu].subs(u0b.u0,u4upperZero) smallb4D[mu] = u0b.smallb4D[mu].subs(u0b.u0,u4upperZero) smallb2 = u0b.smallb2etk.subs(u0b.u0,u4upperZero) ``` <a id='em_tensor'></a> ## Step 3.b: Construct all components of the electromagnetic stress-energy tensor $T^{\mu \nu}_{\rm EM}$ \[Back to [top](#toc)\] $$\label{em_tensor}$$ We now have all the pieces to calculate the stress-energy tensor, $$T^{\mu \nu}_{\rm EM} = \underbrace{b^2 u^{\mu} u^{\nu}}_{\rm Term\ 1} + \underbrace{\frac{b^2}{2} g^{\mu \nu}}_{\rm Term\ 2} - \underbrace{b^{\mu} b^{\nu}}_{\rm Term\ 3}.$$ Because $u^0$ is a separate variable, we will create a temporary variable $u^\mu=\left( u^0, u^i \right)$, ``` # Step 2.1: Construct the electromagnetic stress-energy tensor # Step 2.1.a: Set up the four-velocity vector u4U = ixp.zerorank1(DIM=4) u4U[0] = u4upperZero for i in range(DIM): u4U[i+1] = uU[i] # Step 2.1.b: Build T4EMUU itself T4EMUU = ixp.zerorank2(DIM=4) for mu in range(4): for nu in range(4): # Term 1: b^2 u^{\mu} u^{\nu} T4EMUU[mu][nu] = smallb2*u4U[mu]*u4U[nu] for mu in range(4): for nu in range(4): # Term 2: b^2 / 2 g^{\mu \nu} T4EMUU[mu][nu] += smallb2*g4UU[mu][nu]/2 for mu in range(4): for nu in range(4): # Term 3: -b^{\mu} b^{\nu} T4EMUU[mu][nu] += -smallb4U[mu]*smallb4U[nu] ``` <a id='construct_si'></a> # Step 4: Construct the evolution equation for $\tilde{S}_i$ \[Back to [top](#toc)\] $$\label{construct_si}$$ Finally, we will return our attention to the time evolution equation (from eq.
13 of the [original paper](https://arxiv.org/pdf/1704.00599.pdf)), \begin{align} \partial_t \tilde{S}_i &= - \partial_j \underbrace{\left( \alpha \sqrt{\gamma} T^j_{{\rm EM} i} \right)}_{\rm SevolParenUD} + \frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} \\ &= - \partial_j{\rm SevolParenUD[j][i]} + \frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} . \end{align} We will first construct `SevolParenUD`, then use its derivatives to construct the evolution equation. Note that \begin{align} {\rm SevolParenUD[j][i]} &= \alpha \sqrt{\gamma} T^j_{{\rm EM} i} \\ &= \alpha \sqrt{\gamma} g_{\mu i} T^{\mu j}_{\rm EM}. \end{align} ``` # Step 3.0: Construct the evolution equation for \tilde{S}_i # Here, we set up the necessary machinery to take FD derivatives of alpha * sqrt(gamma) gammaUU = ixp.register_gridfunctions_for_single_rank2("AUX","gammaUU","sym01") gammadet = gri.register_gridfunctions("AUX","gammadet") gammaUU, gammadet = ixp.symm_matrix_inverter3x3(gammaDD) alpsqrtgam = alpha*sp.sqrt(gammadet) SevolParenUD = ixp.register_gridfunctions_for_single_rank2("AUX","SevolParenUD","nosym") SevolParenUD = ixp.zerorank2() for i in range(DIM): for j in range (DIM): for mu in range(4): SevolParenUD[j][i] += alpsqrtgam * g4DD[mu][i+1] * T4EMUU[mu][j+1] SevolParenUD_dD = ixp.declarerank3("SevolParenUD_dD","nosym") Stilde_rhsD = ixp.zerorank1() # The first term: \alpha \sqrt{\gamma} \partial_j T^j_{{\rm EM} i} for i in range(DIM): for j in range(DIM): Stilde_rhsD[i] += -SevolParenUD_dD[j][i][j] # The second term: \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} / 2 for i in range(DIM): for mu in range(4): for nu in range(4): Stilde_rhsD[i] += alpsqrtgam * T4EMUU[mu][nu] * g4DDdD[mu][nu][i+1] / 2 ``` <a id='construct_ai'></a> # Step 5: Construct the evolution equations for $A_i$ and $[\sqrt{\gamma}\Phi]$ \[Back to [top](#toc)\] $$\label{construct_ai}$$ We will also need to evolve the vector potential $A_i$. 
This evolution is given as eq. 17 in the [$\texttt{GiRaFFE}$](https://arxiv.org/pdf/1704.00599.pdf) paper: $$\boxed{\partial_t A_i = \epsilon_{ijk} v^j B^k - \partial_i (\underbrace{\alpha \Phi - \beta^j A_j}_{\rm AevolParen}),}$$ where $\epsilon_{ijk} = [ijk] \sqrt{\gamma}$ is the antisymmetric Levi-Civita tensor, the drift velocity $v^i = u^i/u^0$, $\gamma$ is the determinant of the three metric, $B^k$ is the magnetic field, $\alpha$ is the lapse, and $\beta$ is the shift. We also need the scalar electric potential $\Phi$, which is evolved by eq. 19: $$\boxed{\partial_t [\sqrt{\gamma} \Phi] = -\partial_j (\underbrace{\alpha\sqrt{\gamma} \gamma^{ij} A_i - \beta^j [\sqrt{\gamma} \Phi]}_{\rm PevolParenU[j]}) - \xi \alpha [\sqrt{\gamma} \Phi],}$$ with $\xi$ chosen as a damping factor. <a id='aux'></a> ## Step 5.a: Construct some useful auxiliary gridfunctions for the other evolution equations \[Back to [top](#toc)\] $$\label{aux}$$ After declaring some needed quantities, we will also define the parenthetical terms (underbrace above) that we need to take derivatives of. That way, we can take finite-difference derivatives easily. Note that the above equations incorporate the fact that $\gamma^{ij}$ is the appropriate raising operator for $A_i$: $A^j = \gamma^{ij} A_i$. This is correct since $n_\mu A^\mu = 0$, where $n_\mu$ is a normal to the hypersurface, so $A^0=0$ (according to Sec. II, subsection C of [the "Improved EM gauge condition" paper of Etienne *et al*](https://arxiv.org/pdf/1110.4633.pdf)).
``` # Step 4.0: Construct the evolution equations for A_i and sqrt(gamma)Phi # Step 4.0.a: Construct some useful auxiliary gridfunctions for the other evolution equations xi = par.Cparameters("REAL",thismodule,"xi",0.1) # The damping factor # Define sqrt(gamma)Phi as psi6Phi psi6Phi = gri.register_gridfunctions("EVOL","psi6Phi") Phi = psi6Phi / sp.sqrt(gammadet) # We'll define a few extra gridfunctions to avoid complicated derivatives AevolParen = gri.register_gridfunctions("AUX","AevolParen") PevolParenU = ixp.register_gridfunctions_for_single_rank1("AUX","PevolParenU") # {\rm AevolParen} = \alpha \Phi - \beta^j A_j AevolParen = alpha*Phi for j in range(DIM): AevolParen += -betaU[j] * AD[j] # {\rm PevolParenU[j]} = \alpha\sqrt{\gamma} \gamma^{ij} A_i - \beta^j [\sqrt{\gamma} \Phi] for j in range(DIM): PevolParenU[j] = -betaU[j] * psi6Phi for i in range(DIM): PevolParenU[j] += alpsqrtgam * gammaUU[i][j] * AD[i] AevolParen_dD = ixp.declarerank1("AevolParen_dD") PevolParenU_dD = ixp.declarerank2("PevolParenU_dD","nosym") ``` <a id='complete_construct_ai'></a> ## Step 5.b: Complete the construction of the evolution equations for $A_i$ and $[\sqrt{\gamma}\Phi]$ \[Back to [top](#toc)\] $$\label{complete_construct_ai}$$ Now to set the evolution equations ([eqs. 17 and 19](https://arxiv.org/pdf/1704.00599.pdf)), recalling that the drift velocity $v^i = u^i/u^0$: \begin{align} \partial_t A_i &= \epsilon_{ijk} v^j B^k - \partial_i (\alpha \Phi - \beta^j A_j) \\ &= \epsilon_{ijk} \frac{u^j}{u^0} B^k - {\rm AevolParen\_dD[i]} \\ \partial_t [\sqrt{\gamma} \Phi] &= -\partial_j \left(\left(\alpha\sqrt{\gamma}\right)A^j - \beta^j [\sqrt{\gamma} \Phi]\right) - \xi \alpha [\sqrt{\gamma} \Phi] \\ &= -{\rm PevolParenU\_dD[j][j]} - \xi \alpha [\sqrt{\gamma} \Phi]. 
\\ \end{align} ``` # Step 4.0.b: Construct the actual evolution equations for A_i and sqrt(gamma)Phi A_rhsD = ixp.zerorank1() psi6Phi_rhs = sp.sympify(0) # We already have a handy function to define the Levi-Civita symbol in WeylScalars import WeylScal4NRPy.WeylScalars_Cartesian as weyl # Initialize the Levi-Civita tensor by setting it equal to the Levi-Civita symbol LeviCivitaSymbolDDD = weyl.define_LeviCivitaSymbol_rank3() LeviCivitaTensorDDD = ixp.zerorank3() #LeviCivitaTensorUUU = ixp.zerorank3() for i in range(DIM): for j in range(DIM): for k in range(DIM): LeviCivitaTensorDDD[i][j][k] = LeviCivitaSymbolDDD[i][j][k] * sp.sqrt(gammadet) #LeviCivitaTensorUUU[i][j][k] = LeviCivitaSymbolDDD[i][j][k] / sp.sqrt(gammadet) for i in range(DIM): A_rhsD[i] = -AevolParen_dD[i] for j in range(DIM): for k in range(DIM): A_rhsD[i] += LeviCivitaTensorDDD[i][j][k]*(uU[j]/u4upperZero)*BU[k] psi6Phi_rhs = -xi*alpha*psi6Phi for j in range(DIM): psi6Phi_rhs += -PevolParenU_dD[j][j] ``` <a id='code_validation'></a> # Step 6: Code Validation against `GiRaFFE_HO.GiRaFFE_Higher_Order_v2` NRPy+ Module \[Back to [top](#toc)\] $$\label{code_validation}$$ Here, as a code validation check, we verify agreement in the SymPy expressions for the $\texttt{GiRaFFE}$ evolution equations and auxiliary quantities we intend to use between 1. this tutorial and 2. the NRPy+ [GiRaFFE_HO.GiRaFFE_Higher_Order_v2](../edit/GiRaFFE_HO/GiRaFFE_Higher_Order_v2.py) module. ``` # Reset the list of gridfunctions, as registering a gridfunction # twice will spawn an error. 
gri.glb_gridfcs_list = [] import GiRaFFE_HO.GiRaFFE_Higher_Order_v2 as gho gho.GiRaFFE_Higher_Order_v2() print("Consistency check between GiRaFFE_Higher_Order tutorial and NRPy+ module: ALL SHOULD BE ZERO.") print("AevolParen - gho.AevolParen = " + str(AevolParen - gho.AevolParen)) print("psi6Phi_rhs - gho.psi6Phi_rhs = " + str(psi6Phi_rhs - gho.psi6Phi_rhs)) for i in range(DIM): print("PevolParenU["+str(i)+"] - gho.PevolParenU["+str(i)+"] = " + str(PevolParenU[i] - gho.PevolParenU[i])) print("Stilde_rhsD["+str(i)+"] - gho.Stilde_rhsD["+str(i)+"] = " + str(Stilde_rhsD[i] - gho.Stilde_rhsD[i])) print("A_rhsD["+str(i)+"] - gho.A_rhsD["+str(i)+"] = " + str(A_rhsD[i] - gho.A_rhsD[i])) for j in range(DIM): print("SevolParenUD["+str(i)+"]["+str(j)+"] - gho.SevolParenUD["+str(i)+"]["+str(i)+"] = "\ + str(SevolParenUD[i][j] - gho.SevolParenUD[i][j])) ``` <a id='latex_pdf_output'></a> # Step 7: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\] $$\label{latex_pdf_output}$$ The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename [Tutorial-GiRaFFE_Higher_Order_v2.pdf](Tutorial-GiRaFFE_Higher_Order_v2.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.) ``` !jupyter nbconvert --to latex --template latex_nrpy_style.tplx --log-level='WARN' Tutorial-GiRaFFE_Higher_Order_v2.ipynb !pdflatex -interaction=batchmode Tutorial-GiRaFFE_Higher_Order_v2.tex !pdflatex -interaction=batchmode Tutorial-GiRaFFE_Higher_Order_v2.tex !pdflatex -interaction=batchmode Tutorial-GiRaFFE_Higher_Order_v2.tex !rm -f Tut*.out Tut*.aux Tut*.log ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/classification-credit-card-fraud/auto-ml-classification-credit-card-fraud.png) # Automated Machine Learning _**Classification of credit card fraudulent transactions with local run **_ ## Contents 1. [Introduction](#Introduction) 1. [Setup](#Setup) 1. [Train](#Train) 1. [Results](#Results) 1. [Test](#Test) 1. [Acknowledgements](#Acknowledgements) ## Introduction In this example we use the associated credit card dataset to showcase how you can use AutoML for a simple classification problem. The goal is to predict if a credit card transaction is considered a fraudulent charge. This notebook is using the local machine compute to train the model. If you are using an Azure Machine Learning [Notebook VM](https://docs.microsoft.com/en-us/azure/machine-learning/service/tutorial-1st-experiment-sdk-setup), you are all set. Otherwise, go through the [configuration](../../../configuration.ipynb) notebook first if you haven't already to establish your connection to the AzureML Workspace. In this notebook you will learn how to: 1. Create an experiment using an existing workspace. 2. Configure AutoML using `AutoMLConfig`. 3. Train the model. 4. Explore the results. 5. Visualization model's feature importance in azure portal 6. Explore any model's explanation and explore feature importance in azure portal 7. Test the fitted model. ## Setup As part of the setup you have already created an Azure ML `Workspace` object. For Automated ML you will need to create an `Experiment` object, which is a named object in a `Workspace` used to run experiments. 
``` import logging from matplotlib import pyplot as plt import pandas as pd import azureml.core from azureml.core.experiment import Experiment from azureml.core.workspace import Workspace from azureml.core.dataset import Dataset from azureml.train.automl import AutoMLConfig from azureml.explain.model._internal.explanation_client import ExplanationClient ``` This sample notebook may use features that are not available in previous versions of the Azure ML SDK. ``` print("This notebook was created using version 1.3.0 of the Azure ML SDK") print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK") ws = Workspace.from_config() # choose a name for experiment experiment_name = 'automl-classification-ccard-local' experiment=Experiment(ws, experiment_name) output = {} output['Subscription ID'] = ws.subscription_id output['Workspace'] = ws.name output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Experiment Name'] = experiment.name pd.set_option('display.max_colwidth', -1) outputDf = pd.DataFrame(data = output, index = ['']) outputDf.T ``` ### Load Data Load the credit card dataset from a csv file containing both training features and labels. The features are inputs to the model, while the training labels represent the expected output of the model. Next, we'll split the data using random_split and extract the training data for the model. ``` data = "https://automlsamplenotebookdata.blob.core.windows.net/automl-sample-notebook-data/creditcard.csv" dataset = Dataset.Tabular.from_delimited_files(data) training_data, validation_data = dataset.random_split(percentage=0.8, seed=223) label_column_name = 'Class' ``` ## Train Instantiate a AutoMLConfig object. This defines the settings and data used to run the experiment. |Property|Description| |-|-| |**task**|classification or regression| |**primary_metric**|This is the metric that you want to optimize. 
Classification supports the following primary metrics: <br><i>accuracy</i><br><i>AUC_weighted</i><br><i>average_precision_score_weighted</i><br><i>norm_macro_recall</i><br><i>precision_score_weighted</i>| |**enable_early_stopping**|Stop the run if the metric score is not showing improvement.| |**n_cross_validations**|Number of cross validation splits.| |**training_data**|Input dataset, containing both features and label column.| |**label_column_name**|The name of the label column.| **_You can find more information about primary metrics_** [here](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-configure-auto-train#primary-metric) ``` automl_settings = { "n_cross_validations": 3, "primary_metric": 'average_precision_score_weighted', "experiment_timeout_hours": 0.25, # This is a time limit for testing purposes, remove it for real use cases, this will drastically limit ability to find the best model possible "verbosity": logging.INFO, "enable_stack_ensemble": False } automl_config = AutoMLConfig(task = 'classification', debug_log = 'automl_errors.log', training_data = training_data, label_column_name = label_column_name, **automl_settings ) ``` Call the `submit` method on the experiment object and pass the run configuration. Depending on the data and the number of iterations this can run for a while. In this example, we specify `show_output = True` to print currently running iterations to the console. ``` local_run = experiment.submit(automl_config, show_output = True) # If you need to retrieve a run that already started, use the following code #from azureml.train.automl.run import AutoMLRun #local_run = AutoMLRun(experiment = experiment, run_id = '<replace with your run id>') local_run ``` ## Results #### Widget for Monitoring Runs The widget will first report a "loading" status while running the first iteration. After completing the first iteration, an auto-updating graph and table will be shown. 
The widget will refresh once per minute, so you should see the graph update as child runs complete. **Note:** The widget displays a link at the bottom. Use this link to open a web interface to explore the individual run details ``` from azureml.widgets import RunDetails RunDetails(local_run).show() ``` ## Analyze results ### Retrieve the Best Model Below we select the best pipeline from our iterations. The `get_output` method on `automl_classifier` returns the best run and the fitted model for the last invocation. Overloads on `get_output` allow you to retrieve the best run and fitted model for *any* logged metric or for a particular *iteration*. ``` best_run, fitted_model = local_run.get_output() fitted_model ``` #### Print the properties of the model The fitted_model is a python object and you can read the different properties of the object. ## Best Model 's explanation Retrieve the explanation from the best_run which includes explanations for engineered features and raw features. #### Download engineered feature importance from artifact store You can use ExplanationClient to download the engineered feature explanations from the artifact store of the best_run. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features. ``` client = ExplanationClient.from_run(best_run) engineered_explanations = client.download_model_explanation(raw=False) print(engineered_explanations.get_feature_importance_dict()) print("You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + best_run.get_portal_url()) ``` ## Explanations In this section, we will show how to compute model explanations and visualize the explanations using azureml-explain-model package. Besides retrieving an existing model explanation for an AutoML model, you can also explain your AutoML model with different test data. 
The following steps will allow you to compute and visualize engineered feature importance based on your test data. #### Retrieve any other AutoML model from training ``` automl_run, fitted_model = local_run.get_output(metric='accuracy') ``` #### Setup the model explanations for AutoML models The fitted_model can generate the following which will be used for getting the engineered explanations using automl_setup_model_explanations:- 1. Featurized data from train samples/test samples 2. Gather engineered name lists 3. Find the classes in your labeled column in classification scenarios The automl_explainer_setup_obj contains all the structures from above list. ``` X_train = training_data.drop_columns(columns=[label_column_name]) y_train = training_data.keep_columns(columns=[label_column_name], validate=True) X_test = validation_data.drop_columns(columns=[label_column_name]) from azureml.train.automl.runtime.automl_explain_utilities import automl_setup_model_explanations automl_explainer_setup_obj = automl_setup_model_explanations(fitted_model, X=X_train, X_test=X_test, y=y_train, task='classification') ``` #### Initialize the Mimic Explainer for feature importance For explaining the AutoML models, use the MimicWrapper from azureml.explain.model package. The MimicWrapper can be initialized with fields in automl_explainer_setup_obj, your workspace and a LightGBM model which acts as a surrogate model to explain the AutoML model (fitted_model here). The MimicWrapper also takes the automl_run object where engineered explanations will be uploaded. 
``` from azureml.explain.model.mimic.models.lightgbm_model import LGBMExplainableModel from azureml.explain.model.mimic_wrapper import MimicWrapper explainer = MimicWrapper(ws, automl_explainer_setup_obj.automl_estimator, LGBMExplainableModel, init_dataset=automl_explainer_setup_obj.X_transform, run=automl_run, features=automl_explainer_setup_obj.engineered_feature_names, feature_maps=[automl_explainer_setup_obj.feature_map], classes=automl_explainer_setup_obj.classes) ``` #### Use Mimic Explainer for computing and visualizing engineered feature importance The explain() method in MimicWrapper can be called with the transformed test samples to get the feature importance for the generated engineered features. You can also use azure portal url to view the dash board visualization of the feature importance values of the engineered features. ``` engineered_explanations = explainer.explain(['local', 'global'], eval_dataset=automl_explainer_setup_obj.X_test_transform) print(engineered_explanations.get_feature_importance_dict()) print("You can visualize the engineered explanations under the 'Explanations (preview)' tab in the AutoML run at:-\n" + automl_run.get_portal_url()) ``` ## Test the fitted model Now that the model is trained, split the data in the same way the data was split for training (The difference here is the data is being split locally) and then run the test data through the trained model to get the predicted values. ``` # convert the test data to dataframe X_test_df = validation_data.drop_columns(columns=[label_column_name]).to_pandas_dataframe() y_test_df = validation_data.keep_columns(columns=[label_column_name], validate=True).to_pandas_dataframe() # call the predict functions on the model y_pred = fitted_model.predict(X_test_df) y_pred ``` ### Calculate metrics for the prediction Now visualize the data on a scatter plot to show what our truth (actual) values are compared to the predicted values from the trained model that was returned. 
``` from sklearn.metrics import confusion_matrix import numpy as np import itertools cf =confusion_matrix(y_test_df.values,y_pred) plt.imshow(cf,cmap=plt.cm.Blues,interpolation='nearest') plt.colorbar() plt.title('Confusion Matrix') plt.xlabel('Predicted') plt.ylabel('Actual') class_labels = ['False','True'] tick_marks = np.arange(len(class_labels)) plt.xticks(tick_marks,class_labels) plt.yticks([-0.5,0,1,1.5],['','False','True','']) # plotting text value inside cells thresh = cf.max() / 2. for i,j in itertools.product(range(cf.shape[0]),range(cf.shape[1])): plt.text(j,i,format(cf[i,j],'d'),horizontalalignment='center',color='white' if cf[i,j] >thresh else 'black') plt.show() ``` ## Acknowledgements This Credit Card fraud Detection dataset is made available under the Open Database License: http://opendatacommons.org/licenses/odbl/1.0/. Any rights in individual contents of the database are licensed under the Database Contents License: http://opendatacommons.org/licenses/dbcl/1.0/ and is available at: https://www.kaggle.com/mlg-ulb/creditcardfraud The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection. More details on current and past projects on related topics are available on https://www.researchgate.net/project/Fraud-detection-5 and the page of the DefeatFraud project Please cite the following works: • Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. Calibrating Probability with Undersampling for Unbalanced Classification. In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015 • Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. 
Learned lessons in credit card fraud detection from a practitioner perspective, Expert systems with applications,41,10,4915-4928,2014, Pergamon • Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. Credit card fraud detection: a realistic modeling and a novel learning strategy, IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE • Dal Pozzolo, Andrea. Adaptive Machine learning for credit card fraud detection, ULB MLG PhD thesis (supervised by G. Bontempi) • Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. Scarff: a scalable framework for streaming credit card fraud detection with Spark, Information fusion,41, 182-194,2018,Elsevier • Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization, International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing
github_jupyter
# Word2Vec for Text Classification In this short notebook, we will see an example of how to use a pre-trained Word2vec model for doing feature extraction and performing text classification. We will use the sentiment labelled sentences dataset from UCI repository http://archive.ics.uci.edu/ml/datasets/Sentiment+Labelled+Sentences The dataset consists of 1500 positive, and 1500 negative sentiment sentences from Amazon, Yelp, IMDB. Let us first combine all the three separate data files into one using the following unix command: ```cat amazon_cells_labelled.txt imdb_labelled.txt yelp_labelled.txt > sentiment_sentences.txt``` For a pre-trained embedding model, we will use the Google News vectors. https://drive.google.com/file/d/0B7XkCwpI5KDYNlNUTTlSS21pQmM Let us get started! ``` # To install only the requirements of this notebook, uncomment the lines below and run this cell # =========================== !pip install numpy==1.19.5 !pip install pandas==1.1.5 !pip install gensim==3.8.3 !pip install wget==3.2 !pip install nltk==3.5 !pip install scikit-learn==0.21.3 # =========================== # To install the requirements for the entire chapter, uncomment the lines below and run this cell # =========================== # try: # import google.colab # !curl https://raw.githubusercontent.com/practical-nlp/practical-nlp/master/Ch4/ch4-requirements.txt | xargs -n 1 -L 1 pip install # except ModuleNotFoundError: # !pip install -r "ch4-requirements.txt" # =========================== #basic imports import warnings warnings.filterwarnings('ignore') import os import wget import gzip import shutil from time import time #pre-processing imports import nltk nltk.download('stopwords') nltk.download('punkt') from nltk.tokenize import word_tokenize from nltk.corpus import stopwords from string import punctuation #imports related to modeling import numpy as np from gensim.models import Word2Vec, KeyedVectors from sklearn.linear_model import LogisticRegression from sklearn.model_selection 
import train_test_split from sklearn.metrics import classification_report try: from google.colab import files # upload 'amazon_cells_labelled.txt', 'imdb_labelled.txt' and 'yelp_labelled.txt' present in "sentiment labelled sentences" folder uploaded = files.upload() !mkdir DATAPATH !mv -t DATAPATH amazon_cells_labelled.txt imdb_labelled.txt yelp_labelled.txt !cat DATAPATH/amazon_cells_labelled.txt DATAPATH/imdb_labelled.txt DATAPATH/yelp_labelled.txt > DATAPATH/sentiment_sentences.txt except ModuleNotFoundError: fil = 'sentiment_sentences.txt' if not os.path.exists("Data/sentiment_sentences.txt"): file = open(os.path.join(path, fil), 'w') file.close() # combined the three files to make sentiment_sentences.txt filenames = ['amazon_cells_labelled.txt', 'imdb_labelled.txt', 'yelp_labelled.txt'] with open('Data/sentiment_sentences.txt', 'w') as outfile: for fname in filenames: with open('Data/sentiment labelled sentences/' + fname) as infile: outfile.write(infile.read()) print("File created") else: print("File already exists") #Load the pre-trained word2vec model and the dataset try: from google.colab import files data_path= "DATAPATH" !wget -P DATAPATH https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz !gunzip DATAPATH/GoogleNews-vectors-negative300.bin.gz path_to_model = 'DATAPATH/GoogleNews-vectors-negative300.bin' training_data_path = "DATAPATH/sentiment_sentences.txt" except ModuleNotFoundError: data_path= "Data" if not os.path.exists('GoogleNews-vectors-negative300.bin'): if not os.path.exists('../Ch2/GoogleNews-vectors-negative300.bin'): if not os.path.exists('../Ch3/GoogleNews-vectors-negative300.bin'): wget.download("https://s3.amazonaws.com/dl4j-distribution/GoogleNews-vectors-negative300.bin.gz") with gzip.open('GoogleNews-vectors-negative300.bin.gz', 'rb') as f_in: with open('GoogleNews-vectors-negative300.bin', 'wb') as f_out: shutil.copyfileobj(f_in, f_out) path_to_model = 'GoogleNews-vectors-negative300.bin' else: 
path_to_model = '../Ch3/GoogleNews-vectors-negative300.bin' else: path_to_model = '../Ch2/GoogleNews-vectors-negative300.bin' else: path_to_model = 'GoogleNews-vectors-negative300.bin' training_data_path = os.path.join(data_path, "sentiment_sentences.txt") #Load W2V model. This will take some time. %time w2v_model = KeyedVectors.load_word2vec_format(path_to_model, binary=True) print('done loading Word2Vec') #Read text data, cats. #the file path consists of tab separated sentences and cats. texts = [] cats = [] fh = open(training_data_path) for line in fh: text, sentiment = line.split("\t") texts.append(text) cats.append(sentiment) #Inspect the model word2vec_vocab = w2v_model.vocab.keys() word2vec_vocab_lower = [item.lower() for item in word2vec_vocab] print(len(word2vec_vocab)) #Inspect the dataset print(len(cats), len(texts)) print(texts[1]) print(cats[1]) #preprocess the text. def preprocess_corpus(texts): mystopwords = set(stopwords.words("english")) def remove_stops_digits(tokens): #Nested function that lowercases, removes stopwords and digits from a list of tokens return [token.lower() for token in tokens if token.lower() not in mystopwords and not token.isdigit() and token not in punctuation] #This return statement below uses the above function to process twitter tokenizer output further. 
return [remove_stops_digits(word_tokenize(text)) for text in texts] texts_processed = preprocess_corpus(texts) print(len(cats), len(texts_processed)) print(texts_processed[1]) print(cats[1]) # Creating a feature vector by averaging all embeddings for all sentences def embedding_feats(list_of_lists): DIMENSION = 300 zero_vector = np.zeros(DIMENSION) feats = [] for tokens in list_of_lists: feat_for_this = np.zeros(DIMENSION) count_for_this = 0 + 1e-5 # to avoid divide-by-zero for token in tokens: if token in w2v_model: feat_for_this += w2v_model[token] count_for_this +=1 if(count_for_this!=0): feats.append(feat_for_this/count_for_this) else: feats.append(zero_vector) return feats train_vectors = embedding_feats(texts_processed) print(len(train_vectors)) #Take any classifier (LogisticRegression here), and train/test it like before. classifier = LogisticRegression(random_state=1234) train_data, test_data, train_cats, test_cats = train_test_split(train_vectors, cats) classifier.fit(train_data, train_cats) print("Accuracy: ", classifier.score(test_data, test_cats)) preds = classifier.predict(test_data) print(classification_report(test_cats, preds)) ``` Not bad. With little effort we got 81% accuracy. That's a great starting model to have!!
github_jupyter
# ニコニコAIスクール 第1回 Python入門 基礎演習 ## 今日の目標 * Pythonのデータ型と各データ型の取り扱い方を覚える。 * Pythonで頻出する概念を覚える。 * Numpyを用いてベクトル・行列の計算及び操作を記述し、実行できる。 ## キーワード * Python3 * データ型 (int, float, str, bool) * リスト、タプル、辞書 * numpy ## Jupyter notebookことはじめ コードの編集:各セルをクリックして、その中で直接編集 コードの実行:画面上の再生ボタンをクリックまたはShift+Enter 実行停止:画面上の停止ボタンをクリック Notebook中のコードをすべて実行:「Cell」→「Run all」 何かおかしいと感じたら:「Kernel」→「Restart」 ## 基礎演習の進め方 1. 講師がコードの説明をします 2. 講師の指示にしたがって、各セルのコードを実行してください 3. 一部のコードは、「エラー例」「参考」用としてコメントアウトされています。必要に応じてコメントアウトを解除して挙動を確かめよう 4. "WRITE ME!"と書かれている部分は講師の指示とヒントに従いながら自分の手で書いてみましょう **注意:頭に\*が書かれている節は進行状況に応じて解説を飛ばしますので、興味のある方のみ確認してください。** # 1. Python3入門 今回の講義では、Python3.5を用います。Python2系とは若干文法が異なるため、Webサイトなどで情報を確認する際は違いに注意してください。 ## print文と組み込み型 Pythonで頻出する4つの組み込み型 (int, float, str, bool) を紹介します。 型とは、その変数にどのような種類の情報を格納できるかを示したものです。 Pythonは動的型付け言語に分類され、変数の型は自動的に与えられるが、場合によっては誤った挙動を引き起こす点に留意が必要になります。 ``` print("Hello, World!") # 数字 (整数型) a = 1 print(type(a)) # -> int # 数字 (浮動小数点型) b = 0.555 print(type(b)) # -> float # 文字列 (ダブルクオートまたはシングルクオートで囲む) c = "NICO2AI" print(type(c)) # -> str c2 = 'NICO2AI' print(type(c2)) # -> str # 真偽値 (True または False) d = True print(type(d)) # -> bool d2 = True print(type(d2)) # -> bool ``` ## *注意:文字列の囲いについて Pythonにおいては、文字列を表す方法として * シングルクオートで囲む ('hoge') * ダブルクオートで囲む ("hoge") という2つの方法があり、**どちらで書くべきかは指示されていません**。 そのため、**好きなように書いて構いません**。 慣習的にはシンボルと自然言語とで使い分ける場合があります (興味のある方は以下を参照) https://stackoverflow.com/questions/56011/single-quotes-vs-double-quotes-in-python ## 書式付きprint (フォーマット) ``` print("Hello, {0}!".format(c)) print("{0} + {1} is {2}".format(a, b, a + b)) print("{:.2f}".format(b)) # 番号は省略可能、:.2fは小数点2桁目まで表示することを意味 print("{0:06.2f}".format(53.055)) # この場合、少数点は2桁目までで、全部で6桁になるように表示 (足りない場合はゼロ埋めする) print("{0:6.2f}".format(53.055)) # この場合、少数点は2桁目までで、全部で6桁になるように表示 (足りない場合はゼロ埋めしない) ``` ## 四則演算 Pythonでは、特別な注意なく足し算、引き算、掛け算ができます。 ただし、Python3は、標準の割り算( / )では**割り切れる・切れないにかかわらず浮動小数点**を返すことに注意しましょう。 整数を返して欲しい場合、 // を使いましょう。 ``` # 足し算 print(5 + 3) # 引き算 print(5 - 3) # 掛け算 print(10 * 3) 
# 通常の割り算は必ず浮動小数点を返す a = 5 / 2 print(a) print(type(a)) # -> float b = 4 / 2 print(b) print(type(b)) # -> float # 整数を返して欲しい場合は//を使う c = 5 // 2 print(c) print(type(c)) # -> int d = 4 // 2 print(d) print(type(d)) # -> int # 剰余演算子 (あまり) print(5 % 2) ``` ## if文と比較演算子 比較には==, !=, >, >=, <, <=を用いる。ただし、変数がNone(値、中身が存在しないことを示す)かどうかを判定する場合はisを用います。 if文の条件文の最後には必ずコロン(:)をつけ、カッコの代わりに**インデント (半角4文字)**で条件節の中身の処理を記述します。 記述例: ``` if 条件: print("これはif文が真の時のみ実行されるが") print("これはif文の中身とは関係がない") ``` ### 補足:Noneとの比較について Noneとの比較では、is文の使用が推奨されています。==でも比較可能ですが、次の2つの理由によりis文を使うべきとされています。 * is文の方が高速である (http://jaredgrubb.blogspot.jp/2009/04/python-is-none-vs-none.html) * PEP 8 (Pythonの公式スタイルガイド) にてis文の使用が強く推奨されている (https://www.python.org/dev/peps/pep-0008/#programming-recommendations) 実際、上記PEP 8では次の様に説明されています: ``` Comparisons to singletons like None should always be done with is or is not , never the equality operators. ``` ``` a = 5 if a == 5: # 比較には==を使用 print("a is equal to 5") else: print("a is not equal to 5") b = "banana" print(b == "banana") # 文字列の比較にも==を使用 True print(5 > 2) # True print(5 <= 6) # False print(a != 5) # False c = None if c is None: print("c is None") # 未定義の変数に対してはエラー (コメントアウトして実行) #if z is None: # -> ERROR! 
# print("z is None") ``` ## for文 ``` cnt = 0 for idx in range(10): cnt += 1 # cnt = cnt + 1 に同じ print(cnt) ``` ## range文 range([start], stop, step):start以上stop**未満**, 刻み幅stepの数列を作成します。 しばしばfor文と組み合わせて使用されます。 Python3ではイテレータ(iterator)を返すので、**リストとして欲しい場合はlist()で囲う必要があります**。 ``` # [0, 1, 2] for i in range(3): print(i) # [0, 3, 6] (9はない) for j in range(0, 9, 3): print(j) a = range(5) print(a) # Python3ではイテレータが返る print(list(a)) # List ``` ## 関数 プログラミング言語で一般的に用いられるテクニックとして、同じ処理をその入力のみを変えて何度も繰り返すための関数を定義すると便利な場合があります。Pythonでは、defキーワードを用いて関数を定義します。 関数の名前及び引数 (関数名の後ろにつく、関数内で使われる変数) 名は基本的に自由に名付けることができます。 分かりやすい命名を心がけましょう。 変数名の付け方については例えば『リーダブルコード ―より良いコードを書くためのシンプルで実践的なテクニック』(オライリージャパン、2012) などの参考書を参照のこと。 ``` # 累乗を計算する関数 # 引数は分かりやすいように書く def power(base, exponent): # def 関数名(引数1, 引数2, ...) result = 1 for i in range(exponent): result *= base return result print(power(2, 2)) # -> 4 print(power(3, 3)) # -> 27 ``` ### 書いてみよう1: 級数 Q: 1から10までの数の総和を計算する関数my_sum()を実装せよ。 #### ヒント: * 関数の定義は def my_sum(): * for文とrange(start, end, step)を組み合わせて、値を足していく ``` # WRITE ME! # 解答チェック if my_sum() == 55: print("You are smart!") else: print("Opps...something wrong") print(my_sum()) ``` ## *書いてみよう2:フィボナッチ数列 フィボナッチ数列とは、次の条件を満たす数列である。 \begin{align*} f(0) &= 0 \\ f(1) &= 1 \\ f(k+1) &= f(k) + f(k-1)\ \ (k=2, 3,\dots) \\ \end{align*} Q: kを引数として受け取り、f(k)を返す関数fib(k)を実装せよ。 ### ヒント: * f(0)、f(1)はif文を使ってそれぞれ0, 1を返せば良い * for文を使って、f(0), f(1)からf(2), f(3), ...を順に計算していけば良い * 同時代入が便利 (後述) * まずはf(2), f(3)などの簡単なケースで正解になることを確認する ### 同時代入 Pythonでは、次の2つのコードは等価である: ``` a = 1 b = 2 tmp = b b = a a = tmp ``` ``` a = 1 b = 2 a, b = b, a ``` ``` # WRITE ME! 
# 解答チェック fib_answer = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233] if all([fib(i) == x for i, x in enumerate(fib_answer)]): print("You are smart!") else: print("Opps...something wrong") print([fib(i) for i, x in enumerate(fib_answer)]) ``` ## リスト・タプル・辞書 Pythonは、組み込みのデータ構造として * リスト (list):変更可能なデータ列 * タプル (tuple):変更不可能なデータ列 * 辞書 (dict):キーと値の組み合わせてデータを格納、連想配列 の3つのデータ構造を持っています。複数の値を順番に格納・更新する場合はリスト、一度宣言したら更新しない属性 (配列のサイズなど) の格納にはタプル、順番ではなく格納データの種類に意味がある場合・属性名を付けたい場合には辞書を用います。 Pythonでは、**0から配列の添字(index)を数える**ことに注意しましょう。 データ構造 (公式) https://docs.python.jp/3/tutorial/datastructures.html ### リスト (list) ``` # リスト sample_list = [] # リストの初期化 sample_list.append(1) # リストの末尾に値を追加 print(sample_list) print(len(sample_list)) sample_list.extend([2, 3, 4, 5]) # リストの末尾に別のリストを合流 print(sample_list) print(sample_list[1]) # 要素にアクセス (添字(index)は0から始まるので、1は2番目の要素を指す) print(sample_list[2:4]) # リストの一部を取り出すことも可能 print([1, 2] + [3, 4]) # リストは+演算子で連結可能 (足し算ではない!) ``` ### *便利機能:リストの後ろからのアクセス ``` print(sample_list) print(sample_list[len(sample_list)-1]) # 通常、一番後ろの要素にはこうアクセスするが… print(sample_list[-1]) # これだけで書ける!便利! print(sample_list[-4:-2]) # 後ろより2番めより前 ``` ### タプル (tuple) ``` # タプル sample_tuple = (224, 224, 3) # sample_tuple[0] = 192 # 書き換えはできないのでエラー ``` ### 辞書 (dict) 辞書オブジェクトを宣言には、中括弧{}を用います。 辞書内の各要素は ``` { key1: value1, key2: value2, ... 
} ``` というキー (key)と 値 (value) の組み合わせによって表現されます。 キーには、**変更不可能なもの** (数値、文字列、**タプル**) を指定することができます。 辞書を使うことで、所望の要素にキーの名前でアクセスすることができるようになります。 その他、辞書は次の性質を持ちます: * 辞書の各要素は順番を持たない (順番を保持したい場合は[collections.OrderedDict](https://docs.python.jp/3/library/collections.html#collections.OrderedDict)を用いる) * 同じキーが2つ以上存在してはならない **豆知識:**Pythonのdictとデータの保存格納に使われるjsonは、キーと値の組み合わせで記述できる点が共通しており、直感的には対応関係にある (厳密にはルールが一部異なる)。Python3にはjsonモジュールが用意されており、dictとjsonの相互変換の機能を提供しています。 json — JSON エンコーダおよびデコーダ (公式) https://docs.python.jp/3/library/json.html ``` # 辞書の宣言 sample_dict = {} # 要素の代入 sample_dict["apple"] = "gorilla" sample_dict[2] = "two" sample_dict[(1, 2, 3)] = [4, 5, 6] print(sample_dict) # sample_dict[[1]] = 2 # リストは変更可能なのでエラー ``` ### 要素へのアクセス ``` print(sample_dict) print(sample_dict["apple"]) # キーのリスト、値のリストの取得にはkeys(), values()を用いる # ただし、その順番は保証されない! print("Key only:") for key in sample_dict.keys(): print(key) print("Value only:") for value in sample_dict.values(): print(value) # キー及び値の組のリストの取得にはitems()を用いる print("Key and value:") for key, value in sample_dict.items(): print(key, value) # 存在しないキーを指定するとエラー # print(sample_dict["banana"]) ``` ### 辞書の使い方 例えば:画像ファイルの幅 (width) と高さ (height) を格納する場合 ``` image_list = ["img1.jpg", "img2.jpg", "img3.jpg"] # 辞書はネストできる image_size_dict = { "img1.jpg": { "width": 640, "height": 480 } } for image_name in image_list: if image_name not in image_size_dict: # in構文でdict内に指定のキーを持つ要素が存在するか調べられる image_size_dict[image_name] = {} image_size_dict[image_name]["width"] = 1920 image_size_dict[image_name]["height"] = 1080 print(image_size_dict) ``` # 2. 
Numpy入門 (1) ## ライブラリの使用 (import) ライブラリの機能を使うためには、そのライブラリをインポートする必要があります。 numpyの機能を呼び出す際は、ライブラリ名を前につけて、呼び出したいメソッドなどをドット(.)で繋いで呼び出します。 ``` import hoge hoge.foo.bar() ``` Numpyでは、いちいちnumpy.と呼び出すのが面倒であることから、以下のように慣習的にimport as文を用いてnp.とするのが一般的です。 (一方で、省略記法を嫌う人もいるので、議論の分かれるところです) ``` import numpy as np # numpyを'np'という名前でインポートする import time # 時間計測用のライブラリ ``` ## 配列(ndarray)の定義と性質 配列の定義には、np.arrayを用います。 numpyは、基本的に全てのデータをこのndarray型に納め、その上で処理を行います。 numpy配列はそれぞれ形状 (shape) を持ち、その形状の要素数を「次元数」と呼びます。 ``` # ndarrayの定義 a = np.array([1, 2, 3]) print(len(a)) # 長さは3 print(a.ndim) # 1次元配列 b = np.array([[1, 2, 3], [4, 5, 6]]) print(b.shape) # 配列の形状は2×3 (Pythonのリストとしては、「長さ3のリストが2つあるリスト」) print(b.size) # 配列の総要素数 print(b.dtype) # この配列のデータ型 (後ほど) c = np.array([[2, 2, 2], [2, 2, 2]]) print(b + c) # 配列の和 print(b - c) # 配列の差 print(b * c) # 配列の要素積 (b * c = {b_i + c_i}) ``` ## 配列の生成 * np.ones(shape):中身がすべて1の配列を作成 * np.zeros(shape) : 中身がすべて0の配列を作成 * np.arange([start], stop, [step]): range関数と似た機能を提供。連続した数列を作成 ### np.zeros ``` print(np.zeros(1)) # 大きさ (1,) print(np.zeros((2, 3))) # 注意:複数次元の場合はカッコで囲う # print(np.zeros(2, 3)) # エラー ``` ### np.ones ``` print(np.ones((2, 2, 2))) # np.onesも同様 print(np.ones((3, 3))*5) # 要素積と組み合わせると幅が拡がる ``` ### np.arange ``` print(np.arange(5)) # 0から4 (!= 5) までの数列を作成 print(np.arange(0, 10, 3)) # 3刻みの数列を作成 print(np.arange(10, 0, -1)) # 降順のリストも書ける ``` ## ちょっと寄り道:リスト内包表記 Python本体の機能として、ちょっと複雑なリストを表記する際によく用いられます。 通常のfor文よりも高速なので、numpy配列が直接宣言できない場合に使いましょう。 **実行速度: numpyの関数 >> リスト内包表記 > 通常のリスト** ``` # 例1: 1から50までの奇数 odd_numbers = [x * 2 + 1 for x in range(25)] print(odd_numbers) # 例2: 例1をfor文を用いて用意した場合 odd_numbers = [] for x in range(25): odd_numbers.append(x * 2 + 1) print(odd_numbers) # 例3: np.arangeを用いた場合 odd_numbers = np.arange(1, 50, 2) print(odd_numbers) # 実行時間の比較 start = time.time() # time関数同士の時間を比較することで、実行時間を秒で計測できる for trial in range(50000): odd_numbers = [x * 2 + 1 for x in range(25)] end = time.time() print("For内包表記: {} (s)".format(end - start)) start = time.time() 
for trial in range(50000): odd_numbers = [] for x in range(25): odd_numbers.append(x * 2 + 1) end = time.time() print("Forループ: {} (s)".format(end - start)) start = time.time() for trial in range(50000): odd_numbers = np.arange(1, 50, 2) end = time.time() print("Numpy: {} (s)".format(end - start)) ``` ### *リスト内包表記と要素のフィルタリング if文と組み合わせることによって、リスト内包表記で要素のフィルタリングを行うことができます。 ``` # 1から49までの奇数 odds = [x for x in range(50) if x % 2 == 1] print(odds) ``` ## 要素の切り出し・変形 numpyの配列は、リスト同様そのうちの1つまたは一部を切り出すことができます。このとき、切り出した部分配列の要素を書き換えると、**元の配列も書き換わる**ことに注意しましょう。 これは、配列のメモリ上の位置はそのままで、始点と終点を変えているだけであるからです。 (そして、これがnumpyが速い理由のひとつです) また、np.reshapeを用いることで、配列の形状を変えることができます。当然、要素の総数は同じではくてはなりません。 ``` sample_array = np.arange(12).reshape((4, 3)) # (12,)の配列を(4, 3)に変形 print(sample_array) print(sample_array[0, :]) # 1行目を取り出す print(sample_array[:, 0]) # 1列目を取り出す sample_view = sample_array[1:3, 1:3] # 各次元ごとに切り出す print(sample_view) # 切り出した配列を書き換えると、元の配列も書き換わる sample_view[0, 0] = -1 print(sample_view) print(sample_array) a = np.ones((10, 2, 2)).reshape((-1, 2)) # 1つの要素を-1に指定すると、総要素数に合わせて変形してくれる print(a.shape) ``` ## ブロードキャスティング (broadcasting) numpyの強力な機能の1つがこのブロードキャスティングです。ブロードキャスティングは、次のルールに従って、**異なる大きさの配列同士の計算**を可能にします。 * 2つの配列の次元数が同じで、**各次元についてどちらか片方の要素数が1の場合** * 2つの配列の次元数から異なる場合、次元数の大きい配列の後ろの次元から比較して、対応する各次元の要素数が1または同じである場合 説明ではわかりにくいので、実際の例で確認しましょう: ``` # 2配列の形状が全く同一である場合は当然OK a = np.ones((3, 3)) b = np.ones((3, 3)) print((a + b).shape) # -> (3, 3) # どちらか片方の次元数が1なら、その次元について中身をコピーして形状を合わせてくれる a = np.ones((4, 1, 5)) b = np.ones((1, 7, 1)) print((a + b).shape) # -> (4, 7, 5) # 次元数が異なる場合でも後ろから見て要素数の不一致がなければOK a = np.ones((224, 224, 3)) b = np.ones(3) print((a + b).shape) # -> (224, 224, 3) # 一見ダメなように見える例も… a = np.ones((5, 5, 3)) b = np.ones((5, 1)) print((a + b).shape) # -> (5, 5, 3) ``` ### ブロードキャスティングに失敗する例 ``` # CAUTION! コメントアウトして実行 a = np.ones((3, 3)) b = np.ones((2, 3)) # print((a + b).shape) # -> ERROR! a = np.ones((5, 3)) b = np.ones(5) # print((a + b).shape) # -> ERROR! 
後ろから数えると5 != 3 ``` ### np.newaxisを用いて次元を合わせる 前項の(5, 3)と(5,)の足し算のような例は実際のコーディングでも頻発しますが、そのままではブロードキャスティングに失敗してしまいます。 本来変数bに期待する形状は(5,)ではなく(5, 1)であり、そのように変形すれば良さそうです。 そのための便利な機能として、np.newaxisがあります。np.newaxisは次元を任意の位置に追加します。 ``` a = np.ones((5, 3)) b = np.ones(5) c = a + b[:, np.newaxis] # bの2次元目に(,1)を追加するので、(5, 3) + (5, 1) -> (5, 3)となる print(c.shape) ``` ## 書いてみよう3:配列の計算とブロードキャスティング * Q1: (3, 3)の配列を2つ用意して、その和・差・要素積を計算せよ。 * Q2: すべての要素の大きさが5の(10, 10)の行列を構成せよ。 * Q3: 配列AとBは大きさが違うが、ブロードキャスティングによりその和を計算できる。答えを予想した後に実行して結果を確かめよ。 ``` # 課題用 A = np.ones((3, 5, 5)) B = np.array([[1, 2, 3, 4, 5]]) # WRITE ME! # Q1 # Q2 # Q3 ``` ## 行列積の記述 (np.matmul, np.dot) すでに述べた通り、\*演算子は行列の要素積であり、行列同士に定義される行列積ではありませんでした。 行列積を記述する場合、np.matmulまたはnp.dotを用います。 np.matmulとnp.dotは配列が行列(2次元)である場合には同様の挙動を示します。 どちらも一般的に使われているため、両方覚えましょう。 ただし、**3次元以上のテンソルの演算の場合には結果が変わる**ため、そのようなコードを見かけた場合注意しましょう。 一方で、3次元以上の配列の積を記述することは稀かつ、どの次元の積和を取るかが非直感的になるので、その場合はnp.einsum (今回は解説しない) を用いると良いでしょう。 たとえば、行列$W$と列ベクトル$x$の掛け算 \begin{align*} {\bf y} = W{\bf x} \end{align*} は ``` y = np.matmul(W, x) ``` または ``` y = np.dot(W, x) ``` と書けます。 ``` W = np.array([[3, 5], [1, 2]]) # (2, 2) x = np.array([1, 2]) # (2,) y = np.matmul(W, x) print(y) y = np.dot(W, x) print(y) ``` ### 書いてみよう4:行列積 Q: (10, 2)の行列と(2, 10)の適当な行列を作成し、その積を計算せよ。 ``` # WRITE ME! ``` ### *3次元以上のテンソルの場合の結果の比較 ``` A = np.ones((3, 4, 5)) * 2 B = np.ones((3, 5, 1)) * 2 print(np.dot(A, B).shape) # Aの最後の次元(5,)とBの後ろから2番めの次元(5,)を足し上げ、その他は放置 print(np.matmul(A, B).shape) # 後ろ2次元分はnp.dotと同じだが、それより前の次元については要素積を取る A = np.ones((3, 4, 5)) * 2 B = np.ones((4, 5, 1)) * 2 print(np.dot(A, B).shape) # これはOKだが # print(np.matmul(A, B).shape) # これは最初の次元が3 != 4なのでエラー ```
github_jupyter
# 6.1 Reading and Writing Data in Text Format ``` import pandas as pd import numpy as np df = pd.read_csv('datasets/ex1.csv') df df = pd.read_table('datasets/ex1.csv', sep=',') df pd.read_csv('datasets/ex2.csv', header=None) pd.read_csv('datasets/ex2.csv', names=['a', 'b', 'c', 'd', 'message']) names = ['a', 'b', 'c', 'd', 'message'] pd.read_csv('datasets/ex2.csv', names=names, index_col='message') parsed = pd.read_csv('datasets/csv_mindex.csv', index_col=['key1', 'key2']) parsed list(open('datasets/ex3.txt')) result = pd.read_table('datasets/ex3.txt', sep='\s+') result pd.read_csv('datasets/ex4.csv', skiprows=[0, 2, 3]) result = pd.read_csv('datasets/ex5.csv') result pd.isnull(result) pd.read_csv('datasets/ex5.csv', na_values=['NULL']) sentinels = {'message': ['foo', 'NA'], 'something': ['two']} pd.read_csv('datasets/ex5.csv', na_values=sentinels) ``` ### Reading Text Files in Pieces ``` pd.options.display.max_rows = 10 result = pd.read_csv('datasets/ex6.csv') result pd.read_csv('datasets/ex6.csv', nrows=5) chunker = pd.read_csv('datasets/ex6.csv', chunksize=1000) chunker tot = pd.Series([]) for piece in chunker: tot = tot.add(piece['key'].value_counts(), fill_value=0) tot ``` ### Writing Data to Text Format ``` data = pd.read_csv('datasets/ex5.csv') data data.to_csv('datasets/out.csv') import sys data.to_csv(sys.stdout, sep='|') data.to_csv(sys.stdout, na_rep='NULL') data.to_csv(sys.stdout, index=False, header=False) data.to_csv(sys.stdout, index=False, columns=['a', 'b', 'c']) ``` ## Working with Delimited Formats ``` data = pd.read_csv('datasets/ex7.csv') data import csv f = open('datasets/ex7.csv') reader = csv.reader(f) for line in reader: print(line) with open('datasets/ex7.csv') as f: lines = list(csv.reader(f)) header, values = lines[0], lines[1:] data_dict = {h: v for h, v in zip(header, zip(*values))} data_dict class MyDialect(csv.Dialect): lineterminator = '\n' delimiter = ';' quotechar = '"' quoting = csv.QUOTE_MINIMAL reader = csv.reader(f, 
dialect=MyDialect) reader reader = csv.reader(f, delimiter='|') with open('mydata.csv', 'w') as f: writer = csv.writer(f, dialect=MyDialect) writer.writerow(('one', 'two', 'three')) writer.writerow(('1', '2', '3')) writer.writerow(('4', '5', '6')) writer.writerow(('7', '8', '9')) ``` ### JSON Data ``` obj =''' { "name": "Wes", "places_lived": ["United States", "Spain", "Germany"], "pet": null, "siblings": [{"name": "Scott", "age": 30, "pets": ["Zeus", "Zuko"]}, {"name": "Katie", "age": 38, "pets": ["Sixes", "Stache", "Cisco"]}] } ''' import json result = json.loads(obj) result asjson = json.dumps(result) asjson siblings = pd.DataFrame(result['siblings'], columns=['name', 'age']) siblings data = pd.read_json('datasets/example.json') data print(data.to_json()) print(data.to_json(orient='records')) ``` ### XML and HTML: Web Scraping ``` tables = pd.read_table('datasets/fdic_failed_bank_list.html', error_bad_lines=False) len(tables) failures = tables[0] ``` ### Parsing XML with lxml.objectify ``` from lxml import objectify path = 'datasets/mta_perf/Performance_MNR.xml' parsed = objectify.parse(open(path)) root = parsed.getroot() data = [] skip_fields = ['PARENT_SEQ', 'INDICATOR_SEQ', 'DESIRED_CHANGE', 'DECIMAL_PLACES'] for elt in root.INDICATOR: el_data = {} for child in elt.getchildren(): if child.tag in skip_fields: continue el_data[child.tag] = child.pyval data.append(el_data) perf = pd.DataFrame(data) perf.head() # Much more complicated data from io import StringIO tag = '<a href="https://www.google.com">Google</a>' root = objectify.parse(StringIO(tag)).getroot() root root.get('href') root.text ``` # 6.2 Binary Data Formats ``` frame = pd.read_csv('datasets/ex1.csv') frame frame.to_pickle('datasets/frame_pickle') pd.read_pickle('datasets/frame_pickle') ``` ### Using HDF5 Format ``` frame = pd.DataFrame({'a': np.random.rand(100)}) store = pd.HDFStore('mydata.h5') store['obj1'] = frame store['obj1_col'] = frame['a'] store store['obj1'] store['obj1_col'] 
store.put('obj2', frame, format='table') store.select('obj2', where=['index >= 10 and index <=15']) store.close() frame.to_hdf('my_data.h5', 'obj3', format='table') pd.read_hdf('my_data.h5', 'obj3', where=['index < 5']) xlsx = pd.ExcelFile('datasets/ex1.xlsx') pd.read_excel(xlsx, 'Sheet1') frame = pd.read_excel('datasets/ex1.xlsx', 'Sheet1') frame # writing to Excel Format writer = pd.ExcelWriter('datasets/ex2.xlsx') frame.to_excel(writer, 'sheet1') writer.save() frame.to_excel('datasets/ex2.xlsx') ``` # 6.3 Interacting with WEB APIs ``` import requests url = 'https://api.github.com/repos/pandas-dev/pandas/issues' resp = requests.get(url) resp data = resp.json() data[0]['title'] issues = pd.DataFrame(data, columns=['number', 'title', 'labels', 'state']) issues.head() ``` # 6.4 Interacting with Databases ``` import sqlite3 query = ''' CREATE TABLE test( a VARCHAR(20), b VARCHAR(20), c REAL, d INTEGER); ''' con = sqlite3.connect('mydata.sqlite') con.execute(query) con.commit() data = [('Atlanata', 'Georgia', 1.25, 6), ('Tallahasse', 'Florida', 2.6, 3), ('Sacramento', 'California', 1.7, 5)] stmt = "INSERT INTO test VALUES(?, ? , ?, ?)" con.executemany(stmt, data) con.commit() # reading cursor = con.execute('SELECT * FROM test') rows = cursor.fetchall() rows cursor.description pd.DataFrame(rows, columns=[x[0] for x in cursor.description]) # Using SQLAlchemy to read database import sqlalchemy as sqla db = sqla.create_engine('sqlite:///mydata.sqlite') pd.read_sql('SELECT * FROM test', db) ```
github_jupyter
## Overview - Business Understanding In this notebook, I would like to explore the educational and job satisfaction characteristics of respondents of the survey based in Nigeria. To achieve this, I will retrieve data from the Stackoverflow developer survey 2017. The questions that I am interested in asking are; - What is their field of study - What is their highest level of Formal Education - What is their level of job satisfaction - What is their level of career satisfaction ## Data Understanding Now we are going to import the data obtained from the developers survey and convert it into a dataframe ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from sklearn.linear_model import LinearRegression from sklearn.model_selection import train_test_split from sklearn.metrics import r2_score, mean_squared_error %matplotlib inline df = pd.read_csv('survey-results-public.csv') df.head() ``` Let's look at the questions asked during the survey ``` df2 = pd.read_csv('survey-results-schema.csv') df2.head() ``` Now lets explore the data to gain more insights into it ``` num_rows = df.shape[0] num_cols = df.shape[1] print(num_rows, num_cols) ``` We see that there are 51392 rows and 154 columns in the developers survey ``` no_nulls = set(df.columns[df.isnull().any() == 0]) no_nulls ``` We see that there are 7 columns without missing values ``` print(set(df.FormalEducation)) f_ed = df.FormalEducation.value_counts() (f_ed/num_rows).plot(kind='bar') plt.title("What type of education do you possess") ``` From the chart, we see that about 42% of the respondents indicated that they possess bachelor's degrees; this is the most amongst all the other options ``` g_val = df.Gender.value_counts() (g_val/num_rows).plot(kind='bar') plt.title('What Gender are you') ``` About 60% of the respondents identified themselves as Male ``` pd.DataFrame(df.query("Professional == 'Professional developer' and (Gender == 'Male' or Gender == 
'Female')").groupby(['Gender', 'FormalEducation']).mean()['Salary']) ``` Women with Bachelor's degrees and Doctoral degrees indicated higher average salaries than Men with similar degrees ``` set(df2.Column) #I want to obtain all the values under the second dataframe column df.Country.unique() #I want to view all the unique values under the country column in the first dataframe ``` **We are interested in extracting data for developers who live in Nigeria, and work as developers by profession or write code sometimes as part of their job. Let's see which questions asked in the survey can guide us to our required data** ``` list(df2[df2.Column == 'Country']['Question']) list(df2[df2.Column == 'Professional']['Question']) df['Professional'].unique() list(df2[df2.Column=='MajorUndergrad']['Question']) list(df2[df2.Column=='FormalEducation']['Question']) list(df2[df2.Column=='JobSatisfaction']['Question']) list(df2[df2.Column=='CareerSatisfaction']['Question']) ``` Next, we'll create a dataframe df3 which would contain the survey results from Nigeria only. 
Including those that identify as Male or Female and declare that they are a "Professional Developer" or "Professional non-developer who sometimes writes code" ``` df3 = df[(df.Country == 'Nigeria') & ((df.Gender == 'Male') | (df.Gender == 'Female')) & ((df.Professional == 'Professional developer') | (df.Professional == 'Professional non-developer who sometimes writes code'))] df3.head() print(df3.shape[0]) print(df3.shape[1]) ``` There are 75 rows and 154 columns in the new dataframe ``` len(df3[df3['Gender']=='Female']) ``` There are 4 respondents that identify as female from Nigeria in the new dataframe ``` percentage_women= len(df3[df3['Gender']=='Female'])/len(df3)*100 print(round(percentage_women,1)) ``` 5.3 percent of respondents identify as female in dataframe ``` len(df3[df3['Gender']=='Male']) ``` 71 respondents identify as Male from Nigeria in the new dataframe ``` percentage_men= len(df3[df3['Gender']=='Male'])/len(df3)*100 print(round(percentage_men,1)) ``` **Data Preparation** We will subset df3 to include only the columns that interest us ``` col_list = ['MajorUndergrad', 'FormalEducation', 'JobSatisfaction', 'CareerSatisfaction', 'Gender'] df_new = df3[col_list] df_new.head() ``` Let's get a graphical view of the number of missing values in the new dataframe ``` plt.figure(figsize=(25,8)) plt.title('Number of missing values in Columns') missing_count = pd.DataFrame(df_new.isnull().sum(), columns=['sum']).sort_values(by=['sum'], ascending=False).head(20).reset_index() missing_count.columns = ['features', 'sum'] sns.barplot(x='features', y='sum', data=missing_count) plt.show() ``` MajorUndergrad column has 9 missing values, JobSatisfaction column has 3 missing values. Since we will not be performing any modelling on the data, we will ignore these missing values and continue our analysis. 
# Analysis **What is your main field of study?** We will plot separate graphs for both genders to identify the percentage that declared for each field of study ``` ((df_new[df_new['Gender'] == 'Male']['MajorUndergrad'].value_counts()/len((df_new[df_new['Gender'] == 'Male']['MajorUndergrad']))*100)).plot(kind='bar') ``` 36% of male respondents declared computer science or software engineering as their major, about 16% declared computer engineering or electrical/electronics engineering as their major. ``` ((df_new[df_new['Gender'] == 'Female']['MajorUndergrad'].value_counts()/len((df_new[df_new['Gender'] == 'Female']['MajorUndergrad']))*100)).plot(kind='bar') ``` 50% of females declared computer science or software engineering as their major. From the charts above, we see that the most popular degree for developers in both men and women is Computer Science, women on the other hand, have a propensity to come from 'other' fields as opposed to men who are majorly in the traditional majors. **"Which of the following best describes the highest level of formal education that you've completed?"** Next we are going to check the highest level of education of both genders in Nigeria. ``` ((df_new[df_new['Gender'] == 'Male']['FormalEducation'].value_counts()/len((df_new[df_new['Gender'] == 'Male']['FormalEducation']))*100)).plot(kind='bar') ``` About 58% of male respondents declared that bachelor's degree was their highest level of education while about 22% indicated that they had some university experience without a bachelor's degree ``` ((df_new[df_new['Gender'] == 'Female']['FormalEducation'].value_counts()/len((df_new[df_new['Gender'] == 'Female']['FormalEducation']))*100)).plot(kind='bar') ``` 50% of female respondents declared for both Master's degree and bachelor's degree as their highest level of education. This could signify that women in Nigeria feel the need to be better educated before becoming developers as opposed to men. 
**What is your level of Job satisfaction?** Next we will find out which gender declares a higher average job satisfaction level than the other. ``` df_new.groupby(['Gender']).mean()['JobSatisfaction'].sort_values() ``` From the results, it can be seen that male respondents indicated a higher average level of Job satisfaction. Now this result may not be accurate for all the developers in Nigeria because as we can recall, there are only 4 respondents that identify as female and there are 71 that identify as male. **What is your level of career satisfaction?** Finally, we will try to determine the average level of career satisfaction between both genders ``` df_new.groupby(['Gender']).mean()['CareerSatisfaction'].sort_values() ``` From the results, we can say that both genders indicate high levels of career satisfaction with the male respondents just edging it
github_jupyter
<h2>Grover's Search: One Qubit Representation</h2> [Watch Lecture](https://youtu.be/VwzshIQsDBA) The execution of Grover's search algorithm can be simulated on the unit circle. Throughout the computation, the amplitudes of the marked (or unmarked) elements never differ from each other. Therefore, we can group the elements as marked and unmarked elements. As the length of this state vector is 1, we can represent the state as a unit vector on the unit circle, where the vertical line represents the marked elements and the horizontal line represents the unmarked elements. ### Example: N = 8 with 3 marked elements Suppose that the 3rd, 4th, and 7th elements are marked. We can use three qubits and we can associate each element with one of the basis states: $$ \myarray{|c|c|}{ \hline element & state \\ \hline 1st & \ket{000} \\ \hline 2nd & \ket{001} \\ \hline \mathbf{3rd} & \mathbf{\ket{010}} \\ \hline \mathbf{4th} & \mathbf{\ket{011}} \\ \hline 5th & \ket{100} \\ \hline 6th & \ket{101} \\ \hline \mathbf{7th} & \mathbf{\ket{110}} \\ \hline 8th & \ket{111} \\ \hline } $$ Grover's search algorithm starts in the following quantum state: $$ \ket{u} = H\ket{0} \otimes H \ket{0} \otimes H \ket{0} = H^{\otimes 3} \ket{000} $$ $$ \ket{u} = \mypar{ \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} } \otimes \mypar{ \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} } \otimes \mypar{ \frac{1}{\sqrt{2}} \ket{0} + \frac{1}{\sqrt{2}} \ket{1} } $$ $$ \ket{u} = \frac{1}{2\sqrt{2}} \ket{000} + \frac{1}{2\sqrt{2}} \ket{001} + \frac{1}{2\sqrt{2}} \ket{010} + \frac{1}{2\sqrt{2}} \ket{011} + \frac{1}{2\sqrt{2}} \ket{100} + \frac{1}{2\sqrt{2}} \ket{101} + \frac{1}{2\sqrt{2}} \ket{110} + \frac{1}{2\sqrt{2}} \ket{111}. 
$$ We group them as unmarked and marked elements: $$ \ket{u} = \frac{1}{2\sqrt{2}} \big( \ket{000} + \ket{001} + \ket{100} + \ket{101} + \ket{111} \big) + \frac{1}{2\sqrt{2}} \big(\mathbf{ \ket{010} + \ket{011} + \ket{110} } \big) $$ or as vectors $$ \ket{u} = \ket{u_{unmarked}} + \ket{u_{marked}} = \frac{1}{2\sqrt{2}} \myvector{1 \\ 1 \\ 0 \\ 0 \\ 1 \\ 1 \\ 0 \\ 1} + \frac{1}{2\sqrt{2}} \myvector{0 \\ 0 \\ 1 \\ 1 \\ 0 \\ 0 \\ 1 \\ 0} $$ #### Orthogonality of $ \ket{u_{unmarked}} $ and $ \ket{u_{marked}} $ It is clear that the quantum states $ \ket{u_{unmarked}} $ and $ \ket{u_{marked}} $ are orthogonal each other, i.e., $ \ket{u_{unmarked}} \perp \ket{u_{marked}} $. On the unit circle, the state $ \ket{0} $ and $ \ket{1} $ are orthogonal to each other, and so, we can represent (map) $ \ket{u} = \ket{u_{unmarked}} + \ket{u_{marked}} $ on the unit circle as $$ \ket{u} \rightarrow \alpha \ket{0} + \beta \ket{1} $$ or by re-naming the basis states $$ \ket{u} \rightarrow \alpha \ket{unmarked} + \beta \ket{marked}. $$ #### How can we determine the amplitudes of the states $ \ket{0} $ and $ \ket{1} $ based on the amplitudes of the marked and unmarked elements? We can rewrite $ \ket{u} $ as follows: $$ \ket{u} = \ket{u_{unmarked}} + \ket{u_{marked}} = \frac{\sqrt{5}}{2\sqrt{2}} \myvector{\frac{1}{\sqrt{5}} \\ \frac{1}{\sqrt{5}} \\ 0 \\ 0 \\ \frac{1}{\sqrt{5}} \\ \frac{1}{\sqrt{5}} \\ 0 \\ \frac{1}{\sqrt{5}} } + \frac{\sqrt{3}}{2\sqrt{2}} \myvector{0 \\ 0 \\ \frac{1}{\sqrt{3}} \\ \frac{1}{\sqrt{3}} \\ 0 \\ 0 \\ \frac{1}{\sqrt{3}} \\ 0} $$ Here both vectors have unit length and so we can replaces them with the states $ \ket{unmarked} $ and $ \ket{marked} $, respectively. Thus, the coefficients of the vectors are *the amplitudes* we are looking for: $$ \ket{u} \rightarrow \frac{\sqrt{5}}{2\sqrt{2}} \ket{unmarked} + \frac{\sqrt{3}}{2\sqrt{2}} \ket{marked} $$. We draw the obtained unit circle by using python below. 
``` %run qlatvia.py draw_qubit_grover() draw_quantum_state((5/8)**0.5,(3/8)**0.5,"|u>") ``` #### The amplitudes of states $ \ket{marked} $ and $ \ket{unmarked} $ during the computation Remark that, after each phase of Grover's algorithm, the states $ \ket{marked} $ and $ \ket{unmarked} $ do not change (see also below). Any quantum state during the computation of Grover's algorithm can be represented, for some $ a,b $, as $$ \ket{u_j} = \ket{u_{j,unmarked}} + \ket{u_{j,marked}} = \myvector{ a \\ a \\ 0 \\ 0 \\ a \\ a \\ 0 \\ a } + \myvector{0 \\ 0 \\b \\ b \\ 0 \\ 0 \\ b \\ 0} = a \sqrt{5} \myvector{\frac{1}{\sqrt{5}} \\ \frac{1}{\sqrt{5}} \\ 0 \\ 0 \\ \frac{1}{\sqrt{5}} \\ \frac{1}{\sqrt{5}} \\ 0 \\ \frac{1}{\sqrt{5}} } + b \sqrt{3} \myvector{0 \\ 0 \\ \frac{1}{\sqrt{3}} \\ \frac{1}{\sqrt{3}} \\ 0 \\ 0 \\ \frac{1}{\sqrt{3}} \\ 0} = a\sqrt{5} ~ \ket{unmarked} + b\sqrt{3} ~ \ket{marked}. $$ As a generic rule: For $ N $ elements with $ k $ marked ones, if the amplitudes of an unmarked and a marked elements are $ a $ and $ b $, respectively, then the quantum state can be represented as $$ a\sqrt{N-k} ~ \ket{unmarked} + b \sqrt{k} ~ \ket{marked}. $$ ## Visualization of Grover's Search algorithm In this section, we execute Grover's search algorithm by using the modified game explained in notebook [Inversion About the Mean](B80_Inversion_About_the_Mean.ipynb). You may use your functions *oracle* and *inversion* in [Task 2](B80_Inversion_About_the_Mean.ipynb#task2) in the same notebook. *For simplicity, we assume that the first element is always marked and the last element is always unmarked.* <h3> Task 1 </h3> Execute Grover's search algorithm for 5 steps where $ N = 16 $ and the first element is marked. Draw all quantum states on the unit circle during the execution. Print the angle of each state in degree (use $\sin^{-1}$), and check whether there is any pattern for the oracle and inversion operators? Is there any pattern for each step of Grover's algorithm? 
``` def query(elements=[1],marked_elements=[0]): for i in marked_elements: elements[i] = -1 * elements[i] return elements def inversion (elements=[1]): # summation of all values summation = 0 for i in range(len(elements)): summation += elements[i] # mean of all values mean = summation / len(elements) # reflection over mean for i in range(len(elements)): value = elements[i] new_value = mean - (elements[i]-mean) elements[i] = new_value return elements from math import asin, pi # initial values iteration = 5 N = 16 marked_elements = [0] k = len(marked_elements) elements = [] states_on_unit_circle= [] # initial quantum state for i in range(N): elements.append(1/N**0.5) x = elements[N-1] * ((N-k)**0.5) y = elements[0] * (k**0.5) states_on_unit_circle.append([x,y,"0"]) # Execute Grover's search algorithm for $iteration steps for step in range(iteration): # query elements = query(elements,marked_elements) x = elements[N-1] * ((N-k)**0.5) y = elements[0] * (k**0.5) states_on_unit_circle.append([x,y,str(step)+"''"]) # inversion elements = inversion(elements) x = elements[N-1] * ((N-k)**0.5) y = elements[0] * (k**0.5) states_on_unit_circle.append([x,y,str(step+1)]) # draw all states %run qlatvia.py draw_qubit_grover() for state in states_on_unit_circle: draw_quantum_state(state[0],state[1],state[2]) # print the angles print("angles in degree") for state in states_on_unit_circle: print(asin(state[1])/pi*180) ``` #### Observations The operator oracle is a reflection over the $x$-axis. The operator inversion is a reflection over the initial state. If the angle of the first state $ \theta $, then each step of Grover's algorithm is a rotation with angle $ 2 \theta $. <hr> <h3> Task 2 </h3> In Task 1, after which step the probability of observing a marked element is the highest? As can be verified from the angles, after the third step, the probability of observing a marking element is the highest. <h3> Task 3 </h3> We have a list of size $ N = 128 $. 
We iterate Grover's search algorithm 10 steps. Visually determine (i.e., Tasks 1 & 2) the good number of iterations if the number of marked elements is 1, 2, 4, or 8. (The quantum state on the unit circle should be close to the $y$-axis.) ``` def query(elements=[1],marked_elements=[0]): for i in marked_elements: elements[i] = -1 * elements[i] return elements def inversion (elements=[1]): # summation of all values summation = 0 for i in range(len(elements)): summation += elements[i] # mean of all values mean = summation / len(elements) # reflection over mean for i in range(len(elements)): value = elements[i] new_value = mean - (elements[i]-mean) elements[i] = new_value return elements from math import asin, pi # initial values iteration = 10 N = 128 # try each case one by one marked_elements = [0] #marked_elements = [0,1] #marked_elements = [0,1,2,3] #marked_elements = [0,1,2,3,4,5,6,7] k = len(marked_elements) elements = [] states_on_unit_circle= [] # initial quantum state for i in range(N): elements.append(1/N**0.5) x = elements[N-1] * ((N-k)**0.5) y = elements[0] * (k**0.5) states_on_unit_circle.append([x,y,"0"]) # Execute Grover's search algorithm for $iteration steps for step in range(iteration): # query elements = query(elements,marked_elements) x = elements[N-1] * ((N-k)**0.5) y = elements[0] * (k**0.5) states_on_unit_circle.append([x,y,str(step)+"''"]) # inversion elements = inversion(elements) x = elements[N-1] * ((N-k)**0.5) y = elements[0] * (k**0.5) states_on_unit_circle.append([x,y,str(step+1)]) # draw all states %run qlatvia.py draw_qubit_grover() for state in states_on_unit_circle: draw_quantum_state(state[0],state[1],state[2]) # print the angles print("angles in degree") for state in states_on_unit_circle: print(asin(state[1])/pi*180) ``` #### Observations The good number of iterations - For $ k = 1 $, $ 8 $ iterations - For $ k = 2 $, $ 6 $ iterations - For $ k = 4 $, $ 4 $ iterations - For $ k = 8 $, $ 3 $ or $ 9 $ iterations <hr> <h3> Task 4 </h3> 
We have a list of size $ N = 256 $. We iterate Grover's search algorithm 20 (or 10) steps. Visually determine (i.e., Tasks 1 & 2) the good number of iterations if the number of marked elements is 1, 2, 4, or 8. (The quantum state on the unit circle should be close to the $y$-axis.) ``` def query(elements=[1],marked_elements=[0]): for i in marked_elements: elements[i] = -1 * elements[i] return elements def inversion (elements=[1]): # summation of all values summation = 0 for i in range(len(elements)): summation += elements[i] # mean of all values mean = summation / len(elements) # reflection over mean for i in range(len(elements)): value = elements[i] new_value = mean - (elements[i]-mean) elements[i] = new_value return elements from math import asin, pi # initial values iteration = 20 #iteration = 10 N = 256 # try each case one by one marked_elements = [0] #marked_elements = [0,1] #marked_elements = [0,1,2,3] #marked_elements = [0,1,2,3,4,5,6,7] k = len(marked_elements) elements = [] states_on_unit_circle= [] # initial quantum state for i in range(N): elements.append(1/N**0.5) x = elements[N-1] * ((N-k)**0.5) y = elements[0] * (k**0.5) states_on_unit_circle.append([x,y,"0"]) # Execute Grover's search algorithm for $iteration steps for step in range(iteration): # query elements = query(elements,marked_elements) x = elements[N-1] * ((N-k)**0.5) y = elements[0] * (k**0.5) states_on_unit_circle.append([x,y,str(step)+"''"]) # inversion elements = inversion(elements) x = elements[N-1] * ((N-k)**0.5) y = elements[0] * (k**0.5) states_on_unit_circle.append([x,y,str(step+1)]) # draw all states %run qlatvia.py draw_qubit_grover() for state in states_on_unit_circle: draw_quantum_state(state[0],state[1],state[2]) # print the angles print("angles in degree") for state in states_on_unit_circle: print(asin(state[1])/pi*180) ``` #### Observations The good number of iterations - For $ k = 1 $, $ 12 $ iterations - For $ k = 2 $, $ 8 $ iterations - For $ k = 4 $, $ 6 $ iterations - 
For $ k = 8 $, $ 4 $ iterations ## More on Grover's search algorithm The idea behind Grover's search algorithm is that <ul> <li> the amplitudes of the marked (less frequent) elements can be quickly amplified, </li> <li> and so the probability of observing one of the marked elements quickly approaches 1.</li> </ul> For "quick" amplification, we iteratively apply two reflections to our quantum states. The first reflection is a clockwise rotation, and the second reflection is a counterclockwise rotation. The second reflection always rotates $ 2 \theta $ degrees more than the first reflection, where $ \theta $ is the angle of the initial state on the unit circle. Therefore, the quantum state is rotated by $ 2 \theta $ in the counter-clockwise direction after two reflections. As an example, we consider the rotation on the unit circle with angle $ \frac{\pi}{8} $ that starts in $ \ket{0} $. <ul> <li> After every 4 rotations, we visit states $ \ket{1} $, $ -\ket{0} $, $ -\ket{1} $, again $ \ket{0} $, and so on. </li> <li> Remark that the probability of observing the state $ \ket{1} $ oscillates between 0 and 1 while rotating. </li> </ul> Similarly, when iterating Grover's search algorithm, we should be careful when to stop. <ul> <li> Because, after hitting a maximum value, these amplitudes start to quickly decrease, and after hitting a minimum value, they are amplified again, and so on.</li> </ul> ### Mathematical derivation of the reflection by inversion (optional) _(You will see a similar but alternative derivation in the next notebook.)_ It is clear that the query operator reflects the quantum state on the unit circle over the $ x $-axis. On the other hand, the inversion operator reflects the quantum state on the unit circle over the line defined by the initial state, say $ \ket{u} $. This fact is not so obvious and we present here how to derive it. ($ \bra{u} $ is the conjugate transpose of the vector $ \ket{u} $.) 
The initial quantum state is $ \ket{u} = \myvector{\frac{1}{\sqrt{N}} \\ \vdots \\ \frac{1}{\sqrt{N}}}$ and the inversion is a linear operator and represened by the matrix: $$ D = 2 \mymatrix{ccc}{ \frac{1}{N} & \cdots & \frac{1}{N} \\ \vdots & \ddots & \vdots \\ \frac{1}{N} & \cdots & \frac{1}{N} \\ } - I . $$ Since $ \ket{u} \bra{u} = \mymatrix{ccc}{ \frac{1}{N} & \cdots & \frac{1}{N} \\ \vdots & \ddots & \vdots \\ \frac{1}{N} & \cdots & \frac{1}{N} \\ } $, we can represent $ D $ in terms of $ \ket{u} $ as $ D = 2 \ket{u} \bra{u} - I$. Let our current quantum state be $a \ket{u} + b \ket{u^\perp}$, where $\ket{u^\perp}$ denotes the state, which is orthogonal (perpendicular) to $\ket{u}$. After appling $D$ to our current quantum state, we obtain $$D \big(a \ket{u} + b \ket{u^\perp}\big) = \big(2 \ket{u} \bra{u} - I \big) \big(a \ket{u} + b \ket{u^\perp} \big) = a \big(2 \ket{u} \bra{u} \ket{u} - \ket{u} \big) + b \big(2 \ket{u} \bra{u} \ket{u^\perp} - \ket{u^\perp} \big). $$ To simplify this equation, we use the following two facts: <ul> <li>$\bra{u} \ket{u} = 1$, because the inner product of a quantum state gives its length square, which is equal to 1;</li> <li>$\bra{u} \ket{u^\perp} = 0$, because the states are orthogonal to each other.</li> </ul> $$ a \big( 2 \ket{u} \bra{u} \ket{u} - \ket{u} \big) + b \big( 2 \ket{u} \bra{u} \ket{u^\perp} - \ket{u^\perp} \big) = a \big( 2 \ket{u} - \ket{u} \big) + b \big( 2 \ket{u} \cdot 0 - \ket{u^\perp} \big) = a \ket{u} - b \ket{u^\perp}. $$ As $D (a \ket{u} + b \ket{u^\perp}) = a \ket{u} - b \ket{u^\perp}$, we conclude that $D$ is a reflection over axis formed by the state $\ket{u}$. <h3> The number of iterations </h3> If there is a single marked element in a list of size $ N $, then $ \pi \dfrac{\sqrt{N}}{4} $ iterations can give the marked element with high probability. If there are $k$ marked elements, then it is better to iterate $ \pi \dfrac{\sqrt{\frac{N}{k}}}{4} $ times. 
If $k$ is unknown, then we can execute the algorithm with different iterations. One way of doing this is to iterate the algorithm $$ \pi \dfrac{\sqrt{\frac{N}{1}}}{4}, \pi \dfrac{\sqrt{\frac{N}{2}}}{4}, \pi \dfrac{\sqrt{\frac{N}{4}}}{4}, \pi \dfrac{\sqrt{\frac{N}{8}}}{4}, \ldots $$ times. The total number of iterations will still be proportional to $ \pi \dfrac{\sqrt{N}}{4} $: $ O \Big( \pi \dfrac{\sqrt{N}}{4} \Big) $.
github_jupyter
# TSG037 - Determine master pool pod hosting primary replica ## Description Determine the pod that hosts the primary replica for the Big Data Cluster when master pool high availability is enabled. For BDC deployed with high availability, the master pool has at least three master PODs (availability group replicas). SQL Server deployed in the master pool is based on a Contained Availability Group, where the availability group has its own logical master database. There are 2 connection contexts that could be made to SQL Server deployed in the master pool: 1. Connection to the availability group using the sql-server-master endpoint; using this connection you will be connected to the availability group master database. 2. Connection to the SQL instance; using this [connection](https://docs.microsoft.com/en-us/sql/big-data-cluster/deployment-high-availability?view=sqlallproducts-allversions#instance-connect) you will be connected to the instance master database. You may need this type of connection for certain operations like setting server level configurations or manually adding a database to the availability group (in case the database was created with a restore workflow). `Note`: for BDC deployed without high availability, this notebook will always return master-0 since we have only one master POD in the master pool. ## Steps ### Common functions Define helper functions used in this notebook. 
``` # Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found def run(cmd, return_output=False, no_output=False, retry_count=0, base64_decode=False, return_as_json=False): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) 
to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportability, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. 
(otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) # Display an install HINT, so the user can click on a SOP to install the missing binary # if which_binary == None: print(f"The path used to search for '{cmd_actual[0]}' was:") print(sys.path) if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. 
else: print(line, end='') if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. # # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. 
# if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: if base64_decode: import base64 return base64.b64decode(output).decode('utf-8') else: return output elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: if base64_decode: import base64 return base64.b64decode(output).decode('utf-8') else: return output # Hints for tool retry (on transient fault), known errors and install guide # retry_hints = {'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 
'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use', 'Login timeout expired (0) (SQLDriverConnect)', 'SSPI Provider: No Kerberos credentials available', ], 'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond', ], 'python': [ ], } error_hints = {'azdata': [['Please run \'azdata login\' to first authenticate', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Can\'t open lib \'ODBC Driver 17 for SQL Server', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. 
Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb'], ['NameError: name \'azdata_login_secret_name\' is not defined', 'SOP013 - Create secret for azdata login (inside cluster)', '../common/sop013-create-secret-for-azdata-login.ipynb'], ['ERROR: No credentials were supplied, or the credentials were unavailable or inaccessible.', 'TSG124 - \'No credentials were supplied\' error from azdata login', '../repair/tsg124-no-credentials-were-supplied.ipynb'], ['Please accept the license terms to use this product through', 'TSG126 - azdata fails with \'accept the license terms to use this product\'', '../repair/tsg126-accept-license-terms.ipynb'], ], 'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb'], ], 'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP012 - Install unixodbc for Mac', '../install/sop012-brew-install-odbc-for-sql-server.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb'], ], } install_hint = {'azdata': [ 'SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb' ], 'kubectl': [ 'SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb' ], } print('Common functions defined successfully.') ``` ### Instantiate Kubernetes client ``` # Instantiate the Python Kubernetes client into 'api' variable import os from IPython.display import Markdown try: from kubernetes import client, config from kubernetes.stream import stream except ImportError: # Install the Kubernetes module import sys 
!{sys.executable} -m pip install kubernetes try: from kubernetes import client, config from kubernetes.stream import stream except ImportError: display(Markdown(f'HINT: Use [SOP059 - Install Kubernetes Python module](../install/sop059-install-kubernetes-module.ipynb) to resolve this issue.')) raise if "KUBERNETES_SERVICE_PORT" in os.environ and "KUBERNETES_SERVICE_HOST" in os.environ: config.load_incluster_config() else: try: config.load_kube_config() except: display(Markdown(f'HINT: Use [TSG118 - Configure Kubernetes config](../repair/tsg118-configure-kube-config.ipynb) to resolve this issue.')) raise api = client.CoreV1Api() print('Kubernetes client instantiated') ``` ### Get the namespace for the big data cluster Get the namespace of the Big Data Cluster from the Kubernetes API. **NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either: - set \[0\] to the correct value for the big data cluster. - set the environment variable AZDATA_NAMESPACE, before starting Azure Data Studio.
``` # Place Kubernetes namespace name for BDC into 'namespace' variable if "AZDATA_NAMESPACE" in os.environ: namespace = os.environ["AZDATA_NAMESPACE"] else: try: namespace = api.list_namespace(label_selector='MSSQL_CLUSTER').items[0].metadata.name except IndexError: from IPython.display import Markdown display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.')) raise print('The kubernetes namespace for your big data cluster is: ' + namespace) ``` ### Get the controller username and password Get the controller username and password from the Kubernetes Secret Store and place in the required AZDATA_USERNAME and AZDATA_PASSWORD environment variables. ``` # Place controller secret in AZDATA_USERNAME/AZDATA_PASSWORD environment variables import os, base64 os.environ["AZDATA_USERNAME"] = run(f'kubectl get secret/controller-login-secret -n {namespace} -o jsonpath={{.data.username}}', return_output=True, base64_decode=True) os.environ["AZDATA_PASSWORD"] = run(f'kubectl get secret/controller-login-secret -n {namespace} -o jsonpath={{.data.password}}', return_output=True, base64_decode=True) print(f"Controller username '{os.environ['AZDATA_USERNAME']}' and password stored in environment variables") ``` ### Find Pod hosting the primary replica Run the T-SQL command using `azdata sql query`. ``` run(f'azdata sql query -q "SELECT @@SERVERNAME [pod hosting primary replica]" -o table') del os.environ["AZDATA_PASSWORD"] print("Notebook execution is complete.") ```
github_jupyter
``` import keras print(keras.__version__) #Importing Libraries import sys import os import numpy as np import pandas as pd from tensorflow.keras import Sequential from keras.layers import Dense,Dropout,Activation,Flatten from keras.layers import Conv2D,MaxPooling2D,BatchNormalization,AveragePooling2D from keras.losses import categorical_crossentropy from keras.optimizers import Adam from keras.regularizers import l2 from keras.utils import np_utils #Reading file df=pd.read_csv(r'S:\ML Projects\Facial Expression\fer2013\fer2013.csv') print(df["Usage"].value_counts) #Defining variables X_train,y_train,X_test,y_test=[],[],[],[] for index, row in df.iterrows(): val=row['pixels'].split(" ") try: if 'Training' in row['Usage']: X_train.append(np.array(val,'float32')) y_train.append(row['emotion']) elif 'PublicTest' in row['Usage']: X_test.append(np.array(val,'float32')) y_test.append(row['emotion']) except: print(f"error occured at index :{index} and row:{row}") print(f"X_train sample data:{X_train[0:2]}") print(f"y_train sample data:{y_train[0:2]}") print(f"X_test sample data:{X_test[0:2]}") print(f"y_test sample data:{y_test[0:2]}") X_train=np.array(X_train,'float32') y_train=np.array(y_train,'float32') X_test=np.array(X_test,'float32') y_test=np.array(y_test,'float32') #Normalizing the dataset between 0 and 1 X_train -= np.mean(X_train, axis=0) X_train /= np.std(X_train, axis=0) X_test -= np.mean(X_test, axis=0) X_test /= np.std(X_test, axis=0) #Reshaping the dataset so that keras accept it. 
num_features=64 num_labels=7 batch_size=64 epochs=30 width,height=48,48 X_train=X_train.reshape(X_train.shape[0],width,height,1) X_test=X_test.reshape(X_test.shape[0],width,height,1) #Designing CNN in keras from tensorflow.keras.layers import Conv2D, Flatten, MaxPooling2D, Dropout, BatchNormalization,Dense #model = tensorflow.keras.Sequential() model=Sequential() #1st convolutional layer model.add(Conv2D(num_features,kernel_size=(3,3),activation='relu',input_shape=(X_train.shape[1:]))) model.add(Conv2D(num_features,kernel_size=(3,3),activation='relu')) #Taking maxium value from pool and using it as output model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2))) #Dropout so model does not overfit model.add(Dropout(0.5)) import keras_resnet import keras_resnet.models #2nd convolutional layer model.add(Conv2D(num_features,kernel_size=(3,3),activation='relu')) model.add(Conv2D(num_features,kernel_size=(3,3),activation='relu')) model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2))) model.add(Dropout(0.5)) #3rd convolutional layer model.add(Conv2D(2*num_features,(3,3),activation='relu')) model.add(Conv2D(2*num_features,(3,3),activation='relu')) model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2))) model.add(Flatten()) #Adding Dense layers model.add(Dense(2*2*num_features,activation='relu')) model.add(Dropout(0.2)) model.add(Dense(2*2*num_features,activation='relu')) model.add(Dropout(0.2)) model.add(Dense(num_labels,activation='softmax')) model.compile(loss=categorical_crossentropy,optimizers=Adam(),metrics=['accuracy']) model.fit(X_train,y_train,batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(X_test,y_test), shuffle=True) #Saving the model to use it later on fer_json = model.to_json() with open("fer.json", "w") as json_file: json_file.write(fer_json) model.save_weights("fer.h5") ```
github_jupyter
# Text *Under construction* <!-- ## Strings [Strings](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str) are defined using (single or double) quotes: ```python mathematician = 'Ramanujan' print(mathematician) ``` Ramanujan A [string](https://docs.python.org/3/library/stdtypes.html#text-sequence-type-str) is a sequence of characters enclosed in quotes. ```python name = 'Patrick' print(name) ``` ```output Patrick ``` ```python sentence = 'Math 210 is the best class in the world?!!' print(sentence) ``` ```output Math 210 is the best class in the world?!! ``` ```python quotes = 'Use a double quote " inside single quotes' print(quotes) ``` ```output Use a double quote " inside single quotes ``` Strings are sequences of characters and so we can access the characters in a string just like the elements of a list. ```python name[0] ``` ```output 'P' ``` ```python name[-1] ``` ```output 'k' ``` ```python type(name) ``` ```output str ``` --> <!-- ## 1. Text data ### Creating strings The text datatype in Python is called [string](https://docs.python.org/3/tutorial/introduction.html#strings) (`str`). We write strings by typing text enclosed in single or double or triple quotes. ```python course = 'MATH 210 Introduction to Mathematical Computing' ``` ```python print(course) ``` MATH 210 Introduction to Mathematical Computing ```python type(course) ``` str Or use double quotes: ```python course = "MATH 210 Introduction to Mathematical Computing" ``` ```python print(course) ``` MATH 210 Introduction to Mathematical Computing Generally, we use double quote's if our string contains a single quote. ```python today = "It's a rainy day." ``` ```python print(today) ``` It's a rainy day. 
Use triple quotes to write a multiline string: ```python lyrics = '''To the left, to the left To the left, to the left To the left, to the left Everything you own in the box to the left In the closet that's my stuff, yes If I bought it please don't touch''' ``` ```python print(lyrics) ``` To the left, to the left To the left, to the left To the left, to the left Everything you own in the box to the left In the closet that's my stuff, yes If I bought it please don't touch ### Strings are sequences A string is a sequence type and so we can use strings in `for` loops and list comprehensions. For example: ```python word = 'Math' for letter in word: print('Gimme a',letter + '!') print('What does that spell?!',word + '!') ``` Gimme a M! Gimme a a! Gimme a t! Gimme a h! What does that spell?! Math! Note that the addition operator acts as concatenation of strings: ```python 'MATH' + '210' ``` 'MATH210' We can also convert strings to lists of characters: ```python list('Mathematics') ``` ['M', 'a', 't', 'h', 'e', 'm', 'a', 't', 'i', 'c', 's'] Use index syntax just like for lists to access characters in a string: ```python password = 'syzygy' ``` ```python password[2] ``` 'z' ### String methods There are *many* [string methods](https://docs.python.org/3/library/stdtypes.html#string-methods) available to manipulate strings. Let's try a few methods: ```python sentence = "The quick brown fox jumped over the lazy dog." ``` ```python uppercase_sentence = sentence.upper() ``` ```python sentence ``` 'The quick brown fox jumped over the lazy dog.' ```python uppercase_sentence ``` 'THE QUICK BROWN FOX JUMPED OVER THE LAZY DOG.' A string (like all Python datatypes) is an [*object*](https://docs.python.org/3/tutorial/classes.html): it's a collection of data *and* methods for manipulating the data. We use the dot notation to access the methods of an object. 
```python euler = "Euler's Method" ``` ```python euler.replace('E','3') ``` "3uler's Method" ```python euler.replace ``` <function str.replace> -->
github_jupyter
# Lab 04 : Test set evaluation -- demo ``` # For Google Colaboratory import sys, os if 'google.colab' in sys.modules: from google.colab import drive drive.mount('/content/gdrive') file_name = 'test_set_demo.ipynb' import subprocess path_to_file = subprocess.check_output('find . -type f -name ' + str(file_name), shell=True).decode("utf-8") print(path_to_file) path_to_file = path_to_file.replace(file_name,"").replace('\n',"") os.chdir(path_to_file) !pwd import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from random import randint import utils ``` ### Download the data and print the sizes ``` from utils import check_mnist_dataset_exists data_path=check_mnist_dataset_exists() train_data=torch.load(data_path+'mnist/train_data.pt') train_label=torch.load(data_path+'mnist/train_label.pt') test_data=torch.load(data_path+'mnist/test_data.pt') test_label=torch.load(data_path+'mnist/test_label.pt') ``` ### Make a ONE layer net class. ``` class one_layer_net(nn.Module): def __init__(self, input_size, output_size): super(one_layer_net , self).__init__() self.linear_layer = nn.Linear( input_size, output_size , bias=False) def forward(self, x): scores = self.linear_layer(x) return scores ``` ### Build the net ``` net=one_layer_net(784,10) print(net) ``` ### Choose the criterion, optimizer, batchsize, learning rate ``` criterion = nn.CrossEntropyLoss() optimizer=torch.optim.SGD( net.parameters() , lr=0.01 ) bs=200 ``` ### Do 30 passes through the training set ``` for epoch in range(30): running_loss=0 running_error=0 num_batches=0 shuffled_indices=torch.randperm(60000) for count in range(0,60000,bs): optimizer.zero_grad() indices=shuffled_indices[count:count+bs] minibatch_data = train_data[indices] minibatch_label= train_label[indices] inputs = minibatch_data.view(bs,784) inputs.requires_grad_() scores=net( inputs ) loss = criterion( scores , minibatch_label) loss.backward() optimizer.step() # compute and accumulate stats running_loss += 
loss.detach().item() error = utils.get_error( scores.detach() , minibatch_label) running_error += error.item() num_batches+=1 # compute stats for the full training set total_loss = running_loss/num_batches total_error = running_error/num_batches print('epoch=',epoch, '\t loss=', total_loss , '\t error=', total_error*100 ,'percent') ``` ### Now that the network is trained and do 10% of error on the training set, we are going to see how well it is doing on the test set... ``` running_error=0 num_batches=0 for i in range(0,10000,bs): # extract the minibatch minibatch_data = test_data[i:i+bs] minibatch_label= test_label[i:i+bs] # reshape the minibatch inputs = minibatch_data.view(bs,784) # feed it to the network scores=net( inputs ) # compute the error made on this batch error = utils.get_error( scores , minibatch_label) # add it to the running error running_error += error.item() num_batches+=1 # compute error rate on the full test set total_error = running_error/num_batches print( 'error rate on test set =', total_error*100 ,'percent') ``` ### Choose image at random from the test set and see how good/bad are the predictions ``` # choose a picture at random idx=randint(0, 10000-1) im=test_data[idx] # diplay the picture utils.show(im) # feed it to the net and display the confidence scores scores = net( im.view(1,784)) probs= F.softmax(scores, dim=1) utils.show_prob_mnist(probs) ```
github_jupyter
# What is Survival Analysis? [Survival analysis](https://en.wikipedia.org/wiki/Survival_analysis) is used to study the **time** until some **event** of interest (often referred to as **death**) occurs. Time could be measured in years, months, weeks, days, etc. The event could be anything of interest. It could be an actual death, a birth, a Pokemon Go server crash, etc. In this post we are interested in how long drafted NFL players are in the league, so the event of interest will be the retirement of drafted NFL players. The duration of time leading up to the event of interest can be called the **survival time**. In our case, the survival time is the number of years that a player was active in the league (according to [Pro Football Reference](http://www.pro-football-reference.com/)). Some of the players in this analysis are still active players (e.g. Aaron Rodgers, Eli Manning, etc.), so we haven't observed their retirement (the event of interest). Those players are considered **censored**. While we have some information about their career length (or survival time), we don't know the full length of their career. This specific type of censorship, one in which we do not observe end of the survival time, is called **right-censorship**. The methods developed in the field of survival analysis were created in order to deal with the issue of censored data. In this post we will use one such method, called the [Kaplan-Meier estimator](https://en.wikipedia.org/wiki/Kaplan%E2%80%93Meier_estimator), to estimate the survival function and construct the survival curve for an NFL career. ## A brief comment on the data used I used the draft data scraped from my [previous post](http://savvastjortjoglou.com/nfl-draft.html). The duration of a player's career is just the difference between "To" value from the [PFR draft table](http://www.pro-football-reference.com/years/2015/draft.htm) and the year the player was drafted. Players were considered active, if there name was in bold. 
However there are may be some players who are retired that PFR still considers active (e.g. Mike Kafka). You can check out how I prepared the data in [this Jupyter notebook](https://github.com/savvastj/nfl_survival_analysis/blob/master/Data_Prep.ipynb). Let me know if you see any issues/mistakes I've made. # What is the Survival Function? The [survival function](https://en.wikipedia.org/wiki/Survival_function), $S(t)$, of a population is defined as follows: $$S(t) = Pr(T > t)$$ Capital $T$ is a [random variable](https://www.khanacademy.org/math/probability/random-variables-topic/random-variables-prob-dist/v/random-variables) that represents a subject's survival time. In our case $T$ represents an NFL player's career length. Lower case $t$ represents a specific time of interest for $T$. In our analysis the $t$ represents a specific number of years played. In other words the survival function just gives us the probability that someone survives longer than (or at least as long as) a specified value of time, $t$. So in the context of our analysis, $S(3)$ will provide us the probability that an NFL career lasts longer than 3 years. # What is the Kaplan-Meier estimator? To estimate the survival function of NFL players we will use the Kaplan-Meier estimator. The Kaplan-Meier estimator is defined by the following product (from the [`lifelines` documentation](https://lifelines.readthedocs.io/en/latest/Intro%20to%20lifelines.html#estimating-the-survival-function-using-kaplan-meier)): $$\hat{S}(t) = \prod_{t_i \lt t} \frac{n_i - d_i}{n_i}$$ where $d_i$ are the number of death events at time $t$ and $n_i$ is the number of subjects at risk of death just prior to time $t$. We will walk through a simple example in a bit in order to get a better understanding of the above definition. # Estimating the Survival Function of NFL Players To estimate the survival function of NFL players we will be using the [`lifelines` library](https://lifelines.readthedocs.io/en/latest/index.html). 
It provides a user friendly interface for survival analyis using Python. Lets get started by importing what we need and reading in the data. ``` %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from lifelines import KaplanMeierFitter draft_df = pd.read_csv("data/nfl_survival_analysis_data.csv") # set some plotting aesthetics, similar to ggplot sns.set(palette = "colorblind", font_scale = 1.35, rc = {"figure.figsize": (12,9), "axes.facecolor": ".92"}) draft_df.head() ``` The columns of interest for our analysis are the *Duration* and *Retired* columns. The *Duration* column represents the number of years a player played in the NFL. The *Retired* column represents whether the player retired from the NFL or not. 1 indicates that he is retired, while 0 indicates that he is still an active player. To calculate the Kaplan-Meier estimate we will need to create a `KaplanMeierFitter` object. ``` kmf = KaplanMeierFitter() ``` We can then fit the data by calling the `KaplanMeierFitter`s `fit` method. ``` # The 1st arg accepts an array or pd.Series of individual survival times # The 2nd arg accepts an array or pd.Series that indicates if the event # interest (or death) occured. kmf.fit(durations = draft_df.Duration, event_observed = draft_df.Retired) ``` After fitting our data we can access the event table that contains a bunch of information regarding the subjects (the NFL players) at each time period. ``` kmf.event_table ``` The *removed* column contains the number of observations removed during that time period, whether due to death (the value in the *observed* column) or censorship. So the *removed* column is just the sum of the *observed* and *censorship* columns. The *entrance* column tells us whether any new subjects entered the population at that time period. 
Since all the players we are studying start at $time = 0$ (the moment they were drafted), the *entrance* value is 15,592 at that time and 0 for all other times. The *at_risk* column contains the number of subjects that are still alive during a given time. The value for *at_risk* at $time = 0$, is just equal to the *entrance* value. For the remaining time periods, the *at_risk* value is equal to the difference between the time previous period's *at_risk* value and *removed* value, plus the current period's *entrance* value. For example for $time = 1$, the number of subject's *at risk* is 10,995 which is equal to 15,592 (the previous *at_risk* value) - 4,597 (the previous *removed* value) + 0 (the current period's *entrance* value). Since we have access to the survival table we can calculate the survival probability at different times "by hand." Let us take a look at the definition of the Kaplan-Meier Estimate again: $$\hat{S}(t) = \prod_{t_i \lt t} \frac{n_i - d_i}{n_i}$$ where $d_i$ are the number of death events at time $t$ and $n_i$ is the number of subjects at risk of death just prior to time $t$. What the above essentially tells us is that the value of the survival function for time $t$, is the product of the survival probabilities for all individual time periods leading up to time $t$. We can define the survival probability for an individual time period as follows: $$S_t = \frac{\substack{\text{Number of subjects} \\ \text{at risk at the start}} - \substack{\text{Number of subjects} \\ \text{that died}}}{\substack{\text{Number of subjects} \\ \text{at risk at the start}}}$$ **NOTE** the number of deaths in the above formula does not include the number of censored observations. Lets walk through a simple example and calculate the the probability that an NFL career lasts longer than 2 years. First we calculate the individual survival probabilities for $t = 0$, $t = 1$, and $t = 2$. 
Here's the calculation for the survival probability time for $t = 0$: $$S_0 = \frac{\substack{\text{Number of players at risk at the start} \\ \text{(i.e. Number of players drafted)}} - \substack{\text{Number of players} \\ \text{that immediately failed}}}{\substack{\text{Number of players at risk at the start} \\ \text{(i.e. Number of players drafted)}}} = \frac{15,592 - 4,504}{15,592} = \frac{11,088}{15,592} \approx 0.711$$ And the code for the calculation: ``` # get the values for time = 0 from the survival table event_at_0 = kmf.event_table.iloc[0, :] # now calculate the survival probability for t = 0 surv_for_0 = (event_at_0.at_risk - event_at_0.observed) / event_at_0.at_risk surv_for_0 ``` What the above means is that about 71.1% of players drafted make it on to the field. Now the individual survival probability for $t = 1$: $$S_1 = \frac{\substack{\text{Number of players} \\ \text{that survive the draft}} - \substack{\text{Number of players} \\ \text{that failed in the 1st year}}}{\substack{\text{Number of players} \\ \text{that survive the draft}}} = \frac{10,995 - 1,076}{10,995} = \frac{9,919}{10,995} \approx 0.902$$ ``` # Calculate the survival probability for t = 1 event_at_1 = kmf.event_table.iloc[1, :] surv_for_1 = (event_at_1.at_risk - event_at_1.observed) / event_at_1.at_risk surv_for_1 ``` The value for $S_1$ represents the conditional probability that if a player does not immediately fail once drafted, then he has a 90.2% chance of playing 1 year of football. 
Below is the calculation for $S_2$: $$S_2 = \frac{\substack{\text{Number of players that survive the} \\ \text{1st year and are entering the 2nd year}} - \substack{\text{Number of players} \\ \text{that failed in the 2nd year}}}{\substack{\text{Number of players that survive the} \\ \text{1st year and are entering the 2nd year}}} = \frac{9,685 - 1,176}{9,685} = \frac{8,509}{9,685} \approx 0.879$$ ``` # Calculate the survival probability for t = 2 event_at_2 = kmf.event_table.iloc[2, :] surv_for_2 = (event_at_2.at_risk - event_at_2.observed) / event_at_2.at_risk surv_for_2 ``` $S_2$ also represents a conditional probability. It is the probability that a player plays in their 2nd year given that he did not retire after his 1st year. This ends up being about 87.9%. Finally to calculate the probability that an NFL career will last more than 2 years, we just multiply the three individual survival probabilities: $$S(2) = S_0 \times S_1 \times S_2 = \frac{11,088}{15,592} \times \frac{9,919}{10,995} \times \frac{8,509}{9,685} \approx 0.564$$ ``` # The probability that an NFL player has a career longer than 2 years surv_after_2 = surv_for_0 * surv_for_1 * surv_for_2 surv_after_2 ``` So we see that drafted players have about a 56.4% chance of making it past their 2nd year, or having a career as long as 2 years. Hopefully going through that short example gives you a better idea of how the Kaplan-Meier estimator works. Our `KaplanMeierFitter` object has already done all of the above calculations for us. We can get the survival probability after a given time by simply using the `predict` method. So to get the value for $S(2)$ we just pass in 2 into the `predict` method. ``` kmf.predict(2) ``` That's pretty close to the value we calculated by hand. (I'm not sure why they aren't exactly the same. Possibly a rounding issue? If you do know why please let me know). The `predict` method can also handle an array of numbers, returning an array of probabilities. 
``` # The survival probabilities of NFL players after 1, 3, 5, and 10 yrs played kmf.predict([1,3,5,10]) ``` To get the full list of estimated probabilities from our `KaplanMeierFitter`, access the `survival_function_` attribute. ``` kmf.survival_function_ ``` The `median_` attribute also provides us the number of years where on average 50% of players are out of the league. ``` kmf.median_ ``` ## Plotting the Kaplan-Meier Estimate Plotting the Kaplan-Meier estimate (along with its confidence intervals) is pretty straightfoward. All we need to do is call the `plot` method. ``` # plot the KM estimate kmf.plot() # Add title and y-axis label plt.title("The Kaplan-Meier Estimate for Drafted NFL Players\n(1967-2015)") plt.ylabel("Probability a Player is Still Active") plt.show() ``` The first thing thing that you should notice is that the Kaplan-Meier estimate is a step function. Each horizontal line represents the probability that a player is still active after a given time $t$. For example, when $t = 0$, the probability that a player is still active after that point is about 71%. ### Plotting the Kaplan-Meier Estimate by Position Before we plot the career lengths by position, lets clean up some of the data. We will merge and drop some of the player positions in order to make the plotting a bit more manageable. 
``` draft_df.Pos.unique() # check out all the different positions draft_df.Pos.value_counts() # get a count for each position # Relabel/Merge some of the positions # Set all HBs to RB draft_df.loc[draft_df.Pos == "HB", "Pos"] = "RB" # Set all Safeties and Cornernbacks to DBs draft_df.loc[draft_df.Pos.isin(["SS", "FS", "S", "CB"]), "Pos"] = "DB" # Set all types of Linebackers to LB draft_df.loc[draft_df.Pos.isin(["OLB", "ILB"]), "Pos"] = "LB" # drop players from the following positions [FL, E, WB, KR, LS, OL] # get the row indices for players with undesired postions idx = draft_df.Pos.isin(["FL", "E", "WB", "KR", "LS", "DL", "OL"]) # keep the players that don't have the above positions draft_df_2 = draft_df.loc[~idx, :] # check the number of positions in order to decide # on the plotting grid dimiensions len(draft_df_2.Pos.unique()) ``` Now that we have the data organized, lets plot the Kaplan-Meier estimate for each position. I've commented the code below to walk you through the process of plotting each position in a 5x3 plotting grid. 
``` # create a new KMF object kmf_by_pos = KaplanMeierFitter() duration = draft_df_2.Duration observed = draft_df_2.Retired # Set the order that the positions will be plotted positions = ["QB", "RB", "WR", "TE", "T", "G", "C", "DE", "DT", "NT", "LB", "DB", "FB", "K", "P"] # Set up the the 5x3 plotting grid by creating figure and axes objects # Set sharey to True so that each row of plots share the left most y-axis labels fig, axes = plt.subplots(nrows = 5, ncols = 3, sharey = True, figsize=(12,15)) # flatten() creates a 1-D array of the individual axes (or subplots) # that we will plot on in our grid # We zip together the two 1-D arrays containing the positions and axes # so we can iterate over each postion and plot its KM estimate onto # its respective axes for pos, ax in zip(positions, axes.flatten()): # get indices for players with the matching position label idx = draft_df_2.Pos == pos # fit the kmf for the those players kmf_by_pos.fit(duration[idx], observed[idx]) # plot the KM estimate for that position on its respective axes kmf_by_pos.plot(ax=ax, legend=False) # place text indicating the median for the position # the xy-coord passed in represents the fractional value for each axis # for example (.5, .5) places text at the center of the plot ax.annotate("Median = {:.0f} yrs".format(kmf_by_pos.median_), xy = (.47, .85), xycoords = "axes fraction") # get rid the default "timeline" x-axis label set by kmf.plot() ax.set_xlabel("") # label each plot by its position ax.set_title(pos) # set a common x and y axis across all plots ax.set_xlim(0,25) ax.set_ylim(0,1) # tighten up the padding for the subplots fig.tight_layout() # https://stackoverflow.com/questions/16150819/common-xlabel-ylabel-for-matplotlib-subplots # set a common x-axis label fig.text(0.5, -0.01, "Timeline (Years)", ha="center") # set a common y-axis label fig.text(-0.01, 0.5, "Probability That a Player is Still Active", va="center", rotation="vertical") # add the title for the whole plot 
fig.suptitle("Survival Curve for each NFL Position\n(Players Drafted from 1967-2015)", fontsize=20) # add some padding between the title and the rest of the plot to avoid overlap fig.subplots_adjust(top=0.92) plt.show() ``` ## Checking the Conditional Survival Time Another interesting attribute in our `KaplanMeierFitter` is the `conditional_time_to_event_`. It is a `DataFrame` that contains the estimated median remaining lifetime, conditioned on surviving up to time $t$. So from the table below we see that if a player is in the league for 1 year, their expected remaining career length is 5 years. Please note that some of the conditional survival times for later time values are a bit funky due to the smaller sample sizes of those time periods. ``` kmf._conditional_time_to_event_() ``` # Resources Here are the resources I used to help write up this post and learn about survival analysis: ## Papers, Articles, and Documentation - [The `lifelines` documentation](https://lifelines.readthedocs.io/en/latest/index.html) - [The PDF to the original paper by Kapalan and Meier](http://www.csee.wvu.edu/~xinl/library/papers/math/statistics/kaplan.pdf) - [Survival Analysis: A Self Learning Text](https://www.amazon.com/Survival-Analysis-Self-Learning-Statistics-Biology/dp/1441966455) - [A Practical Guide to Understanding Kaplan-Meier Curves](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3932959/) - [Understanding survival analysis: Kaplan-Meier estimate](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3059453/) - [What is Survival Analysis (PDF)](https://www.cscu.cornell.edu/news/statnews/stnews78.pdf) - [A short article by Kaplan](http://www.garfield.library.upenn.edu/classics1983/A1983QS51100001.pdf) ## Videos - [Lifelines: Survival Analysis in Python](https://www.youtube.com/watch?v=XQfxndJH4UA), by Cameron Davidson-Pilon (the creator of the `lifelines` library) - [Survival Analysis in Python and R](https://www.youtube.com/watch?v=fli-yE5grtY), by Linda Uruchurtu As always you can 
find my code and data on [github](https://github.com/savvastj/nfl_survival_analysis). Please let me know if you see any mistakes/issues or have any suggestions on improving this post.
github_jupyter
# Import Modules ``` import os print(os.getcwd()) import sys import pandas as pd import numpy as np from pymatgen.io.ase import AseAtomsAdaptor # ######################################################### from methods import get_df_dft # ######################################################### # from local_methods import XRDCalculator from local_methods import get_top_xrd_facets from local_methods import compare_facets_for_being_the_same ``` # Script Inputs ``` verbose = True # verbose = False ``` # Read Data ``` df_dft = get_df_dft() print("df_dft.shape:", df_dft.shape[0]) ``` ``` # TEMP # df_dft = df_dft.sample(n=3) # bulk_id_i = "64cg6j9any" # bulk_id_i = "zwvqnhbk7f" # bulk_id_i = "8p8evt9pcg" bulk_id_i = "b5cgvsb16w" # df_dft = df_dft.loc[[bulk_id_i]] ``` # Main loop ``` from methods import get_df_xrd df_xrd_old = get_df_xrd() print( "Number of rows in df_xrd:", df_xrd_old.shape[0] ) # df_xrd_old.drop_duplicates( # assert False for i_cnt, (id_unique_i, row_i) in enumerate(df_dft.iterrows()): data_dict_i = dict() if verbose: print(40 * "=") print(str(i_cnt).zfill(3), "id_unique_i:", id_unique_i) # ##################################################### atoms_i = row_i.atoms atoms_stan_prim_i = row_i.atoms_stan_prim # ##################################################### from methods import get_df_xrd df_xrd_old = get_df_xrd() # if not id_unique_i in df_xrd_old.index: if id_unique_i in df_xrd_old.index: if verbose: print("Already computed, skipping") else: # ##################################################### # df_xrd_i = get_top_xrd_facets(atoms=atoms_stan_prim_i) xrd_out_dict = get_top_xrd_facets(atoms=atoms_stan_prim_i) df_xrd_all = xrd_out_dict["df_xrd"] df_xrd_unique = xrd_out_dict["df_xrd_unique"] df_xrd_i = df_xrd_unique # Collect all facets into a list all_facets = [] for i in df_xrd_all.facets: all_facets.extend(i) all_facets = list(set(all_facets)) # df_xrd_i_1 = df_xrd_i[df_xrd_i.y_norm > 30].iloc[0:10] df_xrd_i_1 = df_xrd_i[df_xrd_i.y_norm > 
10].iloc[0:15] top_facets_i = [] facet_rank_list = [] for i_cnt, i in enumerate(df_xrd_i_1.facets.tolist()): top_facets_i.extend(i) rank_list_i = [i_cnt for i in range(len(i))] # print(rank_list_i) facet_rank_list.extend(rank_list_i) # top_facets_i = facets_list num_top_facets = len(top_facets_i) if verbose: tmp = [len(i) for i in top_facets_i] #print(tmp) # ################################################# data_dict_i["id_unique"] = id_unique_i data_dict_i["top_facets"] = top_facets_i data_dict_i["facet_rank"] = facet_rank_list data_dict_i["num_top_facets"] = num_top_facets data_dict_i["all_xrd_facets"] = all_facets # ################################################# # ################################################# # Creating df_xrd with one row and combine it with df_xrd in file data_dict_list = [] data_dict_list.append(data_dict_i) df_xrd_row = pd.DataFrame(data_dict_list) df_xrd_row = df_xrd_row.set_index("id_unique", drop=False) df_xrd_new = pd.concat([ df_xrd_row, df_xrd_old, ], axis=0) # Pickling data ################################### import os; import pickle directory = "out_data" if not os.path.exists(directory): os.makedirs(directory) with open(os.path.join(directory, "df_xrd.pickle"), "wb") as fle: pickle.dump(df_xrd_new, fle) # ################################################# # # ######################################################### # df_xrd = pd.DataFrame(data_dict_list) # df_xrd = df_xrd.set_index("id_unique", drop=False) # # ######################################################### assert False from methods import get_df_xrd df_xrd_tmp = get_df_xrd() df_xrd_tmp ``` ``` # df_xrd_row # df_xrd_old # Saving data to pickle # # Pickling data ########################################### # import os; import pickle # directory = "out_data" # if not os.path.exists(directory): os.makedirs(directory) # with open(os.path.join(directory, "df_xrd.pickle"), "wb") as fle: # pickle.dump(df_xrd, fle) # # 
######################################################### # def compare_facets_for_being_the_same( # facet_0, # facet_1, # ): # """ # Checks whether facet_0 and facet_1 differ only by an integer multiplicative. # """ # # ######################################################### # facet_j_abs = [np.abs(i) for i in facet_j] # facet_j_sum = np.sum(facet_j_abs) # # ######################################################### # facet_k_abs = [np.abs(i) for i in facet_k] # facet_k_sum = np.sum(facet_k_abs) # # ######################################################### # if facet_j_sum > facet_k_sum: # # facet_j_abs / facet_k_abs # facet_larger = facet_j_abs # facet_small = facet_k_abs # else: # facet_larger = facet_k_abs # facet_small = facet_j_abs # # ######################################################### # facet_frac = np.array(facet_larger) / np.array(facet_small) # something_wrong = False # all_terms_are_whole_nums = True # for i_cnt, i in enumerate(facet_frac): # # print(i.is_integer()) # if np.isnan(i): # if facet_j_abs[i_cnt] != 0 or facet_k_abs[i_cnt] != 0: # something_wrong = True # print("Not good, these should both be zero") # elif not i.is_integer(): # all_terms_are_whole_nums = False # # print("Not a whole number here") # duplicate_found = False # if all_terms_are_whole_nums and not something_wrong: # duplicate_found = True # print("Found a duplicate facet here") # return(duplicate_found) # # duplicate_facet_found = \ # facet_j = (1, 0, 1) # facet_l = (3, 0, 1) # compare_facets_for_being_the_same(facet_j, facet_l) # facet_0 = (1, 0, 1) # facet_1 = (3, 0, 1) # # def compare_facets_for_being_the_same( # # facet_0, # # facet_1, # # ): # """ # Checks whether facet_0 and facet_1 differ only by an integer multiplicative. 
# """ # #| - compare_facets_for_being_the_same # # ######################################################### # facet_j = facet_0 # facet_k = facet_1 # # ######################################################### # facet_j_abs = [np.abs(i) for i in facet_j] # facet_j_sum = np.sum(facet_j_abs) # # ######################################################### # facet_k_abs = [np.abs(i) for i in facet_k] # facet_k_sum = np.sum(facet_k_abs) # # ######################################################### # if facet_j_sum > facet_k_sum: # # facet_j_abs / facet_k_abs # facet_larger = facet_j_abs # facet_small = facet_k_abs # else: # facet_larger = facet_k_abs # facet_small = facet_j_abs # # ######################################################### # facet_frac = np.array(facet_larger) / np.array(facet_small) # # ##################################################### # something_wrong = False # all_terms_are_whole_nums = True # # ##################################################### # div_ints = [] # # ##################################################### # for i_cnt, i in enumerate(facet_frac): # # print(i.is_integer()) # if np.isnan(i): # if facet_j_abs[i_cnt] != 0 or facet_k_abs[i_cnt] != 0: # something_wrong = True # print("Not good, these should both be zero") # elif not i.is_integer() or i == 0: # all_terms_are_whole_nums = False # # print("Not a whole number here") # elif i.is_integer(): # div_ints.append(int(i)) # all_int_factors_are_same = False # if len(list(set(div_ints))) == 1: # all_int_factors_are_same = True # duplicate_found = False # if all_terms_are_whole_nums and not something_wrong and all_int_factors_are_same: # duplicate_found = True # # print("Found a duplicate facet here") # # return(duplicate_found) # #__| # print("duplicate_found:", duplicate_found) # facet_frac # all_terms_are_whole_nums # something_wrong # # ######################################################### # indices_to_drop = [] # # ######################################################### # for 
ind_i, row_i in df_xrd_unique.iterrows(): # # ##################################################### # facets_i = row_i.facets # # ##################################################### # for facet_j in facets_i: # for ind_k, row_k in df_xrd_unique.iterrows(): # # ############################################# # facets_k = row_k.facets # # ############################################# # for facet_l in facets_k: # if facet_j == facet_l: # continue # else: # duplicate_facet_found = \ # compare_facets_for_being_the_same(facet_j, facet_l) # if duplicate_facet_found: # # print(duplicate_facet_found, facet_j, facet_l) # if np.sum(np.abs(facet_j)) > np.sum(np.abs(facet_l)): # indices_to_drop.append(ind_i) # # print(ind_i) # else: # indices_to_drop.append(ind_k) # # print(ind_k) # # ######################################################### # indices_to_drop = list(set(indices_to_drop)) # # ######################################################### # df_xrd_unique_1 = df_xrd_unique.drop(index=indices_to_drop) # df_xrd_i_1 # top_facets_i = [] # facet_rank_list = [] # for i_cnt, i in enumerate(df_xrd_i_1.facets.tolist()): # top_facets_i.extend(i) # rank_list_i = [i_cnt for i in range(len(i))] # print(rank_list_i) # facet_rank_list.extend(rank_list_i) # df_xrd_i_1 # facet_rank_list # df_xrd # df_xrd_all # [0, 10, 22, 29, 30] # [10, 29, 22, 30] # df_xrd_unique = df_xrd_unique.loc[[2, 19]] # df_xrd_unique = df_xrd_unique.loc[[2, 22]] # xrd_out_dict # df_xrd_unique ```
github_jupyter
<i>Copyright (c) Microsoft Corporation.</i> <i>Licensed under the MIT License.</i> # ARIMA: Autoregressive Integrated Moving Average This notebook provides an example of how to train an ARIMA model to generate point forecasts of product sales in retail. We will train an ARIMA based model on the Orange Juice dataset. An ARIMA, which stands for AutoRegressive Integrated Moving Average, model can be created using an `ARIMA(p,d,q)` model within `statsmodels` library. In this notebook, we will be using an alternative library `pmdarima`, which allows us to automatically search for optimal ARIMA parameters, within a specified range. More specifically, we will be using `auto_arima` function within `pmdarima` to automatically discover the optimal parameters for an ARIMA model. This function wraps `ARIMA` and `SARIMAX` models of `statsmodels` library, that correspond to non-seasonal and seasonal model space, respectively. In an ARIMA model there are 3 parameters that are used to help model the major aspects of a times series: seasonality, trend, and noise. These parameters are: - **p** is the parameter associated with the auto-regressive aspect of the model, which incorporates past values. - **d** is the parameter associated with the integrated part of the model, which effects the amount of differencing to apply to a time series. - **q** is the parameter associated with the moving average part of the model., If our data has a seasonal component, we use a seasonal ARIMA model or `ARIMA(p,d,q)(P,D,Q)m`. In that case, we have an additional set of parameters: `P`, `D`, and `Q` which describe the autoregressive, differencing, and moving average terms for the seasonal part of the ARIMA model, and `m` refers to the number of periods in each season. We provide a [quick-start ARIMA example](../00_quick_start/auto_arima_forecasting.ipynb), in which we explain the process of using ARIMA model to forecast a single time series, and analyze the model performance. 
Please take a look at this notebook for more information. In this notebook, we will train an ARIMA model on multiple splits (round) of the train/test data. ## Global Settings and Imports ``` import os import sys import math import warnings import itertools import numpy as np import pandas as pd import scrapbook as sb from datetime import datetime from pmdarima.arima import auto_arima from fclib.common.utils import git_repo_path, module_exists from fclib.common.plot import plot_predictions_with_history from fclib.evaluation.evaluation_utils import MAPE from fclib.dataset.ojdata import download_ojdata, split_train_test, complete_and_fill_df pd.options.display.float_format = "{:,.2f}".format np.set_printoptions(precision=2) warnings.filterwarnings("ignore") print("System version: {}".format(sys.version)) ``` ## Parameters Next, we define global settings related to the model. We will use historical weekly sales data only, without any covariate features to train the ARIMA model. The model parameter ranges are provided in params. These are later used by the `auto_arima()` function to search the space for the optimal set of parameters. To increase the space of models to search over, increase the `max_p` and `max_q` parameters. > NOTE: Our data does not show a strong seasonal component (as demonstrated in data exploration example notebook), so we will not be searching over the seasonal ARIMA models. To learn more about the seasonal ARIMA models, please take a look at the quick start ARIMA notebook, referenced above in the introduction. 
``` # Use False if you've already downloaded and split the data DOWNLOAD_SPLIT_DATA = True # Data directory DATA_DIR = os.path.join(git_repo_path(), "ojdata") # Forecasting settings N_SPLITS = 5 HORIZON = 2 GAP = 2 FIRST_WEEK = 40 LAST_WEEK = 156 # Parameters of ARIMA model params = { "seasonal": False, "start_p": 0, "start_q": 0, "max_p": 5, "max_q": 5, } # Run notebook on a subset of stores (to reduce the run time) STORE_SUBSET = True ``` ## Data Preparation We need to download the Orange Juice data and split it into training and test sets. By default, the following cell will download and split the data. If you've already done so, you may skip this part by switching `DOWNLOAD_SPLIT_DATA` to `False`. We store the training data and test data using dataframes. The training data includes `train_df` and `aux_df` with `train_df` containing the historical sales up to week 135 (the time we make forecasts) and `aux_df` containing price/promotion information up until week 138. Here we assume that future price and promotion information up to a certain number of weeks ahead is predetermined and known. In our example, we will be using historical sales only, and will not be using the `aux_df` data. The test data is stored in `test_df` which contains the sales of each product in week 137 and 138. Assuming the current week is week 135, our goal is to forecast the sales in week 137 and 138 using the training data. There is a one-week gap between the current week and the first target week of forecasting as we want to leave time for planning inventory in practice. The settings of the forecast problem are defined in the `fclib.dataset.ojdata.split_train_test` function. We can change this setting (e.g., modify the horizon of the forecast or the range of the historical data) by passing different parameters to this function. Below, we split the data into `n_splits=N_SPLITS` splits, using the forecasting settings listed above in the *Parameters* section.
``` if DOWNLOAD_SPLIT_DATA: download_ojdata(DATA_DIR) train_df_list, test_df_list, _ = split_train_test( DATA_DIR, n_splits=N_SPLITS, horizon=HORIZON, gap=GAP, first_week=FIRST_WEEK, last_week=LAST_WEEK, write_csv=True, ) print("Finished data downloading and splitting.") ``` To create training data and test data for multi-round forecasting, we pass a number greater than `1` to `n_splits` parameter in `split_train_test()` function. Note that the forecasting periods we generate in each test round are **non-overlapping**. This allows us to evaluate the forecasting model on multiple rounds of data, and get a more robust estimate of our model's performance. For visual demonstration, this is what the time series splits would look like for `N_SPLITS = 5`, and using other settings as above: ![Multi split](../../../../assets/time_series_split_multiround.jpg) ### Process training data Our time series data is not complete, since we have missing sales for some stores/products and weeks. We will fill in those missing values by propagating the last valid observation forward to next available value. We will define functions for data frame processing, then use these functions within a loop that loops over each forecasting rounds. Note that our time series are grouped by `store` and `brand`, while `week` represents a time step, and `logmove` represents the value to predict. ``` def process_training_df(train_df): """Process training data frame. Keeps only the key/time/target columns and completes the store x brand x week grid via ``complete_and_fill_df``, forward-filling missing sales. """ train_df = train_df[["store", "brand", "week", "logmove"]] store_list = train_df["store"].unique() brand_list = train_df["brand"].unique() # NOTE(review): range() excludes its stop value, so the last training week # (max(train_df.week)) is omitted from the filled grid; process_test_df uses # max(...) + 1 -- confirm whether this off-by-one is intentional. train_week_list = range(FIRST_WEEK, max(train_df.week)) train_filled = complete_and_fill_df(train_df, stores=store_list, brands=brand_list, weeks=train_week_list) return train_filled ``` ### Process test data Let's now process the test data. Note that, in addition to filling out missing values, we also convert unit sales from logarithmic scale to the counts.
We will do model training on the log scale, due to improved performance, however, we will transform the test data back into the unit scale (counts) by applying `math.exp()`, so that we can evaluate the performance on the unit scale. ``` def process_test_df(test_df): """Process test data frame.""" test_df["actuals"] = test_df.logmove.apply(lambda x: round(math.exp(x))) test_df = test_df[["store", "brand", "week", "actuals"]] store_list = test_df["store"].unique() brand_list = test_df["brand"].unique() test_week_list = range(min(test_df.week), max(test_df.week) + 1) test_filled = complete_and_fill_df(test_df, stores=store_list, brands=brand_list, weeks=test_week_list) return test_filled ``` ## Model training Now let's run model training across all the stores and brands, and across all rounds. We will re-run the same code to automatically search for the best parameters, simply wrapped in a for loop iterating over stores and brands. We will use [Ray](https://ray.readthedocs.io/en/latest/#) to distribute the computation to the cores available on your machine if Ray is installed. Otherwise, we will train the models for different stores, brands, and rounds sequentially. At the time we developed this example, Ray only supports Linux and MacOS. Thus, sequential training will be used on Windows. In the cells below, we first define a function that trains an ARIMA model for a specific store-brand-round. Then, we use the following to leverage Ray: - `ray.init()` will start all the relevant Ray processes - we define a function to run an ARIMA model on a single brand and single store. To turn this function into a function that can be executed remotely, we declare the function with the `@ray.remote` decorator. - `ray.get()` collects the results, and `ray.shutdown()` will stop Ray. It will take around 4.5 minutes to run the below cell for 5 rounds on a machine with 4 cores and about 2.7 minutes on a machine with 6 cores.
To speed up the execution, we model only a subset of twenty stores in each round. To change this behavior, and run ARIMA modeling over *all stores and brands*, switch the boolean indicator `STORE_SUBSET` to `False` under the *Parameters* section on top. ``` def train_store_brand(train, test, store, brand, split): train_ts = train.loc[(train.store == store) & (train.brand == brand)] train_ts = np.array(train_ts["logmove"]) model = auto_arima( train_ts, seasonal=params["seasonal"], start_p=params["start_p"], start_q=params["start_q"], max_p=params["max_p"], max_q=params["max_q"], stepwise=True, error_action="ignore", ) model.fit(train_ts) preds = model.predict(n_periods=GAP + HORIZON - 1) predictions = np.round(np.exp(preds[-HORIZON:])) test_week_list = range(min(test.week), max(test.week) + 1) pred_df = pd.DataFrame( {"predictions": predictions, "store": store, "brand": brand, "week": test_week_list, "round": split + 1,} ) test_ts = test.loc[(test.store == store) & (test.brand == brand)] return pd.merge(pred_df, test_ts, on=["store", "brand", "week"], how="left") %%time if module_exists("ray"): print("Ray is available. Parallel training will be used. 
\n") import ray import logging # Initialize Ray print("Initializing Ray...") address_info = ray.init(log_to_driver=False, logging_level=logging.ERROR) print("Address information about the processes started by Ray:") print(address_info, "\n") @ray.remote def ray_train_store_brand(train, test, store, brand, split): return train_store_brand(train, test, store, brand, split) # Create an empty df to store predictions result_df = pd.DataFrame(None, columns=["predictions", "store", "brand", "week", "round", "actuals"]) for r in range(N_SPLITS): print(f"{datetime.now().time()} --- Round " + str(r + 1) + " ---") # Process training data set train_df = train_df_list[r].reset_index() train_filled = process_training_df(train_df) # Process test data set test_df = test_df_list[r].reset_index() test_filled = process_test_df(test_df) store_list = train_filled["store"].unique() brand_list = train_filled["brand"].unique() if STORE_SUBSET: store_list = store_list[0:20] # persist input data into Ray shared memory train_filled_id = ray.put(train_filled) test_filled_id = ray.put(test_filled) # train for each store/brand print("Training ARIMA model ...") results = [ ray_train_store_brand.remote(train_filled_id, test_filled_id, store, brand, r) for store, brand in itertools.product(store_list, brand_list) ] result_round = pd.concat(ray.get(results), ignore_index=True) result_df = result_df.append(result_round, ignore_index=True) # Stop Ray ray.shutdown() ``` If Ray is not installed, we will train all the models sequentially as follows. The training time could be several times longer compared with training the models in parallel with Ray. ``` %%time if not module_exists("ray"): print("Ray is not available. Sequential training will be used. 
\n") from tqdm import tqdm # CHANGE to False to model across all stores subset_stores = True # Create an empty df to store predictions result_df = pd.DataFrame(None, columns=["predictions", "store", "brand", "week", "actuals", "round"]) for r in tqdm(range(N_SPLITS)): print("-------- Round " + str(r + 1) + " --------") # Process training data set train_df = train_df_list[r].reset_index() train_filled = process_training_df(train_df) # Process test data set test_df = test_df_list[r].reset_index() test_filled = process_test_df(test_df) print("Training ARIMA model ...") store_list = train_filled["store"].unique() brand_list = train_filled["brand"].unique() if subset_stores: store_list = store_list[0:10] for store, brand in itertools.product(store_list, brand_list): combined_df = train_store_brand(train_filled, test_filled, store, brand, r) result_df = result_df.append(combined_df, ignore_index=True) ``` Note that since `auto_arima` model makes consecutive forecasts from the last time point, we want to forecast the next `n_periods = GAP + HORIZON - 1` points, so that we can account for the GAP, as described in the data setup. ## Model evaluation To evaluate the model, we will use *mean absolute percentage error* or [MAPE](https://en.wikipedia.org/wiki/Mean_absolute_percentage_error). ``` mape_r = result_df.groupby("round").apply(lambda x: MAPE(x.predictions, x.actuals) * 100) print("MAPE values for each forecasting round:") print(mape_r) metric_value = MAPE(result_df.predictions, result_df.actuals) * 100 sb.glue("MAPE", metric_value) print(f"Overall MAPE is {metric_value:.2f} %") ``` The resulting MAPE value is relatively high. As `auto_arima` searches a restricted space of the models, defined by the range of `p` and `q` parameters, we often might not find an optimal model for each time series. 
In addition, when building a model for a large number of time series, it is often difficult to examine each model individually, which would usually help us improve an ARIMA model. Please refer to the [quick start ARIMA notebook](../00_quick_start/auto_arima_forecasting.ipynb) for a more comprehensive evaluation of a single ARIMA model. Now let's plot a few examples of forecasted results. ``` num_samples = 6 min_week = 140 sales = pd.read_csv(os.path.join(DATA_DIR, "yx.csv")) sales["move"] = sales.logmove.apply(lambda x: round(math.exp(x)) if x > 0 else 0) result_df["move"] = result_df.predictions plot_predictions_with_history( result_df, sales, grain1_unique_vals=store_list, grain2_unique_vals=brand_list, time_col_name="week", target_col_name="move", grain1_name="store", grain2_name="brand", min_timestep=min_week, num_samples=num_samples, predict_at_timestep=145, line_at_predict_time=False, title="Prediction results for a few sample time series", x_label="week", y_label="unit sales", random_seed=2, ) ``` ## Additional Reading \[1\] Rob J Hyndman and George Athanasopoulos. 2018. Forecasting: Principles and Practice. Chapter 8 ARIMA models: https://otexts.com/fpp2/arima.html <br> \[2\] Modern Parallel and Distributed Python: A Quick Tutorial on Ray: https://rise.cs.berkeley.edu/blog/modern-parallel-and-distributed-python-a-quick-tutorial-on-ray/ <br>
github_jupyter
# Assignment Algorithms : Part 1 The purpose of this notebook is to explore the fiberassign algorithms and the consequences of introducing different target populations and realistic nominal positioner motions and exclusion zones. Very often people see behavior of the fiberassign code which does not match their intuition and expectations. Usually these tests also use real target data, which further complicates the situation and makes it impossible to determine what effects are due to the target data which effects are just due to geometry. The goal of this notebook is to examine the impacts of the just the DESI geometry applied to synthetic targets with different densities and priorities. For all these cells, we are using a single RA / DEC location on the sky and one or more tiles. To give a more representative spread on our results we do 100 realizations of the simulation for each exercise. ## Imports and Definitions These are global imports and variables used throughout the notebook. ``` import os import sys from collections import OrderedDict import subprocess as sp import numpy as np import matplotlib.pyplot as plt from IPython.display import Image, display import fitsio from desitarget.targetmask import desi_mask from fiberassign import __version__ as fba_version from fiberassign.hardware import ( load_hardware, ) from fiberassign.targets import ( Targets, TargetTree, TargetsAvailable, LocationsAvailable, load_target_table, TARGET_TYPE_SCIENCE, TARGET_TYPE_SKY, TARGET_TYPE_SUPPSKY, TARGET_TYPE_STANDARD ) from fiberassign.tiles import ( load_tiles, ) from fiberassign.assign import ( Assignment, ) from fiberassign.vis import ( plot_assignment_tile, ) # Date used for the focalplane model assign_date = "2020-01-01T00:00:00" # This is a small patch around one DESI pointing patch_ra_min = 148.0 patch_ra_max = 152.0 patch_dec_min = 29.0 patch_dec_max = 33.0 # Tile center (for our co-incident tiles) tile_ra = 150.0 tile_dec = 31.0 # Target densities (table 3.1 of DSR) 
target_density = { "ELG": 2400, "LRG": 350, "QSO-tracer": 170, "QSO-lyman": 90, "FAKE-high": 4000, "standards": 300, "sky": 4000 } # DESITARGET defs target_bitname = { "ELG": "ELG", "LRG": "LRG", "QSO-tracer": "QSO", "QSO-lyman": "QSO", "FAKE-high": "ELG", "standards": "STD_WD", "sky": "SKY" } # Target priorities target_priority = { "ELG": 3000, "LRG": 3200, "QSO-tracer": 3400, "QSO-lyman": 3400, "FAKE-high": 1000, "standards": 0, "sky": 0 } # Target requested number of observations target_numobs = { "ELG": 1, "LRG": 1, "QSO-tracer": 1, "QSO-lyman": 4, "FAKE-high": 1, "standards": 0, "sky": 0 } # Plotting color target_pltcolor = { "ELG": (0.12156862745098039, 0.4666666666666667, 0.7058823529411765), "LRG": (1.0, 0.4980392156862745, 0.054901960784313725), "QSO-tracer": (0.17254901960784313, 0.6274509803921569, 0.17254901960784313), "QSO-lyman": (0.8392156862745098, 0.15294117647058825, 0.1568627450980392), "FAKE-high": (0.5803921568627451, 0.403921568627451, 0.7411764705882353), "standards": (1.0, 0.832, 0.0), "sky": (0.0, 1.0, 1.0) } # Working directory (where input and output files will be written). Change this to wherever you like. workdir = os.path.join(os.environ["HOME"], "scratch", "desi", "tutorials", "fiberassign_part1") # workdir = os.path.join(os.environ["SCRATCH"], "desi", "tutorials", "fiberassign_part1") ``` ## Helper Functions Here we define some functions to simulate tiles and uniform random target distributions, as well as some functions to help with plotting later. ``` def sim_tiles(path, ntile): """Function to generate some co-incident tiles. 
""" tile_dtype = np.dtype([ ("TILEID", "i4"), ("RA", "f8"), ("DEC", "f8"), ("IN_DESI", "i4"), ("PROGRAM", "S6"), ("OBSCONDITIONS", "i4") ]) fdata = np.zeros(ntile, dtype=tile_dtype) for i in range(ntile): fdata[i] = (1234+i, tile_ra, tile_dec, 1, "DARK", 1) if os.path.isfile(path): os.remove(path) fd = fitsio.FITS(path, "rw") header = dict() header["FBAVER"] = fba_version fd.write(fdata, header=header) return def sim_targets(tgtype, tgoffset, path=None, density=5000.0, priority=0, numobs=0, tgbits=0): target_cols = OrderedDict([ ("TARGETID", "i8"), ("RA", "f8"), ("DEC", "f8"), ("DESI_TARGET", "i8"), ("BRICKID", "i4"), ("BRICK_OBJID", "i4"), ("BRICKNAME", "a8"), ("PRIORITY", "i4"), ("SUBPRIORITY", "f8"), ("OBSCONDITIONS", "i4"), ("NUMOBS_MORE", "i4"), ("FIBERFLUX_G", "f4"), ("FIBERFLUX_R", "f4"), ("FIBERFLUX_Z", "f4"), ("FIBERFLUX_IVAR_G", "f4"), ("FIBERFLUX_IVAR_R", "f4"), ("FIBERFLUX_IVAR_Z", "f4") ]) target_dtype = np.dtype([(x, y) for x, y in target_cols.items()]) ndim = np.sqrt(density) nra = int(ndim * (patch_ra_max - patch_ra_min)) ndec = int(ndim * (patch_dec_max - patch_dec_min)) ntarget = nra * ndec fdata = np.zeros(ntarget, dtype=target_dtype) fdata["TARGETID"][:] = tgoffset + np.arange(ntarget) fdata["RA"][:] = np.random.uniform(low=patch_ra_min, high=patch_ra_max, size=ntarget) fdata["DEC"][:] = np.random.uniform(low=patch_dec_min, high=patch_dec_max, size=ntarget) fdata["OBSCONDITIONS"][:] = np.ones(ntarget, dtype=np.int32) fdata["SUBPRIORITY"][:] = np.random.uniform(low=0.0, high=1.0, size=ntarget) sky_mask = desi_mask["SKY"].mask suppsky_mask = desi_mask["SUPP_SKY"].mask std_mask = desi_mask["STD_BRIGHT"].mask if tgtype == TARGET_TYPE_SKY: fdata["PRIORITY"][:] = np.zeros(ntarget, dtype=np.int32) fdata["DESI_TARGET"][:] |= sky_mask elif tgtype == TARGET_TYPE_STANDARD: fdata["PRIORITY"][:] = priority * np.ones(ntarget, dtype=np.int32) fdata["DESI_TARGET"][:] |= std_mask elif tgtype == TARGET_TYPE_SUPPSKY: fdata["PRIORITY"][:] = np.zeros(ntarget, 
dtype=np.int32) fdata["DESI_TARGET"][:] |= suppsky_mask elif tgtype == TARGET_TYPE_SCIENCE: fdata["PRIORITY"][:] = priority * np.ones(ntarget, dtype=np.int32) fdata["NUMOBS_MORE"][:] = numobs fdata["DESI_TARGET"][:] |= desi_mask[tgbits].mask else: raise RuntimeError("unknown target type") if path is None: # Just return the data table return fdata else: # We are writing the output to a file. Return the number of targets. if os.path.isfile(path): os.remove(path) fd = fitsio.FITS(path, "rw") header = dict() header["FBAVER"] = fba_version fd.write(fdata, header=header) return ntarget def plot_assignment_stats( wd, outname, title, tile_ids, classes, hist_tgassign, hist_tgavail, hist_tgconsid ): fiberbins = 0.01 * np.arange(101) tgbins = { "ELG": np.arange(0, 22220, 220), "LRG": np.arange(0, 3232, 32), "QSO-tracer": np.arange(0, 1515, 15), "QSO-lyman": np.arange(0, 808, 8), "FAKE-high": np.arange(0, 36360, 360), "standards": np.arange(0, 3636, 36), "sky": np.arange(0, 40400, 400) } for tid in tile_ids: # The plots figfiber = plt.figure(figsize=(12, 6)) figtarget = plt.figure(figsize=(12, 12)) # Plot the positioner assignment fractions axfiber = figfiber.add_subplot(1, 1, 1) for tgclass in classes: if np.sum(hist_tgassign[tid][tgclass]) > 0: axfiber.hist( np.array(hist_tgassign[tid][tgclass]) / 5000, fiberbins, align="mid", alpha=0.8, label="{}".format(tgclass), color=target_pltcolor[tgclass] ) axfiber.set_xlabel("Fraction of Assigned Positioners", fontsize="large") axfiber.set_ylabel("Realization Counts", fontsize="large") axfiber.set_ylim(0, 100) axfiber.set_title( "Tile {}: Positioner Assignment : {}".format(tid, title) ) axfiber.legend() figfiber.savefig( os.path.join(wd, "fiberassign_{}_fibers_tile-{}.pdf".format(outname, tid)), dpi=300, format="pdf" ) figfiber.show() nplot = len(classes) pcols = 2 prows = (nplot + 1) // pcols poff = 1 for tgclass in classes: if np.sum(hist_tgavail[tid][tgclass]) == 0: continue axtarget = figtarget.add_subplot(prows, pcols, poff) 
axtarget.hist( np.array(hist_tgavail[tid][tgclass]), tgbins[tgclass], align="mid", alpha=0.4, label="{} Reachable".format(tgclass), color=(0.7, 0.7, 0.7) ) axtarget.hist( np.array(hist_tgconsid[tid][tgclass]), tgbins[tgclass], align="mid", alpha=0.4, label="{} Considered".format(tgclass), color=target_pltcolor[tgclass] ) axtarget.hist( np.array(hist_tgassign[tid][tgclass]), tgbins[tgclass], align="mid", alpha=0.8, label="{} Assigned".format(tgclass), color=target_pltcolor[tgclass] ) axtarget.set_xlabel("Targets Reachable, Considered, and Assigned", fontsize="large") axtarget.set_ylabel("Realization Counts", fontsize="large") axtarget.set_ylim(0, 100) axtarget.legend() poff += 1 figtarget.suptitle( "Tile {}: {}".format(tid, title), fontsize="x-large" ) figtarget.tight_layout(rect=[0, 0, 1, 0.97]) figtarget.savefig( os.path.join(wd, "fiberassign_{}_targets_tile-{}.pdf".format(outname, tid)), dpi=300, format="pdf" ) figtarget.show() def create_histogram_dicts(tiles, classes): hist_tgassign = dict() hist_tgconsid = dict() hist_tgavail = dict() for tid in tiles: hist_tgassign[tid] = dict() hist_tgavail[tid] = dict() hist_tgconsid[tid] = dict() for tgclass in classes: hist_tgassign[tid][tgclass] = list() hist_tgavail[tid][tgclass] = list() hist_tgconsid[tid][tgclass] = list() return (hist_tgassign, hist_tgavail, hist_tgconsid) def accum_histogram_data( tile, classes, tile_assign, tile_avail, id2class, tgobs, hist_tgassign, hist_tgavail, hist_tgconsid, verbose=False ): # The locations that were assigned to a target locs_assigned = [x for x, y in tile_assign.items() if y >= 0] if verbose: print( " tile {}: {} positioners ({:0.1f}%) assigned a target".format( tile, len(locs_assigned), 100*(len(locs_assigned)/5000) ) ) # Compute the numbers of each target class that were assigned and available assign_by_class = dict() avail_by_class = dict() consider_by_class = dict() for tgclass in classes: assign_by_class[tgclass] = list() avail_by_class[tgclass] = list() 
consider_by_class[tgclass] = list() for loc in locs_assigned: tgid = tile_assign[loc] tgstr = id2class(tgid) assign_by_class[tgstr].append(tgid) for avtg in tile_avail[loc]: avtgstr = id2class(avtg) avail_by_class[avtgstr].append(avtg) if avtgstr == "standards" or avtgstr == "sky" or tgobs[avtg] > 0: # This target was actually considered for assignment consider_by_class[avtgstr].append(avtg) # Decrement the obs remaining for the assigned targets for loc in locs_assigned: tgid = tile_assign[loc] tgobs[tgid] -= 1 # Accumulate to the target histograms for tgclass in classes: hist_tgassign[tid][tgclass].append(len(assign_by_class[tgclass])) uniq = np.unique(np.array(avail_by_class[tgclass])) hist_tgavail[tid][tgclass].append(len(uniq)) uniq = np.unique(np.array(consider_by_class[tgclass])) hist_tgconsid[tid][tgclass].append(len(uniq)) return ``` ## Focalplane Coverage The hardware information that fiberassign gets from the `desimodel` package contains the positioner arm lengths and ranges of motion for all positioners. We can use that along with a random sampling of locations to illustrate the regions that are not reachable by any positioner. ``` # Directory for this section wdir = os.path.join(workdir, "focalplane") os.makedirs(wdir, exist_ok=True) # Set the random seed to ensure reproducibility of this cell np.random.seed(123456) # Read hardware properties test_date = assign_date # If you uncomment this line, you will get the commissioning # focalplane with restricted positioner reach. 
#test_date = "2020-04-01T00:00:00" hw = load_hardware(rundate=test_date) # Simulate a single tile and load tfile = os.path.join(wdir, "footprint.fits") sim_tiles(tfile, 1) tiles = load_tiles(tiles_file=tfile) # Generate target table at high density tgdensity = 10000 tgdata = sim_targets( TARGET_TYPE_SCIENCE, 0, density=tgdensity, priority=0, numobs=0, tgbits="SKY" ) print("{} fake targets".format(len(tgdata))) tgs = Targets() load_target_table(tgs, tgdata) # Compute the targets available to all positioners tree = TargetTree(tgs, 0.01) tgsavail = TargetsAvailable(hw, tgs, tiles, tree) del tree # Availability for this single tile tid = tiles.id[0] tavail = tgsavail.tile_data(tid) # Get the unique set of target IDs reachable by *any* positioner reachable = np.unique([x for loc, tgl in tavail.items() for x in tgl]) print("{} reachable targets".format(len(reachable))) # Now get the rows of the original target table that are NOT in this set unreachable_rows = np.where( np.isin(tgdata["TARGETID"], reachable, invert=True) )[0] print("{} un-reachable targets".format(len(unreachable_rows))) # Now we would like to plot the targets that were not reachable. Also # plot the positioner angle ranges. 
locs = np.array(hw.locations) loc_theta_offset = hw.loc_theta_offset loc_theta_min = hw.loc_theta_min loc_theta_max = hw.loc_theta_max loc_phi_offset = hw.loc_phi_offset loc_phi_min = hw.loc_phi_min loc_phi_max = hw.loc_phi_max theta_off = np.array([loc_theta_offset[x]*180/np.pi for x in locs]) theta_min = np.array([loc_theta_min[x]*180/np.pi for x in locs]) theta_max = np.array([loc_theta_max[x]*180/np.pi for x in locs]) phi_off = np.array([loc_phi_offset[x]*180/np.pi for x in locs]) phi_min = np.array([loc_phi_min[x]*180/np.pi for x in locs]) phi_max = np.array([loc_phi_max[x]*180/np.pi for x in locs]) fig = plt.figure(figsize=(12, 8)) ax = fig.add_subplot(1, 2, 1) ax.scatter(locs, theta_off, marker="o", s=0.5, label="theta off") ax.scatter(locs, theta_min, marker="o", s=0.5, label="theta min") ax.scatter(locs, theta_max, marker="o", s=0.5, label="theta max") ax.set_xlabel("Positioner Location", fontsize="large") ax.set_ylabel("Degrees", fontsize="large") ax.legend() ax = fig.add_subplot(1, 2, 2) ax.scatter(locs, phi_off, marker="o", s=0.5, label="phi off") ax.scatter(locs, phi_min, marker="o", s=0.5, label="phi min") ax.scatter(locs, phi_max, marker="o", s=0.5, label="phi max") ax.set_xlabel("Positioner Location", fontsize="large") ax.set_ylabel("Degrees", fontsize="large") ax.legend() plt.show() fig = plt.figure(figsize=(10, 10)) ax = fig.add_subplot(1, 1, 1) ax.set_aspect("equal") center_mm = hw.loc_pos_curved_mm theta_arm = hw.loc_theta_arm phi_arm = hw.loc_phi_arm patrol_buffer = hw.patrol_buffer_mm for loc in hw.locations: patrol_rad = theta_arm[loc] + phi_arm[loc] - patrol_buffer patrol = plt.Circle( (center_mm[loc][0], center_mm[loc][1]), radius=patrol_rad, fc="red", ec="none", alpha=0.1 ) ax.add_artist(patrol) tgxy = hw.radec2xy_multi( tiles.ra[0], tiles.dec[0], tiles.obstheta[0], tgdata["RA"][unreachable_rows], tgdata["DEC"][unreachable_rows], False, 1 ) tgx = np.array([x[0] for x in tgxy]) tgy = np.array([x[1] for x in tgxy]) ax.scatter( tgx, tgy, 
color=(0.6, 0.6, 0.6), marker=".", s=0.01 ) ax.set_xlabel("Millimeters", fontsize="large") ax.set_ylabel("Millimeters", fontsize="large") pfile = os.path.join(wdir, "coverage_{}.pdf".format(tgdensity)) plt.savefig(pfile, dpi=300, format="pdf") plt.close() print("PDF written to {}\n Displaying low-res image inline...".format(pfile)) png_file = os.path.join(wdir, "coverage_{}.png".format(tgdensity)) cmd = 'convert -density 200 {} {}'.format(pfile, png_file) sp.check_call(cmd, stderr=sp.STDOUT, shell=True) img = Image(filename=(png_file)) display(img) ``` ## Effects of Positioner Geometry The exclusion zone around each positioner and its range of motion leads to an average limit on the fraction of targets that can be assigned for a given target density. We can see this intrinsic limit by simulating realizations of a single population of random targets for several densities. The plots of the positioner assignments for the first realization can take a while- you can comment out that if you like. ``` # Directory for this section wdir = os.path.join(workdir, "geometry") os.makedirs(wdir, exist_ok=True) # Set the random seed to ensure reproducibility of this cell np.random.seed(123456) # Read hardware properties hw = load_hardware(rundate=assign_date) # Simulate a single tile and load tfile = os.path.join(wdir, "footprint.fits") sim_tiles(tfile, 1) tiles = load_tiles(tiles_file=tfile) # Accumulation dictionaries (hist_tgassign, hist_tgavail, hist_tgconsid) = \ create_histogram_dicts(tiles.id, list(target_density.keys())) # Consider these 5 classes. target_classes = ["ELG", "LRG", "QSO-tracer", "QSO-lyman", "FAKE-high"] for tgclass in target_classes: tgdensity = target_density[tgclass] print("Working on target density {}...".format(tgdensity)) # For each realization... 
for mc in range(100): # Generate target table tgdata = sim_targets( TARGET_TYPE_SCIENCE, 0, density=tgdensity, priority=target_priority[tgclass], numobs=target_numobs[tgclass], tgbits=target_bitname[tgclass] ) tgs = Targets() load_target_table(tgs, tgdata) # Make a working copy of the obs remaining for all targets, so we can decrement # and track the targets that were considered (which had obsremain > 0) tg_obs = dict() for tgid in tgs.ids(): tgprops = tgs.get(tgid) tg_obs[tgid] = tgprops.obsremain # Create a hierarchical triangle mesh lookup of the targets positions tree = TargetTree(tgs, 0.01) # Compute the targets available to each fiber for each tile. tgsavail = TargetsAvailable(hw, tgs, tiles, tree) # Free the tree del tree # Compute the fibers on all tiles available for each target favail = LocationsAvailable(tgsavail) # Create assignment object asgn = Assignment(tgs, tgsavail, favail) # assignment of science targets asgn.assign_unused(TARGET_TYPE_SCIENCE) # Targets available for all tile / locs tgsavail = asgn.targets_avail() # Get the assignment for this one tile tid = tiles.id[0] tassign = asgn.tile_location_target(tid) # Targets available for this tile tavail = tgsavail.tile_data(tid) # Plot the first realization # THIS IS SLOW- after doing it once you can comment it out to save time. 
# if mc == 0: # pfile = os.path.join(wdir, "assignment_density_{}.pdf".format(tgdensity)) # #print(locs_assigned, flush=True) # plot_assignment_tile( # hw, tgs, tiles.id[0], tiles.ra[0], tiles.dec[0], tiles.obstheta[0], # tassign, tile_avail=tgsavail.tile_data(tiles.id[0]), real_shapes=True, # outfile=pfile, figsize=8 # ) # print("Full resolution PDF written to {}, displaying low-res image inline...".format(pfile)) # png_file = os.path.join(wdir, "assignment_density_{}.png".format(tgdensity)) # cmd = 'convert -density 200 {} {}'.format(pfile, png_file) # sp.check_call(cmd, stderr=sp.STDOUT, shell=True) # img = Image(filename=(png_file)) # display(img) # Accumulate results def target_id_class(target_id): # We are doing one class at a time in this example return tgclass accum_histogram_data( tid, [tgclass], tassign, tavail, target_id_class, tg_obs, hist_tgassign, hist_tgavail, hist_tgconsid ) # Plot it plot_assignment_stats( wdir, "geometry", "One Target Class at a Time", [tiles.id[0]], list(target_density.keys()), hist_tgassign, hist_tgavail, hist_tgconsid ) ``` The previous plots are not "surprising". They simply show that for low (QSO-like) target densities there are fewer collisions and a larger fraction (80-90%) of targets can be assigned, even though most positioners do not get one of these targets. For a medium (LRG-like) target density we have more collisions and around 70% of available targets can be assigned while slightly less than half of the positioners get a target. Going to a higher (ELG-like) density we have far more targets than can be assigned, but there are sufficient numbers that nearly every positioner gets a target. **NOTE: remember that these are uniform spatial target distributions with no physically realistic clustering. 
Clustering of targets reduces the fraction that can be assigned, due to positioner collisions.** ## Effects of Multiple Target Classes The previous section looked at different target densities but assigned each one in isolation. Now we examine the consequences of having 3 target classes with different priorities (and densities). The goal of this section is to demonstrate how priorities affect the resulting assignment. The target locations are still spatially uniform with no clustering. We will also demonstrate explicitly that low priority targets never "take" positioners from high priority targets. In the case of two QSO populations, these have the same `PRIORITY` value. However the fiberassign code currently computes an effective "total priority" for each target that is a combination of the (integer) `PRIORITY`, the (random from 0.0-1.0) `SUBPRIORITY` and the number of remaining observations. See the discussion here: https://github.com/desihub/fiberassign/issues/196 . For now, targets with the same `PRIORITY` and a larger number of observations remaining are given a higher "total priority". ``` # Directory for this section wdir = os.path.join(workdir, "target_classes") os.makedirs(wdir, exist_ok=True) # Set the random seed to ensure reproducibility of this cell np.random.seed(123456) # Read hardware properties hw = load_hardware(rundate=assign_date) # Simulate a single tile and load tfile = os.path.join(wdir, "footprint.fits") sim_tiles(tfile, 1) tiles = load_tiles(tiles_file=tfile) # Consider these 4 classes. target_classes = ["ELG", "LRG", "QSO-tracer", "QSO-lyman"] # Accumulation dictionaries (hist_tgassign, hist_tgavail, hist_tgconsid) = \ create_histogram_dicts(tiles.id, target_classes) # For each realization... for mc in range(100): # Generate target tables separately for each target class so we can combine them later. 
tg_tables = dict() tgid_offset = dict() ntarget = dict() toff = 0 for tgclass in target_classes: tg_tables[tgclass] = sim_targets( TARGET_TYPE_SCIENCE, toff, density=target_density[tgclass], priority=target_priority[tgclass], numobs=target_numobs[tgclass], tgbits=target_bitname[tgclass] ) tgid_offset[tgclass] = toff ntarget[tgclass] = len(tg_tables[tgclass]) toff += ntarget[tgclass] def target_id_class(target_id): """Helper function to go from target ID back to the class""" for cls in target_classes: if (target_id >= tgid_offset[cls]) and (target_id < tgid_offset[cls] + ntarget[cls]): return cls return None # Assign just the QSOs separately, and save for comparison. qso_tgs = Targets() load_target_table(qso_tgs, tg_tables["QSO-tracer"]) load_target_table(qso_tgs, tg_tables["QSO-lyman"]) tree = TargetTree(qso_tgs, 0.01) tgsavail = TargetsAvailable(hw, qso_tgs, tiles, tree) del tree favail = LocationsAvailable(tgsavail) asgn = Assignment(qso_tgs, tgsavail, favail) asgn.assign_unused(TARGET_TYPE_SCIENCE) tid = tiles.id[0] qso_assign = dict(asgn.tile_location_target(tid)) # Now assign all targets all_tgs = Targets() for tgclass in target_classes: load_target_table(all_tgs, tg_tables[tgclass]) # Make a working copy of the obs remaining for all targets, so we can decrement # and track the targets that were considered (which had obsremain > 0) tg_obs = dict() for tgid in all_tgs.ids(): tgprops = all_tgs.get(tgid) tg_obs[tgid] = tgprops.obsremain tree = TargetTree(all_tgs, 0.01) tgsavail = TargetsAvailable(hw, all_tgs, tiles, tree) del tree favail = LocationsAvailable(tgsavail) asgn = Assignment(all_tgs, tgsavail, favail) asgn.assign_unused(TARGET_TYPE_SCIENCE) tid = tiles.id[0] all_assign = asgn.tile_location_target(tid) print( "Realization {:02d}: assigning all targets: {} positioners assigned".format( mc, int(np.sum([1 for x, y in all_assign.items() if y >= 0])) ) ) # Verify that QSO assignment for all targets is identical to the case of assigning only the QSOs qso_pos 
= [x for x, y in qso_assign.items() if y >= 0] mismatch = int(np.sum([1 for x, y in qso_assign.items() if all_assign[x] != y])) if mismatch > 0: print( "Realization {:02d}: WARNING: {} positioners from QSO-only assignment changed!".format( mc, mismatch ) ) # Targets available for all tile / locs tgsavail = asgn.targets_avail() # available for this tile avail = tgsavail.tile_data(tid) # Accumulate results accum_histogram_data( tid, target_classes, all_assign, avail, target_id_class, tg_obs, hist_tgassign, hist_tgavail, hist_tgconsid, verbose=False ) # Plot the results plot_assignment_stats( wdir, "allscience", "All Science Classes for One Tile", [tiles.id[0]], target_classes, hist_tgassign, hist_tgavail, hist_tgconsid ) ``` The exercise above shows several things: 1. The highest "total priority" targets (which also have a low density) are assigned first and nearly all get assigned to a fiber. 2. As the target "total priority" decreases (and the density increases), these targets take up larger fractions of the positioners. 3. During this first-pass assignment, about 98% of positioners get assigned to a science target (remember we are not assigning standards and sky in this excercise). 4. The introduction of lower priority targets **does not** affect the assignment of higher priority targets. Compare the "Fraction of Assigned Positioners" plot to the one from the previous section. You can see that the highest priority class has the same assignment fraction, since it is assigned first. The other target classes have smaller fractions of the positioners because they are forced to use only positioners leftover after the higher priority targets are assigned. ## Effects of Multiple Passes So far we have worked with one tile. However the QSO-lyman population has 4 requested observations. 
If we have multiple observations of the same point on the sky, we would expect these targets to continue to get preferential assignments until their observations remaining equals "1" at which point they have the same observations remaining as the other QSO-tracer population. At that point the random subriority will determine the total priority ordering. And of course positioners will continue to be assigned to all target classes as possible. In the real survey, tiles from multiple "layers" (passes over the footprint) are dithered so that they do not overlap perfectly. For this study of one pointing on the sky, we will use 4 tiles (passes) centered on the same RA / DEC. ``` # Directory for this section wdir = os.path.join(workdir, "multipass") os.makedirs(wdir, exist_ok=True) # Set the random seed to ensure reproducibility of this cell np.random.seed(123456) # Read hardware properties hw = load_hardware(rundate=assign_date) # Simulate 5 coincident tiles and load tfile = os.path.join(wdir, "footprint.fits") sim_tiles(tfile, 4) tiles = load_tiles(tiles_file=tfile) # Consider these 4 classes. target_classes = ["ELG", "LRG", "QSO-tracer", "QSO-lyman"] # Accumulation dictionaries (hist_tgassign, hist_tgavail, hist_tgconsid) = \ create_histogram_dicts(tiles.id, target_classes) # For each realization... 
for mc in range(100): # Generate target tables tg_tables = dict() tgid_offset = dict() ntarget = dict() toff = 0 for tgclass in target_classes: tg_tables[tgclass] = sim_targets( TARGET_TYPE_SCIENCE, toff, density=target_density[tgclass], priority=target_priority[tgclass], numobs=target_numobs[tgclass], tgbits=target_bitname[tgclass] ) tgid_offset[tgclass] = toff ntarget[tgclass] = len(tg_tables[tgclass]) toff += ntarget[tgclass] def target_id_class(target_id): for cls in target_classes: if (target_id >= tgid_offset[cls]) and (target_id < tgid_offset[cls] + ntarget[cls]): return cls return None # Load the targets all_tgs = Targets() for tgclass in target_classes: load_target_table(all_tgs, tg_tables[tgclass]) # Make a working copy of the obs remaining for all targets, so we can decrement # and track the targets that were considered (which had obsremain > 0) tg_obs = dict() for tgid in all_tgs.ids(): tgprops = all_tgs.get(tgid) tg_obs[tgid] = tgprops.obsremain tree = TargetTree(all_tgs, 0.01) tgsavail = TargetsAvailable(hw, all_tgs, tiles, tree) del tree favail = LocationsAvailable(tgsavail) asgn = Assignment(all_tgs, tgsavail, favail) asgn.assign_unused(TARGET_TYPE_SCIENCE) print("Realization {:02d}:".format(mc)) # Accumulate results for all tiles for tid in tiles.id: # Assignment for this tile tile_assign = asgn.tile_location_target(tid) # Targets available for all tile / locs tgsavail = asgn.targets_avail() # available for this tile tile_avail = tgsavail.tile_data(tid) # Accumulate results accum_histogram_data( tid, target_classes, tile_assign, tile_avail, target_id_class, tg_obs, hist_tgassign, hist_tgavail, hist_tgconsid, verbose=True ) # Plot the results plot_assignment_stats( wdir, "multipass", "All Science Classes for {} Passes".format(len(tiles.id)), tiles.id, target_classes, hist_tgassign, hist_tgavail, hist_tgconsid ) ``` As expected, the results from the first pass are identical to the previous exercise. 
This is because all targets have at least one requested observation. In the subsequent passes, we see the impact of some targets having met their required observations. Although the numbers of "reachable" targets remains stable, we can see that the number of targets "considered" for assignment (i.e. that have remaining observations) goes down with subsequent tiles. This is most notable with the QSO-lyman target class that has 4 requested observations per target. ## Sky and Standards After assigning science targets, we must free up a certain number of fibers for calibration standards and sky. ``` # Directory for this section wdir = os.path.join(workdir, "sky-std") os.makedirs(wdir, exist_ok=True) # Set the random seed to ensure reproducibility of this cell np.random.seed(123456) # Read hardware properties hw = load_hardware(rundate=assign_date) # Simulate 5 coincident tiles and load tfile = os.path.join(wdir, "footprint.fits") sim_tiles(tfile, 4) tiles = load_tiles(tiles_file=tfile) # Consider these 4 classes. target_classes = ["ELG", "LRG", "QSO-tracer", "QSO-lyman"] all_classes = list(target_classes) all_classes.extend(["standards", "sky"]) # Accumulation dictionaries (hist_tgassign, hist_tgavail, hist_tgconsid) = \ create_histogram_dicts(tiles.id, all_classes) # For each realization... 
for mc in range(100): # Generate target tables tg_tables = dict() tgid_offset = dict() ntarget = dict() toff = 0 for tgclass in target_classes: tg_tables[tgclass] = sim_targets( TARGET_TYPE_SCIENCE, toff, density=target_density[tgclass], priority=target_priority[tgclass], numobs=target_numobs[tgclass], tgbits=target_bitname[tgclass] ) tgid_offset[tgclass] = toff ntarget[tgclass] = len(tg_tables[tgclass]) toff += ntarget[tgclass] tg_tables["standards"] = sim_targets( TARGET_TYPE_STANDARD, toff, density=target_density["standards"], ) tgid_offset["standards"] = toff ntarget["standards"] = len(tg_tables["standards"]) toff += ntarget["standards"] tg_tables["sky"] = sim_targets( TARGET_TYPE_SKY, toff, density=target_density["sky"], ) tgid_offset["sky"] = toff ntarget["sky"] = len(tg_tables["sky"]) toff += ntarget["sky"] def target_id_class(target_id): for cls in all_classes: if (target_id >= tgid_offset[cls]) and (target_id < tgid_offset[cls] + ntarget[cls]): return cls return None # Load the targets all_tgs = Targets() for tgclass in all_classes: load_target_table(all_tgs, tg_tables[tgclass]) # Make a working copy of the obs remaining for all targets, so we can decrement # and track the targets that were considered (which had obsremain > 0) tg_obs = dict() for tgid in all_tgs.ids(): tgprops = all_tgs.get(tgid) tg_obs[tgid] = tgprops.obsremain tree = TargetTree(all_tgs, 0.01) tgsavail = TargetsAvailable(hw, all_tgs, tiles, tree) del tree favail = LocationsAvailable(tgsavail) asgn = Assignment(all_tgs, tgsavail, favail) asgn.assign_unused(TARGET_TYPE_SCIENCE) # Assign standards, up to some limit asgn.assign_unused(TARGET_TYPE_STANDARD, 10) # Assign sky to unused fibers, up to some limit asgn.assign_unused(TARGET_TYPE_SKY, 40) # Force assignment if needed asgn.assign_force(TARGET_TYPE_STANDARD, 10) asgn.assign_force(TARGET_TYPE_SKY, 40) print("Realization {:02d}:".format(mc)) # Accumulate results for all tiles for tid in tiles.id: # Assignment for this tile tile_assign = 
asgn.tile_location_target(tid) # Targets available for all tile / locs tgsavail = asgn.targets_avail() # available for this tile tile_avail = tgsavail.tile_data(tid) # Accumulate results accum_histogram_data( tid, all_classes, tile_assign, tile_avail, target_id_class, tg_obs, hist_tgassign, hist_tgavail, hist_tgconsid, verbose=True ) # Plot the results plot_assignment_stats( wdir, "sky-std", "Science, Standards and Sky for {} Passes".format(len(tiles.id)), tiles.id, all_classes, hist_tgassign, hist_tgavail, hist_tgconsid ) ``` When interpreting the above plots, first consider the "fraction of assigned positioners" for each tile. The sharply peaked histogram for standards and sky is a result of us **forcing** 10 standards and 40 skies per petal. When doing this "bumping" of science targets, we start with the lowest priority targets and this is why the overall number of ELGs drops for each tile compared to the previous section. ## Conclusions Hopefully this notebook has built some intuition about the limits of DESI positioner geometry and the assignment results that occur in the presence of multiple target classes, each with different priorities and densities, and observed over multiple passes / layers. Part 2 of this study will bring in a "real" target sample.
github_jupyter
```
# Necessary imports
import pylab as py
import numpy as np
import qiskit as qk
from qiskit import Aer
from qiskit import assemble
from scipy.linalg import eigvalsh, eigh

# Define the Pauli matrices here to reduce dependencies on other packages
X = np.array([[0,1],[1,0]],dtype=float)
Z = np.array([[1,0],[0,-1]],dtype=float)
Y = np.array([[0,-1.j],[1.j,0]],dtype=complex)
eye2 = np.array([[1,0],[0,1]],dtype=float)

# Part of the reduced Hubbard Hamiltonian: two-site hopping terms and
# the Coulomb (ZZ) interaction, built as 4x4 two-qubit operators.
X1 = np.kron(X,eye2)
X2 = np.kron(eye2,X)
ZZ = np.kron(Z,Z)

# Helper function to determine the exact eigen energy (only used for plotting)
def energy_exact(t,U):
    """Return the exact ground-state energy of H = -t(X1+X2) + (U/2) ZZ.

    Diagonalizes the 4x4 two-site Hamiltonian directly; `t` is the hopping
    amplitude and `U` the on-site interaction strength.
    """
    ham = -t*(X1+X2) + U/2. * ZZ
    # eigvalsh returns eigenvalues in ascending order, so [0] is the ground state
    evals = eigvalsh(ham)
    return evals[0]

# Try it out
energy_exact(1.,4.)

# Creates a minimal state preparation circuit with input angles that we will
# optimize.
def stateprep_circuit(th0,th1,th2,th3):
    """Build the 2-qubit variational ansatz parameterized by four angles.

    Returns a QuantumCircuit with no measurements attached; the measurement
    basis is added later by the prepare_measure_* helpers.
    """
    qr = qk.QuantumRegister(2)
    qc = qk.QuantumCircuit(qr)
    qc.h(0)
    qc.h(1)
    qc.rz(th0,0)
    qc.rz(th0,1)
    qc.rx(np.pi/2,0)
    qc.rx(np.pi/2,1)
    qc.cx(0,1)
    qc.rx(th1,0)
    qc.rz(th2,1)
    qc.cx(0,1)
    qc.rz(th3,0)
    qc.rz(th3,1)
    qc.rx(-np.pi/2,0)
    qc.rx(-np.pi/2,1)
    # Measurement will be added later on
    return qc

angles = [0.01,0.1,0.2,0.3]
qc=stateprep_circuit(*angles)
qc.draw()

# Function that prepares the circuit, adds the Z
# measurements, runs the circuit, and does some
# simple parsing to extract the 4 possible
# 2-site Z measurements.
def prepare_measure_Z(angles,nshots=2048): qc=stateprep_circuit(*angles) qc.measure_all() sv_sim = Aer.get_backend('qasm_simulator') qobj = assemble(qc,shots=nshots) job = sv_sim.run(qobj) res = job.result().get_counts() # In case we happened to get zero counts # for these, manually add them to the dict klist = ['00','01','10','11'] for k in klist: if k not in res.keys(): res[k] = 0 # Possible Z measurements: II, IZ, ZI, ZZ meas = {} meas['II'] = (res['00'] + res['01'] + res['10'] + res['11'])/nshots meas['IZ'] = (res['00'] - res['01'] + res['10'] - res['11'])/nshots meas['ZI'] = (res['00'] + res['01'] - res['10'] - res['11'])/nshots meas['ZZ'] = (res['00'] - res['01'] - res['10'] + res['11'])/nshots return meas # Function that prepares the circuit, adds the X # measurements, runs the circuit, and does some # simple parsing to extract the 4 possible # 2-site X measurements. def prepare_measure_X(angles,nshots=2048): qc=stateprep_circuit(*angles) # Rotate into X basis qc.h(0) qc.h(1) qc.measure_all() sv_sim = Aer.get_backend('qasm_simulator') qobj = assemble(qc,shots=nshots) job = sv_sim.run(qobj) res = job.result().get_counts() # In case we happened to get zero counts # for these, manually add them to the dict klist = ['00','01','10','11'] for k in klist: if k not in res.keys(): res[k] = 0 # Possible X measurements: II, IX, XI, XX meas = {} meas['II'] = (res['00'] + res['01'] + res['10'] + res['11'])/nshots meas['IX'] = (res['00'] - res['01'] + res['10'] - res['11'])/nshots meas['XI'] = (res['00'] + res['01'] - res['10'] - res['11'])/nshots meas['XX'] = (res['00'] - res['01'] - res['10'] + res['11'])/nshots return meas def cost_function(x, parameters): Xmeas = prepare_measure_X(x,parameters["nshots"]) Zmeas = prepare_measure_Z(x,parameters["nshots"]) hopping = -parameters["t"] * (Xmeas['IX'] + Xmeas['XI']) Coulomb = parameters["U"]/2 * Zmeas['ZZ'] return hopping + Coulomb def average_cost(x,parameters): N = parameters['N'] accumulator = 0 for j in range(N): 
        accumulator += cost_function(x,parameters)
    return accumulator/N

def test_average_cost(t,U):
    """Smoke-test average_cost with fixed angles; returns the sampled energy."""
    x = np.array([0.01,0.2,0.3,0.4])*2
    parameters = {}
    parameters["t"] = t
    parameters["U"] = U
    parameters["N"] = 100
    parameters["nshots"] = 100
    res=average_cost(x, parameters)
    return res

test_average_cost(1.,4.)

# Test a single minimization. We'll use the Nelder-Mead method,
# which is designed for optimizing noisy data.
def test_min(t,U):
    """Run one Nelder-Mead VQE minimization and print it against the exact energy."""
    from scipy.optimize import minimize
    # Initial guesses inspired by the solution at large U
    x = np.array([0.6,0.7,0,0.3])*2
    parameters = {}
    parameters["t"] = t
    parameters["U"] = U
    parameters["N"] = 1
    parameters["nshots"] = 4096
    result = minimize(cost_function, x0 = x, args=(parameters),method='Nelder-Mead')
    print(result)
    # Compare to Exact Hamiltonian
    print("")
    print("Optimized energy: ",result.fun)
    print("Exact energy: ",energy_exact(t,U))

test_min(t=1.0,U=4.)

# Run the optimization procedure for a single t,U point.
def do_min(t,U,x):
    """Minimize average_cost from starting angles `x`.

    Returns (optimized_energy, optimized_angles).  Prints the result next
    to the exact diagonalization energy for comparison.
    """
    from scipy.optimize import minimize
    parameters = {}
    parameters["t"] = t
    parameters["U"] = U
    parameters["N"] = 1
    # NOTE(review): 8096 looks like a typo for 8192 -- harmless, it is
    # only the shot count.  Confirm with the author before "fixing".
    parameters["nshots"] = 8096*5
    result = minimize(average_cost, x0 = x, args=(parameters),method='Nelder-Mead')
    # Compare to Exact Hamiltonian
    print(U,t)
    print("Optimized energy: ",result.fun)
    print("Exact energy: ",energy_exact(t,U))
    return result.fun,result.x

# Warning -- this cell might take a bit!
# Set physics parameters: t and list of U to try t = 1.0 Ulist = np.linspace(0,4,10) # Initial guesses inspired by the solution at large U guess = np.array([0.6,0.7,0,0.3])*2 # Store optimized angles and energies opt_results = {} Elist = np.zeros([len(Ulist),2]) # Optimize for all U values in the selected range for iU,U in enumerate(Ulist): Elist[iU,0] = energy_exact(t,U) Elist[iU,1],coefs = do_min(t,U,guess) opt_results[iU] = coefs # Make a plot of the resulting optimized energies fig, ax = py.subplots() ax.plot(Ulist, Elist[:,0],label="Exact") ax.plot(Ulist, Elist[:,1],'o',label="VQE") ax.set_xlabel("U/J") ax.set_ylabel("Energy [J]") ax.legend() # Sample a number of energies at the optimum to get an idea of # the spread def build_energy_statistics(angles): Nsamples = 10 parameters = {} parameters["t"] = t parameters["U"] = U parameters["N"] = 1 parameters["nshots"] = 8096 energy_samples = np.zeros(Nsamples) for n in range(Nsamples): energy_samples[n] = average_cost(angles,parameters) return np.average(energy_samples),np.std(energy_samples) en_stats = np.zeros([len(Ulist),2]) for iU,U in enumerate(Ulist): angles = opt_results[iU] en_stats[iU,:] = build_energy_statistics(angles) fig, ax = py.subplots() ax.errorbar(Ulist, en_stats[:,0], en_stats[:,1],capsize=4,fmt='.',label='VQE') ax.plot(Ulist,Elist[:,1],label='Exact') ax.set_xlabel("U/J",fontsize=20) ax.set_ylabel(r"$\langle H \rangle$/J",fontsize=20) ax.legend() ```
github_jupyter
# 1Strategy ML Immersion Day ### Building an xgboost model from movie data ``` import json import math import sys import boto3 import matplotlib.pyplot as plt import numpy as np import pandas as pd import sagemaker as sm from sagemaker.amazon.amazon_estimator import get_image_uri import workshop_utils as wu # prevent warnings from displaying import warnings warnings.filterwarnings('ignore') ``` ## Initialize variables ``` bucket = '1s-ml' # !!!!!!!!!!!!!!!!!! # PUT YOUR NAME HERE your_name = 'agraves' model_artifacts_location = f's3://{bucket}/movies/artifacts/{your_name}' role = sm.get_execution_role() sm_session = sm.session.Session() print(f'IAM Role: {role}') ratings = 'movies/data/title.ratings.tsv' basics = 'movies/data/title.basics.tsv' ``` ### A note about this data source: https://datasets.imdbws.com We will be downloading the data from S3 in order to inspect it and perform any cleanup necessary before we train our model. ``` s3 = boto3.resource('s3') s3.Bucket(bucket).download_file(ratings, 'ratings.tsv') s3.Bucket(bucket).download_file(basics, 'basics.tsv') ratings_csv = pd.read_csv('ratings.tsv', sep='\t') basics_csv = pd.read_csv('basics.tsv', sep='\t') movie_data = pd.merge(ratings_csv, basics_csv, how='inner', on='tconst') print(f'Movie Data Shape: {movie_data.shape}') movie_data.head(15) ``` ## Cleanup There are several unecessary columns in this data as well as observations we aren't concerned about. This is an investigation of movie ratings, so we can eliminate the rows which contain data about television shows. This data also contains records from silent films. We can make a reasonable assumption that silent film appreciation is a bit different than modern film appreciation, so we will drop these observations as well. 
```
# Eliminate TV Shows
movie_data = movie_data[(movie_data.titleType == 'movie') | (movie_data.titleType == 'short') | (movie_data.titleType == 'tvMovie')]
# Shape: (395863, 11)

# Limit to only years with talkies
movie_data = movie_data[movie_data.startYear != '\\N']
movie_data.startYear = movie_data.startYear.astype(int)
movie_data = movie_data[movie_data.startYear > 1927]
# Shape: (383612, 11)

# Remove unnecessary columns
movie_data.drop('originalTitle', axis=1, inplace=True)
movie_data.drop('endYear', axis=1, inplace=True)
movie_data.drop('startYear', axis=1, inplace=True)
movie_data.drop('tconst', axis=1, inplace=True)
movie_data.drop('primaryTitle', axis=1, inplace=True)
movie_data.drop('genres', axis=1, inplace=True)
# I am working to one hot encode the genres column. It requires a custom function.

movie_data.head(15)
```

There are some missing values in this data that are encoded as the literal string `\N` rather than as NaN. We need to convert them to NaN before we can drop them.

```
# Convert \\N to NaN
movie_data = movie_data[movie_data != r'\N']

# Check to see how many NaN values we have now that we've dropped the \N entries
movie_data.isna().sum()

# Remove any observations with null values
movie_data.dropna(inplace=True)
movie_data.isna().sum()
```

### Save data for use in the next model

```
movie_data.to_csv('movie_data.csv', index=False)
```

## Visualization

It is quite important to spend time visualizing your data in order to see any patterns or relations within it. We won't spend much time here mulling over plots, but I wanted to show at least one.

The plot below should show the relationship between the number of votes a movie has and its average rating. We can see that as the average rating increases a movie receives more votes.
```
# Create a figure instance
fig = plt.figure(1, figsize=(15, 10))

# Create an axes instance
ax = fig.add_subplot(111)

# plt.plot(movie_data.titleType, movie_data.numVotes, 'o')
plt.plot(movie_data.averageRating, movie_data.numVotes, 'o')
```

## Model Prep

The simplest prediction type to grok is a binary outcome. Our data doesn't have a binary value, but we can create one. We will, of course, lose some granularity in the detail of our ratings system, but a binary value for "likability" may be enough to answer our business problem.

```
# Create a binary dependent variable
likable = movie_data.apply(lambda row: wu.label_rating(row), axis=1)
movie_data = pd.concat([likable, movie_data], axis=1)
movie_data.rename(columns={0:'likable'}, inplace=True)
movie_data.drop('averageRating', axis=1, inplace=True)
movie_data.head(15)
```

We have another problematic column: the titleType column. This field indicates the type of film for each observation. There are only three possibilities: movie, short, and tvMovie. We can use the *pandas* built in method *.get_dummies()* to create three dummy columns corresponding to the types. This is called one hot encoding, and it is necessary here because the xgboost algorithm requires its inputs to be numeric, not categorical.

```
# One Hot Encode titleType column
dummy_types = pd.get_dummies(movie_data['titleType'])
movie_data = pd.concat([movie_data, dummy_types.reindex(movie_data.index)], axis=1)
movie_data.drop('titleType', axis=1, inplace=True)
movie_data.head(15)

# Now that we have only numbers in runtimeMinutes, we can convert to int
movie_data['runtimeMinutes'] = movie_data['runtimeMinutes'].astype(int)
movie_data['movie'] = movie_data['movie'].astype(int)
movie_data['short'] = movie_data['short'].astype(int)
movie_data['tvMovie'] = movie_data['tvMovie'].astype(int)

movie_data.dtypes
```

### A note about splitting data

Our data needs to be divided into three pieces. We need a subset of our observations for training the model on, a subset to evaluate the effectiveness of the model, and a subset to test our hosted endpoint. Typically, data is divided between test and validation at a proportion of 70% training to 30% validation. Here, it will be 70% training, 20% validation, and 10% test.

The split function below is rather unintuitive. The *.sample()* method is randomly shuffling the rows so they are not in a particular order. Then the *.split()* method is dividing the dataset at the 70% mark and the 90% mark, resulting in three new datasets.

```
# Shuffle the rows, then cut at the 70% and 90% marks -> 70/20/10 train/eval/test
movie_train, movie_eval, movie_test = np.split(movie_data.sample(frac=1, random_state=1278),
                                               [int(0.7 * len(movie_data)), int(0.9 * len(movie_data))])

print(f'Movie Train Shape: {movie_train.shape}')
print(f'Movie Eval Shape: {movie_eval.shape}')
print(f'Movie Test Shape: {movie_test.shape}')

movie_train.dtypes
```

Now that we have our individual data sets, we need to store them in S3 to be retrievable by SageMaker when it creates our training instance.

```
# Upload headerless CSVs: the xgboost container expects label-first, no header row
movie_train.to_csv('movie_train.csv', header=False, index=False)
train_upload = f'movies/artifacts/{your_name}/movie_train.csv'
s3.Bucket(bucket).Object(train_upload).upload_file('movie_train.csv')

movie_eval.to_csv('movie_eval.csv', header=False, index=False)
eval_upload = f'movies/artifacts/{your_name}/movie_eval.csv'
s3.Bucket(bucket).Object(eval_upload).upload_file('movie_eval.csv')
```

## Create ML resources

The designation of the 'xgboost' container below is the indicator that we are using the xgboost algorithm. The SageMaker Python SDK provides an Estimator class for creating model resources. This is quite similar to TensorFlow except the type of model is designated by the container image we pass into the class. If it were necessary, we could provision multiple training instances to perform the work faster, but in this example a single instance will do.

```
container = get_image_uri('us-west-2', 'xgboost', '0.90-1')

# NOTE(review): train_instance_count/train_instance_type are SageMaker SDK v1
# names; v2 renamed them to instance_count/instance_type — confirm installed SDK.
xgboost = sm.estimator.Estimator(
    container,
    role,
    base_job_name=f'{your_name}-ml-im',
    train_instance_count=1,
    train_instance_type='ml.m5.large',
    output_path=f's3://{bucket}/movies/artifacts/{your_name}/output',
    sagemaker_session=sm_session)

# sc_pos_weight = sum(negative cases) / sum(positive cases)
```

### A note about hyperparameters

Hyperparameters control the execution of the algorithm being used to train the model. There are many hyperparameters available for the built in xgboost algorithm, but we are only setting six.

**max_depth**: maximum depth of a tree. a higher value can induce overfitting.

**eta**: shrinks the feature weights during each step. this is useful for preventing overfitting because a higher value makes the boosting more conservative (take smaller steps)

**subsample**: sets the amount of data sampled for each tree grown.

**objective**: specifies the learning task and type

**scale_pos_weight**: controls the balance of weighting between positive and negative weights

**num_round**: number of training rounds

For more details on the xgboost hyperparameters, check: https://docs.aws.amazon.com/sagemaker/latest/dg/xgboost_hyperparameters.html

```
xgboost.set_hyperparameters(
    max_depth=3,
    eta=0.1,
    subsample=0.5,
    objective='binary:logistic',
    scale_pos_weight=2.0,
    num_round=100)
```

The *.fit()* method will map data from our locations to the specific channels recognized by the xgboost container. The SageMaker service will then spin up a training instance based on the training attributes we specified when the Estimator was created. This is a synchronous call, so you should see training validation return here in the notebook.

```
train_data = sm.s3_input(s3_data=f's3://{bucket}/{train_upload}', content_type='csv')
eval_data = sm.s3_input(s3_data=f's3://{bucket}/{eval_upload}', content_type='csv')

xgboost.fit({'train': train_data, 'validation': eval_data})
```

## Deploy model

Now that we have a successfully trained model in SageMaker (you can go verify this in the AWS console), we need to "deploy" this model. The *.deploy()* method will instruct SageMaker to create a hosting instance of our desired size to present our model behind an endpoint.

```
xgboost_predict = xgboost.deploy(
    initial_instance_count=1,
    instance_type='ml.m5.large')
xgboost_predict.content_type = 'text/csv'
xgboost_predict.deserializer = None

# We need to drop the label column in order to retrieve inferences for this data
movie_test.drop('likable', axis=1, inplace=True)
movie_test.to_csv('movie_test.csv', header=False, index=False)
movie_test.head(15)
```

The return value of the *.deploy()* method is the SageMaker endpoint resource. We can use this endpoint resource to retrieve inferences of our test data.

```
with open('movie_test.csv', 'r') as file:
    payload = file.read().strip()

test_data = [line for line in payload.split('\n')]
preds = wu.do_predict(test_data, xgboost_predict)
print(preds)
```

## Delete endpoint when done

```
sm_session.delete_endpoint(xgboost_predict.endpoint)
```
github_jupyter
<a href="https://colab.research.google.com/github/Sparrow0hawk/crime_sim_toolkit/blob/crime_cat_refac/data_manipulation/Forces_%2B_LSOAs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Generate a reference document matching LSOAs to Police Force codes Some data is retrieved by link shown in the code. Other data was downloaded from data.police for all forces for 2019-06 and uploaded into the colabs environment. ## Revision 14/08/2019 Issues arose from using the Falls within category from data.police. Therefore matching is now performed using Community Safety Partnership (analogous to local authority) using an established mapping dataset. ``` import pandas as pd import glob import numpy as np # get sheet with LSOA/MSOA/LA breakdown ! wget -O table_2011_MSOA_LSOA.zip https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/populationandmigration/populationestimates/datasets/2011censuspopulationandhouseholdestimatesforwardsandoutputareasinenglandandwales/rft-table-php01-2011-msoas-and-lsoas.zip # get CSP data with matching ! wget -O recordedcrimeatcsplevelyearendingmarch2019.xls https://www.ons.gov.uk/file?uri=/peoplepopulationandcommunity/crimeandjustice/datasets/recordedcrimedatabycommunitysafetypartnershiparea/current/recordedcrimeatcsplevelyearendingmarch2019.xls # unzip file ! 
unzip table_2011_MSOA_LSOA.zip PRCLE_data = pd.read_excel('/content/recordedcrimeatcsplevelyearendingmarch2019.xls', sheet_name='Table C2', skiprows=4) PRCLE_data = PRCLE_data.iloc[:,:6] PRCLE_data.dropna(how='any',inplace=True) PRCLE_data.head() # open file on sheet five skipping useless rows ONS_geo_ref = pd.read_excel('R1_4_EW__RT__Table_PHP01___LSOA_MSOA_v4.xls', sheet_name=5, skiprows=11) ONS_geo_ref.columns # select out desired columns ONS_geo_ref1 = ONS_geo_ref[['Region code', 'Region name', 'Local authority code', 'Local authority name', 'MSOA Code', 'MSOA Name', 'LSOA Code', 'LSOA Name']] # drop empty rows ONS_geo_ref1.dropna(how='any', inplace=True) ONS_geo_ref1.head() PRCLE_data[PRCLE_data['Local Authority code'].isin(['E06000047'])] # build a comprehensive matching table between LSOA/MSOA and police force using local authority # using apply to match LA code and then add police force ONS_geo_ref1['Police_force'] = ONS_geo_ref1['Local authority code'].map(lambda x: PRCLE_data[PRCLE_data['Local Authority code'].isin([x])]['Police Force Area name'].tolist()[0] if len(PRCLE_data[PRCLE_data['Local Authority code'].isin([x])]['Police Force Area name'].tolist()) > 0 else np.NaN) # add this column so we now have LSOA code, local authority name and police force ONS_geo_ref1.head() # now happy to proceed with saving this list to use as reference list between LSOA and police force ONS_geo_ref1.to_csv('PoliceforceLSOA.csv') # prompt download from colab from google.colab import files files.download('PoliceforceLSOA.csv') ```
github_jupyter
# Sensitivity Tests Output

```
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import os
from tqdm import tqdm
from pathlib import Path

import photoeccentric as ph

plt.rcParams['figure.dpi'] = 150


def rmdir(directory):
    """Recursively delete every file under *directory* (directories themselves
    are left in place — only their file contents are unlinked)."""
    directory = Path(directory)
    for item in directory.iterdir():
        if item.is_dir():
            rmdir(item)
        else:
            item.unlink()


distpath_45 = "/Users/ssagear/Dropbox (UFL)/Research/MetallicityProject/HiPerGator/HPG_Replica/SNRs_dynesty_bprior/SNR_45.0"


def find_empty_dirs(root_dir=distpath_45):
    """Yield every directory under *root_dir* that contains no files.

    FIX: the default argument previously referenced the undefined name
    ``distpath_uniform``; the duplicate ``elif len(files) == 0`` branch
    (unreachable after ``if not files``) has been removed.
    """
    for dirpath, dirs, files in os.walk(root_dir):
        if not files:
            yield dirpath

print(list(find_empty_dirs()))

# Collect true (injected) e/w values and the posterior sample files per run.
truee = []
truew = []

edists = []
gdists = []
wdists = []

i = 0
for subdir, dirs, files in os.walk(distpath_45):
    try:
        # Directory names encode the injected parameters as .../e_<e>_w_<w>
        trueparams = subdir.split("/Users/ssagear/Dropbox (UFL)/Research/MetallicityProject/HiPerGator/HPG_Replica/SNRs_dynesty_bprior/SNR_45.0/e_", 1)[1]
        truee.append(float(trueparams.split('_w_')[0]))
        truew.append(float(trueparams.split('_w_')[1]))
    except IndexError:
        continue

    for file in files:
        # NOTE(review): the original appended 'wdist' files to edists and
        # 'edist' files to wdists; corrected here so each list matches its
        # name — confirm against the pipeline's file-naming convention.
        if 'edist' in file:
            i += 1
            distpath = os.path.join(subdir, file)
            edists.append(np.genfromtxt(distpath, delimiter=','))
        if 'gdist' in file:
            i += 1
            distpath = os.path.join(subdir, file)
            gdists.append(np.genfromtxt(distpath, delimiter=','))
        if 'wdist' in file:
            i += 1
            distpath = os.path.join(subdir, file)
            wdists.append(np.genfromtxt(distpath, delimiter=','))

truee = np.array(truee)
truew = np.array(truew)
# FIX: this previously read "edist_uniform = np.array(edist_uniform)",
# referencing an undefined name.
edists = np.array(edists)

# Point estimates: posterior mode of each e and w distribution.
fite = []
for i in range(len(edists)):
    fite.append(ph.mode(edists[i]))
fite = np.array(fite)

fitw = []
for i in range(len(wdists)):
    fitw.append(ph.mode(wdists[i]))
fitw = np.array(fitw)

len(truee)

e_errors = np.abs(truee - fite)

plt.scatter(truew, truee, c=e_errors, cmap='viridis')
plt.ylabel('True $e$')
plt.xlabel('True $\omega$')
plt.title('True W vs. True E vs. Error')
plt.clim(min(abs(e_errors)), max(abs(e_errors)))
plt.colorbar(label='Absolute Value of (True $e$ - Fit $e$)')
plt.savefig('nodiv_scatter_eerror_new.png')


def deltallike(g, gerr, truee, truew, fite, fitw):
    """Return |Δ ln L| between the fit (e, w) and the true (e, w).

    The model is g(e, w) = (1 + e sin w) / sqrt(1 - e^2) with w in degrees,
    compared to the measured g under a Gaussian likelihood of width *gerr*.

    FIX: the model for the fit previously read the module-level globals
    ``e`` and ``w`` instead of the ``fite``/``fitw`` parameters.
    """
    model_fit = (1 + fite*np.sin(fitw*(np.pi/180.))) / np.sqrt(1 - fite**2)
    sigma2_fit = gerr ** 2
    loglike_fit = -0.5 * np.sum((g - model_fit) ** 2 / sigma2_fit + np.log(sigma2_fit))

    model_true = (1 + truee*np.sin(truew*(np.pi/180.))) / np.sqrt(1 - truee**2)
    sigma2_true = gerr ** 2
    loglike_true = -0.5 * np.sum((g - model_true) ** 2 / sigma2_true + np.log(sigma2_true))

    return np.abs(loglike_fit - loglike_true)


llike = []
for i in range(len(truee)):
    g = ph.mode(gdists[i])
    e = ph.mode(edists[i])
    w = ph.mode(wdists[i])
    gerr = np.nanstd(gdists[i])
    llike.append(deltallike(g, gerr, truee[i], truew[i], e, w))
llike = np.array(llike)
like = np.e**llike

plt.scatter(truew, truee, c=llike, cmap='viridis')
#plt.scatter(truew, truee, c=like, cmap='cool', norm=matplotlib.colors.LogNorm())
plt.clim(vmax=50)
plt.ylabel('True $e$')
plt.xlabel('True $\omega$')
plt.title('True W vs. True E vs. Error')
plt.colorbar(label='$\Delta$ $ln($ likelihood $)$')
plt.savefig('nodiv_scatter_eerror_llike_new.png')
```

# Heatmap

```
def heatmap_bins(e_min, e_max, nebins, w_min, w_max, nwbins, loge=False):
    """Get bin edges for the detection-efficiency heatmap.

    Parameters
    ----------
    e_min, e_max : float
        Minimum and maximum eccentricity in the sample.
    nebins : int
        Number of eccentricity bins.
    w_min, w_max : float
        Minimum and maximum longitude of periastron (degrees).
    nwbins : int
        Number of w bins.
    loge : bool, default False
        Log-space the eccentricity bins?
        NOTE(review): the loge=True branch spaces from the *unlogged* e_min,
        which is undefined for e_min=0 — only loge=False is used below; verify
        before enabling.

    Returns
    -------
    e_segments : np.array
        Eccentricity bin edges.
    w_segments : np.array
        Longitude-of-periastron bin edges.
    """
    if loge==True:
        natural_val = np.linspace(e_min, np.log10(e_max), num=nebins, endpoint=True).round(decimals=2)
        e_segments = np.array([10**x for x in natural_val]).round(decimals=2)
    if loge==False:
        e_segments = np.linspace(e_min, e_max, num=nebins, endpoint=True).round(decimals=2)

    w_segments = np.linspace(w_min, w_max, num=nwbins).round(decimals=2)

    return e_segments, w_segments

# Getting bin edges
e_segments, w_segments = heatmap_bins(0.0, 1.0, 12, -90., 270., 12, loge=False)


def is_recovered(error, e, fe, w, fw, e_segments, w_segments):
    """Record a planet as recovered when its eccentricity error is small.

    ** Note ** The threshold in the first if-statement defines "recovered"
    and can be changed.
    ** Note ** r_list, recovered_es, and recovered_ws must be defined outside
    this function (see cell below); they are mutated in place.

    Parameters
    ----------
    error : float
        Absolute difference between true and fit eccentricity.
    e, fe : float
        True and fit eccentricity.
    w, fw : float
        True and fit longitude of periastron (degrees).
    e_segments, w_segments : np.array
        Eccentricity and w bin edges.

    Returns
    -------
    None
    """
    if error < 0.4:  # You can define your own definition of "recovered" here.
        recovered_es.append(e)
        recovered_ws.append(w)
        for e_i in range(len(e_segments)-1):
            for w_i in range(len(w_segments)-1):
                if e_segments[e_i] < e < e_segments[e_i+1]:
                    if w_segments[w_i] < w < w_segments[w_i+1]:
                        # FIX: indices previously ordered [w_i][e_i], transposed
                        # relative to r_list's (e, w) shape (harmless only
                        # because the grid happens to be square).
                        r_list[e_i][w_i] += 1

# Define lists to save for heatmap
r_list = np.zeros((len(e_segments), len(w_segments)))
recovered_es = []
recovered_ws = []

for i in tqdm(range(len(truee))):
    is_recovered(e_errors[i], truee[i], fite[i], truew[i], fitw[i], e_segments, w_segments)


def plot(recovered_es, recovered_ws, true_es, true_ws, filename='plots/hpgsensitivity_e_w_heatmap.png', xlog=False):
    """Plot and save the detection-efficiency heatmap.

    Parameters
    ----------
    recovered_es : np.array
        Recovered (fit) eccentricities.
    recovered_ws : np.array
        Recovered (fit) longitudes of periastron.
    true_es : np.array
        True (simulated) eccentricities.
    true_ws : np.array
        True (simulated) longitudes of periastron.
    filename : str, default 'plots/hpgsensitivity_e_w_heatmap.png'
        Directory + filename to save the heatmap.
    xlog : bool, default False
        Log the x-axis? Must match the loge argument of heatmap_bins().

    Returns
    -------
    fig : matplotlib.figure.Figure object
        Heatmap figure.
    """
    counts, _, _ = np.histogram2d(recovered_ws, recovered_es, bins=(w_segments, e_segments))
    counts_tot, _, _ = np.histogram2d(true_ws, true_es, bins=(w_segments, e_segments))

    # Per-bin recovery fraction; empty bins (0/0 -> nan) are mapped to 0.
    for i in range(len(counts.T)):
        for j in range(len(counts.T[i])):
            counts.T[i][j] = counts.T[i][j]/counts_tot.T[i][j]
            if np.isnan(counts.T[i][j]):
                counts.T[i][j] = 0

    matplotlib.rcParams['xtick.minor.size'] = 0
    matplotlib.rcParams['xtick.minor.width'] = 0

    fig, ax = plt.subplots()
    heatmap = ax.pcolormesh(w_segments, e_segments, counts.T, cmap='viridis')
    if xlog==True:
        ax.set_xscale('log')

    ax.xaxis.set_ticks(w_segments)
    ax.xaxis.set_ticklabels(w_segments, fontsize=10, rotation='vertical')
    ax.yaxis.set_ticks(e_segments)
    ax.yaxis.set_ticklabels(e_segments)

    ax.set_title('Fraction of Recovered $(e,w)$')
    ax.set_ylabel('True eccentricity $e$')
    # FIX: label typo "logitude" -> "longitude"
    ax.set_xlabel('True longitude of periastron $w$')

    cbar = plt.colorbar(heatmap)
    heatmap.set_clim(0.0, 1.0)
    cbar.set_label('Fraction $e$ error within 0.4', rotation=270, labelpad=10)

    plt.savefig(filename)
    plt.show()

    return fig

heatmap = plot(recovered_es, recovered_ws, truee, truew, xlog=False)

# Mean Δ ln-likelihood per (w, e) bin.
H, xedges, yedges = np.histogram2d(truew, truee, bins = [w_segments, e_segments], weights = llike)
H_counts, xedges, yedges = np.histogram2d(truew, truee, bins = [w_segments, e_segments])
H = H/H_counts

fig, ax = plt.subplots()
heatmap = ax.imshow(H.T, origin='lower', cmap='viridis', extent = [-90, 270, 0, 1], aspect=350)  #norm=matplotlib.colors.LogNorm()

ax.xaxis.set_ticks(w_segments)
ax.xaxis.set_ticklabels(w_segments, fontsize=10, rotation='vertical')
ax.yaxis.set_ticks(e_segments)
ax.yaxis.set_ticklabels(e_segments)

ax.set_title('$\Delta$ ln(likelihood) of (e,w)')
ax.set_ylabel('True $e$')
ax.set_xlabel('True $w$')

cbar = plt.colorbar(heatmap)
cbar.set_label('$\Delta$ ln(likelihood)', rotation=270, labelpad=10)

plt.savefig('plots/hpgsensitivity_deltallike_heatmap.png')
```
github_jupyter
# DoWhy: Different estimation methods for causal inference This is a quick introduction to the DoWhy causal inference library. We will load in a sample dataset and use different methods for estimating the causal effect of a (pre-specified)treatment variable on a (pre-specified) outcome variable. First, let us add the required path for Python to find the DoWhy code and load all required packages ``` import os, sys sys.path.append(os.path.abspath("../../../")) import numpy as np import pandas as pd import logging import dowhy from dowhy import CausalModel import dowhy.datasets ``` Now, let us load a dataset. For simplicity, we simulate a dataset with linear relationships between common causes and treatment, and common causes and outcome. Beta is the true causal effect. ``` data = dowhy.datasets.linear_dataset(beta=10, num_common_causes=5, num_instruments = 2, num_treatments=1, num_samples=10000, treatment_is_binary=True, outcome_is_binary=False) df = data["df"] df ``` Note that we are using a pandas dataframe to load the data. ## Identifying the causal estimand We now input a causal graph in the DOT graph format. ``` # With graph model=CausalModel( data = df, treatment=data["treatment_name"], outcome=data["outcome_name"], graph=data["gml_graph"], instruments=data["instrument_names"], logging_level = logging.INFO ) model.view_model() from IPython.display import Image, display display(Image(filename="causal_model.png")) ``` We get a causal graph. Now identification and estimation is done. ``` identified_estimand = model.identify_effect() print(identified_estimand) ``` ## Method 1: Regression Use linear regression. ``` causal_estimate_reg = model.estimate_effect(identified_estimand, method_name="backdoor.linear_regression", test_significance=True) print(causal_estimate_reg) print("Causal Estimate is " + str(causal_estimate_reg.value)) ``` ## Method 2: Stratification We will be using propensity scores to stratify units in the data. 
``` causal_estimate_strat = model.estimate_effect(identified_estimand, method_name="backdoor.propensity_score_stratification", target_units="att") print(causal_estimate_strat) print("Causal Estimate is " + str(causal_estimate_strat.value)) ``` ## Method 3: Matching We will be using propensity scores to match units in the data. ``` causal_estimate_match = model.estimate_effect(identified_estimand, method_name="backdoor.propensity_score_matching", target_units="atc") print(causal_estimate_match) print("Causal Estimate is " + str(causal_estimate_match.value)) ``` ## Method 4: Weighting We will be using (inverse) propensity scores to assign weights to units in the data. DoWhy supports a few different weighting schemes: 1. Vanilla Inverse Propensity Score weighting (IPS) (weighting_scheme="ips_weight") 2. Self-normalized IPS weighting (also known as the Hajek estimator) (weighting_scheme="ips_normalized_weight") 3. Stabilized IPS weighting (weighting_scheme = "ips_stabilized_weight") ``` causal_estimate_ipw = model.estimate_effect(identified_estimand, method_name="backdoor.propensity_score_weighting", target_units = "ate", method_params={"weighting_scheme":"ips_weight"}) print(causal_estimate_ipw) print("Causal Estimate is " + str(causal_estimate_ipw.value)) ``` ## Method 5: Instrumental Variable We will be using the Wald estimator for the provided instrumental variable. ``` causal_estimate_iv = model.estimate_effect(identified_estimand, method_name="iv.instrumental_variable", method_params = {'iv_instrument_name': 'Z0'}) print(causal_estimate_iv) print("Causal Estimate is " + str(causal_estimate_iv.value)) ``` ## Method 6: Regression Discontinuity We will be internally converting this to an equivalent instrumental variables problem. 
``` causal_estimate_regdist = model.estimate_effect(identified_estimand, method_name="iv.regression_discontinuity", method_params={'rd_variable_name':'Z1', 'rd_threshold_value':0.5, 'rd_bandwidth': 0.1}) print(causal_estimate_regdist) print("Causal Estimate is " + str(causal_estimate_regdist.value)) ```
github_jupyter
# Jouer avec les mots Les mots utilisés dans ce chapitre sont des mots français pour le Scrabble de Jean-Philippe Durand, disponible sous http://jph.durand.free.fr/scrabble.txt Le document est sauvegardé localement sous le nom `mots.txt`. ## Lire des listes de mots La première action est de créer une objet `fichier` (fin = file input) avec la fonction `open`. ``` fin = open('mots.txt') ``` **Exercice : aide pour open** Trouve de l'aide pour `open` et `readline` ``` #help ``` La méthode `readline` retourne la ligne suivante, jusqu'à EOF (End Of File). Le premier mot est *AA* qui est une sorte de lave. Les mots se terminent par le caractère de saut de ligne `\n` (newline). ``` fin.readline() ``` **Exercice : lire 10 lignes** Dans une boucle, lisez et affichez les 10 prochaines lignes de `mot.txt`. Afficher les lignes sans interlignes. ``` for i in range(10): pass ``` L'objet `fin` garde en mémoire la ligne actuelle, et continue avec la prochaine ligne. ``` fin.readline() ``` Nous pouvons nous débarasser du newline avec la fonction `strip` ``` fin.readline().strip() for i in range(10): print(fin.readline().strip()) ``` ## ex 1 - Mots avec 14 caractères Ecrivez un programme qui lit `mots.txt`et affiche uniquement les mots de plus de 14 caractères (sans compter le `\n`). N'afficher que les premiers 20 mots. ``` fin = open('mots.txt') i = 0 for line in fin: pass ``` ## ex 2 - mots sans E Ecrivez une fonction `sans_e` et testez-la avec deux mots. ``` def sans_e(mot): pass sans_e('HELLO'), sans_e('CIAO') ``` Modifiez le programme précédent pour afficher seulement les mots de plus de 14 lettres qui ne contiennent aucun 'e'. ``` fin = open('mots.txt') for line in fin: pass ``` Calculez le nombre et le pourcentage des mots qui ne contiennent aucun 'e'. 
``` fin = open('mots.txt') n = 0 i = 0 for line in fin: pass ``` ## ex 3 - eviter Ecrivez une fonction nommée 'evite' qui prend un mot et une liste de lettres interdites, et qui renvoie `True`si le mot ne contient aucune des lettres interdites. ``` def evite(mot, interdit): pass evite('hello', 'epfl'), evite('ciao', 'epfl') ``` Modifier le programme pour inviter l'utilisateur à saisir une chaine de lettres interdites et ensuite affiche le nombre de mots de la liste en entrée qui ne contiennent aucunee de celles-ci. ``` interdit = input('forbidden letters = ') fin = open('mots.txt') interdit = interdit.upper() pass ``` ## ex 4 - utiliser seulement Ecrivez une fonction `utilise` qui prend un mots et une chaine de lettres, et qui renvoie `True` si le mot ne contient que de lettres de la liste. ``` def utilise(mot, permis): pass utilise('hello', 'helpo'), utilise('hello', 'hefko'), permis = input('allowed letters = ') fin = open('mots.txt') permis = permis.upper() pass ``` ## ex 5 - utiliser tous Ecrivez une fonction `utilise_tout` qui prend un mot et une chaine de lettres requise et renvoie `True`si le mot utilise toutes les lettres requises au moin une fois. ``` def utilise_tout(mot, lettres): pass utilise_tout('hello', 'hel'), utilise_tout('hello', 'helk') ``` Commbien y a-t-il de mots qui utilisent toutes les voyelles `aeiou` ? Et `aeiouy` ? ``` lettres = 'aeiou'.upper() fin = open('mots.txt') pass ``` ## ex 6 - ordre alphabétique Ecrivez une fonction `est_en_ordre_alphabétique` qui renvoie `True`si les lettres d'un mot apparaissent dans l'ordre alphabétique (les lettres doubles sont autorisées). Combient y a-t-il de mots de ce genre? ``` def est_en_ordre_alphabetique(mot): pass est_en_ordre_alphabetique('hello'), est_en_ordre_alphabetique('bello'), fin = open('mots.txt') n = 0 k = 0 for line in fin: pass ``` ## Exercices ### ex - 3 fois une lettre double Trouver un mot avec 3 fois une lettre double, conséqutifs. 
``` def double_letters(s): pass double_letters('hello'), double_letters('helloo'), double_letters('heelloo'), fin = open('mots.txt') for line in fin: word = line.strip() if double_letters(word): print(word) ``` ### ex - palindromes ``` for i in range(1000, 9999): s = str(i) if (s[0]==s[3] and s[1]==s[2]): print(i) for i in range(100000, 999999): s = str(i) if (s[2]==s[5] and s[3]==s[4]): i += 1 s = str(i) if (s[1]==s[5] and s[2]==s[4]): i += 1 s = str(i) if (s[1]==s[4] and s[2]==s[3]): i += 1 s = str(i) if (s[0]==s[5] and s[1]==s[4]) and s[2]==s[3]: print(i-3) ``` ### ex age ``` for dif in range(15, 100): n = 0 for mom in range(dif, 100): s = str(mom-dif).zfill(2) m = str(mom) if s[0]==m[1] and s[1]==m[0]: n += 1 if n == 8: print(dif) dif = 18 for mom in range(dif, 100): s = str(mom-dif).zfill(2) m = str(mom) if s[0]==m[1] and s[1]==m[0]: print(s, m) ``` La différence d'age est 18, et l'age actuel est 57.
github_jupyter
``` #!/usr/bin/env python3 """feature_vectors.ipynb James Gardner 2019 reads in TGSS and NVSS sources in a 20° patch of sky and computes positional matches within 10' adds labels based off of positional matching requires the unzipped catalogues to be present in cwd and expects names: TGSSADR1_7sigma_catalog.tsv and CATALOG.FIT saves feature vectors (individuals and matches) as: tgss.csv, nvss.csv, and patch_catalogue.csv """ import pandas as pd import numpy as np from astropy.io import fits from tqdm import tqdm_notebook as tqdm import matplotlib.pyplot as plt PATCH_SIZE = 20 SEPARATION_LIMIT = 10*1/60 def geodesic_dist(p1,p2): """arguments are two points on the unit sphere, with ra and dec given in radians; returns their geodesic distance, see: https://en.wikipedia.org/wiki/Great-circle_distance#Formulae""" ra1,dec1,ra2,dec2 = p1[0],p1[1],p2[0],p2[1] decdiff = (dec1-dec2)/2 radiff = (ra1-ra2)/2 better_circle = 2*np.arcsin(np.sqrt(np.sin(decdiff)**2 + np.cos(dec1)*np.cos(dec2) * np.sin(radiff)**2)) r = 1 return better_circle*r def degdist(p1,p2): """calls geodesic_dist on two points, with ra and dec given in degrees; returns their separation in degrees""" return 180/np.pi*geodesic_dist([x*np.pi/180 for x in p1], [x*np.pi/180 for x in p2]) def deci_deg_to_deg_min_sec(deci_deg): """converts decimal degrees to degrees-minutes-seconds""" is_positive = (deci_deg >= 0) deci_deg = abs(deci_deg) # divmod returns quotient and remainder minutes,seconds = divmod(deci_deg*3600,60) degrees,minutes = divmod(minutes,60) degrees = degrees if is_positive else -degrees return (degrees,minutes,seconds) def deci_deg_to_hr_min_sec(deci_deg): """converts decimal degrees to hours-minutes-seconds assumes that deci_deg is postitive""" deci_hours = deci_deg/15. 
schminutes,schmeconds = divmod(deci_hours*3600,60) hours,schminutes = divmod(schminutes,60) return (hours,schminutes,schmeconds) def iau_designation(ra,dec): """generate NVSS names as per: https://heasarc.gsfc.nasa.gov/W3Browse/all/nvss.html There are four cases where there are pairs of sources which are so close together that their names would be identical according to this schema (see below), and the HEASARC has added suffixes of 'a' (for the source with smaller RA) and 'b' (for the source with the larger RA) in such cases in order to differentate them. It was easier just to hard-code this in, should really check if designation alreadys exists and compare """ hr,schmin,schmec = deci_deg_to_hr_min_sec(ra) rhh = str(int(hr)).zfill(2) rmm = str(int(schmin)).zfill(2) rss = str(int(schmec - schmec%1)).zfill(2) deg,minu,sec = deci_deg_to_deg_min_sec(dec) sgn = '+' if deg>=0 else '-' ddd = str(int(abs(deg))).zfill(2) dmm = str(int(minu)).zfill(2) dss = str(int(sec - sec%1)).zfill(2) designation = ''.join(('NVSS J',rhh,rmm,rss,sgn,ddd,dmm,dss)) close_pairs = {'NVSS J093731-102001':144.382, 'NVSS J133156-121336':202.987, 'NVSS J160612+000027':241.553, 'NVSS J215552+380029':328.968} if designation in close_pairs: if ra < close_pairs[designation]: designation = ''.join((designation,'a')) else: designation = ''.join((designation,'b')) return designation # choice of patch is arbitrary but must be within both surveys # testing shows no discernible difference between patches PATCH_DEC = -35 PATCH_RA = 149 def df_in_patch(df_ra,df_dec,): in_patch = ((PATCH_RA < df_ra) & (df_ra < PATCH_RA+PATCH_SIZE) & (PATCH_DEC < df_dec) & (df_dec < PATCH_DEC+PATCH_SIZE)) return in_patch # import TGSS and save desired feature vectors tgss_df = pd.read_csv('TGSSADR1_7sigma_catalog.tsv',delimiter='\t', index_col=0,usecols=(0,1,3,5,7,9,11,13)) tgss_df = tgss_df.sort_values(by=['DEC']) tgss_df['Total_flux'] = tgss_df['Total_flux']*1e-3 tgss_df['Peak_flux'] = tgss_df['Peak_flux']*1e-3 tgss_df = 
tgss_df[df_in_patch(tgss_df['RA'],tgss_df['DEC'])] tgss_df.index.names = ['name_TGSS'] tgss_df.columns = ['ra_TGSS','dec_TGSS','integrated_TGSS','peak_TGSS', 'major_ax_TGSS','minor_ax_TGSS','posangle_TGSS'] tgss_df.to_csv('tgss.csv') # import NVSS and save feature vectors with fits.open('CATALOG.FIT') as hdulist: data = hdulist[1].data nvss_data = np.column_stack((data['RA(2000)'],data['DEC(2000)'],data['PEAK INT'], data['MAJOR AX'],data['MINOR AX'],data['POSANGLE'], data['Q CENTER'],data['U CENTER'],data['P FLUX'], data['RES PEAK'],data['RES FLUX'])) nvss_columns = ['RA(2000)','DEC(2000)','PEAK INT','MAJOR AX','MINOR AX','POSANGLE', 'Q CENTER','U CENTER','P FLUX','RES PEAK','RES FLUX'] nvss_df = pd.DataFrame(data = nvss_data, columns = nvss_columns) nvss_df = nvss_df.sort_values(by=['DEC(2000)']).reset_index(drop = True) nvss_df = nvss_df[df_in_patch(nvss_df['RA(2000)'],nvss_df['DEC(2000)'])] nvss_labels = np.array([iau_designation(p[0],p[1]) for p in nvss_df[['RA(2000)','DEC(2000)']].values]) nvss_df['name_NVSS'] = nvss_labels nvss_df.set_index('name_NVSS',inplace=True) nvss_df.columns = ['ra_NVSS','dec_NVSS','peak_NVSS','major_ax_NVSS','minor_ax_NVSS','posangle_NVSS', 'q_centre_NVSS','u_centre_NVSS','polarised_NVSS','res_peak_NVSS','res_flux_NVSS'] nvss_df.to_csv('nvss.csv') # positional matching the two surveys tgss = tgss_df[['ra_TGSS','dec_TGSS']].values nvss = nvss_df[['ra_NVSS','dec_NVSS']].values nvss_dec_min = round(nvss[:,1].min(),1) nvss_dec_max = round(nvss[:,1].max(),1) # rough filtering using sorting of both surveys # this is a small scale of the process in sky_positional_matching.ipynb patch_matches = [] tqdmbar = tqdm(total=len(tgss)) for i1,p1 in enumerate(tgss): for i2,p2 in enumerate(nvss): if p2[1] < p1[1] - SEPARATION_LIMIT: continue elif p1[1] + SEPARATION_LIMIT < p2[1]: break elif (abs((p1[0]-p2[0])*np.cos(p1[1]*np.pi/180)) < SEPARATION_LIMIT and abs(p1[1]-p2[1]) < SEPARATION_LIMIT): patch_matches.append((i1,i2)) tqdmbar.postfix = 'matches = 
{}'.format(len(patch_matches)) tqdmbar.update(1) patch_matches = np.array(patch_matches) # proper filtering based off of proper geodesic distance tmp_patch_matches = [] for i1,i2 in tqdm(patch_matches): p1,p2 = tgss[i1],nvss[i2] d = degdist(p1,p2) if d < SEPARATION_LIMIT: tmp_patch_matches.append([i1,i2]) patch_matches = np.array(tmp_patch_matches) patch_cat_columns = np.concatenate((tgss_df.reset_index().columns.values, nvss_df.reset_index().columns.values)) patch_cat = pd.DataFrame(columns=patch_cat_columns) # construction of combined match catalogue # warning: this can take a few minutes to complete FREQ_TGSS,FREQ_NVSS = 150e6,1.4e9 for i1,i2 in tqdm(patch_matches): obj_t = tgss_df.reset_index().iloc[i1] obj_n = nvss_df.reset_index().iloc[i2] match_row = {**obj_t,**obj_n} separation = degdist((obj_t['ra_TGSS'],obj_t['dec_TGSS']), (obj_n['ra_NVSS'],obj_n['dec_NVSS'])) match_row['separation'] = separation # adding derived feature of spectral index will prove useful alpha = np.log(obj_t['peak_TGSS']/obj_n['peak_NVSS'])/np.log(FREQ_NVSS/FREQ_TGSS) match_row['spectral_alpha'] = alpha patch_cat = patch_cat.append(match_row, ignore_index=True) patch_cat.set_index(['name_TGSS','name_NVSS'], inplace=True) def separation_scorer(name_TGSS,name_NVSS): """given a name in each catalogue s.t. 
match is in patch_catalogue returns a score based off of the separation between sources note the choice of threshold at 40'' cuts off the second mode of separations as seen in hist_angle.pdf from sky_positional_matching.ipynb""" threshold = 40 if (name_TGSS,name_NVSS) in patch_cat.index: sep = patch_cat.loc[name_TGSS,name_NVSS].separation sep *= 3600 return max(0,(threshold-sep)/threshold) else: return 0 # labels to train against are the rounded separation scores scores = [separation_scorer(name_TGSS,name_NVSS) for (name_TGSS,name_NVSS) in tqdm(patch_cat.index.values)] patch_cat['score'] = scores patch_cat.sort_values(by=['score'],inplace=True) patch_cat.to_csv('patch_catalogue.csv') # plot histogram of separation scores, observe 0.8 cut-off decision scores = patch_cat['score'].values plt.figure(figsize=(14,7)) plt.rcParams.update({'font.size': 18}) plt.hist(scores[scores>0],bins=80,density=True,color='slateblue') plt.title('scores from separation for matches inside patch') plt.xlabel('scores') plt.ylabel('pdf') plt.savefig('patch_scores_dist.pdf') ```
github_jupyter
# Cross-Validation and the Test Set In the last lecture, we saw how keeping some data hidden from our model could help us to get a clearer understanding of whether or not the model was overfitting. This time, we'll introduce a common automated framework for handling this task, called **cross-validation**. We'll also incorporate a designated **test set**, which we won't touch until the very end of our analysis to get an overall view of the performance of our model. ``` import numpy as np from matplotlib import pyplot as plt import pandas as pd # assumes that you have run the function retrieve_data() # from "Introduction to ML in Practice" in ML_3.ipynb titanic = pd.read_csv("data.csv") titanic ``` We are again going to use the `train_test_split` function to divide our data in two. This time, however, we are not going to be using the holdout data to determine the model complexity. Instead, we are going to hide the holdout data until the very end of our analysis. We'll use a different technique for handling the model complexity. ``` from sklearn.model_selection import train_test_split np.random.seed(1234) # for reproducibility train, test = train_test_split(titanic, test_size = 0.2) # hold out 20% of data ``` We again need to clean our data: ``` from sklearn import preprocessing def prep_titanic_data(data_df): df = data_df.copy() le = preprocessing.LabelEncoder() df['Sex'] = le.fit_transform(df['Sex']) df = df.drop(['Name'], axis = 1) X = df.drop(['Survived'], axis = 1).values y = df['Survived'].values return(X, y) X_train, y_train = prep_titanic_data(train) X_test, y_test = prep_titanic_data(test) ``` ## K-fold Cross-Validation The idea of k-fold cross validation is to take a small piece of our training data, say 10%, and use that as a mini test set. We train the model on the remaining 90%, and then evaluate on the 10%. We then take a *different* 10%, train on the remaining 90%, and so on. 
We do this many times, and finally average the results to get an overall average picture of how the model might be expected to perform on the real test set. Cross-validation is a highly efficient tool for estimating the optimal complexity of a model. <figure class="image" style="width:100%"> <img src="https://scikit-learn.org/stable/_images/grid_search_cross_validation.png" alt="Illustration of k-fold cross validation. The training data is sequentially partitioned into 'folds', each of which is used as mini-testing data exactly once. The image shows five-fold validation, with four boxes of training data and one box of testing data. The diagram then indicates a final evaluation against additional testing data not used in cross-validation." width="600px"> <br> <caption><i>K-fold cross-validation. Source: scikit-learn docs.</i></caption> </figure> The good folks at `scikit-learn` have implemented a function called `cross_val_score` which automates this entire process. It repeatedly selects holdout data; trains the model; and scores the model against the holdout data. While exceptions apply, you can often use `cross_val_score` as a plug-and-play replacement for `model.fit()` and `model.score()` during your model selection phase. ``` from sklearn.model_selection import cross_val_score from sklearn import tree # make a model T = tree.DecisionTreeClassifier(max_depth = 3) # 10-fold cross validation: hold out 10%, train on the 90%, repeat 10 times. 
cv_scores = cross_val_score(T, X_train, y_train, cv=10) cv_scores cv_scores.mean() fig, ax = plt.subplots(1) best_score = 0 for d in range(1,30): T = tree.DecisionTreeClassifier(max_depth = d) cv_score = cross_val_score(T, X_train, y_train, cv=10).mean() ax.scatter(d, cv_score, color = "black") if cv_score > best_score: best_depth = d best_score = cv_score l = ax.set(title = "Best Depth : " + str(best_depth), xlabel = "Depth", ylabel = "CV Score") ``` Now that we have a reasonable estimate of the optimal depth, we can try evaluating against the unseen testing data. ``` T = tree.DecisionTreeClassifier(max_depth = best_depth) T.fit(X_train, y_train) T.score(X_test, y_test) ``` Great! We even got slightly higher accuracy on the test set than we did in validation, although this is rare. # Machine Learning Workflow: The Big Picture We now have all of the elements that we need to execute the core machine learning workflow. At a high-level, here's what should go into a machine task: 1. Separate out the test set from your data. 2. Clean and prepare your data if needed. It is best practice to clean your training and test data separately. It's convenient to write a function for this. 3. Identify a set of candidate models (e.g. decision trees with depth up to 30, logistic models with between 1 and 3 variables, etc). 4. Use a validation technique (k-fold cross-validation is usually sufficient) to estimate how your models will perform on the unseen test data. Select the best model as measured by validation. 5. Finally, score the best model against the test set and report the result. Of course, this isn't all there is to data science -- you still need to do exploratory analysis; interpret your model; etc. etc. We'll discuss model interpretation further in a coming lecture.
github_jupyter
``` from z3 import * import numpy as np from itertools import combinations from typing import Sequence from tqdm.notebook import tqdm ``` Read instance file: ``` input_filename = '../../Instances/12x12.txt' w, h, n, DX, DY = None, None, None, None, None with open(input_filename, 'r') as f_in: lines = f_in.read().splitlines() split = lines[0].split(' ') w = int(split[0]) h = int(split[1]) n = int(lines[1]) DX = [] DY = [] for i in range(int(n)): split = lines[i + 2].split(' ') DX.append(int(split[0])) DY.append(int(split[1])) ``` Solver: ``` solver = Solver() ``` Model: ``` B = [[[Bool(f'B_{i}_{j}_{k}') for k in range(n)] for j in range(w)] for i in range(h)] R = [Bool(f'R_{k}') for k in range(n)] def at_least_one(bool_vars: Sequence): return Or(bool_vars) def at_most_one(bool_vars: Sequence): return [Not(And(pair[0], pair[1])) for pair in combinations(bool_vars, 2)] # Constraint "at most one piece" for i in range(h): for j in range(w): solver.add(at_most_one(B[i][j])) # Iterate over all the pieces p for p in tqdm(range(n), leave=False): dx = DX[p] dy = DY[p] package_clauses = [] # Iterate over all the coordinates where p can fit for i in range(h - dy + 1): for j in range(w - dx + 1): patch_clauses = [] # Iterate over the cells of p's patch for f1 in range(dy): for f2 in range(dx): patch_clauses.append(B[i + f1][j + f2][p]) package_clauses.append(And(patch_clauses)) solver.add(Or(R[p], at_least_one(package_clauses))) dx, dy = dy, dx package_clauses = [] # Iterate over all the coordinates where p can fit for i in range(h - dy + 1): for j in range(w - dx + 1): patch_clauses = [] # Iterate over the cells of p's patch for f1 in range(dy): for f2 in range(dx): patch_clauses.append(B[i + f1][j + f2][p]) package_clauses.append(And(patch_clauses)) solver.add(Or(Not(R[p]), at_least_one(package_clauses))) solver.add(R[1]) %%time solver.check() ``` From Z3 model solution to file: ``` solution = np.zeros((h, w, n), dtype=bool) model = solver.model() for i in range(h): for j in 
range(w): for k in range(n): solution[i, j, k] = is_true(model[B[i][j][k]]) xy = {} for p in range(n): y_ids, x_ids = solution[:, :, p].nonzero() #print(solution[:, :, p]) x = np.min(x_ids) y = h-1-np.max(y_ids) xy[p] = [x, y] xy output_filename = '../../pwp_utilities/12x12_sol.txt' with open(output_filename, 'w') as f_out: f_out.write('{} {}\n'.format(w, h)) f_out.write('{}\n'.format(n)) for i in range(n): if is_true(model[R[i]]): f_out.write('{} {}\t{} {}\n'.format(DY[i], DX[i], xy[i][0], xy[i][1])) print('R') else: f_out.write('{} {}\t{} {}\n'.format(DX[i], DY[i], xy[i][0], xy[i][1])) ```
github_jupyter
``` import sys sys.path.append("..") import pandas as pd import numpy as np from numba import jit import json import matplotlib.pyplot as plt import seaborn as sns import numpy as np import xmltodict import numpy.polynomial as p from multiprocessing import Pool import time from datetime import datetime , date, timedelta from fottech_lib.market_data.dmds import DMDSServices from fottech_lib import instrumentservice from fottech_lib.market_data.repo import Repo import project.market_data.repocurves as repoc from project.market_data.repocurves import RepoCurves %matplotlib inline ``` ### Loading Indices ``` #Loading the indices file_path = '../data/universe_indices.npy' universe_indices = np.load(file_path) ``` ### Computing Universe Repo ``` def get_repo_schedules(universe_indices_ric,business_date): dictionary = {} for ric in universe_indices_ric: print('############################## Index {} ##############################'.format(ric)) try: div_paths = 'RepoCurve/official/{}/PARIS/INTRADAY/equity/{}/sophis'.format(business_date,ric) ds = DMDSServices('prod', 'APAC') docs = ds.get_documents(div_paths) d_s = docs['documents']['document'][0].__values__.get('content') repo_schedule = xmltodict.parse(d_s) date = repo_schedule['RepoCurve']['@businessDate'] df = pd.DataFrame(repo_schedule['RepoCurve']['repo']) df['#text'] = df['#text'].astype(str) df['@term'] = df['@term'].astype(str) for i in range(df.shape[0]): f_date = datetime.strptime(date, "%Y-%m-%d").date() l_date = datetime.strptime(df['@term'][i], "%Y-%m-%d").date() delta = l_date - f_date if (delta.days >= 0): df['@term'][i] = delta.days else: df = df.drop(i, axis = 0) df = df.reset_index(drop=True) df = df.get_values() col1 = df[:,0].tolist() col2 = df[:,1].tolist() col = [col1 , col2, date] dictionary[ric]=col except: dictionary[ric]=None return dictionary def save_dict(dictionary): file_path = '../output/universe_repo_processed.json' try: with open(file_path, 'w') as fp: json.dump(dictionary, fp) print('file 
saved') except: print('For some reasons, the file couldnt be saved') universe_indices_ric = [] B_to_R = instrumentservice.InstrumentService('prod','APAC') for index in universe_indices: index_ric = B_to_R.transcode(index, target='reuter', partial_match=False) if(index_ric != None): ric = index_ric[1:] universe_indices_ric.append(ric) dictionary = get_repo_schedules(universe_indices_ric,'latest') save_dict(dictionary) len(dictionary.keys()) ``` ### Now cleaning and preprocessing the universe repo curves ``` path_to_data_Universe = '../output/universe_repo_processed.json' path_to_cleaned_data_Universe = '../output/universe_repo_cleaned.json' print('################## Cleaning dividends for Universe index ##################') new_dict = {} with open(path_to_data_Universe) as json_file: dictionary = json.load(json_file) for key in list(dictionary.keys()): if (dictionary[key]!=None): if np.sum(np.isnan(dictionary[key][0]))==0 and np.sum(np.isnan(list(map(float,dictionary[key][1]))))==0 : dictionary[key][1] = list(map(float,dictionary[key][1])) new_dict[key] = dictionary[key] xvals = [90, 180, 365, 730, 1095, 1460, 1825, 2190, 2555, 2920, 3285, 3650, 4015, 4380] for key in new_dict.keys(): x = new_dict[key][0] y = new_dict[key][1] yinterp = np.interp(xvals, x, y) #computing new interpolated values new_dict[key][0] = xvals new_dict[key][1] = yinterp.tolist() with open(path_to_cleaned_data_Universe, 'w') as fp: json.dump(new_dict, fp) print('file saved') ```
github_jupyter
# Report for yuvipanda ``` import seaborn as sns import pandas as pd import numpy as np import altair as alt from markdown import markdown from IPython.display import Markdown from ipywidgets.widgets import HTML, Tab from ipywidgets import widgets from datetime import timedelta from matplotlib import pyplot as plt import os.path as op from mycode import alt_theme from warnings import simplefilter simplefilter('ignore') def author_url(author): return f"https://github.com/{author}" # Parameters fmt_date = "{:%Y-%m-%d}" n_days = 30 * 2 start_date = fmt_date.format(pd.datetime.today() - timedelta(days=n_days)) end_date = fmt_date.format(pd.datetime.today()) renderer = "html" person = "jasongrout" # Parameters person = "yuvipanda" n_days = 60 alt.renderers.enable(renderer); alt.themes.register('my_theme', alt_theme) alt.themes.enable("my_theme") ``` ## Load data ``` from pathlib import Path path_data = Path("./") comments = pd.read_csv(path_data.joinpath('comments.csv'), index_col=0) issues = pd.read_csv(path_data.joinpath('issues.csv'), index_col=0) prs = pd.read_csv(path_data.joinpath('prs.csv'), index_col=0) comments = comments.query('author == @person').drop_duplicates() issues = issues.query('author == @person').drop_duplicates() closed_by = prs.query('mergedBy == @person') prs = prs.query('author == @person').drop_duplicates() # Time columns # Also drop dates outside of our range time_columns = ['updatedAt', 'createdAt', 'closedAt'] for col in time_columns: for item in [comments, issues, prs, closed_by]: if col not in item.columns: continue dt = pd.to_datetime(item[col]).dt.tz_localize(None) item[col] = dt item.query("updatedAt < @end_date and updatedAt > @start_date", inplace=True) ``` # Repository summaries ``` summaries = [] for idata, name in [(issues, 'issues'), (prs, 'prs'), (comments, 'comments')]: idata = idata.groupby(["repo", "org"]).agg({'id': "count"}).reset_index().rename(columns={'id': 'count'}) idata["kind"] = name summaries.append(idata) summaries 
= pd.concat(summaries) repo_summaries = summaries.groupby(["repo", "kind"]).agg({"count": "sum"}).reset_index() org_summaries = summaries.groupby(["org", "kind"]).agg({"count": "sum"}).reset_index() repo_summaries['logcount'] = np.log(repo_summaries["count"]) ch1 = alt.Chart(repo_summaries, width=600, title="Activity per repository").mark_bar().encode( x='repo', y='count', color='kind', tooltip="kind" ) ch2 = alt.Chart(repo_summaries, width=600, title="Log activity per repository").mark_bar().encode( x='repo', y='logcount', color='kind', tooltip="kind" ) ch1 | ch2 alt.Chart(org_summaries, width=600).mark_bar().encode( x='org', y='count', color='kind', tooltip="org" ) ``` # By repository over time ## Comments ``` comments_time = comments.groupby('repo').resample('W', on='createdAt').count()['author'].reset_index() comments_time = comments_time.rename(columns={'author': 'count'}) comments_time_total = comments_time.groupby('createdAt').agg({"count": "sum"}).reset_index() ch1 = alt.Chart(comments_time, width=600).mark_line().encode( x='createdAt', y='count', color='repo', tooltip="repo" ) ch2 = alt.Chart(comments_time_total, width=600).mark_line(color="black").encode( x='createdAt', y='count', ) ch1 + ch2 ``` ## PRs ``` prs_time = prs.groupby('repo').resample('W', on='createdAt').count()['author'].reset_index() prs_time = prs_time.rename(columns={'author': 'count'}) prs_time_total = prs_time.groupby('createdAt').agg({"count": "sum"}).reset_index() ch1 = alt.Chart(prs_time, width=600).mark_line().encode( x='createdAt', y='count', color='repo', tooltip="repo" ) ch2 = alt.Chart(prs_time_total, width=600).mark_line(color="black").encode( x='createdAt', y='count', ) ch1 + ch2 closed_by_time = closed_by.groupby('repo').resample('W', on='closedAt').count()['author'].reset_index() closed_by_time = closed_by_time.rename(columns={'author': 'count'}) alt.Chart(closed_by_time, width=600).mark_line().encode( x='closedAt', y='count', color='repo', tooltip="repo" ) ``` # By type 
over time ``` prs_time = prs[['author', 'createdAt']].resample('W', on='createdAt').count()['author'].reset_index() prs_time = prs_time.rename(columns={'author': 'prs'}) comments_time = comments[['author', 'createdAt']].resample('W', on='createdAt').count()['author'].reset_index() comments_time = comments_time.rename(columns={'author': 'comments'}) total_time = pd.merge(prs_time, comments_time, on='createdAt', how='outer') total_time = total_time.melt(id_vars='createdAt', var_name="kind", value_name="count") alt.Chart(total_time, width=600).mark_line().encode( x='createdAt', y='count', color='kind' ) ```
github_jupyter
``` import pandas as pd import datetime as dt ``` # testing it ## Review of Python's `datetime` Module ``` someday = dt.date(2010, 1, 20) someday.year someday.month someday.day str(someday) str(dt.datetime(2010, 1, 10, 17, 13, 57)) sometime = dt.datetime(2010, 1, 10, 17, 13, 57) sometime.year sometime.month sometime.day sometime.hour sometime.minute sometime.second ``` ## The `pandas Timestamp` Object ``` pd.Timestamp("2015-03-31") pd.Timestamp("2015/03/31") pd.Timestamp("2013, 11, 04") pd.Timestamp("1/1/2015") pd.Timestamp("19/12/2015") pd.Timestamp("12/19/2015") pd.Timestamp("4/3/2000") pd.Timestamp("2021-03-08 08:35:15") pd.Timestamp("2021-03-08 6:13:29 PM") pd.Timestamp(dt.date(2015, 1, 1)) pd.Timestamp(dt.datetime(2000, 2, 3, 21, 35, 22)) ``` ## The `pandas DateTimeIndex` Object ``` dates = ["2016/01/02", "2016/04/12", "2009/09/07"] pd.DatetimeIndex(dates) dates = [dt.date(2016, 1, 10), dt.date(1994, 6, 13), dt.date(2003, 12, 29)] dtIndex = pd.DatetimeIndex(dates) values = [100, 200, 300] pd.Series(data = values, index = dtIndex) ``` ## The `pd.to_datetime()` Method ``` pd.to_datetime("2001-04-19") pd.to_datetime(dt.date(2015, 1, 1)) pd.to_datetime(dt.datetime(2015, 1, 1, 14, 35, 20)) pd.to_datetime(["2015-01-03", "2014/02/08", "2016", "July 4th, 1996"]) times = pd.Series(["2015-01-03", "2014/02/08", "2016", "July 4th, 1996"]) times pd.to_datetime(times) dates = pd.Series(["July 4th, 1996", "10/04/1991", "Hello", "2015-02-31"]) dates pd.to_datetime(dates, errors = "coerce") pd.to_datetime([1349720105, 1349806505, 1349892905, 1349979305, 1350065705], unit = "s") pd.Period("2016-01-08", freq = "10D") dates = ["2016-01-01", "2016-02-01", "2016-03-01"] pd.Series([1, 2, 3], index = pd.PeriodIndex(dates, freq = "2M")) pd.Period("2016-01-08", freq = "W") pd.Period("2016-01-08", freq = "W-SUN") pd.Period("2016-01-08", freq = "W-WED") pd.Period("2015-12-10", freq = "10D") dates = ["2016-01-01", "2016-02-01", "2016-02-01"] pd.PeriodIndex(dates, freq = "W-MON") weeks = 
pd.PeriodIndex(dates, freq = "W-MON") pd.Series([999, 500, 325], index = weeks, name = "Weekly Revenue") ``` ## Create Range of Dates with the `pd.date_range()` Method, Part 1 ``` times = pd.date_range(start = "2016-01-01", end = "2016-01-10", freq = "D") type(times) type(times[0]) pd.date_range(start = "2016-01-01", end = "2050-01-01", freq = "A") ``` ## Create Range of Dates with the `pd.date_range()` Method, Part 2 ``` pd.date_range(start = "2012-09-09", periods = 50, freq = "6H") ``` ## Create Range of Dates with the `pd.date_range()` Method, Part 3 ``` pd.date_range(end = "1999-12-31", periods = 100, freq = "7H") ``` ## The `.dt` Accessor ``` bunch_of_dates = pd.date_range(start = "2000-01-01", end = "2010-12-31", freq = "24D") s = pd.Series(bunch_of_dates) s.head(3) mask = s.dt.is_month_end s[mask] ``` ## Import Financial Data Set with `pandas_datareader` Library ``` import pandas as pd import datetime as dt from pandas_datareader import data company = "MSFT" start = "2010-01-01" end = "2017-12-31" stocks = data.DataReader(name = company, data_source = "google", start = start, end = end) stocks.head(3) stocks.values stocks.columns stocks.index[0] stocks.axes ``` ## Selecting from a `DataFrame` with a `DateTimeIndex` ## `Timedeltas` in a Dataset ``` shipping = pd.read_csv("../data/ecommerce.csv", index_col = "ID", parse_dates = ["order_date", "delivery_date"]) shipping.head(3) shipping["Delivery Time"] = shipping["delivery_date"] - shipping["order_date"] shipping.head(3) shipping["Twice As Long"] = shipping["delivery_date"] + shipping["Delivery Time"] shipping.head(3) shipping.dtypes mask = shipping["Delivery Time"] == "3423 days" shipping[mask] shipping["Delivery Time"].min() ```
github_jupyter
# The Basic Tools of the Deep Life Sciences Welcome to DeepChem's introductory tutorial for the deep life sciences. This series of notebooks is a step-by-step guide for you to get to know the new tools and techniques needed to do deep learning for the life sciences. We'll start from the basics, assuming that you're new to machine learning and the life sciences, and build up a repertoire of tools and techniques that you can use to do meaningful work in the life sciences. **Scope:** This tutorial will encompass both the machine learning and data handling needed to build systems for the deep life sciences. ## Colab This tutorial and the rest in the sequences are designed to be done in Google colab. If you'd like to open this notebook in colab, you can use the following link. [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/deepchem/deepchem/blob/master/examples/tutorials/The_Basic_Tools_of_the_Deep_Life_Sciences.ipynb) ## Why do the DeepChem Tutorial? **1) Career Advancement:** Applying AI in the life sciences is a booming industry at present. There are a host of newly funded startups and initiatives at large pharmaceutical and biotech companies centered around AI. Learning and mastering DeepChem will bring you to the forefront of this field and will prepare you to enter a career in this field. **2) Humanitarian Considerations:** Disease is the oldest cause of human suffering. From the dawn of human civilization, humans have suffered from pathogens, cancers, and neurological conditions. One of the greatest achievements of the last few centuries has been the development of effective treatments for many diseases. By mastering the skills in this tutorial, you will be able to stand on the shoulders of the giants of the past to help develop new medicine. 
**3) Lowering the Cost of Medicine:** The art of developing new medicine is currently an elite skill that can only be practiced by a small core of expert practitioners. By enabling the growth of open source tools for drug discovery, you can help democratize these skills and open up drug discovery to more competition. Increased competition can help drive down the cost of medicine. ## Getting Extra Credit If you're excited about DeepChem and want to get more involved, there are some things that you can do right now: * Star DeepChem on GitHub! - https://github.com/deepchem/deepchem * Join the DeepChem forums and introduce yourself! - https://forum.deepchem.io * Say hi on the DeepChem gitter - https://gitter.im/deepchem/Lobby * Make a YouTube video teaching the contents of this notebook. ## Prerequisites This tutorial sequence will assume some basic familiarity with the Python data science ecosystem. We will assume that you have familiarity with libraries such as Numpy, Pandas, and TensorFlow. We'll provide some brief refreshers on basics through the tutorial so don't worry if you're not an expert. ## Setup The first step is to get DeepChem up and running. We recommend using Google Colab to work through this tutorial series. You'll need to run the following commands to get DeepChem installed on your colab notebook. Note that this will take something like 5 minutes to run on your colab instance. ``` !curl -Lo conda_installer.py https://raw.githubusercontent.com/deepchem/deepchem/master/scripts/colab_install.py import conda_installer conda_installer.install() !/root/miniconda/bin/conda info -e !pip install --pre deepchem ``` You can of course run this tutorial locally if you prefer. In this case, don't run the above cell since it will download and install Anaconda on your local machine. In either case, we can now import the `deepchem` package to play with. 
``` import deepchem as dc dc.__version__ ``` # Training a Model with DeepChem: A First Example Deep learning can be used to solve many sorts of problems, but the basic workflow is usually the same. Here are the typical steps you follow. 1. Select the data set you will train your model on (or create a new data set if there isn't an existing suitable one). 2. Create the model. 3. Train the model on the data. 4. Evaluate the model on an independent test set to see how well it works. 5. Use the model to make predictions about new data. With DeepChem, each of these steps can be as little as one or two lines of Python code. In this tutorial we will walk through a basic example showing the complete workflow to solve a real world scientific problem. The problem we will solve is predicting the solubility of small molecules given their chemical formulas. This is a very important property in drug development: if a proposed drug isn't soluble enough, you probably won't be able to get enough into the patient's bloodstream to have a therapeutic effect. The first thing we need is a data set of measured solubilities for real molecules. One of the core components of DeepChem is MoleculeNet, a diverse collection of chemical and molecular data sets. For this tutorial, we can use the Delaney solubility data set. ``` tasks, datasets, transformers = dc.molnet.load_delaney(featurizer='GraphConv') train_dataset, valid_dataset, test_dataset = datasets ``` I won't say too much about this code right now. We will see many similar examples in later tutorials. There are two details I do want to draw your attention to. First, notice the `featurizer` argument passed to the `load_delaney()` function. Molecules can be represented in many ways. We therefore tell it which representation we want to use, or in more technical language, how to "featurize" the data. Second, notice that we actually get three different data sets: a training set, a validation set, and a test set. 
Each of these serves a different function in the standard deep learning workflow. Now that we have our data, the next step is to create a model. We will use a particular kind of model called a "graph convolutional network", or "graphconv" for short. ``` model = dc.models.GraphConvModel(n_tasks=1, mode='regression', dropout=0.2) ``` Here again I will not say much about the code. Later tutorials will give lots more information about `GraphConvModel`, as well as other types of models provided by DeepChem. We now need to train the model on the data set. We simply give it the data set and tell it how many epochs of training to perform (that is, how many complete passes through the data to make). ``` model.fit(train_dataset, nb_epoch=100) ``` If everything has gone well, we should now have a fully trained model! But do we? To find out, we must evaluate the model on the test set. We do that by selecting an evaluation metric and calling `evaluate()` on the model. For this example, let's use the Pearson correlation, also known as r<sup>2</sup>, as our metric. We can evaluate it on both the training set and test set. ``` metric = dc.metrics.Metric(dc.metrics.pearson_r2_score) print("Training set score:", model.evaluate(train_dataset, [metric], transformers)) print("Test set score:", model.evaluate(test_dataset, [metric], transformers)) ``` Notice that it has a higher score on the training set than the test set. Models usually perform better on the particular data they were trained on than they do on similar but independent data. This is called "overfitting", and it is the reason it is essential to evaluate your model on an independent test set. Our model still has quite respectable performance on the test set. For comparison, a model that produced totally random outputs would have a correlation of 0, while one that made perfect predictions would have a correlation of 1. Our model does quite well, so now we can use it to make predictions about other molecules we care about. 
Since this is just a tutorial and we don't have any other molecules we specifically want to predict, let's just use the first ten molecules from the test set. For each one we print out the chemical structure (represented as a SMILES string) and the predicted solubility. To put these predictions in context, we print out the solubility values from the test set as well. ``` solubilities = model.predict_on_batch(test_dataset.X[:10]) for molecule, solubility, test_solubility in zip(test_dataset.ids, solubilities, test_dataset.y): print(solubility, test_solubility, molecule) ``` # Congratulations! Time to join the Community! Congratulations on completing this tutorial notebook! If you enjoyed working through the tutorial, and want to continue working with DeepChem, we encourage you to finish the rest of the tutorials in this series. You can also help the DeepChem community in the following ways: ## Star DeepChem on [GitHub](https://github.com/deepchem/deepchem) This helps build awareness of the DeepChem project and the tools for open source drug discovery that we're trying to build. ## Join the DeepChem Gitter The DeepChem [Gitter](https://gitter.im/deepchem/Lobby) hosts a number of scientists, developers, and enthusiasts interested in deep learning for the life sciences. Join the conversation!
github_jupyter
# stockmanager stockmanager has the following main modules: - Ticker: a class to retrieve price, company info of a ticker. - visualization: a set of visualization functions, e.g. plot_price() - Portfolio: a class ``` from stockmanager import Ticker, Portfolio, plot_price # For debugging: import matplotlib.pyplot as plt import numpy as np import mplfinance as mpf msft = Ticker('MSFT') price = msft.get_price(period='1mo', interval='1d') plot_price(price, backend='matplotlib', mav=[2,5], title=msft.name, type='ohlc') fig.update_layout(title={'text': msft.name, 'xanchor': 'auto'}, yaxis_title='Price', xaxis=dict(tickangle=-90)) fig.show() plot_price(price, type='line') plot(price, type='candle', mav=5) plot(price, type='line') import chart_studio.plotly as py import plotly.figure_factory as ff import pandas as pd price import chart_studio.plotly as py import plotly.graph_objects as go data = [go.Bar(x=price.Close, y=price.index)] # py.offline.iplot(data, filename='jupyter-basic_bar') import plotly print(plotly.__version__) import plotly.express as px def plotly(): fig = px.line(price, x=price.index, y="Close", title='Price') fig.show() plotly() plot(price, type='line') import plotly.graph_objects as go def price_plot_with_plotly(price): """Use plotly to plot the stock data Parameters ---------- price : pd.DataFrame price data frame """ show_hours = True if show_hours: pstr = [p.strftime("%y-%m-%d (%H:%M:%S)") for p in price.index.to_list()] else: pstr = [p.strftime("%y-%m-%d") for p in price.index.to_list()] fig = go.Figure() fig.add_trace(go.Scatter(x=pstr, y=price.Close, line=dict(color='royalblue'))) fig.update_layout(title='Stock Price Chart', yaxis_title='Price', xaxis = dict(tickangle=-90)) fig.show() price_plot_with_plotly(price) def price_plot_with_plotly(price): """Use plotly to plot the stock data Parameters ---------- price : pd.DataFrame price data frame """ show_hours = True has_ohlc, ohlc = get_ohlc(price) if show_hours: pstr = [p.strftime("%y-%m-%d 
(%H:%M:%S)") for p in price.index.to_list()] else: pstr = [p.strftime("%y-%m-%d") for p in price.index.to_list()] fig = go.Figure() fig.add_trace(go.Candlestick(x=pstr, open=ohlc[0], high=ohlc[1], low=ohlc[2], close=ohlc[3])) fig.update_layout(title='Stock Price Chart', yaxis_title='Price', xaxis = dict(tickangle=-90)) fig.show() price_plot_with_plotly(price) ```
github_jupyter
# Naive Bayes Classifiers

We want to classify vectors of discrete value features, $\mathbf{x}\in\{1,\ldots,K\}^D$, where $K$ is the number of values for each feature, and $D$ is the number of features. If we use a generative approach, we will need to specify the class conditional distribution $p(\mathbf{x}|y=c), c\in\{1, 2, \ldots, C\}$, where $C$ is the number of classes. The simplest approach is to assume that the features are **conditionally independent** given the class label. This means we can write the class conditional density as a product of one-dimensional densities: $$ p(\mathbf{x}|y=c, \theta) = \prod_{j=1}^Dp(x_j|y=c,\theta_{jc}) $$ The resulting model is called a Naive Bayes Classifier (NBC). This has $O(CD)$ parameters, for $C$ classes and $D$ features.

## Model fitting

'Training' a Naive Bayes classifier usually means computing the MLE or MAP estimate for the parameters. We can also compute the full posterior $p(\theta|\mathcal{D})$. Let's assume throughout that each vector $\mathbf{x}$ is in the set $\{0, 1\}^D$ for the integer $D$, which is the number of features. In other words, each component $x_j$ for $j=1,\ldots,D$ can take one of two possible values.

### Example: classifying documents into $K$ different categories

For example, $y=1$ might correspond to a *sports* category, $y=2$ might correspond to a *politics* category, and so on. The label $y^{(i)}$ represents the category of the $i$-th document in the dataset. Each component $x_j^{(i)}$ for $j=1,\ldots,D$ might represent the presence or absence of a particular word. For example, we might define $x_1^{(i)}$ to be 1 if the $i$-th document contains the word *Giants*, or zero otherwise; $x_2^{(i)}$ to be 1 if the $i$-th document contains the word *Obama* and zero otherwise, and so on. The Naive Bayes model is derived as follows: we assume random variables $Y$ and $\mathbf{x}_1,\ldots,\mathbf{x}_D$ corresponding to the label $y$ and vector components $x_1,x_2,\ldots,x_D$.
Our task is to model the joint probability $$ p(y=c, \mathbf{x}_1=x_1, \mathbf{x}_2=x_2, \ldots, \mathbf{x}_D = x_D) $$ for any label $y$ paired with attribute values $x_1,\ldots,x_D$. A key idea in the Naive Bayes model is the following assumption: \begin{aligned} p(y=c, \mathbf{x}_1=x_1, \mathbf{x}_2=x_2, \ldots, \mathbf{x}_D = x_D) = p(y=c)\prod_{j=1}^D p(\mathbf{x}_j=x_j|y=c) \end{aligned} This means we can write $$ p(\mathbf{x}|y = c) = \prod_{j=1}^Dp(\mathbf{x}_j=x_j|y=c) $$ Following this equation, the NB model has two types of parameters: $\pi(y)$ for $y \in\{1,\ldots,K\}$, with $$ p(y=c) = \pi(y) $$ and $\theta_j(x|y)$ for $j\in\{1,\ldots,D\}$, $x\in\{0, 1\}$, $y\in\{1,\ldots,K\}$, with $$ p(\mathbf{x}_j = x_j|y=c) = \theta_j(x|y) $$ We then have $$ p(y, x_1,\ldots, x_D) = \pi(y)\prod_{j=1}^D \theta_j(x_j|y) $$ ### Definition 1: Naive Bayes Model A Naive Bayes model consists of an integer $K$ specifying the number of possible labels, an integer $D$ specifying the number of features, and in addition the following parameters: * $\pi(y)$ for any $y\in\{1,\ldots,K\}$. The parameter $\pi(y)$ can be interpreted as the probability of seeing the label $y$. We have the constraints $\pi(y)\ge 0$ and $\sum_{y=1}^K\pi(y)=1$. * A parameter $\theta_j(x|y)$ for any $j\in\{1,\ldots,D\}, x\in\{0, 1\}, y\in\{1,\ldots,K\}$. The value for $\theta_j(x|y)$ can be interpreted as the probability of feature $j$ taking the value $x$, conditioned on the underlying label being $y$. We have the constraints that $\theta_j(x|y)\ge 0$, and for all $y, j$, $\sum_{x\in\{0, 1\}}\theta_j(x|y) = 1$. 
Once the parameters have been estimated, given a new test example $\hat{\mathbf{x}} = (x_1,x_2,\ldots,x_D)$, the output of the NB classifier is $$ \mathrm{argmax}_{y\in\{1,\ldots,K\}}p(y,x_1,\ldots,x_D) = \mathrm{argmax}_{y\in\{1,\ldots,K\}}\left(\pi(y)\prod_{j=1}^D\theta_j(x_j|y)\right) $$ ### MLE the probability for a single data case is given by $$ p(\mathbf{x}_i, y_i|\theta) = p(y_i|\pi)\prod_jp(x_{ij}|\theta_j) = \prod_c\pi_c^{\mathbb{I}(y_i=c)}\prod_j\prod_c p(x_{ij}|\boldsymbol\theta_{jc})^{\mathbb{I}(y_i=c)} $$ Hence the log-likelihood is given by $$ \log p(\mathcal{D}|\boldsymbol\theta) = \sum_{c=1}^C N_c\log\pi_c + \sum_{j=1}^D\sum_{c=1}^C\sum_{i:y_i=c}\log p(x_{ij}|\boldsymbol\theta_{jc}) $$ We see that this expression decomposes into a series of terms, one concerning $\boldsymbol\pi$, and $DC$ terms containing the $\boldsymbol\theta_{jc}$s. Hence we can optimize all parameters separately. The MLE for the class prior is given by $$ \hat{\pi}_c = \frac{N_c}{N} $$ where $N_c\triangleq \sum_i\mathbb{I}(y_i=c)$ is the number of examples in class $c$. The MLE for the likelihood depends on the type of distribution we choose to use for each feature. For simplicity, let us suppose all features are binary, so $x_j|y = c\sim \mathrm{Ber}(\theta_{jc})$. In this case, the MLE becomes $$ \hat{\theta}_{jc} = \frac{N_{jc}}{N_c} $$ ## Bayesian Naive Bayes The trouble with maximum likelihood is that it can overfit. A simple solution to overfitting is to be Bayesian. For simplicity, we will use a factored prior: $$ p(\boldsymbol\theta) = p(\boldsymbol\pi)\prod_{j=1}^D\prod_{c=1}^Cp(\theta_{jc}) $$ We will use a $\mathrm{Dir}(\boldsymbol\alpha)$ prior for $\boldsymbol\pi$ and a $\mathrm{Beta}(\beta_0, \beta_1)$ prior for each $\theta_{jc}$. Often we just take $\boldsymbol\alpha=1$ and $\boldsymbol\beta=1$ corresponding to add-one or Laplace smoothing. 
Combining the factored likelihood with the factored prior above, gives the following factored posterior \begin{aligned} p(\boldsymbol\theta|\mathcal{D}) & = p(\boldsymbol\pi|\mathcal{D})\prod_{j=1}^D\prod_{c=1}^C p(\theta_{jc}|\mathcal{D}) \\ p(\boldsymbol\pi|\mathcal{D}) & = \mathrm{Dir}(N_1 + \alpha_1, \ldots, N_C+\alpha_C) \\ p(\theta_{jc}|\mathcal{D}) & = \mathrm{Beta}((N_c - N_{jc}) + \beta_0, N_{jc} +\beta_1) \end{aligned} In other words, to compute the posterior, we just update the prior counts with empirical counts from the likelihood.
github_jupyter
### Лекция 5. Шаблоны <br /> ##### Какая идея стоит за шаблонами Ранее мы познакомились с возможностью перегрузки функций. Давайте вспомним её на примере swap: ```c++ // поменять местами два int void my_swap(int& a, int& b) { int tmp = a; a = b; b = tmp; } // поменять местами два short void my_swap(short& a, short& b) { short tmp = a; a = b; b = tmp; } // поменять местами два float void my_swap(float& a, float& b) { float tmp = a; a = b; b = tmp; } ... ``` Вечер начинает быть томным ... Для решения проблем дублирования кода придуманы шаблоны: ```c++ // напишем шаблон - как должна выглядеть функция template<typename T> void my_swap(T& a, T& b) { T tmp = a; a = b; b = tmp; } ``` Применение шаблона: ```c++ int a = 3, b = 5; // вызов my_swap(int&, int&), тип T указывается программистом явно my_swap<int>(a, b); // вызов my_swap(int&, int&), тип T выводится компилятором автоматически my_swap(a, b); float x = 3.f, y = 5.f; my_swap(x, y); my_swap<float>(x, y); ``` Важное свойство шаблонов по сравнению с перегрузкой функций: * шаблон компилируется только тогда, когда он вызывается * шаблон компилируется только для тех типов, для которых он вызывается. _и в каждом cpp-файле шаблон компилируется снова и снова_ Показать пример на godbolt.org, позакомментировать функции, продемонстрировать разницу в выхлопе компилятора. 
```c++ #include <string> template<typename T> void __attribute__ ((noinline)) myswap(T& a, T& b) { T tmp = a; a = b; b = tmp; } int main() { int i1 = 3, i2 = 5; myswap(i1, i2); float f1 = 3.f, f2 = 5.f; myswap(f1, f2); double d1 = 3., d2 = 5.; myswap(d1, d2); std::string s1 = "abc", s2 = "def"; myswap(s1, s2); return 0; } ``` Особенности шаблонов по сравнению с перегруженными функциями: * компилируется только то, что инстанциируется в коде * компилируется столько раз, в скольки единицах трансляции инстанциируется: * можно в одном cpp-файле 10 раз позвать myswap(int&, int&) - эта функция скомпилируется единожды * можно в 10 cpp-файлах один раз позвать myswap(int&, int&) - эта функция скомпилируется 10 раз * накладные расходы во время компиляции на кодогенерацию при истанциации * позволяет компилятору агрессивнее оптимизировать. Раскомментировать `__attribute__((noinline))` из примера и показать какой код сгенерирует компилятор. Объяснить, почему. * позволяет нарушать ODR Коротко: * (+) меньше кода * (+) быстрее * (-) дольше компилируется * (-) сложнее писать __Вопросы__: * Где поместить шаблонную функцию, которую нужно использовать в разных cpp-файлах? * Где поместить её реализацию? * Может ли шаблонная функция содержать некомпилирующийся код? <br /> ##### Специализация Перегрузка функций позволяла сделать `myswap` у `std::string` более эффективно, без лишнего копирования памяти: ```c++ void myswap(int& a, int& b) { ... } void myswap(short& a, short& b) { ... 
} void myswap(std::string& a, std::string& b) { a.swap(b); } ``` Шаблоны тоже позволяют специализировать поведение функций, если наложить на шаблонный параметр ограничение, например: (закинуть этот код на godbolt, показать во что компилируется программа) ```c++ #include <string> template<typename T> void __attribute__ ((noinline)) myswap(T& a, T& b) { T tmp = a; a = b; b = tmp; } template<> void __attribute__ ((noinline)) myswap<std::string>(std::string& a, std::string& b) { a.swap(b); } int main() { int i1 = 3, i2 = 5; myswap(i1, i2); float f1 = 3.f, f2 = 5.f; myswap(f1, f2); double d1 = 3., d2 = 5.; myswap(d1, d2); std::string s1 = "abc", s2 = "def"; myswap(s1, s2); return 0; } ``` Во-первых, шаблон может иметь несколько параметров, а во-вторых, параметры не обязаны быть типами. Они могут быть, например, целыми числами: ```c++ template<int N, typename T> T add_value(T x) { return x + N; } int a = add_value<5, int>(100); int a = add_value<5>(100); // 1. шаблон специализирован программистом частично, тип Т компилятор определит сам // 2. параметром шаблона выступает целое число. ``` __Вопрос__: Какую информацию здесь компилятор использует, чтобы вывести тип `T`? <br /> ##### Шаблонные классы Аналогично функциям, классы тоже могут быть шаблонными: Пример структуры: ```c++ // N-мерный вектор из курса линейной алгебры типа T template<typename T, int N> struct VectorN { T data[N]; }; // в качестве примера запишем операции сложения и умножения для таких векторов template<typename T, int N> VectorN<T, N> operator +(const VectorN<T, N>& l, const VectorN<T, N>& r) { VectorN<T, N> rv; for (int i = 0; i < N; ++i) rv.data[i] = l.data[i] + r.data[i]; return rv; } template<typename T, int N> T operator * (const VectorN<T, N>& l, const VectorN<T, N>& r) { T rv = 0; for (int i = 0; i < N; ++i) rv += l.data[i] * r.data[i]; return rv; } ``` Пример шаблонного класса: Напишем свой собственный `optional`. 
Он будет не так хорош, как `std::optional`, потому что мы не знаем пока всех необходимых трюков С++, но для начала пойдёт. Идея `optional` - класс-обёртка над значением, которое может отсутствовать. Для начала объявим шаблонный класс: ```c++ template<typename T> class Optional { ... }; ``` Добавим в класс поля: ```c++ template<typename T> class Optional { private: T value_; bool has_value_; ... }; ``` **Вопрос:** Мы выбрали способ хранения объекта как поле класса, потому что так проще для демонстрации. Какие у него недостатки? Как их обойти? <details> <summary>Ответ</summary> * Недостаток - объект существует даже когда он не нужен * Вариант обхода 1 - хранить объект в куче через new. Проблема: расходы на new/delete. * Вариант обхода 2 - кусок сырой памяти под объект + placement new и ручной вызов деструкторов (трюк в std::optional) </details> Добавим конструкторы: ```c++ template<typename T> class Optional { ... public: Optional() : has_value_(false) {} Optional(const T& another_value) : value_(another_value) , has_value_(true) {} }; // // usage: // Optional<std::string> maybe_string_1; Optional<std::string> maybe_string_2("hello world"); ``` **Вопрос:** Почему `const &` у `another_value` ? **Вопрос:** Что мы должны срочно добавить в класс? Срочно добавим: ```c++ template<typename T> class Optional { ... public: Optional(const Optional&) = default; Optional(Optional&&) = default; Optional& operator = (const Optional&) = default; Optional& operator = (Optional&&) = default; ~Optional() = default; }; // // usage: // Optional<std::string> maybe_string_1("hello world"); Optional<std::string> maybe_string_2 = maybe_string_1; Optional<std::string> maybe_string_3 = std::move(maybe_string_1); ``` Добавим немного функционала: ```c++ template<typename T> class Optional { ... public: bool has_value() const { return has_value_; } T& get_value() { return value_; } const T& get_value() const { return value_; } T* get_ptr() { return has_value_ ? 
&value_ : nullptr; } const T* get_ptr() const { return has_value_ ? &value_ : nullptr; } void reset() { value_ = T(); has_value_ = false; } void reset(const T& another_value) { value_ = another_value; has_value_ = true; } void emplace() { value_ = T(); has_value_ = true; } }; // // usage: // Optional<std::string> maybe_string_1("hello world"); if (maybe_string_1.has_value()) std::cout << maybe_string_1.get_value() << std::endl; if (std::string* s = maybe_string_1.get_ptr()) std::cout << *s << std::endl; maybe_string_1.reset(); maybe_string_1.emplace(); ``` Добавим операторы сравнения: ```c++ template<typename T> class Optional { ... }; template<typename T> bool operator == (const Optional<T>& lhs, const Optional<T>& rhs) { if (!lhs.has_value()) return !rhs.has_value(); if (!rhs.has_value()) return false; return lhs.get_value() == rhs.get_value(); } template<typename T> bool operator != (const Optional<T>& lhs, const Optional<T>& rhs) { return !(lhs == rhs); } template<typename T> bool operator == (const Optional<T>& lhs, const T& rhs) { return lhs.has_value() && lhs.get_value() == rhs; } template<typename T> bool operator == (const T& lhs, const Optional<T>& rhs) { return rhs == lhs; } template<typename T> bool operator != (const Optional<T>& lhs, const T& rhs) { return !(lhs == rhs); } template<typename T> bool operator != (const T& lhs, const Optional<T>& rhs) { return !(lhs == rhs); } // // usage: // Optional<std::string> maybe_string_1("hello world"); Optional<std::string> maybe_string_2 = maybe_string_1; if (maybe_string_1 != maybe_string_2) std::cout << "unreachable!" << std::endl; Optional<int> maybe_int; if (maybe_string_1 == maybe_int) // compile-time error! std::cout << "unreachable!" << std::endl; ``` **Вопрос:** какая проблема в этом коде? ```c++ Optional<std::string> maybe_string("hello world"); if (maybe_string == "C++ is designed for faster code") std::cout << "Fast enough?" 
<< std::endl; ``` Решим эту проблему, добавим ещё один более хитрый оператор сравнения: ```c++ template<typename T, typename U> bool operator == (const Optional<T>& lhs, const U& rhs) { return lhs.has_value() && lhs.get_value() == rhs; } template<typename T, typename U> bool operator == (const T& lhs, const Optional<U>& rhs) { return rhs == lhs; } template<typename T, typename U> bool operator != (const Optional<T>& lhs, const U& rhs) { return !(lhs == rhs); } template<typename T, typename U> bool operator != (const T& lhs, const Optional<U>& rhs) { return !(rhs == lhs); } // // usage: // Optional<std::string> maybe_string_1("hello world"); if (maybe_string == "C++ is designed for faster code") std::cout << "Fast enough?" << std::endl; ``` Чтобы не было скучно, добавим в шаблонный класс шаблонный метод. ```c++ template<typename T> class Optional { ... public: // уже было void emplace(const T& another_value) { value_ = another_value; has_value_ = true; } // добавили template<typename U> void emplace(const U& source) { value_ = T(source); has_value_ = true; } }; // // usage: // Optional<std::string> maybe_string; maybe_string.emplace("hello world"); ``` **Вопрос:** Что поменялось при вызове `emplace`? Самое время добавить в класс шаблонный конструктор! ```c++ template<typename T> class Optional { ... public: // уже было Optional(const T& another_value) : value_(another_value) , has_value_(true) {} // добавили template<typename U> Optional(const U& source) : value_(source) , has_value_(true) {} }; // // usage: // Optional<std::string> maybe_string("hello world"); ``` **Вопрос:** Что поменялось в вызове конструктора от `const char*` ? **Вопрос:** Можем ли мы что-нибудь выиграть от шаблонного деструктора ? <br /> Пример целиком без операторов. Закинуть на godbolt, показать сколько каких методов генерируется, не забыть убрать оптимизации и demangle. 
```c++ #include <string> template<typename T> class Optional { private: T value_; bool has_value_; public: Optional() : has_value_(false) {} Optional(const T& another_value) : value_(another_value) , has_value_(true) {} template<typename U> Optional(const U& source) : value_(source) , has_value_(true) {} Optional(const Optional&) = default; Optional(Optional&&) = default; Optional& operator = (const Optional&) = default; Optional& operator = (Optional&&) = default; ~Optional() = default; bool has_value() const { return has_value_; } T& get_value() { return value_; } const T& get_value() const { return value_; } T* get_ptr() { return has_value_ ? &value_ : nullptr; } const T* get_ptr() const { return has_value_ ? &value_ : nullptr; } void reset() { value_ = T(); has_value_ = false; } void reset(const T& another_value) { value_ = another_value; has_value_ = true; } void emplace() { value_ = T(); has_value_ = true; } template<typename U> void emplace(const U& source) { value_ = T(source); has_value_ = true; } }; std::string f() { Optional<std::string> opt; opt.emplace("hello"); opt.emplace("C++"); opt.emplace("world"); opt.emplace(std::string("-hello")); opt.emplace(std::string("-C++")); opt.emplace(std::string("-world")); return opt.get_value(); } ``` <br /> ##### Специфика компиляции шаблонов Создадим свой собственный тип, которому запретим copy assignment: ```c++ class C { public: C() = default; C(const C&) = default; C(C&&) = default; C& operator = (const C&) = delete; // DELETE C& operator = (C&&) = default; ~C() = default; }; ``` Сделаем от него `Optional`: ```c++ Optional<C> opt; ``` **Вопрос:** Какие-нибудь возникли подозрения? <br /> Ответ: компилируются только те методы, что вызываются: ```c++ Optional<C> opt1; // ok Optional<C> opt2; // ok Optional<C> opt3(C()); // ok if (opt1 == opt2) // ok std::cout << "equal!" << std::endl; if (opt1 != opt3) // ok std::cout << "not equal!" 
<< std::endl; opt1.reset(); // ok opt1 = opt3; // compilation error: C::operator = (const C&) is deleted opt1.reset(C()); // compilation error: C::operator = (const C&) is deleted ``` <br /> ##### Member types ```c++ template<typename T> class Optional { public: using value_type = T; ... }; // // usage: // typename Optional<T>::value_type value; ``` Практический смысл этой конструкции станет понятен, когда будете делать итерирование по полиномам во втором домашнем задании. <br /> ##### just for fun: compile-time факториал Теперь мы разбираемся в шаблонах достаточно чтобы посчитать факториал во время компиляции на шаблонах (разобрать пример, показать результат в godbolt). Примечание: C++ значительно эволюционировал, и больше во время компиляции таким образом вычисления не проводят. Пример исключительно ученический. Compile-time вычисления будут рассмотрены в курсе далее. ```c++ template<unsigned N> struct Factorial { static const int value = N * Factorial<N - 1>::value; }; template<> struct Factorial<0> { static const int value = 1; }; int main() { return Factorial<10>::value; } ``` __Вопросы__: * Что делает следующий пример? ```c++ #include <cstdio> template<unsigned N> struct f { static const int value = f<N-1>::value + f<N-2>::value; }; template<> struct f<0> { static const int value = 0; }; template<> struct f<1> { static const int value = 1; }; int main() { printf("%i\n", f<45>::value); } ``` <br /> ##### Примеры шаблонных функций / классов из стандартной библиотеки Рассмотреть примеры подробнее, специализации (если есть). Показать секции member types, non-member functions etc. https://en.cppreference.com/w/cpp/numeric/complex https://en.cppreference.com/w/cpp/container/vector <br /> За пределами лекции: * Частичная специализация шаблонов (для классов) Во второй части курса про шаблоны: * SFINAE * variadic templates * type traits * tag dispatching <br /> **Замечания после лекции:** * По традиции осталось минут 30 от лекции. 
Это время можно разбавить примерами или рассказать что-нибудь полезное. * Переделать материал по member types. Сейчас сумбурно и не очевидно, зачем он нужен.
github_jupyter
# Lista 01 - EDA + Visualização ``` # -*- coding: utf 8 from matplotlib import pyplot as plt import pandas as pd import numpy as np plt.style.use('seaborn-colorblind') plt.ion() ``` # Exercício 01: Em determinadas épocas do ano a venda de certos produtos sofre um aumento significativo. Um exemplo disso, são as vendas de sorvete que aumentam bastante no verão. Além do sorvete, outros itens como protetor solar e vestuário de banho podem ganhar maior atenção durante essa época do ano enquanto outros produtos podem não ser tão valorizados. Neste primeiro exercício, implemente a função abaixo que recebe quatro listas e cria um dataframe das quatro. A primeira lista será o índice do seu dataframe. A última, o nome das colunas. Por exemplo, ao passar: ```python ice_cream = [3000, 2600, 1400, 1500, 1200, 500, 300, 400, 700, 600, 800, 1900] sunglasses = [1000, 800, 100, 70, 50, 190, 60, 50, 100, 120, 130, 900] coats = [10, 20, 80, 120, 100, 500, 900, 780, 360, 100, 120, 20] labels = ["Jan", "Fev", "Mar", "Abr", "Mai", "Jun", "Jul", "Ago", "Set", "Out", "Nov", "Dez"] names = ["icecream", "sunglasses", "coats"] cria_df(labels, ice_cream, sunglasses, coats, names) ``` A tabela deve ser da forma: ``` icecream sunglasses coats ------------------------------------ Jan 3000 1000 10 Fev 2600 800 20 ... ... ... ... Dez 1900 900 20 ``` __Dica__ Usar `list(zip(colunas))`. Ou, montar um dicionário na mão. 
``` def cria_df(labels, coluna1, coluna2, coluna3, names): total = list(zip(coluna1, coluna2, coluna3)) resultado = pd.DataFrame(data=total, columns=names, index=labels) return resultado ice_cream = [3000, 2600, 1400, 1500, 1200, 500, 300, 400, 700, 600, 800, 1900] sunglasses = [1000, 800, 100, 70, 50, 190, 60, 50, 100, 120, 130, 900] coats = [10, 20, 80, 120, 100, 500, 900, 780, 360, 100, 120, 20] labels = ["Jan", "Fev", "Mar", "Abr", "Mai", "Jun", "Jul", "Ago", "Set", "Out", "Nov", "Dez"] names = ["icecream", "sunglasses", "coats"] df = cria_df(labels, ice_cream, sunglasses, coats, names) df ``` # Exercício 02: Agora, crie uma função que recebe seu dataframe e crie um gráfico de linhas mostrando a evolução das vendas dos produtos ao longo dos meses em porcentagem. Ou seja, um gráfico relacionando a porcentagem de produtos vendidos naquele mês em relação ao ano como um todo para as vendas de sorvetes, óculos de sol e casacos. Seu gráfico deve parecer com o plot abaixo: ``` # Note as duas linhas de código abaixo não é a resposta!!! Estou apenas mostrando a imagem que espero! from IPython.display import Image Image('plot1.png') x = [i for i in range(0, len(labels))] y = df.values / np.array(df.sum()) data = pd.DataFrame(data=y, columns=names, index=labels) grafico = data.plot(title="Sales", linewidth=3) grafico.set_ylabel("% sold") plt.xticks(x, labels) grafico ``` # Exercício 03: Utilizando os mesmos dados do exercício anterior, crie uma função que faz um scatter plot entre **icecream** e as outras duas colunas.. __Dicas:__ 1. "_Correlação não é o mesmo que causalidade!_" 1. Abaixo novamente mostramos exemplos de figuras que você pode gerar. 
``` Image('plot2.png') Image('plot3.png') #Exemplo: ice_cream = [3000, 2600, 1400, 1500, 1200, 500, 300, 400, 700, 600, 800, 1900] sunglasses = [1000, 800, 100, 70, 50, 190, 60, 50, 100, 120, 130, 900] coats = [10, 20, 80, 120, 100, 500, 900, 780, 360, 100, 120, 20] labels = ["Jan", "Fev", "Mar", "Abr", "Mai", "Jun", "Jul", "Ago", "Set", "Out", "Nov", "Dez"] def scatter(df): for column in df: if column != 'icecream': df.plot(x='icecream', y=column, style='o', legend=False) plt.ylabel(column) scatter(df) ``` # Exercício 04: Agora vamos trabalhar com dados reais. Na mesma pasta deste notebook, encontra-se um `json` com os dados do site http://www.capitaldoscandidatos.info/. Sua tarefa será usar funções como `groupby` e `hist` para analisar tais dados. Diferente das perguntas anteriores, não vamos mais pedir para que você implemente funções. Ou seja, pode trabalhar diretamente nas células do Jupyter estilo um cientista de dados. Sua primeira tarefa será indicar os 10 partidos que em média mais lucraram depois da primeira eleição. Ou seja, a diferença de patrimônio entre 2014 (eleição 1) e 2018 (eleição 2). Assim, a célula de solução (abaixo, depois da célula que carrega os dados), deve criar uma variável `resposta`. A mesma é uma série pandas com os top 10 partidos que mais lucraram em média. **A resposta tem que ser um pd.Series, ou seja, uma única coluna!** __Dicas__ Não necessariamente para este trabalho, mas é sempre bom lembrar: 1. Você já aprendeu a programar e quando estiver repetindo muito chamadas, é um bom sinal que deve criar um função. 2. Notebooks não são IDEs, use para trabalho exploratório. ``` df = pd.read_json('capital.json') ax = df.groupby('sigla_partido')[['patrimonio_eleicao_1', 'patrimonio_eleicao_2']].sum() ax = ax.patrimonio_eleicao_2.sub(ax.patrimonio_eleicao_1).to_frame('resposta').sort_values(by='resposta', ascending=False) resposta = ax.head(10).T.squeeze() resposta ``` Plote sua resposta abaixo! 
``` resposta.plot.bar() ``` # Exercício 05: Por fim, plote o histograma dos valores acima (lucro entre eleições) para todos os partidos. Brinque com valores diferentes do número de bins e interprete os dados. Para que a correção funcione, use a chamada da seguinte forma. Brinque também com variações de histograma normalizado ou não. ``` df = pd.read_json('capital.json') # carregando os dados +1 vez, caso tenha alterado. ax.hist(bins=20) ```
github_jupyter
``` import numpy as np import matplotlib.pyplot as plt import seaborn as sns # Plot parameters sns.set() %pylab inline pylab.rcParams['figure.figsize'] = (4, 4) plt.rcParams['xtick.major.size'] = 0 plt.rcParams['ytick.major.size'] = 0 # Avoid inaccurate floating values (for inverse matrices in dot product for instance) # See https://stackoverflow.com/questions/24537791/numpy-matrix-inversion-rounding-errors np.set_printoptions(suppress=True) %%html <style> .pquote { text-align: left; margin: 40px 0 40px auto; width: 70%; font-size: 1.5em; font-style: italic; display: block; line-height: 1.3em; color: #5a75a7; font-weight: 600; border-left: 5px solid rgba(90, 117, 167, .1); padding-left: 6px; } .notes { font-style: italic; display: block; margin: 40px 10%; } img + em { text-align: center; display: block; color: gray; font-size: 0.9em; font-weight: 600; } </style> ``` $$ \newcommand\bs[1]{\boldsymbol{#1}} $$ <span class='notes'> This content is part of a series following the chapter 2 on linear algebra from the [Deep Learning Book](http://www.deeplearningbook.org/) by Goodfellow, I., Bengio, Y., and Courville, A. (2016). It aims to provide intuitions/drawings/python code on mathematical theories and is constructed as my understanding of these concepts. You can check the syllabus in the [introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/). </span> # Introduction We have seen in [2.3](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.3-Identity-and-Inverse-Matrices/) some interesting kind of matrices. We will see other type of vectors and matrices in this chapter. It is not a big chapter but it is important to understand the next ones. 
# 2.6 Special Kinds of Matrices and Vectors <img src="images/diagonal-and-symmetric-matrices.png" width="400" alt="Diagonal and symmetric matrices" title="Diagonal and symmetric matrices"> <em>Example of diagonal and symmetric matrices</em> # Diagonal matrices <img src="images/diagonal-matrix.png" width="150" alt="Example of a diagonal matrix" title="Diagonal matrix"> <em>Example of a diagonal matrix</em> A matrix $\bs{A}_{i,j}$ is diagonal if its entries are all zeros except on the diagonal (when $i=j$). ### Example 1. $$ \bs{D}= \begin{bmatrix} 2 & 0 & 0 & 0\\\\ 0 & 4 & 0 & 0\\\\ 0 & 0 & 3 & 0\\\\ 0 & 0 & 0 & 1 \end{bmatrix} $$ In this case the matrix is also square but there can be non square diagonal matrices. ### Example 2. $$ \bs{D}= \begin{bmatrix} 2 & 0 & 0\\\\ 0 & 4 & 0\\\\ 0 & 0 & 3\\\\ 0 & 0 & 0 \end{bmatrix} $$ Or $$ \bs{D}= \begin{bmatrix} 2 & 0 & 0 & 0\\\\ 0 & 4 & 0 & 0\\\\ 0 & 0 & 3 & 0 \end{bmatrix} $$ The diagonal matrix can be denoted $diag(\bs{v})$ where $\bs{v}$ is the vector containing the diagonal values. ### Example 3. $$ \bs{D}= \begin{bmatrix} 2 & 0 & 0 & 0\\\\ 0 & 4 & 0 & 0\\\\ 0 & 0 & 3 & 0\\\\ 0 & 0 & 0 & 1 \end{bmatrix} $$ In this matrix, $\bs{v}$ is the following vector: $$ \bs{v}= \begin{bmatrix} 2\\\\ 4\\\\ 3\\\\ 1 \end{bmatrix} $$ The Numpy function `diag()` can be used to create square diagonal matrices: ``` v = np.array([2, 4, 3, 1]) np.diag(v) ``` The mutliplication between a diagonal matrix and a vector is thus just a ponderation of each element of the vector by $v$: ### Example 4. 
$$ \bs{D}= \begin{bmatrix} 2 & 0 & 0 & 0\\\\ 0 & 4 & 0 & 0\\\\ 0 & 0 & 3 & 0\\\\ 0 & 0 & 0 & 1 \end{bmatrix} $$ and $$ \bs{x}= \begin{bmatrix} 3\\\\ 2\\\\ 2\\\\ 7 \end{bmatrix} $$ $$ \begin{align*} &\bs{Dx}= \begin{bmatrix} 2 & 0 & 0 & 0\\\\ 0 & 4 & 0 & 0\\\\ 0 & 0 & 3 & 0\\\\ 0 & 0 & 0 & 1 \end{bmatrix} \times \begin{bmatrix} 3\\\\ 2\\\\ 2\\\\ 7 \end{bmatrix}\\\\ &=\begin{bmatrix} 2\times3 + 0\times2 + 0\times2 + 0\times7\\\\ 0\times3 + 4\times2 + 0\times2 + 0\times7\\\\ 0\times3 + 0\times2 + 3\times2 + 0\times7\\\\ 0\times3 + 0\times2 + 0\times2 + 1\times7 \end{bmatrix}\\\\ &= \begin{bmatrix} 2\times3\\\\ 4\times2\\\\ 3\times2\\\\ 1\times7 \end{bmatrix} \end{align*} $$ Non square matrices have the same properties: ### Example 5. $$ \bs{D}= \begin{bmatrix} 2 & 0 & 0\\\\ 0 & 4 & 0\\\\ 0 & 0 & 3\\\\ 0 & 0 & 0 \end{bmatrix} $$ and $$ \bs{x}= \begin{bmatrix} 3\\\\ 2\\\\ 2 \end{bmatrix} $$ $$ \bs{Dx}= \begin{bmatrix} 2 & 0 & 0\\\\ 0 & 4 & 0\\\\ 0 & 0 & 3\\\\ 0 & 0 & 0 \end{bmatrix} \times \begin{bmatrix} 3\\\\ 2\\\\ 2 \end{bmatrix} = \begin{bmatrix} 2\times3\\\\ 4\times2\\\\ 3\times2\\\\ 0 \end{bmatrix} $$ The inverse of a square diagonal matrix exists if all entries of the diagonal are non-zeros. If it is the case, the inverse is easy to find. Also, the inverse doesn't exist if the matrix is non-square. 
$$ \bs{D}= \begin{bmatrix} 2 & 0 & 0 & 0\\\\ 0 & 4 & 0 & 0\\\\ 0 & 0 & 3 & 0\\\\ 0 & 0 & 0 & 1 \end{bmatrix} $$ $$ \bs{D}^{-1}= \begin{bmatrix} \frac{1}{2} & 0 & 0 & 0\\\\ 0 & \frac{1}{4} & 0 & 0\\\\ 0 & 0 & \frac{1}{3} & 0\\\\ 0 & 0 & 0 & \frac{1}{1} \end{bmatrix} $$ $$ \bs{D}= \begin{bmatrix} 2 & 0 & 0 & 0\\\\ 0 & 4 & 0 & 0\\\\ 0 & 0 & 3 & 0\\\\ 0 & 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} \frac{1}{2} & 0 & 0 & 0\\\\ 0 & \frac{1}{4} & 0 & 0\\\\ 0 & 0 & \frac{1}{3} & 0\\\\ 0 & 0 & 0 & \frac{1}{1} \end{bmatrix}= \begin{bmatrix} 1 & 0 & 0 & 0\\\\ 0 & 1 & 0 & 0\\\\ 0 & 0 & 1 & 0\\\\ 0 & 0 & 0 & 1 \end{bmatrix} $$ Let's check with Numpy that the multiplication of the matrix with its invert gives us the identity matrix: ``` A = np.array([[2, 0, 0, 0], [0, 4, 0, 0], [0, 0, 3, 0], [0, 0, 0, 1]]) A A_inv = np.array([[1/2., 0, 0, 0], [0, 1/4., 0, 0], [0, 0, 1/3., 0], [0, 0, 0, 1/1.]]) A_inv A.dot(A_inv) ``` Great! This gives the identity matrix # Symmetric matrices <img src="images/symmetric-matrix.png" width="150" alt="Illustration of a symmetric matrix" title="Symmetric matrix"> <em>Illustration of a symmetric matrix</em> The matrix $A$ is symmetric if it is equal to its transpose: $$ \bs{A} = \bs{A}^\text{T} $$ This concerns only square matrices. ### Example 6. $$ \bs{A}= \begin{bmatrix} 2 & 4 & -1\\\\ 4 & -8 & 0\\\\ -1 & 0 & 3 \end{bmatrix} $$ ``` A = np.array([[2, 4, -1], [4, -8, 0], [-1, 0, 3]]) A A.T ``` # Unit vectors A unit vector is a vector of length equal to 1. It can be denoted by a letter with a hat: $\hat{u}$ # Orthogonal vectors Two orthogonal vectors are separated by a 90° angle. The dot product of two orthogonal vectors gives 0. ### Example 7. 
``` x = [0,0,2,2] y = [0,0,2,-2] plt.quiver([x[0], y[0]], [x[1], y[1]], [x[2], y[2]], [x[3], y[3]], angles='xy', scale_units='xy', scale=1) plt.xlim(-2, 4) plt.ylim(-3, 3) plt.axvline(x=0, color='grey') plt.axhline(y=0, color='grey') plt.text(1, 1.5, r'$\vec{u}$', size=18) plt.text(1.5, -1, r'$\vec{v}$', size=18) plt.show() plt.close() ``` $$ \bs{x}= \begin{bmatrix} 2\\\\ 2 \end{bmatrix} $$ and $$ \bs{y}= \begin{bmatrix} 2\\\\ -2 \end{bmatrix} $$ $$ \bs{x^\text{T}y}= \begin{bmatrix} 2 & 2 \end{bmatrix} \begin{bmatrix} 2\\\\ -2 \end{bmatrix}= \begin{bmatrix} 2\times2 + 2\times-2 \end{bmatrix}=0 $$ In addition, when the norm of orthogonal vectors is the unit norm they are called **orthonormal**. <span class='pquote'> It is impossible to have more than $n$ vectors mutually orthogonal in $\mathbb{R}^n$. </span> It is impossible to have more than $n$ vectors mutually orthogonal in $\mathbb{R}^n$. For instance try to draw 3 vectors in a 2-dimensional space ($\mathbb{R}^2$) that are mutually orthogonal... # Orthogonal matrices Orthogonal matrices are important because they have interesting properties. A matrix is orthogonal if columns are mutually orthogonal and have a unit norm (orthonormal) and rows are mutually orthonormal and have unit norm. <img src="images/orthogonal-matrix.png" width="300" alt="Under the hood of an orthogonal matrix" title="Under the hood of an orthogonal matrix"> <em>Under the hood of an orthogonal matrix</em> $$ \bs{A}= \begin{bmatrix} A_{1,1} & A_{1,2}\\\\ A_{2,1} & A_{2,2} \end{bmatrix} $$ This means that $$ \begin{bmatrix} A_{1,1}\\\\ A_{2,1} \end{bmatrix} $$ and $$ \begin{bmatrix} A_{1,2}\\\\ A_{2,2} \end{bmatrix} $$ are orthogonal vectors and also that the rows $$ \begin{bmatrix} A_{1,1} & A_{1,2} \end{bmatrix} $$ and $$ \begin{bmatrix} A_{2,1} & A_{2,2} \end{bmatrix} $$ are orthogonal vectors (cf. above for definition of orthogonal vectors). 
## Property 1: $\bs{A^\text{T}A}=\bs{I}$ An orthogonal matrix has this property: $$ \bs{A^\text{T}A}=\bs{AA^\text{T}}=\bs{I} $$ We can see that this statement is true with the following reasoning: Let's have the following matrix: $$ \bs{A}=\begin{bmatrix} a & b\\\\ c & d \end{bmatrix} $$ and thus $$ \bs{A}^\text{T}=\begin{bmatrix} a & c\\\\ b & d \end{bmatrix} $$ Let's do the product: $$ \begin{align*} &\bs{A^\text{T}A}=\begin{bmatrix} a & c\\\\ b & d \end{bmatrix} \begin{bmatrix} a & b\\\\ c & d \end{bmatrix} = \begin{bmatrix} aa + cc & ab + cd\\\\ ab + cd & bb + dd \end{bmatrix}\\\\ &= \begin{bmatrix} a^2 + c^2 & ab + cd\\\\ ab + cd & b^2 + d^2 \end{bmatrix} \end{align*} $$ We saw in [2.5](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.5-Norms/) that the squared $L^2$ norm of the vector $\begin{bmatrix} a & c \end{bmatrix}$ is equal to $a^2+c^2$. In addition, we saw that the columns of $\bs{A}$ have a unit norm because $\bs{A}$ is orthogonal. This means that $a^2+c^2=1$ and $b^2+d^2=1$. So we now have: $$ \bs{A^\text{T}A}= \begin{bmatrix} 1 & ab + cd\\\\ ab + cd & 1 \end{bmatrix} $$ Also, $ab+cd$ corresponds to the product of $\begin{bmatrix} a & c \end{bmatrix}$ and $\begin{bmatrix} b & d \end{bmatrix}$: $$ \begin{bmatrix} a & c \end{bmatrix} \begin{bmatrix} b\\\\ d \end{bmatrix} = ab+cd $$ And we know that the columns are orthogonal which means that: $$ \begin{bmatrix} a & c \end{bmatrix} \begin{bmatrix} b\\\\ d \end{bmatrix}=0 $$ We thus have the identity matrix: $$ \bs{A^\text{T}A}=\begin{bmatrix} 1 & 0\\\\ 0 & 1 \end{bmatrix} $$ ## Property 2: $\bs{A}^\text{T}=\bs{A}^{-1}$ We can show that if $\bs{A^\text{T}A}=\bs{I}$ then $ \bs{A}^\text{T}=\bs{A}^{-1}$. 
If we multiply each side of the equation $\bs{A^\text{T}A}=\bs{I}$ by $\bs{A}^{-1}$ we have: $$ (\bs{A^\text{T}A})\bs{A}^{-1}=\bs{I}\bs{A}^{-1} $$ Recall from [2.3](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.3-Identity-and-Inverse-Matrices/) that a matrix or vector doesn't change when it is multiplied by the identity matrix. So we have: $$ (\bs{A^\text{T}A})\bs{A}^{-1}=\bs{A}^{-1} $$ We also saw in [2.2](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.2-Multiplying-Matrices-and-Vectors/) that matrix multiplication is associative so we can remove the parenthesis: $$ \bs{A^\text{T}A}\bs{A}^{-1}=\bs{A}^{-1} $$ We also know that $\bs{A}\bs{A}^{-1}=\bs{I}$ (see [2.3](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-2.3-Identity-and-Inverse-Matrices/)) so we can replace: $$ \bs{A^\text{T}}\bs{I}=\bs{A}^{-1} $$ This shows that $$\bs{A}^\text{T}=\bs{A}^{-1}$$ You can refer to [this question](https://math.stackexchange.com/questions/1936020/why-is-the-inverse-of-an-orthogonal-matrix-equal-to-its-transpose). ### Example 8. Sine and cosine are convenient to create orthogonal matrices. Let's take the following rotation matrix (the angle, 50, is in radians): $$ \bs{A}= \begin{bmatrix} \cos(50) & -\sin(50)\\\\ \sin(50) & \cos(50) \end{bmatrix} $$ ``` A = np.array([[np.cos(50), -np.sin(50)], [np.sin(50), np.cos(50)]]) A col0 = A[:, 0].reshape(A[:, 0].shape[0], 1) col1 = A[:, 1].reshape(A[:, 1].shape[0], 1) row0 = A[0, :].reshape(A[0, :].shape[0], 1) row1 = A[1, :].reshape(A[1, :].shape[0], 1) ``` Let's check that rows and columns are orthogonal: ``` col0.T.dot(col1) row0.T.dot(row1) ``` Let's check that $ \bs{A^\text{T}A}=\bs{AA^\text{T}}=\bs{I} $ and thus $ \bs{A}^\text{T}=\bs{A}^{-1} $ ``` A.T.dot(A) A.T np.linalg.inv(A) ``` Everything is correct! # Conclusion In this chapter we saw different interesting types of matrices with specific properties. It is generally useful to recall them while we deal with these kinds of matrices. 
In the next chapter we will see a central idea in linear algebra: the eigendecomposition. Keep reading! <span class='notes'> Feel free to drop me an email or a comment. The syllabus of this series can be found [in the introduction post](https://hadrienj.github.io/posts/Deep-Learning-Book-Series-Introduction/). All the notebooks can be found on [Github](https://github.com/hadrienj/deepLearningBook-Notes). </span> # References ## Inverse and transpose of orthogonal matrix - https://math.stackexchange.com/questions/1936020/why-is-the-inverse-of-an-orthogonal-matrix-equal-to-its-transpose - https://dyinglovegrape.wordpress.com/2010/11/30/the-inverse-of-an-orthogonal-matrix-is-its-transpose/
github_jupyter
##### Copyright 2020 The TensorFlow IO Authors. ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Robust machine learning on streaming data using Kafka and Tensorflow-IO <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/io/tutorials/kafka"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/io/blob/master/docs/tutorials/kafka.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/io/blob/master/docs/tutorials/kafka.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/io/docs/tutorials/kafka.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> ## Overview This tutorial focuses on streaming data from a [Kafka](https://kafka.apache.org/quickstart) cluster into a `tf.data.Dataset` which is then used in conjunction with `tf.keras` for training and inference. Kafka is primarily a distributed event-streaming platform which provides scalable and fault-tolerant streaming data across data pipelines. 
It is an essential technical component of a plethora of major enterprises where mission-critical data delivery is a primary requirement. **NOTE:** A basic understanding of the [kafka components](https://kafka.apache.org/documentation/#intro_concepts_and_terms) will help you in following the tutorial with ease. **NOTE:** A Java runtime environment is required to run this tutorial. ## Setup ### Install the required tensorflow-io and kafka packages ``` !pip install tensorflow-io !pip install kafka-python ``` ### Import packages ``` import os from datetime import datetime import time import threading import json from kafka import KafkaProducer from kafka.errors import KafkaError from sklearn.model_selection import train_test_split import pandas as pd import tensorflow as tf import tensorflow_io as tfio ``` ### Validate tf and tfio imports ``` print("tensorflow-io version: {}".format(tfio.__version__)) print("tensorflow version: {}".format(tf.__version__)) ``` ## Download and setup Kafka and Zookeeper instances For demo purposes, the following instances are setup locally: - Kafka (Brokers: 127.0.0.1:9092) - Zookeeper (Node: 127.0.0.1:2181) ``` !curl -sSOL https://dlcdn.apache.org/kafka/3.1.0/kafka_2.13-3.1.0.tgz !tar -xzf kafka_2.13-3.1.0.tgz ``` Using the default configurations (provided by Apache Kafka) for spinning up the instances. ``` !./kafka_2.13-3.1.0/bin/zookeeper-server-start.sh -daemon ./kafka_2.13-3.1.0/config/zookeeper.properties !./kafka_2.13-3.1.0/bin/kafka-server-start.sh -daemon ./kafka_2.13-3.1.0/config/server.properties !echo "Waiting for 10 secs until kafka and zookeeper services are up and running" !sleep 10 ``` Once the instances are started as daemon processes, grep for `kafka` in the processes list. The two java processes correspond to zookeeper and the kafka instances. 
``` !ps -ef | grep kafka ``` Create the kafka topics with the following specs: - susy-train: partitions=1, replication-factor=1 - susy-test: partitions=2, replication-factor=1 ``` !./kafka_2.13-3.1.0/bin/kafka-topics.sh --create --bootstrap-server 127.0.0.1:9092 --replication-factor 1 --partitions 1 --topic susy-train !./kafka_2.13-3.1.0/bin/kafka-topics.sh --create --bootstrap-server 127.0.0.1:9092 --replication-factor 1 --partitions 2 --topic susy-test ``` Describe the topic for details on the configuration ``` !./kafka_2.13-3.1.0/bin/kafka-topics.sh --describe --bootstrap-server 127.0.0.1:9092 --topic susy-train !./kafka_2.13-3.1.0/bin/kafka-topics.sh --describe --bootstrap-server 127.0.0.1:9092 --topic susy-test ``` The replication factor 1 indicates that the data is not being replicated. This is due to the presence of a single broker in our kafka setup. In production systems, the number of bootstrap servers can be in the range of 100's of nodes. That is where the fault-tolerance using replication comes into picture. Please refer to the [docs](https://kafka.apache.org/documentation/#replication) for more details. ## SUSY Dataset Kafka being an event streaming platform, enables data from various sources to be written into it. For instance: - Web traffic logs - Astronomical measurements - IoT sensor data - Product reviews and many more. For the purpose of this tutorial, lets download the [SUSY](https://archive.ics.uci.edu/ml/datasets/SUSY#) dataset and feed the data into kafka manually. The goal of this classification problem is to distinguish between a signal process which produces supersymmetric particles and a background process which does not. ``` !curl -sSOL https://archive.ics.uci.edu/ml/machine-learning-databases/00279/SUSY.csv.gz ``` ### Explore the dataset The first column is the class label (1 for signal, 0 for background), followed by the 18 features (8 low-level features then 10 high-level features). 
The first 8 features are kinematic properties measured by the particle detectors in the accelerator. The last 10 features are functions of the first 8 features. These are high-level features derived by physicists to help discriminate between the two classes. ``` COLUMNS = [ # labels 'class', # low-level features 'lepton_1_pT', 'lepton_1_eta', 'lepton_1_phi', 'lepton_2_pT', 'lepton_2_eta', 'lepton_2_phi', 'missing_energy_magnitude', 'missing_energy_phi', # high-level derived features 'MET_rel', 'axial_MET', 'M_R', 'M_TR_2', 'R', 'MT2', 'S_R', 'M_Delta_R', 'dPhi_r_b', 'cos(theta_r1)' ] ``` The entire dataset consists of 5 million rows. However, for the purpose of this tutorial, let's consider only a fraction of the dataset (100,000 rows) so that less time is spent on moving the data and more time on understanding the functionality of the API. ``` susy_iterator = pd.read_csv('SUSY.csv.gz', header=None, names=COLUMNS, chunksize=100000) susy_df = next(susy_iterator) susy_df.head() # Number of datapoints and columns len(susy_df), len(susy_df.columns) # Number of datapoints belonging to each class (0: background noise, 1: signal) len(susy_df[susy_df["class"]==0]), len(susy_df[susy_df["class"]==1]) ``` ### Split the dataset ``` train_df, test_df = train_test_split(susy_df, test_size=0.4, shuffle=True) print("Number of training samples: ",len(train_df)) print("Number of testing sample: ",len(test_df)) x_train_df = train_df.drop(["class"], axis=1) y_train_df = train_df["class"] x_test_df = test_df.drop(["class"], axis=1) y_test_df = test_df["class"] # The labels are set as the kafka message keys so as to store data # in multiple-partitions. Thus, enabling efficient data retrieval # using the consumer groups. 
x_train = list(filter(None, x_train_df.to_csv(index=False).split("\n")[1:])) y_train = list(filter(None, y_train_df.to_csv(index=False).split("\n")[1:])) x_test = list(filter(None, x_test_df.to_csv(index=False).split("\n")[1:])) y_test = list(filter(None, y_test_df.to_csv(index=False).split("\n")[1:])) NUM_COLUMNS = len(x_train_df.columns) len(x_train), len(y_train), len(x_test), len(y_test) ``` ### Store the train and test data in kafka Storing the data in kafka simulates an environment for continuous remote data retrieval for training and inference purposes. ``` def error_callback(exc): raise Exception('Error while sendig data to kafka: {0}'.format(str(exc))) def write_to_kafka(topic_name, items): count=0 producer = KafkaProducer(bootstrap_servers=['127.0.0.1:9092']) for message, key in items: producer.send(topic_name, key=key.encode('utf-8'), value=message.encode('utf-8')).add_errback(error_callback) count+=1 producer.flush() print("Wrote {0} messages into topic: {1}".format(count, topic_name)) write_to_kafka("susy-train", zip(x_train, y_train)) write_to_kafka("susy-test", zip(x_test, y_test)) ``` ### Define the tfio train dataset The `IODataset` class is utilized for streaming data from kafka into tensorflow. The class inherits from `tf.data.Dataset` and thus has all the useful functionalities of `tf.data.Dataset` out of the box. 
``` def decode_kafka_item(item): message = tf.io.decode_csv(item.message, [[0.0] for i in range(NUM_COLUMNS)]) key = tf.strings.to_number(item.key) return (message, key) BATCH_SIZE=64 SHUFFLE_BUFFER_SIZE=64 train_ds = tfio.IODataset.from_kafka('susy-train', partition=0, offset=0) train_ds = train_ds.shuffle(buffer_size=SHUFFLE_BUFFER_SIZE) train_ds = train_ds.map(decode_kafka_item) train_ds = train_ds.batch(BATCH_SIZE) ``` ## Build and train the model ``` # Set the parameters OPTIMIZER="adam" LOSS=tf.keras.losses.BinaryCrossentropy(from_logits=True) METRICS=['accuracy'] EPOCHS=10 # design/build the model model = tf.keras.Sequential([ tf.keras.layers.Input(shape=(NUM_COLUMNS,)), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.2), tf.keras.layers.Dense(256, activation='relu'), tf.keras.layers.Dropout(0.4), tf.keras.layers.Dense(128, activation='relu'), tf.keras.layers.Dropout(0.4), tf.keras.layers.Dense(1, activation='sigmoid') ]) print(model.summary()) # compile the model model.compile(optimizer=OPTIMIZER, loss=LOSS, metrics=METRICS) # fit the model model.fit(train_ds, epochs=EPOCHS) ``` Note: Please do not confuse the training step with online training. It's an entirely different paradigm which will be covered in a later section. Since only a fraction of the dataset is being utilized, our accuracy is limited to ~78% during the training phase. However, please feel free to store additional data in kafka for a better model performance. Also, since the goal was to just demonstrate the functionality of the tfio kafka datasets, a smaller and less-complicated neural network was used. However, one can increase the complexity of the model, modify the learning strategy, tune hyper-parameters etc for exploration purposes. For a baseline approach, please refer to this [article](https://www.nature.com/articles/ncomms5308#Sec11). 
## Infer on the test data To infer on the test data by adhering to the 'exactly-once' semantics along with fault-tolerance, the `streaming.KafkaGroupIODataset` can be utilized. ### Define the tfio test dataset The `stream_timeout` parameter blocks for the given duration for new data points to be streamed into the topic. This removes the need for creating new datasets if the data is being streamed into the topic in an intermittent fashion. ``` test_ds = tfio.experimental.streaming.KafkaGroupIODataset( topics=["susy-test"], group_id="testcg", servers="127.0.0.1:9092", stream_timeout=10000, configuration=[ "session.timeout.ms=7000", "max.poll.interval.ms=8000", "auto.offset.reset=earliest" ], ) def decode_kafka_test_item(raw_message, raw_key): message = tf.io.decode_csv(raw_message, [[0.0] for i in range(NUM_COLUMNS)]) key = tf.strings.to_number(raw_key) return (message, key) test_ds = test_ds.map(decode_kafka_test_item) test_ds = test_ds.batch(BATCH_SIZE) ``` Though this class can be used for training purposes, there are caveats which need to be addressed. Once all the messages are read from kafka and the latest offsets are committed using the `streaming.KafkaGroupIODataset`, the consumer doesn't restart reading the messages from the beginning. Thus, while training, it is possible only to train for a single epoch with the data continuously flowing in. This kind of a functionality has limited use cases during the training phase wherein, once a datapoint has been consumed by the model it is no longer required and can be discarded. However, this functionality shines when it comes to robust inference with exactly-once semantics. ### evaluate the performance on the test data ``` res = model.evaluate(test_ds) print("test loss, test acc:", res) ``` Since the inference is based on 'exactly-once' semantics, the evaluation on the test set can be run only once. In order to run the inference again on the test data, a new consumer group should be used. 
### Track the offset lag of the `testcg` consumer group ``` !./kafka_2.13-3.1.0/bin/kafka-consumer-groups.sh --bootstrap-server 127.0.0.1:9092 --describe --group testcg ``` Once the `current-offset` matches the `log-end-offset` for all the partitions, it indicates that the consumer(s) have completed fetching all the messages from the kafka topic. ## Online learning The online machine learning paradigm is a bit different from the traditional/conventional way of training machine learning models. In the former case, the model continues to incrementally learn/update it's parameters as soon as the new data points are available and this process is expected to continue indefinitely. This is unlike the latter approaches where the dataset is fixed and the model iterates over it `n` number of times. In online learning, the data once consumed by the model may not be available for training again. By utilizing the `streaming.KafkaBatchIODataset`, it is now possible to train the models in this fashion. Let's continue to use our SUSY dataset for demonstrating this functionality. ### The tfio training dataset for online learning The `streaming.KafkaBatchIODataset` is similar to the `streaming.KafkaGroupIODataset` in it's API. Additionally, it is recommended to utilize the `stream_timeout` parameter to configure the duration for which the dataset will block for new messages before timing out. In the instance below, the dataset is configured with a `stream_timeout` of `10000` milliseconds. This implies that, after all the messages from the topic have been consumed, the dataset will wait for an additional 10 seconds before timing out and disconnecting from the kafka cluster. If new messages are streamed into the topic before timing out, the data consumption and model training resumes for those newly consumed data points. To block indefinitely, set it to `-1`. 
``` online_train_ds = tfio.experimental.streaming.KafkaBatchIODataset( topics=["susy-train"], group_id="cgonline", servers="127.0.0.1:9092", stream_timeout=10000, # in milliseconds, to block indefinitely, set it to -1. configuration=[ "session.timeout.ms=7000", "max.poll.interval.ms=8000", "auto.offset.reset=earliest" ], ) ``` Every item that the `online_train_ds` generates is a `tf.data.Dataset` in itself. Thus, all the standard transformations can be applied as usual. ``` def decode_kafka_online_item(raw_message, raw_key): message = tf.io.decode_csv(raw_message, [[0.0] for i in range(NUM_COLUMNS)]) key = tf.strings.to_number(raw_key) return (message, key) for mini_ds in online_train_ds: mini_ds = mini_ds.shuffle(buffer_size=32) mini_ds = mini_ds.map(decode_kafka_online_item) mini_ds = mini_ds.batch(32) if len(mini_ds) > 0: model.fit(mini_ds, epochs=3) ``` The incrementally trained model can be saved in a periodic fashion (based on use-cases) and can be utilized to infer on the test data in either online or offline modes. Note: The `streaming.KafkaBatchIODataset` and `streaming.KafkaGroupIODataset` are still in experimental phase and have scope for improvements based on user-feedback. ## References: - Baldi, P., P. Sadowski, and D. Whiteson. “Searching for Exotic Particles in High-energy Physics with Deep Learning.” Nature Communications 5 (July 2, 2014) - SUSY Dataset: https://archive.ics.uci.edu/ml/datasets/SUSY#
github_jupyter
# Process specifications Dynamically adjusting parameters in a process to meet a specification is critical in designing a production process, and even more so when its under uncertaintly. BioSTEAM groups process specifications into two categories: analytical specifications, and numerical specifications. As the name suggests, an analytical specification is directly solved within a single loop of a system. A numerical specification, on the other hand, is solved numerically by rerunning a unit operation or even by reconverging a recycle system. The following real world examples will explain this in detail. ## Analytical specifications ### Denature ethanol fuel in a bioethanol process Vary the amount of denaturant to add according to the flow of bioethanol. The final bioethanol product must be 2 wt. % denaturant: ``` from biosteam import settings, Chemical, Stream, units, main_flowsheet # First name a new flowsheet main_flowsheet.set_flowsheet('mix_ethanol_with_denaturant') # Set the thermodynamic property package. # In an actual process, much more chemicals # would be defined, but here we keep it short. settings.set_thermo(['Water', 'Ethanol', 'Octane']) # Assume 40 million gal ethanol produced a year # with 330 operating days dehydrated_ethanol = Stream('dehydrated_ethanol', T=340, Water=0.1, Ethanol=99.9, units='kg/hr') operating_days_per_year = 330 dehydrated_ethanol.F_vol = 40e6 / operating_days_per_year denaturant = Stream('denaturant', Octane=1) M1 = units.Mixer('M1', ins=(dehydrated_ethanol, denaturant), outs='denatured_ethanol') # Create the specification function. @M1.add_specification def adjust_denaturant_flow(): denaturant_over_ethanol_flow = 0.02 / 0.98 # A mass ratio denaturant.imass['Octane'] = denaturant_over_ethanol_flow * dehydrated_ethanol.F_mass M1.run() # Run mass and energy balance # Simulate, and check results. 
M1.simulate() M1.show(composition=True, flow='kg/hr') ``` All specifications are in the unit's specification list: ``` M1.specification ``` ### Preparing corn slurry in a conventional dry-grind process The solids content of a corn slurry fed to a conventional dry-grind corn ethanol plant is typically about 32 wt. %. Adjust the flow rate of water mixed with the milled corn for such that the slurry is 32 wt. %: ``` # First name a new flowsheet main_flowsheet.set_flowsheet('corn_slurry_example') # Create a general chemicals to represent the # components of corn. Starch = Chemical.blank('Starch', phase='s') Fiber = Chemical.blank('Fiber', phase='s') Oil = Chemical('Oil', search_ID='Oleic_acid') Water = Chemical('Water') # The exact properties are not important for # the example, so just assume its like water at # 25 C and 1 atm. Starch.default() Fiber.default() # Set the thermodynamic property package. # In an actual process, much more chemicals # would be defined, but here we keep it short. settings.set_thermo([Starch, Oil, Fiber, Water]) # A typical dry grind process may produce # 40 million gal of ethanol a year with a # yield of 2.7 gal ethanol per bushel of corn. corn_flow_per_year = 40e6 / 2.7 # In bushels days_per_year = 365 operating_days_per_year = 330 corn_flow_per_day = corn_flow_per_year * days_per_year / operating_days_per_year # The corn kernel iscomposed of starch (62%), protein and fiber (19%), # water (15%), and oil (4%). corn_feed = Stream('corn_feed', Starch=62, Fiber=19, Water=15, Oil=4, units='kg/hr') corn_feed.set_total_flow(corn_flow_per_day, units='bu/day') # Water that will be mixed with the milled corn to create the slurry. 
slurry_water = Stream('slurry_water', Water=1) M1 = units.Mixer('M1', ins=(slurry_water, corn_feed), outs='slurry') @M1.add_specification def adjust_water_flow(): F_mass_moisture = corn_feed.imass['Water'] F_mass_solids = corn_feed.F_mass - F_mass_moisture slurry_water.F_mass = F_mass_solids * (1 - 0.32) / 0.32 - F_mass_moisture M1._run() # Run mass and energy balance # Simulate, and check results. M1.simulate() M1.show(flow='kg/hr', composition=True) ``` ## Numerical specifications ### Flash design specification Let's say we have a mixture of water, ethanol and propanol and we would like to evaporate 50% of the liquid by mass (not by mol). We can solve this problem numerically by testing whether the specification is met at a given temperature: ``` # First name a new flowsheet main_flowsheet.set_flowsheet('flash_specification_example') # Set the thermodynamic property package. # In an actual process, much more chemicals # would be defined, but here we keep it short. settings.set_thermo(['Water', 'Ethanol', 'Propanol']) # Feed stream mixture = Stream('mixture', T=340, Water=1000, Ethanol=1000, Propanol=1000, units='kg/hr') # Create a flash vessel F1 = units.Flash('F1', ins=mixture, outs=('vapor', 'liquid'), T=373, P=101325) # Set a numerical specification which solves the objective function when called. @F1.add_bounded_numerical_specification(x0=351.39, x1=373.15, xtol=1e-6) def f(x): # Objective function where f(x) = 0 at a # vapor fraction of 50 wt. %. F1.T = x F1._run() # IMPORTANT: This runs the mass and energy balance at the new conditions feed = F1.ins[0] vapor = F1.outs[0] V = vapor.F_mass / feed.F_mass return V - 0.5 # Now create the system, simulate, and check results. system = main_flowsheet.create_system() system.simulate() system.diagram() print('vapor mass fraction: ', format(F1.outs[0].F_mass / mixture.F_mass, '.0%')) ```
github_jupyter
# 3.1 Expressions # Programming languages are much simpler than human languages. Nonetheless, there are some rules of grammar to learn in any language, and that is where we will begin. In this text, we will use the [Python](https://www.python.org/) programming language. Learning the grammar rules is essential, and the same rules used in the most basic programs are also central to more sophisticated programs. Programs are made up of *expressions*, which describe to the computer how to combine pieces of data. For example, a multiplication expression consists of a `*` symbol between two numerical expressions. Expressions, such as `3 * 4`, are *evaluated* by the computer. The value (the result of *evaluation*) of the last expression in each cell, `12` in this case, is displayed below the cell. ``` 3 * 4 ``` The grammar rules of a programming language are rigid. In Python, the `*` symbol cannot appear twice in a row. The computer will not try to interpret an expression that differs from its prescribed expression structures. Instead, it will show a `SyntaxError` error. The *Syntax* of a language is its set of grammar rules, and a `SyntaxError` indicates that an expression structure doesn't match any of the rules of the language. ``` 3 * * 4 ``` Small changes to an expression can change its meaning entirely. Below, the space between the `*`'s has been removed. Because `**` appears between two numerical expressions, the expression is a well-formed *exponentiation* expression (the first number raised to the power of the second: 3 times 3 times 3 times 3). The symbols `*` and `**` are called *operators*, and the values they combine are called *operands*. ``` 3 ** 4 ``` **Common Operators.** Data science often involves combining numerical values, and the set of operators in a programming language are designed to so that expressions can be used to express any sort of arithmetic. In Python, the following operators are essential. 
| Expression Type | Operator | Example | Value | |-----------------|----------|------------|-----------| | Addition | `+` | `2 + 3` | `5` | | Subtraction | `-` | `2 - 3` | `-1` | | Multiplication | `*` | `2 * 3` | `6` | | Division | `/` | `7 / 3` | `2.66667` | | Remainder | `%` | `7 % 3` | `1` | | Exponentiation | `**` | `2 ** 0.5` | `1.41421` | Python expressions obey the same familiar rules of *precedence* as in algebra: multiplication and division occur before addition and subtraction. Parentheses can be used to group together smaller expressions within a larger expression. ``` 1 + 2 * 3 * 4 * 5 / 6 ** 3 + 7 + 8 - 9 + 10 1 + 2 * (3 * 4 * 5 / 6) ** 3 + 7 + 8 - 9 + 10 ``` This chapter introduces many types of expressions. Learning to program involves trying out everything you learn in combination, investigating the behavior of the computer. What happens if you divide by zero? What happens if you divide twice in a row? You don't always need to ask an expert (or the Internet); many of these details can be discovered by trying them out yourself.
github_jupyter
``` import arviz as az import matplotlib.pyplot as plt import numpy as np import pymc3 as pm %load_ext watermark az.style.use('arviz-darkgrid') ``` # Sequential Monte Carlo - Approximate Bayesian Computation Approximate Bayesian Computation methods (also called likelihood free inference methods), are a group of techniques developed for inferring posterior distributions in cases where the likelihood function is intractable or costly to evaluate. This does not mean that the likelihood function is not part of the analysis, rather that it is not directly evaluated. ABC comes useful when modelling complex phenomena in certain fields of study, like systems biology. Such models often contain unobservable random quantities, which make the likelihood function hard to specify, but data can be simulated from the model. These methods follow a general form: 1- Sample a parameter $\theta^*$ from a prior/proposal distribution $\pi(\theta)$. 2- Simulate a data set $y^*$ using a function that takes $\theta$ and returns a data set of the same dimensions as the observed data set $y_0$ (simulator). 3- Compare the simulated dataset $y^*$ with the experimental data set $y_0$ using a distance function $d$ and a tolerance threshold $\epsilon$. In some cases a distance function is computed between two summary statistics $d(S(y_0), S(y^*))$, avoiding the issue of computing distances for entire datasets. As a result we obtain a sample of parameters from a distribution $\pi(\theta | d(y_0, y^*)) \leqslant \epsilon$. If $\epsilon$ is sufficiently small this distribution will be a good approximation of the posterior distribution $\pi(\theta | y_0)$. 
[Sequential monte carlo](https://docs.pymc.io/notebooks/SMC2_gaussians.html?highlight=smc) ABC is a method that iteratively morphs the prior into a posterior by propagating the sampled parameters through a series of proposal distributions $\phi(\theta^{(i)})$, weighting the accepted parameters $\theta^{(i)}$ like: $$ w^{(i)} \propto \frac{\pi(\theta^{(i)})}{\phi(\theta^{(i)})} $$ It combines the advantages of traditional SMC, i.e. ability to sample from distributions with multiple peaks, but without the need for evaluating the likelihood function. _(Lintusaari, 2016), (Toni, T., 2008), (Nuñez, Prangle, 2015)_ # A trivial example Estimating the mean and standard deviation of normal data ``` data = np.random.normal(loc=0, scale=1, size=1000) def normal_sim(a, b): return np.random.normal(a, b, 1000) with pm.Model() as example: a = pm.Normal("a", mu=0, sd=5) b = pm.HalfNormal("b", sd=1) s = pm.Simulator("s", normal_sim, params=(a, b), observed=np.sort(data)) trace_example = pm.sample_smc(kernel="ABC", sum_stat="sorted") az.plot_trace(trace_example); az.summary(trace_example) _, ax = plt.subplots(figsize=(10, 4)) az.plot_kde(data, label="True data", ax=ax, plot_kwargs={"color": "C2"}) az.plot_kde(normal_sim(trace_example["a"].mean(), trace_example["b"].mean()), ax=ax) for i in np.random.randint(0, 500, 25): az.plot_kde( normal_sim(trace_example["a"][i], trace_example["b"][i]), ax=ax, plot_kwargs={"zorder": 0, "alpha": 0.2}, ) ax.legend(); ``` # Lotka–Volterra In this example we will try to find parameters for the Lotka-Volterra equations. A common biological competition model for describing how the number of individuals of each species changes when there is a predator/prey interaction (A Biologist’s Guide to Mathematical Modeling in Ecology and Evolution,Otto and Day, 2007). For example, rabbits and foxes. 
Given an initial population number for each species, the integration of this ordinary differential equations (ODE) describes curves for the progression of both populations. This ODE’s takes four parameters: * a is the natural growing rate of rabbits, when there’s no fox. * b is the natural dying rate of rabbits, due to predation. * c is the natural dying rate of fox, when there’s no rabbit. * d is the factor describing how many caught rabbits let create a new fox. This example is based on the Scipy Lokta-Volterra Tutorial. ``` from scipy.integrate import odeint ``` First we will generate data using known parameters. ``` # Definition of parameters a = 1.0 b = 0.1 c = 1.5 d = 0.75 # initial population of rabbits and foxes X0 = [10.0, 5.0] # size of data size = 100 # time lapse time = 15 t = np.linspace(0, time, size) # Lotka - Volterra equation def dX_dt(X, t, a, b, c, d): """ Return the growth rate of fox and rabbit populations. """ return np.array([a * X[0] - b * X[0] * X[1], -c * X[1] + d * b * X[0] * X[1]]) ``` This model is based on a simulator, a function that returns data in the same dimensions as the observed data. In this case, the function solves the ODE. ``` # simulator function def competition_model(a, b): return odeint(dX_dt, y0=X0, t=t, rtol=0.1, args=(a, b, c, d)) ``` Using the simulator function we will obtain a dataset with some noise added, for using it as observed data. ``` # function for generating noisy data to be used as observed data. def add_noise(a, b, c, d): noise = np.random.normal(size=(size, 2)) simulated = competition_model(a, b) simulated += noise indexes = np.sort(np.random.randint(low=0, high=size, size=size)) return simulated[indexes] # plotting observed data. 
observed = add_noise(a, b, c, d) _, ax = plt.subplots(figsize=(12, 4)) ax.plot(observed[:, 0], "x", label="prey") ax.plot(observed[:, 1], "x", label="predator") ax.set_xlabel("time") ax.set_ylabel("population") ax.set_title("Observed data") ax.legend(); ``` On this model, instead of specifyng a likelihood function, we use `pm.Simulator()`, a "container" that stores the simulator function and the observed data. During sampling, samples from a and b priors will be passed to the simulator function. ``` with pm.Model() as model: a = pm.Normal("a", mu=1, sd=5) b = pm.Normal("b", mu=1, sd=5) simulator = pm.Simulator( "simulator", competition_model, params=(a, b), observed=observed ) trace = pm.sample_smc(kernel="ABC", epsilon=20) az.plot_trace(trace); az.plot_posterior(trace); # plot results _, ax = plt.subplots(figsize=(14, 6)) ax.plot(observed[:, 0], "x", label="prey", c="C0") ax.plot(observed[:, 1], "x", label="predator", c="C1") ax.plot(competition_model(trace["a"].mean(), trace["b"].mean()), linewidth=2.5) for i in np.random.randint(0, size, 75): ax.plot( competition_model(trace["a"][i], trace["b"][i])[:, 0], alpha=0.1, c="C2", zorder=0, ) ax.plot( competition_model(trace["a"][i], trace["b"][i])[:, 1], alpha=0.1, c="C3", zorder=0, ) ax.set_xlabel("time") ax.set_ylabel("population") ax.legend(); %watermark -n -u -v -iv -w ```
github_jupyter
# Suave demo notebook: BAO basis on a periodic box Hello! In this notebook we'll show you how to use suave, an implementation of the Continuous-Function Estimator, with a basis based on the standard baryon acoustic oscillation (BAO) fitting function. ``` import os import numpy as np import matplotlib.pyplot as plt import Corrfunc from Corrfunc.io import read_lognormal_catalog from Corrfunc.theory.DDsmu import DDsmu from Corrfunc.theory.xi import xi from Corrfunc.utils import evaluate_xi from Corrfunc.utils import trr_analytic from Corrfunc.bases import bao_bases from colossus.cosmology import cosmology import matplotlib from matplotlib import pylab %config InlineBackend.figure_format = 'retina' matplotlib.rcParams['figure.dpi'] = 80 textsize = 'x-large' params = {'legend.fontsize': 'x-large', 'figure.figsize': (10, 8), 'axes.labelsize': textsize, 'axes.titlesize': textsize, 'xtick.labelsize': textsize, 'ytick.labelsize': textsize} pylab.rcParams.update(params) plt.ion() ``` ## Load in data We'll demonstrate with a low-density lognormal simulation box, which we've included with the code. We'll show here the box with 3e-4 ($h^{-1}$Mpc)$^{-3}$, but if you're only running with a single thread, you will want to run this notebook with the 1e-4 ($h^{-1}$Mpc)$^{-3}$ box for speed. (The code is extremely parallel, so when you're running for real, you'll definitely want to bump up the number of threads.) ``` x, y, z = read_lognormal_catalog(n='2e-4') boxsize = 750.0 nd = len(x) print("Number of data points:",nd) ``` We don't need a random catalog for this example, as we'll use a periodic box such that we can calculate the random-random (and data-random) term analytically. ## Construct BAO basis We will use a basis that is based on the standard BAO fitting function. It starts from the correlation function for a given cosmology, with the freedom for a scale shift using a scale dilation parameter $\alpha$. 
It includes a term that is the derivative of the correlation function with respect to $\alpha$, linearizing around this value. There are also nuisance parameter terms. For a full explanation, see [our paper](https://arxiv.org/abs/2011.01836). To construct the BAO basis, we'll need to choose the r-range, as well as the redshift and bias. We can also select the cosmology, using the Colossus package. Note that we can also use a custom cosmology; see the [Colossus docs](https://bitbucket.org/bdiemer/colossus/src/master/). We also select our initial guess for the scale dilation parameter $\alpha_\mathrm{guess}$. A value of 1.0 means that we will not shift the correlation function, so let's start there. We also choose $k_0$, the initial magnitude of the partial derivative term. ``` rmin = 40 rmax = 150 cosmo = cosmology.setCosmology('planck15') redshift = 1.0 bias = 2.0 alpha_guess = 1.0 k0 = 0.1 projfn = 'bao_basis.dat' bases = bao_bases(rmin, rmax, projfn, cosmo_base=cosmo, alpha_guess=alpha_guess, k0=k0, ncont=2000, redshift=0.0, bias=1.0) ``` Plotting the bases, we see that the dark green basis is the correlation function for the given cosmology (and redshift and bias). It depends on the scale shift `alpha_guess` parameter; the default `alpha_guess=1.0`, meaning no shift. The next-darkest green is the derivative with respect to the base cosmology. It depends on the dalpha and k0 parameters (we have just used the defaults here). The other bases are nuisances parameters to marginalize over the broadband shape of the correlation function. We can also set the initial magnitudes of these by passing the `k1`, `k2`, and `k3` parameters. 
``` plt.figure(figsize=(8,5)) bao_base_colors = ['#41ab5d', '#74c476', '#a1d99b', '#005a32', '#238b45'] #from https://colorbrewer2.org/#type=sequential&scheme=Greens&n=8, last 5 out of 8 bao_base_names = [r'$\frac{k_1}{s^2}$', r'$\frac{k_2}{s}$', r'$k_3$', r'$\xi^\mathrm{mod}(\alpha_\mathrm{guess} s)$', r'$k_0 \frac{\mathrm{d} \xi^\mathrm{mod}(\alpha_\mathrm{guess} s)}{\mathrm{d} \alpha}$'] r = bases[:,0] base_vals = bases[:,1:] for i in range(base_vals.shape[1]): plt.plot(r, base_vals[:,i], label=bao_base_names[i], color=bao_base_colors[i]) plt.legend() plt.xlim(rmin, rmax) plt.ylim(-0.0025, 0.01) plt.xlabel(r'separation $r$ ($h^{-1}\,$Mpc)') plt.ylabel('BAO basis functions $f_k(r)$') ``` ## Suave with a BAO basis We set the suave parameters. The BAO basis we created is a file with a set of basis values at each separation r, so we use `proj_type=generalr`. We are also assuming a periodic box. We want the 3D correlation function, so we can use `DDsmu` with a single giant mu bin. ``` nthreads = 4 # Need to give a dummy r_edges for compatibility with standard Corrfunc. # But we will use this later to compute the standard xi, so give something reasonable. r_edges = np.linspace(rmin, rmax, 15) mumax = 1.0 nmubins = 1 periodic = True proj_type = 'generalr' ncomponents = base_vals.shape[1] dd_res_bao, dd_bao, _ = DDsmu(1, nthreads, r_edges, mumax, nmubins, x, y, z, boxsize=boxsize, periodic=periodic, proj_type=proj_type, ncomponents=ncomponents, projfn=projfn) ``` Because we are working with a periodic box, we can compute the v_RR and T_RR terms analytically. From those we can compute the amplitudes. Note that we are using Landy-Szalay here, but the v_DR term is equal to the v_RR term for a periodic box, so we don't need to compute it and the LS numerator reduces to v_DD - v_RR. 
``` volume = boxsize**3 rr_ana_bao, trr_ana_bao = trr_analytic(rmin, rmax, nd, volume, ncomponents, proj_type, projfn=projfn) numerator = dd_bao - rr_ana_bao amps_ana_bao = np.linalg.solve(trr_ana_bao, numerator) # Use linalg.solve instead of actually computing inverse! ``` We can then evaluate the correlation function using these amplitudes at any set of r values: ``` r_fine = np.linspace(rmin, rmax, 2000) xi_ana_bao = evaluate_xi(amps_ana_bao, r_fine, proj_type, projfn=projfn) ``` Let's also compute the standard correlation function for comparison: ``` xi_res = xi(boxsize, nthreads, r_edges, x, y, z, output_ravg=True) r_avg, xi_standard = xi_res['ravg'], xi_res['xi'] ``` And plot the results: ``` plt.figure(figsize=(8,5)) plt.plot(r_fine, xi_ana_bao, color='green', label='BAO basis') plt.plot(r_avg, xi_standard, marker='o', ls='None', color='grey', label='Standard estimator') plt.xlim(rmin, rmax) plt.xlabel(r'r ($h^{-1}$Mpc)') plt.ylabel(r'$\xi$(r)') plt.legend() ``` Voila, a nice, continuous, well-motivated correlation function! ## Recovering the scale dilation parameter $\alpha$ We can read the estimated value of $\alpha$ directly from our amplitudes. The amplitude of the derivative term (let's call it C) is the amount that we need to shift $\alpha$ from our initial guess $\alpha_\mathrm{guess}$, moderated by $k_0$. Explicitly, $$ \hat{\alpha} = \alpha_\mathrm{guess} + C \, k_0 $$ ``` C = amps_ana_bao[4] alpha_est = alpha_guess + C*k0 print(f"alpha_est = {alpha_est:.4f}") ``` So we found that the best fit to the data is not the initial cosmology, but that correlation function shifted by this factor. This is a pretty signficant shift, so the right thing to do is perform an iterative procedure to converge on the best-fit alpha. To do this, the next time around we pass `alpha_guess = alpha_est`. Then we'll get a new value for `alpha_est`, and can repeat the process until some criterion is reached (e.g. 
the fractional change between `alpha_est` for subsequent iterations dips below some threshold). See [our paper](https://arxiv.org/abs/2011.01836) for details. Finally, remember to clean up the basis function file: ``` os.remove(projfn) ```
github_jupyter
<a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_3_python_collections.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # T81-558: Applications of Deep Neural Networks **Module 1: Python Preliminaries** * Instructor: [Jeff Heaton](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx) * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/). # Module 1 Material * Part 1.1: Course Overview [[Video]](https://www.youtube.com/watch?v=Rqq-UnVXtMg&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_1_overview.ipynb) * Part 1.2: Introduction to Python [[Video]](https://www.youtube.com/watch?v=czq5d53vKvo&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_2_intro_python.ipynb) * **Part 1.3: Python Lists, Dictionaries, Sets and JSON** [[Video]](https://www.youtube.com/watch?v=kcGx2I5akSs&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_3_python_collections.ipynb) * Part 1.4: File Handling [[Video]](https://www.youtube.com/watch?v=FSuSLCMgCZc&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_4_python_files.ipynb) * Part 1.5: Functions, Lambdas, and Map/Reduce [[Video]](https://www.youtube.com/watch?v=jQH1ZCSj6Ng&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_01_5_python_functional.ipynb) # Google CoLab Instructions The following code ensures that Google CoLab is running the correct 
version of TensorFlow. ``` try: from google.colab import drive %tensorflow_version 2.x COLAB = True print("Note: using Google CoLab") except: print("Note: not using Google CoLab") COLAB = False ``` # Part 1.3: Python Lists, Dictionaries, Sets and JSON Like most modern programming languages, Python includes Lists, Sets, Dictionaries, and other data structures as built-in types. The syntax appearance of both of these is similar to JSON. Python and JSON compatibility is discussed later in this module. This course will focus primarily on Lists, Sets, and Dictionaries. It is essential to understand the differences between these three fundamental collection types. * **Dictionary** - A dictionary is a mutable unordered collection that Python indexes with name and value pairs. * **List** - A list is a mutable ordered collection that allows duplicate elements. * **Set** - A set is a mutable unordered collection with no duplicate elements. * **Tuple** - A tuple is an immutable ordered collection that allows duplicate elements. Most Python collections are mutable, which means that the program can add and remove elements after definition. An immutable collection cannot add or remove items after definition. It is also essential to understand that an ordered collection means that items maintain their order as the program adds them to a collection. This order might not be any specific ordering, such as alphabetic or numeric. Lists and tuples are very similar in Python and are often confused. The significant difference is that a list is mutable, but a tuple isn’t. So, we include a list when we want to contain similar items, and include a tuple when we know what information goes into it ahead of time. Many programming languages contain a data collection called an array. The array type is noticeably absent in Python. Generally, the programmer will use a list in place of an array in Python. 
Arrays in most programming languages were fixed-length, requiring the program to know the maximum number of elements needed ahead of time. This restriction leads to the infamous array-overrun bugs and security issues. The Python list is much more flexible in that the program can dynamically change the size of a list. The next sections will look at each of these collection types in more detail. ### Lists and Tuples For a Python program, lists and tuples are very similar. It is possible to get by as a programmer using only lists and ignoring tuples. Both lists and tuples hold an ordered collection of items. The primary difference that you will see syntactically is that a list is enclosed by square braces [] and a tuple is enclosed by parenthesis (). The following code defines both list and tuple. ``` l = ['a', 'b', 'c', 'd'] t = ('a', 'b', 'c', 'd') print(l) print(t) ``` The primary difference that you will see programmatically is that a list is mutable, which means the program can change it. A tuple is immutable, which means the program cannot change it. The following code demonstrates that the program can change a list. This code also illustrates that Python indexes lists starting at element 0. Accessing element one modifies the second element in the collection. One advantage of tuples over lists is that tuples are generally slightly faster to iterate over than lists. ``` l[1] = 'changed' #t[1] = 'changed' # This would result in an error print(l) ``` Like many languages, Python has a for-each statement. This statement allows you to loop over every element in a collection, such as a list or a tuple. ``` # Iterate over a collection. for s in l: print(s) ``` The **enumerate** function is useful for enumerating over a collection and having access to the index of the element that we are currently on. ``` # Iterate over a collection, and know where your index. (Python is zero-based!) 
for i,l in enumerate(l): print(f"{i}:{l}") ``` A **list** can have multiple objects added to it, such as strings. Duplicate values are allowed. **Tuples** do not allow the program to add additional objects after definition. ``` # Manually add items, lists allow duplicates c = [] c.append('a') c.append('b') c.append('c') c.append('c') print(c) ``` Ordered collections, such as lists and tuples, allow you to access an element by its index number, such as is done in the following code. Unordered collections, such as dictionaries and sets, do not allow the program to access them in this way. ``` print(c[1]) ``` A **list** can have multiple objects added to it, such as strings. Duplicate values are allowed. Tuples do not allow the program to add additional objects after definition. For the insert function, an index, the programmer must specify an index. These operations are not allowed for tuples because they would result in a change. ``` # Insert c = ['a', 'b', 'c'] c.insert(0, 'a0') print(c) # Remove c.remove('b') print(c) # Remove at index del c[0] print(c) ``` ### Sets A Python **set** holds an unordered collection of objects, but sets do *not* allow duplicates. If a program adds a duplicate item to a set, only one copy of each item remains in the collection. Adding a duplicate item to a set does not result in an error. Any of the following techniques will define a set. ``` s = set() s = { 'a', 'b', 'c'} s = set(['a', 'b', 'c']) print(s) ``` A **list** is always enclosed in square braces [], a **tuple** in parenthesis (), and similarly a **set** is enclosed in curly braces. Programs can add items to a **set** as they run. Programs can dynamically add items to a **set** with the **add** function. It is important to note that the **append** function adds items to lists, whereas the **add** function adds items to a **set**. ``` # Manually add items, sets do not allow duplicates # Sets add, lists append. I find this annoying. 
c = set() c.add('a') c.add('b') c.add('c') c.add('c') print(c) ``` ## Maps/Dictionaries/Hash Tables Many programming languages include the concept of a map, dictionary, or hash table. These are all very related concepts. Python provides a dictionary, that is essentially a collection of name-value pairs. Programs define dictionaries using curly-braces, as seen here. ``` d = {'name': "Jeff", 'address':"123 Main"} print(d) print(d['name']) if 'name' in d: print("Name is defined") if 'age' in d: print("age defined") else: print("age undefined") ``` Be careful that you do not attempt to access an undefined key, as this will result in an error. You can check to see if a key is defined, as demonstrated above. You can also access the directory and provide a default value, as the following code demonstrates. ``` d.get('unknown_key', 'default') ``` You can also access the individual keys and values of a dictionary. ``` d = {'name': "Jeff", 'address':"123 Main"} # All of the keys print(f"Key: {d.keys()}") # All of the values print(f"Values: {d.values()}") ``` Dictionaries and lists can be combined. This syntax is closely related to [JSON](https://en.wikipedia.org/wiki/JSON). Dictionaries and lists together are a good way to build very complex data structures. While Python allows quotes (") and apostrophe (') for strings, JSON only allows double-quotes ("). We will cover JSON in much greater detail later in this module. The following code shows a hybrid usage of dictionaries and lists. ``` # Python list & map structures customers = [ {"name": "Jeff & Tracy Heaton", "pets": ["Wynton", "Cricket", "Hickory"]}, {"name": "John Smith", "pets": ["rover"]}, {"name": "Jane Doe"} ] print(customers) for customer in customers: print(f"{customer['name']}:{customer.get('pets', 'no pets')}") ``` The variable **customers** is a list that holds three dictionaries that represent customers. You can think of these dictionaries as records in a table. 
The fields in these individual records are the keys of the dictionary. Here the keys **name** and **pets** are fields. However, the field **pets** holds a list of pet names. There is no limit to how deep you might choose to nest lists and maps. It is also possible to nest a map inside of a map or a list inside of another list. ## More Advanced Lists Several advanced features are available for lists that this section introduces. One such function is **zip**. Two lists can be combined into a single list by the **zip** command. The following code demonstrates the **zip** command. ``` a = [1,2,3,4,5] b = [5,4,3,2,1] print(zip(a,b)) ``` To see the results of the **zip** function, we convert the returned zip object into a list. As you can see, the **zip** function returns a list of tuples. Each tuple represents a pair of items that the function zipped together. The order in the two lists was maintained. ``` a = [1,2,3,4,5] b = [5,4,3,2,1] print(list(zip(a,b))) ``` The usual method for using the zip command is inside of a for-loop. The following code shows how a for-loop can assign a variable to each collection that the program is iterating. ``` a = [1,2,3,4,5] b = [5,4,3,2,1] for x,y in zip(a,b): print(f'{x} - {y}') ``` Usually, both collections will be of the same length when passed to the zip command. It is not an error to have collections of different lengths. As the following code illustrates, the zip command will only process elements up to the length of the smaller collection. ``` a = [1,2,3,4,5] b = [5,4,3] print(list(zip(a,b))) ``` Sometimes you may wish to know the current numeric index when a for-loop is iterating through an ordered collection. Use the **enumerate** command to track the index location for a collection element. Because the **enumerate** command deals with numeric indexes of the collection, the zip command will assign arbitrary indexes to elements from unordered collections. 
Consider how you might construct a Python program to change every element greater than 5 to the value of 5. The following program performs this transformation. The enumerate command allows the loop to know which element index it is currently on, thus allowing the program to be able to change the value of the current element of the collection. ``` a = [2, 10, 3, 11, 10, 3, 2, 1] for i, x in enumerate(a): if x>5: a[i] = 5 print(a) ``` The comprehension command can dynamically build up a list. The comprehension below counts from 0 to 9 and adds each value (multiplied by 10) to a list. ``` lst = [x*10 for x in range(10)] print(lst) ``` A dictionary can also be a comprehension. The general format for this is: ``` dict_variable = {key:value for (key,value) in dictonary.items()} ``` A common use for this is to build up an index to symbolic column names. ``` text = ['col-zero','col-one', 'col-two', 'col-three'] lookup = {key:value for (value,key) in enumerate(text)} print(lookup) ``` This can be used to easily find the index of a column by name. ``` print(f'The index of "col-two" is {lookup["col-two"]}') ``` ### An Introduction to JSON Data stored in a CSV file must be flat; that is, it must fit into rows and columns. Most people refer to this type of data as structured or tabular. This data is tabular because the number of columns is the same for every row. Individual rows may be missing a value for a column; however, these rows still have the same columns. This sort of data is convenient for machine learning because most models, such as neural networks, also expect incoming data to be of fixed dimensions. Real-world information is not always so tabular. Consider if the rows represent customers. These people might have multiple phone numbers and addresses. How would you describe such data using a fixed number of columns? It would be useful to have a list of these courses in each row that can be of a variable length for each row, or student. 
JavaScript Object Notation (JSON) is a standard file format that stores data in a hierarchical format similar to eXtensible Markup Language (XML). JSON is nothing more than a hierarchy of lists and dictionaries. Programmers refer to this sort of data as semi-structured data or hierarchical data. The following is a sample JSON file. ``` { "firstName": "John", "lastName": "Smith", "isAlive": true, "age": 27, "address": { "streetAddress": "21 2nd Street", "city": "New York", "state": "NY", "postalCode": "10021-3100" }, "phoneNumbers": [ { "type": "home", "number": "212 555-1234" }, { "type": "office", "number": "646 555-4567" }, { "type": "mobile", "number": "123 456-7890" } ], "children": [], "spouse": null } ``` The above file may look somewhat like Python code. You can see curly braces that define dictionaries and square brackets that define lists. JSON does require there to be a single root element. A list or dictionary can fulfill this role. JSON requires double-quotes to enclose strings and names. Single quotes are not allowed in JSON. JSON files are always legal JavaScript syntax. JSON is also generally valid as Python code, as demonstrated by the following Python program. ``` jsonHardCoded = { "firstName": "John", "lastName": "Smith", "isAlive": True, "age": 27, "address": { "streetAddress": "21 2nd Street", "city": "New York", "state": "NY", "postalCode": "10021-3100" }, "phoneNumbers": [ { "type": "home", "number": "212 555-1234" }, { "type": "office", "number": "646 555-4567" }, { "type": "mobile", "number": "123 456-7890" } ], "children": [], "spouse": None } ``` Generally, it is better to read JSON from files, strings, or the Internet than hard coding, as demonstrated here. However, for internal data structures, sometimes such hard-coding can be useful. Python contains support for JSON. When a Python program loads a JSON the root list or dictionary is returned, as demonstrated by the following code. 
``` import json json_string = '{"first":"Jeff","last":"Heaton"}' obj = json.loads(json_string) print(f"First name: {obj['first']}") print(f"Last name: {obj['last']}") ``` Python programs can also load JSON from a file or URL. ``` import requests r = requests.get("https://raw.githubusercontent.com/jeffheaton/" +"t81_558_deep_learning/master/person.json") print(r.json()) ``` Python programs can easily generate JSON strings from Python objects of dictionaries and lists. ``` python_obj = {"first":"Jeff","last":"Heaton"} print(json.dumps(python_obj)) ``` A data scientist will generally encounter JSON when they access web services to get their data. A data scientist might use the techniques presented in this section to convert the semi-structured JSON data into tabular data for the program to use with a model such as a neural network.
github_jupyter
## Compare CBC and Gurobi Compare the computation time of the CBC and Gurobi solvers for the same scenarios ``` import logging import matplotlib.pyplot as plt import matplotlib as mpl mpl.rcParams['pdf.fonttype'] = 42 mpl.rcParams['ps.fonttype'] = 42 import numpy as np import random import seaborn as sns import pandas as pd import statistics as stat import os import yaml import glob ``` ### 1 Load Data ``` data = {"AGVs": [], "randseed": [], "solver": [], "delay": [], "horizon": [], "total_time": [], "comp_time_vec": [], "comp_time_avg": [], "comp_time_max": []} yaml_list = glob.glob("ICAPS_solver_comp/*.yaml") horizon_0_data = {"AGVs": [], "randseed": [], "solver": [], "delay": [], "total_time": []} for file in yaml_list: split_filename = file.split("_") horizon = str(split_filename[-1].split(".")[0]) delay = str(split_filename[-3]) seed = str(split_filename[-5]) AGVs = str(split_filename[-7]) with open(file, "r") as stream: try: yaml_data = yaml.safe_load(stream) cumulative_time = yaml_data["results"]["total time"] comp_time_vec = yaml_data["results"]["comp time"]["solve_time"] comp_time_avg = yaml_data["results"]["comp time"]["avg"] comp_time_max = yaml_data["results"]["comp time"]["max"] solver = yaml_data["parameters"]["solver"] data["AGVs"].append(int(AGVs)) data["randseed"].append(int(seed)) data["solver"].append(solver) data["delay"].append(int(delay)) data["horizon"].append(int(horizon)) data["total_time"].append(int(cumulative_time)) data["comp_time_vec"].append(comp_time_vec) data["comp_time_avg"].append(comp_time_avg) data["comp_time_max"].append(comp_time_max) except yaml.YAMLError as exc: print(exc) columns = ["AGVs", "randseed", "solver", "delay", "horizon", "total_time", "comp_time_vec", "comp_time_avg", "comp_time_max"] df = pd.DataFrame(data, columns=columns) print(df) ``` ### 2 Compare Gurobi and CBC times ``` df_CBC = df[df.solver == "CBC"] df_GRB = df[df.solver == "GRB"] # print(df_CBC) # print(df_GRB) # dataframe for comparison compdata = 
{"AGVs": [], "randseed": [], "delay": [], "horizon": [], "time_CBC": [], "time_GRB": [], "factor": []} no_match_count = 0 # loop through all scenarios solved with CBC for index, row in df_GRB.iterrows(): AGVs = row["AGVs"] randseed = row["randseed"] delay = row["delay"] horizon = row["horizon"] total_time_GRB = row["comp_time_max"] try: # find corresponding scenario in CBC dataset CBC_data = df_CBC[(df_CBC.AGVs == AGVs) & (df_CBC.randseed == randseed) & (df_CBC.horizon == horizon) & (df_CBC.delay == delay)].iloc[0] total_time_CBC = float(CBC_data["comp_time_max"]) factor = float(total_time_CBC)/float(total_time_GRB) compdata["AGVs"].append(int(AGVs)) compdata["randseed"].append(int(randseed)) compdata["delay"].append(int(delay)) compdata["horizon"].append(int(horizon)) compdata["time_CBC"].append(float(total_time_CBC)) compdata["time_GRB"].append(float(total_time_GRB)) compdata["factor"].append(float(factor)) except IndexError: # no match found no_match_count += 1 continue print("no match count: {}".format(no_match_count)) # print(compdata["factor"]) for row in compdata: print(row) print(compdata[row]) # AGVs = compdata[row] # print(AGVs) # randseed = row[1] # delay = row[2] # horizon = row[3] # factor = row[4] # statss = AGVs + " " + randseed + " " + delay + " " + horizon # print(row) # print(AGVs) # print(" - {number:.0f} - {stats}".format(number=factor, stats=statss)) ```
github_jupyter
``` txt = '''Coronavirus disease 2019 (COVID-19), also known as the coronavirus, or COVID, is a contagious disease caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2). The first known case was identified in Wuhan, China, in December 2019.[7] The disease has since spread worldwide, leading to an ongoing pandemic.[8] Symptoms of COVID-19 are variable, but often include fever,[9] cough, headache,[10] fatigue, breathing difficulties, and loss of smell and taste.[11][12] Symptoms may begin one to fourteen days after exposure to the virus. At least a third of people who are infected do not develop noticeable symptoms.[13] Of those people who develop noticeable symptoms enough to be classed as patients, most (81%) develop mild to moderate symptoms (up to mild pneumonia), while 14% develop severe symptoms (dyspnea, hypoxia, or more than 50% lung involvement on imaging), and 5% suffer critical symptoms (respiratory failure, shock, or multiorgan dysfunction).[14] Older people are at a higher risk of developing severe symptoms. Some people continue to experience a range of effects (long COVID) for months after recovery, and damage to organs has been observed.[15] Multi-year studies are underway to further investigate the long-term effects of the disease.[15] Transmission of COVID-19 occurs when people are exposed to virus-containing respiratory droplets and airborne particles exhaled by an infected person.[16][17] Those particles may be inhaled or may reach the mouth, nose, or eyes of a person through touching or direct deposition (i.e. 
being coughed on).[16] The risk of infection is highest when people are in close proximity for a long time, but particles can be inhaled over longer distances, particularly indoors in poorly ventilated and crowded spaces.[16][18] In those conditions small particles can remain suspended in the air for minutes to hours.[16] Touching a contaminated surface or object may lead to infection although this does not contribute substantially to transmission.[16][19] People who are infected can transmit the virus to another person up to two days before they themselves show symptoms, as can people who do not experience symptoms.[20][21] People remain infectious for up to ten days after the onset of symptoms in moderate cases and up to twenty days in severe cases.[22] Several testing methods have been developed to diagnose the disease. The standard diagnostic method is by detection of the virus' nucleic acid by real-time reverse transcription polymerase chain reaction (rRT-PCR), transcription-mediated amplification (TMA), or by reverse transcription loop-mediated isothermal amplification (RT-LAMP) from a nasopharyngeal swab. Preventive measures include physical or social distancing, quarantining, ventilation of indoor spaces, covering coughs and sneezes, hand washing, and keeping unwashed hands away from the face. The use of face masks or coverings has been recommended in public settings to minimize the risk of transmissions. Several vaccines have been developed and many countries have initiated mass vaccination campaigns. Although work is underway to develop drugs that inhibit the virus, the primary treatment is symptomatic. 
Management involves the treatment of symptoms, supportive care, isolation, and experimental measures.''' txt = txt.lower() for i in range(23): name = '[' + str(i) + ']' txt = txt.replace(name , '') txt = txt.replace('\n\n','') txt = txt.replace('(','') txt = txt.replace(')','') txt = txt.replace(',','') unique_words = list(set(txt.split(' '))) ``` # Word Cloud ``` word_cloud = {} lst = [] for unique_word in unique_words: tmp = 0 for word in txt.split(' '): if (word == unique_word): tmp += 1 lst.append([unique_word, tmp]) word_cloud[unique_word] = tmp word_cloud['an'] import pandas as pd df = pd.DataFrame(lst, columns = ['word', 'freq']) df.sort_values(by = 'freq', ascending = False).head(10) df = pd.read_csv('t_asv.csv') len(df) df.isnull().sum() txt = '' for line in df['t']: txt += line + ' ' words = txt.split(' ') unique_words = list(set(txt.split(' '))) word_cloud = {} lst = [] for unique_word in unique_words: tmp = 0 for word in txt.split(' '): if (word == unique_word): tmp += 1 lst.append([unique_word, tmp]) word_cloud[unique_word] = tmp len(word_cloud) len(unique_words) 256/30918.0 3000/60 unique_words ```
github_jupyter
**Sustainable Software Development, block course, March 2021** *Scientific Software Center, Institute for Scientific Computing, Dr. Inga Ulusoy* # Analysis of the data Imagine you perform a "measurement" of some type and obtain "scientific data". You know what your data represents, but you have only a vague idea how different features in the data are connected, and what information you can extract from the data. You would start first with going through the data, making sure your data set is complete and that the result is reasonable. Imagine this already happened. In the next step, you would inspect your data more closely and try to identify structures. That is the step that we are focusing on in this unit. In the `data` folder, you will find several data files (`*.t` and `*.dat`). These are data files generated through some "new approach" that hasn't been used in your lab before. No previous analysis software exists, and you are going to establish a protocol for this "new approach" and "publish your results". The data can be grouped into two categories: 1. data to be analyzed using statistical methods; 2. data to be analyzed using numerical methods. In your hypothetical lab, you are an "expert" in one particular "method", and your co-worker is an "expert" in the other. Combined these two methods will lead to much more impactful results than if only one of you analyzed the data. Now, the task in this course is to be solved collaboratively with your team member working on one of the analysis approaches, and you working on the other. You will both implement functionality into the same piece of "software", but do so collaboratively through git. As you do not know yet which analysis is most meaningful for your data, and how to implement it, you will start with a jupyter notebook. You and your team member will work on the same notebook that will be part of a github repository for your project. This is the task for today. 
Discuss with your team members who will work on the statistical and who on the numerical analysis. ## Step 1 Generate a github repository with the relevant files. ## Step 2 Clone the repository to your local machine. ## Step 3 Start working on task 1 for your analysis approach. ## Step 4 Create your own branch of the repository and commit your changes to your branch; push to the remote repository. ## Step 5 Open a `pull request` so your team member can review your implementation. Likewise, your team member will ask you to review theirs. ## Step 6 Merge the changes in your branch into `main`. Resolve conflicts. ## Step 7 Repeat working on task; committing and pushing to your previously generated branch or a new branch; open a pull request; merge with main; until you have finished all the tasks in your analysis approach. Delete obsolete branches. # Start of the analysis notebook **Author : Hannah Weiser** *Date : 2022/03/01* *Affiliation : Heidelberg University, Institute of Geography, 3DGeo group* Place the required modules in the top, followed by required constants and global functions. ``` # required modules import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns from IPython.display import display sns.set_theme(style="darkgrid") # constants and global functions # filepaths: expec = "../data/expec.t" npop = "../data/npop.t" npop_corr = "npop_corr.csv" table = "../data/table.dat" euclid = "euclid.csv" # reading of the data files ``` # Statistical analysis Find correlations in the data sets. Analyse the data statistically and plot your results. Here we would want to do everything with pandas and leave the data in a dataframe. The files that are relevant to you are `expect.t`, `npop.t` and `table.dat`. 
### Task 1: Read in expec.t and plot relevant data ``` # read and plot expec.t df_expec = pd.read_csv(expec, sep=" ", skipinitialspace=True) display(df_expec) ``` We can discard the entries norm, \<x>, and \<y> as these are mostly constant. ``` # explore variance of entries to find a suitable threshold df_expec.var() # eliminate columns based on the variance: # if the variance of the values # in a column is below a given threshold, that column is discarded var_thresh = 0.0001 df_expec_clean = df_expec.loc[:, (df_expec.var() >= var_thresh)] display(df_expec_clean) ``` ### Task 2: Create plots of the relevant data and save as .pdf. ``` # create plots fig, axs = plt.subplots(2) fig.suptitle("Exploring the data") axs[0].plot(df_expec_clean["time"], df_expec_clean["<z>"]) axs[0].set_ylabel("z") axs[1].plot(df_expec_clean["time"], df_expec_clean["<H>"]) axs[1].set_ylabel("H") plt.xlabel("Time") plt.savefig("expec.pdf") ``` ### Task 3: Read in file `npop.t` and analyze correlations in the data ``` # read in npop.t df_npop = pd.read_csv(npop, sep=" ", skipinitialspace=True) df_npop # explore variance of entries to find a suitable filtering threshold df_npop.var() # discard all columns with variance below a set threshold # - we can consider them as constant var_thresh = 0.00001 df_npop_clean = df_npop.loc[:, (df_npop.var() >= var_thresh)] display(df_npop_clean) ``` Plot the remaining columns. Seaborn prefers "long format" (one column for all measurement values, one column to indicate the type) as input, whereas the cvs is in "wide format" (one column per measurement type). 
``` # plot ideally with seaborn df_npop_melted = df_npop_clean.melt("time", var_name="columns", value_name="values") g = sns.relplot(x="time", y="values", hue="columns", data=df_npop_melted, kind="line") g.set(title="All columns in one plot") plt.savefig("npop.pdf") sns.relplot(x="time", y="MO3", data=df_npop_clean, kind="line") sns.relplot(x="time", y="MO4", data=df_npop_clean, kind="line") sns.relplot(x="time", y="MO6", data=df_npop_clean, kind="line") sns.relplot(x="time", y="MO11", data=df_npop_clean, kind="line") sns.relplot(x="time", y="MO12", data=df_npop_clean, kind="line") sns.relplot(x="time", y="MO14", data=df_npop_clean, kind="line") ``` ## Quantify the pairwise correlation in the data - negative correlation: y values decrease for increasing x - large values of one feature correspond to small values of the other feature - weak or no correlation: no trend observable, association between two features is hardly observable - positive correlation: y values increase for increasing x - small values of one feature correspond to small values of the other feature Remember that correlation does not indicate causation - the reason that two features are associated can lie in their dependence on same factors. Correlate the value pairs using Pearson's $r$. Pearson's $r$ is a measure of the linear relationship between features: $r = \frac{\sum_i(x_i − \bar{x})(y_i − \bar{y})}{\sqrt{\sum_i(x_i − \bar{x})^2 \sum_i(y_i − \bar{y})^2}}$ Here, $\bar{x}$ and $\bar{y}$ indicate mean values. $i$ runs over the whole data set. For a positive correlation, $r$ is positive, and negative for a negative correlation, with minimum and maximum values of -1 and 1, indicating a perfectly linear relationship. Weakly or not correlated features are characterized by $r$-values close to 0. Other measures of correlation that can be used are Spearman's rank (value pairs follow monotonic function) or Kendall's $\tau$ (measures ordinal association), but they do not apply here. 
You can also define measures yourself. ``` # print the correlation matrix df_npop_clean.corr() ``` The diagonal values tell us that each value is perfectly correlated with itself. We are not interested in the diagonal values and also not in the correlation with time. We also need to get rid of redundant entries. Finally, we need to find the value pairs that exhibit the highest linear correlation. We still want to know if it is positive or negative correlation, so we cannot get rid of the sign. ``` # get rid of time column, lower triangular and diagonal entries of the # correlation matrix # sort the remaing values according to their absolute value, but keep the sign r = df_npop_clean.corr() r_ut = r.where((np.triu(np.ones(r.shape)).astype(bool)) & (r != 1.0)) r_ut.pop("time") display(r_ut) sorted_r = r_ut.unstack().dropna().sort_values() display(sorted_r) ``` Note that the entries in the left column are not repeated if they do not change from the row above (so the fourth feature pair is MO3 and MO6). ### Task 4: Print the resulting data to a file ``` # write to file sorted_r.to_csv(npop_corr, header=False) ``` ### Task 5: Calculate the Euclidean distance (L2 norm) for the vectors in `table.dat` The Euclidean distance measures the distance between to objects that are not points: $d(p,q) = \sqrt{\left(p-q\right)^2}$ In this case, consider each of the columns in table.dat as a vector in Euclidean space, where column $r(x)$ and column $v(x)$ denote a pair of vectors that should be compared, as well as $r(y)$ and $v(y)$, and r(z) and v(z). (Background: These are dipole moment components in different gauges, the length and velocity gauge.) 
```
# read in table.dat - I suggest reading it as a numpy array
# replace the NaNs by zero
tab = np.genfromtxt(table, names=True, autostrip=True, dtype=None)
# using loadtxt, bc nan_to_num did not work with np.genfromtxt()
# NOTE(review): the genfromtxt result above is immediately overwritten here,
# so that first read is effectively dead code — confirm it can be removed
tab = np.loadtxt(table, skiprows=1)
tab = np.nan_to_num(tab)
```

Now calculate how different the vectors in column 2 are from column 3, column 4 from column 5, and column 6 from column 7.

```
# calculate the Euclidean distance
def euclid_dist(a, b):
    """Element-wise distance sqrt((a - b)**2), i.e. |a - b| per component."""
    # The histograms below look at the per-component differences,
    # not at a single scalar norm per vector pair.
    return np.sqrt((a - b) ** 2)


# pair up the gauge columns: r(x)/v(x), r(y)/v(y), r(z)/v(z)
dist_x = euclid_dist(tab[:, 2], tab[:, 3])
dist_y = euclid_dist(tab[:, 4], tab[:, 5])
dist_z = euclid_dist(tab[:, 6], tab[:, 7])

# plot the result and save to a .pdf
fig, axs = plt.subplots(1, 3, sharey=True, tight_layout=True)
n_bins = 15
# we are not plotting the full range, because we have many Zeros,
# which we want to exclude, and some very large outliers
axs[0].hist(dist_x, bins=n_bins, range=[0.00001, 1])
axs[0].set_title("$r(x)$ - $v(x)$")
axs[1].hist(dist_y, bins=n_bins, range=[0.00001, 1])
axs[1].set_title("$r(y)$ - $v(y)$")
axs[2].hist(dist_z, bins=n_bins, range=[0.00001, 1])
axs[2].set_title("$r(z)$ - $v(z)$")
fig.suptitle("Histograms of Euclidean distances")
plt.savefig("euclid.pdf")

# print the result to a file
d = {"dist_x": dist_x, "dist_y": dist_y, "dist_z": dist_z}
df_euclid = pd.DataFrame(data=d)
df_euclid.to_csv(euclid, index=False)
```

# Numerical analysis Analyze the data using autocorrelation functions and discrete Fourier transforms. Plot your results. 
```
# define some global functions
path_efield = "../data/efield.t"
```

### Task 1: Read in `efield.t` and Fourier-transform relevant columns

```
# read the whitespace-separated file line by line into a 2D array of strings
tmp = []
with open(path_efield) as f:
    for line in f:
        tmp.append(line.strip().split())
efield = np.array(tmp)
display(efield)

# read and plot efield.t
t = efield[1:, 0].astype("float64")  # drop first row (time)
y = efield[1:, 2].astype("float64")  # drop first row (y)
plt.plot(t, y)
plt.show()
```

Here we are interested in column 2 since the others are constant.

```
# discard the columns with variance below threshold
# - these are considered constant
thold = 0
efield_c = efield[
    :,
    np.var(
        efield[
            1:,
        ].astype("float64"),
        0,
    )
    > thold,
]
display(efield_c)

# discrete Fourier transform of the remaining column:
# You only need the real frequencies
fft = np.fft.fft(efield_c[1:, 1].astype("float64"))
# NOTE(review): sample spacing is hard-coded to 0.1 here — presumably the
# time step of efield.t; confirm against the data file
fftfreq = np.fft.fftfreq(fft.size, 0.1)
plt.plot(fftfreq, fft.real, fftfreq, fft.imag)
plt.show()
```

### Task 2: Generate a plot of your results to be saved as pdf.

```
# plot your results
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(25, 9))
plt.rcParams["font.size"] = "25"
fig.suptitle("Task 1 results")
ax1.plot(t, y)
ax1.set_title("Electric field")
ax2.plot(fftfreq, fft.real)
ax2.set_title("Fourier transform")
plt.savefig("task1_res.pdf")
```

### Task 3: Calculate the autocorrelation function from nstate_i.t The autocorrelation function measures how correlated subsequent vectors are with an initial vector; ie. $\Psi_{corr} = \langle \Psi(t=0) | \Psi(t) \rangle = \int_0^{tfin} \Psi(0)^* \Psi(t) dt$ Since we are in a numerical representation, the integral can be replaced with a sum; and the given vectors are already normalized. 
``` # read in as numpy array path_nstate = "../data/nstate_i.t" nstate = np.loadtxt(path_nstate, skiprows=1) # shape (101,481) # store the time column (column 0) in a vector and drop from array time = nstate[:, 0] # (101,) time_vector = time[:, np.newaxis] # (101, 1) nstate_notime = nstate[:, 1:] # (101, 480) # correct the data representation: this is in fact a complex matrix # the real part of each matrix column is contained in # numpy array column 0, 2, 4, 6, ... # the imaginary part of each matrix column is contained in # numpy array column 1, 3, 5, 7, ... # convert the array that was read as dtype=float into a dtype=complex array # thought: nstate_compl has half no. of columns compared to nstate_notime # for even numbers incl zero # nstate_compl = [] nstate_compl = np.empty((101,)) # print(nstate_compl.shape) for i in range(0, nstate_notime.shape[1], 2): real = np.asarray(nstate_notime[:, i]) imag = np.asarray(nstate_notime[:, i + 1]) # complex = np.vectorize(complex)(real, imag) mycomplex = real + 1j * imag nstate_compl = np.column_stack((nstate_compl, mycomplex)) nstate_compl = nstate_compl[:, 1:] # shape (101, 240) # complex = np.vectorize(complex, otypes=[np.float64])(real, imag) # or # complex = real + 1j*imag len(nstate_compl) # for the autocorrelation function, we want the overlap between the first # vector at time 0 and all # subsequent vectors at later times - the sum of the product of initial and # subsequent vectors for each time step # Def. 
Autocorrelation: correlation of a signal with a delayed copy of itself as a function of delay # ACF represents how similar a value is to a previous value within a time series # acf = np.zeros(len(nstate_compl[0]), dtype=complex) # for i in range(0, len(nstate_compl[0])): # acf[i] = np.sum(nstate_compl[:, 0] * np.conjugate(nstate_compl[:, i])) acf = np.zeros(len(nstate_compl), dtype=complex) for i in range(0, len(nstate_compl)): acf[i] = np.sum(nstate_compl[0, :] * np.conjugate(nstate_compl[i, :])) print(acf) plt.plot(abs(acf**2)) plt.show() ``` ### Task 4: Generate a plot of your results to be saved as pdf. ``` # plot the autocorrelation function - real, imaginary and absolute part plt.plot(abs(acf**2), label="absolute") plt.plot(acf.real**2, label="real") plt.plot(acf.imag**2, label="imaginary") plt.legend() plt.show() plt.savefig("task3_res.pdf") ``` ### Task 5: Discrete Fourier transform of the autocorrelation function ``` # discrete Fourier-transform the autocorrelation function # - now we need all frequency components, # also the negative ones ``` ### Task 6: Generate a plot of your results to be saved as pdf. ``` # plot the power spectrum (abs**2) ```
github_jupyter
# Implementing Simple Linear regression Python implementation of the linear regression exercise from Andrew Ng's course: Machine Learning on coursera. Exercise 1 Source notebooks: [1][1] [2][2] [3][3] [4][4] [1]:https://github.com/kaleko/CourseraML/blob/a815ac95ba3d863b7531926b1edcdb4f5dd0eb6b/ex1/ex1.ipynb [2]:http://nbviewer.jupyter.org/github/jdwittenauer/ipython-notebooks/blob/master/notebooks/ml/ML-Exercise1.ipynb [3]:http://nbviewer.jupyter.org/github/JWarmenhoven/Machine-Learning/blob/master/notebooks/Programming%20Exercise%201%20-%20Linear%20Regression.ipynb [4]:http://nbviewer.jupyter.org/github/JWarmenhoven/Machine-Learning/blob/master/notebooks/Programming%20Exercise%201%20-%20Linear%20Regression.ipynb ``` import os import math as m import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy import stats %matplotlib inline data_path = '/Users/User/Desktop/Computer_Science/stanford_ml/machine-learning-ex1/ex1' os.chdir(data_path) ``` # Loading data set and formating data ``` #Reading data file and shape data = pd.read_csv('ex1data1.txt', header = None) m,n = data.shape #Initializing X and Y according to shape and converting to numpy arrays X = data.iloc[:,0:n-1].values y = data.iloc[:,n-1:n].values #Adding the columns of 1s to X X = np.concatenate((np.ones((m,1)),X), axis = 1) #Initializing theta theta = np.zeros((n,1),dtype = 'int8') ``` ## Plotting the data ``` plt.scatter(X[:,1], y, s=30, c='r', marker='x', linewidths=1) plt.xlabel('Population of City in 10,000s') plt.ylabel('Profit in $10,000s'); ``` # Cost Function computation $J(\theta) = \frac{1}{2m}\sum_{i=1}^m(h_\theta(x^{(i)}) - y^{(i)})^2 $ $J(\theta) = \frac{1}{2m}(X\theta - y)^T(X\theta - y) $ (vectorized version) ## Gradient descent computation $\frac{\partial J(\theta)}{\partial \theta} = \frac{1}{m}X^T(X\theta - y) $ ``` theta = np.zeros((n,1),dtype = 'int8') def cost_function(X,y,theta): #Initialisation of useful values m = np.size(y) J = 0 #Hypothesis function in 
vectorized form h = np.dot(X,theta) #Cost function in vectorized form J = float((1./(2*m)) * np.dot((h - y).T, (h - y))); return J; def gradient_descent(X,y,theta,alpha = 0.0005,num_iters=1000): #Initialisation of useful values m = np.size(y) J_history = np.zeros(num_iters) J_vec = [] #Used to plot the cost function convergence thetahistory = [] #Used for three d plot of convergence for i in range(num_iters): #Hypothesis function h = np.dot(X,theta) #Calculating the grad function in vectorized form theta = theta - alpha * (1/m)* (X.T.dot(h-y)) J_history[i] = cost_function(X,y,theta) #Calculate the cost for each iteration(used to plot convergence) J_vec.append(cost_function(X,y,theta)) thetahistory.append(list(theta[:,0])) return theta,J_history,J_vec, thetahistory; def grad_descent_loop(X,y,theta,alpha = 0.015,num_iters=1000): #Initialisation of useful values m = np.size(y) theta0 = 0 theta1 = 0 h = 0 for _ in range(num_iters): grad0,grad1 = 0,0 for i in range(m): h = theta0 + theta1 * X[:,1][i] grad0 += (h - y[i]) grad1 += (h - y[i]) * X[:,1][i] #Calculating the grad function in vectorized form theta0 = theta0 - alpha * (1./m)* grad0 theta1 = theta1 - alpha * (1./m)* grad1 return np.array([theta0, theta1]) grad_descent_loop(X, y,theta) ``` ## Run gradient descent ``` theta_calc , Cost_J, J_vec,thetahistory = gradient_descent(X, y,theta) theta_calc #gradient_descent(X,y,theta,alpha = 0.0005,num_iters=1000): ``` ## Plot convergence ``` def plot_convergence(jvec): plt.figure(figsize=(10,6)) plt.plot(range(len(jvec)),jvec) plt.grid(True) plt.title("Convergence of Cost Function") plt.xlabel("Iteration number") plt.ylabel("Cost function") plot_convergence(J_vec) ``` ## Fit regression line to data Prediction = $h_\theta(x)=\theta_0 + \theta_1x$ ``` def prediction(X,theta): y_pred = theta[0] + theta[1] * X[:,1] return y_pred; #Calculating prediction y_pred = X @ theta_calc #Plotting figure plt.figure(figsize=(10,6)) 
plt.plot(X[:,1],y[:,0],'rx',markersize=10,label='Training Data') plt.plot(X[:,1],y_pred,'b-', label = 'Prediction h(x) = %0.2f + %0.2fx'%(theta_calc[0],theta_calc[1])) plt.title('Data vs Linear regression prediction') plt.xlabel('Population of City in 10,000s') plt.ylabel('Profit in $10,000s'); plt.xlim(4.9) plt.grid() plt.legend() ``` ## Visualizing the cost minimization path of gradient descent Source: https://github.com/kaleko/CourseraML/blob/a815ac95ba3d863b7531926b1edcdb4f5dd0eb6b/ex1/ex1.ipynb ``` theta_calc , Cost_J, J_vec,thetahistory = gradient_descent(X, y,np.array([0,0]).reshape(-1,1), alpha = .0005, num_iters = 10000 ) theta_calc #gradient_descent(X,y,theta,alpha = 0.0005,num_iters=1000): #Import necessary matplotlib tools for 3d plots from mpl_toolkits.mplot3d import axes3d, Axes3D from matplotlib import cm import itertools fig = plt.figure(figsize=(12,12)) ax = fig.gca(projection='3d') xvals = np.arange(-10,10,.5) yvals = np.arange(-4,4,.1) myxs, myys, myzs = [], [], [] for david in xvals: for kaleko in yvals: myxs.append(david) myys.append(kaleko) myzs.append(cost_function(X,y,np.array([[david], [kaleko]]))) scat = ax.scatter(myxs,myys,myzs,c=np.abs(myzs),cmap='jet') plt.xlabel(r'$\theta_0$',fontsize=30) plt.ylabel(r'$\theta_1$',fontsize=30) plt.title('Cost (Minimization Path Shown in Blue)',fontsize=20) plt.plot([x[0] for x in thetahistory],[x[1] for x in thetahistory],J_vec,'bo-') ax.view_init(45, 0) plt.show() ```
github_jupyter
``` import pandas as pd import numpy as np import calendar import math import re import string import segmentation import utils import data2graph from finetuned import T5FineTuner, BARTFineTuner, generate, generate_beam, graph2text_nobeam, graph2text_nobeam_ngram_es, graph2text_nobeam_topk, graph2text_nobeam_topp import textstat import language_tool_python from lexical_diversity import lex_div as ld tool = language_tool_python.LanguageTool('en-US') def grammar_score(input_text): errors = len(tool.check(input_text)) clean_text = input_text.translate(str.maketrans('', '', string.punctuation)) clean_text = list(filter(None, clean_text.split(' '))) num_words = len(clean_text) return float(1-(errors/num_words)) ``` ### Loading Fine-Tuned PLMs ``` import torch cuda0 = torch.device("cuda:0") #cuda1 = torch.device("cuda:1") #cuda3 = torch.device("cuda:3") t5 = T5FineTuner.load_from_checkpoint("T5Models/T5Both.ckpt") bart = BARTFineTuner.load_from_checkpoint("BARTModels/BARTBoth.ckpt") t5.to(cuda0) bart.to(cuda0) ``` ### Global Temperature ``` #Import Land Temp Dataset ds_gtemp = pd.read_csv("Data/GlobalTemperature/GlobalLandTemperaturesByCountry.csv") ds_gtemp = ds_gtemp.dropna() ds_gtemp['dt'] = pd.to_datetime(ds_gtemp['dt']) ds_gtemp['month'] = pd.DatetimeIndex(ds_gtemp['dt']).month ds_gtemp['month'] = ds_gtemp['month'].apply(lambda x: calendar.month_name[x]) ds_gtemp['year'] = pd.DatetimeIndex(ds_gtemp['dt']).year ds_gtemp.set_index(['dt'],inplace=True) #RE Scores template_re_scores = [] t5_re_scores = [] t5_re_scores_topk = [] t5_re_scores_topp = [] bart_re_scores = [] bart_re_scores_topk = [] bart_re_scores_topp = [] #Diveristy Scores template_tte_scores = [] t5_tte_scores = [] t5_tte_scores_topk = [] t5_tte_scores_topp = [] bart_tte_scores = [] bart_tte_scores_topk = [] bart_tte_scores_topp = [] #Grammar Scores t5_g_scores = [] t5_g_scores_topk = [] t5_g_scores_topp = [] bart_g_scores = [] bart_g_scores_topk = [] bart_g_scores_topp = [] #Grammar Mistakes t5_g_mistake 
= [] t5_g_mistake_topk = [] t5_g_mistake_topp = [] bart_g_mistake = [] bart_g_mistake_topk = [] bart_g_mistake_topp = [] countries = ['United States', 'India', 'Brazil', 'Russia', 'United Kingdom', 'France', 'Spain', 'Italy' , 'Turkey', 'Germany'] for c in countries: print("Processing Country: ", c) country = ds_gtemp[ds_gtemp['Country']==c][['AverageTemperature','month', 'year']].reset_index().drop(columns=['dt']) country_gtemp_raw = country['AverageTemperature'].tolist() #Log-normalize data trans = np.ma.log(country_gtemp_raw) country_gtemp = trans.filled(0) print("\n Data Loaded") #Detecting Waves embeds, cluster_labels = segmentation.tslr_rep(country_gtemp) cluster_arrangement = utils.find_contiguous(cluster_labels) indices = utils.find_indices(cluster_arrangement) wave_indices = utils.find_waves(country_gtemp_raw, indices, tolerance=7) print("\n Waves Detected") #Detecting Trends segmentation_results = segmentation.sliding_window(country_gtemp, 7) print("\n Segmentation Done") filtered_results = segmentation.re_segment(segmentation_results, country_gtemp) trends = segmentation.find_trend(filtered_results, country_gtemp) print("\n Trends Detected") location = c graph, essentials = data2graph.build_graph_gtemp_form1("Global Temperature", location, wave_indices, trends, country, country_gtemp_raw ) print("\n Graph Calculated") #Template Narrative template_text = data2graph.build_template_gtemp_nums("Global Temperature", location, wave_indices, trends, country, country_gtemp_raw ) print("\n Templated Computed") t5_prefix = 'translate Graph to English: ' iso = c #Simple PLM Generation t5_narrative = graph2text_nobeam(t5, graph, t5_prefix, 512, cuda0) bart_narrative = graph2text_nobeam(bart , graph, "", 512, cuda0) bart_narrative = re.sub('</s>' , '', bart_narrative) print("Simple Generation Complete: ", iso) #Top-k at 50 t5_narrative_topk = graph2text_nobeam_topk(t5, graph, t5_prefix, 50, 512, cuda0) bart_narrative_topk = graph2text_nobeam_topk(bart, graph, "", 50, 
512, cuda0) bart_narrative_topk = re.sub('</s>' , '', bart_narrative_topk) print("Top-k Complete: ", iso) #Top-p at 0.92 t5_narrative_topp = graph2text_nobeam_topp(t5, graph, t5_prefix, 0.92, 512, cuda0) bart_narrative_topp = graph2text_nobeam_topp(bart, graph, "", 0.92, 512, cuda0) bart_narrative_topp = re.sub('</s>' , '', bart_narrative_topp) print("Top-p Complete: ", iso) #RE Scores template_re_scores.append(textstat.flesch_reading_ease(template_text)) t5_re_scores.append(textstat.flesch_reading_ease(t5_narrative)) t5_re_scores_topk.append(textstat.flesch_reading_ease(t5_narrative_topk)) t5_re_scores_topp.append(textstat.flesch_reading_ease(t5_narrative_topp)) bart_re_scores.append(textstat.flesch_reading_ease(bart_narrative)) bart_re_scores_topk.append(textstat.flesch_reading_ease(bart_narrative_topk)) bart_re_scores_topp.append(textstat.flesch_reading_ease(bart_narrative_topp)) print("RE Scores Computed: ", iso) #Diveristy Scores template_tte_scores.append(ld.ttr(ld.flemmatize(template_text))) t5_tte_scores.append(ld.ttr(ld.flemmatize(t5_narrative))) t5_tte_scores_topk.append(ld.ttr(ld.flemmatize(t5_narrative_topk))) t5_tte_scores_topp.append(ld.ttr(ld.flemmatize(t5_narrative_topp))) bart_tte_scores.append(ld.ttr(ld.flemmatize(bart_narrative))) bart_tte_scores_topk.append(ld.ttr(ld.flemmatize(bart_narrative_topk))) bart_tte_scores_topp.append(ld.ttr(ld.flemmatize(bart_narrative_topp))) print("TTE Scores Computed: ", iso) #Grammar Scores gs = grammar_score(t5_narrative) t5_g_scores.append(gs) if gs != 1.0: t5_g_mistake.append((graph, t5_narrative)) gs = grammar_score(t5_narrative_topk) t5_g_scores_topk.append(gs) if gs != 1.0: t5_g_mistake_topk.append((graph, t5_narrative_topk)) gs = grammar_score(t5_narrative_topp) t5_g_scores_topp.append(gs) if gs != 1.0: t5_g_mistake_topp.append((graph, t5_narrative_topp)) gs = grammar_score(bart_narrative) bart_g_scores.append(gs) if gs != 1.0: bart_g_mistake.append((graph, bart_narrative)) gs = 
grammar_score(bart_narrative_topk) bart_g_scores_topk.append(gs) if gs != 1.0: bart_g_mistake_topk.append((graph, bart_narrative_topk)) gs = grammar_score(bart_narrative_topp) bart_g_scores_topp.append(gs) if gs != 1.0: bart_g_mistake_topp.append((graph, bart_narrative_topp)) print("Grammar Scores Computed: ", iso) #RE Scores print("*** RE Scores ***") print("template_re_scores: ", np.mean(template_re_scores)) print("t5_re_scores: ", np.mean(t5_re_scores)) print("t5_re_scores_topk: ", np.mean(t5_re_scores_topk)) print("t5_re_scores_topp: ", np.mean(t5_re_scores_topp)) print("bart_re_scores: ", np.mean(bart_re_scores)) print("bart_re_scores_topk: ", np.mean(bart_re_scores_topk)) print("bart_re_scores_topp: ", np.mean(bart_re_scores_topp)) print("\n") print("*** Diversity Scores ***") #Diveristy Scores print("template_tte_scores: ", np.mean(template_tte_scores)) print("t5_tte_scores: ", np.mean(t5_tte_scores)) print("t5_tte_scores_topk: ", np.mean(t5_tte_scores_topk)) print("t5_tte_scores_topp: ", np.mean(t5_tte_scores_topp)) print("bart_tte_scores: ", np.mean(bart_tte_scores)) print("bart_tte_scores_topk: ", np.mean(bart_tte_scores_topk)) print("bart_tte_scores_topp: ", np.mean(bart_tte_scores_topp)) print("\n") print("*** Grammar Scores ***") #Grammar Scores print("t5_g_scores: ", np.mean(t5_g_scores)) print("t5_g_scores_topk: ", np.mean(t5_g_scores_topk)) print("t5_g_scores_topp: ", np.mean(t5_g_scores_topp)) print("bart_g_scores: ", np.mean(bart_g_scores)) print("bart_g_scores_topk: ", np.mean(bart_g_scores_topk)) print("bart_g_scores_topp: ", np.mean(bart_g_scores_topp)) ```
github_jupyter
``` from IPython.display import HTML tag = HTML('''<script> code_show=true; function code_toggle() { if (code_show){ $('div.input').hide() } else { $('div.input').show() } code_show = !code_show } $( document ).ready(code_toggle); </script> Toggle cell visibility <a href="javascript:code_toggle()">here</a>.''') display(tag) # Hide the code completely # from IPython.display import HTML # tag = HTML('''<style> # div.input { # display:none; # } # </style>''') # display(tag) ``` ## Modalna analiza sistema masa-vzmet-dušilka Ta interaktivni primer se navezuje na sistem masa-vzmet-dušilka, predstavljenem v primeru [Modalna analiza](SS-02-Modalna_analiza.ipynb); grafično so prikazani MODES sistema, ki predstavljajo odziv sistema. Dinamično matriko sistema lahko zapišemo kot: $$ A= \begin{bmatrix} 0 && 1 \\ -\frac{k}{m} && -\frac{c}{m} \end{bmatrix}$$ in njen karakteristični polinom (matrika $A$ je zapisana v kanonični vodljivostni obliki) kot: $$\lambda^2+\frac{c}{m}\lambda+\frac{k}{m}.$$ Sledi, da so lastne vrednosti in z njimi povezani MODES enaki $$\lambda_{1,2}=-\frac{c}{m}\pm\frac{1}{m}\sqrt{c^2-4km}.$$ Člen znotraj kvadratnega korena je kritičen, saj odloča o tem, kakšen bo odziv sistema; npr. v primeru vrednosti tega člena $c\ge2\sqrt{km}$ bo imel sistem zgolj realne lastne vrednosti in v odzivu sistema ne bo oscilacij. Primer omogoča vpogled v to, kaj se dogaja z MODES sistema, ko spreminjamo vrednosti parametrov $k$, $m$, in $c$. 
``` #Preparatory Cell import control import numpy from IPython.display import display, Markdown import ipywidgets as widgets import matplotlib.pyplot as plt from matplotlib import animation %matplotlib inline #print a matrix latex-like def bmatrix(a): """Returns a LaTeX bmatrix - by Damir Arbula (ICCT project) :a: numpy array :returns: LaTeX bmatrix as a string """ if len(a.shape) > 2: raise ValueError('bmatrix can at most display two dimensions') lines = str(a).replace('[', '').replace(']', '').splitlines() rv = [r'\begin{bmatrix}'] rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines] rv += [r'\end{bmatrix}'] return '\n'.join(rv) # Display formatted matrix: def vmatrix(a): if len(a.shape) > 2: raise ValueError('bmatrix can at most display two dimensions') lines = str(a).replace('[', '').replace(']', '').splitlines() rv = [r'\begin{vmatrix}'] rv += [' ' + ' & '.join(l.split()) + r'\\' for l in lines] rv += [r'\end{vmatrix}'] return '\n'.join(rv) #matrixWidget is a matrix looking widget built with a VBox of HBox(es) that returns a numPy array as value ! 
class matrixWidget(widgets.VBox):
    """Matrix-entry widget: an n-by-m grid of FloatText boxes whose contents
    are mirrored in ``self.value`` as a numpy matrix.
    """
    def updateM(self,change):
        # Fired whenever any child FloatText changes: copy every cell back
        # into the backing matrix and republish it as .value.
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.M_[irow,icol] = self.children[irow].children[icol].value
                #print(self.M_[irow,icol])
        self.value = self.M_

    def dummychangecallback(self,change):
        # Intentional no-op callback (kept as an observe/unobserve placeholder).
        pass

    def __init__(self,n,m):
        self.n = n
        self.m = m
        self.M_ = numpy.matrix(numpy.zeros((self.n,self.m)))
        self.value = self.M_
        # Build the visual grid: one HBox row of FloatText cells per matrix row.
        widgets.VBox.__init__(self,
                              children = [
                                  widgets.HBox(children =
                                               [widgets.FloatText(value=0.0, layout=widgets.Layout(width='90px')) for i in range(m)]
                                              )
                                  for j in range(n)
                              ])

        #fill in widgets and tell interact to call updateM each time a children changes value
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].value = self.M_[irow,icol]
                self.children[irow].children[icol].observe(self.updateM, names='value')

        #value = Unicode('example@example.com', help="The email value.").tag(sync=True)
        self.observe(self.updateM, names='value', type= 'All')

    def setM(self, newM):
        # Programmatic update of the whole matrix:
        # disable callbacks, change values, and reenable — otherwise every cell
        # write below would retrigger updateM.
        self.unobserve(self.updateM, names='value', type= 'All')
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].unobserve(self.updateM, names='value')
        self.M_ = newM
        self.value = self.M_
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].value = self.M_[irow,icol]
        for irow in range(0,self.n):
            for icol in range(0,self.m):
                self.children[irow].children[icol].observe(self.updateM, names='value')
        self.observe(self.updateM, names='value', type= 'All')
        #self.children[irow].children[icol].observe(self.updateM, names='value')

#overload class for state space systems that DO NOT remove "useless" states (what "professor" of automatic control would do this?)
class sss(control.StateSpace): def __init__(self,*args): #call base class init constructor control.StateSpace.__init__(self,*args) #disable function below in base class def _remove_useless_states(self): pass #define the sliders for m, k and c m = widgets.FloatSlider( value=4, min=0.1, max=10.0, step=0.1, description='$m$ [kg]:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', ) k = widgets.FloatSlider( value=1, min=0, max=10.0, step=0.1, description='$k$ [N/m]:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', ) c = widgets.FloatSlider( value=4, min=0, max=10.0, step=0.1, description='$c$ [Ns/m]:', disabled=False, continuous_update=False, orientation='horizontal', readout=True, readout_format='.1f', ) #function that make all the computations def main_callback(m, k, c): if c**2-4*k*m >= 0: eig1 = -c/m+1/m*numpy.sqrt(c**2-4*k*m) eig2 = -c/m-1/m*numpy.sqrt(c**2-4*k*m) else: eig1 = -c/m+1j*(1/m*numpy.sqrt(-c**2+4*k*m)) eig2 = -c/m-1j*(1/m*numpy.sqrt(-c**2+4*k*m)) if numpy.real([eig1,eig2])[0] == 0 and numpy.real([eig1,eig2])[1] == 0: T = numpy.linspace(0,20,1000) else: if min(numpy.abs(numpy.real([eig1,eig2]))) != 0: T = numpy.linspace(0,7*1/min(numpy.abs(numpy.real([eig1,eig2]))),1000) else: T = numpy.linspace(0,7*1/max(numpy.abs(numpy.real([eig1,eig2]))),1000) if numpy.isreal(eig1): if eig1 == eig2: mode1 = numpy.exp(eig1*T) mode2 = T*numpy.exp(eig2*T) else: mode1 = numpy.exp(eig1*T) mode2 = numpy.exp(eig2*T) else: mode1 = numpy.exp(eig1.real*T)*numpy.cos(abs(eig1.imag)*T) mode2 = numpy.exp(eig2.real*T)*numpy.sin(abs(eig2.imag)*T) fig = plt.figure(figsize=[16, 5]) fig.set_label('Modes') g1 = fig.add_subplot(121) g2 = fig.add_subplot(122) g1.plot(T,mode1) g1.grid() g1.set_xlabel('čas [s]') g1.set_ylabel('prvi mode') g2.plot(T,mode2) g2.grid() g2.set_xlabel('čas [s]') g2.set_ylabel('drugi mode') # print('The eigenvalues are: -%.3f+%.3fj -%.3f-%.3fj' 
%(abs(eig1.real),abs(eig1.imag),abs(eig2.real),abs(eig2.imag))) modesString = 'Lastni vrednosti sta $' + str(numpy.around(eig1,decimals=3)) + '$ in $' + str(numpy.around(eig2,decimals=3)) + '$ ' if numpy.isreal(eig1): if eig1 == eig2: modesString = modesString + 's pripadajočimi MODES $e^{' + str(numpy.around(eig1,decimals=3))\ + ' t}$ in $te^{' + str(numpy.around(eig2,decimals=3)) + ' t}$.' else: modesString = modesString + 's pripadajočimi MODES $e^{' + str(numpy.around(eig1,decimals=3))\ + ' t}$ in $e^{' + str(numpy.around(eig2,decimals=3)) + ' t}$.' else: modesString = modesString + 's pripadajočimi MODES $e^{' + str(numpy.around(numpy.real(eig1),decimals=3))\ + ' t} \cos{(' + str(numpy.around(abs(numpy.imag(eig1)),decimals=3)) + 't)}$ in $e^{'\ + str(numpy.around(numpy.real(eig2),decimals=3)) + ' t} \sin{(' + str(numpy.around(abs(numpy.imag(eig2)),decimals=3)) + 't)}$.' display(Markdown(modesString)) out = widgets.interactive_output(main_callback,{'m':m,'k':k,'c':c}) sliders = widgets.HBox([k,m,c]) display(out,sliders) ```
github_jupyter
# PRMS v6 BMI coupling - runtime interaction demo * This demonstration will illustrate how the coupled surface-, soil-, groundwater-, and streamflow-BMIs can be interacted with at runtime. * Some initial setup including matching an HRU polygon shapefile with the order of HRUs in the input file * Visualizing results by mapping onto a geopandas dataframe * Using web-based data-services to drive climate forcing * User-controlled forcing to inspect HRU response * Note there are several python files with helper functions associated with this notebook. * helper.py - has plotting functions * gridmet.py / helpers.py - contains functions for the Gridmet data service. More information about Gridmet can be found here: http://www.climatologylab.org/gridmet.html In particular we use the netcdf subsetting service found here: http://thredds.northwestknowledge.net:8080/thredds/reacch_climate_MET_aggregated_catalog.html * Demonstration based on the model developed for Pipestem Creek Watershed in the Prairie Pothole region of North Dakota. ![](../assets/hyp11416-fig-0001-m.jpg) Hay, L, Norton, P, Viger, R, Markstrom, S, Regan, RS, Vanderhoof, M. Modelling surface‐water depression storage in a Prairie Pothole Region. Hydrological Processes. 2018; 32: 462– 479. https://doi.org/10.1002/hyp.11416 ``` %matplotlib inline import numpy as np from pymt.models import PRMSSurface, PRMSSoil, PRMSGroundwater, PRMSStreamflow from pathlib import Path import geopandas as gpd import pandas as pd from gridmet import Gridmet import matplotlib.pyplot as plt import matplotlib import datetime as dt import helper # # If using locally set path HRU and streamsegment shapefiles from data download in README # hru_shp = '../GIS/nhru_10U.shp' # hru_strmseg = '../GIS/nsegment_10U.shp' # # set path to Gridmet weights file for mapping Gridmet gridded data to HRU # weight_file = '../GIS/weights.csv' # If using notebook in CSDMS JupyterHub.
See README for instructions on where to # get the data and uncomment out the following lines hru_shp = '/opt/data/GIS/nhru_10U.shp' hru_strmseg = '/opt/data/GIS/nsegment_10U.shp' # set path to Gridmet weights file for mapping Gridmet gridded data to HRU weight_file = '/opt/data/GIS/weights.csv' ``` ### Set input files for each of the 4 BMIs and instantiate. ``` run_dir = '../prms/pipestem' config_surf= 'control_surface.simple1' config_soil = 'control_soil.simple1' config_gw = 'control_groundwater.simple1' config_sf = 'control_streamflow.simple1' print(Path(run_dir).exists()) print((Path(run_dir) / config_surf).exists()) print((Path(run_dir) / config_soil).exists()) print((Path(run_dir) / config_gw).exists()) print((Path(run_dir) / config_sf).exists()) msurf = PRMSSurface() msoil = PRMSSoil() mgw = PRMSGroundwater() msf = PRMSStreamflow() print(msurf.name, msoil.name, mgw.name, msf.name) ``` ### Initialize the BMIs ``` msurf.initialize(config_surf, run_dir) msoil.initialize(config_soil, run_dir) mgw.initialize(config_gw, run_dir) msf.initialize(config_sf, run_dir) ``` --- ### Open shapefile for the pipestem HRUs and stream segments and make sure the order in the geopandas dataframe matches the order from the model components so they can be easily mapped. Shapefiles are used for spatial plots of the prms6 variables. - get_gdf and get_gdf_stream can be found in helper.py --- ``` gdf_ps = helper.get_gdf(hru_shp, msurf) # print(gdf_ps.head()) gdf_streams = helper.get_gdf_streams(hru_strmseg, msurf) # print(gdf_streams.head()) ``` --- ### Open climate driver data used by PRMS and plot the first day's data and after one year of model time look for a significant precipitation event to view.
--- ``` clim_file = Path('../prms/pipestem/daymet.nc') #plot climate and return clim_file as xarray object clim = helper.plot_climate2(clim_file, gdf_ps, msurf) # plot cumulative sum to find precipitation event cum_sum = clim.cumsum(dim='time') cum_sum.prcp.isel(hru=1)[365:485].plot() ``` --- ## Get some model time information --- ``` # Get time information from the model. print(msurf.get_value('nowtime')) # print(msoil.var['nowtime'].data) print(f'Start time: {msurf.start_time}') print(f'End time: {msurf.end_time}') print(f'Current time : {msurf.time}') ``` --- ## Functions to couple Surface, Soil, Groundwater, and Streamflow BMIs ___ ``` soil_input_cond_vars = ['soil_rechr_chg', 'soil_moist_chg'] surf2soil_vars = ['hru_ppt', 'hru_area_perv', 'hru_frac_perv', 'dprst_evap_hru', 'dprst_seep_hru', 'infil', 'sroff','potet', 'hru_intcpevap', 'snow_evap', 'snowcov_area', 'soil_rechr', 'soil_rechr_max', 'soil_moist', 'soil_moist_max', 'hru_impervevap' , 'srunoff_updated_soil','transp_on'] soil2surf_vars = ['infil', 'sroff', 'soil_rechr', 'soil_moist'] surf2gw_vars = ['pkwater_equiv', 'dprst_seep_hru', 'dprst_stor_hru', 'hru_intcpstor', 'hru_impervstor', 'sroff'] soil2gw_vars = ['soil_moist_tot', 'soil_to_gw', 'ssr_to_gw', 'ssres_flow'] surf2sf_vars = ['potet', 'swrad', 'sroff'] soil2sf_vars = ['ssres_flow'] gw2sf_vars = ['gwres_flow'] def soilinput(msurf, msoil, exch_vars, cond_vars, dprst_flag, imperv_flag): for var in exch_vars: msoil.set_value(var, msurf.get_value(var)) if dprst_flag in [1, 3] or imperv_flag in [1, 3]: for var in cond_vars: msoil.set_value(var, msurf.get_value(var)) def soil2surface(msoil, msurf, exch_vars): for var in exch_vars: msurf.set_value(var, msoil.get_value(var)) def gwinput(msurf, msoil, mgw, surf_vars, soil_vars): for var in surf_vars: mgw.set_value(var, msurf.get_value(var)) for var in soil_vars: mgw.set_value(var, msoil.get_value(var)) def sfinput(msurf, msoil, mgw, msf, surf_vars, soil_vars, gw_vars): for var in surf_vars: 
msf.set_value(var, msurf.get_value(var)) for var in soil_vars: msf.set_value(var, msoil.get_value(var)) for var in gw_vars: msf.set_value(var, mgw.get_value(var)) dprst_flag = msoil.get_value('dyn_dprst_flag') imperv_flag = msoil.get_value('dyn_imperv_flag') def update_coupled(msurf, msoil, mgw, msf, dprst_flag, imperv_flag): msurf.update() soilinput(msurf, msoil, surf2soil_vars, soil_input_cond_vars, dprst_flag, imperv_flag) msoil.update() soil2surface(msoil, msurf, soil2surf_vars) gwinput(msurf, msoil, mgw, surf2gw_vars, soil2gw_vars) mgw.update() sfinput(msurf, msoil, mgw, msf, surf2sf_vars, soil2sf_vars, gw2sf_vars) msf.update() ``` --- Run for 1-year plus 90 days just prior to preciptation event in cumulative plot above --- ``` for time in range(455): update_coupled(msurf, msoil, mgw, msf, dprst_flag, imperv_flag) ``` --- Run for 7-days and plot results --- ``` for i in range(7): update_coupled(msurf, msoil, mgw, msf, dprst_flag, imperv_flag) ptime = msurf.var['nowtime'].data timesel = dt.datetime(ptime[0], ptime[1], ptime[2]) print(f'Model time: {msurf.time}, Date: {timesel}') helper.example_plot_strm(clim, gdf_ps, gdf_streams, msurf, msoil, mgw, msf, i, timesel) # helper.example_plot(clim, gdf_ps, msurf, msoil, i, timesel) for i in range(19): update_coupled(msurf, msoil, mgw, msf, dprst_flag, imperv_flag) ptime = msurf.var['nowtime'].data timesel = dt.datetime(ptime[0], ptime[1], ptime[2]) print(f'Model time: {msurf.time}, Date: {timesel}') ``` ### Drive climate forcing with web-based data services - here Gridmet * Pull Gridmet data from web-service for specified period and map to HRUs ``` # initialize Gridmet data service gmdata = Gridmet("1981-04-26", end_date="1981-05-04", hrumap=True, hru_id=msurf.get_value('nhm_id'), wght_file=weight_file) for i in np.arange(7): msurf.set_value('hru_ppt', (gmdata.precip.data[i,:]*.0393701).astype(np.float32)) msurf.set_value('tmax', ((gmdata.tmax.data[i,:]*(9./5.))+32.0).astype(np.float32)) msurf.set_value('tmin', 
((gmdata.tmin.data[i,:]*(9./5.))+32.0).astype(np.float32)) # print(gmdata.precip[i,:]*.0393701) # print((gmdata.tmax[i,:]*(9/5))+32.0) # print((gmdata.tmin[i,:]*(9/5))+32.0) update_coupled(msurf, msoil, mgw, msf, dprst_flag, imperv_flag) ptime = msurf.var['nowtime'].data timesel = dt.datetime(ptime[0], ptime[1], ptime[2]) print(f'Model time: {msurf.time}, Date: {timesel}') # print(gmdata.precip.data[i,:]*.0393701) helper.example_plot_strm(clim, gdf_ps, gdf_streams, msurf, msoil, mgw, msf, i, timesel) ``` --- In the next cell the precipitation, normally read from the netCDF file is overridden with user defined values. Here we kick one HRU with a large amount of precipitation, 3", and view the response --- ``` for i in range(14): if i == 0: grid_id = msurf.var_grid('hru_ppt') var_type = msurf.var_type('hru_ppt') grid_size = msurf.grid_node_count(grid_id) ppt_override = np.zeros(shape = (grid_size), dtype=var_type) ppt_override[0] = 3.0 msurf.set_value('hru_ppt', ppt_override) update_coupled(msurf, msoil, mgw, msf, dprst_flag, imperv_flag) ptime = msurf.var['nowtime'].data timesel = dt.datetime(ptime[0], ptime[1], ptime[2]) print(f'Model time: {msurf.time}, Date: {timesel}') helper.example_plot_strm(clim, gdf_ps, gdf_streams, msurf, msoil, mgw, msf, i, timesel) ``` View response at individual HRUs by reading the netCDF output files. 
``` t_hru = 0 t_seg = 0 start_date = msoil.time-14 end_date = msoil.time print(start_date, end_date) import xarray as xr surface_file = Path('../prms/pipestem/output/summary_surf_daily.nc') soil_file = Path('../prms/pipestem/output/summary_soil_daily.nc') gw_file = Path('../prms/pipestem/output/summary_gw_daily.nc') strm_file = Path('../prms/pipestem/output/summary_streamflow_daily.nc') dsurf = xr.open_dataset(surface_file, decode_times=False) dsoil = xr.open_dataset(soil_file, decode_times=False) dgw = xr.open_dataset(gw_file, decode_times=False) dsf = xr.open_dataset(strm_file, decode_times=False) fig, ax = plt.subplots(ncols=5, figsize=(12,4)) helper.bmi_prms6_value_plot(dsoil, t_hru, 'soil_moist_tot', 'surface-bmi', start_date, end_date, ax[0]) helper.bmi_prms6_value_plot(dsurf, t_hru, 'sroff', 'surface-bmi', start_date, end_date, ax[1]) helper.bmi_prms6_value_plot(dsoil, t_hru, 'ssres_flow', 'soil-bmi', start_date, end_date, ax[2]) helper.bmi_prms6_value_plot(dgw, t_hru, 'gwres_flow', 'groundwater-bmi', start_date, end_date, ax[3]) helper.bmi_prms6_value_plot(dsf, t_seg, 'seg_outflow', 'streamflow-bmi', start_date, end_date, ax[4]) plt.tight_layout() plt.show() ``` Finalize the BMIs and shut down ``` msurf.finalize() msoil.finalize() mgw.finalize() msf.finalize() ```
github_jupyter
# Ingest data with Redshift This notebook demonstrates how to set up a database with Redshift and query data with it. We are going to use the data we loaded into S3 in the previous notebook [011_Ingest_tabular_data.ipynb](011_Ingest_tabular_data_v1.ipynb) and the database and schema we created in [02_Ingest_data_with_Athena.ipynb](02_Ingest_data_with_Athena_v1.ipynb). Amazon Redshift is a fully managed data warehouse that allows you to run complex analytic queries against petabytes of structured data. Your queries are distributed and parallelized across multiple physical resources, and you can easily scale your Amazon Redshift environment up and down depending on your business needs. You can also check the [existing notebook](https://github.com/aws/amazon-sagemaker-examples/blob/master/advanced_functionality/working_with_redshift_data/working_with_redshift_data.ipynb) for more information on how to load data from and save data to Redshift. ## When should you use Redshift? While Athena is mostly used to run ad-hoc queries on an Amazon S3 data lake, Redshift is usually recommended for large structured data sets, or a traditional relational database; it does well with performing aggregations, complex joins, and inner queries. You would need to set up and launch the cluster before using it, and you need to load data into the created tables. ## Set up Redshift First we are going to make sure we have a policy attached to our role (the role we will create specifically for the Redshift task) to access Redshift. You can do this through the IAM client as below, or through the AWS console. **Note: You would need IAMFullAccess to attach policies to the role.** #### Attach IAMFullAccess Policy from Console **1.** Go to **Sagemaker Console**, choose **notebook instances** in the navigation panel, then select your notebook instance to view the details. Then under **Permissions and Encryption**, click on the **IAM role ARN** link and it will take you to your role summary in the **IAM Console**.
<div> <img src="image/athena-iam-1.png" width="300"/> </div> **2.** Click on **Create Policy** under **Permissions**. <div> <img src="image/athena-iam-2.PNG" width="300"/> </div> **3.** In the **Attach Permissions** page, search for **IAMFullAccess**. It will show up in the policies search results if it has not been attached to your role yet. Select the checkbox for the **IAMFullAccess** Policy, then click **Attach Policy**. You now have the policy successfully attached to your role. <div> <img src="image/athena-iam-3.PNG" width="500"/> </div> ``` %pip install -qU 'sagemaker>=2.15.0' 'PyAthena==1.10.7' 'awswrangler==1.2.0' 'SQLAlchemy==1.3.13' import io import boto3 import sagemaker import json from sagemaker import get_execution_role import os from sklearn.datasets import * import pandas as pd from botocore.exceptions import ClientError import awswrangler as wr from datetime import date # Get region session = boto3.session.Session() region_name = session.region_name # Get SageMaker session & default S3 bucket sagemaker_session = sagemaker.Session() bucket = sagemaker_session.default_bucket() #replace with your own bucket name if you have one role = sagemaker.get_execution_role() prefix = 'data/tabular/boston_house' filename = 'boston_house.csv' iam = boto3.client('iam') sts = boto3.client('sts') redshift = boto3.client('redshift') sm = boto3.client('sagemaker') s3 = sagemaker_session.boto_session.resource('s3') role_name = role.split('/')[-1] print('Your Role name used to create this notebook is: {}'.format(role_name)) ``` ### Download data from online resources and write data to S3 ``` #helper functions to upload data to s3 def write_to_s3(filename, bucket, prefix): #put one file in a separate folder. 
This is helpful if you read and prepare data with Athena filename_key = filename.split('.')[0] key = "{}/{}/{}".format(prefix,filename_key,filename) return s3.Bucket(bucket).upload_file(filename,key) def upload_to_s3(bucket, prefix, filename): url = 's3://{}/{}/{}'.format(bucket, prefix, filename) print('Writing to {}'.format(url)) write_to_s3(filename, bucket, prefix) tabular_data = load_boston() tabular_data_full = pd.DataFrame(tabular_data.data, columns=tabular_data.feature_names) tabular_data_full['target'] = pd.DataFrame(tabular_data.target) tabular_data_full.to_csv('boston_house.csv', index = False) upload_to_s3(bucket, 'data/tabular', filename) ``` ### Create Redshift Role The policy enables Redshift to assume the role. The services can then perform any tasks granted by the permissions policy assigned to the role (which we will attach to it later). ``` assume_role_policy_doc = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Principal": { "Service": "redshift.amazonaws.com" }, "Action": "sts:AssumeRole" } ] } # Create Role iam_redshift_role_name = 'Tabular_Redshift' try: iam_role_redshift = iam.create_role( RoleName=iam_redshift_role_name, AssumeRolePolicyDocument=json.dumps(assume_role_policy_doc), Description='Tabular data Redshift Role' ) except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Role already exists") else: print("Unexpected error: %s" % e) #get role arn role_rs = iam.get_role(RoleName='Tabular_Redshift') iam_role_redshift_arn = role_rs['Role']['Arn'] print('Your Role arn used to create a Redshift Cluster is: {}'.format(iam_role_redshift_arn)) ``` ### Create Policy Document We will create policies we used to access S3 and Athena. The two policies we will create here are: * S3FullAccess: `arn:aws:iam::aws:policy/AmazonS3FullAccess` * AthenaFullAccess: `arn:aws:iam::aws:policy/AmazonAthenaFullAccess` You can check the policy document in the IAM console and copy the policy file here. 
``` #s3FullAccess my_redshift_to_s3 = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": "s3:*", "Resource": "*" } ] } #Athena Full Access my_redshift_to_athena = { "Version": "2012-10-17", "Statement": [ { "Effect": "Allow", "Action": [ "athena:*" ], "Resource": [ "*" ] }, { "Effect": "Allow", "Action": [ "glue:CreateDatabase", "glue:DeleteDatabase", "glue:GetDatabase", "glue:GetDatabases", "glue:UpdateDatabase", "glue:CreateTable", "glue:DeleteTable", "glue:BatchDeleteTable", "glue:UpdateTable", "glue:GetTable", "glue:GetTables", "glue:BatchCreatePartition", "glue:CreatePartition", "glue:DeletePartition", "glue:BatchDeletePartition", "glue:UpdatePartition", "glue:GetPartition", "glue:GetPartitions", "glue:BatchGetPartition" ], "Resource": [ "*" ] }, { "Effect": "Allow", "Action": [ "s3:GetBucketLocation", "s3:GetObject", "s3:ListBucket", "s3:ListBucketMultipartUploads", "s3:ListMultipartUploadParts", "s3:AbortMultipartUpload", "s3:CreateBucket", "s3:PutObject" ], "Resource": [ "arn:aws:s3:::aws-athena-query-results-*" ] }, { "Effect": "Allow", "Action": [ "s3:GetObject", "s3:ListBucket" ], "Resource": [ "arn:aws:s3:::athena-examples*" ] }, { "Effect": "Allow", "Action": [ "s3:ListBucket", "s3:GetBucketLocation", "s3:ListAllMyBuckets" ], "Resource": [ "*" ] }, { "Effect": "Allow", "Action": [ "sns:ListTopics", "sns:GetTopicAttributes" ], "Resource": [ "*" ] }, { "Effect": "Allow", "Action": [ "cloudwatch:PutMetricAlarm", "cloudwatch:DescribeAlarms", "cloudwatch:DeleteAlarms" ], "Resource": [ "*" ] }, { "Effect": "Allow", "Action": [ "lakeformation:GetDataAccess" ], "Resource": [ "*" ] } ] } try: policy_redshift_s3 = iam.create_policy( PolicyName='Tabular_RedshiftPolicyToS3', PolicyDocument=json.dumps(my_redshift_to_s3) ) print ('Policy created.') except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print ("Policy already exists") else: print ("Unexpected error: %s" % e) account_id = 
sts.get_caller_identity()['Account'] policy_redshift_s3_arn = f'arn:aws:iam::{account_id}:policy/Tabular_RedshiftPolicyToS3' try: policy_redshift_athena = iam.create_policy( PolicyName='Tabular_RedshiftPolicyToAthena', PolicyDocument=json.dumps(my_redshift_to_athena) ) print ('Policy created.') except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print ("Policy already exists") else: print ("Unexpected error: %s" % e) account_id = sts.get_caller_identity()['Account'] policy_redshift_athena_arn = f'arn:aws:iam::{account_id}:policy/Tabular_RedshiftPolicyToAthena' ``` ### Attach Policy to Role ``` # Attach RedshiftPolicyToAthena policy try: response = iam.attach_role_policy( PolicyArn=policy_redshift_athena_arn, RoleName=iam_redshift_role_name ) except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Policy is already attached. This is ok.") else: print("Unexpected error: %s" % e) # Attach RedshiftPolicyToS3 policy try: response = iam.attach_role_policy( PolicyArn=policy_redshift_s3_arn, RoleName=iam_redshift_role_name ) except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Policy is already attached. This is ok.") else: print("Unexpected error: %s" % e) ``` ### Making Sure your Role **to run this Notebook** has the following policy attached: * `SecretsManagerReadWrite`: we will use this service to store and retrive our Redshift Credentials. * `AmazonRedshiftFullAccess`: we will use this role to create a Redshift cluster from the notebook. 
``` #making sure you have secret manager policy attached to role try: policy='SecretsManagerReadWrite' response = iam.attach_role_policy( PolicyArn='arn:aws:iam::aws:policy/{}'.format(policy), RoleName=role_name ) print("Policy %s has been succesfully attached to role: %s" % (policy, role_name)) except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Policy is already attached.") else: print("Unexpected error: %s " % e) #making sure you have RedshiftFullAccess policy attached to role from botocore.exceptions import ClientError try: policy='AmazonRedshiftFullAccess' response = iam.attach_role_policy( PolicyArn='arn:aws:iam::aws:policy/{}'.format(policy), RoleName=role_name ) print("Policy %s has been succesfully attached to role: %s" % (policy, role_name)) except ClientError as e: if e.response['Error']['Code'] == 'EntityAlreadyExists': print("Policy is already attached. ") else: print("Unexpected error: %s " % e) ``` # Optional: Create Redshift Cluster Most of the times we have a Redshift cluster already up and running and we want to connect to the cluster in-use, but if you want to create a new cluster, you can follow the steps below to create one. *Note that only some Instance Types support Redshift Query Editor, so be careful when you specify the Redshift Cluster Nodes.*(https://docs.aws.amazon.com/redshift/latest/mgmt/query-editor.html). ``` notebook_instance_name = sm.list_notebook_instances()['NotebookInstances'][0]['NotebookInstanceName'] ``` ### Create Secret in Secrets Manager AWS Secrets Manager is a service that enables you to easily rotate, manage, and retrieve database credentials, API keys, and other secrets throughout their lifecycle. Using Secrets Manager, you can secure and manage secrets used to access resources in the AWS Cloud, on third-party services, and on-premises. *note that `MasterUserPassword` must contain at least 1 upper case letter and at least 1 decimal digit. 
``` secretsmanager = boto3.client('secretsmanager') try: response = secretsmanager.create_secret( Name='tabular_redshift_login', Description='Boston House data New Cluster Redshift Login', SecretString='[{"username":"awsuser"},{"password":"Bostonhouse1"}]', Tags=[ { 'Key': 'name', 'Value': 'tabular_redshift_login' }, ] ) except ClientError as e: if e.response['Error']['Code'] == 'ResourceExistsException': print("Secret already exists. This is ok.") else: print("Unexpected error: %s" % e) # And retrieving the secret again secretsmanager = boto3.client('secretsmanager') import json secret = secretsmanager.get_secret_value(SecretId='tabular_redshift_login') cred = json.loads(secret['SecretString']) master_user_name = cred[0]['username'] master_user_pw = cred[1]['password'] # Set up parameters # Redshift configuration parameters redshift_cluster_identifier = 'redshiftdemo' database_name = 'bostonhouse' cluster_type = 'multi-node' node_type = 'dc2.large' number_nodes = '2' ``` When creating a new cluster, you want to make sure that the Redshift VPC is the same one you used to create your notebook in. Your VPC should have the following two VPC attributes set to **yes**: **DNS resolution** and **DNS hostnames**. You can either specify a **security group** or specify a created **cluster subnet group name** (which you will create from the Redshift console). If you are not using default VPC and using **security group** returns VPC error, you can try create a subnet group in Redshift Console, by choose **Configurations** -> **subnet groups** -> **create cluster subnet group**, then specify the **VPC** and **subnet** you want to choose and you created this notebook in. Specify the `ClusterSubnetGroupName` in the following command with the subnet group you created. 
### Optional: Get Security Group ID ``` notebook_instance = sm.describe_notebook_instance(NotebookInstanceName=notebook_instance_name) security_group_id = notebook_instance['SecurityGroups'][0] print(security_group_id) ``` ### Create Redshift Cluster using Subnet Group ``` response = redshift.create_cluster( DBName=database_name, ClusterIdentifier=redshift_cluster_identifier, ClusterType=cluster_type, NodeType=node_type, NumberOfNodes=int(number_nodes), MasterUsername=master_user_name, MasterUserPassword=master_user_pw, ClusterSubnetGroupName='cluster-subnet-group-1', #you can either specify an existing subnet group (change this to your Subnet Group name), or specify your security group below IamRoles=[iam_role_redshift_arn], VpcSecurityGroupIds=[security_group_id], Port=5439, PubliclyAccessible=False ) print(response) ``` Wait until the status of your redshift cluster become **available**. ``` #check cluster status response = redshift.describe_clusters(ClusterIdentifier=redshift_cluster_identifier) cluster_status = response['Clusters'][0]['ClusterStatus'] print('Your Redshift Cluster Status is: ' + cluster_status) ``` # Existing Redshift Cluster ### Prerequisites Your existing Redshift cluster have to be in the **same VPC** as your notebook instance. Also, note that this Notebook instance needs to resolve to a private IP when connecting to the Redshift instance. There are two ways to resolve the Redshift DNS name to a private IP: The Redshift cluster is not publicly accessible so by default it will resolve to private IP. The Redshift cluster is publicly accessible and has an EIP associated with it but when accessed from within a VPC, it should resolve to private IP of the Redshift cluster. This is possible by setting following two VPC attributes to yes: **DNS resolution** and **DNS hostnames**. 
For instructions on setting that up, see Redshift public docs on [Managing Clusters in an Amazon Virtual Private Cloud (VPC)](https://docs.aws.amazon.com/redshift/latest/mgmt/managing-clusters-vpc.html). We will use [sqlalchemy](https://pypi.org/project/SQLAlchemy/) to connect to the Redshift database engine. ``` from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker ``` #### Retrieve Redshift credentials from Secrets Manager ``` secretsmanager = boto3.client('secretsmanager') secret = secretsmanager.get_secret_value(SecretId='tabular_redshift_login') cred = json.loads(secret['SecretString']) master_user_name = cred[0]['username'] master_user_pw = cred[1]['password'] ``` #### Set up parameters for connection: replace with your own parameters We are going to use the data and schema created in the previous notebook Ingest_data_with_Athena.ipynb. If you see an error below, please make sure you run through the [02_Ingest_data_with_Athena.ipynb](02_Ingest_data_with_Athena_v1.ipynb) notebook before the next steps.
``` redshift_cluster_identifier = 'redshiftdemo' database_name_redshift = 'bostonhouse' database_name_athena = 'tabularbh' redshift_port = '5439' schema_redshift = 'redshift' schema_spectrum = 'spectrum' table_name_csv = 'boston_house_athena' ``` #### Check cluster status to see if it is available ``` #check cluster status response = redshift.describe_clusters(ClusterIdentifier=redshift_cluster_identifier) cluster_status = response['Clusters'][0]['ClusterStatus'] print(cluster_status) # Get Redshift Endpoint Address & IAM Role redshift_endpoint_address = response['Clusters'][0]['Endpoint']['Address'] iam_role = response['Clusters'][0]['IamRoles'][0]['IamRoleArn'] print('Redshift endpoint: {}'.format(redshift_endpoint_address)) print('IAM Role: {}'.format(iam_role)) ``` #### Create Engine https://docs.sqlalchemy.org/en/13/core/engines.html ``` # Connect to Redshift Database Engine engine = create_engine('postgresql://{}:{}@{}:{}/{}'.format(master_user_name, master_user_pw, redshift_endpoint_address, redshift_port, database_name_redshift)) ``` #### Create Session: we will use this session to run SQL commands ``` # config session session = sessionmaker() session.configure(bind=engine) s = session() ``` ## Method 1: Access Data without Moving it to Redshift: Amazon Redshift Spectrum [Redshift Spectrum](https://docs.aws.amazon.com/redshift/latest/dg/c-getting-started-using-spectrum.html) is used to query data directly from files on Amazon S3.You will need to create external tables in an external schema. The external schema references a database in the external data catalog and provides the IAM role ARN that authorizes your cluster to access Amazon S3 on your behalf. 
#### Get table and schema information from the Glue Catalog: getting meta data from data catalog and connecting to the Athena database ``` statement = """ rollback; create external schema if not exists {} from data catalog database '{}' iam_role '{}' create external database if not exists """.format(schema_spectrum, database_name_athena, iam_role) s.execute(statement) s.commit() ``` #### Run a sample query through Redshift Spectrum ``` statement = """ select * from {}.{} limit 10 """.format(schema_spectrum, table_name_csv) df = pd.read_sql_query(statement, engine) df.head(5) ``` ## Method 2: Loading Data into Redshift from Athena To load data into Redshift, you need to either use `COPY` command or `INSERT INTO` command to move data into a table from data files. Copied files may reside in an S3 bucket, an EMR cluster, or on a remote host accessed. #### Create Schema in Redshift ``` #create schema statement = """create schema if not exists {}""".format(schema_redshift) s = session() s.execute(statement) s.commit() ``` #### Create Redshift Table ``` table_name_redshift = table_name_csv+'_'+'redshift_insert' statement = """ rollback; create table if not exists redshift.{}( CRIM float, ZN float, INDUS float, CHAS float, NOX float, RM float, AGE float, DIS float, RAD float, TAX float, PTRATIO float, B float, LSTAT float, target float)""".format(table_name_redshift) s.execute(statement) s.commit() ``` #### `Insert into` data into the table we created https://docs.aws.amazon.com/redshift/latest/dg/c_Examples_of_INSERT_30.html ``` table_name_redshift = table_name_csv+'_'+'redshift_insert' statement = """ insert into redshift.{} select * from {}.{} """.format(table_name_redshift, schema_spectrum, table_name_csv) s.execute(statement) s.commit() ``` #### Query data in Redshift ``` statement = """ select * from redshift.{} limit 10 """.format(table_name_redshift) df = pd.read_sql_query(statement, engine) df.head(5) ``` ## Method 3: Copy data directly from S3 You can also `Copy` 
Data into a new table. https://docs.aws.amazon.com/redshift/latest/dg/tutorial-loading-run-copy.html #### Create a new table in Redshift ``` #create a new sample table table_name_redshift = table_name_csv+'_'+'redshift_copy' statement = """ rollback; create table if not exists redshift.{}( CRIM float, ZN float, INDUS float, CHAS float, NOX float, RM float, AGE float, DIS float, RAD float, TAX float, PTRATIO float, B float, LSTAT float, target float)""".format(table_name_redshift) s.execute(statement) s.commit() ``` #### Copy data into Redshift table Redshift assumes your data comes in pipe delimited, so if you are reading in csv or txt, be sure to specify the `delimiter`. To load data that is in `CSV` format, add `csv` to your `COPY` command. Also since we are reading directly from S3, if your data has a header, remember to add `ignoreheader` to your command. ``` table_name_redshift = table_name_csv+'_'+'redshift_copy' data_s3_path = 's3://sagemaker-us-east-2-060356833389/data/tabular/boston_house/boston_house.csv' statement = """ rollback; copy redshift.{} from '{}' iam_role '{}' csv ignoreheader 1 """.format(table_name_redshift, data_s3_path, iam_role) s.execute(statement) s.commit() statement = """ select * from redshift.{} limit 10 """.format(table_name_redshift) df_copy = pd.read_sql_query(statement, engine) df_copy.head(5) ``` #### Error Handling Sometimes you might see an error stating "Load into table 'part' failed. Check 'stl_load_errors' system table for details.", and below is a helpful function to check where the copying process went wrong. You can find more information in the [Redshift Load Error documentation](https://docs.aws.amazon.com/redshift/latest/dg/r_STL_LOAD_ERRORS.html). 
``` statement = """ select query, substring(filename,22,25) as filename,line_number as line, substring(colname,0,12) as column, type, position as pos, substring(raw_line,0,30) as line_text, substring(raw_field_value,0,15) as field_text, substring(err_reason,0,45) as reason from stl_load_errors order by query desc limit 10""" error = pd.read_sql_query(statement, engine) error.head(5) ``` ## Method 4: AWS Data Wrangler You can find more information on how AWS Data Wrangler works at [this tutorial](https://github.com/awslabs/aws-data-wrangler/blob/master/tutorials/008%20-%20Redshift%20-%20Copy%20%26%20Unload.ipynb). ### AWS Data Wrangler Get Engine Function Run this command within a private subnet. You can find your host address by going to the Redshift Console, then choose **Clusters** -> **Property** -> **Connection details** -> **View all connection details** -> **Node IP address** -> **Private IP address**. https://aws-data-wrangler.readthedocs.io/en/latest/stubs/awswrangler.db.get_engine.html#awswrangler.db.get_engine ``` engine = wr.db.get_engine( db_type="postgresql", host= '10.0.14.121', #Private IP address of your Redshift Cluster port=redshift_port, database=database_name_redshift, user = master_user_name, password=master_user_pw ) df = wr.db.read_sql_query("SELECT * FROM redshift.{}".format(table_name_redshift), con=engine) df.head() ``` ### Citation Boston Housing data, Harrison, D. and Rubinfeld, D.L. `Hedonic prices and the demand for clean air', J. Environ. Economics & Management, vol.5, 81-102, 1978. Data Science On AWS workshops, Chris Fregly, Antje Barth, https://www.datascienceonaws.com/
github_jupyter
<a href="https://colab.research.google.com/github/Manan1811/FaceNet-Model/blob/main/FaceNet_Model2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !nvidia-smi from google.colab import drive drive.mount('/content/drive') !wget https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/wiki_crop.tar !wget https://data.vision.ee.ethz.ch/cvl/rrothe/imdb-wiki/static/imdb_crop.tar !tar -xf wiki_crop.tar !tar -xf imdb_crop.tar import os ages = os.scandir('wiki_crop') length = 0 for age in ages: print(age.path) if '.mat' not in str(age.path): length+=len(os.listdir(age.path)) print(length) import os ages = os.scandir('imdb_crop') length = 0 for age in ages: print(age.path) if '.mat' not in str(age.path): length+=len(os.listdir(age.path)) print(length) import scipy.io imdbMat = scipy.io.loadmat('imdb_crop/imdb.mat') imdbPlace = imdbMat['imdb'][0][0] imdbMat print(imdbPlace) print(len(imdbPlace)) for item in imdbPlace: print(item) print(len(item[0])) wikiMat = scipy.io.loadmat('wiki_crop/wiki.mat') wikiPlace = wikiMat['wiki'][0][0] print(wikiPlace) print(len(wikiPlace)) for item in wikiPlace: print(item) print(len(item[0])) place = imdbPlace where='imdb_crop' img_loc=[] corr_ages=[] total = 0 for i in range(460723): #print(place[0][0][i]) bYear = int(place[0][0][i]/365) #birth year #print(bYear) taken = place[1][0][i] #photo taken #print(taken) path = place[2][0][i][0] age = taken - bYear img_loc.append(os.path.join(where,path)) corr_ages.append(age) """print("AGE", age) print('----------------------') faceScore = str(faceScore) secFaceScore = str(secFaceScore) if 'n' not in faceScore: # n as in Inf; if true, implies that there isn't a face in the image if 'a' in secFaceScore: #a as in NaN; implies that no second face was found if age >= 0: try: gender = int(gender) total +=1 if i > 1500: print('----------------------') print(i) print(bYear) print(taken) print("AGE", age) print("NAME", name) 
print("GENDER", gender) print(faceBox) print(faceScore) print(secFaceScore) imShow(os.path.join(where,path)) break except: print('Failed with gender') continue""" #print(total) print(img_loc[:10]) print(corr_ages[:10]) import pandas as pd df = pd.DataFrame(img_loc,columns=['Image Location']) df df['Age']=corr_ages df def imShow(path): import cv2 import matplotlib.pyplot as plt %matplotlib inline image = cv2.imread(path) height, width = image.shape[:2] resized_image = cv2.resize(image,(3*width, 3*height), interpolation = cv2.INTER_CUBIC) fig = plt.gcf() fig.set_size_inches(18, 10) plt.axis("off") #plt.rcParams['figure.figsize'] = [10, 5] plt.imshow(cv2.cvtColor(resized_image, cv2.COLOR_BGR2RGB)) import matplotlib.pyplot as plt %matplotlib inline import cv2 image=cv2.imread(df['Image Location'][10]) new_image=cv2.resize(image,(224,224),interpolation = cv2.INTER_CUBIC) print(new_image.shape) fig = plt.gcf() fig.set_size_inches(18, 10) plt.axis("off") #plt.rcParams['figure.figsize'] = [10, 5] plt.imshow(cv2.cvtColor(new_image, cv2.COLOR_BGR2RGB)) from PIL import Image import numpy as np import random def get_images(batch_size, img_size=(160,160),add=df['Image Location'], age=df['Age']): rand=random.sample(range(10, len(add)), batch_size) X=[] y=[] for i in rand: image=cv2.imread(add[i]) curr_img=cv2.resize(image,img_size,interpolation = cv2.INTER_CUBIC) curr_img=curr_img.astype('float64') curr_img=curr_img/127.5 curr_img=curr_img-1 curr_age=age[i] X.append(curr_img) y.append(curr_age) return X,y !pip install git+https://github.com/rcmalli/keras-vggface.git ! pip show keras-vggface ! pip install mtcnn ! git clone https://github.com/arshagarwal/outlier-experiment.git -b slim_dataset_creation cd './outlier-experiment' ! 
bash import_weights.sh from tensorflow.keras.models import load_model prev_model=load_model('Facenet/facenet_model.h5') prev_model_weights=model.load_weights('Facenet/facenet_weights.h5') prev_model.summary() # Regression Model import tensorflow as tf from keras.models import Sequential from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, BatchNormalization, Dropout model=Sequential() model.add(prev_model) model.add(Dense(64,activation='relu')) model.add(BatchNormalization()) model.add(Dense(32,activation='relu')) model.add(BatchNormalization()) model.add(Dense(16,activation='relu')) model.add(BatchNormalization()) model.add(Dense(1, activation='linear')) model.summary() from PIL import Image import numpy as np import random import cv2 image=cv2.imread(df['Image Location'][10]) curr_img=cv2.resize(image,(160,160),interpolation = cv2.INTER_CUBIC) curr_img=curr_img.astype('float64') curr_img=curr_img/127.5 curr_img=curr_img-1 print(curr_img.shape) epochs=5 batch_size=5 for i in range(epochs): X,y=get_images(batch_size) X=np.array(X) y=np.array(y) print(X.shape,y.shape) epochs=20 batch_size=10 for i in range(epochs): X,y=get_images(batch_size) X=np.array(X) y=np.array(y) model.compile(loss='categorical_crossentropy', metrics=['accuracy'], optimizer='adam') ```
github_jupyter
[Index](Index.ipynb) - [Back](Widget Styling.ipynb) - [Next](Widget Asynchronous.ipynb) ``` from __future__ import print_function ``` # Building a Custom Widget - Hello World The widget framework is built on top of the Comm framework (short for communication). The Comm framework is a framework that allows the kernel to send/receive JSON messages to/from the front end (as seen below). ![Widget layer](images/WidgetArch.png) To create a custom widget, you need to define the widget both in the browser and in the python kernel. ## Building a Custom Widget To get started, you'll create a simple hello world widget. Later you'll build on this foundation to make more complex widgets. ## Python Kernel ### DOMWidget and Widget To define a widget, you must inherit from the Widget or DOMWidget base class. If you intend for your widget to be displayed in the Jupyter notebook, you'll want to inherit from the DOMWidget. The DOMWidget class itself inherits from the Widget class. The Widget class is useful for cases in which the Widget is not meant to be displayed directly in the notebook, but instead as a child of another rendering environment. For example, if you wanted to create a three.js widget (a popular WebGL library), you would implement the rendering window as a DOMWidget and any 3D objects or lights meant to be rendered in that window as Widgets. ### _view_name Inheriting from the DOMWidget does not tell the widget framework what front end widget to associate with your back end widget. Instead, you must tell it yourself by defining specially named trait attributes, `_view_name`, `_view_module`, and `_view_module_version` (as seen below) and optionally `_model_name` and `_model_module`. 
``` import ipywidgets as widgets from traitlets import Unicode, validate class HelloWidget(widgets.DOMWidget): _view_name = Unicode('HelloView').tag(sync=True) _view_module = Unicode('hello').tag(sync=True) _view_module_version = Unicode('0.1.0').tag(sync=True) ``` ### sync=True traitlets Traitlets is an IPython library for defining type-safe properties on configurable objects. For this tutorial you do not need to worry about the *configurable* piece of the traitlets machinery. The `sync=True` keyword argument tells the widget framework to handle synchronizing that value to the browser. Without `sync=True`, the browser would have no knowledge of `_view_name` or `_view_module`. ### Other traitlet types Unicode, used for `_view_name`, is not the only Traitlet type, there are many more some of which are listed below: - Any - Bool - Bytes - CBool - CBytes - CComplex - CFloat - CInt - CLong - CRegExp - CUnicode - CaselessStrEnum - Complex - Dict - DottedObjectName - Enum - Float - FunctionType - Instance - InstanceType - Int - List - Long - Set - TCPAddress - Tuple - Type - Unicode - Union Not all of these traitlets can be synchronized across the network, only the JSON-able traits and Widget instances will be synchronized. ## Front end (JavaScript) ### Models and views The IPython widget framework front end relies heavily on [Backbone.js](http://backbonejs.org/). Backbone.js is an MVC (model view controller) framework. Widgets defined in the back end are automatically synchronized with generic Backbone.js models in the front end. The traitlets are added to the front end instance automatically on first state push. The `_view_name` trait that you defined earlier is used by the widget framework to create the corresponding Backbone.js view and link that view to the model. ### Import @jupyter-widgets/base You first need to import the `@jupyter-widgets/base` module. To import modules, use the `define` method of [require.js](http://requirejs.org/) (as seen below). 
``` %%javascript define('hello', ["@jupyter-widgets/base"], function(widgets) { }); ``` ### Define the view Next, define your widget view class. Inherit from the `DOMWidgetView` by using the `.extend` method. ``` %%javascript require.undef('hello'); define('hello', ["@jupyter-widgets/base"], function(widgets) { // Define the HelloView var HelloView = widgets.DOMWidgetView.extend({ }); return { HelloView: HelloView } }); ``` ### Render method Lastly, override the base `render` method of the view to define custom rendering logic. A handle to the widget's default DOM element can be acquired via `this.el`. The `el` property is the DOM element associated with the view. ``` %%javascript require.undef('hello'); define('hello', ["@jupyter-widgets/base"], function(widgets) { var HelloView = widgets.DOMWidgetView.extend({ // Render the view. render: function() { this.el.textContent = 'Hello World!'; }, }); return { HelloView: HelloView }; }); ``` ## Test You should be able to display your widget just like any other widget now. ``` HelloWidget() ``` ## Making the widget stateful There is not much that you can do with the above example that you can't do with the IPython display framework. To change this, you will make the widget stateful. Instead of displaying a static "hello world" message, it will display a string set by the back end. First you need to add a traitlet in the back end. Use the name of `value` to stay consistent with the rest of the widget framework and to allow your widget to be used with interact. ``` class HelloWidget(widgets.DOMWidget): _view_name = Unicode('HelloView').tag(sync=True) _view_module = Unicode('hello').tag(sync=True) _view_module_version = Unicode('0.1.0').tag(sync=True) value = Unicode('Hello World!').tag(sync=True) ``` ### Accessing the model from the view To access the model associated with a view instance, use the `model` property of the view. `get` and `set` methods are used to interact with the Backbone model. 
`get` is trivial, however you have to be careful when using `set`. After calling the model `set` you need call the view's `touch` method. This associates the `set` operation with a particular view so output will be routed to the correct cell. The model also has an `on` method, which allows you to listen to events triggered by the model (like value changes). ### Rendering model contents By replacing the string literal with a call to `model.get`, the view will now display the value of the back end upon display. However, it will not update itself to a new value when the value changes. ``` %%javascript require.undef('hello'); define('hello', ["@jupyter-widgets/base"], function(widgets) { var HelloView = widgets.DOMWidgetView.extend({ render: function() { this.el.textContent = this.model.get('value'); }, }); return { HelloView : HelloView }; }); ``` ### Dynamic updates To get the view to update itself dynamically, register a function to update the view's value when the model's `value` property changes. This can be done using the `model.on` method. The `on` method takes three parameters, an event name, callback handle, and callback context. The Backbone event named `change` will fire whenever the model changes. By appending `:value` to it, you tell Backbone to only listen to the change event of the `value` property (as seen below). ``` %%javascript require.undef('hello'); define('hello', ["@jupyter-widgets/base"], function(widgets) { var HelloView = widgets.DOMWidgetView.extend({ render: function() { this.value_changed(); this.model.on('change:value', this.value_changed, this); }, value_changed: function() { this.el.textContent = this.model.get('value'); }, }); return { HelloView : HelloView }; }); ``` ## Test ``` w = HelloWidget() w w.value = 'test' ``` ## Conclusion The example above dumps the value directly into the DOM. There is no way for you to interact with this dumped data in the front end. 
To create an example that accepts input, you will have to do something more than blindly dumping the contents of value into the DOM. In the next section of the tutorial, you will build a date picker to display and accept input in the front end. ## More advanced uses: Packaging and distributing Jupyter widgets A template project is available in the form of a cookie cutter: https://github.com/jupyter/widget-cookiecutter This project is meant to help custom widget authors get started with the packaging and the distribution of Jupyter interactive widgets. It produces a project for a Jupyter interactive widget library following the current best practices for using interactive widgets. An implementation for a placeholder "Hello World" widget is provided. [Index](Index.ipynb) - [Back](Widget Styling.ipynb) - [Next](Widget Asynchronous.ipynb)
github_jupyter
``` %load_ext autoreload %autoreload 2 ``` > **How to run this notebook (command-line)?** 1. Install the `ReinventCommunity` environment: `conda env create -f environment.yml` 2. Activate the environment: `conda activate ReinventCommunity` 3. Execute `jupyter`: `jupyter notebook` 4. Copy the link to a browser # `REINVENT` score transformation notebook This notebook serves two purposes: **(a)** to explain, what is meant by *score transformation* in the context of `REINVENT` and how to use it and **(b)** to serve as a way to find the proper transfomation parameters for a new or updated component. ### Background As described in the Reinforcement Learning notebook in this repository, `REINVENT` uses different components in its scoring functions, which can be freely combined to generate a composite score for a compound. Each component returns a partial score between '0' and '1' and a selected functional form (either a product or a sum) produces the final composite score (again, a number between '0' and '1'). The following lines are an excerpt of an actual run that illustrates this: ``` INFO Step 0 Fraction valid SMILES: 99.2 Score: 0.1583 Time elapsed: 0 Time left: 0.0 Agent Prior Target Score SMILES -29.25 -29.25 -0.60 0.22 n1c(CN2CCOCC2)cnc2[nH]c3c(cc(C)c(C)c3)c12 -27.63 -27.63 -27.63 0.00 C1N(Cc2ccccc2)C(=O)c2cccc(N3C(=O)c4ccc(C(O)=O)cc4C3=O)c2C1 -40.76 -40.76 -14.11 0.21 C(NC(c1csnn1)=O)(c1ccc(-c2cc(Cl)cc(F)c2-c2nnn(C)n2)o1)(C)CC Regression model Matching substructure Custom alerts QED Score 0.3370678424835205 0.5 1.0 0.78943 0.3446018993854522 1.0 0.0 0.64563 0.3945346176624298 0.5 1.0 0.46391 ``` Each component (e.g. the `QED Score`) produces a partial score which is combined into `Score` (see product functional below). 
![](img/scoring_function_product.png) For this to work, we need to ensure, that all components return a meaningful value from the interval [0,1], on top of which values closer to '1' must mean "better" as we are always trying to maximize the composite score. However, most components will not naturally comply with this requirements. In those cases, we can define a transformation (effectively the application of a mathematical function with given parameters) to the "raw" score returned by the component before feeding it into the scoring function. First, let's load up everything we need: ``` # load the dependencies and classes used %run code/score_transformation_code.py # set plotting parameters small = 12 med = 16 large = 22 params = {"axes.titlesize": large, "legend.fontsize": med, "figure.figsize": (16, 10), "axes.labelsize": med, "axes.titlesize": med, "xtick.labelsize": med, "ytick.labelsize": med, "figure.titlesize": large} plt.rcParams.update(params) plt.style.use("seaborn-whitegrid") sns.set_style("white") %matplotlib inline # set up Enums and factory tt_enum = TransformationTypeEnum() csp_enum = ComponentSpecificParametersEnum() factory = TransformationFactory() ``` ### Example The following example simulates the incorporation of a new (fictious) component. Let us assume, we have run 10 compounds through this component and got the following values: ``` -12.4, -9.0, 1.3, 2.3, 0.7, -4.2, -0.3, -7.7, -9.9, 3.3 ``` From your experience, you do consider a value above 0.3 to be very interesting and anything below -3 as completely useless. Thus we will choose a `sigmoid` transformation and adapt the parameters to reflect that. To get a nice curve in a plot which helps us deciding whether we are on the right track, we will define a range of values from -10 to 5 in a list. 
``` # specify a list of dummy input values values_list = np.arange(-10, 5, 0.25).tolist() # set up the parameters specific_parameters = {csp_enum.TRANSFORMATION: True, csp_enum.LOW: -2, csp_enum.HIGH: 1.25, csp_enum.K: 0.17, csp_enum.TRANSFORMATION_TYPE: tt_enum.SIGMOID} transform_function = factory.get_transformation_function(specific_parameters) transformed_scores = transform_function(predictions=values_list, parameters=specific_parameters) # render the curve render_curve(title="Sigmoid transformation function", x=values_list, y=transformed_scores) # check, whether the transformation does what we expect input_values = [-12.4, -9.0, 1.3, 2.3, 0.7, -4.2, -0.3, -7.7, -9.9, 3.3] output_values = transform_function(predictions=input_values, parameters=specific_parameters) print(input_values) print([round(x, 1) for x in output_values]) ``` As you can see, we have found a transformation that satisfies our needs in this case. It is important, that there is a smooth transition (so do not set parameter `K` to very high values), so that the "trail" can be picked up. You can play around with the parameters to see the effect on the curve and the output values. The parameters can be directly set in the `REINVENT` configuration file. ### All transformations Of course, sometimes you will need other transformations than a `sigmoid` one, so below is a complete list of all transformations available at the moment. 
``` # sigmoid transformation # --------- values_list = np.arange(-30, 20, 0.25).tolist() specific_parameters = {csp_enum.TRANSFORMATION: True, csp_enum.LOW: -25, csp_enum.HIGH: 10, csp_enum.K: 0.4505, csp_enum.TRANSFORMATION_TYPE: tt_enum.SIGMOID} transform_function = factory.get_transformation_function(specific_parameters) transformed_scores = transform_function(predictions=values_list, parameters=specific_parameters) # render the curve render_curve(title="Sigmoid transformation function", x=values_list, y=transformed_scores) # reverse sigmoid transformation # --------- values_list = np.arange(-30, 20, 0.25).tolist() specific_parameters = {csp_enum.TRANSFORMATION: True, csp_enum.LOW: -20, csp_enum.HIGH: -5, csp_enum.K: 0.2, csp_enum.TRANSFORMATION_TYPE: tt_enum.REVERSE_SIGMOID} transform_function = factory.get_transformation_function(specific_parameters) transformed_scores = transform_function(predictions=values_list, parameters=specific_parameters) # render the curve render_curve(title="Reverse sigmoid transformation", x=values_list, y=transformed_scores) # double sigmoid # --------- values_list = np.arange(-20, 20, 0.25).tolist() specific_parameters = {csp_enum.TRANSFORMATION: True, csp_enum.LOW: -10, csp_enum.HIGH: 3, csp_enum.COEF_DIV: 500, csp_enum.COEF_SI: 250, csp_enum.COEF_SE: 250, csp_enum.TRANSFORMATION_TYPE: tt_enum.DOUBLE_SIGMOID} transform_function = factory.get_transformation_function(specific_parameters) transformed_scores = transform_function(predictions=values_list, parameters=specific_parameters) # render the curve render_curve(title="Double-sigmoid transformation function", x=values_list, y=transformed_scores) # step # --------- values_list = np.arange(-20, 20, 0.25).tolist() specific_parameters = {csp_enum.TRANSFORMATION: True, csp_enum.LOW: -10, csp_enum.HIGH: 3, csp_enum.TRANSFORMATION_TYPE: tt_enum.STEP} transform_function = factory.get_transformation_function(specific_parameters) transformed_scores = 
transform_function(predictions=values_list, parameters=specific_parameters) # render the curve render_curve(title="Step transformation function", x=values_list, y=transformed_scores) # right step # --------- values_list = np.arange(-20, 20, 0.25).tolist() specific_parameters = {csp_enum.TRANSFORMATION: True, csp_enum.LOW: -10, csp_enum.TRANSFORMATION_TYPE: tt_enum.RIGHT_STEP} transform_function = factory.get_transformation_function(specific_parameters) transformed_scores = transform_function(predictions=values_list, parameters=specific_parameters) # render the curve render_curve(title="Right step transformation function", x=values_list, y=transformed_scores) ``` There is also a `no_transformation` type, which does not change the input values at all.
github_jupyter
<a href="https://colab.research.google.com/github/RSNA/AI-Deep-Learning-Lab-2021/blob/main/sessions/object-detection-seg/segmentation.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Overview In this tutorial we will explore how to create a contracting-expanding fully convolutional neural network (CNN) for segmentation of pneumonia (lung infection) from chest radiographs, the most common imaging modality used to screen for pulmonary disease. For any patient with suspected lung infection, including viral pneumonia such as COVID-19, the initial imaging exam of choice is a chest radiograph. ## Workshop Links Use the following link to access materials from this workshop: https://github.com/peterchang77/dl_tutor/tree/master/workshops *Tutorials* * Introduction to Tensorflow 2.0 and Keras: https://bit.ly/2VSYaop * CNN for pneumonia classification: https://bit.ly/2D9ZBrX * CNN for pneumonia segmentation: https://bit.ly/2VQMWk9 (**current tutorial**) # Environment The following lines of code will configure your Google Colab environment for this tutorial. ### Enable GPU runtime Use the following instructions to switch the default Colab instance into a GPU-enabled runtime: ``` Runtime > Change runtime type > Hardware accelerator > GPU ``` ### Jarvis library In this notebook we will use Jarvis, a custom Python package to facilitate data science and deep learning for healthcare. Among other things, this library will be used for low-level data management, stratification and visualization of high-dimensional medical data. 
``` # --- Install Jarvis library % pip install jarvis-md ``` ### Imports Use the following lines to import any needed libraries: ``` import numpy as np, pandas as pd from tensorflow import losses, optimizers from tensorflow.keras import Input, Model, models, layers, metrics from jarvis.train import datasets, custom from jarvis.utils.display import imshow ``` # Data The data used in this tutorial will consist of (frontal projection) chest radiographs from a subset of the RSNA / Kaggle pneumonia challenge (https://www.kaggle.com/c/rsna-pneumonia-detection-challenge). From the complete cohort, a random subset of 1,000 exams will be used for training and evaluation. ### Download The custom `datasets.download(...)` method can be used to download a local copy of the dataset. By default the dataset will be archived at `/data/raw/xr_pna`; as needed an alternate location may be specified using `datasets.download(name=..., path=...)`. ``` # --- Download dataset datasets.download(name='xr/pna-512') ``` ### Python generators Once the dataset is downloaded locally, Python generators to iterate through the dataset can be easily prepared using the `datasets.prepare(...)` method: ``` # --- Prepare generators gen_train, gen_valid, client = datasets.prepare(name='xr/pna-512', keyword='seg-512') ``` The created generators, `gen_train` and `gen_valid`, are designed to yield two variables per iteration: `xs` and `ys`. Both `xs` and `ys` each represent a dictionary of NumPy arrays containing model input(s) and output(s) for a single *batch* of training. The use of Python generators provides a generic interface for data input for a number of machine learning libraries including Tensorflow 2.0 / Keras. Note that any valid Python iterable method can be used to loop through the generators indefinitely. 
For example the Python built-in `next(...)` method will yield the next batch of data: ``` # --- Yield one example xs, ys = next(gen_train) ``` ### Data exploration To help facilitate algorithm design, each original chest radiograph has been resampled to a uniform `(512, 512)` matrix. Overall, the dataset comprises a total of `1,000` 2D images: a total of `500` negative exams and `500` positive exams. ### `xs` dictionary The `xs` dictionary contains a single batch of model inputs: 1. `dat`: input chest radiograph resampled to `(1, 512, 512, 1)` matrix shape ``` # --- Print keys for key, arr in xs.items(): print('xs key: {} | shape = {}'.format(key.ljust(8), arr.shape)) ``` ### `ys` dictionary The `ys` dictionary contains a single batch of model outputs: 1. `pna`: output segmentation mask for pneumonia equal in size to the input `(1, 512, 512, 1)` matrix shape * 0 = pixels negative for pneumonia * 1 = pixels positive for pneumonia ``` # --- Print keys for key, arr in ys.items(): print('ys key: {} | shape = {}'.format(key.ljust(8), arr.shape)) ``` ### Visualization Use the following lines of code to visualize a single input image and mask using the `imshow(...)` method: ``` # --- Show labels xs, ys = next(gen_train) imshow(xs['dat'][0], ys['pna'][0], radius=3) ``` Use the following lines of code to visualize an N x N mosaic of all images and masks in the current batch using the `imshow(...)` method: ``` # --- Show "montage" of all images xs, ys = next(gen_train) imshow(xs['dat'], ys['pna'], figsize=(12, 12), radius=3) ``` ### Model inputs For every input in `xs`, a corresponding `Input(...)` variable can be created and returned in an `inputs` dictionary for ease of model development: ``` # --- Create model inputs inputs = client.get_inputs(Input) ``` In this example, the equivalent Python code to generate `inputs` would be: ```python inputs = {} inputs['dat'] = Input(shape=(1, 512, 512, 1)) ``` # U-Net Architecture The **U-Net** architecture is a common 
fully-convolutional neural network used to perform instance segmentation. The network topology comprises of symmetric contracting and expanding arms to map an original input image to an output segmentation mask that appoximates the size of the original image: ![U-Net Architecture](https://raw.githubusercontent.com/peterchang77/dl_tutor/master/cs190/spring_2020/notebooks/organ_segmentation/pngs/u-net-architecture.png) # Contracting Layers The contracting layers of a U-Net architecture are essentially identical to a standard feed-forward CNN. Compared to the original architecture above, several key modifications will be made for ease of implementation and to optimize for medical imaging tasks including: * same padding (vs. valid padding) * strided convoltions (vs. max-pooling) * smaller filters (channel depths) Let us start by defining the contracting layer architecture below: ``` # --- Define kwargs dictionary kwargs = { 'kernel_size': (1, 3, 3), 'padding': 'same'} # --- Define lambda functions conv = lambda x, filters, strides : layers.Conv3D(filters=filters, strides=strides, **kwargs)(x) norm = lambda x : layers.BatchNormalization()(x) relu = lambda x : layers.ReLU()(x) # --- Define stride-1, stride-2 blocks conv1 = lambda filters, x : relu(norm(conv(x, filters, strides=1))) conv2 = lambda filters, x : relu(norm(conv(x, filters, strides=(1, 2, 2)))) ``` Using these lambda functions, let us define a simple 9-layer contracting network topology with a total a four subsample (stride-2 convolution) operations: ``` # --- Define contracting layers l1 = conv1(16, inputs['dat']) l2 = conv1(32, conv2(32, l1)) l3 = conv1(48, conv2(48, l2)) l4 = conv1(64, conv2(64, l3)) l5 = conv1(80, conv2(80, l4)) ``` **Checkpoint**: What is the shape of the `l5` feature map? ``` ``` # Expanding Layers The expanding layers are simply implemented by reversing the operations found in the contract layers above. 
Specifically, each subsample operation is now replaced by a **convolutional transpose**. Due to the use of **same** padding, defining a transpose operation with the exact same parameters as a strided convolution will ensure that layers in the expanding pathway will exactly match the shape of the corresponding contracting layer. ### Convolutional transpose Let us start by defining an additional lambda function for the convolutional transpose: ``` # --- Define single transpose tran = lambda x, filters, strides : layers.Conv3DTranspose(filters=filters, strides=strides, **kwargs)(x) # --- Define transpose block tran2 = lambda filters, x : relu(norm(tran(x, filters, strides=(1, 2, 2)))) ``` Carefully compare these functions to the single `conv` operations as well as the `conv1` and `conv2` blocks above. Notice that they share the exact same configurations. Let us now apply the first convolutional transpose block to the `l5` feature map: ``` # --- Define expanding layers l6 = tran2(64, l5) ``` **Checkpoint**: What is the shape of the `l6` feature map? ### Concatenation The first connection in this specific U-Net derived architecture is a link between the `l4` and the `l6` layers: ``` l1 -------------------> l9 \ / l2 -------------> l8 \ / l3 -------> l7 \ / l4 -> l6 \ / l5 ``` To mediate the first connection between contracting and expanding layers, we must ensure that `l4` and `l6` match in feature map size (the number of filters / channel depth *do not* necessarily). Using the `same` padding as above should ensure that this is the case and thus simplifies the connection operation: ``` # --- Ensure shapes match print(l4.shape) print(l6.shape) # --- Concatenate concat = lambda a, b : layers.Concatenate()([a, b]) concat(l4, l6) ``` Note that since `l4` and `l6` are **exactly the same shape** (including matching channel depth), what additional operation could be used here instead of a concatenation? 
### Full expansion Alternate the use of `conv1` and `tran2` blocks to build the remainder of the expanding pathway: ``` # --- Define expanding layers l7 = tran2(48, conv1(64, concat(l4, l6))) l8 = tran2(32, conv1(48, concat(l3, l7))) l9 = tran2(16, conv1(32, concat(l2, l8))) l10 = conv1(16, l9) ``` # Logits The last convolution projects the `l10` feature map into a total of just `n` feature maps, one for each possible class prediction. In this 2-class prediction task, a total of `2` feature maps will be needed. Recall that these feature maps essentially act as a set of **logit scores** for each voxel location throughout the image. As with a standard CNN architecture, **do not** use an activation here in the final convolution: ``` # --- Create logits logits = {} logits['pna'] = layers.Conv3D(filters=2, name='pna', **kwargs)(l10) ``` # Model Let us first create our model: ``` # --- Create model model = Model(inputs=inputs, outputs=logits) ``` ### Custom Dice score metric The metric of choice for tracking performance of a medical image segmentation algorithm is the **Dice score**. The Dice score is not a default metric built in the Tensorflow library, however a custom metric is available for your convenience as part of the `jarvis-md` package. It is invoked using the `custom.dsc(cls=...)` call, where the argument `cls` refers to the number of *non-zero* classes to track (e.g. the background Dice score is typically not tracked). In this exercise, it will be important to track the performance of segmentation for **pneumonia** (class = 1) only, thus set the `cls` argument to `1`. ``` # --- Compile model model.compile( optimizer=optimizers.Adam(learning_rate=2e-4), loss={'pna': losses.SparseCategoricalCrossentropy(from_logits=True)}, metrics={'pna': custom.dsc(cls=1)}, experimental_run_tf_function=False) ``` # Model Training ### In-Memory Data The following line of code will load all training data into RAM memory. 
This strategy can be effective for increasing speed of training for small to medium-sized datasets. ``` # --- Load data into memory client.load_data_in_memory() ``` ### Training Once the model has been compiled and the data prepared (via a generator), training can be invoked using the `model.fit(...)` method. Ensure that both the training and validation data generators are used. In this particular example, we are defining arbitrary epochs of 100 steps each. Training will proceed for 8 epochs in total. Validation statistics will be assessed every fourth epoch. Tune these arguments as needed. ``` model.fit( x=gen_train, steps_per_epoch=100, epochs=8, validation_data=gen_valid, validation_steps=100, validation_freq=4) ``` # Evaluation To test the trained model, the following steps are required: * load data * use `model.predict(...)` to obtain logit scores * use `np.argmax(...)` to obtain prediction * compare prediction with ground-truth Recall that the generator used to train the model simply iterates through the dataset randomly. For model evaluation, the cohort must instead be loaded manually in an orderly way. For this tutorial, we will create new **test mode** data generators, which will simply load each example individually once for testing. ``` # --- Create validation generator test_train, test_valid = client.create_generators(test=True) ``` ### Dice score While the Dice score metric for Tensorflow has been provided already, an implementation must still be used to manually calculate the performance during validation. 
Use the following code cell block to implement: ``` def dice(y_true, y_pred, c=1, epsilon=1): """ Method to calculate the Dice score coefficient for a given class :params (np.ndarray) y_true : ground-truth label, shape (..., 1) (np.ndarray) y_pred : predicted logits scores, shape (..., n_classes) (int) c : class to calculate DSC on (float) epsilon : smoothing term added to the denominator to avoid division by zero when both masks are empty :return (float) Dice score in [0, 1] """ assert y_true.ndim == y_pred.ndim # --- Binarize: ground-truth mask for class c, and argmax of logits for class c true = y_true[..., 0] == c pred = np.argmax(y_pred, axis=-1) == c # --- Dice = 2 * |true ∩ pred| / (|true| + |pred|) A = np.count_nonzero(true & pred) * 2 B = np.count_nonzero(true) + np.count_nonzero(pred) + epsilon return A / B ``` Use the following lines of code to loop through the test set generator and run model prediction on each example: ``` # --- Test model dsc = [] for x, y in test_valid: if y['pna'].any(): # --- Predict logits = model.predict(x['dat']) if isinstance(logits, dict): logits = logits['pna'] # --- Argmax dsc.append(dice(y['pna'][0], logits[0], c=1)) dsc = np.array(dsc) ``` Use the following lines of code to calculate validation cohort performance: ``` # --- Calculate accuracy print('{}: {:0.5f}'.format('Mean Dice'.ljust(20), np.mean(dsc))) print('{}: {:0.5f}'.format('Median Dice'.ljust(20), np.median(dsc))) print('{}: {:0.5f}'.format('25th-centile Dice'.ljust(20), np.percentile(dsc, 25))) print('{}: {:0.5f}'.format('75th-centile Dice'.ljust(20), np.percentile(dsc, 75))) ``` ## Saving and Loading a Model After a model has been successfully trained, it can be saved and/or loaded by simply using the `model.save()` and `models.load_model()` methods. ``` # --- Serialize a model model.save('./cnn.hdf5') # --- Load a serialized model del model model = models.load_model('./cnn.hdf5', compile=False) ```
github_jupyter
© 2020 Nokia Licensed under the BSD 3 Clause license SPDX-License-Identifier: BSD-3-Clause ## Setup ``` %load_ext autoreload %autoreload 2 import os import json import time import numpy as np import sys from codesearch.encoders import BasicEncoder from codesearch import embedding_pretraining from codesearch.embedding_pretraining import train_fasttext_model_from_snippets, load_fasttext_model from codesearch.utils import SaveableFunction from codesearch.data import load_snippet_collection, EVAL_DATASETS, SNIPPET_COLLECTIONS, eval_datasets_from_regex from codesearch.ncs.ncs_embedder import TfidfCodeEmbedder, NcsEmbedder from codesearch.evaluation import evaluate_and_dump from codesearch.embedding_retrieval import EmbeddingRetrievalModel start = time.time() ``` Read configuration parameters from environment variables (when this notebook is run as a script). ``` fast_text_checkpoint = os.environ.get("fast_text_checkpoint", None) model_filename = os.environ.get("model_filename", None) snippets_collection = os.environ.get("snippets_collection", "so-ds-feb20") train_snippets_collection = os.environ.get("train_snippets_collection", "so-ds-feb20") valid_dataset = os.environ.get("valid_dataset", None) test_dataset = os.environ.get("test_dataset", "conala-curated-0.5-test") text_overrides = json.loads(os.environ.get("text_overrides", "{}")) code_overrides = json.loads(os.environ.get("code_overrides", "{}")) fast_text_overrides = json.loads(os.environ.get("fast_text_overrides", "{}")) zip_fn_name = os.environ.get("zip_fn", "zip_descr_end") output_dir = os.environ.get("output_dir", ".") model_filename, fast_text_checkpoint snippets_collection text_overrides, code_overrides, fast_text_overrides, zip_fn_name ``` ## Load data ``` if valid_dataset and valid_dataset not in EVAL_DATASETS and valid_dataset not in SNIPPET_COLLECTIONS: raise ValueError() test_datasets = eval_datasets_from_regex(test_dataset) snippets = load_snippet_collection(snippets_collection) train_snippets = 
load_snippet_collection(train_snippets_collection) train_snippets[-1] f=open('../../processed_search_1.json') train_snippets=json.load(f) snippets=train_snippets snippets[0] train_snippets ``` ## Train or load embedding model ``` enc = BasicEncoder(text_preprocessing_params=text_overrides, code_preprocessing_params=code_overrides) zip_fn = getattr(sys.modules[embedding_pretraining.__name__], zip_fn_name) model = train_fasttext_model_from_snippets(train_snippets, enc, zip_fn, fast_text_overrides, "./", save=True) if fast_text_checkpoint: model, enc = load_fasttext_model(fast_text_checkpoint) print("Loaded fast text checkpoint") else: enc = BasicEncoder(text_preprocessing_params=text_overrides, code_preprocessing_params=code_overrides) zip_fn = getattr(sys.modules[embedding_pretraining.__name__], zip_fn_name) model = train_fasttext_model_from_snippets(train_snippets, enc, zip_fn, fast_text_overrides, "./", save=False) ``` ## Unsupervised retrieval baseline A first baseline that computes a representation a snippet representation as a tfidf weighted average of their embeddings and a query representation by averaging all terms. 
### Embedding code & queries ``` tfidf_model = TfidfCodeEmbedder.create_tfidf_model(enc, model, snippets) embedder = NcsEmbedder(model, enc, tfidf_model) ``` ### Create retrieval model ``` retrieval_model = EmbeddingRetrievalModel(embedder) retrieval_model.add_snippets(snippets) if model_filename: embedder.save(model_filename) embedder.save('best_ncs_embedder') ``` ## Evaluation ``` sample_queries = ["train a tensorflow model", "plot a bar chart", "merge two dataframes", "sort a list", "read a pandas dataframe from a file", "plot an image"] config = {"text": text_overrides, "code": code_overrides, "fasttext": fast_text_overrides} evaluate_and_dump( retrieval_model, config, output_dir, valid_dataset, test_datasets, sample_queries=sample_queries ) duration = time.time() - start f"Running the notebook took {duration} seconds" give_your_query=['sort a list'] ```
github_jupyter
# Word2Vec with CNN and Bi-LSTM - word2vec vector values as weights for LSTM to train ``` import numpy as np import pandas as pd import os import nltk import sklearn from gensim.models import Word2Vec import re import multiprocessing import tensorflow as tf from keras.preprocessing.text import Tokenizer from collections import Counter from nltk.stem.lancaster import LancasterStemmer from sklearn.metrics import roc_curve, auc, accuracy_score, precision_recall_fscore_support,confusion_matrix from keras.preprocessing.sequence import pad_sequences from nltk.stem import WordNetLemmatizer from nltk.stem.lancaster import LancasterStemmer import matplotlib.pyplot as plt %matplotlib inline import tensorboard tensorboard.__version__ from datetime import datetime from packaging import version from sklearn.metrics import auc from sklearn.metrics import roc_curve from beautifultable import BeautifulTable #function to extract data from the file def read_file(df_new): print("Started extracting data from file",df_new.shape) dfnew=pd.DataFrame() dfnew.insert(0,'Post',None) dfnew.insert(1,'class',None) for val in df_new.values: appList=[] sp=np.array_str(val).split(",") if len(sp)==2: appList.append(sp[0]) appList.append(sp[1]) dfnew.loc[len(dfnew)]=appList for i in range(0,dfnew.shape[0]): dfnew.values[i][1]=int(dfnew.values[i][1].strip("\'|]|\"")) print(dfnew['class'].value_counts()) print("Finished extracting data from file",dfnew.shape) return dfnew #performing data cleaning on the formspring.me dataset def post_tokenizing_dataset1(df): print("Started cleaning data in dataframe", df.shape) #print(df.head(5)) wpt = nltk.WordPunctTokenizer() stop_words = nltk.corpus.stopwords.words('english') lancaster_stemmer=LancasterStemmer() wordnet_lemmatizer = WordNetLemmatizer() token_list=[] phrase_list=[] token_df=pd.DataFrame() token_df.insert(0,'Post',None) token_df.insert(1,'class',None) for val in df.values: append_list=[] filter_val=re.sub(r'Q:','',val[0]) 
filter_val=re.sub(r'&#039;[a-z]{1}','',filter_val) filter_val=re.sub('<[a-z]+>',' ',filter_val).lower() filter_val=re.sub(r'[^a-zA-Z\s]', '', filter_val, re.I|re.A) filter_val=[token for token in wpt.tokenize(filter_val)] filter_val=[word for word in filter_val if word.isalpha()] lemma_tokens=[wordnet_lemmatizer.lemmatize(token) for token in filter_val if token not in stop_words and len(token)>=3] lancaster_tokens = [lancaster_stemmer.stem(word) for word in lemma_tokens] if(lancaster_tokens): append_list.append(' '.join(lancaster_tokens)) append_list.append(val[1]) token_df.loc[len(token_df)]=append_list print("Finished cleaning data in dataframe",token_df.shape) #print(token_df.head(5)) return token_df #performing data cleaning on the twitter dataset def post_tokenizing_dataset3(df): print("Started cleaning data in dataframe", df.shape) #print(df.head(5)) wpt = nltk.WordPunctTokenizer() stop_words = nltk.corpus.stopwords.words('english') lancaster_stemmer=LancasterStemmer() wordnet_lemmatizer = WordNetLemmatizer() token_df=pd.DataFrame() token_df.insert(0,'Post',None) token_df.insert(1,'class',None) for val in df.values: filter_val=[] value=re.sub(r'@\w*','',val[0]) value=re.sub(r'&.*;','',value) value=re.sub(r'http[s?]?:\/\/.*[\r\n]*','',value) tokens=[token for token in wpt.tokenize(value)] tokens=[word for word in tokens if word.isalpha()] lemma_tokens=[wordnet_lemmatizer.lemmatize(token) for token in tokens if token not in stop_words and len(token)>=3] lancaster_tokens = [lancaster_stemmer.stem(word) for word in lemma_tokens] if len(lancaster_tokens)!=0: filter_val.append(' '.join(lancaster_tokens).lower()) filter_val.append(val[1]) token_df.loc[len(token_df)]=filter_val print("Finished cleaning data in dataframe",token_df.shape) #print(token_df.head(5)) return token_df #removal of words which occur once def remove_less_occurent_words(token_df,counter): print("Started removing less occurent words",token_df.shape) token_df_2=pd.DataFrame() 
token_df_2.insert(0,'Post',None) token_df_2.insert(1,'class',None) less_list=[] for key,val in counter.items(): if(val==1): less_list.append(key) for val in token_df.values: list_2=[] split_list=[] split_list=val[0].split(' ') for word in split_list: if word in less_list: split_list.remove(word) list_2.append(' '.join(split_list)) list_2.append(val[1]) token_df_2.loc[len(token_df_2)]=list_2 print("Finished removing less occurent words",token_df_2.shape) return token_df_2 #counting the number of unique words in the corpora def counter_word(text): print("Started counting words") count = Counter() for i in text.values: for word in i.split(): count[word] += 1 print("Finished counting words") return count #getting the data from csv files df_data_1=read_file(pd.read_csv("../../post.csv",sep="\t")) df_data_2=read_file(pd.read_csv("../../new_data.csv",sep=",")) df_data_3=pd.read_csv("../../dataset_4.csv",sep=",") #calling the function post_tokenizing_dataset1() and post_tokenizing_dataset3() for cleaning df_data_1=post_tokenizing_dataset1(df_data_1) tk=df_data_3[df_data_3['class']==1].iloc[0:8000,] post_tk=post_tokenizing_dataset3(tk) post_tk=post_tk.append(df_data_1[df_data_1['class']==0].iloc[0:7500,], ignore_index=True) print(post_tk['class'].value_counts()) post_tk=sklearn.utils.shuffle(post_tk) counter_tk = counter_word(post_tk['Post']) print(len(counter_tk)) token_tk=remove_less_occurent_words(post_tk,counter_tk) print(tk.shape) token_list=[] total_words_max=0 for val in token_tk.values: token_list.append(val[0].split()) if total_words_max<len(val[0].split()): total_words_max=len(val[0].split()) print(total_words_max) #Building a Word2Vec model em_dim=300 cores = multiprocessing.cpu_count() embeddings=Word2Vec(token_list,min_count=5, window=5, size=em_dim, sample=6e-5, alpha=0.03, min_alpha=0.0007, negative=2, workers=cores) words=list(embeddings.wv.vocab) print("Total vocabulary size:",len(words)) #docs_vectors=post_vector_calculation(token_list,embeddings) #Pushing 
the vector values of each word into a file for creating an embedding matrix below. filename="word2vec_embeddings.txt" embeddings.wv.save_word2vec_format(filename, binary=False) from sklearn.model_selection import train_test_split from sklearn.ensemble import AdaBoostClassifier X_train, X_test, y_train, y_test = train_test_split(token_tk['Post'],token_tk['class'],test_size = 0.2,stratify=token_tk['class'], random_state = 42) print(X_train.shape, X_test.shape) print(y_train.shape,y_test.shape) #converting the post to sequences and padding to equal length from keras.preprocessing.text import Tokenizer tokenizer = Tokenizer() tokenizer.fit_on_texts(X_train) X_train_seq=tokenizer.texts_to_sequences(X_train) X_test_seq=tokenizer.texts_to_sequences(X_test) vocab_size=len(tokenizer.word_index)+1 X_train_pad=pad_sequences(X_train_seq,maxlen=total_words_max,padding="post", truncating="post") X_test_pad=pad_sequences(X_test_seq,maxlen=total_words_max,padding="post", truncating="post") embeddings_index={} f=open(os.path.join('','word2vec_embeddings.txt'),encoding='utf-8') for line in f: values=line.split() word=values[0] coefs=np.asarray(values[1:]) embeddings_index[word]=coefs f.close() embedding_matrix=np.zeros((vocab_size,em_dim)) for word,i in tokenizer.word_index.items(): embedding_vector = embeddings_index.get(word) if embedding_vector is not None: embedding_matrix[i] = embedding_vector print(vocab_size) rate_drop_lstm = 0.15 + np.random.rand() * 0.25 rate_drop_dense = 0.15 + np.random.rand() * 0.25 from keras.models import Sequential from keras.layers import Embedding, LSTM, Dense, Dropout, Masking, Activation, Input, Bidirectional, Conv1D, MaxPooling1D from keras.initializers import Constant from keras.optimizers import Adam from keras.layers.merge import concatenate from keras.layers.normalization import BatchNormalization from keras.models import Model embedding_layer = Embedding(input_dim=vocab_size,output_dim=em_dim, weights=[embedding_matrix], trainable=True, 
mask_zero=True) lstm_layer= LSTM(units=em_dim,dropout=rate_drop_lstm,recurrent_dropout=rate_drop_lstm) model=Sequential() model.add(embedding_layer) model.add(Conv1D(filters=em_dim,kernel_size=4,padding="valid",activation="relu")) model.add(MaxPooling1D(pool_size=2)) model.add(Bidirectional(lstm_layer)) model.add(Dense(1)) model.add(Activation('sigmoid')) optimizer = Adam(learning_rate=3e-4) model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy',tf.keras.metrics.Precision(),tf.keras.metrics.Recall(),tf.keras.metrics.AUC(),tf.keras.metrics.TrueNegatives(),tf.keras.metrics.TruePositives(),tf.keras.metrics.FalseNegatives(),tf.keras.metrics.FalsePositives()]) model.summary() y_train=y_train.astype('int') y_test=y_test.astype('int') final_model = model.fit( X_train_pad, y_train,epochs=5,validation_data=(X_test_pad, y_test) ) model.save("Word2Vec_LSTM_CNN.model") pred_val=model.predict_classes(X_test_pad) tn, fp, fn, tp = confusion_matrix(y_test,pred_val).ravel() print("Confusion matrix: tn={tn}, fp={fp}, fn={fn}, tp={tp}".format(tn=tn,fp=fp,fn=fn,tp=tp)) fpr_keras, tpr_keras, thresholds_keras = roc_curve(y_test, pred_val) auc_keras = auc(fpr_keras, tpr_keras) plt.figure(1) plt.plot([0, 1], [0, 1], 'k--') plt.plot(fpr_keras, tpr_keras, label='Keras (auc = {:.3f})'.format(auc_keras)) plt.xlabel('False positive rate') plt.ylabel('True positive rate') plt.title('ROC curve') plt.legend(loc='best') plt.show() LSTM_accuracy=final_model.history['val_accuracy'][4] LSTM_precision=final_model.history['val_precision'][4] LSTM_recall=final_model.history['val_recall'][4] LSTM_f1_score= 2 * ((LSTM_precision*LSTM_recall)/(LSTM_precision+LSTM_recall)) h0=["Algorithm"] h1=["LSTM",LSTM_accuracy,LSTM_f1_score,LSTM_precision,LSTM_recall] h0.append("Accuracy") h0.append("F1 Measure") h0.append("Precision") h0.append("Recall") table = BeautifulTable() table.column_headers = h0 table.append_row(h1) print(table) 
LSTM_metrics=np.array([LSTM_accuracy,LSTM_f1_score,LSTM_precision,LSTM_recall]) index = ['accuracy', 'F1-score', 'precision', 'recall'] df_metrics = pd.DataFrame({'LSTM': LSTM_metrics}, index=index) df_metrics.plot.bar(rot=0) plt.legend(loc="lower right") plt.show() ``` ## Testing ``` test_data=pd.DataFrame() t1=df_data_2[df_data_2['class']==0].iloc[0:1000,] t1=post_tokenizing_dataset1(t1) t2=df_data_3[df_data_3['class']==1].iloc[8001:9000,] t2=post_tokenizing_dataset3(t2) test_data=test_data.append(t1 , ignore_index = True) test_data=test_data.append(t2 , ignore_index = True) test_data=sklearn.utils.shuffle(test_data) print(test_data['class'].value_counts()) counter_test= counter_word(test_data['Post']) print(len(counter_test)) test_data=remove_less_occurent_words(test_data,counter_test) token_list_test=[] maxi_test=0 for val in test_data.values: token_list_test.append(val[0].split()) if maxi_test<len(val[0].split()): maxi_test=len(val[0].split()) print(maxi_test) sequences_test=tokenizer.texts_to_sequences(token_list_test) review_pad_test=pad_sequences(sequences_test,maxlen=maxi_test,padding="post", truncating="post") def model_evaluation(X_test, y_test, model): _predicted_values = model.predict_classes(X_test) _accuracy = accuracy_score(y_test, _predicted_values) _precision, _recall, _f1_score, _ = precision_recall_fscore_support(y_test, _predicted_values, labels=[1]) tn, fp, fn, tp = confusion_matrix(y_test, _predicted_values).ravel() print("Confusion matrix: tn={tn}, fp={fp}, fn={fn}, tp={tp}".format(tn=tn,fp=fp,fn=fn,tp=tp)) return _accuracy, _precision[0], _recall[0], _f1_score[0] X=review_pad_test y=test_data['class'] y=y.astype('int') accuracy, precision, recall, f1=model_evaluation(X,y,model) print("accuracy:{a}, precision:{p}, recall:{r}, f1:{f}".format(a=accuracy,p=precision,r=recall,f=f1)) LSTM_metrics_test=np.array([accuracy,f1,precision,recall]) index = ['accuracy', 'F1-score', 'precision', 'recall'] df_metrics = pd.DataFrame({'LSTM': 
LSTM_metrics_test}, index=index) df_metrics.plot.bar(rot=0) plt.legend(loc="lower right") plt.show() ```
github_jupyter
``` import requests import datetime from datetime import datetime as dt import patoolib import os import pandas as pd import sqlalchemy import psycopg2 from sqlalchemy import create_engine import numpy as np from datetime import timedelta import os.path from datetime import datetime import sqlalchemy as sa #from sqlalchemy.sql import text DBname=os.environ['DB_NAME'] postgres_psswd=os.environ['POSTGRES_PASSWORD'] postgres_user=os.environ['POSTGRES_USER'] postgres_port=str(os.environ['POSTGRES_PORT']) # A long string that contains the necessary Postgres login information postgres_str = ('postgresql://'+postgres_user+':'+postgres_psswd+'@'+DBname+':'+postgres_port+'/superset') # Create the connection cnx = create_engine(postgres_str) s="producto50" n="producto50_DefuncionesDEIS_sospechososPorComuna_std" sospechosos=pd.read_sql_table(n, con=cnx,schema=s).drop(['index','reverse_idx','MAX_DATE'],axis=1) s="producto50" n="producto50_DefuncionesDEIS_confirmadosPorComuna_std" confirmados=pd.read_sql_table(n, con=cnx,schema=s).drop(['index','reverse_idx','MAX_DATE'],axis=1) confirmados['Defunciones'].sum() sospechosos['Defunciones'].sum() for k in ['DEIS']: if not cnx.dialect.has_schema(cnx, k): print('schema '+k+' does not exist, creating it') cnx.execute(sqlalchemy.schema.CreateSchema(k)) else: print('schema '+k+' exists, will not be created') DEIS=pd.concat([confirmados,sospechosos],keys=['Confirmados','Sospechosos']).reset_index().drop('level_1',axis=1).rename(columns={'level_0':'PCR+'}) DEIS.to_sql('deis', schema='DEIS',con=cnx,if_exists='replace') DEIS DEIS.columns DEIS['Defunciones'].sum() DEIS['PCR+'] DEIS.groupby(['Fecha','PCR+'])['Defunciones'].sum().reset_index() DEIS.melt(id_vars=['Fecha','PCR+']) DEIS.pivot(index='Fecha',columns=['PCR+'],values=['Defunciones']) #fallecidos nuevos regionales (DEIS) fr=DEIS.groupby(['FECHA_DEF','Metropolitana'])['FECHA_STR'].count() fr=fr.reset_index() fr.columns=['Fecha','Region Metropolitana','Fallecidos Nuevos'] 
fr=fr.pivot(index='Fecha',columns=['Region Metropolitana']) fr=fr.T.reset_index().T fr.columns=fr.iloc[0].astype(str)+' '+fr.iloc[1].astype(str)+' (DEIS)' fr=fr.iloc[3:].reset_index() #fallecidos regional acumulado (deis) fr=fr.fillna(0) fra=fr[fr.columns.to_list()[1:]].cumsum() fra.columns=[x.replace('Nuevos','Acumulados') for x in fra.columns] #concatenar fr=pd.concat([fr,fra],axis=1) os.system('jupyter nbconvert --output /home/jovyan/work/ETLdocs/' + 'ETL_DEIS.html' + ' --to html ' + '/home/jovyan/work/ETL/DEIS.ipynb') ```
github_jupyter
``` import numpy as np import pandas as pd import matplotlib.pyplot as plt %matplotlib inline import seaborn as sns sns.set() from google.colab import files uploaded = files.upload() ``` ### ***Train Data*** ``` train_data = pd.read_excel('Data_Train.xlsx') pd.set_option('display.max_columns',None) train_data.head() train_data.info() train_data.isnull().sum() train_data.dropna(inplace=True) train_data.isnull().sum() train_data['Duration'].value_counts() train_data.shape train_data['Journey_day'] = pd.to_datetime(train_data['Date_of_Journey'], format='%d/%m/%Y').dt.day train_data['Journey_month'] = pd.to_datetime(train_data['Date_of_Journey'], format='%d/%m/%Y').dt.month #Dropping Date of journey colums as we already extracted date and month from it. train_data.drop(['Date_of_Journey'],axis=1,inplace=True) train_data.head(2) train_data['Dep_hour'] = pd.to_datetime(train_data['Dep_Time']).dt.hour train_data['Dep_mins'] = pd.to_datetime(train_data['Dep_Time']).dt.minute #Dropping Departure Time column as we already extracted hours and minutes from it. train_data.drop(['Dep_Time'],axis=1,inplace=True) train_data.head(2) train_data['Arrival_hour'] = pd.to_datetime(train_data['Arrival_Time']).dt.hour train_data['Arrival_minutes'] = pd.to_datetime(train_data['Arrival_Time']).dt.minute #Dropping Arrival Time column as we already extracted hours and minutes from it. 
train_data.drop(['Arrival_Time'],axis=1,inplace=True) train_data.head(2) duration = list(train_data["Duration"]) for i in range(len(duration)): if len(duration[i].split()) != 2: if "h" in duration[i]: duration[i] = duration[i].strip() + " 0m" else: duration[i] = "0h " + duration[i] duration_hours = [] duration_mins = [] for i in range(len(duration)): duration_hours.append(int(duration[i].split(sep = "h")[0])) duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) train_data['Duration_hours'] = duration_hours train_data['Duration_minutes'] = duration_mins #Dropping Duration column as we already extracted hours and minutes from it. train_data.drop(['Duration'],axis=1,inplace=True) train_data.head(2) train_data['Airline'].value_counts() sns.catplot(y='Price',x='Airline',data=train_data.sort_values('Price', ascending=False),kind='boxen',aspect=3) plt.show() Airline = train_data['Airline'] Airline = pd.get_dummies(Airline,drop_first=True) Airline.head() train_data['Source'].value_counts() sns.catplot(y='Price',x='Source',data=train_data.sort_values('Price', ascending=False),kind='bar',aspect=3) plt.show() Source = train_data['Source'] Source = pd.get_dummies(Source,drop_first=True) Source.head() train_data['Destination'].value_counts() sns.catplot(y='Price',x='Destination',data=train_data.sort_values('Price', ascending=False),kind='bar',aspect=3) plt.show() Destination = train_data['Destination'] Destination = pd.get_dummies(Destination,drop_first=True) Destination.head() train_data['Route'].value_counts() train_data.head(3) #we are dropping route because we can get the same info from total stops columns. 
train_data.drop(['Route','Additional_Info'],axis=1,inplace=True) train_data['Total_Stops'].value_counts() train_data['Total_Stops'] = train_data['Total_Stops'].map({'non-stop': 0 ,'1 stop':1,'2 stops':2,'3 stops':3,'4 stops':4}) train_data.head() data_train = pd.concat([train_data,Airline,Source,Destination],axis=1) data_train.head(2) data_train.drop(['Airline','Source','Destination'],axis=1,inplace=True) data_train.head() data_train.shape ``` ## ***Test Data*** ``` from google.colab import files uploaded1 = files.upload() test_data = pd.read_excel('Test_set.xlsx') test_data.head(3) print('Test_data_isnull::') test_data.dropna(inplace=True) print(test_data.isnull().sum()) test_data['Journey_day'] = pd.to_datetime(test_data['Date_of_Journey'], format='%d/%m/%Y').dt.day test_data['Journey_month'] = pd.to_datetime(test_data['Date_of_Journey'], format='%d/%m/%Y').dt.month #Dropping Date of journey colums as we already extracted date and month from it. test_data.drop(['Date_of_Journey'],axis=1,inplace=True) test_data['Dep_hour'] = pd.to_datetime(test_data['Dep_Time']).dt.hour test_data['Dep_mins'] = pd.to_datetime(test_data['Dep_Time']).dt.minute #Dropping Departure Time column as we already extracted hours and minutes from it. test_data.drop(['Dep_Time'],axis=1,inplace=True) test_data['Arrival_hour'] = pd.to_datetime(test_data['Arrival_Time']).dt.hour test_data['Arrival_minutes'] = pd.to_datetime(test_data['Arrival_Time']).dt.minute #Dropping Arrival Time column as we already extracted hours and minutes from it. 
test_data.drop(['Arrival_Time'],axis=1,inplace=True) duration = list(test_data["Duration"]) for i in range(len(duration)): if len(duration[i].split()) != 2: if "h" in duration[i]: duration[i] = duration[i].strip() + " 0m" else: duration[i] = "0h " + duration[i] duration_hours = [] duration_mins = [] for i in range(len(duration)): duration_hours.append(int(duration[i].split(sep = "h")[0])) duration_mins.append(int(duration[i].split(sep = "m")[0].split()[-1])) test_data['Duration_hours'] = duration_hours test_data['Duration_minutes'] = duration_mins #Dropping Duration column as we already extracted hours and minutes from it. test_data.drop(['Duration'],axis=1,inplace=True) Airline = test_data['Airline'] Airline = pd.get_dummies(Airline,drop_first=True) Source = test_data['Source'] Source = pd.get_dummies(Source,drop_first=True) Destination = test_data['Destination'] Destination = pd.get_dummies(Destination,drop_first=True) #we are dropping route because we can get the same info from total stops columns test_data.drop(['Route','Additional_Info'],axis=1,inplace=True) test_data['Total_Stops'] = test_data['Total_Stops'].map({'non-stop': 0 ,'1 stop':1,'2 stops':2,'3 stops':3,'4 stops':4}) data_test = pd.concat([test_data,Airline,Source,Destination],axis=1) data_test.drop(['Airline','Source','Destination'],axis=1,inplace=True) data_test.head() ``` ### ***Feature Selection*** ``` data_train.shape data_train.columns X = data_train.loc[:,['Total_Stops','Journey_day', 'Journey_month', 'Dep_hour', 'Dep_mins', 'Arrival_hour', 'Arrival_minutes', 'Duration_hours', 'Duration_minutes', 'Air India', 'GoAir', 'IndiGo', 'Jet Airways', 'Jet Airways Business', 'Multiple carriers', 'Multiple carriers Premium economy', 'SpiceJet', 'Trujet', 'Vistara', 'Vistara Premium economy', 'Chennai', 'Delhi', 'Kolkata', 'Mumbai', 'Cochin', 'Delhi', 'Hyderabad', 'Kolkata', 'New Delhi']] X.head() y=data_train.iloc[:,1] y.head() #Finding Co-relation plt.figure(figsize=(10,10)) 
sns.heatmap(train_data.corr(),annot=True,cmap='hot') plt.show() from sklearn.ensemble import ExtraTreesRegressor selection = ExtraTreesRegressor() selection.fit(X,y) print(selection.feature_importances_) plt.figure(figsize=(10,8)) feature_imp = pd.Series(selection.feature_importances_,index=X.columns) feature_imp.nlargest(20).plot(kind='bar') plt.show() ``` ## ***Fitting the model using Train,Test,Split and using Random_Forest*** ``` from sklearn.model_selection import train_test_split X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.2,random_state=101) from sklearn.ensemble import RandomForestRegressor rf_reg = RandomForestRegressor() rf_reg.fit(X_train,y_train) y_pred = rf_reg.predict(X_test) rf_reg.score(X_train,y_train) rf_reg.score(X_test,y_test) sns.distplot(y_test-y_pred) plt.show() plt.figure(figsize=(8,4)) plt.scatter(y_test,y_pred,alpha= 0.5) plt.title('RESULTS',c='r') plt.xlabel('y_test',c='orange') plt.ylabel('y_pred',c='orange') plt.show() from sklearn import metrics print('MAE:', metrics.mean_absolute_error(y_test,y_pred)) print('MSE:',metrics.mean_squared_error(y_test,y_pred)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_test,y_pred))) metrics.r2_score(y_test,y_pred) ``` ### **Hyper-parameter tuning** ``` from sklearn.model_selection import RandomizedSearchCV #Number of trees in random_forest n_estimators = [int(x) for x in np.linspace(start=100,stop=1300,num=12)] #Number of features to consider at every split max_features = ['auto','sqrt'] #Maximim number of levels in tree max_depth = [int(x) for x in np.linspace(start=5,stop=30,num=6)] #Minimum number of samples required to split a node min_samples_split = [2,5,10,15,100] #Minimum number of samples required at each leaf node min_samples_leaf = [1,2,5,10] random_grid = {'n_estimators':n_estimators, 'max_features':max_features, 'max_depth':max_depth, 'min_samples_split':min_samples_split, 'min_samples_leaf':min_samples_leaf } print(random_grid) rf_random = 
RandomizedSearchCV(estimator=rf_reg,param_distributions=random_grid,scoring='neg_mean_squared_error',n_iter=10,cv=5,verbose=2,random_state=42,n_jobs=1) rf_random.fit(X_train,y_train) rf_random.best_params_ predictions_cv = rf_random.predict(X_test) plt.figure(figsize=(8,4)) plt.scatter(y_test,predictions_cv,alpha= 0.5) plt.title('SCATTER_PLOT_AFTER_RANDOMIZED_SEARCH',c='r') plt.xlabel('y_test',c='orange') plt.ylabel('y_pred',c='orange') plt.show() print('MAE:', metrics.mean_absolute_error(y_test,predictions_cv)) print('MSE:',metrics.mean_squared_error(y_test,predictions_cv)) print('RMSE:',np.sqrt(metrics.mean_squared_error(y_test,predictions_cv))) metrics.r2_score(y_test,predictions_cv) ```
github_jupyter
# Training Neural Networks The network we built in the previous part isn't so smart, it doesn't know anything about our handwritten digits. Neural networks with non-linear activations work like universal function approximators. There is some function that maps your input to the output. For example, images of handwritten digits to class probabilities. The power of neural networks is that we can train them to approximate this function, and basically any function given enough data and compute time. <img src="assets/function_approx.png" width=500px> At first the network is naive, it doesn't know the function mapping the inputs to the outputs. We train the network by showing it examples of real data, then adjusting the network parameters such that it approximates this function. To find these parameters, we need to know how poorly the network is predicting the real outputs. For this we calculate a **loss function** (also called the cost), a measure of our prediction error. For example, the mean squared loss is often used in regression and binary classification problems $$ \large \ell = \frac{1}{2n}\sum_i^n{\left(y_i - \hat{y}_i\right)^2} $$ where $n$ is the number of training examples, $y_i$ are the true labels, and $\hat{y}_i$ are the predicted labels. By minimizing this loss with respect to the network parameters, we can find configurations where the loss is at a minimum and the network is able to predict the correct labels with high accuracy. We find this minimum using a process called **gradient descent**. The gradient is the slope of the loss function and points in the direction of fastest change. To get to the minimum in the least amount of time, we then want to follow the gradient (downwards). You can think of this like descending a mountain by following the steepest slope to the base. <img src='assets/gradient_descent.png' width=350px> ## Backpropagation For single layer networks, gradient descent is straightforward to implement. 
However, it's more complicated for deeper, multilayer neural networks like the one we've built. Complicated enough that it took about 30 years before researchers figured out how to train multilayer networks. Training multilayer networks is done through **backpropagation** which is really just an application of the chain rule from calculus. It's easiest to understand if we convert a two layer network into a graph representation. <img src='assets/backprop_diagram.png' width=550px> In the forward pass through the network, our data and operations go from bottom to top here. We pass the input $x$ through a linear transformation $L_1$ with weights $W_1$ and biases $b_1$. The output then goes through the sigmoid operation $S$ and another linear transformation $L_2$. Finally we calculate the loss $\ell$. We use the loss as a measure of how bad the network's predictions are. The goal then is to adjust the weights and biases to minimize the loss. To train the weights with gradient descent, we propagate the gradient of the loss backwards through the network. Each operation has some gradient between the inputs and outputs. As we send the gradients backwards, we multiply the incoming gradient with the gradient for the operation. Mathematically, this is really just calculating the gradient of the loss with respect to the weights using the chain rule. $$ \large \frac{\partial \ell}{\partial W_1} = \frac{\partial L_1}{\partial W_1} \frac{\partial S}{\partial L_1} \frac{\partial L_2}{\partial S} \frac{\partial \ell}{\partial L_2} $$ **Note:** I'm glossing over a few details here that require some knowledge of vector calculus, but they aren't necessary to understand what's going on. We update our weights using this gradient with some learning rate $\alpha$. $$ \large W^\prime_1 = W_1 - \alpha \frac{\partial \ell}{\partial W_1} $$ The learning rate $\alpha$ is set such that the weight update steps are small enough that the iterative method settles in a minimum. 
## Losses in PyTorch Let's start by seeing how we calculate the loss with PyTorch. Through the `nn` module, PyTorch provides losses such as the cross-entropy loss (`nn.CrossEntropyLoss`). You'll usually see the loss assigned to `criterion`. As noted in the last part, with a classification problem such as MNIST, we're using the softmax function to predict class probabilities. With a softmax output, you want to use cross-entropy as the loss. To actually calculate the loss, you first define the criterion then pass in the output of your network and the correct labels. Something really important to note here. Looking at [the documentation for `nn.CrossEntropyLoss`](https://pytorch.org/docs/stable/nn.html#torch.nn.CrossEntropyLoss), > This criterion combines `nn.LogSoftmax()` and `nn.NLLLoss()` in one single class. > > The input is expected to contain scores for each class. This means we need to pass in the raw output of our network into the loss, not the output of the softmax function. This raw output is usually called the *logits* or *scores*. We use the logits because softmax gives you probabilities which will often be very close to zero or one but floating-point numbers can't accurately represent values near zero or one ([read more here](https://docs.python.org/3/tutorial/floatingpoint.html)). It's usually best to avoid doing calculations with probabilities, typically we use log-probabilities. 
``` # The MNIST datasets are hosted on yann.lecun.com that has moved under CloudFlare protection # Run this script to enable the datasets download # Reference: https://github.com/pytorch/vision/issues/1938 from six.moves import urllib opener = urllib.request.build_opener() opener.addheaders = [('User-agent', 'Mozilla/5.0')] urllib.request.install_opener(opener) import torch from torch import nn import torch.nn.functional as F from torchvision import datasets, transforms # Define a transform to normalize the data transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,)), ]) # Download and load the training data trainset = datasets.MNIST('~/.pytorch/MNIST_data/', download=True, train=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True) # Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10)) # Define the loss criterion = nn.CrossEntropyLoss() # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our logits logits = model(images) # Calculate the loss with the logits and the labels loss = criterion(logits, labels) print(loss) ``` In my experience it's more convenient to build the model with a log-softmax output using `nn.LogSoftmax` or `F.log_softmax` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.LogSoftmax)). Then you can get the actual probabilites by taking the exponential `torch.exp(output)`. With a log-softmax output, you want to use the negative log likelihood loss, `nn.NLLLoss` ([documentation](https://pytorch.org/docs/stable/nn.html#torch.nn.NLLLoss)). >**Exercise:** Build a model that returns the log-softmax as the output and calculate the loss using the negative log likelihood loss. 
``` ## Solution # Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) # Define the loss criterion = nn.NLLLoss() # Get our data images, labels = next(iter(trainloader)) # Flatten images images = images.view(images.shape[0], -1) # Forward pass, get our log-probabilities logps = model(images) # Calculate the loss with the logps and the labels loss = criterion(logps, labels) print(loss) ``` ## Autograd Now that we know how to calculate a loss, how do we use it to perform backpropagation? Torch provides a module, `autograd`, for automatically calculating the gradients of tensors. We can use it to calculate the gradients of all our parameters with respect to the loss. Autograd works by keeping track of operations performed on tensors, then going backwards through those operations, calculating gradients along the way. To make sure PyTorch keeps track of operations on a tensor and calculates the gradients, you need to set `requires_grad = True` on a tensor. You can do this at creation with the `requires_grad` keyword, or at any time with `x.requires_grad_(True)`. You can turn off gradients for a block of code with the `torch.no_grad()` content: ```python x = torch.zeros(1, requires_grad=True) >>> with torch.no_grad(): ... y = x * 2 >>> y.requires_grad False ``` Also, you can turn on or off gradients altogether with `torch.set_grad_enabled(True|False)`. The gradients are computed with respect to some variable `z` with `z.backward()`. This does a backward pass through the operations that created `z`. ``` x = torch.randn(2,2, requires_grad=True) print(x) y = x**2 print(y) ``` Below we can see the operation that created `y`, a power operation `PowBackward0`. ``` ## grad_fn shows the function that generated this variable print(y.grad_fn) ``` The autograd module keeps track of these operations and knows how to calculate the gradient for each one. 
In this way, it's able to calculate the gradients for a chain of operations, with respect to any one tensor. Let's reduce the tensor `y` to a scalar value, the mean. ``` z = y.mean() print(z) ``` You can check the gradients for `x` and `y` but they are empty currently. ``` print(x.grad) ``` To calculate the gradients, you need to run the `.backward` method on a Variable, `z` for example. This will calculate the gradient for `z` with respect to `x` $$ \frac{\partial z}{\partial x} = \frac{\partial}{\partial x}\left[\frac{1}{n}\sum_i^n x_i^2\right] = \frac{x}{2} $$ ``` z.backward() print(x.grad) print(x/2) ``` These gradients calculations are particularly useful for neural networks. For training we need the gradients of the weights with respect to the cost. With PyTorch, we run data forward through the network to calculate the loss, then, go backwards to calculate the gradients with respect to the loss. Once we have the gradients we can make a gradient descent step. ## Loss and Autograd together When we create a network with PyTorch, all of the parameters are initialized with `requires_grad = True`. This means that when we calculate the loss and call `loss.backward()`, the gradients for the parameters are calculated. These gradients are used to update the weights with gradient descent. Below you can see an example of calculating the gradients using a backwards pass. ``` # Build a feed-forward network model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(), nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 10), nn.LogSoftmax(dim=1)) criterion = nn.NLLLoss() images, labels = next(iter(trainloader)) images = images.view(images.shape[0], -1) logps = model(images) loss = criterion(logps, labels) print('Before backward pass: \n', model[0].weight.grad) loss.backward() print('After backward pass: \n', model[0].weight.grad) ``` ## Training the network! There's one last piece we need to start training, an optimizer that we'll use to update the weights with the gradients. 
We get these from PyTorch's [`optim` package](https://pytorch.org/docs/stable/optim.html). For example we can use stochastic gradient descent with `optim.SGD`. You can see how to define an optimizer below. ``` from torch import optim # Optimizers require the parameters to optimize and a learning rate optimizer = optim.SGD(model.parameters(), lr=0.01) ``` Now we know how to use all the individual parts so it's time to see how they work together. Let's consider just one learning step before looping through all the data. The general process with PyTorch: * Make a forward pass through the network * Use the network output to calculate the loss * Perform a backward pass through the network with `loss.backward()` to calculate the gradients * Take a step with the optimizer to update the weights Below I'll go through one training step and print out the weights and gradients so you can see how it changes. Note that I have a line of code `optimizer.zero_grad()`. When you do multiple backwards passes with the same parameters, the gradients are accumulated. This means that you need to zero the gradients on each training pass or you'll retain gradients from previous training batches. ``` print('Initial weights - ', model[0].weight) images, labels = next(iter(trainloader)) images.resize_(64, 784) # Clear the gradients, do this because gradients are accumulated optimizer.zero_grad() # Forward pass, then backward pass, then update weights output = model(images) loss = criterion(output, labels) loss.backward() print('Gradient -', model[0].weight.grad) # Take an update step and few the new weights optimizer.step() print('Updated weights - ', model[0].weight) ``` ### Training for real Now we'll put this algorithm into a loop so we can go through all the images. Some nomenclature, one pass through the entire dataset is called an *epoch*. So here we're going to loop through `trainloader` to get our training batches. 
For each batch, we'll do a training pass where we calculate the loss, do a backwards pass, and update the weights.

> **Exercise:** Implement the training pass for our network. If you implemented it correctly, you should see the training loss drop with each epoch.

```
model = nn.Sequential(nn.Linear(784, 128),
                      nn.ReLU(),
                      nn.Linear(128, 64),
                      nn.ReLU(),
                      nn.Linear(64, 10),
                      nn.LogSoftmax(dim=1))

criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)

epochs = 5
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        # Flatten MNIST images into a 784 long vector
        images = images.view(images.shape[0], -1)

        # TODO: Training pass
        optimizer.zero_grad()

        output = model(images)
        loss = criterion(output, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
    else:
        print(f"Training loss: {running_loss/len(trainloader)}")
```

With the network trained, we can check out its predictions.

```
%matplotlib inline
import helper

images, labels = next(iter(trainloader))

img = images[0].view(1, 784)
# Turn off gradients to speed up this part
with torch.no_grad():
    logps = model(img)

# Output of the network are log-probabilities, need to take exponential for probabilities
ps = torch.exp(logps)
helper.view_classify(img.view(1, 28, 28), ps)
```

Now our network is brilliant. It can accurately predict the digits in our images. Next up you'll write the code for training a neural network on a more complex dataset.
github_jupyter
# Class Session 2 ## Comparing running times for enumerating neighbors of all vertices in a graph (with different graph data structures) In this notebook we will measure the running time for enumerating the neighbor vertices for three different data structures for representing an undirected graph: - adjacency matrix - adjacency list - edge list Let's assume that each vertex is labeled with a unique integer number. So if there are N vertices, the vertices are labeled 0, 2, 3, 4, ..., N-1. First, we will import all of the Python modules that we will need for this exercise: note how we assign a short name, "np" to the numpy module. This will save typing. ``` import numpy as np import igraph import timeit import itertools ``` Now, define a function that returns the index numbers of the neighbors of a vertex i, when the graph is stored in adjacency matrix format. So your function will accept as an input a NxN numpy matrix. The function should return a list (of index numbers of the neighbors). ``` def enumerate_matrix(gmat, i): return np.nonzero(gmat[i,:])[1].tolist() ``` Define a function that enumerates the neighbors of a vertex i, when the graph is stored in adjacency list format (a list of lists). The function should return a list (of index numbers of the neighbors). ``` def enumerate_adj_list(adj_list, i): return adj_list[i] ``` Define a function that enumerates the neighbors of a vertex i, when the graph is stored in edge-list format (a numpy array of length-two-lists); use `numpy.where` and `numpy.unique`. The function should return a list (of index numbers of the neighbors). ``` def enumerate_edge_list(edge_list, i): inds1 = np.where(edge_list[:,0] == i)[0] elems1 = edge_list[inds1, 1].tolist() inds2 = np.where(edge_list[:,1] == i)[0] elems2 = edge_list[inds2, 0].tolist() return np.unique(elems1 + elems2).tolist() ``` In this notebook, we are going to create some random networks. 
We'll use the Barabasi-Albert method, which has two parameters, *n* and *m* (where *n* > *m*).  (For more information on the Barabasi-Albert model, see http://barabasi.com/f/622.pdf). In `igraph`, the `igraph.Graph.Barabasi` method will generate a single connected undirected graph with *n* vertices and where the total number *E* of edges is:

E = nm - (m^2 / 2) - m/2

Let's plot a Barabasi-Albert graph generated using *n*=5 and *m*=3:

```
igraph.drawing.plot(igraph.Graph.Barabasi(5,3), bbox=[0,0,200,200])
```

Now we need to write a simulation funtion that generates random graphs and enumerates all neighbors of each vertex in the graph (while computing running time), for each of three different graph data structures (adjacency matrix, adjacency list, and edge list). The function's sole argument "n" is the number of vertices. It returns a length-three list containing the average running time for enumerating the neighbor vertices of a vertex in the graph.

```
def do_sim_ms(n):
    """Benchmark per-vertex neighbor enumeration for three graph representations.

    For `nrep` random Barabasi-Albert graphs of `n` vertices (m=5), time
    `nsubrep` full passes of enumerating the neighbors of every vertex, once
    per representation (adjacency matrix, adjacency list, edge list).

    Returns a dict mapping each representation name to its average running
    time per vertex, in milliseconds (timeit.default_timer gives seconds;
    the *1000 converts to ms and the /n puts it on a per-vertex basis).
    """
    retlist = []
    nrep = 10      # number of independent random graphs
    nsubrep = 10   # timing passes per graph, to smooth out noise
    # this is (sort of) a Python way of doing the R function "replicate":
    for _ in itertools.repeat(None, nrep):
        # make a random undirected graph with fixed (average) vertex degree = 5
        g = igraph.Graph.Barabasi(n, 5)
        # get the graph in three different representations
        g_matrix = np.matrix(g.get_adjacency().data)
        g_adj_list = g.get_adjlist()
        g_edge_list = np.array(g.get_edgelist())
        # Time the adjacency-matrix representation.
        start_time = timeit.default_timer()
        for _ in itertools.repeat(None, nsubrep):
            for i in range(0, n):
                enumerate_matrix(g_matrix, i)
        matrix_elapsed = timeit.default_timer() - start_time
        # Time the adjacency-list representation.
        start_time = timeit.default_timer()
        for _ in itertools.repeat(None, nsubrep):
            for i in range(0, n):
                enumerate_adj_list(g_adj_list, i)
        adjlist_elapsed = timeit.default_timer() - start_time
        # Time the edge-list representation.
        start_time = timeit.default_timer()
        for _ in itertools.repeat(None, nsubrep):
            for i in range(0, n):
                enumerate_edge_list(g_edge_list, i)
        edgelist_elapsed = timeit.default_timer() - start_time
        retlist.append([matrix_elapsed,
                        adjlist_elapsed,
                        edgelist_elapsed])
    # average over replicates and then
    # divide by n so that the running time results are on a per-vertex basis
    # (the *1000 converts seconds to milliseconds)
    resarray = 1000 * np.mean(np.array(retlist), axis=0)/n
    resdict = {'adjacency matrix': resarray[0],
               'adjacency list': resarray[1],
               'edge list': resarray[2]}
    return resdict
```

A simulation with 1000 vertices clearly shows that adjacency list is fastest:

```
do_sim_ms(1000)
```

Now let's quadruple "n".  We see the expected behavior, with the running time for the adjacency-matrix and edge-list formats going up when we increase "n", but there is hardly any change in the running time for the graph stored in adjacency list format:

```
do_sim_ms(4000)
```
github_jupyter
# Example: CanvasXpress scatter2d Chart No. 1 This example page demonstrates how to, using the Python package, create a chart that matches the CanvasXpress online example located at: https://www.canvasxpress.org/examples/scatter2d-1.html This example is generated using the reproducible JSON obtained from the above page and the `canvasxpress.util.generator.generate_canvasxpress_code_from_json_file()` function. Everything required for the chart to render is included in the code below. Simply run the code block. ``` from canvasxpress.canvas import CanvasXpress from canvasxpress.js.collection import CXEvents from canvasxpress.render.jupyter import CXNoteBook cx = CanvasXpress( render_to="scatter2d1", data={ "y": { "smps": [ "Alcohol", "Tobacco" ], "vars": [ "North", "Yorkshire", "Northeast", "East Midlands", "West Midlands", "East Anglia", "Southeast", "Southwest", "Wales", "Scotland", "Northern Ireland" ], "data": [ [ 6.47, 4.03 ], [ 6.13, 3.76 ], [ 6.19, 3.77 ], [ 4.89, 3.34 ], [ 5.63, 3.47 ], [ 4.52, 2.92 ], [ 5.89, 3.2 ], [ 4.79, 2.71 ], [ 3.53, 3.53 ], [ 6.08, 4.51 ], [ 4.02, 4.56 ] ] }, "m": { "Name": "Alcohol Tobacco", "Description": "Average weekly household spending, in British pounds, on tobacco products and alcoholic beverages for each of the 11 regions of Great Britain.", "Reference": "Moore, David S., and George P. McCabe (1989). Introduction to the Practice of Statistics, p. 179. Original source : Family Expenditure Survey, Department of Employment, 1981 (British official statistics)" }, "x": { "Description": [ "Average weekly household spending on alcoholic beverages in pounds", "Average weekly household spending on tobacco products in pounds" ] } }, config={ "citation": "Moore, David S., and George P. McCabe (1989). Introduction to the Practice of Statistics, p. 
179.", "decorations": { "marker": [ { "y": 0.18, "text": "Maybe an Outlier?", "variable": [ "Northern Ireland" ], "x": 0.45, "sample": [ "Alcohol", "Tobacco" ], "id": "scatter2d1-marker-0", "vi": [ 10 ], "si": [ 0, 1 ], "vi2": False, "si2": False, "type": "line", "b": [ 69.15205078125, 58.2, 526.021923828125, 481.5 ], "len": 122.5634765625, "width": 15, "tx": 186.0458116319444, "ty": 108.99560439560452, "tx2": False, "ty2": False, "curX": 305.86191650390623, "curY": 144.87 } ] }, "graphType": "Scatter2D", "showTransition": False, "theme": "CanvasXpress", "title": "Average weekly household spending, in British pounds, on tobacco productsand alcoholic beverages for each of the 11 regions of Great Britain.", "xAxis": [ "Alcohol" ], "yAxis": [ "Tobacco" ] }, width=613, height=613, events=CXEvents(), after_render=[], other_init_params={ "version": 35, "events": False, "info": False, "afterRenderInit": False, "noValidate": True } ) display = CXNoteBook(cx) display.render(output_file="scatter2d_1.html") ```
github_jupyter
##### Copyright 2018 The TensorFlow Probability Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # Linear Mixed Effects Models <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/probability/examples/Linear_Mixed_Effects_Models"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Linear_Mixed_Effects_Models.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> A linear mixed effects model is a simple approach for modeling structured linear relationships (Harville, 1997; Laird and Ware, 1982). 
Each data point consists of inputs of varying type—categorized into groups—and a real-valued output. A linear mixed effects model is a _hierarchical model_: it shares statistical strength across groups in order to improve inferences about any individual data point. In this tutorial, we demonstrate linear mixed effects models with a real-world example in TensorFlow Probability. We'll use the JointDistributionCoroutine and Markov Chain Monte Carlo (`tfp.mcmc`) modules. ### Dependencies & Prerequisites ``` #@title Import and set ups{ display-mode: "form" } import csv import matplotlib.pyplot as plt import numpy as np import pandas as pd import requests import tensorflow.compat.v2 as tf tf.enable_v2_behavior() import tensorflow_probability as tfp tfd = tfp.distributions tfb = tfp.bijectors dtype = tf.float64 %config InlineBackend.figure_format = 'retina' %matplotlib inline plt.style.use('ggplot') ``` ### Make things Fast! Before we dive in, let's make sure we're using a GPU for this demo. To do this, select "Runtime" -> "Change runtime type" -> "Hardware accelerator" -> "GPU". The following snippet will verify that we have access to a GPU. ``` if tf.test.gpu_device_name() != '/device:GPU:0': print('WARNING: GPU device not found.') else: print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name())) ``` Note: if for some reason you cannot access a GPU, this colab will still work. (Training will just take longer.) ## Data We use the `InstEval` data set from the popular [`lme4` package in R](https://CRAN.R-project.org/package=lme4) (Bates et al., 2015). It is a data set of courses and their evaluation ratings. Each course includes metadata such as `students`, `instructors`, and `departments`, and the response variable of interest is the evaluation rating. ``` def load_insteval(): """Loads the InstEval data set. 
It contains 73,421 university lecture evaluations by students at ETH Zurich with a total of 2,972 students, 2,160 professors and lecturers, and several student, lecture, and lecturer attributes. Implementation is built from the `observations` Python package. Returns: Tuple of np.ndarray `x_train` with 73,421 rows and 7 columns and dictionary `metadata` of column headers (feature names). """ url = ('https://raw.github.com/vincentarelbundock/Rdatasets/master/csv/' 'lme4/InstEval.csv') with requests.Session() as s: download = s.get(url) f = download.content.decode().splitlines() iterator = csv.reader(f) columns = next(iterator)[1:] x_train = np.array([row[1:] for row in iterator], dtype=np.int) metadata = {'columns': columns} return x_train, metadata ``` We load and preprocess the data set. We hold out 20% of the data so we can evaluate our fitted model on unseen data points. Below we visualize the first few rows. ``` data, metadata = load_insteval() data = pd.DataFrame(data, columns=metadata['columns']) data = data.rename(columns={'s': 'students', 'd': 'instructors', 'dept': 'departments', 'y': 'ratings'}) data['students'] -= 1 # start index by 0 # Remap categories to start from 0 and end at max(category). data['instructors'] = data['instructors'].astype('category').cat.codes data['departments'] = data['departments'].astype('category').cat.codes train = data.sample(frac=0.8) test = data.drop(train.index) train.head() ``` We set up the data set in terms of a `features` dictionary of inputs and a `labels` output corresponding to the ratings. Each feature is encoded as an integer and each label (evaluation rating) is encoded as a floating point number. 
``` get_value = lambda dataframe, key, dtype: dataframe[key].values.astype(dtype) features_train = { k: get_value(train, key=k, dtype=np.int32) for k in ['students', 'instructors', 'departments', 'service']} labels_train = get_value(train, key='ratings', dtype=np.float32) features_test = {k: get_value(test, key=k, dtype=np.int32) for k in ['students', 'instructors', 'departments', 'service']} labels_test = get_value(test, key='ratings', dtype=np.float32) num_students = max(features_train['students']) + 1 num_instructors = max(features_train['instructors']) + 1 num_departments = max(features_train['departments']) + 1 num_observations = train.shape[0] print("Number of students:", num_students) print("Number of instructors:", num_instructors) print("Number of departments:", num_departments) print("Number of observations:", num_observations) ``` ## Model A typical linear model assumes independence, where any pair of data points has a constant linear relationship. In the `InstEval` data set, observations arise in groups each of which may have varying slopes and intercepts. Linear mixed effects models, also known as hierarchical linear models or multilevel linear models, capture this phenomenon (Gelman & Hill, 2006). Examples of this phenomenon include: + __Students__. Observations from a student are not independent: some students may systematically give low (or high) lecture ratings. + __Instructors__. Observations from an instructor are not independent: we expect good teachers to generally have good ratings and bad teachers to generally have bad ratings. + __Departments__. Observations from a department are not independent: certain departments may generally have dry material or stricter grading and thus be rated lower than others. 
To capture this, recall that for a data set of $N\times D$ features $\mathbf{X}$ and $N$ labels $\mathbf{y}$, linear regression posits the model $$ \begin{equation*} \mathbf{y} = \mathbf{X}\beta + \alpha + \epsilon, \end{equation*} $$ where there is a slope vector $\beta\in\mathbb{R}^D$, intercept $\alpha\in\mathbb{R}$, and random noise $\epsilon\sim\text{Normal}(\mathbf{0}, \mathbf{I})$. We say that $\beta$ and $\alpha$ are "fixed effects": they are effects held constant across the population of data points $(x, y)$. An equivalent formulation of the equation as a likelihood is $\mathbf{y} \sim \text{Normal}(\mathbf{X}\beta + \alpha, \mathbf{I})$. This likelihood is maximized during inference in order to find point estimates of $\beta$ and $\alpha$ that fit the data. A linear mixed effects model extends linear regression as $$ \begin{align*} \eta &\sim \text{Normal}(\mathbf{0}, \sigma^2 \mathbf{I}), \\ \mathbf{y} &= \mathbf{X}\beta + \mathbf{Z}\eta + \alpha + \epsilon. \end{align*} $$ where there is still a slope vector $\beta\in\mathbb{R}^P$, intercept $\alpha\in\mathbb{R}$, and random noise $\epsilon\sim\text{Normal}(\mathbf{0}, \mathbf{I})$. In addition, there is a term $\mathbf{Z}\eta$, where $\mathbf{Z}$ is a features matrix and $\eta\in\mathbb{R}^Q$ is a vector of random slopes; $\eta$ is normally distributed with variance component parameter $\sigma^2$. $\mathbf{Z}$ is formed by partitioning the original $N\times D$ features matrix in terms of a new $N\times P$ matrix $\mathbf{X}$ and $N\times Q$ matrix $\mathbf{Z}$, where $P + Q=D$: this partition allows us to model the features separately using the fixed effects $\beta$ and the latent variable $\eta$ respectively. We say the latent variables $\eta$ are "random effects": they are effects that vary across the population (although they may be constant across subpopulations). In particular, because the random effects $\eta$ have mean 0, the data label's mean is captured by $\mathbf{X}\beta + \alpha$. 
The random effects component $\mathbf{Z}\eta$ captures variations in the data: for example, "Instructor \#54 is rated 1.4 points higher than the mean." In this tutorial, we posit the following effects: + Fixed effects: `service`. `service` is a binary covariate corresponding to whether the course belongs to the instructor's main department. No matter how much additional data we collect, it can only take on values $0$ and $1$. + Random effects: `students`, `instructors`, and `departments`. Given more observations from the population of course evaluation ratings, we may be looking at new students, teachers, or departments. In the syntax of R's lme4 package (Bates et al., 2015), the model can be summarized as ``` ratings ~ service + (1|students) + (1|instructors) + (1|departments) + 1 ``` where `x` denotes a fixed effect,`(1|x)` denotes a random effect for `x`, and `1` denotes an intercept term. We implement this model below as a JointDistribution. To have better support for parameter tracking (e.g., we want to track all the `tf.Variable` in `model.trainable_variables`), we implement the model template as `tf.Module`. ``` class LinearMixedEffectModel(tf.Module): def __init__(self): # Set up fixed effects and other parameters. # These are free parameters to be optimized in E-steps self._intercept = tf.Variable(0., name="intercept") # alpha in eq self._effect_service = tf.Variable(0., name="effect_service") # beta in eq self._stddev_students = tfp.util.TransformedVariable( 1., bijector=tfb.Exp(), name="stddev_students") # sigma in eq self._stddev_instructors = tfp.util.TransformedVariable( 1., bijector=tfb.Exp(), name="stddev_instructors") # sigma in eq self._stddev_departments = tfp.util.TransformedVariable( 1., bijector=tfb.Exp(), name="stddev_departments") # sigma in eq def __call__(self, features): model = tfd.JointDistributionSequential([ # Set up random effects. 
tfd.MultivariateNormalDiag( loc=tf.zeros(num_students), scale_identity_multiplier=self._stddev_students), tfd.MultivariateNormalDiag( loc=tf.zeros(num_instructors), scale_identity_multiplier=self._stddev_instructors), tfd.MultivariateNormalDiag( loc=tf.zeros(num_departments), scale_identity_multiplier=self._stddev_departments), # This is the likelihood for the observed. lambda effect_departments, effect_instructors, effect_students: tfd.Independent( tfd.Normal( loc=(self._effect_service * features["service"] + tf.gather(effect_students, features["students"], axis=-1) + tf.gather(effect_instructors, features["instructors"], axis=-1) + tf.gather(effect_departments, features["departments"], axis=-1) + self._intercept), scale=1.), reinterpreted_batch_ndims=1) ]) # To enable tracking of the trainable variables via the created distribution, # we attach a reference to `self`. Since all TFP objects sub-class # `tf.Module`, this means that the following is possible: # LinearMixedEffectModel()(features_train).trainable_variables # ==> tuple of all tf.Variables created by LinearMixedEffectModel. model._to_track = self return model lmm_jointdist = LinearMixedEffectModel() # Conditioned on feature/predictors from the training data lmm_train = lmm_jointdist(features_train) lmm_train.trainable_variables ``` As a Probabilistic graphical program, we can also visualize the model's structure in terms of its computational graph. This graph encodes dataflow across the random variables in the program, making explicit their relationships in terms of a graphical model (Jordan, 2003). As a statistical tool, we might look at the graph in order to better see, for example, that `intercept` and `effect_service` are conditionally dependent given `ratings`; this may be harder to see from the source code if the program is written with classes, cross references across modules, and/or subroutines. 
As a computational tool, we might also notice latent variables flow into the `ratings` variable via `tf.gather` ops. This may be a bottleneck on certain hardware accelerators if indexing `Tensor`s is expensive; visualizing the graph makes this readily apparent. ``` lmm_train.resolve_graph() ``` ## Parameter Estimation Given data, the goal of inference is to fit the model's fixed effects slope $\beta$, intercept $\alpha$, and variance component parameter $\sigma^2$. The maximum likelihood principle formalizes this task as $$ \max_{\beta, \alpha, \sigma}~\log p(\mathbf{y}\mid \mathbf{X}, \mathbf{Z}; \beta, \alpha, \sigma) = \max_{\beta, \alpha, \sigma}~\log \int p(\eta; \sigma) ~p(\mathbf{y}\mid \mathbf{X}, \mathbf{Z}, \eta; \beta, \alpha)~d\eta. $$ In this tutorial, we use the Monte Carlo EM algorithm to maximize this marginal density (Dempster et al., 1977; Wei and Tanner, 1990).¹ We perform Markov chain Monte Carlo to compute the expectation of the conditional likelihood with respect to the random effects ("E-step"), and we perform gradient descent to maximize the expectation with respect to the parameters ("M-step"): + For the E-step, we set up Hamiltonian Monte Carlo (HMC). It takes a current state—the student, instructor, and department effects—and returns a new state. We assign the new state to TensorFlow variables, which will denote the state of the HMC chain. + For the M-step, we use the posterior sample from HMC to calculate an unbiased estimate of the marginal likelihood up to a constant. We then apply its gradient with respect to the parameters of interest. This produces an unbiased stochastic descent step on the marginal likelihood. We implement it with the Adam TensorFlow optimizer and minimize the negative of the marginal. 
``` target_log_prob_fn = lambda *x: lmm_train.log_prob(x + (labels_train,)) trainable_variables = lmm_train.trainable_variables current_state = lmm_train.sample()[:-1] # For debugging target_log_prob_fn(*current_state) # Set up E-step (MCMC). hmc = tfp.mcmc.HamiltonianMonteCarlo( target_log_prob_fn=target_log_prob_fn, step_size=0.015, num_leapfrog_steps=3) kernel_results = hmc.bootstrap_results(current_state) @tf.function(autograph=False, experimental_compile=True) def one_e_step(current_state, kernel_results): next_state, next_kernel_results = hmc.one_step( current_state=current_state, previous_kernel_results=kernel_results) return next_state, next_kernel_results optimizer = tf.optimizers.Adam(learning_rate=.01) # Set up M-step (gradient descent). @tf.function(autograph=False, experimental_compile=True) def one_m_step(current_state): with tf.GradientTape() as tape: loss = -target_log_prob_fn(*current_state) grads = tape.gradient(loss, trainable_variables) optimizer.apply_gradients(zip(grads, trainable_variables)) return loss ``` We perform a warm-up stage, which runs one MCMC chain for a number of iterations so that training may be initialized within the posterior's probability mass. We then run a training loop. It jointly runs the E and M-steps and records values during training. ``` num_warmup_iters = 1000 num_iters = 1500 num_accepted = 0 effect_students_samples = np.zeros([num_iters, num_students]) effect_instructors_samples = np.zeros([num_iters, num_instructors]) effect_departments_samples = np.zeros([num_iters, num_departments]) loss_history = np.zeros([num_iters]) # Run warm-up stage. for t in range(num_warmup_iters): current_state, kernel_results = one_e_step(current_state, kernel_results) num_accepted += kernel_results.is_accepted.numpy() if t % 500 == 0 or t == num_warmup_iters - 1: print("Warm-Up Iteration: {:>3} Acceptance Rate: {:.3f}".format( t, num_accepted / (t + 1))) num_accepted = 0 # reset acceptance rate counter # Run training. 
for t in range(num_iters): # run 5 MCMC iterations before every joint EM update for _ in range(5): current_state, kernel_results = one_e_step(current_state, kernel_results) loss = one_m_step(current_state) effect_students_samples[t, :] = current_state[0].numpy() effect_instructors_samples[t, :] = current_state[1].numpy() effect_departments_samples[t, :] = current_state[2].numpy() num_accepted += kernel_results.is_accepted.numpy() loss_history[t] = loss.numpy() if t % 500 == 0 or t == num_iters - 1: print("Iteration: {:>4} Acceptance Rate: {:.3f} Loss: {:.3f}".format( t, num_accepted / (t + 1), loss_history[t])) ``` You can also write the warmup for-loop into a `tf.while_loop`, and the training step into a `tf.scan` or `tf.while_loop` for even faster inference. For example: ``` @tf.function(autograph=False, experimental_compile=True) def run_k_e_steps(k, current_state, kernel_results): _, next_state, next_kernel_results = tf.while_loop( cond=lambda i, state, pkr: i < k, body=lambda i, state, pkr: (i+1, *one_e_step(state, pkr)), loop_vars=(tf.constant(0), current_state, kernel_results) ) return next_state, next_kernel_results ``` Above, we did not run the algorithm until a convergence threshold was detected. To check whether training was sensible, we verify that the loss function indeed tends to converge over training iterations. ``` plt.plot(loss_history) plt.ylabel(r'Loss $-\log$ $p(y\mid\mathbf{x})$') plt.xlabel('Iteration') plt.show() ``` We also use a trace plot, which shows the Markov chain Monte Carlo algorithm's trajectory across specific latent dimensions. Below we see that specific instructor effects indeed meaningfully transition away from their initial state and explore the state space. The trace plot also indicates that the effects differ across instructors but with similar mixing behavior. 
``` for i in range(7): plt.plot(effect_instructors_samples[:, i]) plt.legend([i for i in range(7)], loc='lower right') plt.ylabel('Instructor Effects') plt.xlabel('Iteration') plt.show() ``` ## Criticism Above, we fitted the model. We now look into criticizing its fit using data, which lets us explore and better understand the model. One such technique is a residual plot, which plots the difference between the model's predictions and ground truth for each data point. If the model were correct, then their difference should be standard normally distributed; any deviations from this pattern in the plot indicate model misfit. We build the residual plot by first forming the posterior predictive distribution over ratings, which replaces the prior distribution on the random effects with its posterior given training data. In particular, we run the model forward and intercept its dependence on prior random effects with their inferred posterior means.² ``` lmm_test = lmm_jointdist(features_test) [ effect_students_mean, effect_instructors_mean, effect_departments_mean, ] = [ np.mean(x, axis=0).astype(np.float32) for x in [ effect_students_samples, effect_instructors_samples, effect_departments_samples ] ] # Get the posterior predictive distribution (*posterior_conditionals, ratings_posterior), _ = lmm_test.sample_distributions( value=( effect_students_mean, effect_instructors_mean, effect_departments_mean, )) ratings_prediction = ratings_posterior.mean() ``` Upon visual inspection, the residuals look somewhat standard-normally distributed. However, the fit is not perfect: there is larger probability mass in the tails than a normal distribution, which indicates the model might improve its fit by relaxing its normality assumptions. In particular, although it is most common to use a normal distribution to model ratings in the `InstEval` data set, a closer look at the data reveals that course evaluation ratings are in fact ordinal values from 1 to 5. 
This suggests that we should be using an ordinal distribution, or even Categorical if we have enough data to throw away the relative ordering. This is a one-line change to the model above; the same inference code is applicable. ``` plt.title("Residuals for Predicted Ratings on Test Set") plt.xlim(-4, 4) plt.ylim(0, 800) plt.hist(ratings_prediction - labels_test, 75) plt.show() ``` To explore how the model makes individual predictions, we look at the histogram of effects for students, instructors, and departments. This lets us understand how individual elements in a data point's feature vector tends to influence the outcome. Not surprisingly, we see below that each student typically has little effect on an instructor's evaluation rating. Interestingly, we see that the department an instructor belongs to has a large effect. ``` plt.title("Histogram of Student Effects") plt.hist(effect_students_mean, 75) plt.show() plt.title("Histogram of Instructor Effects") plt.hist(effect_instructors_mean, 75) plt.show() plt.title("Histogram of Department Effects") plt.hist(effect_departments_mean, 75) plt.show() ``` ## Footnotes ¹ Linear mixed effect models are a special case where we can analytically compute its marginal density. For the purposes of this tutorial, we demonstrate Monte Carlo EM, which more readily applies to non-analytic marginal densities such as if the likelihood were extended to be Categorical instead of Normal. ² For simplicity, we form the predictive distribution's mean using only one forward pass of the model. This is done by conditioning on the posterior mean and is valid for linear mixed effects models. However, this is not valid in general: the posterior predictive distribution's mean is typically intractable and requires taking the empirical mean across multiple forward passes of the model given posterior samples. 
## Acknowledgments This tutorial was originally written in Edward 1.0 ([source](https://github.com/blei-lab/edward/blob/master/notebooks/linear_mixed_effects_models.ipynb)). We thank all contributors to writing and revising that version. ## References 1. Douglas Bates and Martin Machler and Ben Bolker and Steve Walker. Fitting Linear Mixed-Effects Models Using lme4. _Journal of Statistical Software_, 67(1):1-48, 2015. 2. Arthur P. Dempster, Nan M. Laird, and Donald B. Rubin. Maximum likelihood from incomplete data via the EM algorithm. _Journal of the Royal Statistical Society, Series B (Methodological)_, 1-38, 1977. 3. Andrew Gelman and Jennifer Hill. _Data analysis using regression and multilevel/hierarchical models._ Cambridge University Press, 2006. 4. David A. Harville. Maximum likelihood approaches to variance component estimation and to related problems. _Journal of the American Statistical Association_, 72(358):320-338, 1977. 5. Michael I. Jordan. An Introduction to Graphical Models. Technical Report, 2003. 6. Nan M. Laird and James Ware. Random-effects models for longitudinal data. _Biometrics_, 963-974, 1982. 7. Greg Wei and Martin A. Tanner. A Monte Carlo implementation of the EM algorithm and the poor man's data augmentation algorithms. _Journal of the American Statistical Association_, 699-704, 1990.
github_jupyter
```
import os, sys, gc
import time
import glob
import pickle
import copy
import json
import random
from collections import OrderedDict, namedtuple
import multiprocessing
import threading
import traceback
from typing import Tuple, List
import h5py
from tqdm import tqdm, tqdm_notebook
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
from PIL import Image
import torch
import torchvision
import torch.nn.functional as F
from torch import nn, optim
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import CosineAnnealingLR
import torchmetrics
import pl_bolts
import pytorch_lightning as pl
from IPython.display import display, clear_output
import faiss
from modules.AugsDS_v7 import *
from modules.eval_functions import *
from modules.eval_metrics import evaluate
sys.path.append('./modules')
from modules.Facebook_model_v20 import ArgsT15_EffNetV2L, FacebookModel

args = ArgsT15_EffNetV2L()
args.pretrained_bb = False  # backbone weights come from the checkpoint loaded below
args.arc_classnum = 40
print(args)
```

# Building model

```
model = FacebookModel(args)
```

# Loading ckpt

```
ckpt_filename = './TEST15_arcface/lightning_logs/version_7/checkpoints/last.ckpt'
_ = model.restore_checkpoint(ckpt_filename)
```

# Inference configuration

```
do_simple_augmentation = False
K = 500          # number of nearest neighbours retrieved per query
BATCH_SIZE = 96
N_WORKERS = 7

DS_INPUT_DIR = './all_datasets/dataset'
ALL_FOLDERS = ['query_images', 'reference_images', 'training_images']

# Strip any trailing path separators BEFORE storing the value on `args`,
# so the cleaned path is the one actually used downstream.
# NOTE: the original test used r'\\' (a two-character string), which can
# never equal a single trailing character, so backslashes were never stripped.
while DS_INPUT_DIR and DS_INPUT_DIR[-1] in ('/', '\\'):
    DS_INPUT_DIR = DS_INPUT_DIR[:-1]

args.ALL_FOLDERS = ALL_FOLDERS
args.BATCH_SIZE = BATCH_SIZE
args.N_WORKERS = N_WORKERS
args.DS_INPUT_DIR = DS_INPUT_DIR
```

# Data Source

```
# Resize the raw dataset once, only if the resized folders are not present yet.
if any(not os.path.exists(os.path.join(args.DS_DIR, folder)) for folder in args.ALL_FOLDERS):
    assert os.path.exists(args.DS_INPUT_DIR), f'DS_INPUT_DIR not found: {args.DS_INPUT_DIR}'
    resize_dataset(
        ds_input_dir=args.DS_INPUT_DIR,
        ds_output_dir=args.DS_DIR,
        output_wh=args.DATASET_WH,
        output_ext='jpg',
        num_workers=args.N_WORKERS,
        ALL_FOLDERS=args.ALL_FOLDERS,
        verbose=False,
    )

print('Paths:')
print(' - DS_INPUT_DIR:', args.DS_INPUT_DIR)
print(' - DS_DIR:      ', args.DS_DIR)
assert os.path.exists(args.DS_DIR), f'DS_DIR not found: {args.DS_DIR}'

# The ground-truth CSV may live next to the resized dataset or next to the raw
# input; fall back to the latter. Catch only the "file missing" case rather
# than a bare `except:`, which would also swallow unrelated errors (and even
# KeyboardInterrupt).
try:
    public_ground_truth_path = os.path.join(args.DS_DIR, 'public_ground_truth.csv')
    public_gt = pd.read_csv(public_ground_truth_path)
except FileNotFoundError:
    public_ground_truth_path = os.path.join(args.DS_INPUT_DIR, 'public_ground_truth.csv')
    public_gt = pd.read_csv(public_ground_truth_path)
```

# Datasets

```
# Full query / reference / training splits; no augmentation at inference time.
ds_qry_full = FacebookDataset(
    samples_id_v=[f'Q{i:05d}' for i in range(50_000)],
    do_augmentation=False,
    ds_dir=args.DS_DIR,
    output_wh=args.OUTPUT_WH,
    channel_first=True,
    norm_type=args.img_norm_type,
    verbose=True,
)
# ds_qry_full.plot_sample(4)

ds_ref_full = FacebookDataset(
    samples_id_v=[f'R{i:06d}' for i in range(1_000_000)],
    do_augmentation=False,
    ds_dir=args.DS_DIR,
    output_wh=args.OUTPUT_WH,
    channel_first=True,
    norm_type=args.img_norm_type,
    verbose=True,
)
# ds_ref_full.plot_sample(4)

ds_trn_full = FacebookDataset(
    samples_id_v=[f'T{i:06d}' for i in range(1_000_000)],
    do_augmentation=False,
    ds_dir=args.DS_DIR,
    output_wh=args.OUTPUT_WH,
    channel_first=True,
    norm_type=args.img_norm_type,
    verbose=True,
)
# ds_trn_full.plot_sample(4)

# Order must be deterministic (shuffle=False) so embeddings align with ids.
dl_qry_full = DataLoader(
    ds_qry_full,
    batch_size=args.BATCH_SIZE,
    num_workers=args.N_WORKERS,
    shuffle=False,
)
dl_ref_full = DataLoader(
    ds_ref_full,
    batch_size=args.BATCH_SIZE,
    num_workers=args.N_WORKERS,
    shuffle=False,
)
dl_trn_full = DataLoader(
    ds_trn_full,
    batch_size=args.BATCH_SIZE,
    num_workers=args.N_WORKERS,
    shuffle=False,
)
```

### Query embeddings

```
embed_qry_d = calc_embed_d(
    model,
    dataloader=dl_qry_full,
    do_simple_augmentation=do_simple_augmentation,
)
```

### Reference embeddings

```
aug = '_AUG' if do_simple_augmentation else ''
submission_path = ckpt_filename.replace('.ckpt', f'_{args.OUTPUT_WH[0]}x{args.OUTPUT_WH[1]}{aug}_REF.h5')
scores_path = submission_path.replace('.h5', '_match_d.pickle')

embed_ref_d = calc_embed_d(
    model,
    dataloader=dl_ref_full,
    do_simple_augmentation=do_simple_augmentation
)

save_submission(
    embed_qry_d,
    embed_ref_d,
    save_path=submission_path,
)

match_d = calc_match_scores(embed_qry_d, embed_ref_d, k=K)
save_obj(match_d, scores_path)
```

### Public GT validation

```
eval_d = evaluate(
    submission_path=submission_path,
    gt_path=public_ground_truth_path,
    is_matching=False,
)
```
github_jupyter
# Getting Started

## Install Dependencies

This is a tutorial of using D4 in Python. Before you start trying this document, please make sure you have the D4 package and the `d4tools` binary installed.

* To install the d4tools binary, please read the instructions from [this link](https://github.com/38/d4-format#installation--2-minutes)
* To install `pyd4`, please run command `pip install pyd4`

## Download dataset

To download the dataset we are using for this interactive document, please use the following links:

* WGS Sample HG002: https://home.chpc.utah.edu/~u0875014/hg002.cram
* Reference Genome: https://home.chpc.utah.edu/~u0875014/hg19.fa.gz

Please make sure that the index files are accessible for both the CRAM file and the reference genome.

# Download Data

```
import os
os.system("mkdir -p data")
os.system("cd data && wget --continue https://home.chpc.utah.edu/~u0875014/hg002.cram")
os.system("cd data && wget --continue https://home.chpc.utah.edu/~u0875014/hg002.cram.crai")
os.system("cd data && wget --continue https://home.chpc.utah.edu/~u0875014/hg19.fa.gz")
# Sanity check: this command runs inside `data/`, so file paths must be
# relative to that directory. The original used `data/hg19.fa.gz` and
# `data/hg002.cram`, which would have resolved to the non-existent
# `data/data/...` after the `cd data`.
os.system("cd data && samtools view -T hg19.fa.gz hg002.cram | head -n 1")
```

# Create depth profile for an alignment file

pyd4 provides a very fast way to get the per-base depth profile of an alignment file, which usually takes less than 2 minutes on a laptop. At the beginning of this tutorial, we profile the per-base depth from the CRAM file we previously downloaded.
``` import pyd4 # Create a D4 file from depth, generate data/hg002.d4 file_handle = pyd4.bam_to_d4("data/hg002.cram", reference_genome = "data/hg19.fa.gz.fai", output = "data/hg002.d4") ``` # Open a D4 file on disk and load data ``` from pyd4 import D4File # To open file, you can simply initialize a D4File instance file = D4File("data/hg002.d4") # To load data as numpy array from the file chr1_data = file["1"] print(chr1_data.mean()) ``` # Visualization data in d4 file You can also use `D4File.resample` to get windowed data of a chromosome and return it as numpy array. Then you can use the numpy array to visualize the depth data. ``` from pyd4 import D4File from matplotlib import pyplot as plt # To open file, you can simply initialize a D4File instance file = D4File("data/hg002.d4") chr1_data = file.resample("1", bin_size = 10000) x = [i * 10000 / 1000000 for i, _ in enumerate(chr1_data)] plt.title("Chromosome 1") plt.xlabel("MB") plt.ylabel("Depth") plt.plot(x,chr1_data) plt.show() ``` # Create D4 file to store analysis results You can also use D4 file to save your analysis results in python. The following example gives an example that saves bit flags that indicates if the value is greater than the mean depth for chromosome 1. ``` from pyd4 import D4File file = D4File("data/hg002.d4") data = file["1"] # The second parameter specify that we only want the output contains chromosome 1 # If this parameter is not provided the output file will use exactly same genome defined in the input file output_writer = file.create_on_same_genome("data/chr1_flags_gt50.d4", ["1"]) \ .for_bit_array() \ .get_writer() # Write the numpy array for chromosome 1. # The second parameter indicates the locus where the first element of the data array should be. output_writer.write_np_array("1", 0, data > data.mean()) del output_writer ``` # Create D4 file without an existing input as template To create a D4 file from scratch, you need to use the `D4Builder` class. 
You can also use the `add_sequence` method to define your own chromosomes in the file.

```
from pyd4 import D4Builder
import numpy

writer = D4Builder("data/my_d4_file.d4") \
    .add_sequence("1", 1000000) \
    .add_sequence("2", 1000000) \
    .set_dict_bits(2) \
    .get_writer()
chr1_data = numpy.ones((1000000,), dtype = "int32")
chr2_data = numpy.zeros((1000000,), dtype = "int32")
writer.write_np_array("1", 0, chr1_data)
writer.write_np_array("2", 0, chr2_data)
del writer
```

# Load previously saved analysis results

```
from pyd4 import D4File
# First file
file = D4File("data/chr1_flags_gt50.d4")
print(file.chroms())
print(file["1"].sum())
# Second file we created
file = D4File("data/my_d4_file.d4")
print(file.mean(["1", "2"]))
```

# More about loading raw data

- Load the entire chromosome: `file["1"]`
- Load a region: `file["1:12345-20000"]` or `file[("1", 12345, 20000)]`

```
from pyd4 import D4File
file = D4File("data/hg002.d4")
print("Mean depth of chr1:", file["1"].mean())
print("Mean depth of chr1:12345-20000", file["1:12345-20000"].mean())
print("Mean depth of chr1:12345-20000", file[("1", 12345, 20000)].mean())
print("Mean depth of chr1:90000000-", file["1:90000000-"].mean())
```

# Parallel Tasks

Previously we have shown that we can load data from D4 as a numpy array and analyze it. However, numpy is mostly single threaded, which doesn't take advantage of multi-core CPUs. The D4 library provides a very efficient API to parallelize many different kinds of analysis for the D4 format. In Python we can also use those efficient Rust routines to accelerate our analysis.
Currently, we have the following tasks exposed to Python:

- `D4File.mean`
- `D4File.median`
- `D4File.percentile`
- `D4File.histogram`

```
from pyd4 import D4File
from matplotlib import pyplot as plt
import time

file = D4File("data/hg002.d4")
regions = [(chr, 0, size) for chr, size in file.chroms()]

# Note you can use the batch mode, in which the API processes all the regions in a fully parallel fashion
t = time.monotonic()
file.mean(regions)
print("Time to compute mean depth for each chromosome with PyD4 task API: %.2fs"%(time.monotonic() - t))

t = time.monotonic()
for chr, start, end in regions:
    file[(chr, start, end)].mean()
print("Time to compute mean depth for each chromosome with NumPy: %.2fs"%(time.monotonic() - t))
```

# Accessing D4 file on a static HTTP server

It's possible to access a D4 file hosted on a static HTTP server, for example Amazon S3, without downloading the file to the local machine. The API remains the same as the local file API: when the `D4File` is instantiated with a URL starting with `http://` or `https://`, PyD4 will enable the HTTP accessing support.

Although you can call the same API, there are a few restrictions for a D4 file opened remotely, because scanning the entire file is a very expensive operation on an HTTP server:

- Loading the raw data is slower, because the network connection bandwidth is much smaller than the bandwidth of the internal bus.
- If the remote file contains a secondary frame index (SFI), the random accessing speed may be faster.
- The task API currently only supports computing the mean depth when the remote file contains a data index.
- For all unsupported operations, the D4File API will throw an exception.

Note: To create an index in the file, please use the `d4tools index build` subcommand.

- To create the SFI, please run `d4tools index build -s data/hg002.d4`
- To create the data index, please run `d4tools index build -S data/hg002.d4`

This example will use a sample D4 file with both indexes hosted on Amazon S3.
```
from pyd4 import D4File

# Open a D4 file directly from a URL hosted on S3.
file = D4File('https://d4-format-testing.s3.us-west-1.amazonaws.com/hg002.d4')

# Basic API doesn't change
chroms = file.chroms()
print("There are ", len(chroms), " chromosomes defined in the file:", [name for name, _ in chroms])

# Loading raw data works the same as for a local file.
# Note that in this case loading chromosome 1 will be relatively slow due to the network connection speed
raw_first_mb_of_chr1 = file["1:0-1000000"]
print(raw_first_mb_of_chr1)

# You can load from any locus
raw_second_mb_of_chr2 = file["2:1000000-2000000"]
print(raw_second_mb_of_chr2)

# And you can utilize the data index for fast summarization, even when the range is very large
print("Mean depth of chr1: ", file.mean("1"))
```
github_jupyter
# Fast Fourier Transform Forecasting Model (FFT) The following is a brief demonstration of the FFT forecasting model. This model is especially suited for data that is very seasonal. The datasets chosen for this demonstration were selected accordingly. ``` # fix python path if working locally from utils import fix_pythonpath_if_working_locally fix_pythonpath_if_working_locally() %load_ext autoreload %autoreload 2 %matplotlib inline import pandas as pd import numpy as np import matplotlib.pyplot as plt from darts import TimeSeries from darts.models import ( FFT, AutoARIMA, ExponentialSmoothing, Prophet, Theta ) from darts.metrics import mae from darts.utils.missing_values import fill_missing_values from darts.datasets import TemperatureDataset, AirPassengersDataset, EnergyDataset import warnings warnings.filterwarnings("ignore") import logging logging.disable(logging.CRITICAL) ``` ## Read and format Here we simply read the CSV file containing daily temperatures, and transform the values into the desired format. ``` ts = TemperatureDataset().load() ``` ## Constructing `TimeSeries` instances for training and validation ``` train, val = ts.split_after(pd.Timestamp('19850701')) train.plot(label='train') val.plot(label='val') ``` ## Basic FFT model ``` model = FFT(required_matches=set(), nr_freqs_to_keep=None) model.fit(train) pred_val = model.predict(len(val)) ``` The plot below shows us that a simple DFT with a random train-test split will most likely lead to bad results. Upon closer inspection we can see that the prediction (in purple) simply repeats the training set (blue). This is the standard behavior of the DFT, and by itself it is quite useless, since repeating our training set could be done much more efficiently. Three improvements were made to this approach. 
``` train.plot(label='train') val.plot(label='val') pred_val.plot(label='prediction') print("MAE:", mae(pred_val, val)) ``` ## Improvement 1: Crop the training set The first improvement consists of cropping the training set before feeding it to the FFT algorithm such that the first timestamp in the cropped series matches the first timestamp to be predicted in terms of seasonality, i.e. it has the same month, day, weekday, time of day, etc. We could achieve this by passing the optional argument `required_matches` to the FFT constructor that explicitly tells our model which timestamp attributes are relevant. But actually, if we don't set it manually, the model will attempt to automatically find the pd.Timestamp attributes that are relevant and crop the training set accordingly (which we will do here). ``` model = FFT(nr_freqs_to_keep=None) model.fit(train) pred_val = model.predict(len(val)) ``` We can see that the results look like the seasonality of the predictions nicely aligns with the seasonality of the validation set. However, we are still just repeating the training set, including all of the noise. Looking at the error we can see that this is still a pretty bad forecast. ``` train.plot(label='train') val.plot(label='val') pred_val.plot(label='predict') print("MAE:", mae(pred_val, val)) ``` ## Improvement 2: Filtering out low-amplitude waves The decomposition of the DFT into the frequency domain allows us to selectively filter out waves with low amplitudes. This allows us to keep strong seasonal trends while discarding some noise. This is achieved in the FFT model by passing the optional argument `nr_freqs_to_keep`. This argument represents the total number of frequencies that will be kept. For instance, if a value of 20 is passed, only the 20 frequencies with the highest amplitudes will be utilized. The default value is set to 10. ``` model = FFT(nr_freqs_to_keep=20) model.fit(train) pred_val = model.predict(len(val)) ``` We get a signal that is less noisy. 
Depending on the data set, this might be a better forecast. Looking at the error metric, we can see that this model performs significantly better than the previous models.

```
train.plot(label='train')
val.plot(label='val')
pred_val.plot(label='predict')
print("MAE:", mae(pred_val, val))
```

## Improvement 3: Detrending

Let's try out a different data set that has a global upward trend.

```
ts_2 = AirPassengersDataset().load()
train, val = ts_2.split_after(pd.Timestamp('19551201'))
train.plot(label='train')
val.plot(label='val')
model = FFT()
model.fit(train)
pred_val = model.predict(len(val))
```

Clearly, our model fails completely at incorporating the upward trend. Due to the trend, our model also fails to recognize the monthly seasonality.

```
train.plot(label='train')
val.plot(label='val')
pred_val.plot(label='prediction')
```

This problem can be solved by setting the optional trend argument to either 'poly' or 'exp', which fits a polynomial or exponential function to the data and subtracts it before moving on to the DFT. When predicting, the trend is added back again.

```
model = FFT(trend='poly')
model.fit(train)
pred_val = model.predict(len(val))
```

We have a much better prediction now.

```
train.plot(label='train')
val.plot(label='val')
pred_val.plot(label='prediction')
```

## New Data: Hourly Nuclear Energy Generation

```
ts_3 = EnergyDataset().load()
ts_3 = fill_missing_values(ts_3, 'auto')
ts_3 = ts_3['generation nuclear']
train, val = ts_3.split_after(pd.Timestamp('2017-07-01'))
train.plot(label='train')
val.plot(label='val')
```

Instead of simply looking at the performance of the FFT model, we can also look at how a bunch of other forecasting models perform on this new data set in terms of MAE. Surprisingly, on this dataset, the FFT model outperforms all of the others (at least with their default parameters). Granted, this dataset was specifically chosen because of its highly seasonal nature. However, this shows us that there are use cases for FFT.
Furthermore, the FFT model has a much shorter running time than the other models! ``` models = [ AutoARIMA(), Prophet(), ExponentialSmoothing(), Theta(), FFT() ] for model in models: model.fit(train) pred_val = model.predict(len(val)) print(str(model) + " MAE: " + str(mae(pred_val, val))) ```
github_jupyter
# Introduction Involve 10 Models Clustering <br> <br> <font color = 'blue'> <b>Content: </b> 1. [Prepare Problems] * [Load Libraries](#2) * [Load Dataset](#3) 1. [Models] * [K-Means](#4) * [Affinity Propagation](#5) * [BIRCH](#6) * [DBSCAN](#7) * [Mini Batch K-Means](#8) * [Mean Shift](#9) * [OPTICS](#10) * [Spectral Clustering](#11) * [Gaussian Mixture Model](#12) * [Agglomerative Clustering](#13) 1. [References](#14) <a id = "2"></a><br> ## Load Libraries ``` import pandas as pd import matplotlib.pyplot as plt import numpy as np from numpy import unique from sklearn.cluster import KMeans from sklearn.cluster import AffinityPropagation from sklearn.cluster import Birch from sklearn.cluster import DBSCAN from sklearn.cluster import MiniBatchKMeans from sklearn.cluster import MeanShift from sklearn.cluster import OPTICS from sklearn.cluster import SpectralClustering from sklearn.mixture import GaussianMixture from sklearn.cluster import AgglomerativeClustering from sklearn import metrics # import os for dirname, _, filenames in os.walk('/kaggle/input'): for filename in filenames: print(os.path.join(dirname,filename)) ``` <a id = "3"></a><br> ## Load Dataset ``` data = pd.read_csv('Mall_Customers.csv', index_col=0) data.head() data.drop(['Genre'], axis=1, inplace=True) data.drop(['Age'], axis=1, inplace=True) data.head() ``` ### Taking full fraction of data It shuffles the data ``` data = data.sample(frac=1) data.head() ``` <a id = "4"></a><br> ## 1 - K-Means ``` k_means = KMeans(n_clusters=2) k_means.fit(data) ``` ### Labels ``` k_means.labels_ np.unique(k_means.labels_) centers = k_means.cluster_centers_ centers plt.figure(figsize=(10, 8)) plt.scatter(data['Annual Income (k$)'], data['Spending Score (1-100)'], c=k_means.labels_, s=100) plt.scatter(centers[:,0], centers[:,1], color='blue', marker='s', s=200) plt.xlabel('Annual Income') plt.ylabel('Spending Score') plt.title('K-Means with 2 clusters') plt.show() ``` * A measure of how similar a point is to other 
points in its own cluster and how different it is from points in other clusters. ``` from sklearn.metrics import silhouette_score score = silhouette_score (data, k_means.labels_) print("Score = ", score) wscc = [] for i in range(1,15): kmeans = KMeans(n_clusters=i, init="k-means++",random_state=0) kmeans.fit(data) wscc.append(kmeans.inertia_) plt.plot(range(1,15),wscc,marker="*",c="black") plt.title("Elbow plot for optimal number of clusters") ``` ### KMeans clustering with 5 clusters ``` k_means = KMeans(n_clusters=5) k_means.fit(data) np.unique(k_means.labels_) centers = k_means.cluster_centers_ centers ``` ### Displaying Data in 5 cluster form with 5 centroids ``` plt.figure(figsize=(10, 8)) plt.scatter(data['Annual Income (k$)'], data['Spending Score (1-100)'], c=k_means.labels_, s=100) plt.scatter(centers[:,0], centers[:,1], color='blue', marker='s', s=200) plt.xlabel('Annual Income') plt.ylabel('Spending Score') plt.title('5 Cluster K-Means') plt.show() ``` Silhouette Score: This is a better measure to decide the number of clusters to be formulated from the data. ``` score = metrics.silhouette_score(data, k_means.labels_) print("Score = ", score) ``` This function returns the Silhouette Coefficient for each sample. The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. ``` score1 = metrics.silhouette_samples(data, k_means.labels_, metric='euclidean') print("Score = ", score1) ``` <a id = "5"></a><br> ## 2 - Affinity Propagation Affinity Propagation involves finding a set of exemplars that best summarize the data. 
``` model_aff = AffinityPropagation(damping=0.9) model_aff.fit(data) # yhat_aff = model_aff.predict(data) clusters_aff = unique(yhat_aff) print("Clusters of Affinity Prop.",clusters_aff) labels_aff = model_aff.labels_ centroids_aff = model_aff.cluster_centers_ plt.figure(figsize=(10, 8)) plt.scatter(data['Annual Income (k$)'], data['Spending Score (1-100)'], c=labels_aff, s=100) plt.scatter(centroids_aff[:,0], centroids_aff[:,1], color='red', marker='*', s=200) plt.xlabel('Annual Income') plt.ylabel('Spending Score') plt.title('Affinity Propagation') plt.grid() plt.show() score_aff = metrics.silhouette_score(data,labels_aff) print("Score of Affinity Propagation = ", score_aff) ``` <a id = "6"></a><br> ## 3 - BIRCH BIRCH Clustering (BIRCH is short for Balanced Iterative Reducing and Clustering using Hierarchies) involves constructing a tree structure from which cluster centroids are extracted. ``` model_br = Birch(threshold=0.01, n_clusters=5) model_br.fit(data) # yhat_br = model_br.predict(data) clusters_br = unique(yhat_br) print("Clusters of Birch",clusters_br) labels_br = model_br.labels_ score_br = metrics.silhouette_score(data,labels_br) print("Score of Birch = ", score_br) ``` <a id = "7"></a><br> ## 4- DBSCAN * DBSCAN Clustering (where DBSCAN is short for Density-Based Spatial Clustering of Applications with Noise) involves finding high-density areas in the domain and expanding those areas of the feature space around them as clusters. * For this data, could not get a good result. 
``` # dbscan clustering from numpy import unique from numpy import where data_X = data.iloc[:,[0,1]].values # define the model model = DBSCAN(eps=0.7, min_samples=90) # fit model and predict clusters yhat = model.fit_predict(data_X) # retrieve unique clusters clusters = unique(yhat) # create scatter plot for samples from each cluster for cluster in clusters: # get row indexes for samples with this cluster row_ix = where(yhat == cluster) # create scatter of these samples plt.scatter(data_X[row_ix, 0], data_X[row_ix, 1]) # show the plot plt.show() ``` <a id = "8"></a><br> ## 5 - Mini Batch K-Means * Mini-Batch K-Means is a modified version of k-means that makes updates to the cluster centroids using mini-batches of samples rather than the entire dataset, which can make it faster for large datasets, and perhaps more robust to statistical noise. ``` model_mini = MiniBatchKMeans(n_clusters=2) model_mini.fit(data) # yhat_mini = model_mini.predict(data) clusters_mini = unique(yhat_mini) print("Clusters of Mini Batch KMeans.",clusters_mini) labels_mini = model_mini.labels_ centroids_mini = model_mini.cluster_centers_ wscc = [] for i in range(1,15): mkmeans = MiniBatchKMeans(n_clusters=i, init="k-means++",random_state=0) mkmeans.fit(data) wscc.append(mkmeans.inertia_) plt.plot(range(1,15),wscc,marker="*",c="black") plt.title("Elbow plot for Mini Batch KMeans") model_mini = MiniBatchKMeans(n_clusters=5) model_mini.fit(data) # yhat_mini = model_mini.predict(data) clusters_mini = unique(yhat_mini) print("Clusters of Mini Batch KMeans.",clusters_mini) labels_mini = model_mini.labels_ centroids_mini = model_mini.cluster_centers_ plt.figure(figsize=(10, 8)) plt.scatter(data['Annual Income (k$)'], data['Spending Score (1-100)'], c=labels_mini, s=100) plt.scatter(centroids_mini[:,0], centroids_mini[:,1], color='red', marker='*', s=200) plt.xlabel('Annual Income') plt.ylabel('Spending Score') plt.title('Mini Batch KMeans') plt.grid() plt.show() score_mini = 
metrics.silhouette_score(data,labels_mini) print("Score of Mini Batch KMeans = ", score_mini) ``` <a id = "9"></a><br> ## 6 - Mean Shift * Mean shift clustering involves finding and adapting centroids based on the density of examples in the feature space. ``` model_ms = MeanShift(bandwidth=25) model_ms.fit(data) # yhat_ms = model_ms.predict(data) clusters_ms = unique(yhat_ms) print("Clusters of Mean Shift.",clusters_ms) labels_ms = model_ms.labels_ centroids_ms = model_ms.cluster_centers_ plt.figure(figsize=(10, 8)) plt.scatter(data['Annual Income (k$)'], data['Spending Score (1-100)'], c=labels_ms, s=100) plt.scatter(centroids_ms[:,0], centroids_ms[:,1], color='red', marker='*', s=200) plt.xlabel('Annual Income') plt.ylabel('Spending Score') plt.title('Mean Shift') plt.grid() plt.show() score_ms = metrics.silhouette_score(data,labels_ms) print("Score of Mean Shift = ", score_ms) ``` <a id = "10"></a><br> ## 7 - OPTICS * OPTICS clustering (where OPTICS is short for Ordering Points To Identify the Clustering Structure) is a modified version of DBSCAN described above. * In this case, I could not achieve a reasonable result on this dataset. ``` model_op = OPTICS(eps=0.8, min_samples=10) # yhat_op = model_op.fit_predict(data) clusters_op = unique(yhat_op) print("Clusters of Optics.",clusters_op) labels_op = model_op.labels_ score_op = metrics.silhouette_score(data,labels_op) print("Score of Optics = ", score_op) ``` <a id = "11"></a><br> ## 8 - Spectral Clustering * Spectral Clustering is a general class of clustering methods, drawn from linear algebra. 
``` model_sc = SpectralClustering(n_clusters=5) # yhat_sc = model_sc.fit_predict(data) clusters_sc = unique(yhat_sc) print("Clusters of Spectral Clustering.",clusters_sc) labels_sc = model_sc.labels_ score_sc = metrics.silhouette_score(data,labels_sc) print("Score of Spectral Clustering = ", score_sc) ``` <a id = "12"></a><br> ## 9 - Gaussian Mixture Model * A Gaussian mixture model summarizes a multivariate probability density function with a mixture of Gaussian probability distributions as its name suggests. ``` from numpy import unique from numpy import where data_X = data.iloc[:,[0,1]].values model_gb = GaussianMixture(n_components=5) model_gb.fit(data_X) # yhat_gb = model_gb.predict(data_X) clusters_gb = unique(yhat_gb) # create scatter plot for samples from each cluster for cluster in clusters_gb: # get row indexes for samples with this cluster row_ix = where(yhat_gb == cluster) # create scatter of these samples plt.scatter(data_X[row_ix, 0], data_X[row_ix, 1]) # show the plot plt.show() score_sc = metrics.silhouette_score(data,yhat_gb) print("Score of Gaussian Mixture = ", score_sc) ``` <a id = "13"></a><br> ## 10 - Agglomerative Clustering * Agglomerative clustering involves merging examples until the desired number of clusters is achieved. ``` model_agg = AgglomerativeClustering(n_clusters=5) # yhat_agg = model_agg.fit_predict(data) clusters_agg = unique(yhat_agg) print("Clusters of Agglomerative Clustering.",clusters_agg) labels_agg = model_agg.labels_ score_agg = metrics.silhouette_score(data,labels_agg) print("Score of Agglomerative Clustering = ", score_agg) ``` # If you like my kernel, please upvote <a id = "14"></a><br> ## References * https://machinelearningmastery.com/clustering-algorithms-with-python/
github_jupyter
# Robot Class In this project, we'll be localizing a robot in a 2D grid world. The basis for simultaneous localization and mapping (SLAM) is to gather information from a robot's sensors and motions over time, and then use information about measurements and motion to re-construct a map of the world. ### Uncertainty As you've learned, robot motion and sensors have some uncertainty associated with them. For example, imagine a car driving up hill and down hill; the speedometer reading will likely overestimate the speed of the car going up hill and underestimate the speed of the car going down hill because it cannot perfectly account for gravity. Similarly, we cannot perfectly predict the *motion* of a robot. A robot is likely to slightly overshoot or undershoot a target location. In this notebook, we'll look at the `robot` class that is *partially* given to you for the upcoming SLAM notebook. First, we'll create a robot and move it around a 2D grid world. Then, **you'll be tasked with defining a `sense` function for this robot that allows it to sense landmarks in a given world**! It's important that you understand how this robot moves, senses, and how it keeps track of different landmarks that it sees in a 2D grid world, so that you can work with it's movement and sensor data. --- Before we start analyzing robot motion, let's load in our resources and define the `robot` class. You can see that this class initializes the robot's position and adds measures of uncertainty for motion. You'll also see a `sense()` function which is not yet implemented, and you will learn more about that later in this notebook. 
```
# import some resources
import numpy as np
import matplotlib.pyplot as plt
import random
%matplotlib inline

# the robot class
class robot:

    # --------
    # init:
    #   creates a robot with the specified parameters and initializes
    #   the location (self.x, self.y) to the center of the world
    #
    def __init__(self, world_size = 100.0, measurement_range = 30.0,
                 motion_noise = 1.0, measurement_noise = 1.0):
        self.measurement_noise = 0.0
        self.world_size = world_size
        self.measurement_range = measurement_range
        self.x = world_size / 2.0
        self.y = world_size / 2.0
        self.motion_noise = motion_noise
        self.measurement_noise = measurement_noise
        self.landmarks = []
        self.num_landmarks = 0

    # returns a positive, random float
    def rand(self):
        return random.random() * 2.0 - 1.0

    # --------
    # move: attempts to move robot by dx, dy. If outside world
    #       boundary, then the move does nothing and instead returns failure
    #
    def move(self, dx, dy):
        x = self.x + dx + self.rand() * self.motion_noise
        y = self.y + dy + self.rand() * self.motion_noise
        if x < 0.0 or x > self.world_size or y < 0.0 or y > self.world_size:
            return False
        else:
            self.x = x
            self.y = y
            return True

    # --------
    # sense: returns x- and y- distances to landmarks within visibility range
    #        because not all landmarks may be in this range, the list of measurements
    #        is of variable length. Set measurement_range to -1 if you want all
    #        landmarks to be visible at all times
    #
    def sense(self):
        ''' This function does not take in any parameters, instead it references
            internal variables (such as self.landmarks) to measure the distance
            between the robot and any landmarks that the robot can see (that are
            within its measurement range). This function returns a list of
            landmark indices, and the measured distances (dx, dy) between the
            robot's position and said landmarks. This function should account
            for measurement_noise and measurement_range. One item in the
            returned list should be in the form: [landmark_index, dx, dy]. '''
        measurements = []
        # iterate through all of the landmarks in the world
        for index, landmark in enumerate(self.landmarks):
            # 1. compute dx and dy, the distances between the robot and the landmark,
            # 2. account for measurement noise by adding a noise component drawn
            #    from [-1.0, 1.0) * measurement_noise (same scheme as in `move`)
            dx = landmark[0] - self.x + self.rand() * self.measurement_noise
            dy = landmark[1] - self.y + self.rand() * self.measurement_noise
            # 3. record only the landmarks whose measured distances fall within
            #    measurement_range (a range of -1 makes every landmark visible)
            if self.measurement_range == -1 or \
               (abs(dx) <= self.measurement_range and abs(dy) <= self.measurement_range):
                measurements.append([index, dx, dy])
        # return the final, complete list of measurements
        return measurements

    # --------
    # make_landmarks:
    # make random landmarks located in the world
    #
    def make_landmarks(self, num_landmarks):
        self.landmarks = []
        for i in range(num_landmarks):
            self.landmarks.append([round(random.random() * self.world_size),
                                   round(random.random() * self.world_size)])
        self.num_landmarks = num_landmarks

    # called when print(robot) is called; prints the robot's location
    def __repr__(self):
        return 'Robot: [x=%.5f y=%.5f]' % (self.x, self.y)
```
## Define a world and a robot Next, let's instantiate a robot object. As you can see in `__init__` above, the robot class takes in a number of parameters including a world size and some values that indicate the sensing and movement capabilities of the robot. In the next example, we define a small 10x10 square world, a measurement range that is half that of the world and small values for motion and measurement noise. These values will typically be about 10 times larger, but we just want to demonstrate this behavior on a small scale. You are also free to change these values and note what happens as your robot moves!
``` world_size = 10.0 # size of world (square) measurement_range = 5.0 # range at which we can sense landmarks motion_noise = 0.2 # noise in robot motion measurement_noise = 0.2 # noise in the measurements # instantiate a robot, r r = robot(world_size, measurement_range, motion_noise, measurement_noise) # print out the location of r print(r) ``` ## Visualizing the World In the given example, we can see/print out that the robot is in the middle of the 10x10 world at (x, y) = (5.0, 5.0), which is exactly what we expect! However, it's kind of hard to imagine this robot in the center of a world, without visualizing the grid itself, and so in the next cell we provide a helper visualization function, `display_world`, that will display a grid world in a plot and draw a red `o` at the location of our robot, `r`. The details of how this function works can be found in the `helpers.py` file in the home directory; you do not have to change anything in this `helpers.py` file. ``` # import helper function from helpers import display_world # define figure size plt.rcParams["figure.figsize"] = (5,5) # call display_world and display the robot in its grid world print(r) display_world(int(world_size), [r.x, r.y]) ``` ## Movement Now you can really picture where the robot is in the world! Next, let's call the robot's `move` function. We'll ask it to move some distance `(dx, dy)` and we'll see that this motion is not perfect by the placement of our robot `o` and by the printed out position of `r`. Try changing the values of `dx` and `dy` and/or running this cell multiple times; see how the robot moves and how the uncertainty in robot motion accumulates over multiple movements. #### For a `dx` = 1, does the robot move *exactly* one spot to the right? What about `dx` = -1? What happens if you try to move the robot past the boundaries of the world?
``` # choose values of dx and dy (negative works, too) dx = 1 dy = 2 r.move(dx, dy) # print out the exact location print(r) # display the world after movement, note that this is the same call as before # the robot tracks its own movement display_world(int(world_size), [r.x, r.y]) ``` ## Landmarks Next, let's create landmarks, which are measurable features in the map. You can think of landmarks as things like notable buildings, or something smaller such as a tree, rock, or other feature. The robot class has a function `make_landmarks` which randomly generates locations for the number of specified landmarks. Try changing `num_landmarks` or running this cell multiple times to see where these landmarks appear. We have to pass these locations as a third argument to the `display_world` function and the list of landmark locations is accessed similar to how we find the robot position `r.landmarks`. Each landmark is displayed as a purple `x` in the grid world, and we also print out the exact `[x, y]` locations of these landmarks at the end of this cell. ``` # create any number of landmarks num_landmarks = 3 r.make_landmarks(num_landmarks) # print out our robot's exact location print(r) # display the world including these landmarks display_world(int(world_size), [r.x, r.y], r.landmarks) # print the locations of the landmarks print('Landmark locations [x,y]: ', r.landmarks) ``` ## Sense Once we have some landmarks to sense, we need to be able to tell our robot to *try* to sense how far they are away from it. It will be up to you to code the `sense` function in our robot class. The `sense` function uses only internal class parameters and returns a list of the measured/sensed x and y distances to the landmarks it senses within the specified `measurement_range`. ### TODO: Implement the `sense` function Follow the `##TODO's` in the class code above to complete the `sense` function for the robot class.
Once you have tested out your code, please **copy your complete `sense` code to the `robot_class.py` file in the home directory**. By placing this complete code in the `robot_class` Python file, we will be able to reference this class in a later notebook. The measurements have the format, `[i, dx, dy]` where `i` is the landmark index (0, 1, 2, ...) and `dx` and `dy` are the measured distance between the robot's location (x, y) and the landmark's location (x, y). This distance will not be perfect since our sense function has some associated `measurement noise`. --- In the example in the following cell, we have given our robot a range of `5.0` so any landmarks that are within that range of our robot's location, should appear in a list of measurements. Not all landmarks are guaranteed to be in our visibility range, so this list will be variable in length. *Note: the robot's location is often called the **pose** or `[Pxi, Pyi]` and the landmark locations are often written as `[Lxi, Lyi]`. You'll see this notation in the next notebook.* ``` # try to sense any surrounding landmarks measurements = r.sense() # this will print out an empty list if `sense` has not been implemented print(measurements) ``` **Refer back to the grid map above. Do these measurements make sense to you? Are all the landmarks captured in this list (why/why not)?** --- ## Data #### Putting it all together To perform SLAM, we'll collect a series of robot sensor measurements and motions, in that order, over a defined period of time. Then we'll use only this data to re-construct the map of the world with the robot and landmark locations. You can think of SLAM as performing what we've done in this notebook, only backwards. Instead of defining a world and robot and creating movement and sensor data, it will be up to you to use movement and sensor measurements to reconstruct the world!
In the next notebook, you'll see this list of movements and measurements (which you'll use to re-construct the world) listed in a structure called `data`. This is an array that holds sensor measurements and movements in a specific order, which will be useful to call upon when you have to extract this data and form constraint matrices and vectors. `data` is constructed over a series of time steps as follows: ``` data = [] # after a robot first senses, then moves (one time step) # that data is appended like so: data.append([measurements, [dx, dy]]) # for our example movement and measurement print(data) # in this example, we have only created one time step (0) time_step = 0 # so you can access robot measurements: print('Measurements: ', data[time_step][0]) # and its motion for a given time step: print('Motion: ', data[time_step][1]) ``` ### Final robot class Before moving on to the last notebook in this series, please make sure that you have copied your final, completed `sense` function into the `robot_class.py` file in the home directory. We will be using this file in the final implementation of slam!
github_jupyter
``` import pandas as pd import matplotlib.pyplot as plt import matplotlib.dates as mdates import numpy as np import sklearn as sk import seaborn as sns import statsmodels.api as sm import statsmodels.formula.api as smf import statsmodels.tsa.api as smt import itertools import warnings import scipy.signal as sp import math from statsmodels.tsa.stattools import acf, pacf from datetime import date, timedelta from dateutil.relativedelta import relativedelta import datetime #importing zonal data from 2015 to illustrate dominance of Toronto Zone zone2015 = pd.read_csv("ZonalDemands_2015.csv", parse_dates=["Date"],index_col="Date") #importing zonal data from 2015-2017, only taking total consumption demand2013 = pd.read_csv("ZonalDemands_2013.csv", usecols = [0,1,2]) demand2014 = pd.read_csv("ZonalDemands_2014.csv", usecols = [0,1,2]) demand2015 = pd.read_csv("ZonalDemands_2015.csv", usecols = [0,1,2]) demand2016 = pd.read_csv("ZonalDemands_2016.csv", usecols = [0,1,2]) dTot_test = pd.read_csv("ZonalDemands_2017.csv", usecols = [0,1,2]) #merge all demand data dTot = pd.concat([demand2013,demand2014,demand2015,demand2016]) #cleaning and changing to pandas datetime?? 
#importanting weather data temp = pd.read_csv("temperature.csv", usecols = [0,26], parse_dates=["datetime"], index_col="datetime") #convert to degrees celsius temp = temp - 273.15 dTot['Date_Hour'] = pd.to_datetime(dTot.Date) + pd.to_timedelta(dTot.Hour, unit='h') dTot = dTot.drop(['Date','Hour'],axis = 1) dTot = dTot.set_index('Date_Hour') dTot_test['Date_Hour'] = pd.to_datetime(dTot_test.Date) + pd.to_timedelta(dTot_test.Hour, unit='h') dTot_test = dTot_test.drop(['Date','Hour'],axis = 1) dTot_test = dTot_test.set_index('Date_Hour') from statsmodels.tsa.stattools import adfuller def test_stationarity(timeseries): #Determing rolling statistics rolmean = pd.rolling_mean(timeseries, window=12) rolstd = pd.rolling_std(timeseries, window=12) #Plot rolling statistics: orig = plt.plot(timeseries, color='blue',label='Original') mean = plt.plot(rolmean, color='red', label='Rolling Mean') std = plt.plot(rolstd, color='black', label = 'Rolling Std') plt.legend(loc='best') plt.title('Rolling Mean & Standard Deviation') plt.show(block=False) #Perform Dickey-Fuller test: print ('Results of Dickey-Fuller Test:') dftest = adfuller(timeseries, autolag='AIC') dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-value','#Lags Used','Number of Observations Used']) for key,value in dftest[4].items(): dfoutput['Critical Value (%s)'%key] = value print (dfoutput) dTot = dTot.resample('D').mean() dTot_log = (dTot.Total/dTot.Total.mean()) plt.plot(dTot_log) expwighted_avg = pd.ewma(dTot_log, halflife=365) plt.plot(dTot_log) plt.plot(expwighted_avg, color='red') from statsmodels.tsa.seasonal import seasonal_decompose decomposition = seasonal_decompose(dTot_log) #trend = decomposition.trend #seasonal = decomposition.seasonal #residual = decomposition.resid #dTot_log_decompose = residual #dTot_log_decompose.dropna(inplace=True) #test_stationarity(dTot_log_decompose.Total) dTot_log_ewma_diff = dTot_log - expwighted_avg test_stationarity(dTot_log_ewma_diff) #print(dTot_log_decompose) 
fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot() fig = sm.graphics.tsa.plot_acf(dTot_log_ewma_diff, lags=40, ax=ax1) ax2 = fig.add_subplot() fig = sm.graphics.tsa.plot_pacf(dTot_log_ewma_diff, lags=40, ax=ax2) #find optimal hyperparameters p = d = q = range(0,1) #pdq = list(itertools.product(p,d,q)) #print('ARIMA{}-AIC{}'.format(pdq[i],results_AR.aic)) min_aic = 10000000 #min_aic_params = pdq[0] #warnings.filterwarnings("ignore") for i in range(0,len(pdq)+1): try: model = ARIMA(dTot_log_ewma_diff, order=pdq[i]) results_AR = model.fit(disp=-1) if abs(results_AR.aic) < min_aic: min_aic = abs(results_AR.aic) min_aic_param = 'ARIMA{}-AIC{}'.format(pdq[i],abs(results_AR.aic)) print('ARIMA{}-AIC{}'.format(pdq[i],results_AR.aic)) except: continue print(min_aic) print(min_aic_params) #make model plot transforms vs predicted tranforms from statsmodels.tsa.arima_model import ARIMA model = ARIMA(dTot_log_ewma_diff, order=(27, 0, 0)) results_AR = model.fit(disp=-1) plt.plot(dTot_log_ewma_diff) plt.plot(results_AR.fittedvalues, color='red') #print(results_AR.fittedvalues.tail()) #print(dTot_log_decompose) #print(sum((results_AR.fittedvalues-dTot_log_ewma_diff.Total)**2)) results_AR.summary() predictions_ARIMA_diff = pd.Series(results_AR.fittedvalues, copy=True) #pred_2017_diff = results_AR.predict(start = '2017-02-28', end = '2017-12-31') pred_2017_diff = pd.Series(results_AR.forecast(steps = 366)[0],copy = True) #index = ['2017-02-28','2017-03-31','2017-04-30','2017-05-31','2017-06-30','2017-07-31','2017-08-31','2017-09-30','2017-10-31','2017-11-30','2017-12-31'] #pred_2017_diff.index = pd.to_datetime(pred_2017_diff.index) pred_2017_diff.index = dTot_test.resample('D').mean().index print(pred_2017_diff) print(predictions_ARIMA_diff) predictions_ARIMA_diff_cumsum = predictions_ARIMA_diff #.cumsum() pred_2017_diff_cumsum = pred_2017_diff #.cumsum() print(pred_2017_diff_cumsum.head()) print(predictions_ARIMA_diff_cumsum.head()) predictions_ARIMA_log = 
pd.Series(expwighted_avg.iloc[0], index=dTot_log.index) #print(predictions_ARIMA_log ) predictions_ARIMA_log = predictions_ARIMA_log.add(predictions_ARIMA_diff_cumsum,fill_value=0) #predictions_ARIMA_log.head() pred_2017_log = dTot_log.iloc[0] #print(predictions_ARIMA_log ) pred_2017_log = pred_2017_log + pred_2017_diff_cumsum #predictions_ARIMA_log.head() print(pred_2017_log.head()) print(predictions_ARIMA_log.tail()) #multiply by mean predictions_ARIMA = predictions_ARIMA_log*dTot.Total.mean() pred_2017 = pred_2017_log*dTot.Total.mean() start = '2013' end = '2016' plt.plot(dTot[start:end]) plt.plot(predictions_ARIMA[start:end]) plt.plot(dTot_test.resample('D').mean()) plt.plot(pred_2017) plt.legend(['Actual','In Sample Prediction', '2017 Actual','2017 ARIMA Prediction']) print(np.sqrt(sum((predictions_ARIMA[start:end]-dTot[start:end].Total)**2)/len(dTot.Total))) ```
github_jupyter
# An example of the Nonlinear inference with multiple latent functions. This notebook briefly shows an example for an inverse problem where multiple latent functions are to be inferred. *Keisuke Fujii 3rd Oct. 2016* ## Synthetic observation Consider we observe a cylindrical transparent medium with multiple ($N$) lines-of-sight, as shown below. <img src=figs/abel_inversion.png width=240pt> <img src=figs/los_theta.png width=180pt> The local emission intensity $a(r)$, local flow velocity $v(r)$, and local temperature $\sigma(r)$ are functions of radius $r$. The local spectrum $\mathbf{y}_{i,j}$ from $i$-th shell with radius $r_i$ measured with the $j$-th sight line can be written as, $$ \mathbf{y}_{i,j} = \frac{a(r_i)}{\sqrt{2\pi}\sigma_i}\exp\left[ -\frac{(\lambda-\lambda_0 v_i/c \cos\theta_{i,j})^2}{2\sigma_i^2} \right] + \mathbf{e}_i $$ where $\theta_{i,j}$ is an angle between the $i$-th shell and $j$-th sight line. $\mathbf{e}$ is i.i.d. Gaussian noise. ## Non-linear model and transform We assume $\log \mathbf{a}$, $\mathbf{v}$, $\log \mathbf{\sigma}$ follow Gaussian process, with the identical kernel $\mathrm{K_a}$, $\mathrm{K_v}$, $\mathrm{K_\sigma}$, respectively. In this notebook, we infer $\mathbf{a},\mathbf{v},\mathbf{\sigma},$ by 1. Stochastic approximation of the variational Gaussian process. 2. Markov Chain Monte-Carlo (MCMC) method. ## Import several libraries including GPinv ``` import numpy as np %matplotlib inline import matplotlib.pyplot as plt import tensorflow as tf import sys # In ../testing/ dir, we prepared a small script for generating the above matrix A sys.path.append('../testing/') import make_LosMatrix # Import GPinv import GPinv ``` ## Synthetic signals Here, we make a synthetic measurement. The synthetic signal $\mathrm{y}$ is simulated from the ground truth solution $g_{true}$ and random Gaussian noise.
``` n = 30 # radial coordinate N = 40 # number of cite lines # radial coordinate r = np.linspace(0, 1., n) # synthetic latent function a = np.exp(-(r-0.3)*(r-0.3)/0.1) + np.exp(-(r+0.3)*(r+0.3)/0.1) v = 3.*np.exp(-(r-0.6)*(r-0.6)/0.05)*(r-0.6) sigma = 1.*np.exp(-(r-0.0)*(r-0.0)/0.3) + 0.2 # plotting the latent function plt.figure(figsize=(4,3)) plt.plot(r, a, label='a') plt.plot(r, v, label='v') plt.plot(r, sigma, label='$\sigma$') plt.plot([0,1], [0,0], '--k') plt.xlabel('r') plt.legend() ``` ### Prepare the synthetic signal. ``` # los height z = np.linspace(-0.9,0.9, N) # Los-matrix A = make_LosMatrix.make_LosMatrix(r, z) cosTheta = make_LosMatrix.make_cosTheta(r,z) print(A.shape, cosTheta.shape) # Wavelength coordinate # number of coordinate m = 50 # coordinate lam = np.linspace(-3,3,50) # true (synthetic) signals. f_true = np.zeros((m, N, n)) for i in range(N): for j in range(n): f_true[:,i,j] = a[j] / (np.sqrt(2*np.pi)*sigma[j]) * np.exp(-0.5*((lam-v[j]*cosTheta[i,j])/sigma[j])**2) # synthetic observation y_syn = np.zeros((m,N)) for i in range(N): for j in range(n): y_syn[:,i] += A[i,j] * f_true[:,i,j] y_syn+=np.random.randn(m,N)*0.02 # plot plt.figure(figsize=(5,3)) for i in range(0,N,2): plt.plot(lam, y_syn[:,i]+0.05*i, '-+', ms=2) plt.xlabel('lam') plt.ylabel('signal') ``` # Inference In order to carry out an inference, a custom **likelihood**, which calculates $p(\mathbf{Y}|\mathbf{f})$ with given $\mathbf{f}$, must be prepared according to the problem. 
The method to be implemented is **logp(f,Y)** method, that calculates log-likelihood for data **Y** with given **f** ``` class SpecAbelLikelihood(GPinv.likelihoods.Likelihood): def __init__(self, Amat, cosTheta, lam): GPinv.likelihoods.Likelihood.__init__(self) # Amat, cosTheta shape [m,n] self.Amat = GPinv.param.DataHolder(Amat) self.cosT = GPinv.param.DataHolder(cosTheta) # lam [k,1] self.lam = GPinv.param.DataHolder(lam.reshape(-1,1)) self.variance = GPinv.param.Param(np.ones(1), GPinv.transforms.positive) def sample_F(self, F): """ :param tf.tensor F: sized [N,n,R] :return tf.tensor Transformed F: sized [N,k,m] where N is number of samples to approximate integration. n is number of radial coordinate, R is number of latent functions (a, sigma, v), k is number of wavelength points m is number of citelines. """ N = tf.shape(F)[0] n = tf.shape(F)[1] k = tf.shape(self.lam)[0] m = tf.shape(self.Amat)[0] # latent functions a, s, v = tf.unpack(F, axis=-1, num=3) # shape [N,n] # map a and s by exp a = tf.exp(a) s = tf.exp(s) # Tile latent functions to be sized [N,k,m,n] a = tf.tile(tf.expand_dims(tf.expand_dims(a, 1),-2), [1,k,m,1]) s = tf.tile(tf.expand_dims(tf.expand_dims(s, 1),-2), [1,k,m,1]) v = tf.tile(tf.expand_dims(tf.expand_dims(v, 1),-2), [1,k,m,1]) Amat = tf.tile(tf.expand_dims(tf.expand_dims(self.Amat,0), 0), [N,k,1,1]) cosT = tf.tile(tf.expand_dims(tf.expand_dims(self.cosT,0), 0), [N,k,1,1]) lam = tf.tile(tf.expand_dims(tf.expand_dims(self.lam, 0),-1), [N,1,m,n]) # Latent spectrum at wavelength k, radial position n, cite line m f = a / (np.sqrt(2*np.pi)*s) * tf.exp(-0.5*tf.square((lam - v * cosTheta)/s)) # Latent spectrum at wavelength k, cite line m, [N,k,m] Af = tf.reduce_sum(Amat * f, 3) return Af def logp(self, F, Y): """ :param tf.tensor Y: sized [k,m] :return tf.tensor : tensor containing logp values. 
""" # Expand Y to shape [N,k,m] f_samples = self.sample_F(F) Y = tf.tile(tf.expand_dims(Y, 0), [tf.shape(f_samples)[0],1,1]) return GPinv.densities.gaussian(f_samples, Y, self.variance) lik = SpecAbelLikelihood(A, cosTheta, lam) ``` ### Kernel The statistical property is interpreted in Gaussian Process kernel. Since $a$ and $s$ are cylindrically symmetric functions, we adopt **RBF_csym** kernel for $\mathbf{K}_a$ and $\mathbf{K}_s$ with **same** lengthscale. Since $v$ is a cylindrically anti-symmetric functions, we adopt **RBF_casym** kernel for $\mathbf{K}_v$. ``` # kernel for a and s kern_as = GPinv.kernels.RBF_csym(1, 2) kern_as.lengthscales = 0.3 # kernel for v kern_v = GPinv.kernels.RBF_casym(1, 1) kern_v.lengthscales = 0.3 # Stacked kernel kern = GPinv.kernels.Stack([kern_as, kern_v]) ``` ### MeanFunction To make $a$ and $s$ scale invariant, we added the constant mean_function for them. We adopt a zero mean for $v$ ``` # mean for a and s mean_as = GPinv.mean_functions.Constant(2, c=np.ones(2)*(-2)) # mean for v mean_v = GPinv.mean_functions.Zero(1) # Stacked mean mean = GPinv.mean_functions.Stack([mean_as, mean_v]) ``` ## Variational inference by StVGP In StVGP, we evaluate the posterior $p(\mathbf{f}|\mathbf{y},\theta)$ by approximating as a multivariate Gaussian distribution. The hyperparameters are obtained at the maximum of the evidence lower bound (ELBO) $p(\mathbf{y}|\theta)$. ``` model_stvgp = GPinv.stvgp.StVGP(r.reshape(-1,1), y_syn, kern = kern, mean_function = mean,likelihood=lik, num_latent=3, num_samples=5) ``` ## Draw the initial estimate. ``` # Data Y should scatter around the transform F of the GP function f. 
sample_F = model_stvgp.sample_F(100) plt.figure(figsize=(5,3)) # initial estimate for s in sample_F: for i in range(0,N,2): plt.plot(lam, s[:,i]+0.05*i, '-k',lw=1, alpha=0.1) # observation for i in range(0,N,2): plt.plot(lam, y_syn[:,i]+0.05*i, '-o', ms=2) # plot plt.xlabel('lam') plt.ylabel('signal') ``` Although the initial estimate does not seem good, we start from here. ## Iteration ``` # This function visualizes the iteration. from IPython import display logf = [] def logger(x): if (logger.i % 10) == 0: obj = -model_stvgp._objective(x)[0] logf.append(obj) # display if (logger.i % 100) ==0: plt.clf() plt.plot(logf, '--ko', markersize=3, linewidth=1) plt.ylabel('ELBO') plt.xlabel('iteration') display.display(plt.gcf()) display.clear_output(wait=True) logger.i+=1 logger.i = 1 import time # start time start_time = time.time() plt.figure(figsize=(6,3)) # Rough optimization by scipy.minimize model_stvgp.optimize() # Final optimization by tf.train trainer = tf.train.AdamOptimizer(learning_rate=0.003) _= model_stvgp.optimize(trainer, maxiter=5000, callback=logger) display.clear_output(wait=True) print('Ellapsed Time is', time.time()-start_time, ' (s)') ``` ## Plot the result ## Latent function ``` r_new = np.linspace(0.,1., 30) plt.figure(figsize=(10,3)) # --- StVGP --- f_pred, f_var = model_stvgp.predict_f(r_new.reshape(-1,1)) f_plus = f_pred + 2.*np.sqrt(f_var) f_minus = f_pred - 2.*np.sqrt(f_var) # --- observed and grand truth --- plt.subplot(1,3,1) plt.fill_between(r_new, np.exp(f_plus[:,0]), np.exp(f_minus[:,0]), alpha=0.2) plt.plot(r_new, np.exp(f_pred[:,0]), label='StVGP',lw=1.5) plt.plot(r, a, '-k', label='true',lw=1.5) plt.xlabel('$r$: Radial coordinate') plt.ylabel('$a$: Emissivity') plt.subplot(1,3,2) plt.fill_between(r_new, f_plus[:,2], f_minus[:,2], alpha=0.2) plt.plot(r_new, f_pred[:,2], label='StVGP',lw=1.5) plt.plot(r, v, '-k', label='true',lw=1.5) plt.xlabel('$r$: Radial coordinate') plt.ylabel('$v$: Velocity') plt.subplot(1,3,3) 
plt.fill_between(r_new, np.exp(f_plus[:,1]), np.exp(f_minus[:,1]), alpha=0.2) plt.plot(r_new, np.exp(f_pred[:,1]), label='StVGP',lw=1.5) plt.plot(r, sigma, '-k', label='true',lw=1.5) plt.xlabel('$r$: Radial coordinate') plt.ylabel('$\sigma$: Temperature') plt.tight_layout() ``` ## Transformed functions ``` # Data Y should scatter around the transform F of the GP function f. sample_F = model_stvgp.sample_F(100) plt.figure(figsize=(5,3)) # initial estimate for s in sample_F: for i in range(0,N,2): plt.plot(lam, s[:,i]+0.05*i, '-k', lw=1, alpha=0.1) # observation for i in range(0,N,2): plt.plot(lam, y_syn[:,i]+0.05*i, '-o', ms=2) plt.xlabel('lam') plt.ylabel('signal') ```
github_jupyter
``` from mcts_simple import Game class TicTacToe(Game): def __init__(self): self.board = {char + str(num + 1): " " for char in "abc" for num in range(3)} self.players = ["X", "O"] self.player_turn = 0 self.prev_actions = [] def win_conditions(self): return ((self.board["a1"], self.board["a2"], self.board["a3"]), (self.board["b1"], self.board["b2"], self.board["b3"]), (self.board["c1"], self.board["c2"], self.board["c3"]), (self.board["a1"], self.board["b1"], self.board["c1"]), (self.board["a2"], self.board["b2"], self.board["c2"]), (self.board["a3"], self.board["b3"], self.board["c3"]), (self.board["a1"], self.board["b2"], self.board["c3"]), (self.board["a3"], self.board["b2"], self.board["c1"])) def previous_player(self): self.player_turn = (self.player_turn - 1) % 2 def next_player(self): self.player_turn = (self.player_turn + 1) % 2 def render(self): print(f"{self.board['a1']}|{self.board['a2']}|{self.board['a3']}") print("-" * 5) print(f"{self.board['b1']}|{self.board['b2']}|{self.board['b3']}") print("-" * 5) print(f"{self.board['c1']}|{self.board['c2']}|{self.board['c3']}") print() def get_state(self): return tuple(self.board.values()) def number_of_players(self): return len(self.players) def current_player(self): return self.players[self.player_turn] def possible_actions(self): return [pos for pos in self.board if self.board[pos] == " "] def take_action(self, action): if action not in self.possible_actions(): raise RuntimeError("Action taken is invalid.") self.board[action] = self.current_player() self.prev_actions.append(action) self.next_player() def delete_last_action(self): if len(self.prev_actions) == 0: raise RuntimeError("There is no action to be deleted.") self.board[self.prev_actions.pop()] = " " self.previous_player() def has_outcome(self): return any([(player,) * 3 in self.win_conditions() for player in self.players]) or " " not in self.board.values() def winner(self): win_conditions = self.win_conditions() if not self.has_outcome(): raise 
RuntimeError("winner() cannot be called when outcome is undefined.") for player in self.players: if (player,) * 3 in self.win_conditions(): return player if " " not in self.board.values(): return None else: raise RuntimeError from mcts_simple import MCTS, UCT # To train MCTS print("To train MCTS") mcts = MCTS(TicTacToe()) mcts.run(iterations = 300) mcts._export("mcts.json") # To import trained MCTS print("To import trained MCTS") mcts = MCTS(TicTacToe()) mcts._import("mcts.json") mcts.self_play(activation = "best") # To train UCT print("To train UCT") uct = UCT(TicTacToe()) uct.run(iterations = 300000) uct._export("uct.json") # To import trained UCT print("To import trained UCT") uct = UCT(TicTacToe()) uct._import("uct.json") uct.self_play(activation = "best") # To import trained MCTS to play from middle of game print("To import trained MCTS to play from middle of game") t = TicTacToe() t.take_action("b2") t.take_action("c1") t.take_action("a2") mcts = MCTS(t) mcts._import("mcts.json") mcts.self_play(activation = "best") # To import trained UCT to play with human print("To import trained UCT to play with human") uct = UCT(TicTacToe()) uct._import("uct.json") uct.play_with_human(activation = "best") ```
github_jupyter
``` ######################################## ## import packages ######################################## import os import re import csv import codecs import numpy as np import pandas as pd import operator from nltk.corpus import stopwords from nltk.stem import SnowballStemmer from string import punctuation from textblob import TextBlob from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, SpatialDropout1D, Reshape, GlobalAveragePooling1D, merge, Flatten, Bidirectional, CuDNNGRU, add, Conv1D, GlobalMaxPooling1D from keras.layers.merge import concatenate from keras.models import Model, Sequential from keras.layers.normalization import BatchNormalization from keras.callbacks import EarlyStopping, ModelCheckpoint import sys path = 'Dataset/' EMBEDDING_FILE='features/fast-text-300.txt' #EMBEDDING_FILE='features/glove.twitter.27B.200d.txt' #EMBEDDING_FILE='features/glove.840B.300d.txt' TRAIN_DATA_FILE=path + 'train.csv' TEST_DATA_FILE=path + 'test.csv' MAX_SEQUENCE_LENGTH = 350 MAX_NB_WORDS = 100000 EMBEDDING_DIM = 200 ######################################## ## index word vectors ######################################## print('Indexing word vectors') #Glove Vectors embeddings_index = {} f = open(EMBEDDING_FILE, 'r', encoding='utf-8') for line in f: values = line.split() try: word = values[0] coefs = np.asarray(values[1:], dtype='float32') embeddings_index[word] = coefs except: print("Err on ", values[:3]) f.close() print('Total %s word vectors.' 
% len(embeddings_index)) train_df = pd.read_csv(TRAIN_DATA_FILE) test_df = pd.read_csv(TEST_DATA_FILE) ######################################## # Load the cleaned words ######################################## cl_path = 'features/cleanwords.txt' clean_word_dict = {} with open(cl_path, 'r', encoding='utf-8') as cl: for line in cl: print(line) line = line.strip('\n') typo, correct = line.split(',') clean_word_dict[typo] = correct ######################################## ## process texts in datasets ######################################## print('Processing text dataset') from collections import defaultdict # Regex to remove all Non-Alpha Numeric and space special_character_removal=re.compile(r'[^?!.,:a-z\d ]',re.IGNORECASE) # regex to replace all numerics replace_numbers=re.compile(r'\d+',re.IGNORECASE) word_count_dict = defaultdict(int) import re toxic_dict = {} def text_to_wordlist(text, remove_stopwords=False, stem_words=False, count_null_words=True, clean_wiki_tokens=True): # Clean the text, with the option to remove stopwords and to stem words. 
# dirty words text = re.sub(r"”", "", text) text = re.sub(r"“", "", text) text = replace_numbers.sub(' ', text) if count_null_words: text = text.split() for t in text: word_count_dict[t] += 1 text = " ".join(text) # Optionally, shorten words to their stems if stem_words: text = text.split() stemmer = SnowballStemmer('english') stemmed_words = [stemmer.stem(word) for word in text] text = " ".join(stemmed_words) return (text) list_sentences_train = train_df["comment_text_clean"].fillna("no comment").values list_classes = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"] y = train_df[list_classes].values list_sentences_test = test_df["comment_text_clean"].fillna("no comment").values comments = [] for text in list_sentences_train: comments.append(text_to_wordlist(text)) test_comments=[] for text in list_sentences_test: test_comments.append(text_to_wordlist(text)) tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='"#$%&()+,-./:;<=>@[\\]^_`{|}~\t\n') #tokenizer = Tokenizer(num_words=MAX_NB_WORDS) tokenizer.fit_on_texts(comments + test_comments) sequences = tokenizer.texts_to_sequences(comments) test_sequences = tokenizer.texts_to_sequences(test_comments) word_index = tokenizer.word_index print('Found %s unique tokens' % len(word_index)) data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of data tensor:', data.shape) print('Shape of label tensor:', y.shape) test_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of test_data tensor:', test_data.shape) def sent2pos(sentence): try: tag = TextBlob(sentence).tags except: print(sentence) print(' '.join([word_index[word] for word in text])) updated_sentence = ' '.join([i[0] for i in tag]) tagged = ' '.join([i[1] for i in tag]) # print(len(updated_sentence.split(' ')),len(text2.split(' '))) # print(updated_sentence) # print(tagged) return updated_sentence, tagged inverse_word_index = {v: k for k, v in word_index.items()} Pos_comments = [] 
Pos_updated_sentence = [] for text in sequences: text1 = ' '.join([inverse_word_index[word] for word in text]) if not isinstance(text1, str): print(text) print(text1) updated_sentence, text2 = sent2pos(text1) Pos_updated_sentence.append(updated_sentence) Pos_comments.append(text2) assert len(updated_sentence.split(' ')) == len(text2.split(' ')), "T1 {} T2 {} ".format(len(text), len(text2.split())) Pos_test_comments = [] Pos_test_updated_sentence = [] for text in test_sequences: text1 = ' '.join([inverse_word_index[word] for word in text]) updated_sentence, text2 = sent2pos(text1) Pos_test_updated_sentence.append(updated_sentence) Pos_test_comments.append(text2) assert len(updated_sentence.split(' ')) == len(text2.split(' ')), "T1 {} T2 {} ".format(len(text), len(text2.split())) pos_tokenizer = Tokenizer(num_words=50, filters='"#$%&()+,-./:;<=>@[\\]^_`{|}~\t\n') #tokenizer = Tokenizer(num_words=MAX_NB_WORDS) pos_tokenizer.fit_on_texts(Pos_comments + Pos_test_comments) sequences = pos_tokenizer.texts_to_sequences(Pos_comments) test_sequences = pos_tokenizer.texts_to_sequences(Pos_test_comments) pos_word_index = tokenizer.word_index print('Found %s unique tokens' % len(pos_word_index)) pos_data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of data tensor:', pos_data.shape) print('Shape of label tensor:', y.shape) pos_test_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of test_data tensor:', pos_test_data.shape) ``` ## Second valid ``` comments = [] for text in Pos_updated_sentence: comments.append(text_to_wordlist(text)) test_comments=[] for text in Pos_test_updated_sentence: test_comments.append(text_to_wordlist(text)) # tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='"#$%&()+,-./:;<=>@[\\]^_`{|}~\t\n') # tokenizer.fit_on_texts(comments + test_comments) sequences = tokenizer.texts_to_sequences(comments) test_sequences = tokenizer.texts_to_sequences(test_comments) word_index = tokenizer.word_index 
print('Found %s unique tokens' % len(word_index)) data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of data tensor:', data.shape) print('Shape of label tensor:', y.shape) test_data = pad_sequences(test_sequences, maxlen=MAX_SEQUENCE_LENGTH) print('Shape of test_data tensor:', test_data.shape) ######################################## ## prepare embeddings ######################################## print('Preparing embedding matrix') nb_words = min(MAX_NB_WORDS, len(word_index)) embedding_matrix = np.zeros((nb_words, EMBEDDING_DIM)) null_words = open('null-word.txt', 'w', encoding='utf-8') for word, i in word_index.items(): if i >= MAX_NB_WORDS: null_words.write(word + ', ' + str(word_count_dict[word]) +'\n') continue embedding_vector = embeddings_index.get(word) if embedding_vector is not None: # words not found in embedding index will be all-zeros. embedding_matrix[i] = embedding_vector else: null_words.write(word + ', ' + str(word_count_dict[word]) + '\n') print('Null word embeddings: %d' % np.sum(np.sum(embedding_matrix, axis=1) == 0)) #24146 f = open('cleaned_text.txt', 'w', encoding='utf-8') for line in test_comments: f.write(line + '\n') # sort null word null_count = {} with open('null-word.txt', 'r', encoding='utf-8') as nullword: for line in nullword: w, c = line.strip('\n').split(', ') null_count[w] = int(c) null_count = sorted(word_count_dict.items(), key=operator.itemgetter(1), reverse=True) with open('null-word.txt', 'w', encoding='utf-8') as output: for w, c in null_count: output.write(w + ", " + str(c) + '\n') ``` # Model Zoo ``` from sklearn.metrics import roc_auc_score import numpy as np STAMP = 'pavel_rnn_%.2f_%.2f'%(0.5,0.5) def _train_model_by_auc(model, batch_size, train_x, train_y, val_x, val_y): best_auc = -1 best_weights = None best_epoch = 0 current_epoch = 1 while True: model.fit(train_x, train_y, batch_size=batch_size, epochs=1, validation_data=[val_x, val_y]) y_pred = model.predict(val_x, batch_size=batch_size) 
current_auc = roc_auc_score(val_y, y_pred) print("Epoch {} auc {:.6f} best_auc {:.6f}".format(current_epoch, current_auc, best_auc)) current_epoch += 1 if best_auc < current_auc or best_auc == -1: best_auc = current_auc best_weights = model.get_weights() best_epoch = current_epoch else: if current_epoch - best_epoch == 5: break model.set_weights(best_weights) return model, best_auc def _train_model_by_logloss(model, batch_size, train_x, pos_train_x, train_y, val_x, pos_val_x, val_y, fold_id): early_stopping =EarlyStopping(monitor='val_loss', patience=7) bst_model_path = STAMP + str(fold_id) + '.h5' model_checkpoint = ModelCheckpoint(bst_model_path, save_best_only=True, save_weights_only=True) train_data = {'Onehot':train_x, 'POS':pos_train_x} val_data = {'Onehot':val_x, 'POS':pos_val_x} hist = model.fit(train_data, train_y, validation_data=(val_data, val_y), epochs=50, batch_size=batch_size, shuffle=True, callbacks=[early_stopping, model_checkpoint]) bst_val_score = min(hist.history['val_loss']) predictions = model.predict(val_data) auc = roc_auc_score(val_y, predictions) print("AUC Score", auc) return model, bst_val_score, auc, predictions def train_folds(X, pos_x, y, fold_count, batch_size, get_model_func): fold_size = len(X) // fold_count models = [] fold_predictions = [] score = 0 total_auc = 0 for fold_id in range(0, fold_count): fold_start = fold_size * fold_id fold_end = fold_start + fold_size if fold_id == fold_size - 1: fold_end = len(X) train_x = np.concatenate([X[:fold_start], X[fold_end:]]) train_y = np.concatenate([y[:fold_start], y[fold_end:]]) val_x = X[fold_start:fold_end] val_y = y[fold_start:fold_end] pos_train_x = np.concatenate([pos_x[:fold_start], pos_x[fold_end:]]) pos_val_x = pos_x[fold_start:fold_end] print("In fold #", fold_id) model, bst_val_score, auc, fold_prediction = _train_model_by_logloss(get_model_func(), batch_size, train_x, pos_train_x, train_y, val_x, pos_val_x, val_y, fold_id) score += bst_val_score total_auc += auc 
fold_predictions.append(fold_prediction) models.append(model) return models, score / fold_count, total_auc / fold_count, fold_predictions from keras import optimizers from keras.layers import Reshape adam_optimizer = optimizers.Adam(lr=1e-3 ** 64/256, decay=1e-8) def get_av_pos_cnn(): embedding_layer = Embedding(nb_words, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False) filter_nums = 325 # 500->375, 400->373, 300-> drop = 0.5 dr_rate = 0.5 input_layer = Input(shape=(MAX_SEQUENCE_LENGTH,), name='Onehot') input_layer_2 = Input(shape=(MAX_SEQUENCE_LENGTH,), name='POS') embedding_layer = Embedding(nb_words, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False)(input_layer) embedding_layer2 = Embedding(50, 30, # Latest Modify input_length=MAX_SEQUENCE_LENGTH, trainable=True)(input_layer_2) embedding_layer = concatenate([embedding_layer, embedding_layer2], axis=2) embedded_sequences = SpatialDropout1D(0.25)(embedding_layer) conv_0 = Conv1D(filter_nums, 1, kernel_initializer="normal", padding="valid", activation="relu")(embedded_sequences) conv_1 = Conv1D(filter_nums, 2, kernel_initializer="normal", padding="valid", activation="relu")(embedded_sequences) conv_2 = Conv1D(filter_nums, 3, kernel_initializer="normal", padding="valid", activation="relu")(embedded_sequences) conv_3 = Conv1D(filter_nums, 4, kernel_initializer="normal", padding="valid", activation="relu")(embedded_sequences) attn_0 = AttentionWeightedAverage()(conv_0) avg_0 = GlobalAveragePooling1D()(conv_0) maxpool_0 = GlobalMaxPooling1D()(conv_0) maxpool_1 = GlobalMaxPooling1D()(conv_1) attn_1 = AttentionWeightedAverage()(conv_1) avg_1 = GlobalAveragePooling1D()(conv_1) maxpool_2 = GlobalMaxPooling1D()(conv_2) attn_2 = AttentionWeightedAverage()(conv_2) avg_2 = GlobalAveragePooling1D()(conv_2) maxpool_3 = GlobalMaxPooling1D()(conv_3) attn_3 = AttentionWeightedAverage()(conv_3) avg_3 = GlobalAveragePooling1D()(conv_3) v0_col = 
merge([maxpool_0, maxpool_1, maxpool_2, maxpool_3], mode='concat', concat_axis=1) v1_col = merge([attn_0, attn_1, attn_2, attn_3], mode='concat', concat_axis=1) v2_col = merge([avg_1, avg_2, avg_0, avg_3], mode='concat', concat_axis=1) merged_tensor = merge([v0_col, v1_col, v2_col], mode='concat', concat_axis=1) output = Dropout(0.7)(merged_tensor) output = Dense(units=144)(output) output = Activation('relu')(output) #output = Dropout(0.5)(output) output = Dense(units=6, activation='sigmoid')(output) model = Model(inputs=[input_layer, input_layer_2], outputs=output) model.compile(loss='binary_crossentropy', optimizer=adam_optimizer, metrics=['accuracy']) return model ######################################## ## define the RNN with Attention model structure ######################################## from keras import optimizers adam_optimizer = optimizers.Adam(lr=1e-3, clipvalue=5, decay=1e-6) def get_av_pos_rnn(): recurrent_units = 56 dropout_rate = 0.35 dense_size = 32 input_layer = Input(shape=(MAX_SEQUENCE_LENGTH,), name='Onehot') input_layer_2 = Input(shape=(MAX_SEQUENCE_LENGTH,), name='POS') embedding_layer = Embedding(nb_words, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH, trainable=False)(input_layer) embedding_layer2 = Embedding(50, 35, # Latest Modify input_length=MAX_SEQUENCE_LENGTH, trainable=True)(input_layer_2) embedding_layer = concatenate([embedding_layer, embedding_layer2], axis=2) embedding_layer = SpatialDropout1D(0.2)(embedding_layer) r1 = Bidirectional(CuDNNGRU(64, return_sequences=True))(embedding_layer) #r1 = SpatialDropout1D(0.35)(r1) # Latest Modify #r2 = Bidirectional(CuDNNGRU(64, return_sequences=True))(r1) #r2 = SpatialDropout1D(0.35)(r2) #rrs = concatenate([r1 ,r2], axis=-1) last_1 = Lambda(lambda t: t[:, -1])(r1) #last_2 = Lambda(lambda t: t[:, -1])(r2) maxpool = GlobalMaxPooling1D()(r1) attn = AttentionWeightedAverage()(r1) average = GlobalAveragePooling1D()(r1) concatenated = concatenate([maxpool, last_1, 
attn, average,], axis=1) x = Dropout(0.5)(concatenated) x = Dense(144, activation="relu")(x) output_layer = Dense(6, activation="sigmoid")(x) model = Model(inputs=[input_layer, input_layer_2], outputs=output_layer) model.compile(loss='binary_crossentropy', optimizer=adam_optimizer, metrics=['accuracy']) return model models, val_loss, total_auc, fold_predictions = train_folds(data, pos_data, y, 10, 224, get_av_pos_rnn) print("Overall val-loss:", val_loss, "AUC", total_auc) # RNN benchmark ``` ## Predections ``` train_fold_preditcions = np.concatenate(fold_predictions, axis=0) training_auc = roc_auc_score(y[:-1], train_fold_preditcions) print("Training AUC", training_auc) #test_data = test_df CLASSES = ["toxic", "severe_toxic", "obscene", "threat", "insult", "identity_hate"] submit_path_prefix = "results/rnn/fasttext-nds-SC-POV-avrnn-Voc" + str(MAX_NB_WORDS) + "-fixedpuppet-skippandall-lp-ct-" + str(MAX_SEQUENCE_LENGTH) print("Predicting testing results...") test_predicts_list = [] for fold_id, model in enumerate(models): test_predicts = model.predict({'Onehot':test_data, 'POS':pos_test_data}, batch_size=256, verbose=1) test_predicts_list.append(test_predicts) np.save("predict_path/", test_predicts) test_predicts = np.zeros(test_predicts_list[0].shape) for fold_predict in test_predicts_list: test_predicts += fold_predict test_predicts /= len(test_predicts_list) test_ids = test_df["id"].values test_ids = test_ids.reshape((len(test_ids), 1)) test_predicts = pd.DataFrame(data=test_predicts, columns=CLASSES) test_predicts["id"] = test_ids test_predicts = test_predicts[["id"] + CLASSES] submit_path = submit_path_prefix + "-L{:4f}-A{:4f}.csv".format(val_loss, total_auc) test_predicts.to_csv(submit_path, index=False) print("Predicting training results...") train_ids = train_df["id"].values train_ids = train_ids.reshape((len(train_ids), 1)) train_predicts = pd.DataFrame(data=train_fold_preditcions, columns=CLASSES) # IT MISS THE LAST ONE's label train_predicts["id"] = 
train_ids[:-1] train_predicts = train_predicts[["id"] + CLASSES] submit_path = submit_path_prefix + "-Train-L{:4f}-A{:4f}.csv".format(val_loss, training_auc) train_predicts.to_csv(submit_path, index=False) ```
github_jupyter
# Effect of learning rate

In this notebook, we discuss the impact of the learning rate, which determines the step size and changes the distance from initialization to the solution; this contributes to breaking the NTK regime.

```
import torch
from torch import optim, nn
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import matplotlib.pyplot as plt
import copy
import os
import random
from models import train_ntk

os.environ["CUDA_VISIBLE_DEVICES"] = "3"

# training parameters
batch_size = 128

transform = transforms.Compose([
    transforms.ToTensor()
])

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=True, download=True, transform=transform),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data', train=False, download=True, transform=transform),
    batch_size=batch_size, shuffle=True)

h_dim = 5000
train_epoch = 100
alpha_set = [h_dim**(0.1*k) for k in range(11)]
alpha = alpha_set[5]

srr1,saa1,sll1 = train_ntk(train_loader, test_loader,h_dim,alpha,train_epoch,1)
srr2,saa2,sll2 = train_ntk(train_loader, test_loader,h_dim,alpha,train_epoch,.1)
srr3,saa3,sll3 = train_ntk(train_loader, test_loader,h_dim,alpha,train_epoch,.01)
```

## Plot

As this notebook shows, a large learning rate directly breaks the NTK regime. Since all the proofs approximate the gradient flow (an infinitesimally small learning rate), a large step moves the parameters out of the initialization neighborhood.
``` import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") from scipy.ndimage.filters import gaussian_filter1d plt.plot(np.arange(train_epoch)+1,100*gaussian_filter1d(np.array(saa1)[:,0],3),label = r'NN, $\eta = 1$') plt.plot(np.arange(train_epoch)+1,100*gaussian_filter1d(np.array(saa2)[:,0],3),label = r'NN, $\eta = 0.1$') plt.plot(np.arange(train_epoch)+1,100*gaussian_filter1d(np.array(saa3)[:,0],3),label = r'NN, $\eta = 0.01$') plt.plot(np.arange(train_epoch)+1,100*gaussian_filter1d(np.array(saa1)[:,1],3),linestyle='dashed',label = r'NTK, $\eta = 1$') plt.plot(np.arange(train_epoch)+1,100*gaussian_filter1d(np.array(saa2)[:,1],3),linestyle='dashed',label = r'NTK, $\eta = 0.1$') plt.plot(np.arange(train_epoch)+1,100*gaussian_filter1d(np.array(saa3)[:,1],3,),linestyle='dashed',label = r'NTK, $\eta = 0.01$') #plt.plot(np.arange(50)+1,np.array(srr4)[:,0],label = r'$\alpha = m^{0.7}$') plt.legend() plt.ylabel(r'accuracy (%)',fontsize=15) plt.xlabel('epoch',fontsize=15) #plt.ylim([0.8,0.99]) #plt.yscale('log') plt.legend(fontsize=12) plt.xticks(fontsize=12) plt.yticks(fontsize=12) import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") plt.plot(np.arange(train_epoch)+1,np.array(srr1)[:,0],label = r'$\eta = 1$') plt.plot(np.arange(train_epoch)+1,np.array(srr2)[:,0],label = r'$\eta = 0.1$') plt.plot(np.arange(train_epoch)+1,np.array(srr3)[:,0],label = r'$\eta = 0.01$') plt.legend(fontsize=12) plt.xticks(fontsize=12) plt.yticks(fontsize=11) plt.ylabel(r'${\Vert \theta-\tilde{\theta} \Vert}$',fontsize=11) plt.xlabel('epoch',fontsize=15) #plt.ylim([0.1,10]) plt.yscale('log') import matplotlib.pyplot as plt import seaborn as sns sns.set_style("whitegrid") plt.plot(np.arange(train_epoch)+1,np.array(srr1)[:,1],label = r'$\eta = 1$') plt.plot(np.arange(train_epoch)+1,np.array(srr2)[:,1],label = r'$\eta = 0.1$') plt.plot(np.arange(train_epoch)+1,np.array(srr3)[:,1],label = r'$\eta = 0.01$') plt.legend(fontsize=12) 
plt.xticks(fontsize=12) plt.yticks(fontsize=11) plt.ylabel(r'${\Vert u-\tilde{u} \Vert}$',fontsize=11) plt.xlabel('epoch',fontsize=15) plt.ylim([0.8,100]) plt.yscale('log') ```
github_jupyter
## Day 3: Cells in Silicon

Welcome to Day 3! Today, we start with our discussion of the Hodgkin-Huxley neuron model and how we can simulate it in Python using TensorFlow and numerical integration.

### What is the Hodgkin Huxley Neuron Model? (Modified from Neuronal Dynamics, EPFL)

Hodgkin and Huxley performed many experiments on the giant axon of the squid and found three different types of ion currents - sodium, potassium, and a leak current. From electrophysiology studies involving pharmacological blocking of ion channels, they found that specific voltage-dependent ion channels, for sodium and for potassium, control the flow of those ions through the cell membrane. The leak current essentially takes care of other channel types which are not described explicitly.

<img src="cd.png" alt="cd.png" width="600"/>

The Hodgkin-Huxley model of neurons can easily be understood with the help of a circuit diagram. The semipermeable cell membrane separates the interior of the cell from the extracellular liquid and acts as a capacitor. If an input current I(t) is injected into the cell, it may add further charge on the capacitor, or leak through the channels in the cell membrane. Because of active ion transport through the cell membrane, the ion concentration inside the cell is different from that in the extracellular liquid. The Nernst potential generated by the difference in ion concentration is represented by a battery.

Let us now translate the above considerations into mathematical equations. The conservation of electric charge on a piece of membrane implies that the applied current $I(t)$ may be split into a capacitive current $I_C$ which charges the capacitor $C_m = 1 \mu F/cm^2$ and further components $I_k$ which pass through the ion channels. Thus $I(t) = I_C(t) + \sum_k I_k(t)$ where the sum runs over all ion channels.
In the standard Hodgkin-Huxley model, there are only three types of channel: a sodium channel, a potassium channel and an unspecific leakage channel. From the definition of a capacitance $C_m=\frac{q}{u}$, $I_C=C_m\frac{du}{dt}$ where $q$ is a charge and $u$ the voltage across the capacitor. Thus the model becomes:

$$C_m\frac{du}{dt}=-I_{Na}(t)-I_{K}(t)-I_{L}(t)+I(t)$$

In biological terms, $u$ is the voltage across the membrane. Hodgkin and Huxley found the Na and K ion currents to be dependent on the voltage and of the form given below:

$$I_{Na} = g_{Na}m^3h(u-E_{Na})$$
$$I_K = g_Kn^4(u-E_K)$$
$$I_L = g_L(u-E_L)$$

where $E_{Na}=50\ mV$, $E_K = -95\ mV$ and $E_L=-55\ mV$ are the reversal potentials; $g_{Na} = 100\ \mu S/cm^2$, $g_K = 10\ \mu S/cm^2$ and $g_L = 0.15\ \mu S/cm^2$ are the channel conductances; and m, h, and n are gating variables that follow the dynamics given by:

$$\frac{dm}{dt} = - \frac{1}{\tau_m}(m-m_0)$$
$$\frac{dh}{dt} = - \frac{1}{\tau_h}(h-h_0)$$
$$\frac{dn}{dt} = - \frac{1}{\tau_n}(n-n_0)$$

where $\tau_m$, $\tau_h$ and $\tau_n$ are voltage-dependent time constants and $m_0$, $h_0$ and $n_0$ are voltage-dependent asymptotic gating values. These functions are empirically determined for different types of neurons.

<img src="dyn.png" alt="dyn.png" width="800"/>

#### Recalling the Generalized TensorFlow Integrator

On day 2, we had created an RK4-based numerical integrator. We recall the implementation of the integrator.

```
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
%matplotlib inline

def tf_check_type(t, y0): # Ensure Input is Correct
    if not (y0.dtype.is_floating and t.dtype.is_floating):
        raise TypeError('Error in Datatype')

class _Tf_Integrator():

    def integrate(self, func, y0, t):
        time_delta_grid = t[1:] - t[:-1]

        def scan_func(y, t_dt):
            t, dt = t_dt
            dy = self._step_func(func,t,dt,y) # Make code more modular.
return y + dy y = tf.scan(scan_func, (t[:-1], time_delta_grid),y0) return tf.concat([[y0], y], axis=0) def _step_func(self, func, t, dt, y): k1 = func(y, t) half_step = t + dt / 2 dt_cast = tf.cast(dt, y.dtype) # Failsafe k2 = func(y + dt_cast * k1 / 2, half_step) k3 = func(y + dt_cast * k2 / 2, half_step) k4 = func(y + dt_cast * k3, t + dt) return tf.add_n([k1, 2 * k2, 2 * k3, k4]) * (dt_cast / 6) def odeint(func, y0, t): t = tf.convert_to_tensor(t, preferred_dtype=tf.float64, name='t') y0 = tf.convert_to_tensor(y0, name='y0') tf_check_type(y0,t) return _Tf_Integrator().integrate(func,y0,t) ``` #### Implementing the Dynamical Function for an Hodkin Huxley Neuron Recall, a simple Hodgkin Huxley Neuron has a 4 main dynamical variables: $V = Membrane\ Potential$ $m = Sodium\ Activation\ Gating\ Variable$ $h = Sodium\ Inactivation\ Gating\ Variable$ $n = Potassium\ Channel\ Gating\ Variable$ And the dynamics are given by: $$C_m\frac{dV}{dt} = I_{injected} - I_{Na} - I_K - I_L$$ $$\frac{dm}{dt} = - \frac{1}{\tau_m}(m-m_0)$$ $$\frac{dh}{dt} = - \frac{1}{\tau_h}(h-h_0)$$ $$\frac{dn}{dt} = - \frac{1}{\tau_n}(n-n_0)$$ where the values of $\tau_m$, $\tau_h$, $\tau_n$, $m_0$, $h_0$, $n_0$ are given from the equations mentioned earlier. ##### Step 1: Defining Parameters of the Neuron ``` C_m = 1 # Membrane Capacitance g_K = 10 E_K = -95 g_Na = 100 E_Na = 50 g_L = 0.15 E_L = -55 ``` ##### Step 2: Defining functions that calculate $\tau_m$, $\tau_h$, $\tau_n$, $m_0$, $h_0$, $n_0$ Note: Always use Tensorflow functions for all mathematical operations. 
For our Hodgkin Huxley Model, we will determine the values of $\tau_m$, $\tau_h$, $\tau_n$, $m_0$, $h_0$, $n_0$ by the following equations: <img src="eqns1.png" alt="eqns1.png" width="600"/> ``` def K_prop(V): T = 22 phi = 3.0**((T-36.0)/10) V_ = V-(-50) alpha_n = 0.02*(15.0 - V_)/(tf.exp((15.0 - V_)/5.0) - 1.0) beta_n = 0.5*tf.exp((10.0 - V_)/40.0) t_n = 1.0/((alpha_n+beta_n)*phi) n_0 = alpha_n/(alpha_n+beta_n) return n_0, t_n def Na_prop(V): T = 22 phi = 3.0**((T-36)/10) V_ = V-(-50) alpha_m = 0.32*(13.0 - V_)/(tf.exp((13.0 - V_)/4.0) - 1.0) beta_m = 0.28*(V_ - 40.0)/(tf.exp((V_ - 40.0)/5.0) - 1.0) alpha_h = 0.128*tf.exp((17.0 - V_)/18.0) beta_h = 4.0/(tf.exp((40.0 - V_)/5.0) + 1.0) t_m = 1.0/((alpha_m+beta_m)*phi) t_h = 1.0/((alpha_h+beta_h)*phi) m_0 = alpha_m/(alpha_m+beta_m) h_0 = alpha_h/(alpha_h+beta_h) return m_0, t_m, h_0, t_h ``` ##### Step 3: Defining function that calculate Neuronal currents <img src="eqns2.png" alt="eqns2.png" width="600"/> ``` def I_K(V, n): return g_K * n**4 * (V - E_K) def I_Na(V, m, h): return g_Na * m**3 * h * (V - E_Na) def I_L(V): return g_L * (V - E_L) ``` ##### Step 4: Define the function dX/dt where X is the State Vector ``` def dXdt(X, t): V = X[0:1] m = X[1:2] h = X[2:3] n = X[3:4] dVdt = (5 - I_Na(V, m, h) - I_K(V, n) - I_L(V)) / C_m # Here the current injection I_injected = 5 uA m0,tm,h0,th = Na_prop(V) n0,tn = K_prop(V) dmdt = - (1.0/tm)*(m-m0) dhdt = - (1.0/th)*(h-h0) dndt = - (1.0/tn)*(n-n0) out = tf.concat([dVdt,dmdt,dhdt,dndt],0) return out ``` ##### Step 5: Define Initial Condition and Integrate ``` y0 = tf.constant([-71,0,0,0], dtype=tf.float64) epsilon = 0.01 t = np.arange(0,200,epsilon) state = odeint(dXdt,y0,t) with tf.Session() as sess: state = sess.run(state) ``` ##### Step 6: Plot Output ``` plt.style.use('seaborn-colorblind') plt.style.use('seaborn-ticks') ax = plt.subplot(111) ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() 
ax.grid(color='#DDDDDD', linestyle='--', linewidth=0.3) plt.plot(t,state.T[0,:]) plt.xlabel("Time (in ms)") plt.ylabel("Voltage (in mV)") fig = plt.gcf() fig.savefig("fig4.eps",format='eps') plt.show() ``` #### Simulating Multiple Independent HH Neurons at the Same Time Although, simulating a Single Hodgkin-Huxley Neuron is possible in TensorFlow, the real ability of tensorflow can be seen only when a large number of simultaneous diffential equations are to be solved at the the same time. Let's try to simulate 20 independent HH neurons with different input currents and characterise the firing rates. ##### Methods of Parallelization TensorFlow has the intrinsic ability to speed up any and all Tensor computations using available multi-cores, and GPU/TPU setups. There are two major parts of the code where TensorFlow can help us really speed up the computation: 1. **RK4 Steps:** Since the TensorFlow implementation of the Integrator utilizes Tensor calculations, TensorFlow will automatically speed it up. 2. **Functional Evaluations:** Looking at Dynamical Equations that describe the neuronal dynamics, its easy to notice that all simple HH Neurons share the same or atleast similar dynamical equations but will vary only in the values of parameters. We can exploit this to speed up the computations. Say $\vec{X}=[V,m,n,h]$ is the state vector of a single neuron and its dynamics are defined using parameters $C_m,g_K,...E_L$ equations of the form: $$\frac{d\vec{X}}{dt} = [f_1(\vec{X},C_m,g_K,...E_L),f_2(\vec{X},C_m,g_K,...E_L)...f_m(\vec{X},C_m,g_K,...E_L)]$$ We have to somehow convert these to a form in which all evaluations are done as vector calculations and NOT scalar calculations. So, what we need for a system of n neurons is to have a method to evaluate the updation of $\mathbf{X}=[\vec{X_1},\vec{X_2}...\vec{X_n}]$ where $\vec{X_i}=[V_1,m_1,n_1,h_1]$ is the state vector of the $i$th neuron. Now there is a simple trick that allows us to maximize the parallel processing. 
Each neuron represented by $\vec{X_i}$ has a distinct set of parameters and differential equations. Now, despite the parameters being different, the functional forms of the updation is similar for the same state variable for different neurons. Thus, the trick is to reorganize $\mathbf{X}$ as $\mathbf{X'}=[(V_1,V_2,...V_n),(m_1,m_2,...m_n),(h_1,h_2,...h_n),(n_1,n_2,...n_n)]=[\vec{V},\vec{m},\vec{h},\vec{n}]$. And the parameters as $\vec{C_m},\vec{g_K}$ and so on. Now that we know the trick, what is the benefit? Earlier, each state variable (say $V_i$) had a DE of the form: $$\frac{dV_i}{dt}=f(V_i,m_i,h_i,n_i,C_{m_i},g_{K_i}...)$$ This is now easily parallelizable using a vector computation of a form: $$\frac{d\vec{V}}{dt}=f(\vec{V},\vec{m},\vec{h},\vec{n},\vec{C_m},\vec{g_K}...)$$ Thus we can do the calculations as: $$\frac{d\mathbf{X'}}{dt}= \Big[\frac{d\vec{V}}{dt},\frac{d\vec{m}}{dt},\frac{d\vec{h}}{dt},\frac{d\vec{n}}{dt}\Big]$$ ``` n_n = 20 # number of simultaneous neurons to simulate # parameters will now become n_n-vectors C_m = [1.0]*n_n g_K = [10.0]*n_n E_K = [-95.0]*n_n g_Na = [100]*n_n E_Na = [50]*n_n g_L = [0.15]*n_n E_L = [-55.0]*n_n def K_prop(V): T = 22 phi = 3.0**((T-36.0)/10) V_ = V-(-50) alpha_n = 0.02*(15.0 - V_)/(tf.exp((15.0 - V_)/5.0) - 1.0) beta_n = 0.5*tf.exp((10.0 - V_)/40.0) t_n = 1.0/((alpha_n+beta_n)*phi) n_0 = alpha_n/(alpha_n+beta_n) return n_0, t_n def Na_prop(V): T = 22 phi = 3.0**((T-36)/10) V_ = V-(-50) alpha_m = 0.32*(13.0 - V_)/(tf.exp((13.0 - V_)/4.0) - 1.0) beta_m = 0.28*(V_ - 40.0)/(tf.exp((V_ - 40.0)/5.0) - 1.0) alpha_h = 0.128*tf.exp((17.0 - V_)/18.0) beta_h = 4.0/(tf.exp((40.0 - V_)/5.0) + 1.0) t_m = 1.0/((alpha_m+beta_m)*phi) t_h = 1.0/((alpha_h+beta_h)*phi) m_0 = alpha_m/(alpha_m+beta_m) h_0 = alpha_h/(alpha_h+beta_h) return m_0, t_m, h_0, t_h def I_K(V, n): return g_K * n**4 * (V - E_K) def I_Na(V, m, h): return g_Na * m**3 * h * (V - E_Na) def I_L(V): return g_L * (V - E_L) def dXdt(X, t): V = X[:1*n_n] # First n_n 
values are Membrane Voltage m = X[1*n_n:2*n_n] # Next n_n values are Sodium Activation Gating Variables h = X[2*n_n:3*n_n] # Next n_n values are Sodium Inactivation Gating Variables n = X[3*n_n:] # Last n_n values are Potassium Gating Variables dVdt = (np.linspace(0,10,n_n) - I_Na(V, m, h) - I_K(V, n) -I_L(V)) / C_m # Input current is linearly varied between 0 and 10 m0,tm,h0,th = Na_prop(V) n0,tn = K_prop(V) dmdt = - (1.0/tm)*(m-m0) dhdt = - (1.0/th)*(h-h0) dndt = - (1.0/tn)*(n-n0) out = tf.concat([dVdt,dmdt,dhdt,dndt],0) return out y0 = tf.constant([-71]*n_n+[0,0,0]*n_n, dtype=tf.float64) epsilon = 0.01 t = np.arange(0,200,epsilon) state = odeint(dXdt,y0,t) with tf.Session() as sess: state = sess.run(state) plt.style.use('seaborn-colorblind') plt.style.use('seaborn-ticks') plt.figure(figsize=(12,17)) for i in range(20): ax = plt.subplot(10,2,i+1) ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.grid(color='#DDDDDD', linestyle='--', linewidth=0.3) plt.plot(t,state[:,i]) plt.title("Injected Current = {:0.1f}".format(i/2)) plt.ylim([-90,60]) plt.xlabel("Time (in ms)") plt.ylabel("Voltage (in mV)") plt.tight_layout() fig = plt.gcf() fig.savefig("fig5.eps",format='eps') plt.show() ``` #### Quantifying the Firing Rates against Input Current One way to quantify the firing rate is to perform a fourier analysis and find peak frequency, but an easier way to find the rate is to see how many times it crosses a threshold say 0 mV in a given time, here it is for 200ms = 0.2s, and find the rate. 
``` plt.style.use('seaborn-colorblind') plt.style.use('seaborn-ticks') ax = plt.subplot(111) ax.spines["top"].set_visible(False) ax.spines["right"].set_visible(False) ax.get_xaxis().tick_bottom() ax.get_yaxis().tick_left() ax.grid(color='#DDDDDD', linestyle='--', linewidth=0.3) plt.plot(np.linspace(0,10,20),np.bitwise_and(state[:-1,:20]<0,state[1:,:20]>0).sum(axis=0)/0.2,"o") # plt.plot(np.linspace(0,10,20),np.bitwise_and(state[:-1,:20]<0,state[1:,:20]>0).sum(axis=0)/0.2,":") plt.xlabel("Injected Current(mA)") plt.ylabel("Firing Rate (Hz)") fig = plt.gcf() fig.savefig("fig6.eps",format='eps') plt.show() ```
github_jupyter
# Styling *New in version 0.17.1* <span style="color: red">*Provisional: This is a new feature and still under development. We'll be adding features and possibly making breaking changes in future releases. We'd love to hear your feedback.*</span> This document is written as a Jupyter Notebook, and can be viewed or downloaded [here](http://nbviewer.ipython.org/github/pandas-dev/pandas/blob/master/doc/source/style.ipynb). You can apply **conditional formatting**, the visual styling of a DataFrame depending on the data within, by using the ``DataFrame.style`` property. This is a property that returns a ``Styler`` object, which has useful methods for formatting and displaying DataFrames. The styling is accomplished using CSS. You write "style functions" that take scalars, `DataFrame`s or `Series`, and return *like-indexed* DataFrames or Series with CSS `"attribute: value"` pairs for the values. These functions can be incrementally passed to the `Styler` which collects the styles before rendering. ## Building Styles Pass your style functions into one of the following methods: - ``Styler.applymap``: elementwise - ``Styler.apply``: column-/row-/table-wise Both of those methods take a function (and some other keyword arguments) and applies your function to the DataFrame in a certain way. `Styler.applymap` works through the DataFrame elementwise. `Styler.apply` passes each column or row into your DataFrame one-at-a-time or the entire table at once, depending on the `axis` keyword argument. For columnwise use `axis=0`, rowwise use `axis=1`, and for the entire table at once use `axis=None`. For `Styler.applymap` your function should take a scalar and return a single string with the CSS attribute-value pair. For `Styler.apply` your function should take a Series or DataFrame (depending on the axis parameter), and return a Series or DataFrame with an identical shape where each value is a string with a CSS attribute-value pair. Let's see some examples. 
``` import matplotlib.pyplot # We have this here to trigger matplotlib's font cache stuff. # This cell is hidden from the output import pandas as pd import numpy as np np.random.seed(24) df = pd.DataFrame({'A': np.linspace(1, 10, 10)}) df = pd.concat([df, pd.DataFrame(np.random.randn(10, 4), columns=list('BCDE'))], axis=1) df.iloc[0, 2] = np.nan ``` Here's a boring example of rendering a DataFrame, without any (visible) styles: ``` df.style ``` *Note*: The `DataFrame.style` attribute is a property that returns a `Styler` object. `Styler` has a `_repr_html_` method defined on it so they are rendered automatically. If you want the actual HTML back for further processing or for writing to file call the `.render()` method which returns a string. The above output looks very similar to the standard DataFrame HTML representation. But we've done some work behind the scenes to attach CSS classes to each cell. We can view these by calling the `.render` method. ``` df.style.highlight_null().render().split('\n')[:10] ``` The `row0_col2` is the identifier for that particular cell. We've also prepended each row/column identifier with a UUID unique to each DataFrame so that the style from one doesn't collide with the styling from another within the same notebook or page (you can set the `uuid` if you'd like to tie together the styling of two DataFrames). When writing style functions, you take care of producing the CSS attribute / value pairs you want. Pandas matches those up with the CSS classes that identify each cell. Let's write a simple style function that will color negative numbers red and positive numbers black. ``` def color_negative_red(val): """ Takes a scalar and returns a string with the css property `'color: red'` for negative strings, black otherwise. """ color = 'red' if val < 0 else 'black' return 'color: %s' % color ``` In this case, the cell's style depends only on it's own value. That means we should use the `Styler.applymap` method which works elementwise. 
``` s = df.style.applymap(color_negative_red) s ``` Notice the similarity with the standard `df.applymap`, which operates on DataFrames elementwise. We want you to be able to reuse your existing knowledge of how to interact with DataFrames. Notice also that our function returned a string containing the CSS attribute and value, separated by a colon just like in a `<style>` tag. This will be a common theme. Finally, the input shapes matched. `Styler.applymap` calls the function on each scalar input, and the function returns a scalar output. Now suppose you wanted to highlight the maximum value in each column. We can't use `.applymap` anymore since that operated elementwise. Instead, we'll turn to `.apply` which operates columnwise (or rowwise using the `axis` keyword). Later on we'll see that something like `highlight_max` is already defined on `Styler` so you wouldn't need to write this yourself. ``` def highlight_max(s): ''' highlight the maximum in a Series yellow. ''' is_max = s == s.max() return ['background-color: yellow' if v else '' for v in is_max] df.style.apply(highlight_max) ``` In this case the input is a `Series`, one column at a time. Notice that the output shape of `highlight_max` matches the input shape, an array with `len(s)` items. We encourage you to use method chains to build up a style piecewise, before finally rending at the end of the chain. ``` df.style.\ applymap(color_negative_red).\ apply(highlight_max) ``` Above we used `Styler.apply` to pass in each column one at a time. <span style="background-color: #DEDEBE">*Debugging Tip*: If you're having trouble writing your style function, try just passing it into <code style="background-color: #DEDEBE">DataFrame.apply</code>. Internally, <code style="background-color: #DEDEBE">Styler.apply</code> uses <code style="background-color: #DEDEBE">DataFrame.apply</code> so the result should be the same.</span> What if you wanted to highlight just the maximum value in the entire table? 
Use `.apply(function, axis=None)` to indicate that your function wants the entire table, not one column or row at a time. Let's try that next. We'll rewrite our `highlight-max` to handle either Series (from `.apply(axis=0 or 1)`) or DataFrames (from `.apply(axis=None)`). We'll also allow the color to be adjustable, to demonstrate that `.apply`, and `.applymap` pass along keyword arguments. ``` def highlight_max(data, color='yellow'): ''' highlight the maximum in a Series or DataFrame ''' attr = 'background-color: {}'.format(color) if data.ndim == 1: # Series from .apply(axis=0) or axis=1 is_max = data == data.max() return [attr if v else '' for v in is_max] else: # from .apply(axis=None) is_max = data == data.max().max() return pd.DataFrame(np.where(is_max, attr, ''), index=data.index, columns=data.columns) ``` When using ``Styler.apply(func, axis=None)``, the function must return a DataFrame with the same index and column labels. ``` df.style.apply(highlight_max, color='darkorange', axis=None) ``` ### Building Styles Summary Style functions should return strings with one or more CSS `attribute: value` delimited by semicolons. Use - `Styler.applymap(func)` for elementwise styles - `Styler.apply(func, axis=0)` for columnwise styles - `Styler.apply(func, axis=1)` for rowwise styles - `Styler.apply(func, axis=None)` for tablewise styles And crucially the input and output shapes of `func` must match. If `x` is the input then ``func(x).shape == x.shape``. ## Finer Control: Slicing Both `Styler.apply`, and `Styler.applymap` accept a `subset` keyword. This allows you to apply styles to specific rows or columns, without having to code that logic into your `style` function. The value passed to `subset` behaves simlar to slicing a DataFrame. - A scalar is treated as a column label - A list (or series or numpy array) - A tuple is treated as `(row_indexer, column_indexer)` Consider using `pd.IndexSlice` to construct the tuple for the last one. 
``` df.style.apply(highlight_max, subset=['B', 'C', 'D']) ``` For row and column slicing, any valid indexer to `.loc` will work. ``` df.style.applymap(color_negative_red, subset=pd.IndexSlice[2:5, ['B', 'D']]) ``` Only label-based slicing is supported right now, not positional. If your style function uses a `subset` or `axis` keyword argument, consider wrapping your function in a `functools.partial`, partialing out that keyword. ```python my_func2 = functools.partial(my_func, subset=42) ``` ## Finer Control: Display Values We distinguish the *display* value from the *actual* value in `Styler`. To control the display value, the text is printed in each cell, use `Styler.format`. Cells can be formatted according to a [format spec string](https://docs.python.org/3/library/string.html#format-specification-mini-language) or a callable that takes a single value and returns a string. ``` df.style.format("{:.2%}") ``` Use a dictionary to format specific columns. ``` df.style.format({'B': "{:0<4.0f}", 'D': '{:+.2f}'}) ``` Or pass in a callable (or dictionary of callables) for more flexible handling. ``` df.style.format({"B": lambda x: "±{:.2f}".format(abs(x))}) ``` ## Builtin Styles Finally, we expect certain styling functions to be common enough that we've included a few "built-in" to the `Styler`, so you don't have to write them yourself. ``` df.style.highlight_null(null_color='red') ``` You can create "heatmaps" with the `background_gradient` method. These require matplotlib, and we'll use [Seaborn](http://stanford.edu/~mwaskom/software/seaborn/) to get a nice colormap. ``` import seaborn as sns cm = sns.light_palette("green", as_cmap=True) s = df.style.background_gradient(cmap=cm) s ``` `Styler.background_gradient` takes the keyword arguments `low` and `high`. Roughly speaking these extend the range of your data by `low` and `high` percent so that when we convert the colors, the colormap's entire range isn't used. 
This is useful so that you can actually read the text still. ``` # Uses the full color range df.loc[:4].style.background_gradient(cmap='viridis') # Compress the color range (df.loc[:4] .style .background_gradient(cmap='viridis', low=.5, high=0) .highlight_null('red')) ``` There's also `.highlight_min` and `.highlight_max`. ``` df.style.highlight_max(axis=0) ``` Use `Styler.set_properties` when the style doesn't actually depend on the values. ``` df.style.set_properties(**{'background-color': 'black', 'color': 'lawngreen', 'border-color': 'white'}) ``` ### Bar charts You can include "bar charts" in your DataFrame. ``` df.style.bar(subset=['A', 'B'], color='#d65f5f') ``` New in version 0.20.0 is the ability to customize further the bar chart: You can now have the `df.style.bar` be centered on zero or midpoint value (in addition to the already existing way of having the min value at the left side of the cell), and you can pass a list of `[color_negative, color_positive]`. Here's how you can change the above with the new `align='mid'` option: ``` df.style.bar(subset=['A', 'B'], align='mid', color=['#d65f5f', '#5fba7d']) ``` The following example aims to give a highlight of the behavior of the new align options: ``` import pandas as pd from IPython.display import HTML # Test series test1 = pd.Series([-100,-60,-30,-20], name='All Negative') test2 = pd.Series([10,20,50,100], name='All Positive') test3 = pd.Series([-10,-5,0,90], name='Both Pos and Neg') head = """ <table> <thead> <th>Align</th> <th>All Negative</th> <th>All Positive</th> <th>Both Neg and Pos</th> </thead> </tbody> """ aligns = ['left','zero','mid'] for align in aligns: row = "<tr><th>{}</th>".format(align) for serie in [test1,test2,test3]: s = serie.copy() s.name='' row += "<td>{}</td>".format(s.to_frame().style.bar(align=align, color=['#d65f5f', '#5fba7d'], width=100).render()) #testn['width'] row += '</tr>' head += row head+= """ </tbody> </table>""" HTML(head) ``` ## Sharing Styles Say you have a lovely 
style built up for a DataFrame, and now you want to apply the same style to a second DataFrame. Export the style with `df1.style.export`, and import it on the second DataFrame with `df1.style.set` ``` df2 = -df style1 = df.style.applymap(color_negative_red) style1 style2 = df2.style style2.use(style1.export()) style2 ``` Notice that you're able share the styles even though they're data aware. The styles are re-evaluated on the new DataFrame they've been `use`d upon. ## Other Options You've seen a few methods for data-driven styling. `Styler` also provides a few other options for styles that don't depend on the data. - precision - captions - table-wide styles - hiding the index or columns Each of these can be specified in two ways: - A keyword argument to `Styler.__init__` - A call to one of the `.set_` or `.hide_` methods, e.g. `.set_caption` or `.hide_columns` The best method to use depends on the context. Use the `Styler` constructor when building many styled DataFrames that should all share the same properties. For interactive use, the`.set_` and `.hide_` methods are more convenient. ### Precision You can control the precision of floats using pandas' regular `display.precision` option. ``` with pd.option_context('display.precision', 2): html = (df.style .applymap(color_negative_red) .apply(highlight_max)) html ``` Or through a `set_precision` method. ``` df.style\ .applymap(color_negative_red)\ .apply(highlight_max)\ .set_precision(2) ``` Setting the precision only affects the printed number; the full-precision values are always passed to your style functions. You can always use `df.round(2).style` if you'd prefer to round from the start. ### Captions Regular table captions can be added in a few ways. ``` df.style.set_caption('Colormaps, with a caption.')\ .background_gradient(cmap=cm) ``` ### Table Styles The next option you have are "table styles". These are styles that apply to the table as a whole, but don't look at the data. 
Certain stylings, including pseudo-selectors like `:hover`, can only be used this way.
- Index and Column names include `index_name` and `level<k>` where `k` is its level in a MultiIndex - Index label cells include + `row_heading` + `row<n>` where `n` is the numeric position of the row + `level<k>` where `k` is the level in a MultiIndex - Column label cells include + `col_heading` + `col<n>` where `n` is the numeric position of the column + `level<k>` where `k` is the level in a MultiIndex - Blank cells include `blank` - Data cells include `data` ### Limitations - DataFrame only `(use Series.to_frame().style)` - The index and columns must be unique - No large repr, and performance isn't great; this is intended for summary DataFrames - You can only style the *values*, not the index or columns - You can only apply styles, you can't insert new HTML entities Some of these will be addressed in the future. ### Terms - Style function: a function that's passed into `Styler.apply` or `Styler.applymap` and returns values like `'css attribute: value'` - Builtin style functions: style functions that are methods on `Styler` - table style: a dictionary with the two keys `selector` and `props`. `selector` is the CSS selector that `props` will apply to. `props` is a list of `(attribute, value)` tuples. A list of table styles passed into `Styler`. ## Fun stuff Here are a few interesting examples. `Styler` interacts pretty well with widgets. If you're viewing this online instead of running the notebook yourself, you're missing out on interactively adjusting the color palette. 
``` from IPython.html import widgets @widgets.interact def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)): return df.style.background_gradient( cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l, as_cmap=True) ) def magnify(): return [dict(selector="th", props=[("font-size", "4pt")]), dict(selector="td", props=[('padding', "0em 0em")]), dict(selector="th:hover", props=[("font-size", "12pt")]), dict(selector="tr:hover td:hover", props=[('max-width', '200px'), ('font-size', '12pt')]) ] np.random.seed(25) cmap = cmap=sns.diverging_palette(5, 250, as_cmap=True) bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum() bigdf.style.background_gradient(cmap, axis=1)\ .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\ .set_caption("Hover to magnify")\ .set_precision(2)\ .set_table_styles(magnify()) ``` ## Export to Excel *New in version 0.20.0* <span style="color: red">*Experimental: This is a new feature and still under development. We'll be adding features and possibly making breaking changes in future releases. We'd love to hear your feedback.*</span> Some support is available for exporting styled `DataFrames` to Excel worksheets using the `OpenPyXL` or `XlsxWriter` engines. CSS2.2 properties handled include: - `background-color` - `border-style`, `border-width`, `border-color` and their {`top`, `right`, `bottom`, `left` variants} - `color` - `font-family` - `font-style` - `font-weight` - `text-align` - `text-decoration` - `vertical-align` - `white-space: nowrap` Only CSS2 named colors and hex colors of the form `#rgb` or `#rrggbb` are currently supported. ``` df.style.\ applymap(color_negative_red).\ apply(highlight_max).\ to_excel('styled.xlsx', engine='openpyxl') ``` A screenshot of the output: ![Excel spreadsheet with styled DataFrame](_static/style-excel.png) ## Extensibility The core of pandas is, and will remain, its "high-performance, easy-to-use data structures". 
With that in mind, we hope that `DataFrame.style` accomplishes two goals - Provide an API that is pleasing to use interactively and is "good enough" for many tasks - Provide the foundations for dedicated libraries to build on If you build a great library on top of this, let us know and we'll [link](http://pandas.pydata.org/pandas-docs/stable/ecosystem.html) to it. ### Subclassing If the default template doesn't quite suit your needs, you can subclass Styler and extend or override the template. We'll show an example of extending the default template to insert a custom header before each table. ``` from jinja2 import Environment, ChoiceLoader, FileSystemLoader from IPython.display import HTML from pandas.io.formats.style import Styler %mkdir templates ``` This next cell writes the custom template. We extend the template `html.tpl`, which comes with pandas. ``` %%file templates/myhtml.tpl {% extends "html.tpl" %} {% block table %} <h1>{{ table_title|default("My Table") }}</h1> {{ super() }} {% endblock table %} ``` Now that we've created a template, we need to set up a subclass of ``Styler`` that knows about it. ``` class MyStyler(Styler): env = Environment( loader=ChoiceLoader([ FileSystemLoader("templates"), # contains ours Styler.loader, # the default ]) ) template = env.get_template("myhtml.tpl") ``` Notice that we include the original loader in our environment's loader. That's because we extend the original template, so the Jinja environment needs to be able to find it. Now we can use that custom styler. It's `__init__` takes a DataFrame. ``` MyStyler(df) ``` Our custom template accepts a `table_title` keyword. We can provide the value in the `.render` method. ``` HTML(MyStyler(df).render(table_title="Extending Example")) ``` For convenience, we provide the `Styler.from_custom_template` method that does the same as the custom subclass. 
``` EasyStyler = Styler.from_custom_template("templates", "myhtml.tpl") EasyStyler(df) ``` Here's the template structure: ``` with open("template_structure.html") as f: structure = f.read() HTML(structure) ``` See the template in the [GitHub repo](https://github.com/pandas-dev/pandas) for more details. ``` # Hack to get the same style in the notebook as the # main site. This is hidden in the docs. from IPython.display import HTML with open("themes/nature_with_gtoc/static/nature.css_t") as f: css = f.read() HTML('<style>{}</style>'.format(css)) ```
github_jupyter
## Engineering Rare Categories Rare values are categories within a categorical variable that are present only in a small percentage of the observations. There is no rule of thumb to determine how small is a small percentage, but typically, any value below 5 % can be considered rare. As we discussed in section 3 of the course, Infrequent labels are so few, that it is hard to derive reliable information from them. But more importantly, if you remember from section 3, infrequent labels tend to appear only on train set or only on the test set: - If only on the train set, they may cause over-fitting - If only on the test set, our machine learning model will not know how to score them Therefore, to avoid this behaviour, we tend to group those into a new category called 'Rare' or 'Other'. Rare labels can appear in low or highly cardinal variables. There is no rule of thumb to determine how many different labels are considered high cardinality. It depend as well on how many observations there are in the dataset. In a dataset with 1,000 observations, 100 labels may seem a lot, whereas in a dataset with 100,000 observations it may not be so high. Highly cardinal variables tend to have many infrequent or rare categories, whereas low cardinal variables, may have only 1 or 2 rare labels. ### Note the following: **Note that grouping infrequent labels or categories under a new category called 'Rare' or 'Other' is the common practice in machine learning for business.** - Grouping categories into rare for variables that show low cardinality may or may not improve model performance, however, we tend to re-group them into a new category to smooth model deployment. - Grouping categories into rare for variables with high cardinality, tends to improve model performance as well. 
## In this demo: We will learn how to re-group rare labels under a new category called rare, and compare the implications of this encoding in variables with: - One predominant category - A small number of categories - High cardinality For this demo, we will use the House Sale dataset. We will re-group variables using pandas and feature-engine. ``` import pandas as pd import numpy as np import matplotlib.pyplot as plt # to split the datasets from sklearn.model_selection import train_test_split pd.set_option('display.max_columns', None) # to display the total number of columns present in the dataset ``` ## House Sale Price dataset ``` # let's load the house price dataset data = pd.read_csv('../houseprice.csv') data.head() ``` ### Important The identification of rare labels should be done using only the training set, and then propagated to the test set. Rare labels should be identified in the training set only. In practice, what we will do is identify **non-rare labels**, and then any other label, either in the train or the test or future live data that is not in that list of **non-rare** labels, will be re-grouped into the new category. For example, let's imagine that we have in the training set the variable 'city' with the labels 'London', 'Manchester' and 'Yorkshire'. 'Yorkshire' is present in less than 5% of the observations so we decide to re-group it in a new category called 'Rare'. In the test set, we should also replace 'Yorkshire' by 'Rare', regardless of the percentage of observations for 'Yorkshire' in the test set. In addition, if in the test set we find the category 'Milton Keynes', that was not present in the training set, we should also replace that category by 'Rare'. In other words, all categories present in the test set, not present in the list of **non-rare** categories derived from the training set, should be treated as rare values and re-grouped into 'Rare'. 
``` # let's divide into train and test set X_train, X_test, y_train, y_test = train_test_split( data.drop(labels=['SalePrice'], axis=1), # predictors data.SalePrice, # target test_size=0.3, random_state=0) X_train.shape, X_test.shape ``` ## Variables with one dominant category ``` # let's explore a few examples in which variables have only a few categories, say less than 3 for col in X_train.columns: if X_train[col].dtypes == 'O': # if the variable is categorical if X_train[col].nunique() < 3: # if the variable has less than 3 categories # print percentage of observations per category print(X_train.groupby(col)[col].count() / len(X_train)) print() ``` ### Conclusion The 3 variables above, Street, Utilities and CentralAir, show one dominating category which accounts for more than 93-99% of the observations. Re-grouping the rare label in this situation does not make any sense. We could determine if these variables are useful with exploratory analysis, or any feature selection algorithm, or drop the variables altogether. ## Variables with few categories ``` # the columns in the below list have only 4 different labels cols = ['MasVnrType', 'ExterQual', 'BsmtCond'] for col in cols: print(X_train.groupby(col)[col].count() / len(X_train)) # frequency print() ``` The variables above have only 4 categories, and in all three cases, there is at least one category that is infrequent, that is, that is present in less than 5% of the observations. When the variable has only a few categories, then perhaps it makes no sense to re-categorise the rare labels into something else. For example the first variable MasVnrType shows only 1 rare label, BrkCmn. Thus, re-categorising it into a new label will leave the variable in the same situation. The second variable ExterQual, contains 2 rare labels Ex and Fa, we could group these 2 into a new label called 'Rare'. The third variable BsmtCond contains 3 rare labels, Fa, Gd and Po, so we could group these 3 under the new label 'Rare'. 
## Variable with high cardinality ``` # let's explore examples in which variables have several categories, say more than 10 multi_cat_cols = [] for col in X_train.columns: if X_train[col].dtypes =='O': # if variable is categorical if X_train[col].nunique() > 10: # and has more than 10 categories multi_cat_cols.append(col) # add to the list print(X_train.groupby(col)[col].count()/ len(X_train)) # and print the percentage of observations within each category print() ``` We can see that many categories are rare in the 3 categorical variables printed above. In fact, we can plot them using the same code we learned in the lecture on rare labels in section 3: ``` for col in ['Neighborhood', 'Exterior1st', 'Exterior2nd']: temp_df = pd.Series(X_train[col].value_counts() / len(X_train) ) # make plot with the above percentages fig = temp_df.sort_values(ascending=False).plot.bar() fig.set_xlabel(col) # add a line at 5 % to flag the threshold for rare categories fig.axhline(y=0.05, color='red') fig.set_ylabel('Percentage of houses') plt.show() ``` ## Re-grouping rare labels with pandas ``` def find_non_rare_labels(df, variable, tolerance): temp = df.groupby([variable])[variable].count() / len(df) non_rare = [x for x in temp.loc[temp>tolerance].index.values] return non_rare # non rare labels find_non_rare_labels(X_train, 'Neighborhood', 0.05) # rare labels [x for x in X_train['Neighborhood'].unique( ) if x not in find_non_rare_labels(X_train, 'Neighborhood', 0.05)] def rare_encoding(X_train, X_test, variable, tolerance): X_train = X_train.copy() X_test = X_test.copy() # find the most frequent category frequent_cat = find_non_rare_labels(X_train, variable, tolerance) # re-group rare labels X_train[variable] = np.where(X_train[variable].isin( frequent_cat), X_train[variable], 'Rare') X_test[variable] = np.where(X_test[variable].isin( frequent_cat), X_test[variable], 'Rare') return X_train, X_test for variable in ['Neighborhood', 'Exterior1st', 'Exterior2nd']: X_train, X_test = 
rare_encoding(X_train, X_test, variable, 0.05) for col in ['Neighborhood', 'Exterior1st', 'Exterior2nd']: temp_df = pd.Series(X_train[col].value_counts() / len(X_train) ) # make plot with the above percentages fig = temp_df.sort_values(ascending=False).plot.bar() fig.set_xlabel(col) # add a line at 5 % to flag the threshold for rare categories fig.axhline(y=0.05, color='red') fig.set_ylabel('Percentage of houses') plt.show() ``` And now let's encode the low cardinal variables. ``` for variable in ['MasVnrType', 'ExterQual', 'BsmtCond']: X_train, X_test = rare_encoding(X_train, X_test, variable, 0.05) for col in ['MasVnrType', 'ExterQual', 'BsmtCond']: temp_df = pd.Series(X_train[col].value_counts() / len(X_train) ) # make plot with the above percentages fig = temp_df.sort_values(ascending=False).plot.bar() fig.set_xlabel(col) # add a line at 5 % to flag the threshold for rare categories fig.axhline(y=0.05, color='red') fig.set_ylabel('Percentage of houses') plt.show() ``` ## Encoding Rare Labels with Feature-Engine ``` from feature_engine.categorical_encoders import RareLabelCategoricalEncoder # let's divide into train and test set X_train, X_test, y_train, y_test = train_test_split( data.drop(labels=['SalePrice'], axis=1), # predictors data.SalePrice, # target test_size=0.3, random_state=0) X_train.shape, X_test.shape # Rare value encoder rare_encoder = RareLabelCategoricalEncoder( tol=0.05, # minimal percentage to be considered non-rare n_categories=4, # minimal number of categories the variable should have to re-cgroup rare categories variables=['Neighborhood', 'Exterior1st', 'Exterior2nd', 'MasVnrType', 'ExterQual', 'BsmtCond'] # variables to re-group ) rare_encoder.fit(X_train.fillna('Missing')) ``` Note how the encoder is warning us that the variable **ExterQual** contains less than 4 categories, and thus, categories will not be regrouped under Rare, even if the percentage of observations is less than 0.05. 
``` rare_encoder.variables # the encoder_dict_ is a dictionary of variable: frequent labels pair rare_encoder.encoder_dict_ X_train = rare_encoder.transform(X_train.fillna('Missing')) X_test = rare_encoder.transform(X_test.fillna('Missing')) ```
github_jupyter
# Projet Morpion par *Hélène et Victoria* ; Bugnon Ours, oc.info 2018/2019 Morpion est un jeux simple qui se joue sur un cadrillage 3x3. Le but est d'aligner 3 jetons en colonne, ligne au en diagonale. Le jeux est joué sur la plateforme SenseHAT pour le Raspberry Pi. Dans ce notebook, des fragment de code sont expliqué. Parfois le résultat apparait sur le SenseHAT et parfois dans la cellule Out. Pour utiliser SenseHAT, il faut importer le module SensHAT pour avoir accès aux fonctions. Il faut créer une instance SenseHat() pour accéder aux méthodes. ``` from sense_hat import SenseHat from time import sleep, time sense = SenseHat() ``` Ensuite nous définissons les variables : les couleurs qui représentent le player 1 par exemple. ``` X = (255, 255, 255) O = (0, 0, 0) P1 = (0, 0, 255) P2 = (255, 255, 0) colors = (O, P1, P2) score1 = 0 score2 = 0 print(colors) ``` ### Définition init() * Nous devons faire une défnition initation, où nous définissons l'état initial du jeu qui sera modifié par les joueurs. * Pour que nos arguments appelés *state*, *board*, *state_to_board* soit utilisables dans tous le code, il fat utiliser le mot-clé **global**. Ensuite nous créeons la grille sur lequel a lieux le jeu qui est *board*. Puis nous créeons une matrice dont 4 pixels représentent un élément de la matrice *state* 3x3, sans oublier de bien faire les sauts qui correspondent au cadrillage de la grille. 
``` def init(): global state global board global state_to_board board = [ O, O, X, O, O, X, O, O, O, O, X, O, O, X, O, O, X, X, X, X, X, X, X, X, O, O, X, O, O, X, O, O, O, O, X, O, O, X, O, O, X, X, X, X, X, X, X, X, O, O, X, O, O, X, O, O, O, O, X, O, O, X, O, O, ] state_to_board = [[(0, 1, 8, 9), (3, 4, 11, 12), (6, 7, 14, 15)], [(24, 25, 32, 33), (27, 28, 35, 36), (30, 31, 38, 39)], [(48, 49, 56, 57), (51, 52, 59, 60), (54, 55, 62, 63)]] state = [[0, 0, 0], [0, 0, 0], [0, 0, 0]] board = [ O, O, X, O, O, X, O, O, O, O, X, O, O, X, O, O, X, X, X, X, X, X, X, X, O, O, X, O, O, X, O, O, O, O, X, O, O, X, O, O, X, X, X, X, X, X, X, X, O, O, X, O, O, X, O, O, O, O, X, O, O, X, O, O, ] board[:3] ``` Nous voyons qu'il imprime que les trois premieres cases du *board*. Voici à quoi ressemble notre *board* à l'état initial. ![img](../../games/morpion/img/IMG_0684.jpg) ### Définition Show_board(board, state) Cette définition nous permet d'afficher les différents états du jeu. Nous parcourons, les liste *state* et *state_to_board* pour aquérir à l'état de nos quatres pixels qui forment une case (soit les coordonées). Puis nous faisons correspondre son état, qui est 0,1,2 au position de la liste *colors* qui permettront d'afficher l'état du jeu par exemple : jaune qui indique que c'est le player 2 qui a selectionné la case. 
``` def show_board(board, state): for y in range(len(state)): for x, s in enumerate(state[y]): c = colors[s] for index in state_to_board[y][x]: board[index] = c sense.set_pixels(board) show_board(board, state) board = [ 1, 1, X, O, O, X, O, O, 1, 1, X, O, O, X, O, O, X, X, X, X, X, X, X, X, O, O, X, O, O, X, O, O, O, O, X, O, O, X, O, O, X, X, X, X, X, X, X, X, O, O, X, O, O, X, O, O, O, O, X, O, O, X, O, O, ] state = [[1, 0, 0], [0, 0, 0], [0, 0, 0]] def show_board(board, state): for y in range(len(state)): for x, s in enumerate(state[y]): c = colors[s] for index in state_to_board[y][x]: board[index] = c sense.set_pixels(board) show_board(board, state) ``` ### Définition is_winning(p,state) Nous définissons tous les cas ou un joueur est gagant. Pour cela il faut que l'état de trois cases selon les règles du jeu de morpion est le même état. Si c'est le cas la fonction retoune **True**, autrement **False**. ``` def is_winning(p, state): return state[0][0] == state[0][1] == state[0][2] == p or \ state[1][0] == state[1][1] == state[1][2] == p or \ state[2][0] == state[2][1] == state[2][2] == p or \ state[0][0] == state[1][0] == state[2][0] == p or \ state[0][1] == state[1][1] == state[2][1] == p or \ state[0][2] == state[1][2] == state[2][2] == p or \ state[0][0] == state[1][1] == state[2][2] == p or \ state[0][2] == state[1][1] == state[2][0] == p is_winning(1, state) state = [[1, 1, 1], [0, 0, 0], [0, 0, 0]] show_board(board, state) is_winning(2, state) ``` ### Définitions is_draw(state) Si les cas de is_winnig(p,state) n'apparaissent pas dans le jeu, et que toutes les cases sont à l'état 1 ou 2 alors c'est un match nul. Si une case est à l'état 0 la fonction retourne **False**, autrement elle retourne **True**. ``` def is_draw(state): for i in state: for s in i: if s == 0: return False return True state = [[1,2,1],[1,2,2],[2,1,1]] is_draw(state) ``` ### Définition play(p, board, state) * Grâce à cette fonction nous définissons les paramètres du joueur. 
* D'abord nous définissons les paramètres du curseurs à l'état initial ((x,y) = (1,1)). Mais à l'état initial aucun joueur n'a encore joué donc la couleur du curseur est divisé par deux.Nous faisons cela en parcourant la liste state_to_board qui va donc prendre chaque état des quatres pixels pour ensuite reparcourir la liste colors pour afficher l'état correspondant mais en divisant la couleur par deux. * Ensuite nous lançons une boucle qui attend un évènement de la bibliothèque *directions* pour modifier les coordonnées du curseur. Lorsqu'un évènment est préssé il ajoute la coordonées à celles initinales, mais les coordonées ne doivent pas etre plus "grandes" que notre matrice nous faisons donc un modulo. Les coordonées sont donc modifiées mais pas forcément selectionnées pour de bon donc la case apparait encore d'une intensité de couleur diminuée. * Il ne reste plus qu'a ajouter un paramètre : celui qui ne permet pas de modifier un état losque celui ci n'est pas égal à 0 . ``` def play(p,board, state): (x, y) = (1, 1) dirs = {'up':(0, -1), 'down':(0, 1), 'right':(1, 0), 'left':(-1, 0)} c = tuple(int(x/2) for x in colors[p]) for index in state_to_board[y][x]: board[index] = c sense.set_pixels(board) while True : event = sense.stick.wait_for_event() if event.action == 'pressed': if event.direction in dirs: (dx, dy) = dirs[event.direction] x = (x + dx) % len(state) y = (y + dy) % len(state) show_board(board, state) # eviter de colorier le chemin c = tuple(int(x/2) for x in colors[p]) for index in state_to_board[y][x]: board[index] = c sense.set_pixels(board) else: if state[y][x] == 0: state[y][x] = p show_board(board, state) return play(p, board, state) ``` Voici ce qui se passe visuellement losrqu'une partie est en cours.Ici le joeur *bleu* a finit de jouer, c'est pour cela que la case central apparait en jaune claire : le joueur n'a pas encore changer les coordonées et n'a pas préssé sur le joystick. 
![img](../../games/morpion/img/IMG_0682.jpg) ### Dénition show_score(p) Si les player 1 ou 2 ont gagné leur score est incrémenté de 1 et s'affiche à la fin de la partie. ``` def show_score(p): global score1, score2 if p == 1: score1 += 1 elif p == 2: score2 += 1 msg = 'player1=' + str(score1) + ' player2=' + str(score2) sense.show_message(msg) p = 1 score1 = 0 show_score(p) ``` ### Définition end_game(p) Lorsque aucun joueur gagne, on affiche le message *draw* autrement c'est le numéro du joueur gagant qui est affiché, puis le score s'affiche. Pour relancer la partie le message *?* apparait pendant 3 secondes, il faut donc appuyer sur le joystick pour recommencer la partie. ``` def end_game(p): if p == 0: sense.show_message("draw") else: sense.show_letter(str(p)) sleep(3) show_score(p) return continue_game() p = 1 end_game(p) ``` Voici ce qui se passe visuellement lorsqu'il affiche le gagnant à la fin du jeu : ![img](../../games/morpion/img/IMG_0683.jpg) ### Définition continue_game() Si pendant 3 secondes aucune action est faite avcec le joystick, alors le jeu s'éteind (**False**), autrement il rejoue une partie (**True**). ``` def continue_game(): sense.show.letter('?') sense.stick.get.events() t0 = time() while time() < t0 + 3: for event in sense.stick.get_events(): init() show_board(board, sate) print('continue') return True print('timeout') return False ``` Voici ce qui se passe visuellement losrque La fonction demande pour continuer le jeu : ![img](../../games/morpion/img/IMG_0685.jpg) ### Définition Main() Tout d'abord il regarde si l'état du jeu correspond à un état gagnant ou a un match nul. Puis il regarde si la réponse de la définition continue_game est True ou False pour refaire une partie. Pour intervertir les joueurs entre 1 et 2 nous utilisons l'astuce `player = 3 - player`. 
``` def main(): init() show_board(board, state) player = 1 playing = True while playing: play(player, board, state) if is_winning(player, state): playing = end_game(player) elif is_draw(state): playing = end_game(0) player = 3 - player ```
github_jupyter
# K-Nearest Neighbors Algorithm * Last class, we introduced the probabilistic generative classifier. * As discussed, the probabilistic generative classifier requires us to assume a parametric form for each class (e.g., each class is represented by a multi-variate Gaussian distribution, etc..). Because of this, the probabilistic generative classifier is a *parametric* approach * Parametric approaches have the drawback that the functional parametric form needs to be decided/assumed in advance and, if chosen poorly, might be a poor model of the distribution that generates the data resulting in poor performance. * Non-parametric methods are those that do not assume a particular generating distribution for the data. The $K$-nearest neighbors algorithm is one example of a non-parametric classifier. * Nearest neighbor methods compare a test point to the $k$ nearest training data points and then estimate an output value based on the desired/true output values of the $k$ nearest training points * Essentially, there is no ''training'' other than storing the training data points and their desired outputs * In test, you need to: (1) determine which $k$ training data points are closest to the test point; and (2) determine the output value for the test point * In order to find the $k$ nearest neighbors in the training data, you need to define a *similarity measure* or a *dissimilarity measure*. The most common dissimilarity measure is Euclidean distance. * Euclidean distance: $d_E = \sqrt{\left(\mathbf{x}_1-\mathbf{x}_2\right)^T\left(\mathbf{x}_1-\mathbf{x}_2\right)}$ * City block distance: $d_C = \sum_{i=1}^d \left| x_{1i} - x_{2i} \right|$ * Mahalanobis distance: $\left(\mathbf{x}_1-\mathbf{x}_2\right)^T\Sigma^{-1}\left(\mathbf{x}_1-\mathbf{x}_2\right)$ * Geodesic distance * Cosine angle similarity: $\cos \theta = \frac{\mathbf{x}_1^T\mathbf{x}_2}{\left\|\mathbf{x}_1\right\|_2^2\left\|\mathbf{x}_2\right\|_2^2}$ * and many more... 
* If you are doing classification, once you find the $k$ nearest neighbors to your test point in the training data, then you can determine the class label of your test point using (most commonly) *majority vote* * If there are ties, they can be broken randomly or using schemes like applying the label of the closest data point in the neighborhood * Of course, there are MANY modifications you can make to this. A common one is to weight the votes of each of the nearest neighbors by their distance/similarity measure value. If they are closer, they get more weight. ``` # Reference for some code: http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html import numpy as np from matplotlib import pyplot as plt from matplotlib.colors import ListedColormap from sklearn.model_selection import train_test_split from sklearn.datasets import make_moons, make_circles, make_classification from sklearn import neighbors %matplotlib inline #figure params h = .02 # step size in the mesh figure = plt.figure(figsize=(17, 9)) #set up classifiers n_neighbors = 3 classifiers = [] classifiers.append(neighbors.KNeighborsClassifier(n_neighbors, weights='uniform')) classifiers.append(neighbors.KNeighborsClassifier(n_neighbors, weights='distance')) names = ['K-NN_Uniform', 'K-NN_Weighted'] #Put together datasets n_samples = 300 X, y = make_classification(n_samples, n_features=2, n_redundant=0, n_informative=2, random_state=0, n_clusters_per_class=1) rng = np.random.RandomState(2) X += 2 * rng.uniform(size=X.shape) linearly_separable = (X, y) datasets = [make_moons(n_samples, noise=0.3, random_state=0), make_circles(n_samples, noise=0.2, factor=0.5, random_state=1), linearly_separable] i = 1 # iterate over datasets for X, y in datasets: # preprocess dataset, split into training and test part X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.8) #split into train/test folds #set up meshgrid for figure x_min, x_max = X[:, 0].min() - .5, X[:, 
0].max() + .5 y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) # just plot the dataset first cm = plt.cm.RdBu cm_bright = ListedColormap(['#FF0000', '#0000FF']) ax = plt.subplot(len(datasets), len(classifiers) + 1, i) # Plot the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], marker='+', c=y_test, cmap=cm_bright, alpha=0.6) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) i += 1 # iterate over classifiers for name, clf in zip(names, classifiers): ax = plt.subplot(len(datasets), len(classifiers) + 1, i) clf.fit(X_train, y_train) score = clf.score(X_test, y_test) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) ax.contourf(xx, yy, Z, cmap=cm, alpha=.8) # Plot also the training points ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright) # and testing points ax.scatter(X_test[:, 0], X_test[:, 1], marker='+', c=y_test, cmap=cm_bright, alpha=0.4) ax.set_xlim(xx.min(), xx.max()) ax.set_ylim(yy.min(), yy.max()) ax.set_xticks(()) ax.set_yticks(()) ax.set_title(name) ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'), size=15, horizontalalignment='right') i += 1 figure.subplots_adjust(left=.02, right=.98) plt.show() ``` # Error and Evaluation Metrics * A key step in machine learning algorithm development and testing is determining a good error and evaluation metric. * Evaluation metrics help us to estimate how well our model is trained and it is important to pick a metric that matches our overall goal for the system. * Some common evaluation metrics include precision, recall, receiver operating curves, and confusion matrices. 
### Classification Accuracy and Error * Classification accuracy is defined as the number of correctly classified samples divided by all samples: \begin{equation} \text{accuracy} = \frac{N_{cor}}{N} \end{equation} where $N_{cor}$ is the number of correct classified samples and $N$ is the total number of samples. * Classification error is defined as the number of incorrectly classified samples divided by all samples: \begin{equation} \text{error} = \frac{N_{mis}}{N} \end{equation} where $N_{mis}$ is the number of misclassified samples and $N$ is the total number of samples. * Suppose there is a 3-class classification problem, in which we would like to classify each training sample (a fish) to one of the three classes (A = salmon or B = sea bass or C = cod). * Let's assume there are 150 samples, including 50 salmon, 50 sea bass and 50 cod. Suppose our model misclassifies 3 salmon, 2 sea bass and 4 cod. * Prediction accuracy of our binary classification model is calculated as: \begin{equation} \text{accuracy} = \frac{47+48+46}{50+50+50} = \frac{47}{50} \end{equation} * Prediction error is calculated as: \begin{equation} \text{error} = \frac{N_{mis}}{N} = \frac{3+2+4}{50+50+50} = \frac{3}{50} \end{equation} ### Confusion Matrices * A confusion matrix summarizes the classification accuracy across several classes. It shows the ways in which our classification model is confused when it makes predictions, allowing visualization of the performance of our algorithm. Generally, each row represents the instances of an actual class while each column represents the instances in a predicted class. 
* If our classifier is trained to distinguish between salmon, sea bass and cod, then we can summarize the prediction result in the confusion matrix as follows: | Actual/Predicted | Salmon | Sea bass | Cod | | --- | --- | --- | --- | | Salmon | 47 | 2 | 1 | | Sea Bass | 2 | 48 | 0 | | Cod | 0 | 0 | 50 | * In this confusion matrix, of the 50 actual salmon, the classifier predicted that 2 are sea bass, 1 is cod incorrectly and 47 are labeled salmon correctly. All correct predictions are located in the diagonal of the table. So it is easy to visually inspect the table for prediction errors, as they will be represented by values outside the diagonal. ### TP, FP, TN, and FN * True positive (TP): correctly predicting event values * False positive (FP): incorrectly calling non-events as an event * True negative (TN): correctly predicting non-event values * False negative (FN): incorrectly labeling events as non-event * Precision is also called positive predictive value. \begin{equation} \text{Precision} = \frac{\text{TP}}{\text{TP}+\text{FP}} \end{equation} * Recall is also called true positive rate, probability of detection \begin{equation} \text{Recall} = \frac{\text{TP}}{\text{TP}+\text{FN}} \end{equation} * Fall-out is also called false positive rate, probability of false alarm. \begin{equation} \text{Fall-out} = \frac{\text{FP}}{\text{N}}= \frac{\text{FP}}{\text{FP}+\text{TN}} \end{equation} * *Consider the salmon/non-salmon classification problem, what are the TP, FP, TN, FN values?* | Actual/Predicted | Salmon | Non-Salmon | | --- | --- | --- | | Salmon | 47 | 3 | | Non-Salmon | 2 | 98 | ### ROC curves * The Receiver Operating Characteristic (ROC) curve is a plot between the true positive rate (TPR) and the false positive rate (FPR), where the TPR is defined on the $x$-axis and FPR is defined on the $y$-axis. * $TPR = TP/(TP+FN)$ is defined as ratio between true positive prediction and all real positive samples. 
The definition used for $FPR$ in a ROC curve is often problem dependent. For example, for detection of targets in an area, FPR may be defined as the ratio between the number of false alarms per unit area ($FA/m^2$). In another example, if you have a set number of images and you are looking for targets in these collection of images, FPR may be defined as the number of false alarms per image. In some cases, it may make the most sense to simply use the Fall-out or false positive rate. * Given a binary classifier and its threshold, the (x,y) coordinates of ROC space can be calculated from all the prediction result. You trace out a ROC curve by varying the threshold to get all of the points on the ROC. * The diagonal between (0,0) and (1,1) separates the ROC space into two areas, which are left up area and right bottom area. The points above the diagonal represent good classification (better than random guess) which below the diagonal represent bad classification (worse than random guess). * *What is the perfect prediction point in a ROC curve?* ### MSE and MAE * *Mean Square Error* (MSE) is the average of the squared error between prediction and actual observation. * For each sample $\mathbf{x}_i$, the prediction value is $y_i$ and the actual output is $d_i$. The MSE is \begin{equation} MSE = \sum_{i=1}^n \frac{(d_i - y_i)^2}{n} \end{equation} * *Root Mean Square Error* (RMSE) is simply the square root the MSE. \begin{equation} RMSE = \sqrt{MSE} \end{equation} * *Mean Absolute Error* (MAE) is the average of the absolute error. \begin{equation} MAE = \frac{1}{n} \sum_{i=1}^n \lvert d_i - y_i \rvert \end{equation}
github_jupyter
### HGT features in A. castellanii **cmdoret, 20201009** In this notebook, I compare the nucleotide composition and general features of A.castallanii genes with HGT candidates. I previously computed the following genome composition metrics in 1kb non-overlapping sliding windows: * GC content $\frac{G+C}{A+C+G+T}$ * GC skew $\frac{G-C}{G+C}$ * AT skew $\frac{T-A}{T+A}$ * Shannon entropy $\sum_{i \in \left(A,C,G,T\right)}{P(i)*log(P(i))}$ * k-mer euclidean distance $\sqrt{\sum_{k \in kmers(ref)}{\left(P(ref[k]) - P(window[k])\right)^2}}$ I then intersected those windows with gene coordinates in the A. castellanii genome and for each gene, I computed the mean of those statistics (in case it overlaps multiple windows. Each gene also has two additional features: the number of exon (NEXON) and its length (LEN). ``` import umap import pandas as pd import numpy as np import seaborn as sns import matplotlib.pyplot as plt from sklearn.decomposition import PCA ``` First, I scale all features into z-scores so that they have comparable magnitudes (mean=0, stdev=1). ``` c3 = pd.read_csv('../../data/out/hgt/C3_windows_hgt.tsv', sep='\t') neff = pd.read_csv('../../data/out/hgt/Neff_windows_hgt.tsv', sep='\t') fields = ['LEN', 'NEXON', 'GC', 'GCSKEW', 'ATSKEW', 'ENTRO', '2MER', '3MER', '4MER'] neff['LEN'] = neff.end - neff.start c3['LEN'] = c3.end - c3.start neff.LEN = np.log10(neff.LEN) neff.NEXON = np.log10(neff.NEXON) c3.LEN = np.log10(c3.LEN) c3.NEXON = np.log10(c3.NEXON) neff.loc[:, fields] = neff.loc[:, fields].apply(lambda x: (x - x.mean()) / x.std(), axis=0) c3.loc[:, fields] = c3.loc[:, fields].apply(lambda x: (x - x.mean()) / x.std(), axis=0) ``` I want to compare HGT and non-HGT gene. To get these labels, I retrieved all HGT ids from the original A. castellanii Neff genome paper and extracted the corresponding sequences in the annotations. Out of the 402 listed HGT ids, only 172 are in the annotations. 
I then blasted those 172 genes against the proteomes of my 2 assemblies (Neff and C3 strains) and for each HGT, I recovered the best match if it had >95% identity. This gives me a list of 117 HGT in Neff and 43 in C3. ``` %%bash echo "$(wc -l ../../data/input/misc/NEFF_v1_HGT.tsv | cut -d' ' -f1) HGT ID from Clarke et al." echo "$(grep ">" ../../data/out/hgt/NEFF_v1_hgt_cds.fa | wc -l) found in the original assembly." echo "$(wc -l ../../data/out/blast/hgt_v1_vs_Neff_filtered.blast | cut -d' ' -f1) Matches in the new Neff assembly" echo "$(wc -l ../../data/out/blast/hgt_v1_vs_C3_filtered.blast | cut -d' ' -f1) Matches in the new C3 assembly" ``` We can see that several features are correlated with each other. For example, k-mer profile divergence are similar for all 3 kmer length selected. Length and number of exons are also highly correlated. ``` %matplotlib inline fig, axes = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(20, 8)) sns.heatmap(neff.loc[:, fields].corr(), cmap='bwr', ax=axes[0]) axes[0].set_title("Correlation between gene features in Neff") sns.heatmap(c3.loc[:, fields].corr(), cmap='bwr', ax=axes[1]) axes[1].set_title("Correlation between gene features in C3") %matplotlib inline sns.pairplot(c3.loc[:, fields+['HGT']], hue="HGT", markers='.', size=0.8) %matplotlib inline sns.pairplot(neff.loc[:, fields+['HGT']], hue="HGT", markers='.', size=0.8) ``` Since several of those features are correlated to each other, I tried to remove that redundancy using PCA. HGT do not stand out in any particular way from the rest of the genome. 
``` %matplotlib inline pca_neff = PCA() pcs_neff = pca_neff.fit_transform(neff.loc[:, fields]) neff['pc1'] = pcs_neff[:, 0] neff['pc2'] = pcs_neff[:, 1] pca_c3 = PCA() pcs_c3 = pca_c3.fit_transform(c3.loc[:, fields]) c3['pc1'] = pcs_c3[:, 0] c3['pc2'] = pcs_c3[:, 1] fix, ax = plt.subplots(1, 2, figsize=(15, 6)) for i, df, strain in zip(range(2), [c3, neff], ['C3', 'Neff']): bg = df.loc[df.HGT==0, :] hgt = df.loc[df.HGT==1, :] ax[i].scatter(bg.pc1, bg.pc2, c='grey', label='background') ax[i].scatter(hgt.pc1, hgt.pc2, c='red', label='HGT') ax[i].set_title(strain) ax[i].set_xlabel('PC1') ax[i].set_ylabel('PC2') ax[1].legend() plt.suptitle(f'PCA of gene statistics in A. castellanii') import scipy.stats as ss pval = ss.mannwhitneyu(c3.pc1[c3.HGT==1], c3.pc1[c3.HGT==0])[1] print(f"HGT are not so different than C3 background in PC1 (p={pval:.2f})") pval = ss.mannwhitneyu(neff.pc1[neff.HGT==1], neff.pc1[neff.HGT==0])[1] print(f"HGT are not different than Neff background in PC1 (p={pval:.2f})") %matplotlib inline fig, ax = plt.subplots(1, 1, figsize=(15, 8)) long_neff = pd.melt(neff.loc[:, fields + ['HGT', 'pc1', 'pc2']], id_vars=['HGT'], value_vars=fields+['pc1', 'pc2']) sns.violinplot(data=long_neff, x='variable', y='value', split=True, hue='HGT', inner='quartiles', ax=ax) plt.title("Comparison of gene features between HGT and background in Neff") %matplotlib inline fig, ax = plt.subplots(1, 1, figsize=(15, 8)) pca_c3 = PCA() pcs_c3 = pca_c3.fit_transform(c3.loc[:, fields]) c3['pc1'] = pcs_c3[:, 0] c3['pc2'] = pcs_c3[:, 1] long_c3 = pd.melt(c3.loc[:, fields + ['HGT', 'pc1', 'pc2']], id_vars=['HGT'], value_vars=fields+['pc1', 'pc2']) sns.violinplot(data=long_c3, x='variable', y='value', split=True, hue='HGT', inner='quartiles', ax=ax) plt.title("Comparison of gene features between HGT and background in C3") plt.savefig('/home/cmatthey/pCloudDrive/reports_phd/20200617_acastellanii_ms/assets/hgt_windows/features_stats_c3.svg') %matplotlib inline neff['gene_len'] = neff.end - 
neff.start fig, ax = plt.subplots(1, 2, figsize=(15, 6)) for i, df, strain in zip(range(2), [neff, c3], ["Neff", "C3"]): sns.scatterplot(data=df.loc[df.HGT == 0], x="LEN", y="NEXON", ax=ax[i]) sns.scatterplot(data=df.loc[df.HGT == 1], x="LEN", y="NEXON", color='red', ax=ax[i]) ax[i].set_title(strain) sx_bg_neff = sum(neff.loc[neff.HGT==0, "NEXON"] == neff.NEXON.min()) / len(neff.loc[neff.HGT==0, "NEXON"]) sx_ht_neff = sum(neff.loc[neff.HGT==1, "NEXON"] == neff.NEXON.min()) / len(neff.loc[neff.HGT==1, "NEXON"]) print(f"There are {100*sx_bg_neff:.2f}% single-exon genes in Neff, vs {100*sx_ht_neff:.2f}% for HGT candidates.") sx_bg_c3 = sum(c3.loc[c3.HGT==0, "NEXON"] == c3.NEXON.min()) / len(c3.loc[c3.HGT==0, "NEXON"]) sx_ht_c3 = sum(c3.loc[c3.HGT==1, "NEXON"] == c3.NEXON.min()) / len(c3.loc[c3.HGT==1, "NEXON"]) print(f"There are {100*sx_bg_c3:.2f}% single-exon genes in C3, vs {100*sx_ht_c3:.2f}% for HGT candidates.") ss.ttest_ind(neff.loc[neff.HGT==0, 'NEXON'], neff.loc[neff.HGT==1, 'NEXON']) ss.ttest_ind(c3.loc[c3.HGT==0, 'NEXON'], c3.loc[c3.HGT==1, 'NEXON']) for strain, df in zip(['C3', 'Neff'], [c3, neff]): for field in fields: res = ss.ttest_ind(df.loc[df.HGT==0, field], df.loc[df.HGT==1, field])[1] print(f'{strain}, {field}: pval={res:.2f}') ```
github_jupyter
# Project: Create a Convolutional Neural Network - We will create a model on the [CIFAR-10 dataset](https://www.cs.toronto.edu/%7Ekriz/cifar.html) ### Step 1: Import libraries ``` import tensorflow as tf from tensorflow.keras import datasets, layers, models from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense import matplotlib.pyplot as plt %matplotlib inline ``` ### Step 2: Download the CIFAR10 dataset - Execute the cell below ``` (train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data() ``` ### Step 3: Normalize the pixels - Divide the **train_images** and **test_images** by 255 to normalize them between 0 and 1. ``` train_images = train_images / 255.0 test_images = test_images / 255.0 ``` ### Step 4: Get the class names of the labels - Make a class name conversion. - HINT: make a list with the name **class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']** - How to use the name conversion. 
- **class_names[int(train_labels[index])]** - How to show an image - **plt.imshow(train_images[index])** ``` class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] index = 1 plt.imshow(train_images[index]) class_names[int(train_labels[index])] ``` ### Step 5: Create a model - Create a **Sequential** model - **Conv2D** with 32 and (3, 3), **activation='relu', input_shape=(32, 32, 3)** - **MaxPooling2D** with (2, 2) - **Conv2D** with 64 and (3, 3), **activation='relu'** - **MaxPooling2D** with (2, 2) - **Conv2D** with 64 and (3, 3), **activation='relu'** - **Flatten** - **Dense** with 64 nodes with **input_dim=4, activation='relu'** - **Dense** with 10 (the output node) - Compile the model with **optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']** ``` model = Sequential() model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.Flatten()) model.add(layers.Dense(64, input_dim=4, activation='relu')) model.add(layers.Dense(10)) model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) ``` ### Step 6: Train the model - Fit the model with **train_images, train_labels, epochs=10** and **validation_data=(test_images, test_labels)** ``` model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels)) ``` ### Step 7: Test the model - Make predictions - assign the predictions of test_images to a variable - How to test visually - Assign **index=0** - Plot the image with **plt.imshow(test_images[index])** - See the label from prediction by mapping it from **class_names** ``` y_pred = model.predict(test_images) index = 168 
plt.imshow(test_images[index]) class_names[y_pred[index].argmax()] model.evaluate(test_images, test_labels, verbose=0) ``` ### Step 8 (Optional): Improve the model - Try to play around with the model to improve the score ``` model = Sequential() model.add(layers.Conv2D(64, (3, 3), activation='relu', input_shape=(32, 32, 3))) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.MaxPooling2D((2, 2))) model.add(layers.Conv2D(64, (3, 3), activation='relu')) model.add(layers.Flatten()) model.add(layers.Dense(64, input_dim=4, activation='relu')) model.add(layers.Dense(10)) model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy']) model.fit(train_images, train_labels, epochs=10, validation_data=(test_images, test_labels)) y_pred = model.predict(test_images) index = 168 plt.imshow(test_images[index]) class_names[y_pred[index].argmax()] model.evaluate(test_images, test_labels, verbose=0) ```
github_jupyter
# K Nearest Neighbors Classifiers So far we've covered learning via probability (naive Bayes) and learning via errors (regression). Here we'll cover learning via similarity. This means we look for the datapoints that are most similar to the observation we are trying to predict. #### What type of model is k-nearest neighbors algorithm (k-NN)? A supervised, classification, nonparametric, instance-based model. Supervised ---- You have to have labeled data. Each points belong to a group. Your new point will be classfied into other existing groups. Parametric vs Nonparametric Models ----- Parametric: Make an assumption about form of the function of the data Nonparametric: Do __not__ make an assumption about the functional form. Instance-based --------- Uses only the actual observed data to classify. There is no model! <center><img src="images/knn2.png" width="50%"/></center> Let's start by the simplest example: **Nearest Neighbor**. ## Nearest Neighbor Let's use this example: classifying a song as either "rock" or "jazz". For this data we have measures of duration in seconds and loudness in loudness units (we're not going to be using decibels since that isn't a linear measure, which would create some problems we'll get into later). ``` import numpy as np import scipy import pandas as pd import matplotlib.pyplot as plt %matplotlib inline music = pd.DataFrame() # Some data to play with. music['duration'] = [184, 134, 243, 186, 122, 197, 294, 382, 102, 264, 205, 110, 307, 110, 397, 153, 190, 192, 210, 403, 164, 198, 204, 253, 234, 190, 182, 401, 376, 102] music['loudness'] = [18, 34, 43, 36, 22, 9, 29, 22, 10, 24, 20, 10, 17, 51, 7, 13, 19, 12, 21, 22, 16, 18, 4, 23, 34, 19, 14, 11, 37, 42] # We know whether the songs in our training data are jazz or not. music['jazz'] = [ 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0] music.head() # Look at our data. 
plt.scatter( music[music['jazz'] == 1].duration, music[music['jazz'] == 1].loudness, color='red' ) plt.scatter( music[music['jazz'] == 0].duration, music[music['jazz'] == 0].loudness, color='blue' ) plt.legend(['Jazz', 'Rock']) plt.title('Jazz and Rock Characteristics') plt.xlabel('Duration') plt.ylabel('Loudness') plt.show() ``` The simplest form of a similarity model is the Nearest Neighbor model. This works quite simply: when trying to predict an observation, we find the closest (or _nearest_) known observation in our training data and use that value to make our prediction. Here we'll use the model as a classifier, the outcome of interest will be a category. To find which observation is "nearest" we need some kind of way to measure distance. Typically we use _Euclidean distance_, the standard distance measure that you're familiar with from geometry. With one observation in n-dimensions $(x_1, x_2, ...,x_n)$ and the other $(w_1, w_2,...,w_n)$: $$ \sqrt{(x_1-w_1)^2 + (x_2-w_2)^2+...+(x_n-w_n)^2} $$ ## Other distance functions: 1-norm distance, aka City Block (Manhattan) distance ------ <center><img src="images/1_norm_1.svg" width="35%"/></center> <center><img src="images/1_norm_2.jpg" width="35%"/></center> 2-norm distance, aka Eculidian (as the crow flies) ----- <center><img src="images/2_norm_1.svg" width="35%"/></center> <center><img src="images/2_norm_2.jpg" width="35%"/></center> p-norm distance, aka Minkowski distance of order p ----- Generalization notion of normed vector space distance. When p = 1, Manhattan distance When p = 2, Euclidean distance <center><img src="images/p_norm.svg" width="35%"/></center> You can technically define any distance measure you want, and there are times where this customization may be valuable. As a general standard, however, we'll use Euclidean distance. 
Now that we have a distance measure from each point in our training data to the point we're trying to predict, the model can find the datapoint with the smallest distance and then apply that category to our prediction. Let's try running this model, using the SKLearn package. ``` from sklearn.neighbors import KNeighborsClassifier neighbors = KNeighborsClassifier(n_neighbors=3) X = music[['loudness', 'duration']] Y = music.jazz neighbors.fit(X,Y) neighbors.predict_proba([[30, 160]]) ## Predict for a song with 30 loudness that's 160 seconds long. neighbors.predict([[30, 160]]) ``` It's as simple as that. Looks like our model is predicting that a 30 loudness, 160 second long song is _not_ jazz. All it takes to train the model is a dataframe of independent variables and a dataframe of dependent outcomes. You'll note that for this example, we used the `KNeighborsClassifier` method from SKLearn. This is because Nearest Neighbor is a simplification of K-Nearest Neighbors. The jump, however, isn't that far. ## K-Nearest Neighbors <center><img src="images/knn1.png" width="50%"/></center> **K-Nearest Neighbors** (or "**KNN**") is the logical extension of Nearest Neighbor. Instead of looking at just the single nearest datapoint to predict an outcome, we look at several of the nearest neighbors, with $k$ representing the number of neighbors we choose to look at. Each of the $k$ neighbors gets to vote on what the predicted outcome should be. This does a couple of valuable things. Firstly, it smooths out the predictions. If only one neighbor gets to influence the outcome, the model explicitly overfits to the training data. Any single outlier can create pockets of one category prediction surrounded by a sea of the other category. This also means instead of just predicting classes, we get implicit probabilities. 
If each of the $k$ neighbors gets a vote on the outcome, then the probability of the test example being from any given class $i$ is: $$ \frac{votes_i}{k} $$ And this applies for all classes present in the training set. Our example only has two classes, but this model can accommodate as many classes as the data set necessitates. To come up with a classifier prediction it simply takes the class for which that fraction is maximized. Let's expand our initial nearest neighbors model from above to a KNN with a $k$ of 5. ``` neighbors = KNeighborsClassifier(n_neighbors=5) X = music[['loudness', 'duration']] Y = music.jazz neighbors.fit(X,Y) ## Predict for a 24 loudness, 190 seconds long song. print(neighbors.predict([[24, 190]])) print(neighbors.predict_proba([[24, 190]])) ``` ### predict vs predict_proba If there are say 3 classes (say -1, 0, 1), predict will give "to which class the data point belongs to" i.e. either (-1 or 0 or 1). Predict_proba will give the probability that the data point belongs to each of the classes, like (0.2, 0.7, 0.1). In general, we can say that the "predict" function is a class decision function, where as the predict_proba is a more general form of predicting the probability for each of the classes. Now our test prediction has changed. In using the five nearest neighbors it appears that there were two votes for rock and three for jazz, so it was classified as a jazz song. This is different than our simpler Nearest Neighbors model. While the closest observation was in fact rock, there are more jazz songs in the nearest $k$ neighbors than rock. We can visualize our decision bounds with something called a _mesh_. This allows us to generate a prediction over the whole space. Read the code below and make sure you can pull out what the individual lines do, consulting the documentation for unfamiliar methods if necessary. 
## Normalization & Weighing It can be a more obvious challenge if you were dealing with something where the relative scales are strikingly different. For example, if you were looking at buildings and you have height in floors and square footage, you'd have a model that would really only care about square footage since distance in that dimension would be a far greater number of units than the number of floors. Turn all the values to be between 0 and 1 or -1 to 1 (same thing) There is one more thing to address when talking about distance, and that is weighting. In the vanilla version of KNN, all k of the closest observations are given equal votes on what the outcome of our test observation should be. When the data is densely populated that isn't necessarily a problem. However, sometimes the k nearest observations are not all similarly close to the test. In that case it may be useful to weight by distance. Functionally this will weight by the inverse of distance, so that closer datapoints (with a low distance) have a higher weight than further ones. ``` import numpy as np # Our data. Converting from data frames to arrays for the mesh. X = np.array(X) Y = np.array(Y) # Mesh size. h = 4.0 # Plot the decision boundary. We assign a color to each point in the mesh. x_min = X[:, 0].min() - .5 x_max = X[:, 0].max() + .5 y_min = X[:, 1].min() - .5 y_max = X[:, 1].max() + .5 xx, yy = np.meshgrid( np.arange(x_min, x_max, h), np.arange(y_min, y_max, h) ) Z = neighbors.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot. Z = Z.reshape(xx.shape) plt.figure(1, figsize=(6, 4)) plt.set_cmap(plt.cm.Paired) plt.pcolormesh(xx, yy, Z) # Add the training points to the plot. 
plt.scatter(X[:, 0], X[:, 1], c=Y) plt.xlabel('Loudness') plt.ylabel('Duration') plt.title('Mesh visualization') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.show() ``` Looking at the visualization above, any new point that fell within a blue area would be predicted to be jazz, and any point that fell within a brown area would be predicted to be rock. The boundaries above are strangely jagged here, and we'll get into that in more detail in the next lesson. Also note that the visualization isn't completely continuous. There are an infinite number of points in this space, and we can't calculate the value for each one. That's where the mesh comes in. We set our mesh size (`h = 4.0`) to 4.0 above, which means we calculate the value for each point in a grid where the points are spaced 4.0 away from each other. You can make the mesh size smaller to get a more continuous visualization, but at the cost of a more computationally demanding calculation. In the cell below, recreate the plot above with a mesh size of `10.0`. Then reduce the mesh size until you get a plot that looks good but still renders in a reasonable amount of time. When do you get a visualization that looks acceptably continuous? When do you start to get a noticeable delay? ``` from sklearn.neighbors import KNeighborsClassifier from scipy import stats neighbors = KNeighborsClassifier(n_neighbors=5, weights='distance') # Our input data frame will be the z-scores this time instead of raw data. # zscoring it is normalizing it X = pd.DataFrame({ 'loudness': stats.zscore(music.loudness), 'duration': stats.zscore(music.duration) }) # Fit our model. Y = music.jazz neighbors.fit(X, Y) # Arrays, not data frames, for the mesh. X = np.array(X) Y = np.array(Y) # Mesh size. h = .01 # Plot the decision boundary. We assign a color to each point in the mesh. 
x_min = X[:,0].min() - .5 x_max = X[:,0].max() + .5 y_min = X[:,1].min() - .5 y_max = X[:,1].max() + .5 xx, yy = np.meshgrid( np.arange(x_min, x_max, h), np.arange(y_min, y_max, h) ) Z = neighbors.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure(1, figsize=(6, 4)) plt.set_cmap(plt.cm.Paired) plt.pcolormesh(xx, yy, Z) # Add the training points to the plot. plt.scatter(X[:, 0], X[:, 1], c=Y) plt.xlabel('Loudness') plt.ylabel('Duration') plt.xlim(xx.min(), xx.max()) plt.ylim(yy.min(), yy.max()) plt.show() ```
github_jupyter
# COVID-19: Healthcare Facility Capacity Optimization ## Objective and Prerequisites This COVID-19 Healthcare Facility Capacity Optimization problem shows you how to determine the optimal location and capacity of healthcare facilities in order to: * Satisfy demand from COVID-19 patients for treatment, * Minimize the cost of opening temporary facilities for healthcare providers, and * Predict the allocation of COVID-19 patients from a specific county to a specific healthcare facility. This modeling example is at the beginner level, where we assume that you know Python and that you have some knowledge of how to build mathematical optimization models. **Download the Repository** <br /> You can download the repository containing this and other examples by clicking [here](https://github.com/Gurobi/modeling-examples/archive/master.zip). **Gurobi License** <br /> In order to run this Jupyter Notebook properly, you must have a Gurobi license. If you do not have one, you can request an [evaluation license](https://www.gurobi.com/downloads/request-an-evaluation-license/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-MUI-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_Covid19_HC_Facility_Location_COM_EVAL_GitHub&utm_term=Covid-19%20Healthcare%20Facility%20Location&utm_content=C_JPM) as a *commercial user*, or download a [free license](https://www.gurobi.com/academia/academic-program-and-licenses/?utm_source=3PW&utm_medium=OT&utm_campaign=WW-MU-EDU-OR-O_LEA-PR_NO-Q3_FY20_WW_JPME_Covid19_HC_Facility_Location_COM_EVAL_GitHub&utm_term=Covid-19%20Healthcare%20Facility%20Location&utm_content=C_JPM) as an *academic user*. --- ## Problem Description Hospitals in various counties throughout the US are reaching full capacity due to a surge in COVID-19 patients. Many hospitals are considering creating temporary facilities to increase their capacity to handle COVID-19 patients. ![temporary facility](covid19-trailerFacilty.jpg) In this example, we focus on nine counties in the US. 
Each county has existing facilities to treat COVID-19 patients, and also has the option of building temporary facilities to increase the overall capacity to handle COVID-19 patients. The following table defines the coordinates of the centroid and the forecasted demand (i.e. the projected number of COVID-19 patients) of each county. To estimate this demand, we consider the population of nine fictional counties in California, the current number of COVID-19 cases per day in California, the average percentage of COVID-19 cases who require hospitalization, and the average number of days that a COVID-19 patient stays in the hospital. | Centroid | Coordinates | Demand | | --- | --- | --- | | county 1 | (1, 1.5) | 351 | | county 2 | (3, 1) | 230 | | county 3 | (5.5, 1.5) | 529 | | county 4 | (1, 4.5 ) | 339 | | county 5 | (3, 3.5) | 360 | | county 6 | (5.5, 4.5) | 527 | | county 7 | (1, 8) | 469 | | county 8 | (3, 6) | 234 | | county 9 | (4.5, 8) | 500 | The following table defines the coordinates and capacity of existing facilities. The capacity of existing facilities is calculated as 80% of the forecasted demand of the county in which the existing facilities are located. The exception to this is county 9, where we assume that we have an excess of existing capacity. | Existing | Coordinates | Capacity | | --- | --- | --- | | facility 1 | (1, 2) | 281 | | facility 2 | (2.5, 1) | 187 | | facility 3 | (5, 1) | 200 | | facility 4 | (6.5, 3.5) | 223 | | facility 5 | (1, 5) | 281 | | facility 6 | (3, 4) | 281 | | facility 7 | (5, 4) | 222 | | facility 8 | (6.5, 5.5) | 200 | | facility 9 | (1, 8.5) | 250 | | facility 10 | (1.5, 9.5) | 125 | | facility 11 | (8.5, 6) | 187 | | facility 12 | (5, 8) | 300 | | facility 13 | (3, 9) | 300 | | facility 14 | (6, 9) | 243 | The following table defines the coordinates and capacity of new temporary facilities. The cost of building a temporary facility with a capacity of treating one hundred COVID-19 patients is $\$500,000$. 
| Temporary | Coordinates | Capacity | | --- | --- | --- | | facility 15 | (1.5, 1) | 100 | | facility 16 | (3.5, 1.5) | 100 | | facility 17 | (5.5, 2.5) | 100 | | facility 18 | (1.5, 3.5) | 100 | | facility 19 | (3.5, 2.5) | 100 | | facility 20 | (4.5, 4.5) | 100 | | facility 21 | (1.5, 6.5) | 100 | | facility 22 | (3.5, 6.5) | 100 | | facility 23 | (5.5, 6.5) | 100 | The coordinates of the three tables are in tens of miles. We assume that each increase of 10 miles in the distance to a COVID-19 facility results in a $\$5$ increase in driving costs for each COVID-19 patient. In this example, the goal is to identify which temporary facilities to build in order to be able to accommodate demand for treatment by COVID-19 patients while minimizing the total cost of COVID-19 patients driving to an existing or temporary COVID-19 facility and the total cost of building temporary facilities. This example shows how a Facility Location mixed-integer programming (MIP) model can help healthcare providers make decisions about: * How to best utilize their capacity, * Whether to build temporary facilities for COVID-19 patients, and * How COVID-19 patients from a county should be allocated to various healthcare facilities in order to ensure that the facilities have the capacity to provide treatment for the patients. This Jupyter Notebook is based on the paper written by Katherine Klise and Michael Bynum [1]. ## Model Formulation ### Sets and Indices $e \in E$: Index and set of existing healthcare facility locations. $t \in T$: Index and set of temporary healthcare facility locations. $f \in F = E \cup T$: Index and set of all healthcare facility locations. $c \in C$: Index and set of counties. ### Parameters $Dist_{c,f} \in \mathbb{R}^+$: Distance between county $c$ and facility location $f$. $Dem_{c} \in \mathbb{R}^+$: Expected number of people in county $c$ who will need a COVID-19 facility. 
$Cap_{f} \in \mathbb{R}^+$: Number of people that can be served by a facility at location $f$. $\text{dCost} = 5$: Cost of driving 10 miles. $\text{tFCost} = 500,000$: Cost of building a temporary COVID-19 facility with a capacity of treating 100 COVID-19 patients. $bigM$: Penalty of adding extra capacity at temporary facilities in order to satisfy treatment of COVID-19 patients demand. ### Decision Variables $y_{t} \in \{0, 1 \}$: This variable is equal to 1 if we build a temporary facility at location $t$; and 0 otherwise. $ x_{c,f} \in \mathbb{R}^+$: Number of people from county $c$ served by a facility at location $f$. $z_{t} \in \mathbb{R}^+$: Extra capacity added at temporary facility location $t$. ### Objective Function - **Cost**. We want to minimize the total cost of patients driving from a county to a healthcare facility and the total cost of building temporary COVID-19 treatment capacity. The last term with the big penalty coefficient ($bigM$), enables extra capacity to be added at a temporary facility to ensure that total demand is satisfied. \begin{equation} \text{Min} \quad Z = \sum_{c \in C} \sum_{f \in F} \text{dCost} *Dist_{c,f} * x_{c,f} + \text{tFCost}*\sum_{t \in T} y_{t} + bigM*\sum_{t \in T} z_{t} \tag{0} \end{equation} ### Constraints - **Demand**. Satisfy county demand of service from a COVID-19 facility. \begin{equation} \sum_{f \in F} x_{c,f} = Dem_{c} \quad \forall c \in C \tag{1} \end{equation} - **Existing facilities**. Capacity of an existing location of a facility cannot be exceeded. \begin{equation} \sum_{c \in C} x_{c,e} \leq Cap_{e} \quad \forall e \in E \tag{2} \end{equation} - **Temporary facilities**. Capacity of a temporary location of a facility cannot be exceeded. Please observe that extra capacity can be added. \begin{equation} \sum_{c \in C} x_{c,t} \leq Cap_{t}*y_{t} + z_{t} \quad \forall t \in T \tag{3} \end{equation} --- ## Python Implementation We now import the Gurobi Python Module and other Python libraries. 
``` from itertools import product from math import sqrt import gurobipy as gp from gurobipy import GRB # tested with Gurobi v9.1.0 and Python 3.7.0 ``` --- ### Helper Functions * `compute_distance` computes distance between a county centroid and the location of a facility * `solve_covid19_facility` builds, solves, and prints results of the COVID-19 healthcare facility capacity optimization model ``` def compute_distance(loc1, loc2): # This function determines the Euclidean distance between a facility and a county centroid. dx = loc1[0] - loc2[0] dy = loc1[1] - loc2[1] return sqrt(dx*dx + dy*dy) def solve_covid19_facility(c_coordinates, demand): ##################################################### # Data ##################################################### # Indices for the counties counties = [*range(1,10)] # Indices for the facilities facilities = [*range(1,24)] # Create a dictionary to capture the coordinates of an existing facility and capacity of treating COVID-19 patients existing, e_coordinates, e_capacity = gp.multidict({ 1: [(1, 2), 281], 2: [(2.5, 1), 187], 3: [(5, 1), 200], 4: [(6.5, 3.5), 223], 5: [(1, 5), 281], 6: [(3, 4), 281], 7: [(5, 4), 222], 8: [(6.5, 5.5), 200], 9: [(1, 8.5), 250], 10: [(1.5, 9.5), 125], 11: [(8.5, 6), 187], 12: [(5, 8), 300], 13: [(3, 9), 300], 14: [(6, 9), 243] }) # Create a dictionary to capture the coordinates of a temporary facility and capacity of treating COVID-19 patients temporary, t_coordinates, t_capacity = gp.multidict({ 15: [(1.5, 1), 100], 16: [(3.5, 1.5), 100], 17: [(5.5, 2.5), 100], 18: [(1.5, 3.5), 100], 19: [(3.5, 2.5), 100], 20: [(4.5, 4.5), 100], 21: [(1.5, 6.5), 100], 22: [(3.5, 6.5), 100], 23: [(5.5, 6.5), 100] }) # Cost of driving 10 miles dcost = 5 # Cost of building a temporary facility with capacity of 100 COVID-19 tfcost = 500000 # Compute key parameters of MIP model formulation f_coordinates = {} for e in existing: f_coordinates[e] = e_coordinates[e] for t in temporary: f_coordinates[t] = 
t_coordinates[t] # Cartesian product of counties and facilities cf = [] for c in counties: for f in facilities: tp = c,f cf.append(tp) # Compute distances between counties centroids and facility locations distance = {(c,f): compute_distance(c_coordinates[c], f_coordinates[f]) for c, f in cf} ##################################################### # MIP Model Formulation ##################################################### m = gp.Model('covid19_temporary_facility_location') # Build temporary facility y = m.addVars(temporary, vtype=GRB.BINARY, name='temporary') # Assign COVID-19 patients of county to facility x = m.addVars(cf, vtype=GRB.CONTINUOUS, name='Assign') # Add capacity to temporary facilities z = m.addVars(temporary, vtype=GRB.CONTINUOUS, name='addCap' ) # Objective function: Minimize total distance to drive to a COVID-19 facility # Big penalty for adding capacity at a temporary facility bigM = 1e9 m.setObjective(gp.quicksum(dcost*distance[c,f]*x[c,f] for c,f in cf) + tfcost*y.sum() + bigM*z.sum(), GRB.MINIMIZE) # Counties demand constraints demandConstrs = m.addConstrs((gp.quicksum(x[c,f] for f in facilities) == demand[c] for c in counties), name='demandConstrs') # Existing facilities capacity constraints existingCapConstrs = m.addConstrs((gp.quicksum(x[c,e] for c in counties) <= e_capacity[e] for e in existing ), name='existingCapConstrs') # temporary facilities capacity constraints temporaryCapConstrs = m.addConstrs((gp.quicksum(x[c,t] for c in counties) -z[t] <= t_capacity[t]*y[t] for t in temporary ), name='temporaryCapConstrs') # Run optimization engine m.optimize() ##################################################### # Output Reports ##################################################### # Total cost of building temporary facility locations temporary_facility_cost = 0 print(f"\n\n_____________Optimal costs______________________") for t in temporary: if (y[t].x > 0.5): temporary_facility_cost += tfcost*round(y[t].x) patient_allocation_cost = 0 for c,f 
in cf: if x[c,f].x > 1e-6: patient_allocation_cost += dcost*round(distance[c,f]*x[c,f].x) print(f"The total cost of building COVID-19 temporary healhtcare facilities is ${temporary_facility_cost:,}") print(f"The total cost of allocating COVID-19 patients to healtcare facilities is ${patient_allocation_cost:,}") # Build temporary facility at location print(f"\n_____________Plan for temporary facilities______________________") for t in temporary: if (y[t].x > 0.5): print(f"Build a temporary facility at location {t}") # Extra capacity at temporary facilities print(f"\n_____________Plan to increase Capacity at temporary Facilities______________________") for t in temporary: if (z[t].x > 1e-6): print(f"Increase temporary facility capacity at location {t} by {round(z[t].x)} beds") # Demand satisfied at each facility f_demand = {} print(f"\n_____________Allocation of county patients to COVID-19 healthcare facility______________________") for f in facilities: temp = 0 for c in counties: allocation = round(x[c,f].x) if allocation > 0: print(f"{allocation} COVID-19 patients from county {c} are treated at facility {f} ") temp += allocation f_demand[f] = temp print(f"{temp} is the total number of COVID-19 patients that are treated at facility {f}. ") print(f"\n________________________________________________________________________________") # Test total demand = total demand satisfied by facilities total_demand = 0 for c in counties: total_demand += demand[c] demand_satisfied = 0 for f in facilities: demand_satisfied += f_demand[f] print(f"\n_____________Test demand = supply______________________") print(f"Total demand is: {total_demand:,} patients") print(f"Total demand satisfied is: {demand_satisfied:,} beds") ``` ## Base Scenario In this scenario, we consider the data described for the instance of the COVID-19 Healthcare Facility Capacity Optimization problem. The forecasted demand is as defined in the first table of the problem description. 
``` # Create a dictionary to capture the coordinates of a county and the demand of COVID-19 treatment counties, coordinates, forecast = gp.multidict({ 1: [(1, 1.5), 351], 2: [(3, 1), 230], 3: [(5.5, 1.5), 529], 4: [(1, 4.5 ), 339], 5: [(3, 3.5), 360], 6: [(5.5, 4.5), 527], 7: [(1, 8), 469], 8: [(3, 6), 234], 9: [(4.5, 8), 500] }) # find the optimal solution of the base scenario solve_covid19_facility(coordinates, forecast) ``` ### Analysis for Base Scenario The optimal total cost of building COVID-19 temporary healthcare facilities is $\$1,500,000$, and three COVID-19 temporary healthcare facilities are built. The total cost of allocating COVID-19 patients to healthcare facilities is $\$21,645$, and no extra capacity needs to be added to accommodate the demand for treatment from COVID-19 patients. The MIP model also determines the expected number of COVID-19 patients of a county allocated to a healthcare facility. For example, 6 COVID-19 patients from county 3, 50 COVID-19 patients from county 5, and 166 COVID-19 patients from county 6 are expected to be treated at facility 7. The total number of COVID-19 patients expected to be treated at facility 7 is 222. --- ## Scenario 1 Assume that the Centers for Disease Control and Prevention (CDC) announced that the number of hospitalizations will increase by 20%. This percentage includes 5% of buffer capacity to account for the variability of the expected demand. ``` # Increase in demand by 20%. for c in counties: forecast[c] = round(1.2*forecast[c]) # find the optimal for scenario 1 solve_covid19_facility(coordinates, forecast) ``` ### Analysis for Scenario 1 The optimal total cost of building temporary COVID-19 healthcare facilities is $\$4,500,000$, and nine temporary COVID-19 healthcare facilities are built. The total cost of allocating COVID-19 patients to healthcare facilities is $\$25,520$, and 40 and 27 beds need to be added at temporary healthcare facilities 15 and 17, respectively. 
Please note that in this scenario, the system is overloaded and all COVID-19 healthcare facilities are operating at full capacity. In addition, extra capacity needs to be added at some temporary healthcare facilities.

---
## Conclusion

In this example, we addressed the COVID-19 Healthcare Facility Capacity Optimization problem. We determined the optimal location and capacity of healthcare facilities in order to:
* Satisfy demand from COVID-19 patients for treatment,
* Minimize the cost of opening temporary facilities for healthcare providers, and
* Predict the allocation of COVID-19 patients from a specific county to a specific healthcare facility.

We explored two scenarios. In the base scenario, we have enough capacity and need to build three temporary healthcare facilities. Whereas in the alternative scenario (1) with an increase of 20% in the number of COVID-19 patients requiring hospitalization, we need to build nine temporary healthcare facilities and add extra capacity at two of them.

Our COVID-19 Healthcare Facility Location Optimization model can be used by public health officials and healthcare providers to help make strategic decisions about when and where to increase healthcare facility capacity during the COVID-19 pandemic. Also, this strategic model can feed information to a COVID-19 load-balancing dispatching model that is capable of assigning (in real time) COVID-19 patients who require hospitalization to the "right" healthcare facilities. In addition, our model can feed into a tactical model that determines how capacity should be increased to accommodate any increase in demand. For example, the number of medical personnel to be hired, trained, and re-skilled, the rotation of medical personnel, and the amount of equipment (e.g. ventilators, drugs, beds, etc.) needed.

## References

[1] Katherine Klise and Michael Bynum. *Facility Location Optimization Model for COVID-19 Resources*. April 2020. 
Joint DOE Laboratory Pandemic Modeling and Analysis Capability. SAND2020-4693R. Copyright © 2020 Gurobi Optimization, LLC
github_jupyter
### MEDC0106: Bioinformatics in Applied Biomedical Science <p align="center"> <img src="../../resources/static/Banner.png" alt="MEDC0106 Banner" width="90%"/> <br> </p> --------------------------------------------------------------- # 11 - Introduction to Biopython - Proteins *Written by:* Mateusz Kaczyński **This notebook provides an introduction on using protein data in Biopython - from analysis to property prediction and similarity search to a brief entry to PDB / 3D file operations.** ## Contents 1. [Basic analysis](#Basic-analysis) 2. [Property prediction](#Property-prediction) 3. [BLAST](#BLAST) 4. [PDB files](#PDB-files) 5. [Discussion](#Discussion) ----- #### Extra Resources: - [Official Biopython tutorial](http://biopython.org/DIST/docs/tutorial/Tutorial.html) - A comprehensive guide to the library capabilities. - [Biopython API documentation](https://biopython.org/docs/latest/api/index.html) - a long, detailed list of all methods and connectors provided by Biopython. - [Rosalind](http://rosalind.info) - A bioinformatics learning platform that includes exercises. Importing required modules and functions. ``` import Bio print("Module", Bio.__name__, "version", Bio.__version__) from urllib.request import urlretrieve from Bio import SeqIO ``` ## Basic analysis **Biopython** provides various tools to analyse proteins. We will be analysing Cystic Fibrosis Transmembrane Conductance regulator (CFTR) gene and the protein it encodes. **Ensembl**: https://www.ensembl.org/Homo_sapiens/Gene/Summary?g=ENSG00000001626;r=7:117287120-117715971 **Uniprot**: https://www.uniprot.org/uniprot/P13569 First, we will download the corresponding FASTA file to extract the sequence. ``` urlretrieve("https://www.uniprot.org/uniprot/P13569.fasta", "data/P13569.fasta") # `next` method allows to get the first element of the sequence. 
cftr_aa = next(SeqIO.parse("data/P13569.fasta", "fasta")) print(cftr_aa) ``` **Biopython** contains `ProteinAnalysis` class that wraps a collection of protein analysis functionality. ``` from Bio.SeqUtils.ProtParam import ProteinAnalysis analysis = ProteinAnalysis(str(cftr_aa.seq)) # To delve into the full functionality of the ProteinAnalysis class, you can use `help` function. # Uncomment the next line to see what other information can be obtained from `analysis` object. # help(analysis) ``` Let's take a look at a simple summary of how many aminoacids are present in the protein. *Note the use of pprint (PrettyPrint) to make the dense dictionary more user-friendly.* ``` count_of_aas = analysis.count_amino_acids() print("Count of particular aminoacids") print(count_of_aas) print("Using PrettyPrint for more user-friendly representation.") import pprint pprint.pprint(count_of_aas) ``` Let's take a look at some protein properties available. `"{:.2f}"` is used to print only a `float` number to the first two decimal places. ``` print("Molecular weight :", "{:.2f}".format(analysis.molecular_weight())) print("Charge at a given pH:", "{:.2f}".format(analysis.charge_at_pH(5.8))) print("Isoelectric point :", "{:.2f}".format(analysis.isoelectric_point())) in_helix, in_turn, in_sheet = analysis.secondary_structure_fraction() print( "Fractions of AA associated with secondary structures:\n"\ " Helix: {:.2f}\n"\ " Turn: {:.2f}\n"\ " Sheet: {:.2f}\n".format(in_helix, in_turn, in_sheet) ) ``` We can also use the helper functions provided to create new statistics. For example, let's calculate [BCAA (branch-chain amino acid)](https://en.wikipedia.org/wiki/Branched-chain_amino_acid) content of the protein. ``` total_number_of_LIV_aas = 0 for aa in ["L", "I", "V"]: total_number_of_LIV_aas += count_of_aas[aa] print("BCAA content:", total_number_of_LIV_aas / len(cftr_aa)) ``` ## Property prediction In this section, we will analyse the hydrophobicity of the protein. 
[The Kyte-Doolittle scale](https://doi.org/10.1016/0022-2836\(82\)90515-0) is useful for predicting the hydropathic character of the molecule and is based on the experimentally - derived aminoacid properties as defined below. The higher the value the more hydrophobic the aminoacid. ``` Kyte_and_Doolittle_scale = { "A": 1.8, "C": 2.5, "D": -3.5, "E": -3.5, "F": 2.8, "G": -0.4, "H": -3.2, "I": 4.5, "K": -3.9, "L": 3.8, "M": 1.9, "N": -3.5, "P": -1.6, "Q": -3.5, "R": -4.5, "S": -0.8, "T": -0.7, "V": 4.2, "W": -0.9, "Y": -1.3 } sequence = """MQRSPLEKASVVSKLFFSWTRPILRKGYRQRLELSDIYQIPSVDSADNLSEKLEREWDRE LASKKNPKLINALRRCFFWRFMFYGIFLYLGEVTKAVQPLLLGRIIASYDPDNKEERSIA IYLGIGLCLLFIVRTLLLHPAIFGLHHIGMQMRIAMFSLIYKKTLKLSSRVLDKISIGQL VSLLSNNLNKFDEGLALAHFVWIAPLQVALLMGLIWELLQASAFCGLGFLIVLALFQAGL GRMMMKYRDQRAGKISERLVITSEMIENIQSVKAYCWEEAMEKMIENLRQTELKLTRKAA YVRYFNSSAFFFSGFFVVFLSVLPYALIKGIILRKIFTTISFCIVLRMAVTRQFPWAVQT WYDSLGAINKIQDFLQKQEYKTLEYNLTTTEVVMENVTAFWEEGFGELFEKAKQNNNNRK TSNGDDSLFFSNFSLLGTPVLKDINFKIERGQLLAVAGSTGAGKTSLLMVIMGELEPSEG KIKHSGRISFCSQFSWIMPGTIKENIIFGVSYDEYRYRSVIKACQLEEDISKFAEKDNIV LGEGGITLSGGQRARISLARAVYKDADLYLLDSPFGYLDVLTEKEIFESCVCKLMANKTR ILVTSKMEHLKKADKILILHEGSSYFYGTFSELQNLQPDFSSKLMGCDSFDQFSAERRNS ILTETLHRFSLEGDAPVSWTETKKQSFKQTGEFGEKRKNSILNPINSIRKFSIVQKTPLQ MNGIEEDSDEPLERRLSLVPDSEQGEAILPRISVISTGPTLQARRRQSVLNLMTHSVNQG QNIHRKTTASTRKVSLAPQANLTELDIYSRRLSQETGLEISEEINEEDLKECFFDDMESI PAVTTWNTYLRYITVHKSLIFVLIWCLVIFLAEVAASLVVLWLLGNTPLQDKGNSTHSRN NSYAVIITSTSSYYVFYIYVGVADTLLAMGFFRGLPLVHTLITVSKILHHKMLHSVLQAP MSTLNTLKAGGILNRFSKDIAILDDLLPLTIFDFIQLLLIVIGAIAVVAVLQPYIFVATV PVIVAFIMLRAYFLQTSQQLKQLESEGRSPIFTHLVTSLKGLWTLRAFGRQPYFETLFHK ALNLHTANWFLYLSTLRWFQMRIEMIFVIFFIAVTFISILTTGEGEGRVGIILTLAMNIM STLQWAVNSSIDVDSLMRSVSRVFKFIDMPTEGKPTKSTKPYKNGQLSKVMIIENSHVKK DDIWPSGGQMTVKDLTAKYTEGGNAILENISFSISPGQRVGLLGRTGSGKSTLLSAFLRL LNTEGEIQIDGVSWDSITLQQWRKAFGVIPQKVFIFSGTFRKNLDPYEQWSDQEIWKVAD EVGLRSVIEQFPGKLDFVLVDGGCVLSHGHKQLMCLARSVLSKAKILLLDEPSAHLDPVT 
YQIIRRTLKQAFADCTVILCEHRIEAMLECQQFLVIEENKVRQYDSIQKLLNERSLFRQA ISPSDRVKLFPHRNSSKCKSKPQIAALKEETEEEVQDTRL""".replace("\n", "") # Replace function gets rid of `new line` characters. ``` We will use a sliding window approach. >For a pre-defined window size of `n`, at any given point in the sequence, we will average its current and `(n-1)/2` preceding and proceeding values. You can think of it as a fixed-size rectangle moving across the sequence, averaging out the results to calculate the mean value of a wider section. `enumerate` function generates tuples containing consecutive numbers with the values at the given positions. ``` window_size = 11 hydrophobicity = [] # The hydrophobicity value at a given point in the sequence. for i, aa in enumerate(sequence): # This will return tuple of a position in the sequence and the aminoacid. window_start = int(i - (window_size-1)/2) window_end = int(i + (window_size-1)/2)+1 if window_start < 0 or window_end > len(sequence): window_hydrophobicity = None # At the very beginning and at the very end the window will be outside of the sequence. else: aas_in_window = sequence[window_start:window_end] # A list of all the aminoacids in the window. window_hydrophobicity = sum([Kyte_and_Doolittle_scale[aa] for aa in aas_in_window]) / window_size hydrophobicity.append(window_hydrophobicity) print("Calculated hydrophobicity for {} positions".format(len(hydrophobicity))) # Note that this is slightly different than GRAVY from the reference paper. print("Average hydrophobicity:", "{:.4f}".format(sum(h if h else 0 for h in hydrophobicity) / len(hydrophobicity))) ``` Now let's plot the hydrophobicity along the sequence to detect hydrophobic and hydrophilic regions. We will initialise `matplotlib` visualisation so that figures can be displayed in notebook cells. Then we will ask it to plot the `hydrophobicity` list from the previous calculation. You may find this [Hydrophilicity Plot link](https://en.wikipedia.org/wiki/Hydrophilicity_plot) helpful. 
``` %matplotlib inline import matplotlib.pyplot as plt plt.rcParams['figure.dpi'] = 80 plt.plot(hydrophobicity) plt.title("Hydrophobicity per sequence region using window size {}".format(window_size)) plt.show() ``` ## BLAST Basic Local Alignment Search Tool allows finding similar regions across proteins and retrieving the most similar ones. **Biopython** provides tools for both local BLAST tools (e.g. those normally run on a command line) as well as remote computation services. In this section, we will use NCBI BLAST cloud services. *Note: running BLAST is computationally heavy, especially so with large databases of sequences, expect any calls to take at least several minutes.* **Biopython** Blast module contains two classes: - `NCBIWWW` - to issue queries to the remote server - `NCBIXML` - to convert results (in XML format) to an object that can be easily used in the code ``` from Bio.Blast import NCBIWWW, NCBIXML # This code will take several minutes to run as it executes a BLAST search in NCBI cloud environment. query_handle = NCBIWWW.qblast( "blastp", # The particular BLAST tool to be used. blastp is used for proteins. database="nr", # The database to perform the search on. "nr" is the non-redundant protein sequence database. sequence=cftr_aa.seq ) blast_results = next(NCBIXML.parse(query_handle)) ``` In order to visualise the results, we could simply iterate over them, printing the relevant information. Here we will use `pandas` library to display the results. *Note: if no results are displayed, you may need to re-run the query step. Wait for it to finish before running `pandas` code*. ``` import pandas as pd df = pd.DataFrame([ { "title": a.title, "accession": a.accession, "hit_def": a.hit_def, "length": a.length, "e_value": a.hsps[0].expect } for a in blast_results.alignments ]) df.head(n=20) ``` ## PDB files PDB files contain the full 3D representation of the proteins - either experimentally-derived or predicted. 
Here we will take a brief look at how to download and parse them. Systems such as Pymol can be used to inspect the 3D structure and interactions on the atomic level.

We will download and briefly analyse the experimentally determined [structure of the protein encoded by CFTR gene](https://www.rcsb.org/structure/6O1V).

```
from urllib.request import urlretrieve
result_location, _ = urlretrieve("https://files.rcsb.org/download/6O1V.pdb", "data/6O1V.pdb")
print("File downloaded to:", result_location)

from Bio.PDB.PDBParser import PDBParser
parser = PDBParser()
structure = parser.get_structure("6O1V", "data/6O1V.pdb")
```

Warnings like the one above are common when reading PDB files. This is due to (often) small incompatibilities between the standard and the content of the generated files. **Biopython** allows for strict parsing, i.e. if specified, this warning would turn to an error, which would prevent the execution of this code. This would, however, be impractical as these small abnormalities are commonplace.

PDB files contain the hierarchical representation consisting of:
 - `structure` at the top level
 - `model` nested underneath
 - `chain` - in this case there are 2
 - `residue` - belonging to a chain - a particular aminoacid
 - `atom` - which contains the coordinates

More information on the PDB structure and representation can be [found here](https://pdb101.rcsb.org/learn/guide-to-understanding-pdb-data/introduction).

Let's traverse the parsed file and calculate some statistics. We will be interested in the total TRP count and the number of carbon atoms. 
``` total_TRP_residues = 0 total_carbon_atoms = 0 for model in structure: for chain in model: for residue in chain: if residue.resname == "TRP": total_TRP_residues += 1 for atom in residue: if atom.element=="C": total_carbon_atoms += 1 print("Total number of TRP aminoacids in the structure:", total_TRP_residues) print("Total number of carbon atoms in the structure: ", total_carbon_atoms) ``` We can leverage the coordinates provided by the atoms to find out the `bounding box` around the structure - by finding out the minimum and maximum value for each dimension. ``` # We start from extreme values and expect them to go down once we encounter a `better` value. min_atom_coord = [1000, 1000, 1000] max_atom_coord = [-1000, -1000, -1000] for model in structure: for chain in model: for residue in chain: for atom in residue: coord=atom.coord for dim, val in enumerate(coord): if val < min_atom_coord[dim]: min_atom_coord[dim] = val elif val > max_atom_coord[dim]: max_atom_coord[dim] = val print("Minimum coordinates for each dimension: ", min_atom_coord) print("Maximum coordinates for each dimension: ", max_atom_coord) ``` `Bio.PDB` module contains further utilities to acquire, save, transform and superimpose contents of the PDB files - including `mmcif` format. These can then be filtered or adjusted accordingly before performing deeper analysis with 3D-first tools such as Pymol. ## Discussion This notebook provided an introduction to the protein-related functionality of *Biopython*. Here we studied how to perform analysis and search for related proteins in a programmable, repeatable, and scalable way. *Biopython* is a much larger library, with a plethora of functionality. It can provide you with tried and tested algorithms and connectors to speed up your research. However, due to the size of the library *(we could easily spend all this time on reading the general information on each module it contains)*, we have only taken a look at the very small subset of what it can offer. 
Don't be afraid to experiment and play around with the cells in this notebook. If you are interested in learning more, take a look at the extra resources outlined in the top section. Take a look at the exercises to try out what you learnt. Click [here](#Contents) to go back to the contents.
github_jupyter
# Proyecto - Calculadora ## **Programación** ### *Universidad Central* ### *Elaborado por:* * Juan Castillo (Interfaz gráfica, Cálculo Vectorial) * Laura Contreras (Pre-Álgebra, gráficas) * Carlos Carvajales (Cálculo diferencial) * Jessica Santos (Álgebra lineal) * María García (Cálculo Integral) Querido usuario, Por favor, lee el manual para más información del funcionamiento de la calculadora. ``` ## Paquetes ! import matplotlib.pyplot as plt from ipywidgets import interact, interactive, interact_manual, widgets import numpy as np import sympy as spp from sympy.parsing.sympy_parser import parse_expr from mpl_toolkits.mplot3d import Axes3D from sympy.plotting import plot3d,plot, PlotGrid from IPython.display import display, Markdown x,y,z,w = spp.symbols('x y z w') # Materia: Pre-Álgebra def pre_algebra(Tema, Funcion) : try: ## Tema: Operaciones básicas if Tema == 'Operaciones Básicas': print(float(parse_expr(Funcion))) ## Tema: Factores y números primos if Tema == 'Factores y Números primos' : Funcion = int(Funcion) def fn(segundo_valor,máximo_común_divisor, Numero_primo): if máximo_común_divisor == True: def mcd(Funcion, segundo_valor): segundo_valor= parse_expr(segundo_valor) Funcion, segundo_valor=max(int(Funcion), segundo_valor),min(int(Funcion), segundo_valor) while segundo_valor!=0: Funcion, segundo_valor=segundo_valor,Funcion%segundo_valor return Funcion print(mcd(int(Funcion), segundo_valor)) if Numero_primo == True: def pri(Funcion): phi=0 for i in range(1,Funcion): ai=Funcion while i!=0: ai,i=i,ai%i if ai==1: phi+=1 if phi== Funcion-1: print("El numero", Funcion, "es primo.") elif phi < Funcion-1: print("El numero", Funcion, " no es primo.") print(pri(Funcion)) interact(fn, segundo_valor='', máximo_común_divisor= False, Numero_primo= False) ## Tema: Logaritmos, Radicales y Exponenciales if Tema == 'Logaritmos, Radicales y Exponenciales' : # Tema: Logaritmos, Radicales y Exponenciales def l(n, Logaritmos, Logaritmos_Naturales, Radicales, Exponenciales) : 
n = parse_expr(n) if Logaritmos == True: try: # Logaritmos huk = np.log(int(Funcion))/np.log(int(n)) print(huk) except: pass if Logaritmos_Naturales == True : try: print(np.log(int(Funcion))) except: pass if Radicales == True: # Radicales try: n2=(int(Funcion))**(1/n) print(spp.N(n2)) except: pass if Exponenciales == True: #Exponenciales try: n3=(int(Funcion))**n print(n3) except: pass interact(l, n='', Logaritmos = False, Logaritmos_Naturales = False, Radicales = False, Exponenciales = False) except: pass ## Materia: Álgebra lineal def algebra_lineal(Tema): def ing_matrizU(matriz): try: matriz=parse_expr(matriz) m = spp.Matrix(matriz) global U U = m return except: print("Al parecer escribiste mal la matriz") def ing_matrizV(matriz): try: matriz=parse_expr(matriz) m = spp.Matrix(matriz) global V V = m return except: print("Al parecer escribiste mal la matriz") mat1=widgets.Text(description="Matriz",value="[[1,0],[0,1]]") mat2=widgets.Text(description="Matriz",value="[[1,0],[0,1]]") # Tema: Operaciones entre matrices if Tema == 'Operaciones entre matrices': def sumar(n,m): try: print("Ingrese 2 matrices de igual tamaño para ser sumadas") suma=n+m return suma except: print("Las matrices no pueden ser sumadas") def resta(s,j): try: print("Ingrese 2 matrices de igual tamaño para ser restadas") restar=s-j return restar except: print("Las matrices no pueden ser restadas") def multi(n,m): try: print("Ingrese 2 matrices: La primera de tamaño m*n y la segunda n*p para ser multiplicadas") multi_vec=n*m return multi_vec except: print("No se puede realizar la multiplicacion entre matrices") def invert(m): try: matri_inv=m**-1 return matri_inv except: print("Error:La inversa de una matriz solo puede ser calculada para matrices cuadradas") def det(m): try: dt=m.det() return dt except: print("Esta matriz no tiene determinante ya que debe ser cuadrada") def f(Operacion,m1,m2): mat1=ing_matrizU(m1) mat2=ing_matrizV(m2) m = U r = V if Operacion == 'Suma': k=sumar(m,r) k if Operacion 
== 'Resta': k=resta(m,r) k if Operacion == 'Multiplicación': k=multi(m,r) k if Operacion == 'Inversa': k=invert(m) l=invert(r) def inv(Inversa_1,Inversa_2): if Inversa_1 == True: print("Matriz inversa de la primera matriz",k) if Inversa_2 == True: print("Matriz inversa de la segunda matriz",l) return inv interact(inv, Matriz='', Inversa_1 = False, Inversa_2 = False) if Operacion == 'Determinante': k=det(m) l=det(r) def DET(Determinante_1,Determinante_2): if Determinante_1 == True: print("El determinante de la primera matriz es ",k) if Determinante_2 == True: print("El determinante de la segunda matriz es ",l) return DET interact(DET, Matriz='', Determinante_1= False, Determinante_2 = False) return k interact(f, Operacion =['Suma','Resta','Multiplicación','Inversa','Determinante'],m1=mat1,m2=mat2) # Tema: Sistemas de ecuaciones lineales if Tema == 'Sistema de ecuaciones lineales': def solucion(n,m): try: solucion1=spp.linsolve((n,m),[x,y,z]) return solucion1 except: print("Ups ocurrio algo, revisa la matriz") def g(tema,m1,m2): mat1=ing_matrizU(m1) mat2=ing_matrizV(m2) m = U r = V if tema == 'Solución por eliminación': print("Ingrese las matrices: Tenga en cuenta que la primera matriz es la matriz aumentada y la segunda es el resultado de la matriz aumentada") k=solucion(m,r) k return k interact(g, tema ='Solución por eliminación',m1=mat1,m2=mat2) # Tema: Operaciones entre vectores if Tema == 'Operaciones entre vectores': import numpy as np def ing_matrizV(vector): try: vector=parse_expr(vector) vec_1= np.array(vector) m=vec_1[np.newaxis, :] global V V = m return except: print("Al parecer escribiste mal el vector") def ing_matrizU(vector): try: print("Ingresa dos vectores, recuerda que debe ser entre paréntesis y separado por comas: (1,2)") vector=parse_expr(vector) vec_1= np.array(vector) m=vec_1[np.newaxis, :] global U U = m except: print("Al parecer escribiste mal el vector") mat3=widgets.Text(description="Vector",value="(1,0)") 
mat4=widgets.Text(description="Vector",value="(1,0)") def sumar2(n,m): try: suma2=n+m return suma2 except: print("Los vectores no pueden ser sumados") def resta2(s,j): try: restar2=j+-s return restar2 except: print("Los vectores no pueden ser restados") def h(tema,m1,m2): mat3=ing_matrizU(m1) mat4=ing_matrizV(m2) m = U r = V if tema == 'Suma entre vectores': k=sumar2(r,m) print("Este es el resultado de sumar dos vectores:",k) if tema == 'Resta entre vectores': k=resta2(r,m) k return k if tema == 'Multiplicación': def ing_matrizZ(vector): try: vector=parse_expr(vector) vec_1= np.array(vector) m=vec_1[np.newaxis, :] global V V = m except: print("Al parecer escribiste mal el vector") def ing_matrizW(vector): try: print("Ingresa un vector y un escalar, recuerda que el vector debe ser entre paréntesis y separado por comas: (1,2)") vector=parse_expr(vector) m=vector global U U = m except: print("Al parecer escribiste mal el escalar") mat3=widgets.Text(description="Vector",value="(1,0)") mat4=widgets.Text(description="Escalar",value="#") def multip(n,m): try: multi_vec=n*m return multi_vec except: print("No se puede realizar la multiplicacion por escalar") def i(tema,m1,m2): mat3=ing_matrizZ(m1) mat4=ing_matrizW(m2) m = U r = V if tema == 'Multiplicación por escalar': P=multip(m,r) P return P interact(i,tema='Multiplicación por escalar',m1=mat3,m2=mat4) interact(h, tema =['Suma entre vectores','Resta entre vectores','Multiplicación'],m1=mat3,m2=mat4) # Materia: Cálculo diferencial def calculo_diferencial(Tema, Funcion): ## Gráfica de la función button1, button2 = widgets.Button(description="Dos Dimensiones"), widgets.Button(description="Tres Dimensiones") output1, output2 = widgets.Output(), widgets.Output() display(button1,output1); display(button2,output2) def DosD(dosd) : # 2D with output1: try: plot(Funcion,(x,-20,20), (y,-40,40)) except: print("No es posible gráficar en R2") def TresD(tresd) : # 3D with output2: try: plot3d(Funcion,(x,-20,20),(y,-40,40), (z,-20,20)) 
except: print("No es posible gráficar en R3") button1.on_click(DosD); button2.on_click(TresD) try: ## Tema: Derivadas if Tema == 'Derivada': try: def h2( Primera_derivada,Segunda_derivada, Tercera_derivada, Cuarta_derivada, Quinta_derivada): if Primera_derivada == True : try: Primera_derivada = spp.diff(Funcion,x) dom1 = np.arange(-20,20,0.1) ran1 = [Primera_derivada.subs(x,i) for i in dom1] plt.plot(dom1,ran1, 'r', label='Primera derivada') plt.title("Primera derivada"); plt.legend(); plt.grid() print(Primera_derivada) except: pass if Segunda_derivada == True : try: Segunda_derivada = spp.diff(Funcion,x,2) dom2 = np.arange(-20,20,0.1) ran2 = [Segunda_derivada.subs(x,i) for i in dom2] plt.plot(dom2,ran2, 'b', label = "Segunda derivada") plt.title("Segunda derivada"); plt.legend(); plt.grid() print(Segunda_derivada) except: pass if Tercera_derivada == True : try: Tercera_derivada = spp.diff(Funcion,x,3) dom3 = np.arange(-20,20,0.1) ran3 = [Tercera_derivada.subs(x,i) for i in dom3] plt.plot(dom3,ran3, 'g', label = "Tercera derivada") plt.title("Tercera derivada"); plt.legend(); plt.grid() print(Tercera_derivada) except: pass if Cuarta_derivada == True : try: Cuarta_derivada = spp.diff(Funcion,x,4) dom4 = np.arange(-20,20,0.1) ran4 = [Cuarta_derivada.subs(x,i) for i in dom4] plt.plot(dom4,ran4, 'c', label = "Cuarta derivada") plt.title("Cuarta derivada");plt.legend(); plt.grid() print(Cuarta_derivada) except: pass if Quinta_derivada == True : try: Quinta_derivada = spp.diff(Funcion,x,5) dom5 = np.arange(-20,20,0.1) ran5 = [Quinta_derivada.subs(x,i) for i in dom5] plt.plot(dom5,ran5, 'y', label = "Quinta derivada") plt.title("Quinta derivada"); plt.legend(); plt.grid() print(Quinta_derivada) except: pass if Primera_derivada == True and Segunda_derivada == True and Tercera_derivada == True and Cuarta_derivada == True and Quinta_derivada == True : try: plt.plot(dom1, ran1, dom2, ran2, dom3, ran3, dom4, ran4, dom5, ran5) plt.legend(); plt.grid() except: pass interact(h2, 
Primera_derivada = False, Segunda_derivada = False, Tercera_derivada= False, Cuarta_derivada= False, Quinta_derivada= False) except: pass ## Tema: Límites if Tema == 'Límites': try: def l2(Tendencia): Tendencia = parse_expr(Tendencia) Limite = spp.limit(Funcion, x, Tendencia) print (Limite) interact(l2, Tendencia = '') except: pass if Tema == 'Raíces de la función': try: def NR(valor_inicial): valor_inicial = parse_expr(valor_inicial) derivada = spp.diff(Funcion) for i in range(15) : a = spp.sympify(Funcion).subs(x,valor_inicial) #Raíces de funciones b = spp.sympify(derivada).subs(x,valor_inicial) valor_inicial = valor_inicial - spp.N(a/b) return(valor_inicial) interact(NR, valor_inicial='') except: pass ## Tema: Máximos y mínimos if Tema == 'Máximos y mínimos': try: f = Funcion dev1 = spp.diff(f) dev2 = spp.diff(dev1) enx = spp.solve(dev1, x) max_or_min = [dev2.subs(x,i) for i in enx] def mamin(max_or_min,enx): try: for i in range(len(max_or_min)): if max_or_min[i] >= 0: #Máximos y Mínimos pos = f.subs(x, enx[i]) print((enx[i], pos), "es un mínimo") elif max_or_min[i] < 0: neg = f.subs(x, enx[i]) print((enx[i], neg), "es un máximo") else: break except Exception: print("La función tiene soluciones complejas. 
No es posible determinar máximos o mínimos.") print(mamin(max_or_min,enx)) except: pass except: pass # Cálculo integral def calculo_integral(Tema, Funcion): ## Gráfica de la función button1, button2 = widgets.Button(description="Dos Dimensiones"), widgets.Button(description="Tres Dimensiones") output1, output2 = widgets.Output(), widgets.Output() display(button1,output1); display(button2,output2) def DosD(dosd) : # 2D with output1: try: plot(Funcion,(x,-20,20), (y,-40,40)) except: print("No es posible gráficar en R2") def TresD(tresd) : # 3D with output2: try: plot3d(Funcion,(x,-20,20),(y,-40,40), (z,-20,20)) except: print("No es posible gráficar en R3") button1.on_click(DosD); button2.on_click(TresD) try: ## Tema: Integrales de una variable if Tema == 'Integrales de una variable': try: def n(Tipo): if Tipo == 'Indefinida': try: def s(dx, dy): if dx == True: try: Integral_indefinida = spp.integrate(parse_expr(Funcion), (x)) dom_1 = np.arange(-20,20,0.1) ran_1 = [Integral_indefinida.subs(x,i) for i in dom_1] ran_2 = [parse_expr(Funcion).subs(x,i) for i in dom_1] plt.plot(dom_1,ran_1, 'r', dom_1, ran_2, 'b', label = Integral_indefinida) plt.title("Integral indefinida"); plt.legend(); plt.grid() print(spp.integrate(parse_expr(Funcion), (x))) except: pass if dy == True: try: print(spp.integrate(parse_expr(Funcion), (y))) except: pass interact(s, dx = False, dy = False) except: pass if Tipo == 'Definida': try: def s(a, b, dx, dy): if dx == True: print(spp.integrate(Funcion, (x, a, b))) elif dy == True: print(spp.integrate(Funcion, (y, a, b))) interact(s, a='', b='', dx = False, dy = False) except: pass interact(n, Tipo = ['Indefinida', 'Definida']) except: pass ## Tema: Sumatoria if Tema == 'Sumatoria': try: def k(a,b): c = 0 for i in range(a, b+1): c += parse_expr(Funcion).subs(x, i) return(spp.N(c)) interact(k, a=widgets.IntText(value=1), b=widgets.IntText(value=1)) except: pass ## Tema: Área bajo una curva if Tema == 'Área bajo una curva': try: def s(a, b, dx, dy): 
if dx == True: print(spp.integrate(Funcion, (x, a, b))) if dy == True: print(spp.integrate(Funcion, (x, a, b))) interact(s, a='', b='', dx = False, dy = False) except: pass ## Tema: Volumen bajo una curva if Tema == 'Volumen bajo una curva': try: def s(a, b): print(spp.integrate(parse_expr(Funcion)**2, (x, a, b))) interact(s, a='', b='') except: pass ## Tema: Área entre dos curvas if Tema == 'Área entre dos curvas': try: def s(Funcion1, a, b, dx, dy): try: Funcion1 = parse_expr(Funcion1) if dx == True: for i in range(a,b+1): F = Funcion.subs(x,i) F1 = Funcion1.subs(x,i) if F > F1: c = Funcion - Funcion1 if F < F1: c = Funcion1 - Funcion print(spp.integrate(c, (x, a, b))) elif dy == True: for i in range(a,b+1): F = Funcion.subs(y,i) F1 = Funcion1.subs(y,i) if F > F1: c = Funcion - Funcion1 if F < F1: c = Funcion1 - Funcion print(spp.integrate(c, (y, a, b))) except Exception: print("No es posible calcular el área.") interact(s, Funcion1='', a=widgets.IntText(value=1), b=widgets.IntText(value=1), dx = False, dy = False) except: pass ## Tema: Volumen entre dos curvas if Tema == 'Volumen entre dos curvas': try: def s(Funcion1, a, b, dx, dy): try: Funcion1 = parse_expr(Funcion1) if dx == True: for i in range(a,b+1): F = Funcion.subs(x,i) F1 = Funcion1.subs(x,i) if F > F1: c = Funcion**2 - Funcion1**2 if F < F1: c = Funcion1**2 - Funcion**2 print(spp.integrate(c, (x, a, b))) elif dy == True: for i in range(a,b+1): F = Funcion.subs(y,i) F1 = Funcion1.subs(y,i) if F > F1: c = Funcion**2 - Funcion1**2 if F < F1: c = Funcion1**2 - Funcion**2 print(spp.integrate(c, (y, a, b))) except Exception: print("No es posible calcular el volumen.") interact(s, Funcion1='', a=widgets.IntText(value=1), b=widgets.IntText(value=1), dx = False, dy = False) except: pass except: pass # Cálculo vectorial def calculo_vectorial(Tema, Funcion): ## Gráfica de la función button1, button2 = widgets.Button(description="Dos Dimensiones"), widgets.Button(description="Tres Dimensiones") output1, output2 = 
widgets.Output(), widgets.Output() display(button1,output1); display(button2,output2) def DosD(dosd) : # 2D with output1: try: plot(Funcion,(x,-20,20), (y,-40,40)) except: print("No es posible gráficar en R2") def TresD(tresd) : # 3D with output2: try: plot3d(Funcion,(x,-20,20),(y,-40,40), (z,-20,20)) except: print("No es posible gráficar en R3") button1.on_click(DosD); button2.on_click(TresD) try: ## Tema: Derivadas parciales if Tema == 'Derivadas parciales': # Tema: Derivadas Parciales try: def g(dfx,dfy,dfz) : if dfx == True : try: dfx = spp.diff(Funcion,x) plot3d(dfx,(x,-5,5),(y,-5,5), title='Derivada parcial con respecto a x',xlabel='eje x',ylabel='eje y') print(dfx) except: pass if dfy == True : try: dfy = spp.diff(Funcion,y) plot3d(dfy,(x,-5,5),(y,-5,5), title='Derivada parcial con respecto a y',xlabel='eje x',ylabel='eje y') print(dfy) except: pass if dfz == True : try: dfz = spp.diff(Funcion,z) plot3d(dfz,(z,-5,5),(y,-5,5), title='Derivada parcial con respecto a z',xlabel='eje x',ylabel='eje y') print(dfz) except: pass if dfx== True and dfy ==True and dfz==True: plot3d(dfx,(x,-5,5),(y,-5,5),(z,-5,5)) and plot3d(dfy,(x,-5,5),(y,-5,5),(z,-5,5)) and plot3d(dfz,(z,-5,5),(y,-5,5),(x,-5,5)) return interact(g, dfx = False, dfy = False, dfz = False) except: pass ## Tema: Integrales dobles if Tema == 'Integrales Dobles' : # Tema: Integrales Dobles try: def h(Tipo) : if Tipo == 'Indefinida': #Indefinida try: intind=spp.integrate(Funcion, (x), (y)) print(intind) plot3d(intind, (x,-5,5),(y,-5,5),title='Integral doble indefinida') except: pass if Tipo == 'Definida': # Definida try: def i(a, b, c,d) : print(spp.integrate(Funcion, (x, a, b), (y, c, d))) interact(i, a='', b='', c='', d='') except: pass interact(h,Tipo = ['Indefinida','Definida']) except: pass ## Tema: Integrales triples if Tema == 'Integrales Triples' : # Tema: Integrales Triples try: def h2(Tipo) : if Tipo == 'Indefinida': # Indefinida try: Indefinida = spp.integrate(Funcion, (x), (y), (z)) 
print(Indefinida) except: pass if Tipo == 'Definida': # Definida try: def i2(liminf_x, limsup_x, liminf_y, limsup_y, liminf_z, limsup_z) : print(spp.integrate(Funcion, (x, liminf_x, limsup_x), (y, liminf_y, limsup_y), (z, liminf_z, limsup_z))) interact(i2, liminf_x='', limsup_x='', liminf_y='', limsup_y='', liminf_z='', limsup_z='') except: pass interact(h2, Tipo = ['Indefinida','Definida']) except: pass except: pass # Creando la tabla, siendo cada pestaña una materia diferente tab_contents = ['Pre-Álgebra','Álgebra lineal', 'Cálculo diferencial', 'Cálculo integral','Cálculo vectorial'] funcion=[interactive(pre_algebra, Tema = ['Operaciones Básicas','Factores y Números primos','Logaritmos, Radicales y Exponenciales'], Funcion=widgets.Text(description="Función: ", value="")), interactive(algebra_lineal, Tema = ['Operaciones entre matrices','Operaciones entre vectores','Sistema de ecuaciones lineales']), interactive(calculo_diferencial, Tema = ['Derivada', 'Límites', 'Raíces de la función', 'Máximos y mínimos'], Funcion=widgets.Text(description="Función: ", value="")), interactive(calculo_integral, Tema = ['Integrales de una variable','Sumatoria', 'Área bajo una curva', 'Volumen bajo una curva', 'Área entre dos curvas', 'Volumen entre dos curvas'], Funcion=widgets.Text(description="Función: ", value="")), interactive(calculo_vectorial, Tema = ['Derivadas parciales','Integrales Dobles', 'Integrales Triples'], Funcion=widgets.Text(description="Función: ", value=""))] tab = widgets.Tab() tab.children = [widgets.VBox(children = i.children) for i in funcion] for i in range(len(tab_contents)): tab.set_title(i,tab_contents[i]) display(tab) ```
github_jupyter
<a href="https://colab.research.google.com/github/VinACE/san_mrc/blob/master/longformer_qa_training.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # Longformer for Question Answering ``` !nvidia-smi !git clone https://github.com/huggingface/transformers.git !pip install -U ./transformers !pip install git+https://github.com/huggingface/nlp.git !pip install nlp ``` The Longformer model was presented in [Longformer: The Long-Document Transformer](https://arxiv.org/abs/2004.05150) by Iz Beltagy, Matthew E. Peters, Arman Cohan. As the paper explains it > `Longformer` is a BERT-like model for long documents. Training longformer for QA is similar to how you train BERT for QA. But there are a few things to keep in mind when using longformer for QA task. Longformer uses sliding-window local attention which scales linearly with sequence length. This is what allows longformer to handle longer sequences. For more details on how the sliding window attention works, please refer to the paper. Along with local attention longformer also allows you to use global attention for certain tokens. For QA task, all question tokens should have global attention. The attention is configured using the `attention_mask` parameter of the `forward` method of `LongformerForQuestionAnswering`. Mask values are selected in [0, 1, 2]: 0 for no attention (padding tokens), 1 for local attention (a sliding window attention), 2 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). As stated above all question tokens should be given global attention. The `LongformerForQuestionAnswering` model handles this automatically for you. To allow it to do that 1. The input sequence must have three sep tokens, i.e. the sequence should be encoded like this `<s> question</s></s> context</s>`.
If you encode the question and answer as a input pair, then the tokenizer already takes care of that, you shouldn't worry about it. 2. input_ids should always be a batch of examples. ## Load and process data Here we are using the awesome new nlp library to load and process the dataset. Also we will use Transformers's fast tokenizers alignement methods to get position of answer spans ``` import torch import nlp from transformers import LongformerTokenizerFast tokenizer = LongformerTokenizerFast.from_pretrained('allenai/longformer-base-4096') def get_correct_alignement(context, answer): """ Some original examples in SQuAD have indices wrong by 1 or 2 character. We test and fix this here. """ gold_text = answer['text'][0] start_idx = answer['answer_start'][0] end_idx = start_idx + len(gold_text) if context[start_idx:end_idx] == gold_text: return start_idx, end_idx # When the gold label position is good elif context[start_idx-1:end_idx-1] == gold_text: return start_idx-1, end_idx-1 # When the gold label is off by one character elif context[start_idx-2:end_idx-2] == gold_text: return start_idx-2, end_idx-2 # When the gold label is off by two character else: raise ValueError() # Tokenize our training dataset def convert_to_features(example): # Tokenize contexts and questions (as pairs of inputs) input_pairs = [example['question'], example['context']] encodings = tokenizer.encode_plus(input_pairs, pad_to_max_length=True, max_length=512) context_encodings = tokenizer.encode_plus(example['context']) # Compute start and end tokens for labels using Transformers's fast tokenizers alignement methodes. 
# this will give us the position of answer span in the context text start_idx, end_idx = get_correct_alignement(example['context'], example['answers']) start_positions_context = context_encodings.char_to_token(start_idx) end_positions_context = context_encodings.char_to_token(end_idx-1) # here we will compute the start and end position of the answer in the whole example # as the example is encoded like this <s> question</s></s> context</s> # and we know the postion of the answer in the context # we can just find out the index of the sep token and then add that to position + 1 (+1 because there are two sep tokens) # this will give us the position of the answer span in whole example sep_idx = encodings['input_ids'].index(tokenizer.sep_token_id) start_positions = start_positions_context + sep_idx + 1 end_positions = end_positions_context + sep_idx + 1 if end_positions > 512: start_positions, end_positions = 0, 0 encodings.update({'start_positions': start_positions, 'end_positions': end_positions, 'attention_mask': encodings['attention_mask']}) return encodings # !ls -ltr # !ls -lrt /root/.cache/huggingface/datasets/squad/plain_text/ # load train and validation split of squad train_dataset = nlp.load_dataset('squad', split=nlp.Split.TRAIN) valid_dataset = nlp.load_dataset('squad', split=nlp.Split.VALIDATION) train_dataset = train_dataset.map(convert_to_features) valid_dataset = valid_dataset.map(convert_to_features, load_from_cache_file=False) # set the tensor type and the columns which the dataset should return columns = ['input_ids', 'attention_mask', 'start_positions', 'end_positions'] train_dataset.set_format(type='torch', columns=columns) valid_dataset.set_format(type='torch', columns=columns) len(train_dataset), len(valid_dataset) # cach the dataset, so we can load it directly for training torch.save(train_dataset, 'train_data.pt') torch.save(valid_dataset, 'valid_data.pt') ``` ## Write training script ``` import dataclasses import logging import os import sys 
from dataclasses import dataclass, field from typing import Dict, List, Optional import numpy as np import torch from transformers import LongformerForQuestionAnswering, LongformerTokenizerFast, EvalPrediction from transformers import ( HfArgumentParser, DataCollator, Trainer, TrainingArguments, set_seed, ) logger = logging.getLogger(__name__) # @dataclass class DummyDataCollator(DataCollator): def collate_batch(self, batch: List) -> Dict[str, torch.Tensor]: """ Take a list of samples from a Dataset and collate them into a batch. Returns: A dictionary of tensors """ input_ids = torch.stack([example['input_ids'] for example in batch]) attention_mask = torch.stack([example['attention_mask'] for example in batch]) start_positions = torch.stack([example['start_positions'] for example in batch]) end_positions = torch.stack([example['end_positions'] for example in batch]) return { 'input_ids': input_ids, 'start_positions': start_positions, 'end_positions': end_positions, 'attention_mask': attention_mask } @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" train_file_path: Optional[str] = field( default='train_data.pt', metadata={"help": "Path for cached train dataset"}, ) valid_file_path: Optional[str] = field( default='valid_data.pt', metadata={"help": "Path for cached valid dataset"}, ) max_len: Optional[int] = field( default=512, metadata={"help": "Max input length for the source text"}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) # we will load the arguments from a json file, # make sure you save the arguments in at ./args.json model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath('args.json')) if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
tokenizer = LongformerTokenizerFast.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) model = LongformerForQuestionAnswering.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) # Get datasets print('loading data') train_dataset = torch.load(data_args.train_file_path) valid_dataset = torch.load(data_args.valid_file_path) print('loading done') # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=valid_dataset, data_collator=DummyDataCollator(), prediction_loss_only=True, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation results = {} if training_args.do_eval and training_args.local_rank in [-1, 0]: logger.info("*** Evaluate ***") eval_output = trainer.evaluate() output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(eval_output.keys()): logger.info(" %s = %s", key, str(eval_output[key])) writer.write("%s = %s\n" % (key, str(eval_output[key]))) results.update(eval_output) return results def _mp_fn(index): # For xla_spawn (TPUs) main() ``` ## Train ``` import json ``` Let's write the arguments in a dict and store in a json file. The above code will load this file and parse the arguments. 
``` args_dict = { "n_gpu": 1, "model_name_or_path": 'allenai/longformer-base-4096', "max_len": 512 , "output_dir": './models', "overwrite_output_dir": True, "per_gpu_train_batch_size": 8, "per_gpu_eval_batch_size": 8, "gradient_accumulation_steps": 16, "learning_rate": 1e-4, "num_train_epochs": 3, "do_train": True } with open('args.json', 'w') as f: json.dump(args_dict, f) ``` Start training! ``` main() ``` ## Eval ``` ## SQuAD evaluation script. Modifed slightly for this notebook from __future__ import print_function from collections import Counter import string import re import argparse import json import sys def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return re.sub(r'\b(a|an|the)\b', ' ', text) def white_space_fix(text): return ' '.join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return ''.join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def f1_score(prediction, ground_truth): prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def exact_match_score(prediction, ground_truth): return (normalize_answer(prediction) == normalize_answer(ground_truth)) def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): scores_for_ground_truths = [] for ground_truth in ground_truths: score = metric_fn(prediction, ground_truth) scores_for_ground_truths.append(score) return max(scores_for_ground_truths) def evaluate(gold_answers, predictions): f1 = exact_match = total = 0 for ground_truths, prediction in 
zip(gold_answers, predictions): total += 1 exact_match += metric_max_over_ground_truths( exact_match_score, prediction, ground_truths) f1 += metric_max_over_ground_truths( f1_score, prediction, ground_truths) exact_match = 100.0 * exact_match / total f1 = 100.0 * f1 / total return {'exact_match': exact_match, 'f1': f1} import torch from transformers import LongformerTokenizerFast, LongformerForQuestionAnswering from tqdm.auto import tqdm tokenizer = LongformerTokenizerFast.from_pretrained('models') model = LongformerForQuestionAnswering.from_pretrained('models') model = model.cuda() model.eval() valid_dataset = torch.load('valid_data.pt') dataloader = torch.utils.data.DataLoader(valid_dataset, batch_size=16) answers = [] with torch.no_grad(): for batch in tqdm(dataloader): start_scores, end_scores = model(input_ids=batch['input_ids'].cuda(), attention_mask=batch['attention_mask'].cuda()) for i in range(start_scores.shape[0]): all_tokens = tokenizer.convert_ids_to_tokens(batch['input_ids'][i]) answer = ' '.join(all_tokens[torch.argmax(start_scores[i]) : torch.argmax(end_scores[i])+1]) ans_ids = tokenizer.convert_tokens_to_ids(answer.split()) answer = tokenizer.decode(ans_ids) answers.append(answer) predictions = [] references = [] for ref, pred in zip(valid_dataset, answers): predictions.append(pred) references.append(ref['answers']['text']) evaluate(references, predictions) ``` ## Model in action 🚀 The trained model is available on Huggingface hub if you want to play with it. You can find the model [here](https://huggingface.co/valhalla/longformer-base-4096-finetuned-squadv1) ``` import torch from transformers import LongformerTokenizer, LongformerForQuestionAnswering tokenizer = LongformerTokenizer.from_pretrained("valhalla/longformer-base-4096-finetuned-squadv1") model = LongformerForQuestionAnswering.from_pretrained("valhalla/longformer-base-4096-finetuned-squadv1") text = "Huggingface has democratized NLP. Huge thanks to Huggingface for this." 
question = "What has Huggingface done ?" encoding = tokenizer.encode_plus(question, text, return_tensors="pt") input_ids = encoding["input_ids"] # default is local attention everywhere # the forward method will automatically set global attention on question tokens attention_mask = encoding["attention_mask"] start_scores, end_scores = model(input_ids, attention_mask=attention_mask) all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist()) answer_tokens = all_tokens[torch.argmax(start_scores) :torch.argmax(end_scores)+1] answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens)) # output => democratized NLP ```
github_jupyter
# Autobatching log-densities example This notebook demonstrates a simple Bayesian inference example where autobatching makes user code easier to write, easier to read, and less likely to include bugs. Inspired by a notebook by @davmre. ``` from __future__ import absolute_import from __future__ import division from __future__ import print_function import functools import itertools import re import sys import time from matplotlib.pyplot import * import jax from jax import lax from jax import numpy as np from jax import scipy from jax import random import numpy as onp import scipy as oscipy ``` # Generate a fake binary classification dataset ``` onp.random.seed(10009) num_features = 10 num_points = 100 true_beta = onp.random.randn(num_features).astype(np.float32) all_x = onp.random.randn(num_points, num_features).astype(np.float32) y = (onp.random.rand(num_points) < oscipy.special.expit(all_x.dot(true_beta))).astype(np.int32) y ``` # Write the log-joint function for the model We'll write a non-batched version, a manually batched version, and an autobatched version. ## Non-batched ``` def log_joint(beta): result = 0. # Note that no `axis` parameter is provided to `np.sum`. result = result + np.sum(scipy.stats.norm.logpdf(beta, loc=0., scale=1.)) result = result + np.sum(-np.log(1 + np.exp(-(2*y-1) * np.dot(all_x, beta)))) return result log_joint(onp.random.randn(num_features)) # This doesn't work, because we didn't write `log_prob()` to handle batching. batch_size = 10 batched_test_beta = onp.random.randn(batch_size, num_features) log_joint(onp.random.randn(batch_size, num_features)) ``` ## Manually batched ``` def batched_log_joint(beta): result = 0. # Here (and below) `sum` needs an `axis` parameter. At best, forgetting to set axis # or setting it incorrectly yields an error; at worst, it silently changes the # semantics of the model. result = result + np.sum(scipy.stats.norm.logpdf(beta, loc=0., scale=1.), axis=-1) # Note the multiple transposes. 
Getting this right is not rocket science, # but it's also not totally mindless. (I didn't get it right on the first # try.) result = result + np.sum(-np.log(1 + np.exp(-(2*y-1) * np.dot(all_x, beta.T).T)), axis=-1) return result batch_size = 10 batched_test_beta = onp.random.randn(batch_size, num_features) batched_log_joint(batched_test_beta) ``` ## Autobatched with vmap It just works. ``` vmap_batched_log_joint = jax.vmap(log_joint) vmap_batched_log_joint(batched_test_beta) ``` # Self-contained variational inference example A little code is copied from above. ## Set up the (batched) log-joint function ``` @jax.jit def log_joint(beta): result = 0. # Note that no `axis` parameter is provided to `np.sum`. result = result + np.sum(scipy.stats.norm.logpdf(beta, loc=0., scale=10.)) result = result + np.sum(-np.log(1 + np.exp(-(2*y-1) * np.dot(all_x, beta)))) return result batched_log_joint = jax.jit(jax.vmap(log_joint)) ``` ## Define the ELBO and its gradient ``` def elbo(beta_loc, beta_log_scale, epsilon): beta_sample = beta_loc + np.exp(beta_log_scale) * epsilon return np.mean(batched_log_joint(beta_sample), 0) + np.sum(beta_log_scale - 0.5 * onp.log(2*onp.pi)) elbo = jax.jit(elbo, static_argnums=(2, 3)) elbo_val_and_grad = jax.jit(jax.value_and_grad(elbo, argnums=(0, 1))) ``` ## Optimize the ELBO using SGD ``` def normal_sample(key, shape): """Convenience function for quasi-stateful RNG.""" new_key, sub_key = random.split(key) return new_key, random.normal(sub_key, shape) normal_sample = jax.jit(normal_sample, static_argnums=(1,)) key = random.PRNGKey(10003) beta_loc = np.zeros(num_features, np.float32) beta_log_scale = np.zeros(num_features, np.float32) step_size = 0.01 batch_size = 128 epsilon_shape = (batch_size, num_features) for i in range(1000): key, epsilon = normal_sample(key, epsilon_shape) elbo_val, (beta_loc_grad, beta_log_scale_grad) = elbo_val_and_grad( beta_loc, beta_log_scale, epsilon) beta_loc += step_size * beta_loc_grad beta_log_scale += step_size * 
beta_log_scale_grad if i % 10 == 0: print('{}\t{}'.format(i, elbo_val)) ``` ## Display the results Coverage isn't quite as good as we might like, but it's not bad, and nobody said variational inference was exact. ``` figure(figsize=(7, 7)) plot(true_beta, beta_loc, '.', label='Approximated Posterior Means') plot(true_beta, beta_loc + 2*np.exp(beta_log_scale), 'r.', label='Approximated Posterior $2\sigma$ Error Bars') plot(true_beta, beta_loc - 2*np.exp(beta_log_scale), 'r.') plot_scale = 3 plot([-plot_scale, plot_scale], [-plot_scale, plot_scale], 'k') xlabel('True beta') ylabel('Estimated beta') legend(loc='best') ```
github_jupyter
# 通过PYNQ加速OPENCV函数(Sobel算子) 在阅读本部分UserGuide时,请确认已做好以下准备: * 已经按照之前的预备文档安装好依赖环境<br> * 2根HDMI传输线(对输入视频流以及输出视频流进行测试) * 一台支持HDMI的显示器(对输入视频流以及输出视频流进行测试) ## 步骤1:加载cv2pynq库 ``` import cv2pynq as cv2 ``` 在正常运行的情况下,可以看到PYNQ板卡标记为“DONE”的LED闪烁(为加载了bit文件的效果); 这是由于在封装的时候,我们在初始化阶段调用了Overlay方法给PYNQ加载了定制的bit文件: ```python def __init__(self, load_overlay=True): self.bitstream_name = None self.bitstream_name = "cv2pynq03.bit" self.bitstream_path =os.path.join(CV2PYNQ_BIT_DIR,self.bitstream_name) self.ol = Overlay(self.bitstream_path) ``` 上述代码为cv2pynq.py的部分节选,从当中可以看出在初始化的过程中,加载了cv2pynq03.bit, 因此在导入库的时候会出现加载bit文件的效果。加载的bit文件的Block Design如下图所示: ![Image1](./image/1.png) 这个Block Design主要由以下三个部分组成: * HDMI输入输出模块(移植于BaseOverlay) * 由Vivado HLS生成的OPENCV算法加速IP核(内嵌于Image_Fliter模块) * 基于AXI总线架构的流传输通道 ## 步骤2:对单张图像的处理效果测试 ### 步骤2.1:采用原始的OpenCV中的Sobel算子对输入图像进行处理 在进行原始处理效果测试前,我们需要导入以下模块 * 导入原始的OPENCV * 导入time模块计算处理的时间 * 导入Pillow模块用于读取图像 * 导入numpy模块将jpeg格式的图片转化为数组形式 ``` import cv2 as openCV import time from PIL import Image import numpy as np frame_in = Image.open('../image/lena.jpg') img_in = np.array(frame_in) fps_opencv = 0 fps_cv2pynq_with_cma = 0 fps_cv2pynq_without_cma = 0 start = time.time() for i in range(10): out_opencv = openCV.Sobel(img_in,-1,1,0,ksize=5) end = time.time() img_opencv = Image.fromarray(out_opencv.astype('uint8')) fps_opencv = 1.0/((end - start)/10) print("Frames per second using openCV: " + str(fps_opencv)) img_opencv ``` ### 步骤2.2:采用cv2pynq中的Sobel算子对输入图像进行处理 ``` start = time.time() for i in range(10): out_cv2pynq1 = cv2.Sobel(img_in,-1,1,0,ksize=5) end = time.time() img_cv2pynq_0 = Image.fromarray(out_cv2pynq1.astype('uint8')) fps_cv2pynq_without_cma = 1.0/((end - start)/10) print("Frames per second using cv2pynq without cma: " + str(fps_cv2pynq_without_cma)) img_cv2pynq_0 ``` #### CMA&cv2pynq CMA(Contiguous Memory Allocator)是智能连续内存分配技术,是Linux 
Kernel内存管理系统的扩展,目的在于解决需要预留大量连续内存导致运行内存紧张的问题。通过对内存的连续分配,cv2pynq可以将图片数据以流的形式传输给PL端,提高图像处理的速率;在PYNQ的原生库中提供了Xlnk类用来分配连续的内存空间,对Xlnk的更多了解,可参考:<br> https://pynq-testing.readthedocs.io/en/master/pynq_libraries/xlnk.html ``` from pynq import Xlnk xlnk = Xlnk() image_buffer = xlnk.cma_array(shape=(512,512), dtype=np.uint8) out_buffer = xlnk.cma_array(shape=(512,512), dtype=np.uint8) np.copyto(image_buffer,img_in) start = time.time() for i in range(10): cv2.Sobel(image_buffer,-1,1,0,ksize=5,dst=out_buffer) end = time.time() img_cv2pynq_1 = Image.fromarray(out_buffer.astype('uint8')) fps_cv2pynq = 1.0/((end - start)/10) print("Frames per second using cv2PYNQ: " + str(fps_cv2pynq)) image_buffer.close() out_buffer.close() img_cv2pynq_1 ``` ## 步骤3:对视频流处理的效果显示 ### 步骤3.1:实例化HDMI输入输出接口 在进行cv2pynq的视频流测试之前,我们需要引入视频流,<br>此处由Block Design中设置好的HDMI输入模块传入视频以及输出模块输出经过处理好的视频流信息;<br>关于HDMI输入输出的更多详情,可以参考BaseOverlay中的Video模块;<br>https://github.com/Xilinx/PYNQ/tree/master/boards/Pynq-Z2/base/notebooks/video <br>https://github.com/Xilinx/PYNQ/tree/master/boards/Pynq-Z1/base/notebooks/video ``` hdmi_in = cv2.video.hdmi_in hdmi_out = cv2.video.hdmi_out hdmi_in.configure(cv2.PIXEL_GRAY) hdmi_out.configure(hdmi_in.mode) hdmi_in.start() hdmi_out.start() print(hdmi_in.mode) ``` 在正确的输入视频流信息之后,我们可以得到输入视频流的配置信息;<br>在本实验中,最大支持 1920 * 1080的输入信号。 ### 步骤3.2:采用原始的OpenCV中的Sobel算子对输入信号进行处理 ``` iterations = 10 start = time.time() for i in range(iterations): inframe = hdmi_in.readframe() outframe = hdmi_out.newframe() openCV.Sobel(inframe,-1,1,0,ksize=5,dst=outframe) inframe.freebuffer() hdmi_out.writeframe(outframe) end = time.time() print("Frames per second using OpenCV: " + str(iterations / (end - start))) ``` ### 步骤3.3:采用cv2pynq中的Sobel算子对输入信号进行处理 ``` import time iterations = 10 start = time.time() for i in range(iterations): inframe = hdmi_in.readframe() outframe = hdmi_out.newframe() cv2.Sobel(inframe,-1,1,0,ksize=5,dst=outframe) inframe.freebuffer() hdmi_out.writeframe(outframe) end = time.time() 
print("Frames per second using cv2PYNQ: " + str(iterations / (end - start))) ``` ### 步骤3.4:释放HDMI驱动 ``` hdmi_out.close() hdmi_in.close() ``` ### 步骤3.5:关闭cv2pynq 关闭cv2pynq是一个很重要的步骤,因为在BaseOverlay中的video子系统模块中,图片是以连续的内存数组(contiguous memory arrays)作为存储形式,因此在调用cv2pynq时,可以直接将数据以流的形式传输到PL端。所以为避免cv2pynq一直占用连续的内存,必须关闭cv2pynq以释放内存,而对连续的内存分配是基于PYNQ的Xlnk库,关于Xlnk的更多详情,可参考:<br>https://pynq.readthedocs.io/en/latest/pynq_libraries/xlnk.html ``` cv2.close() ``` ## 附录:PL端是如何加速OPENCV函数处理的 在此项目中是采用一种基于Vivado HLS加速OpenCV程序的方法:<br>其核心是利用Xilinx高层次综合工具Vivado HLS,将C++编写的OpenCV程序按照Vivado HLS处理规范进行修改,进而将代码转换为硬件描述语言,可快速生成IP核。结合Xilinx PYNQ SoC架构,在顶层可直接对我们的Block Design进行Python式的封装,实现OpenCV程序算法向SoC系统的移植和加速。<br> ![Image of HLS](./image/2.png) #### Sobel算子概述 Sobel算子是像素图像边缘检测中最重要的算子之一,在机器学习、数字媒体、计算机视觉等信息科技领域起着举足轻重的作用。在技术上,它是一个离散的一阶差分算子,用来计算图像亮度函数的一阶梯度之近似值。在图像的任何一点使用此算子,将会产生该点对应的梯度矢量或是其法矢量。 #### Sobel算子核心公式(ksize=3) 该算子包含两组3x3的矩阵(当ksize=3时),分别为横向及纵向,将之与图像作平面卷积,即可分别得出横向及纵向的亮度差分近似值。如果以A代表原始图像,Gx及Gy分别代表经横向及纵向边缘检测的图像,其公式如下: $G_{x}=\begin{bmatrix} +1&0&-1\\ +2&0&-2\\ +1&0&-1\\ \end{bmatrix}*A$ and $G_{y}=\begin{bmatrix} +1&+2&+1\\ 0&0&0\\ -1&-2&-1\\ \end{bmatrix}*A$ 图像的每一个像素的横向及纵向梯度近似值可用以下的公式结合,来计算梯度的大小。 $G = \sqrt[2]{G_x^2+G_y^2}$ 然后可用以下公式计算梯度方向: $\Theta=arctan(\frac{G_y}{G_x})$ 更多关于Sobel算子的详细信息,可参考:<br>https://docs.opencv.org/3.0-beta/doc/tutorials/imgproc/imgtrans/sobel_derivatives/sobel_derivatives.html #### 在Vivado HLS中映射Sobel算子的结构 在OpenCV中,通过传入dx与dy来求X方向的梯度以及Y方向的梯度从而输出不同方向上的处理结果, 而在本工程中,通过Vivado HLS建立了一个可通用性的卷积核矩阵IP核(fliter2D),其通过接受PS的传输参数来控制卷积核的参数 ```python def Sobel(self,src, ddepth, dx, dy, dst, ksize): if(ksize == 3): self.f2D.rows = src.shape[0] self.f2D.columns = src.shape[1] self.f2D.channels = 1 if (dx == 1) and (dy == 0) : if self.filter2DType != 0 : self.filter2DType = 0 self.f2D.r1 = 0x000100ff #[-1 0 1] self.f2D.r2 = 0x000200fe #[-2 0 2] self.f2D.r3 = 0x000100ff #[-1 0 1] ```
上述代码为顶层Python封装时对Sobel函数的部分描述,从中可以获得信息:<br>顶层通过传递dx与dy的值,设置好卷积核的参数,传入IP核(fliter2D)中,此处可看出在dx=1,dy=0时(即Sobel算子在X方向的卷积核)与上面对Sobel算子的描述是相同的,下述代码为在Vivado HLS中对IP核的部分描述: ```C #include "filter2D_hls.h" void filter2D_hls(wide_stream* in_stream, wide_stream* out_stream, int rows, int cols, int channels, int mode, ap_uint<32> r1, ap_uint<32> r2, ap_uint<32> r3) { #pragma HLS INTERFACE axis port * #pragma HLS INTERFACE s_axilite * #pragma HLS INTERFACE ap_stable * #pragma HLS dataflow GRAY_IMAGE g_img_0(rows,cols); GRAY_IMAGE g_img_1(rows,cols); const int col_packets = cols*channels/4; const int packets = col_packets*rows; const int pixel_cnt = rows*cols; for(int r = 0; r < packets; r++){ #pragma HLS pipeline II=4 ap_uint<32> dat = in_stream->data; g_img_0.write(GRAY_PIXEL(dat.range(7,0))); ++in_stream; } const int kernel_size = 3; hls::Window<kernel_size,kernel_size,ap_int<8> > kernel; kernel.val[0][0] = r1.range(7,0); hls::Point_<int> c_point; c_point.x=-1; c_point.y=-1; hls::Filter2D<hls::BORDER_DEFAULT>(g_img_0,g_img_1,kernel, c_point); for(int r = 0; r < rows; r++){ #pragma HLS pipeline II=4 for(int c = 0; c < col_packets; c++){ ap_uint<32> dat; dat.range(7,0) = g_img_1.read().val[0];; out_stream->data = dat; out_stream->user = (r == 0 && c == 0)? 1: 0; out_stream->last = (r == rows-1 && c == col_packets-1)? 1: 0; ++out_stream; } } } ``` 从Vivado HLS中对IP核的部分描述中,可以得到以下信息:<br> * 将输入的信息用g_img_0来存储 * 根据PS端传入的r1,r2,r3参数设置卷积核 * 将g_img_0与设置好的卷积核(kernel)进行卷积,卷积结果输出给g_img_1 * 将输出结果赋予out_stream 上述过程简要的描述了Sobel算子在(ksize = 3)的情况下,如何在HLS中编写相应的算法从而生成IP核,并且在上层用Python对IP核进行封装的过程。<br>如要了解更多的关于OpenCV在HLS上的应用,可以参考XAP1167。<br>如需对本UserGuide中的源码有更多的了解,可以参考:<br>https://github.com/xupsh/cv2pynq/blob/XUP/cv2pynq/cv2pynq.py<br> https://github.com/xupsh/cv2PYNQ-The-project-behind-the-library/blob/master/ip/HLS/filter2D/filter2D_hls.cpp
github_jupyter
# Logistic Regression with a Neural Network mindset Welcome to your first (required) programming assignment! You will build a logistic regression classifier to recognize cats. This assignment will step you through how to do this with a Neural Network mindset, and so will also hone your intuitions about deep learning. **Instructions:** - Do not use loops (for/while) in your code, unless the instructions explicitly ask you to do so. **You will learn to:** - Build the general architecture of a learning algorithm, including: - Initializing parameters - Calculating the cost function and its gradient - Using an optimization algorithm (gradient descent) - Gather all three functions above into a main model function, in the right order. ## 1 - Packages ## First, let's run the cell below to import all the packages that you will need during this assignment. - [numpy](www.numpy.org) is the fundamental package for scientific computing with Python. - [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file. - [matplotlib](http://matplotlib.org) is a famous library to plot graphs in Python. - [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end. ``` import numpy as np import matplotlib.pyplot as plt import h5py import scipy from PIL import Image from scipy import ndimage from lr_utils import load_dataset %matplotlib inline ``` ## 2 - Overview of the Problem set ## **Problem Statement**: You are given a dataset ("data.h5") containing: - a training set of m_train images labeled as cat (y=1) or non-cat (y=0) - a test set of m_test images labeled as cat or non-cat - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB). Thus, each image is square (height = num_px) and (width = num_px). You will build a simple image-recognition algorithm that can correctly classify pictures as cat or non-cat. 
Let's get more familiar with the dataset. Load the data by running the following code. ``` # Loading the data (cat/non-cat) train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = load_dataset() ``` We added "_orig" at the end of image datasets (train and test) because we are going to preprocess them. After preprocessing, we will end up with train_set_x and test_set_x (the labels train_set_y and test_set_y don't need any preprocessing). Each line of your train_set_x_orig and test_set_x_orig is an array representing an image. You can visualize an example by running the following code. Feel free also to change the `index` value and re-run to see other images. ``` # Example of a picture index = 156 plt.imshow(train_set_x_orig[index]) print ("y = " + str(train_set_y[:, index]) + ", it's a '" + classes[np.squeeze(train_set_y[:, index])].decode("utf-8") + "' picture.") ``` Many software bugs in deep learning come from having matrix/vector dimensions that don't fit. If you can keep your matrix/vector dimensions straight you will go a long way toward eliminating many bugs. **Exercise:** Find the values for: - m_train (number of training examples) - m_test (number of test examples) - num_px (= height = width of a training image) Remember that `train_set_x_orig` is a numpy-array of shape (m_train, num_px, num_px, 3). For instance, you can access `m_train` by writing `train_set_x_orig.shape[0]`. 
``` ### START CODE HERE ### (≈ 3 lines of code) m_train = train_set_x_orig.shape[0] m_test = test_set_x_orig.shape[0] num_px = train_set_x_orig.shape[1] ### END CODE HERE ### print ("Number of training examples: m_train = " + str(m_train)) print ("Number of testing examples: m_test = " + str(m_test)) print ("Height/Width of each image: num_px = " + str(num_px)) print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)") print ("train_set_x shape: " + str(train_set_x_orig.shape)) print ("train_set_y shape: " + str(train_set_y.shape)) print ("test_set_x shape: " + str(test_set_x_orig.shape)) print ("test_set_y shape: " + str(test_set_y.shape)) ``` **Expected Output for m_train, m_test and num_px**: <table style="width:15%"> <tr> <td>**m_train**</td> <td> 209 </td> </tr> <tr> <td>**m_test**</td> <td> 50 </td> </tr> <tr> <td>**num_px**</td> <td> 64 </td> </tr> </table> For convenience, you should now reshape images of shape (num_px, num_px, 3) in a numpy-array of shape (num_px $*$ num_px $*$ 3, 1). After this, our training (and test) dataset is a numpy-array where each column represents a flattened image. There should be m_train (respectively m_test) columns. **Exercise:** Reshape the training and test data sets so that images of size (num_px, num_px, 3) are flattened into single vectors of shape (num\_px $*$ num\_px $*$ 3, 1). 
A trick when you want to flatten a matrix X of shape (a,b,c,d) to a matrix X_flatten of shape (b$*$c$*$d, a) is to use: ```python X_flatten = X.reshape(X.shape[0], -1).T # X.T is the transpose of X ``` ``` # Reshape the training and test examples ### START CODE HERE ### (≈ 2 lines of code) (a, b, c, d) = train_set_x_orig.shape train_set_x_flatten = train_set_x_orig.reshape(a, -1).T (e, f, g, h) = test_set_x_orig.shape test_set_x_flatten = test_set_x_orig.reshape(e, -1).T ### END CODE HERE ### print ("train_set_x_flatten shape: " + str(train_set_x_flatten.shape)) print ("train_set_y shape: " + str(train_set_y.shape)) print ("test_set_x_flatten shape: " + str(test_set_x_flatten.shape)) print ("test_set_y shape: " + str(test_set_y.shape)) print ("sanity check after reshaping: " + str(train_set_x_flatten[0:5,0])) ``` **Expected Output**: <table style="width:35%"> <tr> <td>**train_set_x_flatten shape**</td> <td> (12288, 209)</td> </tr> <tr> <td>**train_set_y shape**</td> <td>(1, 209)</td> </tr> <tr> <td>**test_set_x_flatten shape**</td> <td>(12288, 50)</td> </tr> <tr> <td>**test_set_y shape**</td> <td>(1, 50)</td> </tr> <tr> <td>**sanity check after reshaping**</td> <td>[17 31 56 22 33]</td> </tr> </table> To represent color images, the red, green and blue channels (RGB) must be specified for each pixel, and so the pixel value is actually a vector of three numbers ranging from 0 to 255. One common preprocessing step in machine learning is to center and standardize your dataset, meaning that you substract the mean of the whole numpy array from each example, and then divide each example by the standard deviation of the whole numpy array. But for picture datasets, it is simpler and more convenient and works almost as well to just divide every row of the dataset by 255 (the maximum value of a pixel channel). <!-- During the training of your model, you're going to multiply weights and add biases to some initial inputs in order to observe neuron activations. 
Then you backpropagate with the gradients to train the model.
# GRADED FUNCTION: sigmoid

def sigmoid(z):
    """
    Compute the sigmoid of z.

    Arguments:
    z -- A scalar or numpy array of any size.

    Return:
    s -- sigmoid(z), same shape as z
    """
    # 1 / (1 + e^(-z)); numpy broadcasting makes this work element-wise
    # for arrays as well as for plain scalars.
    neg_exp = np.exp(-z)
    s = 1.0 / (1.0 + neg_exp)
    return s


# GRADED FUNCTION: initialize_with_zeros

def initialize_with_zeros(dim):
    """
    Create a vector of zeros of shape (dim, 1) for w and initialize b to 0.

    Argument:
    dim -- size of the w vector we want (number of input features)

    Returns:
    w -- initialized numpy vector of shape (dim, 1)
    b -- initialized scalar bias (0)
    """
    b = 0
    w = np.zeros((dim, 1))

    # Sanity checks expected by the assignment grader.
    assert w.shape == (dim, 1)
    assert isinstance(b, (float, int))

    return w, b
# GRADED FUNCTION: propagate

def propagate(w, b, X, Y):
    """
    Implement the cost function and its gradient for logistic regression.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat) of size (1, number of examples)

    Return:
    grads -- dict with "dw" (gradient w.r.t. w, same shape as w) and "db" (scalar gradient w.r.t. b)
    cost -- negative log-likelihood cost for logistic regression
    """
    n_examples = X.shape[1]

    # FORWARD PROPAGATION: activations A = sigma(w.T X + b), then the
    # cross-entropy cost J = -(1/m) * sum(y log(a) + (1-y) log(1-a)).
    A = sigmoid(np.dot(w.T, X) + b)
    log_likelihood = Y * np.log(A) + (1 - Y) * np.log(1 - A)
    cost = -(1 / n_examples) * np.sum(log_likelihood)

    # BACKWARD PROPAGATION: dJ/dw = (1/m) X (A-Y)^T, dJ/db = (1/m) sum(A-Y).
    residual = A - Y
    dw = 1 / n_examples * np.dot(X, residual.T)
    db = 1 / n_examples * np.sum(residual)

    assert dw.shape == w.shape
    assert db.dtype == float
    cost = np.squeeze(cost)
    assert cost.shape == ()

    return {"dw": dw, "db": db}, cost
# GRADED FUNCTION: optimize

def optimize(w, b, X, Y, num_iterations, learning_rate, print_cost = False):
    """
    Optimize w and b by running a gradient descent algorithm.

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of shape (num_px * num_px * 3, number of examples)
    Y -- true "label" vector (containing 0 if non-cat, 1 if cat), of shape (1, number of examples)
    num_iterations -- number of iterations of the optimization loop
    learning_rate -- learning rate of the gradient descent update rule
    print_cost -- True to print the loss every 100 steps

    Returns:
    params -- dictionary containing the learned weights w and bias b
    grads -- dictionary with the gradients of w and b from the last iteration
    costs -- list of costs recorded every 100 iterations (used for the learning curve)
    """
    costs = []

    for step in range(num_iterations):
        # Forward + backward pass with the current parameters.
        grads, cost = propagate(w, b, X, Y)
        dw, db = grads["dw"], grads["db"]

        # Gradient-descent update rule: theta <- theta - alpha * d(theta).
        w = w - learning_rate * dw
        b = b - learning_rate * db

        # Record (and optionally report) the cost every 100 iterations.
        if step % 100 == 0:
            costs.append(cost)
            if print_cost:
                print ("Cost after iteration %i: %f" %(step, cost))

    return {"w": w, "b": b}, {"dw": dw, "db": db}, costs
# GRADED FUNCTION: predict

def predict(w, b, X):
    '''
    Predict whether the label is 0 or 1 using learned logistic regression parameters (w, b).

    Arguments:
    w -- weights, a numpy array of size (num_px * num_px * 3, 1)
    b -- bias, a scalar
    X -- data of size (num_px * num_px * 3, number of examples)

    Returns:
    Y_prediction -- a numpy array (vector) of shape (1, number of examples)
                    containing all predictions (0/1) for the examples in X
    '''
    m = X.shape[1]
    w = w.reshape(X.shape[0], 1)

    # Compute vector "A" predicting the probabilities of a cat being present in the picture.
    A = sigmoid(np.dot(w.T, X) + b)

    # Vectorized thresholding replaces the per-column Python loop:
    # activation > 0.5 -> 1.0, otherwise 0.0. Casting the boolean mask to
    # float keeps the same dtype the original np.zeros buffer had.
    Y_prediction = (A > 0.5).astype(float)

    assert(Y_prediction.shape == (1, m))

    return Y_prediction
# GRADED FUNCTION: model

def model(X_train, Y_train, X_test, Y_test, num_iterations = 2000, learning_rate = 0.5, print_cost = False):
    """
    Build the logistic regression model by wiring together the previously
    implemented helpers: initialize_with_zeros -> optimize -> predict.

    Arguments:
    X_train -- training set, numpy array of shape (num_px * num_px * 3, m_train)
    Y_train -- training labels, numpy array (vector) of shape (1, m_train)
    X_test -- test set, numpy array of shape (num_px * num_px * 3, m_test)
    Y_test -- test labels, numpy array (vector) of shape (1, m_test)
    num_iterations -- hyperparameter: number of gradient-descent iterations
    learning_rate -- hyperparameter: learning rate used in the update rule of optimize()
    print_cost -- Set to true to print the cost every 100 iterations

    Returns:
    d -- dictionary containing information about the model.
    """
    # Start from zero parameters sized to the number of input features.
    w, b = initialize_with_zeros(X_train.shape[0])

    # Learn w and b by gradient descent, then unpack the fitted values.
    parameters, grads, costs = optimize(w, b, X_train, Y_train, num_iterations, learning_rate, print_cost)
    w, b = parameters["w"], parameters["b"]

    # Label both splits with the learned parameters.
    Y_prediction_train = predict(w, b, X_train)
    Y_prediction_test = predict(w, b, X_test)

    # Accuracy in % = 100 - mean absolute error * 100 (labels are 0/1).
    print("train accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_train - Y_train)) * 100))
    print("test accuracy: {} %".format(100 - np.mean(np.abs(Y_prediction_test - Y_test)) * 100))

    return {"costs": costs,
            "Y_prediction_test": Y_prediction_test,
            "Y_prediction_train" : Y_prediction_train,
            "w" : w,
            "b" : b,
            "learning_rate" : learning_rate,
            "num_iterations": num_iterations}
Later in this specialization you will learn how to reduce overfitting, for example by using regularization. Using the code below (and changing the `index` variable) you can look at predictions on pictures of the test set. ``` # Example of a picture that was wrongly classified. index = 10 plt.imshow(test_set_x[:,index].reshape((num_px, num_px, 3))) print ("y = " + str(test_set_y[0,index]) + ", you predicted that it is a \"" + classes[d["Y_prediction_test"][0,index]].decode("utf-8") + "\" picture.") ``` Let's also plot the cost function and the gradients. ``` # Plot learning curve (with costs) costs = np.squeeze(d['costs']) plt.plot(costs) plt.ylabel('cost') plt.xlabel('iterations (per hundreds)') plt.title("Learning rate =" + str(d["learning_rate"])) plt.show() ``` **Interpretation**: You can see the cost decreasing. It shows that the parameters are being learned. However, you see that you could train the model even more on the training set. Try to increase the number of iterations in the cell above and rerun the cells. You might see that the training set accuracy goes up, but the test set accuracy goes down. This is called overfitting. ## 6 - Further analysis (optional/ungraded exercise) ## Congratulations on building your first image classification model. Let's analyze it further, and examine possible choices for the learning rate $\alpha$. #### Choice of learning rate #### **Reminder**: In order for Gradient Descent to work you must choose the learning rate wisely. The learning rate $\alpha$ determines how rapidly we update the parameters. If the learning rate is too large we may "overshoot" the optimal value. Similarly, if it is too small we will need too many iterations to converge to the best values. That's why it is crucial to use a well-tuned learning rate. Let's compare the learning curve of our model with several choices of learning rates. Run the cell below. This should take about 1 minute. 
Feel free also to try different values than the three we have initialized the `learning_rates` variable to contain, and see what happens. ``` learning_rates = [0.01, 0.001, 0.0001] models = {} for i in learning_rates: print ("learning rate is: " + str(i)) models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False) print ('\n' + "-------------------------------------------------------" + '\n') for i in learning_rates: plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"])) plt.ylabel('cost') plt.xlabel('iterations (hundreds)') legend = plt.legend(loc='upper center', shadow=True) frame = legend.get_frame() frame.set_facecolor('0.90') plt.show() ``` **Interpretation**: - Different learning rates give different costs and thus different predictions results. - If the learning rate is too large (0.01), the cost may oscillate up and down. It may even diverge (though in this example, using 0.01 still eventually ends up at a good value for the cost). - A lower cost doesn't mean a better model. You have to check if there is possibly overfitting. It happens when the training accuracy is a lot higher than the test accuracy. - In deep learning, we usually recommend that you: - Choose the learning rate that better minimizes the cost function. - If your model overfits, use other techniques to reduce overfitting. (We'll talk about this in later videos.) ## 7 - Test with your own image (optional/ungraded exercise) ## Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that: 1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub. 2. Add your image to this Jupyter Notebook's directory, in the "images" folder 3. Change your image's name in the following code 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)! 
``` ## START CODE HERE ## (PUT YOUR IMAGE NAME) my_image = "my_image.jpg" # change this to the name of your image file ## END CODE HERE ## # We preprocess the image to fit your algorithm. fname = "images/" + my_image image = np.array(ndimage.imread(fname, flatten=False)) my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((1, num_px*num_px*3)).T my_predicted_image = predict(d["w"], d["b"], my_image) plt.imshow(image) print("y = " + str(np.squeeze(my_predicted_image)) + ", your algorithm predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.") ``` <font color='blue'> **What to remember from this assignment:** 1. Preprocessing the dataset is important. 2. You implemented each function separately: initialize(), propagate(), optimize(). Then you built a model(). 3. Tuning the learning rate (which is an example of a "hyperparameter") can make a big difference to the algorithm. You will see more examples of this later in this course! Finally, if you'd like, we invite you to try different things on this Notebook. Make sure you submit before trying anything. Once you submit, things you can play with include: - Play with the learning rate and the number of iterations - Try different initialization methods and compare the results - Test other preprocessings (center the data, or divide each row by its standard deviation) Bibliography: - http://www.wildml.com/2015/09/implementing-a-neural-network-from-scratch/ - https://stats.stackexchange.com/questions/211436/why-do-we-normalize-images-by-subtracting-the-datasets-image-mean-and-not-the-c
github_jupyter
# Image Data Storage for the Web ## Learning objectives - Become familiar with the design of modern, **cloud storage systems** - Gain experience with the **zarr** and **n5 formats** - Understand the relationship between **chunked, compressed**, object storage and **parallel processing and multi-scale visualization** *See also*: [I2K 2020 Tutorial: Zarr, N5, NGFF, Towards a community standard image file format for sharing big image data in the cloud](https://www.janelia.org/sites/default/files/You%20%2B%20Janelia/Conferences/19.pdf) # Cloud storage **Cloud storage services**, such as: - Amazon Simple Storage Service (AWS S3) - Google Cloud Storage - Microsoft Azure Storage - Minio Cloud Storage **differ from traditional filesystem storage**. In *File Storage*: - Data is organized into files and folders. - There is generally a pool of storage, e.g. a volume, with limited capacity that can be accessed. - Data can be overwritten. - Limited metadata is associated with the file. In cloud, *Object Storage* systems: - Objects, binary blobs, live in a flat structure. - Object have a unique identifier and associated metadata, typically JSON-compatible - Access is possible via simple HTTP requests - Object's cannot be modified - There are not structural limits to scaling ## Zarr and n5 formats [Zarr](https://zarr-developers.github.io/about/) and [n5](https://github.com/saalfeldlab/n5/) are file formats with reference implementatinos that map well to cloud Object Storage services. They are also suitable for storage of large bioimages. Together zarr and n5 are implementations of the [Next-generation File Format (NGFF)](https://ngff.openmicroscopy.org/latest/), which is *a hierarchy of n-dimensional (dense) arrays with metadata*. 
Zarr and n5 support: - Group hierarchies - Arbitrary JSON-compatible meta-data - Chunked, n-dimensional binary tensor datasets - Binary component types: [u]int8, [u]int16, [u]int32, [u]int64, float32, float64 - Next-generation lossless compression with [blosc](https://blosc.org/pages/blosc-in-depth/) of binary chunks. When combined with a **multi-scale image model** such as [OME-Zarr](https://blog.openmicroscopy.org/file-formats/community/2020/11/04/zarr-data/), **large image visualization** is possible. The object storage-compatible model facilitates **parallel processing** because it is conducive to **compressed chunk writes**, even in a cloud storage environment. ## Exercises ``` # Get metadata on an image !ome_zarr info https://s3.embassy.ebi.ac.uk/idr/zarr/v0.1/6001240.zarr/ ``` *Does the entire dataset need to be downloaded to examine its metadata?* ``` # Download an image dataset !ome_zarr download https://s3.embassy.ebi.ac.uk/idr/zarr/v0.1/6001240.zarr/ --output image.zarr ``` *Examine the contents of the filesystem representation of the OME-Zarr multi-scale image. What information is stored in each file? ``` %ls -a image.zarr/6001240.zarr/ %pycat image.zarr/6001240.zarr/.zattrs %pycat image.zarr/6001240.zarr/.zgroup %ls -a image.zarr/6001240.zarr/0 %pycat image.zarr/0/6001240.zarr/.zarray import zarr group = zarr.open('image.zarr/6001240.zarr/') group group.attrs.keys() group.attrs['multiscales'] list(group.keys()) scale0 = group['0'] scale0 import numpy as np np.asarray(scale0) ```
github_jupyter
This example shows how to create a radial profile from a SOXS event file, including using an exposure map to get flux-based quantities. We'll simulate a simple isothermal cluster. ``` import matplotlib matplotlib.rc("font", size=18) import matplotlib.pyplot as plt import soxs import astropy.io.fits as pyfits ``` First, create the spectrum for the cluster using an absorbed thermal APEC model: ``` emin = 0.05 # keV emax = 20.0 # keV nbins = 20000 agen = soxs.ApecGenerator(emin, emax, nbins) kT = 6.0 abund = 0.3 redshift = 0.05 norm = 1.0 spec = agen.get_spectrum(kT, abund, redshift, norm) spec.rescale_flux(1.0e-13, emin=0.5, emax=2.0, flux_type="energy") spec.apply_foreground_absorption(0.02) ``` And a spatial distribution based on a $\beta$-model: ``` pos = soxs.BetaModel(30.0, 45.0, 50.0, 0.67) ``` Generate a SIMPUT catalog from these two models using a large exposure time and area, and write it to a file: ``` t_exp = (300.0, "ks") area = (3.0, "m**2") cluster_cat = soxs.SimputCatalog.from_models("beta_model", "beta_model", spec, pos, t_exp, area) cluster_cat.write_catalog(overwrite=True) ``` and run the instrument simulation (for simplicity we'll turn off the point-source background): ``` soxs.instrument_simulator("beta_model_simput.fits", "evt.fits", (100.0, "ks"), "lynx_hdxi", [30., 45.], overwrite=True, ptsrc_bkgnd=False) ``` Make an exposure map so that we can obtain flux-based quantities: ``` soxs.make_exposure_map("evt.fits", "expmap.fits", 2.3, overwrite=True) ``` Make the radial profile, using energies between 0.5 and 5.0 keV, between radii of 0 and 200 arcseconds, with 50 bins: ``` soxs.write_radial_profile("evt.fits", "profile.fits", [30.0, 45.0], 0, 200, 50, emin=0.5, emax=5.0, expmap_file="expmap.fits", overwrite=True) ``` Now we can use AstroPy's FITS reader to open the profile and have a look at the columns that are inside: ``` f = pyfits.open("profile.fits") f["PROFILE"].columns ``` and use Matplotlib to plot some quantities. 
We can plot the surface brightness: ``` plt.figure(figsize=(8,8)) plt.errorbar(f["profile"].data["rmid"], f["profile"].data["sur_bri"], lw=2, yerr=f["profile"].data["sur_bri_err"]) plt.xscale('log') plt.yscale('log') plt.xlabel("r (arcsec)") plt.ylabel("S (cts/s/arcsec**2)") ``` and, since we used an exposure map, the surface flux: ``` plt.figure(figsize=(8,8)) plt.errorbar(f["profile"].data["rmid"], f["profile"].data["sur_flux"], lw=2, yerr=f["profile"].data["sur_flux_err"]) plt.xscale('log') plt.yscale('log') plt.xlabel("r (arcsec)") plt.ylabel("S (cts/s/cm**2/arcsec**2)") ```
github_jupyter
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current matplotlib figure under IMAGES_PATH as <fig_id>.<fig_extension>."""
    target = os.path.join(IMAGES_PATH, "{}.{}".format(fig_id, fig_extension))
    print("Saving figure", fig_id)
    if tight_layout:
        # Shrink padding so axis labels are not clipped in the saved file.
        plt.tight_layout()
    plt.savefig(target, format=fig_extension, dpi=resolution)
def plot_svc_decision_boundary(svm_clf, xmin, xmax):
    """Plot a fitted linear SVC's decision boundary, margin gutters and
    support vectors on the current matplotlib axes, for x0 in [xmin, xmax].

    svm_clf -- fitted sklearn SVC with a linear kernel (needs coef_,
               intercept_ and support_vectors_)
    xmin, xmax -- horizontal extent over which to draw the lines
    """
    w = svm_clf.coef_[0]
    b = svm_clf.intercept_[0]

    # At the decision boundary, w0*x0 + w1*x1 + b = 0
    # => x1 = -w0/w1 * x0 - b/w1
    x0 = np.linspace(xmin, xmax, 200)
    decision_boundary = -w[0]/w[1] * x0 - b/w[1]

    # The gutters are where w.x + b = +/-1; measured along x1 that is a
    # vertical offset of 1/w1 from the boundary line.
    margin = 1/w[1]
    gutter_up = decision_boundary + margin
    gutter_down = decision_boundary - margin

    # Highlight support vectors, then draw the boundary (solid) and the
    # two margin gutters (dashed).
    svs = svm_clf.support_vectors_
    plt.scatter(svs[:, 0], svs[:, 1], s=180, facecolors='#FFAAAA')
    plt.plot(x0, decision_boundary, "k-", linewidth=2)
    plt.plot(x0, gutter_up, "k--", linewidth=2)
    plt.plot(x0, gutter_down, "k--", linewidth=2)
ys) plt.figure(figsize=(12,3.2)) plt.subplot(121) plt.plot(Xs[:, 0][ys==1], Xs[:, 1][ys==1], "bo") plt.plot(Xs[:, 0][ys==0], Xs[:, 1][ys==0], "ms") plot_svc_decision_boundary(svm_clf, 0, 6) plt.xlabel("$x_0$", fontsize=20) plt.ylabel("$x_1$ ", fontsize=20, rotation=0) plt.title("Unscaled", fontsize=16) plt.axis([0, 6, 0, 90]) from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_scaled = scaler.fit_transform(Xs) svm_clf.fit(X_scaled, ys) plt.subplot(122) plt.plot(X_scaled[:, 0][ys==1], X_scaled[:, 1][ys==1], "bo") plt.plot(X_scaled[:, 0][ys==0], X_scaled[:, 1][ys==0], "ms") plot_svc_decision_boundary(svm_clf, -2, 2) plt.xlabel("$x_0$", fontsize=20) plt.title("Scaled", fontsize=16) plt.axis([-2, 2, -2, 2]) save_fig("sensitivity_to_feature_scales_plot") ``` # Sensitivity to outliers ``` X_outliers = np.array([[3.4, 1.3], [3.2, 0.8]]) y_outliers = np.array([0, 0]) Xo1 = np.concatenate([X, X_outliers[:1]], axis=0) yo1 = np.concatenate([y, y_outliers[:1]], axis=0) Xo2 = np.concatenate([X, X_outliers[1:]], axis=0) yo2 = np.concatenate([y, y_outliers[1:]], axis=0) svm_clf2 = SVC(kernel="linear", C=10**9) svm_clf2.fit(Xo2, yo2) plt.figure(figsize=(12,2.7)) plt.subplot(121) plt.plot(Xo1[:, 0][yo1==1], Xo1[:, 1][yo1==1], "bs") plt.plot(Xo1[:, 0][yo1==0], Xo1[:, 1][yo1==0], "yo") plt.text(0.3, 1.0, "Impossible!", fontsize=24, color="red") plt.xlabel("Petal length", fontsize=14) plt.ylabel("Petal width", fontsize=14) plt.annotate("Outlier", xy=(X_outliers[0][0], X_outliers[0][1]), xytext=(2.5, 1.7), ha="center", arrowprops=dict(facecolor='black', shrink=0.1), fontsize=16, ) plt.axis([0, 5.5, 0, 2]) plt.subplot(122) plt.plot(Xo2[:, 0][yo2==1], Xo2[:, 1][yo2==1], "bs") plt.plot(Xo2[:, 0][yo2==0], Xo2[:, 1][yo2==0], "yo") plot_svc_decision_boundary(svm_clf2, 0, 5.5) plt.xlabel("Petal length", fontsize=14) plt.annotate("Outlier", xy=(X_outliers[1][0], X_outliers[1][1]), xytext=(3.2, 0.08), ha="center", arrowprops=dict(facecolor='black', shrink=0.1), 
fontsize=16, ) plt.axis([0, 5.5, 0, 2]) save_fig("sensitivity_to_outliers_plot") plt.show() ``` # Large margin *vs* margin violations This is the first code example in chapter 5: ``` import numpy as np from sklearn import datasets from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.svm import LinearSVC iris = datasets.load_iris() X = iris["data"][:, (2, 3)] # petal length, petal width y = (iris["target"] == 2).astype(np.float64) # Iris-Virginica svm_clf = Pipeline([ ("scaler", StandardScaler()), ("linear_svc", LinearSVC(C=1, loss="hinge", random_state=42)), ]) svm_clf.fit(X, y) svm_clf.predict([[5.5, 1.7]]) ``` Now let's generate the graph comparing different regularization settings: ``` scaler = StandardScaler() svm_clf1 = LinearSVC(C=1, loss="hinge", random_state=42) svm_clf2 = LinearSVC(C=100, loss="hinge", random_state=42) scaled_svm_clf1 = Pipeline([ ("scaler", scaler), ("linear_svc", svm_clf1), ]) scaled_svm_clf2 = Pipeline([ ("scaler", scaler), ("linear_svc", svm_clf2), ]) scaled_svm_clf1.fit(X, y) scaled_svm_clf2.fit(X, y) # Convert to unscaled parameters b1 = svm_clf1.decision_function([-scaler.mean_ / scaler.scale_]) b2 = svm_clf2.decision_function([-scaler.mean_ / scaler.scale_]) w1 = svm_clf1.coef_[0] / scaler.scale_ w2 = svm_clf2.coef_[0] / scaler.scale_ svm_clf1.intercept_ = np.array([b1]) svm_clf2.intercept_ = np.array([b2]) svm_clf1.coef_ = np.array([w1]) svm_clf2.coef_ = np.array([w2]) # Find support vectors (LinearSVC does not do this automatically) t = y * 2 - 1 support_vectors_idx1 = (t * (X.dot(w1) + b1) < 1).ravel() support_vectors_idx2 = (t * (X.dot(w2) + b2) < 1).ravel() svm_clf1.support_vectors_ = X[support_vectors_idx1] svm_clf2.support_vectors_ = X[support_vectors_idx2] plt.figure(figsize=(12,3.2)) plt.subplot(121) plt.plot(X[:, 0][y==1], X[:, 1][y==1], "g^", label="Iris-Virginica") plt.plot(X[:, 0][y==0], X[:, 1][y==0], "bs", label="Iris-Versicolor") plot_svc_decision_boundary(svm_clf1, 4, 
def plot_dataset(X, y, axes):
    """Scatter a 2-D binary-class dataset on the current axes.

    Class 0 is drawn as blue squares, class 1 as green triangles;
    `axes` is the [xmin, xmax, ymin, ymax] view limits.
    """
    negatives = X[y == 0]
    positives = X[y == 1]
    plt.plot(negatives[:, 0], negatives[:, 1], "bs")
    plt.plot(positives[:, 0], positives[:, 1], "g^")
    plt.axis(axes)
    plt.grid(True, which='both')
    plt.xlabel(r"$x_1$", fontsize=20)
    plt.ylabel(r"$x_2$", fontsize=20, rotation=0)
def gaussian_rbf(x, landmark, gamma):
    """Gaussian RBF similarity between each row of `x` and `landmark`.

    Computes exp(-gamma * ||x_i - landmark||^2) row-wise and returns
    a 1-D array of similarities in (0, 1].
    """
    sq_dist = np.linalg.norm(x - landmark, axis=1) ** 2
    return np.exp(-gamma * sq_dist)
alpha=0.5, c="red") plt.plot(X1D[:, 0][yk==0], np.zeros(4), "bs") plt.plot(X1D[:, 0][yk==1], np.zeros(5), "g^") plt.plot(x1s, x2s, "g--") plt.plot(x1s, x3s, "b:") plt.gca().get_yaxis().set_ticks([0, 0.25, 0.5, 0.75, 1]) plt.xlabel(r"$x_1$", fontsize=20) plt.ylabel(r"Similarity", fontsize=14) plt.annotate(r'$\mathbf{x}$', xy=(X1D[3, 0], 0), xytext=(-0.5, 0.20), ha="center", arrowprops=dict(facecolor='black', shrink=0.1), fontsize=18, ) plt.text(-2, 0.9, "$x_2$", ha="center", fontsize=20) plt.text(1, 0.9, "$x_3$", ha="center", fontsize=20) plt.axis([-4.5, 4.5, -0.1, 1.1]) plt.subplot(122) plt.grid(True, which='both') plt.axhline(y=0, color='k') plt.axvline(x=0, color='k') plt.plot(XK[:, 0][yk==0], XK[:, 1][yk==0], "bs") plt.plot(XK[:, 0][yk==1], XK[:, 1][yk==1], "g^") plt.xlabel(r"$x_2$", fontsize=20) plt.ylabel(r"$x_3$ ", fontsize=20, rotation=0) plt.annotate(r'$\phi\left(\mathbf{x}\right)$', xy=(XK[3, 0], XK[3, 1]), xytext=(0.65, 0.50), ha="center", arrowprops=dict(facecolor='black', shrink=0.1), fontsize=18, ) plt.plot([-0.1, 1.1], [0.57, -0.1], "r--", linewidth=3) plt.axis([-0.1, 1.1, -0.1, 1.1]) plt.subplots_adjust(right=1) save_fig("kernel_method_plot") plt.show() x1_example = X1D[3, 0] for landmark in (-2, 1): k = gaussian_rbf(np.array([[x1_example]]), np.array([[landmark]]), gamma) print("Phi({}, {}) = {}".format(x1_example, landmark, k)) rbf_kernel_svm_clf = Pipeline([ ("scaler", StandardScaler()), ("svm_clf", SVC(kernel="rbf", gamma=5, C=0.001)) ]) rbf_kernel_svm_clf.fit(X, y) from sklearn.svm import SVC gamma1, gamma2 = 0.1, 5 C1, C2 = 0.001, 1000 hyperparams = (gamma1, C1), (gamma1, C2), (gamma2, C1), (gamma2, C2) svm_clfs = [] for gamma, C in hyperparams: rbf_kernel_svm_clf = Pipeline([ ("scaler", StandardScaler()), ("svm_clf", SVC(kernel="rbf", gamma=gamma, C=C)) ]) rbf_kernel_svm_clf.fit(X, y) svm_clfs.append(rbf_kernel_svm_clf) plt.figure(figsize=(11, 7)) for i, svm_clf in enumerate(svm_clfs): plt.subplot(221 + i) plot_predictions(svm_clf, [-1.5, 2.5, 
def find_support_vectors(svm_reg, X, y):
    """Return the indices of samples on or outside the epsilon-tube.

    LinearSVR does not record its support vectors, so recompute them:
    any sample whose absolute residual reaches `epsilon` qualifies.
    """
    residuals = np.abs(y - svm_reg.predict(X))
    return np.argwhere(residuals >= svm_reg.epsilon)
{}$".format(svm_reg2.epsilon), fontsize=18) save_fig("svm_regression_plot") plt.show() np.random.seed(42) m = 100 X = 2 * np.random.rand(m, 1) - 1 y = (0.2 + 0.1 * X + 0.5 * X**2 + np.random.randn(m, 1)/10).ravel() ``` **Warning**: the default value of `gamma` will change from `'auto'` to `'scale'` in version 0.22 to better account for unscaled features. To preserve the same results as in the book, we explicitly set it to `'auto'`, but you should probably just use the default in your own code. ``` from sklearn.svm import SVR svm_poly_reg = SVR(kernel="poly", degree=2, C=100, epsilon=0.1, gamma="auto") svm_poly_reg.fit(X, y) from sklearn.svm import SVR svm_poly_reg1 = SVR(kernel="poly", degree=2, C=100, epsilon=0.1, gamma="auto") svm_poly_reg2 = SVR(kernel="poly", degree=2, C=0.01, epsilon=0.1, gamma="auto") svm_poly_reg1.fit(X, y) svm_poly_reg2.fit(X, y) plt.figure(figsize=(9, 4)) plt.subplot(121) plot_svm_regression(svm_poly_reg1, X, y, [-1, 1, 0, 1]) plt.title(r"$degree={}, C={}, \epsilon = {}$".format(svm_poly_reg1.degree, svm_poly_reg1.C, svm_poly_reg1.epsilon), fontsize=18) plt.ylabel(r"$y$", fontsize=18, rotation=0) plt.subplot(122) plot_svm_regression(svm_poly_reg2, X, y, [-1, 1, 0, 1]) plt.title(r"$degree={}, C={}, \epsilon = {}$".format(svm_poly_reg2.degree, svm_poly_reg2.C, svm_poly_reg2.epsilon), fontsize=18) save_fig("svm_with_polynomial_kernel_plot") plt.show() ``` # Under the hood ``` iris = datasets.load_iris() X = iris["data"][:, (2, 3)] # petal length, petal width y = (iris["target"] == 2).astype(np.float64) # Iris-Virginica from mpl_toolkits.mplot3d import Axes3D def plot_3D_decision_function(ax, w, b, x1_lim=[4, 6], x2_lim=[0.8, 2.8]): x1_in_bounds = (X[:, 0] > x1_lim[0]) & (X[:, 0] < x1_lim[1]) X_crop = X[x1_in_bounds] y_crop = y[x1_in_bounds] x1s = np.linspace(x1_lim[0], x1_lim[1], 20) x2s = np.linspace(x2_lim[0], x2_lim[1], 20) x1, x2 = np.meshgrid(x1s, x2s) xs = np.c_[x1.ravel(), x2.ravel()] df = (xs.dot(w) + b).reshape(x1.shape) m = 1 / 
def plot_2D_decision_function(w, b, ylabel=True, x1_lim=[-3, 3]):
    """Plot the 1-D decision function w*x1 + b and its margin.

    Illustrates that a smaller weight w yields a larger margin: the
    margin edges sit where the decision function reaches +/-1.
    """
    xs = np.linspace(x1_lim[0], x1_lim[1], 200)
    decision = w * xs + b
    margin = 1 / w  # x1 value where |w * x1| reaches 1

    plt.plot(xs, decision)
    for level in (1, -1):
        plt.plot(x1_lim, [level, level], "k:")
    plt.axhline(y=0, color='k')
    plt.axvline(x=0, color='k')
    plt.plot([margin, margin], [0, 1], "k--")
    plt.plot([-margin, -margin], [0, -1], "k--")
    plt.plot([-margin, margin], [0, 0], "k-o", linewidth=3)
    plt.axis(x1_lim + [-2, 2])
    plt.xlabel(r"$x_1$", fontsize=16)
    if ylabel:
        plt.ylabel(r"$w_1 x_1$  ", rotation=0, fontsize=16)
    plt.title(r"$w_1 = {}$".format(w), fontsize=16)
class MyLinearSVC(BaseEstimator):
    """Linear SVM classifier trained with batch (sub)gradient descent.

    Minimizes the primal hinge-loss objective
        J = 1/2 ||w||^2 + C * sum over violators of (1 - t*(x.w + b))
    using a decaying learning rate eta0 / (epoch + eta_d).
    """

    def __init__(self, C=1, eta0=1, eta_d=10000, n_epochs=1000, random_state=None):
        self.C = C
        self.eta0 = eta0
        self.n_epochs = n_epochs
        self.random_state = random_state
        self.eta_d = eta_d

    def eta(self, epoch):
        """Learning-rate schedule: decays as 1/(epoch + eta_d)."""
        return self.eta0 / (epoch + self.eta_d)

    def fit(self, X, y):
        """Fit on X of shape (m, n) and column-vector labels y in {0, 1}.

        Stores intercept_, coef_, support_vectors_ and the per-epoch
        objective values in self.Js; returns self.
        """
        # Random initialization of the weight vector.
        # `is not None` (not truthiness) so that random_state=0 is honored.
        if self.random_state is not None:
            np.random.seed(self.random_state)
        w = np.random.randn(X.shape[1], 1)  # n feature weights
        b = 0

        t = y * 2 - 1  # -1 if y==0, +1 if y==1
        X_t = X * t    # pre-multiplied so the margin test is X_t.w + t*b
        self.Js = []

        # Training: batch subgradient descent on the primal objective.
        for epoch in range(self.n_epochs):
            # Margin violators: samples with t*(x.w + b) < 1.
            support_vectors_idx = (X_t.dot(w) + t * b < 1).ravel()
            X_t_sv = X_t[support_vectors_idx]
            t_sv = t[support_vectors_idx]

            # Primal objective value, tracked for convergence plots.
            J = 1/2 * np.sum(w * w) + self.C * (np.sum(1 - X_t_sv.dot(w)) - b * np.sum(t_sv))
            self.Js.append(J)

            w_gradient_vector = w - self.C * np.sum(X_t_sv, axis=0).reshape(-1, 1)
            # BUG FIX: the original read the *global* C here instead of
            # self.C, silently training with the wrong penalty whenever
            # the two differ.
            b_derivative = -self.C * np.sum(t_sv)

            w = w - self.eta(epoch) * w_gradient_vector
            b = b - self.eta(epoch) * b_derivative

        self.intercept_ = np.array([b])
        self.coef_ = np.array([w])
        support_vectors_idx = (X_t.dot(w) + t * b < 1).ravel()
        self.support_vectors_ = X[support_vectors_idx]
        return self

    def decision_function(self, X):
        """Raw score w.x + b for each row of X (column vector)."""
        return X.dot(self.coef_[0]) + self.intercept_[0]

    def predict(self, X):
        """Predict class 1.0 where the decision function is >= 0, else 0.0."""
        return (self.decision_function(X) >= 0).astype(np.float64)
np.r_[sgd_clf.intercept_[0], sgd_clf.coef_[0]] print(sgd_theta) support_vectors_idx = (X_b_t.dot(sgd_theta) < 1).ravel() sgd_clf.support_vectors_ = X[support_vectors_idx] sgd_clf.C = C plt.figure(figsize=(5.5,3.2)) plt.plot(X[:, 0][yr==1], X[:, 1][yr==1], "g^") plt.plot(X[:, 0][yr==0], X[:, 1][yr==0], "bs") plot_svc_decision_boundary(sgd_clf, 4, 6) plt.xlabel("Petal length", fontsize=14) plt.ylabel("Petal width", fontsize=14) plt.title("SGDClassifier", fontsize=14) plt.axis([4, 6, 0.8, 2.8]) ``` # Exercise solutions ## 1. to 7. See appendix A. # 8. _Exercise: train a `LinearSVC` on a linearly separable dataset. Then train an `SVC` and a `SGDClassifier` on the same dataset. See if you can get them to produce roughly the same model._ Let's use the Iris dataset: the Iris Setosa and Iris Versicolor classes are linearly separable. ``` from sklearn import datasets iris = datasets.load_iris() X = iris["data"][:, (2, 3)] # petal length, petal width y = iris["target"] setosa_or_versicolor = (y == 0) | (y == 1) X = X[setosa_or_versicolor] y = y[setosa_or_versicolor] from sklearn.svm import SVC, LinearSVC from sklearn.linear_model import SGDClassifier from sklearn.preprocessing import StandardScaler C = 5 alpha = 1 / (C * len(X)) lin_clf = LinearSVC(loss="hinge", C=C, random_state=42) svm_clf = SVC(kernel="linear", C=C) sgd_clf = SGDClassifier(loss="hinge", learning_rate="constant", eta0=0.001, alpha=alpha, max_iter=100000, tol=-np.infty, random_state=42) scaler = StandardScaler() X_scaled = scaler.fit_transform(X) lin_clf.fit(X_scaled, y) svm_clf.fit(X_scaled, y) sgd_clf.fit(X_scaled, y) print("LinearSVC: ", lin_clf.intercept_, lin_clf.coef_) print("SVC: ", svm_clf.intercept_, svm_clf.coef_) print("SGDClassifier(alpha={:.5f}):".format(sgd_clf.alpha), sgd_clf.intercept_, sgd_clf.coef_) ``` Let's plot the decision boundaries of these three models: ``` # Compute the slope and bias of each decision boundary w1 = -lin_clf.coef_[0, 0]/lin_clf.coef_[0, 1] b1 = 
-lin_clf.intercept_[0]/lin_clf.coef_[0, 1] w2 = -svm_clf.coef_[0, 0]/svm_clf.coef_[0, 1] b2 = -svm_clf.intercept_[0]/svm_clf.coef_[0, 1] w3 = -sgd_clf.coef_[0, 0]/sgd_clf.coef_[0, 1] b3 = -sgd_clf.intercept_[0]/sgd_clf.coef_[0, 1] # Transform the decision boundary lines back to the original scale line1 = scaler.inverse_transform([[-10, -10 * w1 + b1], [10, 10 * w1 + b1]]) line2 = scaler.inverse_transform([[-10, -10 * w2 + b2], [10, 10 * w2 + b2]]) line3 = scaler.inverse_transform([[-10, -10 * w3 + b3], [10, 10 * w3 + b3]]) # Plot all three decision boundaries plt.figure(figsize=(11, 4)) plt.plot(line1[:, 0], line1[:, 1], "k:", label="LinearSVC") plt.plot(line2[:, 0], line2[:, 1], "b--", linewidth=2, label="SVC") plt.plot(line3[:, 0], line3[:, 1], "r-", label="SGDClassifier") plt.plot(X[:, 0][y==1], X[:, 1][y==1], "bs") # label="Iris-Versicolor" plt.plot(X[:, 0][y==0], X[:, 1][y==0], "yo") # label="Iris-Setosa" plt.xlabel("Petal length", fontsize=14) plt.ylabel("Petal width", fontsize=14) plt.legend(loc="upper center", fontsize=14) plt.axis([0, 5.5, 0, 2]) plt.show() ``` Close enough! # 9. _Exercise: train an SVM classifier on the MNIST dataset. Since SVM classifiers are binary classifiers, you will need to use one-versus-all to classify all 10 digits. You may want to tune the hyperparameters using small validation sets to speed up the process. What accuracy can you reach?_ First, let's load the dataset and split it into a training set and a test set. 
We could use `train_test_split()` but people usually just take the first 60,000 instances for the training set, and the last 10,000 instances for the test set (this makes it possible to compare your model's performance with others): ``` try: from sklearn.datasets import fetch_openml mnist = fetch_openml('mnist_784', version=1, cache=True, as_frame=False) except ImportError: from sklearn.datasets import fetch_mldata mnist = fetch_mldata('MNIST original') X = mnist["data"] y = mnist["target"] X_train = X[:60000] y_train = y[:60000] X_test = X[60000:] y_test = y[60000:] ``` Many training algorithms are sensitive to the order of the training instances, so it's generally good practice to shuffle them first: ``` np.random.seed(42) rnd_idx = np.random.permutation(60000) X_train = X_train[rnd_idx] y_train = y_train[rnd_idx] ``` Let's start simple, with a linear SVM classifier. It will automatically use the One-vs-All (also called One-vs-the-Rest, OvR) strategy, so there's nothing special we need to do. Easy! ``` lin_clf = LinearSVC(random_state=42) lin_clf.fit(X_train, y_train) ``` Let's make predictions on the training set and measure the accuracy (we don't want to measure it on the test set yet, since we have not selected and trained the final model yet): ``` from sklearn.metrics import accuracy_score y_pred = lin_clf.predict(X_train) accuracy_score(y_train, y_pred) ``` Wow, 86% accuracy on MNIST is a really bad performance. This linear model is certainly too simple for MNIST, but perhaps we just needed to scale the data first: ``` scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train.astype(np.float32)) X_test_scaled = scaler.transform(X_test.astype(np.float32)) lin_clf = LinearSVC(random_state=42) lin_clf.fit(X_train_scaled, y_train) y_pred = lin_clf.predict(X_train_scaled) accuracy_score(y_train, y_pred) ``` That's much better (we cut the error rate in two), but still not great at all for MNIST. If we want to use an SVM, we will have to use a kernel. 
Let's try an `SVC` with an RBF kernel (the default). **Warning**: if you are using Scikit-Learn ≤ 0.19, the `SVC` class will use the One-vs-One (OvO) strategy by default, so you must explicitly set `decision_function_shape="ovr"` if you want to use the OvR strategy instead (OvR is the default since 0.19). ``` svm_clf = SVC(decision_function_shape="ovr", gamma="auto") svm_clf.fit(X_train_scaled[:10000], y_train[:10000]) y_pred = svm_clf.predict(X_train_scaled) accuracy_score(y_train, y_pred) ``` That's promising, we get better performance even though we trained the model on 6 times less data. Let's tune the hyperparameters by doing a randomized search with cross validation. We will do this on a small dataset just to speed up the process: ``` from sklearn.model_selection import RandomizedSearchCV from scipy.stats import reciprocal, uniform param_distributions = {"gamma": reciprocal(0.001, 0.1), "C": uniform(1, 10)} rnd_search_cv = RandomizedSearchCV(svm_clf, param_distributions, n_iter=10, verbose=2, cv=3) rnd_search_cv.fit(X_train_scaled[:1000], y_train[:1000]) rnd_search_cv.best_estimator_ rnd_search_cv.best_score_ ``` This looks pretty low but remember we only trained the model on 1,000 instances. Let's retrain the best estimator on the whole training set (run this at night, it will take hours): ``` rnd_search_cv.best_estimator_.fit(X_train_scaled, y_train) y_pred = rnd_search_cv.best_estimator_.predict(X_train_scaled) accuracy_score(y_train, y_pred) ``` Ah, this looks good! Let's select this model. Now we can test it on the test set: ``` y_pred = rnd_search_cv.best_estimator_.predict(X_test_scaled) accuracy_score(y_test, y_pred) ``` Not too bad, but apparently the model is overfitting slightly. It's tempting to tweak the hyperparameters a bit more (e.g. decreasing `C` and/or `gamma`), but we would run the risk of overfitting the test set. Other people have found that the hyperparameters `C=5` and `gamma=0.005` yield even better performance (over 98% accuracy). 
By running the randomized search for longer and on a larger part of the training set, you may be able to find this as well. ## 10. _Exercise: train an SVM regressor on the California housing dataset._ Let's load the dataset using Scikit-Learn's `fetch_california_housing()` function: ``` from sklearn.datasets import fetch_california_housing housing = fetch_california_housing() X = housing["data"] y = housing["target"] ``` Split it into a training set and a test set: ``` from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42) ``` Don't forget to scale the data: ``` from sklearn.preprocessing import StandardScaler scaler = StandardScaler() X_train_scaled = scaler.fit_transform(X_train) X_test_scaled = scaler.transform(X_test) ``` Let's train a simple `LinearSVR` first: ``` from sklearn.svm import LinearSVR lin_svr = LinearSVR(random_state=42) lin_svr.fit(X_train_scaled, y_train) ``` Let's see how it performs on the training set: ``` from sklearn.metrics import mean_squared_error y_pred = lin_svr.predict(X_train_scaled) mse = mean_squared_error(y_train, y_pred) mse ``` Let's look at the RMSE: ``` np.sqrt(mse) ``` In this training set, the targets are tens of thousands of dollars. The RMSE gives a rough idea of the kind of error you should expect (with a higher weight for large errors): so with this model we can expect errors somewhere around $10,000. Not great. Let's see if we can do better with an RBF Kernel. 
We will use randomized search with cross validation to find the appropriate hyperparameter values for `C` and `gamma`: ``` from sklearn.svm import SVR from sklearn.model_selection import RandomizedSearchCV from scipy.stats import reciprocal, uniform param_distributions = {"gamma": reciprocal(0.001, 0.1), "C": uniform(1, 10)} rnd_search_cv = RandomizedSearchCV(SVR(), param_distributions, n_iter=10, verbose=2, cv=3, random_state=42) rnd_search_cv.fit(X_train_scaled, y_train) rnd_search_cv.best_estimator_ ``` Now let's measure the RMSE on the training set: ``` y_pred = rnd_search_cv.best_estimator_.predict(X_train_scaled) mse = mean_squared_error(y_train, y_pred) np.sqrt(mse) ``` Looks much better than the linear model. Let's select this model and evaluate it on the test set: ``` y_pred = rnd_search_cv.best_estimator_.predict(X_test_scaled) mse = mean_squared_error(y_test, y_pred) np.sqrt(mse) ```
github_jupyter
``` import numpy as np import pandas as pd import torch import torchvision from torch.utils.data import Dataset, DataLoader from torchvision import transforms, utils import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from matplotlib import pyplot as plt %matplotlib inline from scipy.stats import entropy from google.colab import drive drive.mount('/content/drive') path="/content/drive/MyDrive/Research/alternate_minimisation/" name="_50_50_10runs_entropy" # mu1 = np.array([3,3,3,3,0]) # sigma1 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu2 = np.array([4,4,4,4,0]) # sigma2 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu3 = np.array([10,5,5,10,0]) # sigma3 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu4 = np.array([-10,-10,-10,-10,0]) # sigma4 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu5 = np.array([-21,4,4,-21,0]) # sigma5 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu6 = np.array([-10,18,18,-10,0]) # sigma6 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu7 = np.array([4,20,4,20,0]) # sigma7 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu8 = np.array([4,-20,-20,4,0]) # sigma8 = np.array([[16,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu9 = np.array([20,20,20,20,0]) # sigma9 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # mu10 = np.array([20,-10,-10,20,0]) # sigma10 = np.array([[1,1,1,1,1],[1,16,1,1,1],[1,1,1,1,1],[1,1,1,1,1],[1,1,1,1,1]]) # sample1 = np.random.multivariate_normal(mean=mu1,cov= sigma1,size=500) # sample2 = np.random.multivariate_normal(mean=mu2,cov= sigma2,size=500) # sample3 = np.random.multivariate_normal(mean=mu3,cov= sigma3,size=500) # sample4 = np.random.multivariate_normal(mean=mu4,cov= sigma4,size=500) # sample5 = 
np.random.multivariate_normal(mean=mu5,cov= sigma5,size=500) # sample6 = np.random.multivariate_normal(mean=mu6,cov= sigma6,size=500) # sample7 = np.random.multivariate_normal(mean=mu7,cov= sigma7,size=500) # sample8 = np.random.multivariate_normal(mean=mu8,cov= sigma8,size=500) # sample9 = np.random.multivariate_normal(mean=mu9,cov= sigma9,size=500) # sample10 = np.random.multivariate_normal(mean=mu10,cov= sigma10,size=500) # X = np.concatenate((sample1,sample2,sample3,sample4,sample5,sample6,sample7,sample8,sample9,sample10),axis=0) # Y = np.concatenate((np.zeros((500,1)),np.ones((500,1)),2*np.ones((500,1)),3*np.ones((500,1)),4*np.ones((500,1)), # 5*np.ones((500,1)),6*np.ones((500,1)),7*np.ones((500,1)),8*np.ones((500,1)),9*np.ones((500,1))),axis=0).astype(int) # print(X.shape,Y.shape) # # plt.scatter(sample1[:,0],sample1[:,1],label="class_0") # # plt.scatter(sample2[:,0],sample2[:,1],label="class_1") # # plt.scatter(sample3[:,0],sample3[:,1],label="class_2") # # plt.scatter(sample4[:,0],sample4[:,1],label="class_3") # # plt.scatter(sample5[:,0],sample5[:,1],label="class_4") # # plt.scatter(sample6[:,0],sample6[:,1],label="class_5") # # plt.scatter(sample7[:,0],sample7[:,1],label="class_6") # # plt.scatter(sample8[:,0],sample8[:,1],label="class_7") # # plt.scatter(sample9[:,0],sample9[:,1],label="class_8") # # plt.scatter(sample10[:,0],sample10[:,1],label="class_9") # # plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left') # class SyntheticDataset(Dataset): # """MosaicDataset dataset.""" # def __init__(self, x, y): # """ # Args: # csv_file (string): Path to the csv file with annotations. # root_dir (string): Directory with all the images. # transform (callable, optional): Optional transform to be applied # on a sample. 
# """ # self.x = x # self.y = y # #self.fore_idx = fore_idx # def __len__(self): # return len(self.y) # def __getitem__(self, idx): # return self.x[idx] , self.y[idx] #, self.fore_idx[idx] # trainset = SyntheticDataset(X,Y) # # testset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform) # classes = ('zero','one','two','three','four','five','six','seven','eight','nine') # foreground_classes = {'zero','one','two'} # fg_used = '012' # fg1, fg2, fg3 = 0,1,2 # all_classes = {'zero','one','two','three','four','five','six','seven','eight','nine'} # background_classes = all_classes - foreground_classes # background_classes # trainloader = torch.utils.data.DataLoader(trainset, batch_size=100, shuffle=True) # dataiter = iter(trainloader) # background_data=[] # background_label=[] # foreground_data=[] # foreground_label=[] # batch_size=100 # for i in range(50): # images, labels = dataiter.next() # for j in range(batch_size): # if(classes[labels[j]] in background_classes): # img = images[j].tolist() # background_data.append(img) # background_label.append(labels[j]) # else: # img = images[j].tolist() # foreground_data.append(img) # foreground_label.append(labels[j]) # foreground_data = torch.tensor(foreground_data) # foreground_label = torch.tensor(foreground_label) # background_data = torch.tensor(background_data) # background_label = torch.tensor(background_label) # def create_mosaic_img(bg_idx,fg_idx,fg): # """ # bg_idx : list of indexes of background_data[] to be used as background images in mosaic # fg_idx : index of image to be used as foreground image from foreground data # fg : at what position/index foreground image has to be stored out of 0-8 # """ # image_list=[] # j=0 # for i in range(9): # if i != fg: # image_list.append(background_data[bg_idx[j]]) # j+=1 # else: # image_list.append(foreground_data[fg_idx]) # label = foreground_label[fg_idx] - fg1 # minus fg1 because our fore ground classes are fg1,fg2,fg3 but we have to 
store it as 0,1,2 # #image_list = np.concatenate(image_list ,axis=0) # image_list = torch.stack(image_list) # return image_list,label # desired_num = 3000 # mosaic_list_of_images =[] # list of mosaic images, each mosaic image is saved as list of 9 images # fore_idx =[] # list of indexes at which foreground image is present in a mosaic image i.e from 0 to 9 # mosaic_label=[] # label of mosaic image = foreground class present in that mosaic # list_set_labels = [] # for i in range(desired_num): # set_idx = set() # np.random.seed(i) # bg_idx = np.random.randint(0,3500,8) # set_idx = set(background_label[bg_idx].tolist()) # fg_idx = np.random.randint(0,1500) # set_idx.add(foreground_label[fg_idx].item()) # fg = np.random.randint(0,9) # fore_idx.append(fg) # image_list,label = create_mosaic_img(bg_idx,fg_idx,fg) # mosaic_list_of_images.append(image_list) # mosaic_label.append(label) # list_set_labels.append(set_idx) # def create_avg_image_from_mosaic_dataset(mosaic_dataset,labels,foreground_index,dataset_number): # """ # mosaic_dataset : mosaic_dataset contains 9 images 32 x 32 each as 1 data point # labels : mosaic_dataset labels # foreground_index : contains list of indexes where foreground image is present so that using this we can take weighted average # dataset_number : will help us to tell what ratio of foreground image to be taken. 
for eg: if it is "j" then fg_image_ratio = j/9 , bg_image_ratio = (9-j)/8*9
#   """
#   avg_image_dataset = []
#   for i in range(len(mosaic_dataset)):
#     img = torch.zeros([5], dtype=torch.float64)
#     for j in range(9):
#       if j == foreground_index[i]:
#         img = img + mosaic_dataset[i][j]*dataset_number/9
#       else :
#         img = img + mosaic_dataset[i][j]*(9-dataset_number)/(8*9)
#     avg_image_dataset.append(img)
#   return torch.stack(avg_image_dataset) , torch.stack(labels) , foreground_index

class MosaicDataset1(Dataset):
    """MosaicDataset dataset."""

    def __init__(self, mosaic_list, mosaic_label,fore_idx):
        """
        Args:
            mosaic_list: mosaic images; each item is a stack of 9 elemental data points.
            mosaic_label: label of each mosaic (the foreground class it contains).
            fore_idx: position (0-8) of the foreground element inside each mosaic.
        """
        self.mosaic = mosaic_list
        self.label = mosaic_label
        self.fore_idx = fore_idx

    def __len__(self):
        return len(self.label)

    def __getitem__(self, idx):
        # returns (mosaic, label, foreground position)
        return self.mosaic[idx] , self.label[idx] , self.fore_idx[idx]

# data = [{"mosaic_list":mosaic_list_of_images, "mosaic_label": mosaic_label, "fore_idx":fore_idx}]
# np.save("mosaic_data.npy",data)

# load the pre-generated mosaic dataset from Drive
data = np.load(path+"mosaic_data.npy",allow_pickle=True)
mosaic_list_of_images = data[0]["mosaic_list"]
mosaic_label = data[0]["mosaic_label"]
fore_idx = data[0]["fore_idx"]

batch = 250
msd = MosaicDataset1(mosaic_list_of_images, mosaic_label, fore_idx)
train_loader = DataLoader( msd,batch_size= batch ,shuffle=True)
```

**Focus Net**

```
class Focus_deep(nn.Module):
    '''
    deep focus network averaged at zeroth layer
    input : elemental data

    Scores each of the K elements of a mosaic with a shared 2-layer MLP,
    softmaxes the scores into attention weights (alphas), and returns the
    alpha-weighted average of the elements along with the alphas and
    their logs (used for the entropy regulariser).
    '''
    def __init__(self,inputs,output,K,d):
        super(Focus_deep,self).__init__()
        self.inputs = inputs    # dimension of one elemental data point
        self.output = output    # scores produced per element (1 here)
        self.K = K              # number of elements per mosaic (9)
        self.d = d              # dimension of the averaged output
        self.linear1 = nn.Linear(self.inputs,50) #,self.output)
        self.linear2 = nn.Linear(50,self.output)

    def forward(self,z):
        batch = z.shape[0]
        x = torch.zeros([batch,self.K],dtype=torch.float64)   # per-element scores
        y = torch.zeros([batch,self.d], dtype=torch.float64)  # weighted average
        x,y = x.to("cuda"),y.to("cuda")
        for i in range(self.K):
            x[:,i] = self.helper(z[:,i] )[:,0] # self.d*i:self.d*i+self.d
        log_x = F.log_softmax(x,dim=1) # log alpha to calculate entropy
        x = F.softmax(x,dim=1) # alphas
        x1 = x[:,0]  # NOTE(review): dead assignment — immediately overwritten in the loop below
        for i in range(self.K):
            x1 = x[:,i]
            y = y+torch.mul(x1[:,None],z[:,i]) # self.d*i:self.d*i+self.d
        return y , x,log_x

    def helper(self,x):
        # shared per-element scorer: Linear -> ReLU -> Linear
        x = F.relu(self.linear1(x))
        x = self.linear2(x)
        return x
```

**Classification Net**

```
class Classification_deep(nn.Module):
    '''
    input : elemental data
    deep classification module
    data averaged at zeroth layer
    '''
    def __init__(self,inputs,output):
        super(Classification_deep,self).__init__()
        self.inputs = inputs
        self.output = output
        self.linear1 = nn.Linear(self.inputs,50)
        self.linear2 = nn.Linear(50,self.output)

    def forward(self,x):
        x = F.relu(self.linear1(x))
        x = self.linear2(x)
        return x

criterion = nn.CrossEntropyLoss()

def my_cross_entropy(x, y,alpha,log_alpha,k):
    '''
    Combined objective: (1-k) * cross-entropy(x, y) + k * mean attention entropy.

    Returns (combined loss, plain cross-entropy, entropy term).
    '''
    # log_prob = -1.0 * F.log_softmax(x, 1)
    # loss = log_prob.gather(1, y.unsqueeze(1))
    # loss = loss.mean()
    loss = criterion(x,y)
    #alpha = torch.clamp(alpha,min=1e-10)
    b = -1.0* alpha * log_alpha          # elementwise -alpha * log(alpha)
    b = torch.mean(torch.sum(b,dim=1))   # mean entropy of the attention distribution
    closs = loss
    entropy = b
    loss = (1-k)*loss + ((k)*b)
    return loss,closs,entropy
```

```
def calculate_attn_loss(dataloader,what,where,criter,k):
    '''
    Evaluate the (what, where) pair over `dataloader` without gradients.
    Accumulates combined/classification/entropy losses and collects the
    alphas, labels, predictions and foreground indices for analyse_data.
    '''
    what.eval()
    where.eval()
    r_loss = 0
    cc_loss = 0
    cc_entropy = 0
    alphas = []
    lbls = []
    pred = []
    fidices = []
    with torch.no_grad():
        for i, data in enumerate(dataloader, 0):
            inputs, labels,fidx = data
            lbls.append(labels)
            fidices.append(fidx)
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            avg,alpha,log_alpha = where(inputs)   # attention-weighted average + alphas
            outputs = what(avg)                   # classify the averaged point
            _, predicted = torch.max(outputs.data, 1)
            pred.append(predicted.cpu().numpy())
            alphas.append(alpha.cpu().numpy())
            #ent = np.sum(entropy(alpha.cpu().detach().numpy(), base=2, axis=1))/batch
            # mx,_ = torch.max(alpha,1)
            # entropy = np.mean(-np.log2(mx.cpu().detach().numpy()))
            # print("entropy of batch", entropy)
            #loss = (1-k)*criter(outputs, labels) + k*ent
            loss,closs,entropy = my_cross_entropy(outputs,labels,alpha,log_alpha,k)
            r_loss += loss.item()
            cc_loss += closs.item()
            cc_entropy += entropy.item()
    alphas = np.concatenate(alphas,axis=0)
    pred = np.concatenate(pred,axis=0)
    lbls = np.concatenate(lbls,axis=0)
    fidices = np.concatenate(fidices,axis=0)
    #print(alphas.shape,pred.shape,lbls.shape,fidices.shape)
    analysis = analyse_data(alphas,lbls,pred,fidices)
    # NOTE(review): losses are divided by i, the last enumerate index
    # (len(dataloader)-1), not by the number of batches — presumably this
    # should be i+1; confirm intent before changing.
    return r_loss/i,cc_loss/i,cc_entropy/i,analysis

def analyse_data(alphas,lbls,predicted,f_idx):
    '''
    analysis data is created here

    Build the FTPT/FFPT/FTPF/FFPF breakdown for one evaluation pass.
    alphas    : per-mosaic attention weights from the focus net
    lbls      : true labels
    predicted : predicted labels
    f_idx     : index (0-8) of the true foreground patch in each mosaic

    Returns [ftpt, ffpt, ftpf, ffpf, amth, alth], e.g. ftpt = "focused on
    the true patch AND predicted the true label"; amth/alth count how
    often the maximum alpha is >= 0.5 / < 0.5.
    '''
    batch = len(predicted)
    amth,alth,ftpt,ffpt,ftpf,ffpf = 0,0,0,0,0,0
    for j in range (batch):
        focus = np.argmax(alphas[j])  # patch the focus net attends to most
        if(alphas[j][focus] >= 0.5):
            amth +=1
        else:
            alth +=1
        if(focus == f_idx[j] and predicted[j] == lbls[j]):
            ftpt += 1
        elif(focus != f_idx[j] and predicted[j] == lbls[j]):
            ffpt +=1
        elif(focus == f_idx[j] and predicted[j] != lbls[j]):
            ftpf +=1
        elif(focus != f_idx[j] and predicted[j] != lbls[j]):
            ffpf +=1
    #print(sum(predicted==lbls),ftpt+ffpt)
    return [ftpt,ffpt,ftpf,ffpf,amth,alth]

# run the alternating-minimisation experiment number_runs times
number_runs = 10
full_analysis =[]
FTPT_analysis = pd.DataFrame(columns = ["FTPT","FFPT", "FTPF","FFPF"])
k = 0.005               # weight of the entropy term in the combined loss
every_what_epoch = 20   # alternate what-net / where-net updates in blocks of 20 epochs
for n in range(number_runs):
    print("--"*40)
    # instantiate focus and classification Model
    torch.manual_seed(n)
    where = Focus_deep(5,1,9,5).double()
    torch.manual_seed(n)
    what = Classification_deep(5,3).double()
    where = where.to("cuda")
    what = what.to("cuda")
    # instantiate optimizer
    optimizer_where = optim.Adam(where.parameters(),lr =0.01)
    optimizer_what = optim.Adam(what.parameters(), lr=0.01)
    #criterion = nn.CrossEntropyLoss()
    acti = []
    analysis_data = []
    loss_curi = []
    epochs = 2000
    # calculate zeroth epoch loss and FTPT values
    running_loss ,_,_,anlys_data= calculate_attn_loss(train_loader,what,where,criterion,k)
    loss_curi.append(running_loss)
    analysis_data.append(anlys_data)
    print('epoch: [%d ] loss: %.3f' %(0,running_loss))
    # training starts
    for epoch in range(epochs): # loop over the dataset multiple times
        ep_lossi = []
        running_loss = 0.0
        what.train()
        where.train()
        # alternating minimisation: the first every_what_epoch epochs of each
        # 2*every_what_epoch window update only the what-net, the rest only the where-net
        if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
            print(epoch+1,"updating what_net, where_net is freezed")
            print("--"*40)
        elif ((epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
            print(epoch+1,"updating where_net, what_net is freezed")
            print("--"*40)
        for i, data in enumerate(train_loader, 0):
            # get the inputs
            inputs, labels,_ = data
            inputs = inputs.double()
            inputs, labels = inputs.to("cuda"),labels.to("cuda")
            # zero the parameter gradients
            optimizer_where.zero_grad()
            optimizer_what.zero_grad()
            # forward + backward + optimize
            avg, alpha,log_alpha = where(inputs)
            outputs = what(avg)
            my_loss,_,_ = my_cross_entropy(outputs,labels,alpha,log_alpha,k)
            # print statistics
            running_loss += my_loss.item()
            my_loss.backward()
            # step only the optimizer whose phase is currently active
            if ((epoch) % (every_what_epoch*2) ) <= every_what_epoch-1 :
                optimizer_what.step()
            elif ( (epoch) % (every_what_epoch*2)) > every_what_epoch-1 :
                optimizer_where.step()
            # optimizer_where.step()
            # optimizer_what.step()
            #break
        # re-evaluate over the full training set after each epoch
        running_loss,ccloss,ccentropy,anls_data = calculate_attn_loss(train_loader,what,where,criterion,k)
        analysis_data.append(anls_data)
        print('epoch: [%d] loss: %.3f celoss: %.3f entropy: %.3f' %(epoch + 1,running_loss,ccloss,ccentropy))
        loss_curi.append(running_loss)   #loss per epoch
        if running_loss<=0.001:
            break
    print('Finished Training run ' +str(n))
    #break
    analysis_data = np.array(analysis_data)
    # final-epoch FTPT/FFPT/FTPF/FFPF counts as a percentage of the 3000 mosaics
    FTPT_analysis.loc[n] = analysis_data[-1,:4]/30
    full_analysis.append((epoch, analysis_data))
    # train-set accuracy of the final (what, where) pair
    correct = 0
    total = 0
    with torch.no_grad():
        for data in train_loader:
            images, labels,_ = data
            images = images.double()
            images, labels = images.to("cuda"), labels.to("cuda")
            avg, alpha,log_alpha = where(images)
            outputs = what(avg)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 3000 train images: %d %%' % ( 100 * correct / total))

a,b= full_analysis[0]
print(a)

# plot the FTPT/FFPT/FTPF/FFPF trends for every run and save the figures
cnt=1
for epoch, analysis_data in full_analysis:
    analysis_data = np.array(analysis_data)
    # print("="*20+"run ",cnt,"="*20)
    plt.figure(figsize=(6,6))
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,0],label="ftpt")
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,1],label="ffpt")
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,2],label="ftpf")
    plt.plot(np.arange(0,epoch+2,1),analysis_data[:,3],label="ffpf")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.title("Training trends for run "+str(cnt))
    plt.savefig(path+"50_50_10runs_entropy/every20/run"+str(cnt)+".png",bbox_inches="tight")
    plt.savefig(path+"50_50_10runs_entropy/every20/run"+str(cnt)+".pdf",bbox_inches="tight")
    cnt+=1

np.mean(np.array(FTPT_analysis),axis=0)
#array([87.85333333, 5.92 , 0. , 6.22666667])

FTPT_analysis.to_csv(path+"50_50_10runs_entropy/FTPT_analysis_every20"+name+".csv",index=False)
FTPT_analysis
```
github_jupyter
```
# test the nn pipeline
import sys
sys.path.insert(0,"/Users/rezaie/github/DESILSS")
import NN
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import KFold

def split2Kfolds(data, k=5, shuffle=True, random_seed=123):
    '''
    split data into k randomly chosen regions
    for training, validation and testing

    data        : structured numpy array to be partitioned
    k           : number of folds; must be >= 3 so each fold can have
                  disjoint train / validation / test members
    shuffle     : passed through to sklearn's KFold
    random_seed : seeds both numpy (validation draw) and KFold

    Returns {'test': {...}, 'train': {...}, 'validation': {...}}, each
    mapping 'fold0'..'fold<k-1>' to a slice of `data`.
    '''
    assert k >=3  # NOTE(review): assert is stripped under -O; raising ValueError would be safer
    np.random.seed(random_seed)
    kfold = KFold(k, shuffle=shuffle, random_state=random_seed)
    index = np.arange(data.size)
    kfold_data = {'test':{}, 'train':{}, 'validation':{}}
    for i, (nontestID, testID) in enumerate(kfold.split(index)):
        #
        #
        foldname = 'fold'+str(i)
        # validation set: random non-test subset, same size as the test fold
        validID = np.random.choice(nontestID, size=testID.size, replace=False)
        trainID = np.setdiff1d(nontestID, validID)
        #
        #
        kfold_data['test'][foldname] = data[testID]
        kfold_data['train'][foldname] = data[trainID]
        kfold_data['validation'][foldname] = data[validID]
    return kfold_data

# mock labels: F(x, y, z) = (1 + y^2)(1 - x^2) (z is deliberately unused) plus unit Gaussian noise
F = lambda x, y, z: (1.+y*y)*(1.-x*x)
np.random.seed(123)
n = 10000
x = np.random.uniform(-1, 1, size=n)
y = np.random.uniform(-1, 1, size=n)
z = np.random.uniform(-1, 1, size=n)
Fy = []
for i in range(n):
    Fy.append(F(x[i], y[i], z[i]))
Fy = np.array(Fy)
error = np.random.normal(size=Fy.size)
Fyn = Fy + error # turn off noise, we expect RMSE to be 0
print(Fy.size, Fyn.size)
print("std of errors = {:.4f}".format(np.std(error)))

# scatter each feature against the noisy label
f,a = plt.subplots(ncols=3, sharey=True, figsize=(15,5))
l = ["x","y","z"]
a[0].set_ylabel("F(x,y,z)")
for i,s in enumerate([x, y, z]):
    a[i].scatter(s, Fyn, alpha=0.1)
    a[i].set_xlabel(l[i])

# pack everything into the structured-array format the NN module expects
data = np.zeros(n, dtype=[('hpix','i8'),('label','f8'),\
('features',('f8', 3)), ('error', 'f8'), ('fracgood', 'f8')])
data['hpix'] = np.zeros(Fyn.size)
data['label'] = Fyn
data['features']= np.column_stack([x, y, z])
data['error'] = error
data['fracgood'] = 1.0
data

data2fold = split2Kfolds(data, k=3)
print("RMSE of fold0 errors %.5f"%np.std(data2fold['train']['fold0']['error']))
print("RMSE of fold1 errors %.5f"%np.std(data2fold['train']['fold1']['error']))
print("RMSE of fold2 errors %.5f"%np.std(data2fold['train']['fold2']['error'])) address = "./outputs/mock/" config = {'nchain':10, 'nepoch':100, 'batchsize':200, 'Units':[5,5,5], 'learning_rate':0.01, 'scale':1.e-6} for rank in [0, 1, 2]: fold = 'fold'+str(rank) print(fold, ' is being processed') NN.run_nchainlearning(address+fold+'/', data2fold['train'][fold], data2fold['validation'][fold], data2fold['test'][fold], **config) from glob import glob files = glob("./outputs/mock/fold*/*nepoch100*scale1e*.npz") files plt.rc('font', size=12, family='Serif') plt.rc('axes.spines', right=False, top=False) f, ax = plt.subplots(ncols=2, nrows=2, figsize=(8,8), sharex=True, sharey=True) plt.subplots_adjust(wspace=0.02, hspace=0.1) ax = ax.flatten() f.delaxes(ax[-1]) for j,file_i in enumerate(files): d = np.load(file_i) out = d['arr_0'].item() # print(out.keys(), out['options']) # break for i,mse in enumerate(out['epoch_MSEs']): if i == 0: ax[j].axhline(np.sqrt(out['options']['baselineMSE'][0]), ls='-', c='k', label='train') ax[j].axhline(np.sqrt(out['options']['baselineMSE'][1]), ls='--', c='k', label='validation') ax[j].plot(mse[-1][:,0], np.sqrt(mse[-1][:,1]),ls='-',c='blue', alpha=0.5) ax[j].plot(mse[-1][:,0], np.sqrt(mse[-1][:,2]),ls='--',c='grey', alpha=0.6) ax[j].set_xscale('log') ax[j].text(0.02, 0.95, "fold-"+str(j), transform=ax[j].transAxes, fontsize=12) if j ==1:ax[j].legend(frameon=False, loc='upper right') ax[j].set_ylim(0.9, 1.1) # ax[j].text(100, 1.02*rmse[2], "Baseline RMSE", color="k") if j in [0, 2]:ax[j].set_ylabel('RMSE') if j in [2, 3]:ax[j].set_xlabel('Training Epoch') ax[j].set_xlim(1, 500) ax[j].set_xlabel("training epoch") ``` Blue solid lines stand for training RMSEs, while grey dashed lines are testing RMSEs. Different lines stand for different chains, just as a check for convergence. Horizontal solid (dashed) black line stands for train (test) baseline RMSEs. 
```
# this function loops over the outputs and combines them
_,X,Yt,Yp,_,_ = NN.read_NNfolds(files)
X.shape, Yt.shape, Yp.shape

# 3x3 grid: row 0 = true labels, row 1 = true and predicted overlaid,
# row 2 = predicted labels; one column per feature (x, y, z)
f,a = plt.subplots(ncols=3, nrows=3, figsize=(15,5), sharey=True, sharex=True)
a = a.flatten()
plt.subplots_adjust(wspace=0.05, hspace=0.1)
k = 0
for j in range(3):
    a[0+k].scatter(X[:,j], Yt, 1, marker='.', c='b', alpha=0.5)
    a[6+k].scatter(X[:,j], Yp, 1, marker='.', c='r', alpha=0.5)
    a[3+k].scatter(X[:,j], Yt, 1, marker='.', c='b', alpha=0.5)
    a[3+k].scatter(X[:,j], Yp, 1, marker='.', c='r', alpha=0.5)
    k += 1
for a_i in a:
    a_i.set_ylim(-4., 4.)
l = ['true label', 'true/pred\nlabel', 'pred label']
for j,i in enumerate([0, 3, 6]):
    a[i].set_ylabel(l[j])
l = ['x', 'y', 'z']
for j,i in enumerate([6,7,8]):
    a[i].set_xlabel("feature-"+l[j])
```
github_jupyter
## Coming soon in `numba` 0.34 You can install the release candidate as of 07/09/2017 from the `numba` conda channel ``` conda install -c numba numba ``` ``` import numpy from numba import njit ``` Define some reasonably expensive operation in a function. ``` def do_trig(x, y): z = numpy.sin(x**2) + numpy.cos(y) return z ``` We can start with 1000 x 1000 arrays ``` x = numpy.random.random((1000, 1000)) y = numpy.random.random((1000, 1000)) %timeit do_trig(x, y) ``` Now let's `jit` this function. What do we expect to get out of this? Probably nothing, honestly. As we've seen, `numpy` is pretty good at what it does. ``` do_trig_jit = njit()(do_trig) %timeit do_trig_jit(x, y) ``` Maybe a _hair_ slower than the bare `numpy` version. So yeah, no improvement. ### BUT Starting in version 0.34, with help from the Intel Parallel Accelerator team, you can now pass a `parallel` keyword argument to `jit` and `njit`. Like this: ``` do_trig_jit_par = njit(parallel=True)(do_trig) ``` How do we think this will run? ``` %timeit do_trig_jit_par(x, y) ``` Not bad -- around a 3x speedup for a single line? And what if we unroll the array operations like we've seen before? Does that help us out? ``` @njit def do_trig(x, y): z = numpy.empty_like(x) for i in range(x.shape[0]): for j in range(x.shape[1]): z[i, j] = numpy.sin(x[i, j]**2) + numpy.cos(y[i, j]) return z %timeit do_trig(x, y) ``` Hmm, that's actually a hair faster than before. Cool! Now let's parallelize it! ``` @njit(parallel=True) def do_trig(x, y): z = numpy.empty_like(x) for i in range(x.shape[0]): for j in range(x.shape[1]): z[i, j] = numpy.sin(x[i, j]**2) + numpy.cos(y[i, j]) return z %timeit do_trig(x, y) ``` What happened? Well, automatic parallelization is a _pretty hard_ problem. (This is a massive understatement). Basically, parallel `jit` is "limited" to working on array operations, so in this case, unrolling loops will hurt you. Blarg. ### FAQ that I just made up - Why didn't you tell us about this before? 
It is brand new. The numba team is great, but has a really bad habit of releasing new features 5-10 days before I run a tutorial.
github_jupyter
``` from __future__ import print_function import pandas as pd import numpy as np from scipy import stats import matplotlib.pyplot as plt import statsmodels.api as sm from statsmodels.graphics.api import qqplot %matplotlib inline dta=[10930,10318,10595,10972,7706,6756,9092,10551,9722,10913,11151,8186,6422, 6337,11649,11652,10310,12043,7937,6476,9662,9570,9981,9331,9449,6773,6304,9355, 10477,10148,10395,11261,8713,7299,10424,10795,11069,11602,11427,9095,7707,10767, 12136,12812,12006,12528,10329,7818,11719,11683,12603,11495,13670,11337,10232, 13261,13230,15535,16837,19598,14823,11622,19391,18177,19994,14723,15694,13248, 9543,12872,13101,15053,12619,13749,10228,9725,14729,12518,14564,15085,14722, 11999,9390,13481,14795,15845,15271,14686,11054,10395] dta = pd.Series(dta) dta.index = pd.date_range(start='2001-01-01', end='2091-01-01', freq='A') dta.plot(figsize=(12,8)) fig = plt.figure(figsize=(12, 8)) ax1= fig.add_subplot(111) diff1 = dta.diff(1) diff1.plot(ax=ax1) fig = plt.figure(figsize=(12, 8)) ax1= fig.add_subplot(111) diff1 = dta.diff(2) diff1.plot(ax=ax1) dta= dta.diff(1)[1:] fig = plt.figure(figsize=(12,8)) ax1=fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(dta,lags=40,ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(dta,lags=40,ax=ax2) arma_mod20 = sm.tsa.ARMA(dta,(7,0)).fit() print(arma_mod20.aic,arma_mod20.bic,arma_mod20.hqic) arma_mod30 = sm.tsa.ARMA(dta,(0,1)).fit() print(arma_mod30.aic,arma_mod30.bic,arma_mod30.hqic) arma_mod40 = sm.tsa.ARMA(dta,(7,1)).fit() print(arma_mod40.aic,arma_mod40.bic,arma_mod40.hqic) arma_mod50 = sm.tsa.ARMA(dta,(8,0)).fit() print(arma_mod50.aic,arma_mod50.bic,arma_mod50.hqic) ``` aic/bic/hqic 越小越好,股选择ARMA(7,0) ## 模型检验 在指数平滑模型下,观察ARIMA模型的残差是否是平均值为0且方差为常数的正态分布(服从零均值、方差不变的正态分布),同时也要观察连续残差是否(自)相关。 ``` fig = plt.figure(figsize=(12,8)) ax1 = fig.add_subplot(211) fig = sm.graphics.tsa.plot_acf(arma_mod20.resid, lags=40, ax=ax1) ax2 = fig.add_subplot(212) fig = sm.graphics.tsa.plot_pacf(arma_mod20.resid, lags=40, 
ax=ax2) arma_mod20.resid.mean()/arma_mod20.resid.std() arma_mod20.resid.plot() ``` ## 做D-W检验 德宾-沃森(Durbin-Watson)检验。德宾-沃森检验,简称D-W检验,是目前检验自相关性最常用的方法,但它只使用于检验一阶自相关性。因为自相关系数ρ的值介于-1和1之间,所以 0≤DW≤4。并且DW=0 ->ρ=1   即存在正自相关性 DW=4->ρ=-1 即存在负自相关性 DW=2->ρ=0  即不存在(一阶)自相关性 因此,当DW值显著的接近于0或4时,则存在自相关性,而接近于2时,则不存在(一阶)自相关性。这样只要知道DW统计量的概率分布,在给定的显著水平下,根据临界值的位置就可以对原假设$H_0$进行检验。 ``` print(sm.stats.durbin_watson(arma_mod20.resid.values)) resid = arma_mod20.resid#残差 fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) fig = qqplot(resid, line='q', ax=ax, fit=True) ``` ## Ljung-Box检验 Ljung-Box test是对randomness的检验,或者说是对时间序列是否存在滞后相关的一种统计检验。对于滞后相关的检验,我们常常采用的方法还包括计算ACF和PCAF并观察其图像,但是无论是ACF还是PACF都仅仅考虑是否存在某一特定滞后阶数的相关。LB检验则是基于一系列滞后阶数,判断序列总体的相关性或者说随机性是否存在。 时间序列中一个最基本的模型就是高斯白噪声序列。而对于ARIMA模型,其残差被假定为高斯白噪声序列,所以当我们用ARIMA模型去拟合数据时,拟合后我们要对残差的估计序列进行LB检验,判断其是否是高斯白噪声,如果不是,那么就说明ARIMA模型也许并不是一个适合样本的模型。 ``` r,q,p = sm.tsa.acf(resid.values.squeeze(), qstat=True) data = np.c_[range(1,41), r[1:], q, p] table = pd.DataFrame(data, columns=['lag', "AC", "Q", "Prob(>Q)"]) table.set_index('lag') table[:20] ``` 检验的结果就是看最后一列前十二行的检验概率(一般观察滞后1~12阶),如果检验概率小于给定的显著性水平,比如0.05、0.10等就拒绝原假设,其原假设是相关系数为零。就结果来看,如果取显著性水平为0.05,那么相关系数与零没有显著差异,即为白噪声序列。 ``` predict_sunspots = arma_mod20.predict('2090', '2100', dynamic=True) print(predict_sunspots) fig, ax = plt.subplots(figsize=(12, 8)) ax = dta.loc['2001':].plot(ax=ax) predict_sunspots.plot(ax=ax) pd.ewma? ```
github_jupyter
# Deming Regression ------------------------------- This function shows how to use TensorFlow to solve linear Deming regression. $y = Ax + b$ We will use the iris data, specifically: y = Sepal Length and x = Petal Width. Demming regression is also called total least squares, in which we minimize the shortest distance from the predicted line and the actual (x,y) points. If least squares linear regression minimizes the vertical distance to the line, Deming regression minimizes the total distance to the line. This type of regression minimizes the error in the y values and the x values. See the below figure for a comparison. <img src="../images/05_demming_vs_linear_reg.png" width="512"> To implement this in TensorFlow, we start by loading the necessary libraries. ``` import matplotlib.pyplot as plt import numpy as np import tensorflow as tf from sklearn import datasets from tensorflow.python.framework import ops ops.reset_default_graph() ``` Start a computational graph session: ``` sess = tf.Session() ``` We load the iris data. ``` # Load the data # iris.data = [(Sepal Length, Sepal Width, Petal Length, Petal Width)] iris = datasets.load_iris() x_vals = np.array([x[3] for x in iris.data]) # Petal Width y_vals = np.array([y[0] for y in iris.data]) # Sepal Length ``` Next we declare the batch size, model placeholders, model variables, and model operations. ``` # Declare batch size batch_size = 125 # Initialize placeholders x_data = tf.placeholder(shape=[None, 1], dtype=tf.float32) y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32) # Create variables for linear regression A = tf.Variable(tf.random_normal(shape=[1,1])) b = tf.Variable(tf.random_normal(shape=[1,1])) # Declare model operations model_output = tf.add(tf.matmul(x_data, A), b) ``` For the demming loss, we want to compute: $$ \frac{\left| A \cdot x + b - y \right|}{\sqrt{A^{2} + 1}} $$ Which will give us the shortest distance between a point (x,y) and the predicted line, $A \cdot x + b$. 
```
# Declare Deming loss function: |A*x + b - y| / sqrt(A^2 + 1),
# i.e. the perpendicular distance from each point to the fitted line.
demming_numerator = tf.abs(tf.subtract(tf.add(tf.matmul(x_data, A), b), y_target))
demming_denominator = tf.sqrt(tf.add(tf.square(A),1))
loss = tf.reduce_mean(tf.truediv(demming_numerator, demming_denominator))
```

Next we declare the optimization function and initialize all model variables.

```
# Declare optimizer
my_opt = tf.train.GradientDescentOptimizer(0.25)
train_step = my_opt.minimize(loss)

# Initialize variables
init = tf.global_variables_initializer()
sess.run(init)
```

Now we train our Deming regression for 1500 iterations.

```
# Training loop: sample a random batch, take one gradient step, record the loss
loss_vec = []
for i in range(1500):
    rand_index = np.random.choice(len(x_vals), size=batch_size)
    rand_x = np.transpose([x_vals[rand_index]])
    rand_y = np.transpose([y_vals[rand_index]])
    sess.run(train_step, feed_dict={x_data: rand_x, y_target: rand_y})
    temp_loss = sess.run(loss, feed_dict={x_data: rand_x, y_target: rand_y})
    loss_vec.append(temp_loss)
    if (i+1)%100==0:
        print('Step #' + str(i+1) + ' A = ' + str(sess.run(A)) + ' b = ' + str(sess.run(b)))
        print('Loss = ' + str(temp_loss))
```

Retrieve the optimal coefficients (slope and intercept).

```
# Get the optimal coefficients
[slope] = sess.run(A)
[y_intercept] = sess.run(b)

# Get best fit line
best_fit = []
for i in x_vals:
    best_fit.append(slope*i+y_intercept)
```

Here is matplotlib code to plot the best fit Deming regression line and the Deming loss.

```
# Plot the result
plt.plot(x_vals, y_vals, 'o', label='Data Points')
plt.plot(x_vals, best_fit, 'r-', label='Best fit line', linewidth=3)
plt.legend(loc='upper left')
plt.title('Sepal Length vs Pedal Width')
plt.xlabel('Pedal Width')
plt.ylabel('Sepal Length')
plt.show()

# Plot loss over time
plt.plot(loss_vec, 'k-')
plt.title('Demming Loss per Generation')
plt.xlabel('Iteration')
plt.ylabel('Demming Loss')
plt.show()
```

tested; Gopal
github_jupyter
# Test of widgets * lets see what we got here ``` # try the following: #!pip install ipywidgets==7.4.2 #!pip install bqplot # lets import our usual stuff import pandas as pd import bqplot import numpy as np import traitlets import ipywidgets %matplotlib inline data = np.random.random((10, 10)) # now add scales - colors, x & y col_sc = bqplot.ColorScale(scheme = "Reds") x_sc = bqplot.OrdinalScale() y_sc = bqplot.OrdinalScale() # create axis - for colors, x & y c_ax = bqplot.ColorAxis(scale = col_sc, orientation = 'vertical', side = 'right') x_ax = bqplot.Axis(scale = x_sc) y_ax = bqplot.Axis(scale = y_sc, orientation = 'vertical') # so now, lets write a little function that links the data value # to the selected & lets print this in a little ipywidgets label mySelectedLabel = ipywidgets.Label() # (1) # lets write our linking function # there are a few ways to link this, # here is a simple way first def get_data_value(change): i,j = heat_map.selected[0] v = data[i,j] # grab data value mySelectedLabel.value = str(v) # set our label # (2) this is maybe in-elegant as we are # explicitly calling our origininal heat map! # so, lets instead remind ourselves what "change" is here def get_data_value(change): print(change) i,j = heat_map.selected[0] v = data[i,j] # grab data value mySelectedLabel.value = str(v) # set our label # now we see when we click we get back a whole # dictionary of information - if we recall, # "owner" here is our heat_map which "owns" # this change. 
# If we want to be able to apply our function to # this or any other heatmap figure we generate, # we can re-write the above function as follows: # (3) #def get_data_value(change,mylab): def get_data_value(change): #print(change['owner'].selected) i,j = change['owner'].selected[0] v = data[i,j] # grab data value mySelectedLabel.value = str(v) # set our label #mylab.value = str(v) # set our label # so, this now is applied to any map that we choose to input # regenerate our heatmap to use in our fig canvas heat_map = bqplot.GridHeatMap(color = data, scales = {'color': col_sc, 'row': y_sc, 'column': x_sc}, interactions = {'click': 'select'}, anchor_style = {'fill':'blue'}, selected_style = {'opacity': 1.0}, unselected_style = {'opacity': 0.8}) # make sure we check out heat_map.observe(get_data_value, 'selected') #heat_map.observe(self, mySelectedLabel) fig = bqplot.Figure(marks = [heat_map], axes = [c_ax, y_ax, x_ax]) ipywidgets.VBox([mySelectedLabel, fig]) ```
github_jupyter
TSG098 - Get BDC replicasets (Kubernetes) ========================================= Description ----------- Steps ----- ### Common functions Define helper functions used in this notebook. ``` # Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import json import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found first_run = True rules = None debug_logging = False def run(cmd, return_output=False, no_output=False, retry_count=0): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False global first_run global rules if first_run: first_run = False rules = load_rules() # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces 
# cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportabilty, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. 
(otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) if which_binary == None: if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. 
else: print(line, end='') if rules is not None: apply_expert_rules(line) if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. # # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. 
# if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # apply expert rules (to run follow-on notebooks), based on output # if rules is not None: apply_expert_rules(line_decoded) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: return output else: return elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: return output def load_json(filename): """Load a json file from disk and return the contents""" with open(filename, encoding="utf8") as json_file: return json.load(json_file) def load_rules(): """Load any 'expert rules' from the metadata of this notebook (.ipynb) that 
should be applied to the stderr of the running executable""" # Load this notebook as json to get access to the expert rules in the notebook metadata. # try: j = load_json("tsg098-get-replicasets.ipynb") except: pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename? else: if "metadata" in j and \ "azdata" in j["metadata"] and \ "expert" in j["metadata"]["azdata"] and \ "expanded_rules" in j["metadata"]["azdata"]["expert"]: rules = j["metadata"]["azdata"]["expert"]["expanded_rules"] rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first. # print (f"EXPERT: There are {len(rules)} rules to evaluate.") return rules def apply_expert_rules(line): """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run""" global rules for rule in rules: notebook = rule[1] cell_type = rule[2] output_type = rule[3] # i.e. stream or error output_type_name = rule[4] # i.e. ename or name output_type_value = rule[5] # i.e. SystemExit or stdout details_name = rule[6] # i.e. evalue or text expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it! 
if debug_logging: print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.") if re.match(expression, line, re.DOTALL): if debug_logging: print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook)) match_found = True display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.')) print('Common functions defined successfully.') # Hints for binary (transient fault) retry, (known) error and install guide # retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond']} error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']]} install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb']} ``` ### Get the Kubernetes namespace for the big data cluster Get the namespace of the Big Data Cluster use the kubectl command line interface . **NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either: - set \[0\] to the correct value for the big data cluster. - set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio. 
``` # Place Kubernetes namespace name for BDC into 'namespace' variable if "AZDATA_NAMESPACE" in os.environ: namespace = os.environ["AZDATA_NAMESPACE"] else: try: namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True) except: from IPython.display import Markdown print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.") display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.')) raise print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}') ``` ### Run kubectl to display the replica sets ``` run(f"kubectl get replicaset -n {namespace} -o wide") print('Notebook execution complete.') ```
github_jupyter