text
stringlengths
2.5k
6.39M
kind
stringclasses
3 values
``` #hide from utils import * ``` # A fastai Learner from Scratch ## Data ``` path = untar_data(URLs.IMAGENETTE_160) t = get_image_files(path) t[0] from glob import glob files = L(glob(f'{path}/**/*.JPEG', recursive=True)).map(Path) files[0] im = Image.open(files[0]) im im_t = tensor(im) im_t.shape lbls = files.map(Self.parent.name()).unique(); lbls v2i = lbls.val2idx(); v2i ``` ### Dataset ``` class Dataset: def __init__(self, fns): self.fns=fns def __len__(self): return len(self.fns) def __getitem__(self, i): im = Image.open(self.fns[i]).resize((64,64)).convert('RGB') y = v2i[self.fns[i].parent.name] return tensor(im).float()/255, tensor(y) train_filt = L(o.parent.parent.name=='train' for o in files) train,valid = files[train_filt],files[~train_filt] len(train),len(valid) train_ds,valid_ds = Dataset(train),Dataset(valid) x,y = train_ds[0] x.shape,y show_image(x, title=lbls[y]); def collate(idxs, ds): xb,yb = zip(*[ds[i] for i in idxs]) return torch.stack(xb),torch.stack(yb) x,y = collate([1,2], train_ds) x.shape,y class DataLoader: def __init__(self, ds, bs=128, shuffle=False, n_workers=1): self.ds,self.bs,self.shuffle,self.n_workers = ds,bs,shuffle,n_workers def __len__(self): return (len(self.ds)-1)//self.bs+1 def __iter__(self): idxs = L.range(self.ds) if self.shuffle: idxs = idxs.shuffle() chunks = [idxs[n:n+self.bs] for n in range(0, len(self.ds), self.bs)] with ProcessPoolExecutor(self.n_workers) as ex: yield from ex.map(collate, chunks, ds=self.ds) n_workers = min(16, defaults.cpus) train_dl = DataLoader(train_ds, bs=128, shuffle=True, n_workers=n_workers) valid_dl = DataLoader(valid_ds, bs=256, shuffle=False, n_workers=n_workers) xb,yb = first(train_dl) xb.shape,yb.shape,len(train_dl) stats = [xb.mean((0,1,2)),xb.std((0,1,2))] stats class Normalize: def __init__(self, stats): self.stats=stats def __call__(self, x): if x.device != self.stats[0].device: self.stats = to_device(self.stats, x.device) return (x-self.stats[0])/self.stats[1] norm = 
Normalize(stats) def tfm_x(x): return norm(x).permute((0,3,1,2)) t = tfm_x(x) t.mean((0,2,3)),t.std((0,2,3)) ``` ## Module and Parameter ``` class Parameter(Tensor): def __new__(self, x): return Tensor._make_subclass(Parameter, x, True) def __init__(self, *args, **kwargs): self.requires_grad_() Parameter(tensor(3.)) class Module: def __init__(self): self.hook,self.params,self.children,self._training = None,[],[],False def register_parameters(self, *ps): self.params += ps def register_modules (self, *ms): self.children += ms @property def training(self): return self._training @training.setter def training(self,v): self._training = v for m in self.children: m.training=v def parameters(self): return self.params + sum([m.parameters() for m in self.children], []) def __setattr__(self,k,v): super().__setattr__(k,v) if isinstance(v,Parameter): self.register_parameters(v) if isinstance(v,Module): self.register_modules(v) def __call__(self, *args, **kwargs): res = self.forward(*args, **kwargs) if self.hook is not None: self.hook(res, args) return res def cuda(self): for p in self.parameters(): p.data = p.data.cuda() class ConvLayer(Module): def __init__(self, ni, nf, stride=1, bias=True, act=True): super().__init__() self.w = Parameter(torch.zeros(nf,ni,3,3)) self.b = Parameter(torch.zeros(nf)) if bias else None self.act,self.stride = act,stride init = nn.init.kaiming_normal_ if act else nn.init.xavier_normal_ init(self.w) def forward(self, x): x = F.conv2d(x, self.w, self.b, stride=self.stride, padding=1) if self.act: x = F.relu(x) return x l = ConvLayer(3, 4) len(l.parameters()) xbt = tfm_x(xb) r = l(xbt) r.shape class Linear(Module): def __init__(self, ni, nf): super().__init__() self.w = Parameter(torch.zeros(nf,ni)) self.b = Parameter(torch.zeros(nf)) nn.init.xavier_normal_(self.w) def forward(self, x): return x@self.w.t() + self.b l = Linear(4,2) r = l(torch.ones(3,4)) r.shape class T(Module): def __init__(self): super().__init__() self.c,self.l = 
ConvLayer(3,4),Linear(4,2) t = T() len(t.parameters()) t.cuda() t.l.w.device ``` ### Simple CNN ``` class Sequential(Module): def __init__(self, *layers): super().__init__() self.layers = layers self.register_modules(*layers) def forward(self, x): for l in self.layers: x = l(x) return x class AdaptivePool(Module): def forward(self, x): return x.mean((2,3)) def simple_cnn(): return Sequential( ConvLayer(3 ,16 ,stride=2), #32 ConvLayer(16,32 ,stride=2), #16 ConvLayer(32,64 ,stride=2), # 8 ConvLayer(64,128,stride=2), # 4 AdaptivePool(), Linear(128, 10) ) m = simple_cnn() len(m.parameters()) def print_stats(outp, inp): print (outp.mean().item(),outp.std().item()) for i in range(4): m.layers[i].hook = print_stats r = m(xbt) r.shape ``` ## Loss ``` def nll(input, target): return -input[range(target.shape[0]), target].mean() def log_softmax(x): return (x.exp()/(x.exp().sum(-1,keepdim=True))).log() sm = log_softmax(r); sm[0][0] loss = nll(sm, yb) loss def log_softmax(x): return x - x.exp().sum(-1,keepdim=True).log() sm = log_softmax(r); sm[0][0] x = torch.rand(5) a = x.max() x.exp().sum().log() == a + (x-a).exp().sum().log() def logsumexp(x): m = x.max(-1)[0] return m + (x-m[:,None]).exp().sum(-1).log() logsumexp(r)[0] def log_softmax(x): return x - x.logsumexp(-1,keepdim=True) sm = log_softmax(r); sm[0][0] def cross_entropy(preds, yb): return nll(log_softmax(preds), yb).mean() ``` ## Learner ``` class SGD: def __init__(self, params, lr, wd=0.): store_attr(self, 'params,lr,wd') def step(self): for p in self.params: p.data -= (p.grad.data + p.data*self.wd) * self.lr p.grad.data.zero_() class DataLoaders: def __init__(self, *dls): self.train,self.valid = dls dls = DataLoaders(train_dl,valid_dl) class Learner: def __init__(self, model, dls, loss_func, lr, cbs, opt_func=SGD): store_attr(self, 'model,dls,loss_func,lr,cbs,opt_func') for cb in cbs: cb.learner = self def one_batch(self): self('before_batch') xb,yb = self.batch self.preds = self.model(xb) self.loss = 
self.loss_func(self.preds, yb) if self.model.training: self.loss.backward() self.opt.step() self('after_batch') def one_epoch(self, train): self.model.training = train self('before_epoch') dl = self.dls.train if train else self.dls.valid for self.num,self.batch in enumerate(progress_bar(dl, leave=False)): self.one_batch() self('after_epoch') def fit(self, n_epochs): self('before_fit') self.opt = self.opt_func(self.model.parameters(), self.lr) self.n_epochs = n_epochs try: for self.epoch in range(n_epochs): self.one_epoch(True) self.one_epoch(False) except CancelFitException: pass self('after_fit') def __call__(self,name): for cb in self.cbs: getattr(cb,name,noop)() ``` ### Callbacks ``` class Callback(GetAttr): _default='learner' class SetupLearnerCB(Callback): def before_batch(self): xb,yb = to_device(self.batch) self.learner.batch = tfm_x(xb),yb def before_fit(self): self.model.cuda() class TrackResults(Callback): def before_epoch(self): self.accs,self.losses,self.ns = [],[],[] def after_epoch(self): n = sum(self.ns) print(self.epoch, self.model.training, sum(self.losses).item()/n, sum(self.accs).item()/n) def after_batch(self): xb,yb = self.batch acc = (self.preds.argmax(dim=1)==yb).float().sum() self.accs.append(acc) n = len(xb) self.losses.append(self.loss*n) self.ns.append(n) cbs = [SetupLearnerCB(),TrackResults()] learn = Learner(simple_cnn(), dls, cross_entropy, lr=0.1, cbs=cbs) learn.fit(1) ``` ### Scheduling the Learning Rate ``` class LRFinder(Callback): def before_fit(self): self.losses,self.lrs = [],[] self.learner.lr = 1e-6 def before_batch(self): if not self.model.training: return self.opt.lr *= 1.2 def after_batch(self): if not self.model.training: return if self.opt.lr>10 or torch.isnan(self.loss): raise CancelFitException self.losses.append(self.loss.item()) self.lrs.append(self.opt.lr) lrfind = LRFinder() learn = Learner(simple_cnn(), dls, cross_entropy, lr=0.1, cbs=cbs+[lrfind]) learn.fit(2) plt.plot(lrfind.lrs[:-2],lrfind.losses[:-2]) 
plt.xscale('log') class OneCycle(Callback): def __init__(self, base_lr): self.base_lr = base_lr def before_fit(self): self.lrs = [] def before_batch(self): if not self.model.training: return n = len(self.dls.train) bn = self.epoch*n + self.num mn = self.n_epochs*n pct = bn/mn pct_start,div_start = 0.25,10 if pct<pct_start: pct /= pct_start lr = (1-pct)*self.base_lr/div_start + pct*self.base_lr else: pct = (pct-pct_start)/(1-pct_start) lr = (1-pct)*self.base_lr self.opt.lr = lr self.lrs.append(lr) onecyc = OneCycle(0.1) learn = Learner(simple_cnn(), dls, cross_entropy, lr=0.1, cbs=cbs+[onecyc]) learn.fit(8) plt.plot(onecyc.lrs); ``` ## Conclusion ## Questionnaire > tip: Experiments: For the questions here that ask you to explain what some function or class is, you should also complete your own code experiments. 1. What is `glob`? 1. How do you open an image with the Python imaging library? 1. What does `L.map` do? 1. What does `Self` do? 1. What is `L.val2idx`? 1. What methods do you need to implement to create your own `Dataset`? 1. Why do we call `convert` when we open an image from Imagenette? 1. What does `~` do? How is it useful for splitting training and validation sets? 1. Does `~` work with the `L` or `Tensor` classes? What about NumPy arrays, Python lists, or pandas DataFrames? 1. What is `ProcessPoolExecutor`? 1. How does `L.range(self.ds)` work? 1. What is `__iter__`? 1. What is `first`? 1. What is `permute`? Why is it needed? 1. What is a recursive function? How does it help us define the `parameters` method? 1. Write a recursive function that returns the first 20 items of the Fibonacci sequence. 1. What is `super`? 1. Why do subclasses of `Module` need to override `forward` instead of defining `__call__`? 1. In `ConvLayer`, why does `init` depend on `act`? 1. Why does `Sequential` need to call `register_modules`? 1. Write a hook that prints the shape of every layer's activations. 1. What is "LogSumExp"? 1. Why is `log_softmax` useful? 1. 
What is `GetAttr`? How is it helpful for callbacks? 1. Reimplement one of the callbacks in this chapter without inheriting from `Callback` or `GetAttr`. 1. What does `Learner.__call__` do? 1. What is `getattr`? (Note the case difference to `GetAttr`!) 1. Why is there a `try` block in `fit`? 1. Why do we check for `model.training` in `one_batch`? 1. What is `store_attr`? 1. What is the purpose of `TrackResults.before_epoch`? 1. What does `model.cuda` do? How does it work? 1. Why do we need to check `model.training` in `LRFinder` and `OneCycle`? 1. Use cosine annealing in `OneCycle`. ### Further Research 1. Write `resnet18` from scratch (refer to <<chapter_resnet>> as needed), and train it with the `Learner` in this chapter. 1. Implement a batchnorm layer from scratch and use it in your `resnet18`. 1. Write a Mixup callback for use in this chapter. 1. Add momentum to SGD. 1. Pick a few features that you're interested in from fastai (or any other library) and implement them in this chapter. 1. Pick a research paper that's not yet implemented in fastai or PyTorch and implement it in this chapter. - Port it over to fastai. - Submit a pull request to fastai, or create your own extension module and release it. - Hint: you may find it helpful to use [`nbdev`](https://nbdev.fast.ai/) to create and deploy your package.
github_jupyter
# High-performance simulations with TFF This tutorial will describe how to setup high-performance simulations with TFF in a variety of common scenarios. Note: The mechanisms covered here are not included in the latest release, have not been tested yet, and the API may evolve. In order to follow this tutorial, you will need to build a TFF pip package from scratch from the latest sources, and install it in a Jupyter notebook with a Python 3 runtime. The new executor stack is not compatible with Python 2. TODO(b/134543154): Populate the content, some of the things to cover here: - using GPUs in a single-machine setup, - multi-machine setup on GCP/GKE, with and without TPUs, - interfacing MapReduce-like backends, - current limitations and when/how they will be relaxed. ## Before we begin First, make sure your notebook is connected to a backend that has the relevant components (including gRPC dependencies for multi-machine scenarios) compiled. Now, let's start by loading the MNIST example from the TFF website, and declaring the Python function that will run a small experiment loop over a group of 10 clients. ``` #@test {"skip": true} !pip install --quiet --upgrade tensorflow_federated # Note: Jupyter requires a patch to asyncio. 
!pip install --quiet --upgrade nest_asyncio import nest_asyncio nest_asyncio.apply() import collections import time import tensorflow as tf tf.compat.v1.enable_v2_behavior() import tensorflow_federated as tff source, _ = tff.simulation.datasets.emnist.load_data() def map_fn(example): return collections.OrderedDict( x=tf.reshape(example['pixels'], [-1, 784]), y=example['label']) def client_data(n): ds = source.create_tf_dataset_for_client(source.client_ids[n]) return ds.repeat(10).shuffle(500).batch(20).map(map_fn) train_data = [client_data(n) for n in range(10)] batch = tf.nest.map_structure(lambda x: x.numpy(), next(iter(train_data[0]))) def model_fn(): model = tf.keras.models.Sequential([ tf.keras.layers.Input(shape=(784,)), tf.keras.layers.Dense(units=10, kernel_initializer='zeros'), tf.keras.layers.Softmax(), ]) return tff.learning.from_keras_model( model, dummy_batch=batch, loss=tf.keras.losses.SparseCategoricalCrossentropy(), metrics=[tf.keras.metrics.SparseCategoricalAccuracy()]) trainer = tff.learning.build_federated_averaging_process( model_fn, client_optimizer_fn=lambda: tf.keras.optimizers.SGD(0.02)) def evaluate(num_rounds=10): state = trainer.initialize() for _ in range(num_rounds): t1 = time.time() state, metrics = trainer.next(state, train_data) t2 = time.time() print('loss {}, round time {}'.format(metrics.loss, t2 - t1)) ``` ## Single-machine simulations Now on by default. ``` evaluate() ``` ## Multi-machine simulations on GCP/GKE, GPUs, TPUs, and beyond... Coming very soon.
github_jupyter
<center><h1 style="font-size:40px;">Exercise II:<br> Multi-layer perceptrons for classification and regression problems. </h1></center> --- # Introduction Welcome to the second lab in the Deep learning course! In this lab we will continue to take a look at four parts for MLP classification; * Introduction for setup and train an MLP * Impact of overfitting in validation performance * Avoid overfitting for a regression problem * Model selection for classification The lab includes different datasets, binary and multiple classification problems, and function approximation problems. The first part of the lab uses three different synthetic classification problems. They are all 2D binary classification problems that allow for easy visual inspection of the other classes and the network decision boundary. The datasets are defined as *syn1, syn2*, and *syn3*. These datasets are generated "on the fly" each time and are sampled from normal distributions. Note that as the data is sampled from a distribution, the data will vary for each time the data is generated it will be slightly different. All tasks include **TODO's** these are expected to be done before the deadline for this lab. The labs also include **Question**, which should be answered and included in the report. The lab gives a hand to ensure correct answer with a few asserts or the expected results. Some sections do not contain any **TODO's** but are good to understand. Good luck! 
--- The following code allows us to edit imported files without restarting the kernel for the notebook ``` %load_ext autoreload %autoreload 2 # Hacky solution to access the global utils package import sys,os sys.path.append(os.path.dirname(os.path.realpath(''))) import numpy as np import matplotlib.pyplot as plt import torch import pytorch_lightning as pl from config import LabConfig from dataset import MLPData from utils.model import Model from utils.progressbar import LitProgressBar from utils.model import Model from torch.utils.data import TensorDataset, DataLoader from utils import ( plot, progressbar ) cfg = LabConfig() ``` ## Plotting the data To get an idea of the datasets for this lab. Below plots with 100 sample points are generated for syn1-syn3 and the spiral dataset. ``` plot.data_distribution({ "Syn1":MLPData.syn1(100), "Syn2":MLPData.syn2(100), "Syn3":MLPData.syn3(100), "Spiral":MLPData.spiral(cfg.spiral_path) }) ``` # Defining the MLP model This cell defines the MLP model. There are a number of parameters that is needed to define a model. Here is a list of them: **Note:** They can all be specified when you call this function in later cells. The ones specified in this cell are the default values. ``` class MLP(torch.nn.Module): def __init__(self, inp_dim=None, hidden_nodes=1, # number of nodes in hidden layer num_out=None, **kwargs ): super(MLP, self).__init__() self.fc1 = torch.nn.Linear(inp_dim, hidden_nodes) self.relu = torch.nn.ReLU() self.fc2 = torch.nn.Linear(hidden_nodes, num_out) def forward(self, x): hidden = self.fc1(x) relu = self.relu(hidden) output = self.fc2(relu) return torch.sigmoid(output) ``` ## Define a function that allow us to convert numpy to pytorch DataLoader With this fucnction we can take a dataset from numpy and convert it to a torch tensor and then to a Dataset. 
``` def numpy2Dataloader(x,y, batch_size=25, num_workers=10,**kwargs): return DataLoader( TensorDataset( torch.from_numpy(x).float(), torch.from_numpy(y).unsqueeze(1).float() ), batch_size=batch_size, num_workers=num_workers, **kwargs ) ``` # Excercise 1 The cell below should be used for question 1. Looking at the code will help you understand how the network is created, trained and evaluated. It will be useful for the other questions. **TODO:** Use syn1 with 100 data points and train a linear MLP to separate the two classes, i.e. use a single hidden node. What happened? Can the problem be solved with a single hidden node? ## Dataset First we convert our dataset into a pytorch Dataset. Thereafter we load it into our DataLoader. Note that we here define the batch_size and the number of workers that should be used. ``` x,y = MLPData.syn1(10000) train_loader = numpy2Dataloader(x,y) ``` ## Configuration Setup our local config that should be used for the trainer. ``` config = { 'max_epochs':4, 'model_params':{ 'inp_dim':x.shape[1], 'hidden_nodes':1, # activation functions for the hidden layer 'num_out':1 # if binary --> 1 | regression--> num inputs | multi-class--> num of classes }, 'criterion':torch.nn.BCELoss(), # error function 'optimizer':{ "type":torch.optim.Adam, "args":{ "lr":0.005, } } } ``` ## Training Lastly, put everything together and call on the trainers fit method. ``` model = Model(MLP(**config["model_params"]),**config) trainer = pl.Trainer( max_epochs=config['max_epochs'], gpus=cfg.GPU, logger=pl.loggers.TensorBoardLogger(save_dir=cfg.TENSORBORD_DIR), callbacks=[LitProgressBar()], progress_bar_refresh_rate=1, weights_summary=None, # Can be None, top or full num_sanity_val_steps=10, ) trainer.fit( model, train_dataloader=train_loader ); ``` ## Evaluation ``` plot.decision_bondary(trainer.lightning_module, x,y) # To be fixed! #metrics.stats_class() plot.stats_class(x, y, 'Training', trainer.get_model()) ```
github_jupyter
``` %load_ext autoreload %autoreload 2 from neural_circuits.LRRNN import get_W_eigs_np import numpy as np import os import pickle import matplotlib.pyplot as plt import seaborn as sns import itertools import torch torch.manual_seed(0) import tensorflow as tf from epi.models import Model, Parameter from epi.util import get_max_H_dist from neural_circuits.LRRNN import get_W_eigs_np, get_W_eigs_tf, get_simulator, \ get_epi_times, get_snpe_times, \ load_ME_EPI_LRRNN, load_best_SNPE_LRRNN, \ SNPE_entropy, sim_r2RNN, \ tf_num_params, torch_num_params, eig_scatter, \ get_EPI_conv, get_SNPE_conv, get_SMC_conv import pyabc import warnings warnings.filterwarnings('ignore') """RNN stable amplification.""" ``` ## Panel B ``` colors = sns.color_palette() # SNPE setup K = 1 num_sims = 1000 num_batch = 200 num_atoms = 100 x0 = np.array([0.5, 1.5]) # EPI setup J_eig_realmax_mean = 0.5 Js_eig_max_mean = 1.5 eig_std = 0.25 mu = np.array([J_eig_realmax_mean, Js_eig_max_mean, eig_std**2, eig_std**2], dtype=np.float32) SMC_conv = {} SNPE_conv = {} EPI_conv = {} EPI_eps_conv = {} Ns = [2, 5, 10, 25, 50, 100, 250] eps = [0.5] random_seeds = [1,2,3,4,5] g = 0.01 for i, N in enumerate(Ns): for j, _eps in enumerate(eps): if j==0: smc_times, smc_sims = get_SMC_conv(N, random_seeds) SMC_conv.update({N:{'times':smc_times, 'sims':smc_sims}}) if N < 50: _num_sims = num_sims elif N == 50: _num_sims = 25000 elif N >= 100: _num_sims = 250000 _num_batch = 1000 if N >= 100 else num_batch snpe_conv = get_SNPE_conv(N, g, K, x0, _eps, num_sims=_num_sims, num_batch=_num_batch, num_atoms=num_atoms, random_seeds=random_seeds) if snpe_conv[0] is not None: SNPE_conv.update({(N,_eps):{'times':snpe_conv[0], 'sims':snpe_conv[1]}}) epi_conv = get_EPI_conv(N, g, K) if epi_conv[0] is not None: EPI_conv.update({(N,_eps):{'times':epi_conv[0], 'sims':epi_conv[1]}}) epi_eps_conv = get_EPI_conv(N, g, K, _eps) if epi_eps_conv[0] is not None: EPI_eps_conv.update({(N,_eps):{'times':epi_eps_conv[0], 
'sims':epi_eps_conv[1]}}) ``` ## Figure 2A ``` Ns = [2, 5, 10, 25, 50, 100, 250] _eps = 0.5 epi_conv_means = [np.nanmean(EPI_conv[(N,_eps)]['times']) for N in Ns] epi_conv_stds = [np.nanstd(EPI_conv[(N,_eps)]['times']) for N in Ns] fontsize=15 scat_alpha = 1. D_xticks = np.array([4*N for N in Ns]) xticks = np.log(Ns) xtick_labels = ["%d\n(N=%d)" % (D,D//4) if ((D//4) in Ns) else '' for D in D_xticks] yticks = np.log([1., 10., 60., 10*60., 60*60., 5*60*60., 24*60*60.]) ytick_labels = ["1 s", "10 s", "1 min", "10 min", "1 hour", "5 hours", "1 day"] log_y_jit_std = 0.2 dotsize = 60 fig, ax = plt.subplots(1,1,figsize=(6,4)) #ax.plot(np.log(np.array(Ns)), np.log(epi_conv_means), color=colors[0]) nan_y = 4*7*24*60*60. for N in Ns: smc_conv_times = SMC_conv[N]['times'] if N==10 and len(smc_conv_times) == 0: smc_conv_times = 5*[np.nan] smc_y = np.array([val if not np.isnan(val) else nan_y for val in smc_conv_times]) smc_log_jit_y = np.array([0. if not np.isnan(val) else np.random.normal(0., log_y_jit_std) for val in smc_conv_times]) snpe_y = np.array([val if not np.isnan(val) else nan_y for val in SNPE_conv[(N, _eps)]['times']]) snpe_log_jit_y = np.array([0. if not np.isnan(val) else np.random.normal(0., log_y_jit_std) for val in SNPE_conv[(N,_eps)]['times']]) epi_y = np.array([val if not np.isnan(val) else nan_y for val in EPI_eps_conv[(N, _eps)]['times']]) epi_log_jit_y = np.array([0. 
if not np.isnan(val) else np.random.normal(0., log_y_jit_std) for val in EPI_conv[(N,_eps)]['times']]) print(snpe_y) if epi_y.shape[0] == 0: log_epi_y = np.nan*np.ones((len(epi_y))) else: assert(len(epi_y) == 5) log_epi_y = np.log(epi_y) + epi_log_jit_y if snpe_y.shape[0] == 0: log_snpe_y = np.nan*np.ones((len(snpe_y))) else: assert(len(snpe_y) == 5) log_snpe_y = np.log(snpe_y) + snpe_log_jit_y if smc_y.shape[0] == 0: log_smc_y = np.nan*np.ones((len(smc_y))) else: print(smc_y) #assert(len(smc_y) == 5) log_smc_y = np.log(smc_y) + smc_log_jit_y smc_log_jit = np.random.normal(0., 0.075, (len(smc_y),)) snpe_log_jit = np.random.normal(0., 0.075, (len(snpe_y),)) epi_log_jit = np.random.normal(0., 0.075, (len(epi_y),)) ax.scatter(np.log(N)+smc_log_jit, log_smc_y, color=colors[2], s=dotsize, edgecolor='k', alpha=scat_alpha) ax.scatter(np.log(N)+snpe_log_jit, log_snpe_y, color=colors[1], s=dotsize, edgecolor='k', alpha=scat_alpha) ax.scatter(np.log(N)+epi_log_jit, log_epi_y, color=colors[0], s=dotsize, edgecolor='k', alpha=scat_alpha) ax.set_xticks(xticks) ax.set_xticklabels(xtick_labels, fontname="Arial", fontsize=fontsize-2) ax.set_yticks(yticks) ax.set_yticklabels(ytick_labels, fontname="Arial", fontsize=fontsize-2) plt.savefig(os.path.join("figures", "fig4", "converge_time.pdf")) #ax.set_xlabel('parameter dimension (|z|)', fontname="Arial", fontsize=fontsize) #ax.set_ylabel('time to convergence', fontname="Arial", fontsize=fontsize) ``` ## Figure 2B ``` Ns = [2, 5, 10, 25, 50, 100, 250] yticks = np.log10([900, 1e3, 1e4, 1e5, 1e6]) ytick_labels = ["", "1k", "10k", "100k", "1m"] epi_conv_means = [np.nanmean(EPI_conv[(N,_eps)]['sims']) for N in Ns] epi_conv_stds = [np.nanstd(EPI_conv[(N,_eps)]['sims']) for N in Ns] fig, ax = plt.subplots(1,1,figsize=(6,4)) nan_y = 1e8 for N in Ns: smc_conv_sims = SMC_conv[N]['sims'] if N==10 and len(smc_conv_sims) == 0: smc_conv_sims = 5*[np.nan] smc_y = np.array([val if not np.isnan(val) else nan_y for val in smc_conv_sims]) smc_log_jit_y 
= np.array([0. if not np.isnan(val) else np.random.normal(0., log_y_jit_std) for val in smc_conv_sims]) snpe_y = np.array([val if not np.isnan(val) else nan_y for val in SNPE_conv[(N, _eps)]['sims']]) snpe_log_jit_y = np.array([0. if not np.isnan(val) else np.random.normal(0., log_y_jit_std) for val in SNPE_conv[(N,_eps)]['sims']]) epi_y = np.array([val if not np.isnan(val) else nan_y for val in EPI_eps_conv[(N, _eps)]['sims']]) epi_log_jit_y = np.array([0. if not np.isnan(val) else np.random.normal(0., log_y_jit_std) for val in EPI_conv[(N,_eps)]['sims']]) if epi_y.shape[0] == 0: log_epi_y = np.nan*np.ones((len(epi_y))) else: assert(len(epi_y) == 5) log_epi_y = np.log10(epi_y) + epi_log_jit_y if snpe_y.shape[0] == 0: log_snpe_y = np.nan*np.ones((len(snpe_y))) else: assert(len(snpe_y) == 5) log_snpe_y = np.log10(snpe_y) + snpe_log_jit_y if smc_y.shape[0] == 0: log_smc_y = np.nan*np.ones((len(smc_y))) else: #assert(len(smc_y) == 5) log_smc_y = np.log10(smc_y) + smc_log_jit_y smc_log_jit = np.random.normal(0., 0.05, (len(smc_y),)) snpe_log_jit = np.random.normal(0., 0.05, (len(snpe_y),)) epi_log_jit = np.random.normal(0., 0.05, (len(epi_y),)) ax.scatter(np.log(N)+smc_log_jit, log_smc_y, color=colors[2], s=dotsize, edgecolor='k', alpha=scat_alpha) ax.scatter(np.log(N)+snpe_log_jit, log_snpe_y, color=colors[1], s=dotsize, edgecolor='k', alpha=scat_alpha) ax.scatter(np.log(N)+epi_log_jit, log_epi_y, color=colors[0], s=dotsize, edgecolor='k', alpha=scat_alpha) #ax.plot(np.log(np.array(Ns)), np.log10(epi_conv_means), color=colors[0]) ax.set_xticks(xticks) ax.set_xticklabels(xtick_labels, fontname="Arial", fontsize=fontsize-2) ax.set_yticks(yticks) ax.set_yticklabels(ytick_labels, fontname="Arial", fontsize=fontsize-2) plt.savefig(os.path.join("figures", "fig4", "converge_sims.pdf")) #ax.set_xlabel('parameter dimension (|z|)', fontname="Arial", fontsize=fontsize) #ax.set_ylabel('# sims to convergence', fontname="Arial", fontsize=fontsize) Ns = [2, 5, 10, 100, 250] g = 0.01 
num_Ns = len(Ns) random_seeds = [1,2,3,4,5] def load_SMC_samps(N, random_seeds): zs = [] for _rs in random_seeds: base_path = os.path.join("data", "smc") save_dir = "SMC_RNN_stab_amp_N=%d_rs=%d" % (N, _rs) save_path = os.path.join(base_path, save_dir) try: with open(os.path.join(save_path, "optim.pkl"), "rb") as f: optim = pickle.load(f) except: continue zs.append(optim['z']) return zs SMCs = {} SNPE_max_val = {} EPI_max_ent = {} for i, N in enumerate(Ns): SMCs.update({N:{'zs':load_SMC_samps(N, random_seeds)}}) if N < 50: _num_sims = num_sims elif N == 50: _num_sims = 25000 elif N == 100: _num_sims = 250000 _num_batch = 1000 if N == 100 else num_batch snpe_optim = load_best_SNPE_LRRNN(N, g, K, x0, num_sims=_num_sims, num_batch=_num_batch, num_atoms=num_atoms, random_seeds=random_seeds) if snpe_optim is not None: SNPE_max_val.update({N:snpe_optim}) epi_optim = load_ME_EPI_LRRNN(N, g, K, mu, by_df=True) if epi_optim is not None: EPI_max_ent.update({N:epi_optim}) ``` ## Figure 2C-D ``` g = 0.01 plot_Ns = [2, 5, 10, 100, 250] num_Ns = len(plot_Ns) num_plot = 100 t = np.linspace(0,2,201) n_traces = 15 fontsize = 12 all_T_xs = [] all_r_ts = [] all_colors = [] for i, N in enumerate(plot_Ns): T_xs = [] r_ts = [] _colors = [] # SMC if N in SMCs: epi_optim = EPI_max_ent[N] model = epi_optim['model'] zs = np.array(SMCs[N]['zs']) if len(zs): smc_zs = np.reshape(zs, (zs.shape[0]*zs.shape[1], zs.shape[2])).astype(np.float32) smc_T_x = model.eps(smc_zs)[:,:2] r_t_smc = sim_r2RNN(smc_zs, N, n_traces, t) T_xs.append(smc_T_x) _colors.append(colors[2]) r_ts.append(r_t_smc) # SNPE if N in SNPE_max_val: snpe_optim = SNPE_max_val[N] round_val_log_probs = snpe_optim['round_val_log_probs'] best_round = np.argmax(round_val_log_probs) + 1 snpe_z = snpe_optim['zs'][best_round][:num_plot] snpe_T_x = snpe_optim['xs'][best_round][:num_plot] r_t_snpe = sim_r2RNN(snpe_z, N, n_traces, t) T_xs.append(snpe_T_x) _colors.append(colors[1]) r_ts.append(r_t_snpe) # EPI if N in EPI_max_ent: epi_optim = 
EPI_max_ent[N] dist = epi_optim['dist'] model = epi_optim['model'] epi_z = dist(num_plot) epi_T_x = model.eps(epi_z)[:,:2] r_t_epi = sim_r2RNN(epi_z, N, n_traces, t) T_xs.append(epi_T_x) _colors.append(colors[0]) r_ts.append(r_t_epi) T_xs.reverse() all_T_xs.append(T_xs) all_r_ts.append(r_ts) all_colors.append(_colors) fig, axs = plt.subplots(1, num_Ns, figsize=(num_Ns*3, 3)) for i, N in enumerate(plot_Ns): r_ts = all_r_ts[i] _colors = all_colors[i] for j, r_t in enumerate(r_ts): axs[i].plot(t, r_t.T, c=_colors[j], lw=1., alpha=.6) axs[i].set_ylim([0,3]) plt.savefig(os.path.join("figures", "fig4", "T_x_simplots2.pdf")) plt.show() fig, axs = plt.subplots(1, num_Ns, figsize=(num_Ns*3, 3)) for i, N in enumerate(plot_Ns): T_xs = all_T_xs[i] _colors = all_colors[i] _colors.reverse() eig_scatter(T_xs, colors=_colors, ax=axs[i], perm=not(N==2)) plt.savefig(os.path.join("figures", "fig4", "T_x_simplots1.pdf")) plt.show() def regime_breakdown(T_x, c='k'): total = T_x.shape[0] num_stable_amp = np.sum(np.logical_and(T_x[:,0] < 1., T_x[:,1] > 1.)) num_stable_mon = np.sum(np.logical_and(T_x[:,0] < 1., T_x[:,1] < 1.)) num_unstable = np.sum(T_x[:,0] > 1.) sizes = [num_stable_amp, num_stable_mon, num_unstable] #labels = ['stable amplification', 'stable monotonic', 'unstable'] explode = [0., 0., 0.] 
fig, ax = plt.subplots(1,1) ax.pie(sizes, colors=[c, 0.6*np.ones(3), 0.3*np.ones(3)], explode=explode, #labels=labels, shadow=True, startangle=90) return num_stable_amp, num_stable_mon, num_unstable for i, N in enumerate(Ns): if N < 10: T_x_epi, T_x_snpe, T_x_smc = all_T_xs[i] elif N < 250: T_x_epi, T_x_snpe = all_T_xs[i] else: T_x_epi = all_T_xs[i][0] regime_breakdown(T_x_epi, colors[0]) plt.savefig(os.path.join("figures", "fig4", "EPI_pie_N=%d" % N)) plt.show() if N < 250: regime_breakdown(T_x_snpe, colors[1]) plt.savefig(os.path.join("figures", "fig4", "SNPE_pie_N=%d" % N)) plt.show() if N < 10: regime_breakdown(T_x_smc, colors[2]) plt.savefig(os.path.join("figures", "fig4", "SMC_pie_N=%d" % N)) plt.show() ```
github_jupyter
SOP010 - Upgrade a big data cluster =================================== Upgrade a Big Data Cluster using `azdata`. Steps ----- ### Parameters ``` docker_image_tag = f"<enter here>" # i.e. 15.0.4003.10029_2 print('PARAMETERS:') print('') print(f'docker_image_tag = {docker_image_tag}') print('') ``` ### Common functions Define helper functions used in this notebook. ``` # Define `run` function for transient fault handling, suggestions on error, and scrolling updates on Windows import sys import os import re import json import platform import shlex import shutil import datetime from subprocess import Popen, PIPE from IPython.display import Markdown retry_hints = {} # Output in stderr known to be transient, therefore automatically retry error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help install_hint = {} # The SOP to help install the executable if it cannot be found first_run = True rules = None debug_logging = False def run(cmd, return_output=False, no_output=False, retry_count=0): """Run shell command, stream stdout, print stderr and optionally return output NOTES: 1. 
Commands that need this kind of ' quoting on Windows e.g.: kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name} Need to actually pass in as '"': kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name} The ' quote approach, although correct when pasting into Windows cmd, will hang at the line: `iter(p.stdout.readline, b'')` The shlex.split call does the right thing for each platform, just use the '"' pattern for a ' """ MAX_RETRIES = 5 output = "" retry = False global first_run global rules if first_run: first_run = False rules = load_rules() # When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see: # # ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)') # if platform.system() == "Windows" and cmd.startswith("azdata sql query"): cmd = cmd.replace("\n", " ") # shlex.split is required on bash and for Windows paths with spaces # cmd_actual = shlex.split(cmd) # Store this (i.e. kubectl, python etc.) 
to support binary context aware error_hints and retries # user_provided_exe_name = cmd_actual[0].lower() # When running python, use the python in the ADS sandbox ({sys.executable}) # if cmd.startswith("python "): cmd_actual[0] = cmd_actual[0].replace("python", sys.executable) # On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail # with: # # UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128) # # Setting it to a default value of "en_US.UTF-8" enables pip install to complete # if platform.system() == "Darwin" and "LC_ALL" not in os.environ: os.environ["LC_ALL"] = "en_US.UTF-8" # When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc` # if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ: cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc") # To aid supportabilty, determine which binary file will actually be executed on the machine # which_binary = None # Special case for CURL on Windows. The version of CURL in Windows System32 does not work to # get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance # of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost # always the first curl.exe in the path, and it can't be uninstalled from System32, so here we # look for the 2nd installation of CURL in the path) if platform.system() == "Windows" and cmd.startswith("curl "): path = os.getenv('PATH') for p in path.split(os.path.pathsep): p = os.path.join(p, "curl.exe") if os.path.exists(p) and os.access(p, os.X_OK): if p.lower().find("system32") == -1: cmd_actual[0] = p which_binary = p break # Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this # seems to be required for .msi installs of azdata.cmd/az.cmd. 
(otherwise Popen returns FileNotFound) # # NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split. # if which_binary == None: which_binary = shutil.which(cmd_actual[0]) if which_binary == None: if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None: display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") else: cmd_actual[0] = which_binary start_time = datetime.datetime.now().replace(microsecond=0) print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)") print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})") print(f" cwd: {os.getcwd()}") # Command-line tools such as CURL and AZDATA HDFS commands output # scrolling progress bars, which causes Jupyter to hang forever, to # workaround this, use no_output=True # # Work around a infinite hang when a notebook generates a non-zero return code, break out, and do not wait # wait = True try: if no_output: p = Popen(cmd_actual) else: p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1) with p.stdout: for line in iter(p.stdout.readline, b''): line = line.decode() if return_output: output = output + line else: if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file regex = re.compile(' "(.*)"\: "(.*)"') match = regex.match(line) if match: if match.group(1).find("HTML") != -1: display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"')) else: display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"')) wait = False break # otherwise infinite hang, have not worked out why yet. 
else: print(line, end='') if rules is not None: apply_expert_rules(line) if wait: p.wait() except FileNotFoundError as e: if install_hint is not None: display(Markdown(f'HINT: Use {install_hint} to resolve this issue.')) raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait() if not no_output: for line in iter(p.stderr.readline, b''): try: line_decoded = line.decode() except UnicodeDecodeError: # NOTE: Sometimes we get characters back that cannot be decoded(), e.g. # # \xa0 # # For example see this in the response from `az group create`: # # ERROR: Get Token request returned http error: 400 and server # response: {"error":"invalid_grant",# "error_description":"AADSTS700082: # The refresh token has expired due to inactivity.\xa0The token was # issued on 2018-10-25T23:35:11.9832872Z # # which generates the exception: # # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte # print("WARNING: Unable to decode stderr line, printing raw bytes:") print(line) line_decoded = "" pass else: # azdata emits a single empty line to stderr when doing an hdfs cp, don't # print this empty "ERR:" as it confuses. 
# if line_decoded == "": continue print(f"STDERR: {line_decoded}", end='') if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"): exit_code_workaround = 1 # inject HINTs to next TSG/SOP based on output in stderr # if user_provided_exe_name in error_hints: for error_hint in error_hints[user_provided_exe_name]: if line_decoded.find(error_hint[0]) != -1: display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.')) # apply expert rules (to run follow-on notebooks), based on output # if rules is not None: apply_expert_rules(line_decoded) # Verify if a transient error, if so automatically retry (recursive) # if user_provided_exe_name in retry_hints: for retry_hint in retry_hints[user_provided_exe_name]: if line_decoded.find(retry_hint) != -1: if retry_count < MAX_RETRIES: print(f"RETRY: {retry_count} (due to: {retry_hint})") retry_count = retry_count + 1 output = run(cmd, return_output=return_output, retry_count=retry_count) if return_output: return output else: return elapsed = datetime.datetime.now().replace(microsecond=0) - start_time # WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so # don't wait here, if success known above # if wait: if p.returncode != 0: raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n') else: if exit_code_workaround !=0 : raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n') print(f'\nSUCCESS: {elapsed}s elapsed.\n') if return_output: return output def load_json(filename): """Load a json file from disk and return the contents""" with open(filename, encoding="utf8") as json_file: return json.load(json_file) def load_rules(): """Load any 'expert rules' from the metadata of this notebook (.ipynb) that 
should be applied to the stderr of the running executable""" try: # Load this notebook as json to get access to the expert rules in the notebook metadata. # j = load_json("sop010-upgrade-bdc.ipynb") except: pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename? else: if "metadata" in j and \ "azdata" in j["metadata"] and \ "expert" in j["metadata"]["azdata"] and \ "rules" in j["metadata"]["azdata"]["expert"]: rules = j["metadata"]["azdata"]["expert"]["rules"] rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first. # print (f"EXPERT: There are {len(rules)} rules to evaluate.") return rules def apply_expert_rules(line): """Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so inject a 'HINT' to the follow-on SOP/TSG to run""" global rules for rule in rules: # rules that have 9 elements are the injected (output) rules (the ones we want). Rules # with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029, # not ../repair/tsg029-nb-name.ipynb) if len(rule) == 9: notebook = rule[1] cell_type = rule[2] output_type = rule[3] # i.e. stream or error output_type_name = rule[4] # i.e. ename or name output_type_value = rule[5] # i.e. SystemExit or stdout details_name = rule[6] # i.e. evalue or text expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it! 
if debug_logging: print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.") if re.match(expression, line, re.DOTALL): if debug_logging: print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook)) match_found = True display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.')) print('Common functions defined successfully.') # Hints for binary (transient fault) retry, (known) error and install guide # retry_hints = {'kubectl': ['A connection attempt failed because the connected party did not properly respond after a period of time, or established connection failed because connected host has failed to respond'], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use']} error_hints = {'kubectl': [['no such host', 'TSG010 - Get configuration contexts', '../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb'], ['no such host', 'TSG011 - Restart sparkhistory server', '../repair/tsg011-restart-sparkhistory-server.ipynb'], ['No connection could be made because the target machine actively refused it', 'TSG056 - Kubectl fails with No connection could be made because the target machine actively refused it', '../repair/tsg056-kubectl-no-connection-could-be-made.ipynb']], 'azdata': [['azdata login', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', 
'../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb']]} install_hint = {'kubectl': ['SOP036 - Install kubectl command line interface', '../install/sop036-install-kubectl.ipynb'], 'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']} ``` ### Get the Kubernetes namespace for the big data cluster Get the namespace of the Big Data Cluster use the kubectl command line interface . **NOTE:** If there is more than one Big Data Cluster in the target Kubernetes cluster, then either: - set \[0\] to the correct value for the big data cluster. - set the environment variable AZDATA\_NAMESPACE, before starting Azure Data Studio. 
``` # Place Kubernetes namespace name for BDC into 'namespace' variable if "AZDATA_NAMESPACE" in os.environ: namespace = os.environ["AZDATA_NAMESPACE"] else: try: namespace = run(f'kubectl get namespace --selector=MSSQL_CLUSTER -o jsonpath={{.items[0].metadata.name}}', return_output=True) except: from IPython.display import Markdown print(f"ERROR: Unable to find a Kubernetes namespace with label 'MSSQL_CLUSTER'. SQL Server Big Data Cluster Kubernetes namespaces contain the label 'MSSQL_CLUSTER'.") display(Markdown(f'HINT: Use [TSG081 - Get namespaces (Kubernetes)](../monitor-k8s/tsg081-get-kubernetes-namespaces.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [TSG010 - Get configuration contexts](../monitor-k8s/tsg010-get-kubernetes-contexts.ipynb) to resolve this issue.')) display(Markdown(f'HINT: Use [SOP011 - Set kubernetes configuration context](../common/sop011-set-kubernetes-context.ipynb) to resolve this issue.')) raise print(f'The SQL Server Big Data Cluster Kubernetes namespace is: {namespace}') ``` ### Upgrade the cluster ``` run(f'azdata bdc upgrade --name {namespace} --tag {docker_image_tag}') print('Notebook execution complete.') ```
github_jupyter
# Introduction It can be a troubling time, but we do have hope on the horizon, with the news we get daily about vaccines. Multiple companies are releasing and getting their vaccines approved; we may soon see a path forward. Using the robust toolset provided by Kaggle, I'll show you how to create an interactive map to track, for each state, the percentage of inhabitants that have been vaccinated against COVID-19. To get started, if you haven't already, make your own copy of this notebook by clicking on the **[Copy and Edit]** button in the top right corner. This notebook is an example of a project that you can create based on what you'd learn from taking Kaggle's [Geospatial Analysis course](https://www.kaggle.com/learn/geospatial-analysis). # US Vaccine Tracker We'll use two datasets. - The first dataset has the total number of inhabitants of each state, along with latitude and longitude data for each state's capital city. This dataset is pulled from the 2019 US Census, and I've uploaded it [here](https://www.kaggle.com/peretzcohen/2019-census-us-population-data-by-state). - The second dataset contains a recent estimate for the total number of people that have been vaccinated in each state. This [vaccine dataset](https://github.com/owid/covid-19-data/blob/master/public/data/vaccinations/us_state_vaccinations.csv) is drawn from [Our World In Data](https://ourworldindata.org/), who update their vaccine datasets from the CDC quite regularly. Every time you run this notebook, you'll use the most recent version of their data. In the next code cell, we load and preprocess the data. As output, you'll see the total percent of the population that has been vaccinated in the US, along with a preview of the Pandas DataFrame that we'll use to make the tracker. 
``` # Imports import pandas as pd from datetime import date, timedelta import folium from folium import Marker from folium.plugins import MarkerCluster import math import matplotlib.pyplot as plt import seaborn as sns # Population Data populationData = pd.read_csv('/kaggle/input/2019-census-us-population-data-by-state/2019_Census_US_Population_Data_By_State_Lat_Long.csv') # Get the most recent date for filtering freshDate = date.today() - timedelta(days=1) freshDate = date.strftime(freshDate,"%Y%m%d") freshDate = freshDate[0:4] + "-" + freshDate[4:6] + "-" + freshDate[6:8] # Vaccination data, for most recent date vaccinationData = pd.read_csv('https://raw.githubusercontent.com/owid/covid-19-data/master/public/data/vaccinations/us_state_vaccinations.csv') vaccinationByLocation = vaccinationData.loc[(vaccinationData.date == freshDate)][["location", "people_vaccinated"]] # Vaccination and population data vaccinationAndPopulationByLocation = pd.merge(populationData, vaccinationByLocation, left_on='STATE',right_on='location').drop(columns="location") # Calculate percentage vaccinated by state vaccinationAndPopulationByLocation["percent_vaccinated"] = vaccinationAndPopulationByLocation["people_vaccinated"] / vaccinationAndPopulationByLocation["POPESTIMATE2019"] vaccinationAndPopulationByLocation print("Date ran:", date.today()) # Calculate the total percent vaccinated in the US percentageTotal = vaccinationAndPopulationByLocation["people_vaccinated"].sum() / vaccinationAndPopulationByLocation["POPESTIMATE2019"].sum() print('Percentage Vaccinated in the US: {}%'.format(round(percentageTotal*100, 2))) ``` The next code cell uses the data to create a tracker, with one marker for each state. You can click on the markers to see the percentage of the population that has been vaccinated. 
``` # Create the map v_map = folium.Map(location=[42.32,-71.0589], tiles='cartodbpositron', zoom_start=4) # Add points to the map mc = MarkerCluster() for idx, row in vaccinationAndPopulationByLocation.iterrows(): if not math.isnan(row['long']) and not math.isnan(row['lat']): mc.add_child(Marker(location=[row['lat'], row['long']], tooltip=str(round(row['percent_vaccinated']*100, 2))+"%")) v_map.add_child(mc) # Display the map v_map ``` # Your turn Here are some ideas for how you might improve on the work here: - In Kaggle's [Geospatial Analysis course](https://www.kaggle.com/learn/geospatial-analysis), you learn how to use folium to create many different types of interactive maps. How might you use this data to instead create a choropleth map? - In case you would like to work with more data sources, - The Centers for Disease Control and Prevention (CDC) in the US releases daily vaccine data and has a vaccination progress tracker on its [COVID Data Tracker site](https://covid.cdc.gov/covid-data-tracker/#vaccinations). - NBC News has a [vaccine tracker](https://www.nbcnews.com/health/health-news/map-covid-19-vaccination-tracker-across-u-s-n1252085) as well which is quite well done. Once you have created your own extension of this work, let us know about it in the comments!
github_jupyter
# 函数 - 函数可以用来定义可重复代码,组织和简化 - 一般来说一个函数在实际开发中为一个小功能 - 一个类为一个大功能 - 同样函数的长度不要超过一屏 ## 定义一个函数 def function_name(list of parameters): do something ![](../Photo/69.png) - 以前使用的random 或者range 或者print.. 其实都是函数或者类 ``` def LYX(): print('Hi') LYX()#调用函数()用括号调用 LYX() def LYX(): print('Hi') a = LYX() print(a) def LYX(): print('Hi') return 100 LYX() #Python中的所有函数实际上都是有返回值(return NONE), 如果你没有设置return,那么Python将不显示None, 如果你设置return,将返回特突然这个值 def test2(name = 'huwang'): print(name) test2() # 使用()调用函数 test2('Joker') def shishi(num,num1,num2): print(num +num1 +num2) a,b,c = jiujiu(10,10,11) shishi(a,b,c) print(jiujiu(100,101,102)) def fun1(): num_ = eval(input('>>')) return num_ a = fun1() print(a **3) print(a **2) def fun1(num1,num2,num3): 打印 返回 def fun1(nu1,nu2,nu3): nu1 ** 2 nu2 ** 2 nu3 ** return def minus(nu1,nu2,....nu6) print(nu1_ - nu1,.....) # 当你新函数需要使用原来函数处理过的参数,可以要使用return 返回参数 # 供新函数继续使用 def fun1(num1,num2): pass return num1,num2 def fun2(num1,num2): num11 = num1 ** 2 num22 = num2 ** 2 return num11,num22 def fun3(num1,num2,num3,num4): res1 =num3 - num1 res2 = num4 - num2 print(res1,res2) a,b = fun1(1,2) c,d = fun2(a,b) fun3(a,b,c,d) a,b = Joker() print(a,b) ``` ## 调用一个函数 - functionName() - "()" 就代表调用 ``` def max_(num1,num2): if num1 > num2: return num1 else: return num2 max_(1,2) max_(5,2) max([1,2,3]) max(1,2,3,4,5,6,3) max() def max_(num1,num2,num3): if num1 > num2 and num1 > num3: return num1 elif num2 > num1 and num1 > num3: return num2 elif num3 > num1 and num3 > num2: print('Jelly is a good girl !') max_() def Good(name='Jelly'): print('%s is a good man !'%name) def Good(name='Jelly',name):#注意给与不给值的位置,含有默认值的参数一定要放在最后面 print('%s is a good man !'%name) def Good2(name): print('%s is a good man !'%name) Good('Jelly') #函数的参数如果有默认值的情况,当你调用该函数的时候: 可以不给予参数值,那么就会走该参数的默认值,否则的话, 就走你给予的参数值。 ``` ![](../Photo/70.png) ## 带返回值和不带返回值的函数 - return 返回的内容 - return 返回多个值 - 一般情况下,在多个函数协同完成一个功能的时候,那么将会有返回值 ![](../Photo/71.png) - 当然也可以自定义返回None ## EP: ![](../Photo/72.png) ``` def fun(): num = 
eval(input('输入一个数')) if num % 2 == 0: print('偶数') else: print('奇数') fun() def fun(num): if num % 2 == 0: print('偶数') else: print('奇数') fun(num)() def fun1(): print('haha') def fun2(f): f() fun2(fun1) ``` ## 类型和关键字参数 - 普通参数 - 多个参数 - 默认值参数 - 不定长参数 ``` import os def kuajiang(name): # 函数的传参 os.system('say {}你真是一个大天才'.format(name)) kuajiang('星期二') # 位置参数 不带参数名传递方式 kuajiang(name = '于洁') # 带参数名传递方式 def san(num): return num **3 def liang(num): return num ** 2 def input_(): num = eval(input('>>')) res3 = san(num) res2 = liang(num) print(res3 - res2) input_() ``` ## 普通参数 ## 多个参数 ## 默认值参数 ## 强制命名 ``` U('HJi12') ``` ## 不定长参数 - \*args > - 不定长,来多少装多少,不装也是可以的 - 返回的数据类型是元组 - args 名字是可以修改的,只是我们约定俗成的是args - \**kwargs > - 返回的字典 - 输入的一定要是表达式(键值对) - name,\*args,name2,\**kwargs 使用参数名 ``` test(a=1,b=2,c=3) #返回出来的是一个字典 def test(*args,**kwargs): print(args) print(kwargs) test(1,2,3,4,2,3,4,a=100,b= 200) def test(*args,**kwargs): # print(name) print(args) print(kwargs) test('huwang',123,1312,name2='JOker',a=100,b=1000) ``` ## 变量的作用域 - 局部变量 local - 全局变量 global - globals 函数返回一个全局变量的字典,包括所有导入的变量 - locals() 函数会以字典类型返回当前位置的全部局部变量。 ``` globals() def YY(a1): a1+100 print(a1) YY(a) print(a) locals() a = 1000 def Y(): print(a) Y() a = 1000 b = 10 def Y(): global a,b a += 100 print(a) Y() a = 100 b = 100 c = True d = [] def test2(): global a,b,c,d a = 1000 b = 1000 c = False d.append(999) print(a,b,c) test2() print(a,b,c,d) ``` ## 注意: - global :在进行赋值操作的时候需要声明 - 官方解释:This is because when you make an assignment to a variable in a scope, that variable becomes local to that scope and shadows any similarly named variable in the outer scope. 
- ![](../Photo/73.png) # Homework - 1 ![](../Photo/74.png) ``` def num(): n=0 for n in range(1,100): wujiaoshu = n*(3*n-1)/2 print(wujiaoshu,end=' ') n+=1 if n%10==0: print() num() ``` - 2 ![](../Photo/75.png) ``` def sumDigits(n): import random random = random.randint(0,1000) a = n % 10 b = n // 10 c = b % 10 d = b //10 e = a + c + d return e sumDigits(555) ``` #### - 3 ![](../Photo/76.png) ``` def displaySortedNumbers(num1,num2,num3): nums =[num1,num2,num3] nums.sort() print(nums) displaySortedNumbers(3,2.4,5) ``` - 4 ![](../Photo/77.png) ``` #第四个不写了,另加一个 ``` - 5 ![](../Photo/78.png) ``` def printChars(ch1,ch2,numberPerLine): printChars(1,Z) ``` - 6 ![](../Photo/79.png) ``` def number(year): year = 0 for year in range(2010,2021): if (year % 4 == 0 and year % 100 != 0) or(year%400==0): print(year,'年有366天') else: print(year,'年有365天') year+=1 print() number(2010) ``` - 7 ![](../Photo/80.png) ``` def distance(x1,y1,x2,y2): a = (x2-x1)*(x2-x1)+(y2-y1)*(y2-y1) b = pow((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1),1/2) print(b) distance(1,2,3,4) ``` - 8 ![](../Photo/81.png) ``` def num_(n): for i in range(2,n): if n % i == 0: print('%d 不是质数'%n) break if n == 2^p-1: print(n,'为梅森素数') num_(3) n = int(input('输入一个数')) for i in range(2,n): if n % i == 0: print('%d 不是质数'%n) break else: print('%d 是质数'%n) ``` - 9 ![](../Photo/82.png) ![](../Photo/83.png) ``` def time_(): now=time.time() print(now) print(time.ctime(now)) print(time.localtime(now)) mon=time.localtime(now)[1] day=time.localtime(now)[2] print('当前日期:%s月%s日'%(mon,day)) time.time() now = time.time()#当前时间戳 twl = now-15330*24*60*60#12天前的时间戳 print(twl) print(time.ctime(twl))#格式化12天前时间戳 print(time.localtime(twl))#12天前时间结构体 mon = time.localtime(twl)[1]#从12天前时间结构体中提取月 day = time.localtime(twl)[2]#从12天前时间结构体中提取日 print("12天前日期:%s月%s日"%(mon,day))#打印12天前月与日 now = time.time() print(now) print(time.ctime(now)) print(time.localtime(now)) mon=time.localtime(now)[1] day=time.localtime(now)[2] print('当前日期:%s月%s日'%(mon,day)) ``` - 10 
![](../Photo/84.png) - 11 ### 去网上寻找如何用Python代码发送邮件 ``` import smtplib import email.mime.multipart import email.mime.text msg = email.mime.multipart.MIMEMultipart() msgFrom = '3032438439@1QQ.com' #从该邮箱发送 msgTo = '3578606257@1QQ.com' #发送到该邮箱 smtpSever='smtp.163.com' # 163邮箱的smtp Sever地址 smtpPort = '25' #开放的端口 sqm='将这里替换为你的授权码' # 在登录smtp时需要login中的密码应当使用授权码而非账户密码 msg['from'] = msgFrom msg['to'] = msgTo msg['subject'] = 'Python自动邮件-'+dataNumber content = ''' 你好: 这是一封python3发送的邮件 ''' txt = email.mime.text.MIMEText(content) msg.attach(txt) smtp = smtplib smtp = smtplib.SMTP() ''' smtplib的connect(连接到邮件服务器)、login(登陆验证)、sendmail(发送邮件) ''' smtp.connect(smtpSever, smtpPort) smtp.login(msgFrom, sqm) smtp.sendmail(msgFrom, msgTo, str(msg)) # s = smtplib.SMTP("localhost") # s.send_message(msg) smtp.quit() import smtplib import os from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart from email import encoders user = '3578606257@qq.com' pwd = '0905.0905.' to = ['3578606257@1QQ.com', '0905.0905.@qq.com'] msg = MIMEMultipart() msg['Subject'] = '这里是主题...' content1 = MIMEText('这里是正文!', 'plain', 'utf-8') msg.attach(content1) attfile = 'C:\MY PHOTO' basename = os.path.basename(attfile) fp = open(attfile, 'rb') att = MIMEText(fp.read(), 'base64', 'utf-8') att["Content-Type"] = 'application/octet-stream' att.add_header('Content-Disposition', 'attachment',filename=('gbk', '', basename)) encoders.encode_base64(att) msg.attach(att) s = smtplib.SMTP('smtp.qq.com') s.login(user, pwd) s.sendmail(user, to, msg.as_string()) print('发送成功') s.close() ```
github_jupyter
#1. Install Dependencies First install the libraries needed to execute recipes, this only needs to be done once, then click play. ``` !pip install git+https://github.com/google/starthinker ``` #2. Get Cloud Project ID To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play. ``` CLOUD_PROJECT = 'PASTE PROJECT ID HERE' print("Cloud Project Set To: %s" % CLOUD_PROJECT) ``` #3. Get Client Credentials To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play. ``` CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE' print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS) ``` #4. Enter DV360 Report Parameters Create a DV360 report. 1. Reference field values from the <a href='https://developers.google.com/bid-manager/v1/reports'>DV360 API</a> to build a report. 1. Copy and paste the JSON definition of a report, <a href='https://github.com/google/starthinker/blob/master/tests/scripts/dbm_to_bigquery.json#L9-L40' target='_blank'>sample for reference</a>. 1. The report is only created, a seperate script is required to move the data. 1. To reset a report, delete it from DV360 reporting. Modify the values below for your use case, can be done multiple times, then click play. ``` FIELDS = { 'auth_read': 'user', # Credentials used for reading data. 'report': '{}', # Report body and filters. 'delete': False, # If report exists, delete it before creating a new one. } print("Parameters Set To: %s" % FIELDS) ``` #5. Execute DV360 Report This does NOT need to be modified unles you are changing the recipe, click play. 
``` from starthinker.util.project import project from starthinker.script.parse import json_set_fields USER_CREDENTIALS = '/content/user.json' TASKS = [ { 'dbm': { 'auth': 'user', 'report': {'field': {'name': 'report','kind': 'json','order': 1,'default': '{}','description': 'Report body and filters.'}}, 'delete': {'field': {'name': 'delete','kind': 'boolean','order': 2,'default': False,'description': 'If report exists, delete it before creating a new one.'}} } } ] json_set_fields(TASKS, FIELDS) project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True) project.execute(_force=True) ```
github_jupyter
# Capítulo 6 - Uso de Appium para automatizar acciones en dispositivos ___ ## Conectar un dispositivo ___ ### Pasos comunes Para conectar un dispositivo de Android hay que seguir los siguientes pasos: 1. Descargar e instalar Java jdk 1.8: https://www.oracle.com/technetwork/java/javase/downloads/jdk8-downloads-2133151.html 2. Añadir la variable de entorno JAVA_HOME = "C:\Program Files\Java\jdk {version} " 3. Descargar e instalar Android Studio: https://developer.android.com/studio 4. Añadir la variable de entorno ANDROID_HOME = "C:\Users\\ {user} \AppData\Local\Android\Sdk\" 5. Añadir el directorio "C:\Users\\ {user} \AppData\Local\Android\Sdk\platform-tools\" al Path de Windows #### Emulador Para crear un emulador hay que seguir los siguientes pasos: 1. Lanzar Android Studio, si pide crear un proyecto se crea un vacío (que no usaremos para nada) 2. Dejar que se actualice con las actualizaciones por defecto (puede variar dependiendo de la versión) 3. Ir a "Tools" > "AVD Manager" 4. CLick en "Create Virtual Device". 5. Seleccionar "Phone" > "Nexus 5X", "Next" 6. Seleccionar "Oreo" (API Level 27, Android 8.1), si no está disponible click en descargar, "Next" 7. Nombrar y "Finish" #### Real Para conectar un dispositivo real hay que seguir los siguientes pasos (No todos los dispositivos son compatibles): 1. En el dispositivo: Ir a "Settings" > "About phone" > "Software information" y pulsar "Build number" 7 veces, esto activa el modo "desarrollador" (puede variar según el modelo del dispositivo) 2. En el dispositivo: Ir a "Settings" > "Developer options" y activar "Stay awake" y "USB debugging" (puede variar según el modelo del dispositivo) 3. Conectar por USB y aceptar permisos ### Comprobar la conexión Par comprobar que todo funciona correctamente ejecutar: ``` ! adb devices ``` debería aparecer el nombre del dispositio seguido de "device": ``` List of devices attached LRINFIZPPN7TYHUC device ``` ## Levantar un servidor de Appium en local ___ 1. 
Descargar e instalar Appium-Desktop: https://github.com/appium/appium-desktop/releases/ 2. Iniciar Appium (tarda) 3. Poner Host: 0.0.0.0 y Puerto: 4723, pulsar "Start Server" ## Crear un script con el cliente de Appium para Python ___ Se instalan los sdk's de Appium para Python: ``` ! pip install Appium-Python-Client ``` Importamos la librería: ``` from appium import webdriver import os desired_caps = {} desired_caps['platformName'] = 'Android' desired_caps['deviceName'] = 'Android Emulator' desired_caps['app'] = os.path.join(os.getcwd(), 'example.apk') # ruta a una apk de ejemplo driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps) from appium.webdriver.common.mobileby import MobileBy driver.find_element(MobileBy.ACCESSIBILITY_ID, "Add Contact").click() import time time.sleep(1) driver.find_element(MobileBy.ID, "com.example.android.contactmanager:id/contactNameEditText").send_keys('Alejandro') driver.find_element(MobileBy.ID, "com.example.android.contactmanager:id/contactPhoneEditText").send_keys('987654321') driver.quit() ``` ## Obtener los localizadores de objectos manualmente ___ 1. [Descargar Appium-Inspector-windows-{última versión}.exe](https://github.com/appium/appium-inspector/releases) e iniciarlo 2. Rellenar la tabla con los valores: Name | Type | Value -----|------|------ platformName | text | Android deviceName | text | Android Emulator app | text | {ruta absoluta al}\example.apk 3. Pulsar en "Start Session" Se abrirá una ventana que es similar a pulsar F12 en Chrome
github_jupyter
# Week 3 - Quiz Assignment 1) Assume that the chain rule is used to compute the joint probability of the sentence $P('\text{I got this one}') $. The products of probabilities are represented by $P(got|I) \times P(this|I,got) \times P(one|I,got,this)$ - True - False __Answer__: False It should be $P(I) \times P(got|I) \times P(this|I,got) \times P(one|I,got,this)$ Probability of the sentence $W$: > $P(W) = P(w_1, w_2, ..., w_n)$ Chain Rule: > $P(w_1, w_2, ..., w_n) = P(w_1)P(w_2|x_1)...P(w_n|w_1,...w_{n-1})$ > $P('\text{I got this one}') = P('\text{I}', '\text{got}', '\text{this}', '\text{one}')$ > $P('\text{I got this one}') = P('\text{I}') \times P('\text{got}' | '\text{I}') \times P('\text{this}' | '\text{I got}') \times P('\text{one}' | '\text{I got this}')$ Markove Assumption: > $P('\text{I got this one}') = P('\text{I}') \times P('\text{got}' | '\text{I}') \times P('\text{this}' | '\text{got}') \times P('\text{one}' | '\text{this}')$ *** 2) Assume that the language model is evaluated as given below $\phi(W) = \sqrt[n]{\frac{1}{{P(w_1,w_2,\ldots, w_n)}}}$ *__Note:__* $n$ is the number of words in the sentence. Smoothing will be used if the denominator →0. Is the statement $"\text{Minimizing}$ $ϕ(W)$ $\text{is same as maximizing the probability}$ $P(w_1,w_2,…,w_n)$ $\text{of the sentence"}$ true? 
- True - False __Answer__: True Refer [9], about Perplexity ``` import math def get_nth_root(num,root): ''' Compute the root-th root of num and return it, i.e. num ** (1/root). ''' answer = num ** (1/root) return answer def getPerplexityMetric(n, p): ''' Perplexity of a sentence, (1/p) ** (1/n). Input: n: number of words in the sentence p: probability of the sentence Output: Returns perplexity ''' return get_nth_root(1/p, n) print(getPerplexityMetric(5, 0.333)) print(getPerplexityMetric(5, 0.666)) print(getPerplexityMetric(5, 0.782)) print() print(getPerplexityMetric(11, 0.333)) print(getPerplexityMetric(11, 0.666)) print(getPerplexityMetric(11, 0.782)) # From the outputs below: minimizing perplexity corresponds to maximizing sentence probability ``` *** 3) Select one of the following bigram probabilities that represents the sentence I love dogs (i) __&lt;\S&gt; I love dogs&lt;/S&gt;__ P (I)· P (love | I) · P (dogs | I love) (ii) P (<S>) · P (I | __&lt;S&gt;__) · P (love | __&lt;S&gt;__ I) · P (dogs | I love) · P (__&lt;/S&gt;__ | love dogs) (iii) P (I | __&lt;S&gt;__) · P (love | I) · P (dogs | love) · P (__&lt;/S&gt;__ | dogs) - a - b - c - d __Answer__: c *** 4) The table given below contains some of the bigram frequencies of $(determine,w_i)$ where $w_i$ represents every word in the column | first word | the | how | this | a | his | |------------|:-----:|:---:|--------|-------|--------| | determine | 0.115 | 0 | 0.0125 | 0.006 | 0.0013 | What is the conditional probability of $P(his|determine)$ if the probability of $determine$ as the starting word is 0.6?
- 0.0031 - 0.0022 - 0.0122 - 0.0128 __Answer__: 0.0022 *Derivation:* Given $E_1 = determine$, $E_2 = his$, $P(E_1, E_2) = 0.0013$, $P(E1) = 0.6$ Conditional Probability Formula: $P(E_2 | E_1) = \frac{P(E_1,E_2)} {P(E_1)}$ if $P(E_1) > 0$ $P(his | determine) = \frac{P(determine,his)} {P(determine)}$ $P(his | determine) = \frac{0.0013} {0.6}$ 0.00216666666666666666666666666667 *** 5) Assuming that a language model assigns the following conditional probabilities to a 4-word sentence (S)=0.01212. What is the perplexity? Note: Perplexity is defined in question 2. - 2.41 - 3.14 - 4.35 - 3.014 __Answer__: d ``` print(getPerplexityMetric(4, 0.01212)) ``` *** 6) Consider the following three sentences Ram read a novel Raj read a journal Rai read a book What is the bigram probability of the sentence Ram read a book? Include start and end symbols in your calculations - 0.06 - 0.2222 - 0.1111 - 0.0556 __Answer__: 3 *** 7) Consider the following three sentences Ram read a novel Raj read a journal Rai read a book What is the trigram probability of the sentence Ram read a book? Include start and end symbols in your calculations - 0.06 - 0.2222 - 0.1111 - 0.0556 - None of the above __Answer__: 3 ``` from ngram_lm import NGramLM corpus = [ 'Ram read a novel', \ 'Raj read a journal', \ 'Rai read a book' ] query = 'Ram read a book' bi_lm = NGramLM(corpus,True,True) bi_lm.buildBiGramModel() print(bi_lm.getBiGramProbability(query)) # Add additional start/stop tri_lm = NGramLM(corpus,True,True,2) tri_lm.buildTriGramModel() print(tri_lm.getTriGramProbability(query)) ``` *** 8) In Naive Bayes classification, Posterior probability is estimated for predicting the class? - True - False __Answer__: True Refer <https://www.saedsayad.com/naive_bayesian.htm> for explanation *** 9) The following table contains the extracted features of emails and their classes (Spam and not Spam). The prior probabilities of the two classes (Spam,not Spam) = (0.57, 0.43) true? 
| # of Words/# of capitalized words | Subject in all capital letters | # URLs > 3 | Spam | |-----------------------------------|--------------------------------|------------|------| | No | No | 0 | No | | No | No | 5 | No | | Yes | Yes | 4 | Yes | | Yes | No | 4 | Yes | | Yes | No | 0 | No | | No | Yes | 7 | Yes | | Yes | No | 0 | No | - Yes - No __Answer__: No - Count of Total Emails: 7 - Count of Spam Emails: 3 - Count of NoSpam Emails: 4 $P(Spam) = \frac{Count(Spam Emails)}{Count(Total Emails} = \frac{3}{7} = 0.43$ $P(NoSpam) = \frac{Count(NoSpam Emails)}{Count(Total Emails} = \frac{4}{7} = 0.57$ ***
github_jupyter
# Object formatters ## Default formatting behaviors When you return a value or a display a value in a .NET notebook, the default formatting behavior is to try to provide some useful information about the object. If it's an array or other type implementing `IEnumerable`, that might look like this: ``` display ["hello"; "world"] Enumerable.Range(1, 5) ``` As you can see, the same basic structure is used whether you pass the object to the `display` method or return it as the cell's value. Similarly to the behavior for `IEnumerable` objects, you'll also see table output for dictionaries, but for each value in the dictionary, the key is provided rather than the index within the collection. ``` // Cannot simply use 'dict' here, see https://github.com/dotnet/try/issues/554 let d = dict [("zero", 0); ("one", 1); ("two", 2)] System.Collections.Generic.Dictionary<string, int>(d) ``` The default formatting behavior for other types of objects is to produce a table showing their properties and the values of those properties. ``` type Person = { FirstName: string; LastName: string; Age: int } // Evaluate a new person { FirstName = "Mitch"; LastName = "Buchannon"; Age = 42 } ``` When you have a collection of such objects, you can see the values listed for each item in the collection: ``` let people = [ { FirstName = "Mitch"; LastName = "Buchannon"; Age = 42 } { FirstName = "Hobie "; LastName = "Buchannon"; Age = 23 } { FirstName = "Summer"; LastName = "Quinn"; Age = 25 } { FirstName = "C.J."; LastName = "Parker"; Age = 23 } ] people ``` Now let's try something a bit more complex. Let's look at a graph of objects. We'll redefine the `Person` class to allow a reference to a collection of other `Person` instances. 
``` type Person = { FirstName: string LastName: string Age: int Friends: ResizeArray<Person> } let mitch = { FirstName = "Mitch"; LastName = "Buchannon"; Age = 42; Friends = ResizeArray() } let hobie = { FirstName = "Hobie "; LastName = "Buchannon"; Age = 23; Friends = ResizeArray() } let summer = { FirstName = "Summer"; LastName = "Quinn"; Age = 25; Friends = ResizeArray() } mitch.Friends.AddRange([ hobie; summer ]) hobie.Friends.AddRange([ mitch; summer ]) summer.Friends.AddRange([ mitch; hobie ]) let people = [ mitch; hobie; summer ] display people ``` That's a bit hard to read, right? The default formatting behaviors are thorough, but that doesn't always mean they're as useful as they might be. In order to give you more control in these kinds of cases, the object formatters can be customized from within the .NET notebook. ## Custom formatters Let's clean up the output above by customizing the formatter for the `Person.Friends` property, which is creating a lot of noise. The way to do this is to use the `Formatter` API. This API lets you customize the formatting for a specific type. Since `Person.Friends` is of type `ResizeArray<Person>`, we can register a custom formatter for that type to change the output. Let's just list their first names: ``` Formatter<ResizeArray<Person>>.Register( fun people writer -> // write each person's first name, not the literal string "person" for person in people do writer.Write(person.FirstName) , mimeType = "text/plain") people ``` You might have noticed that `people` is of type `ResizeArray<Person>`, but the table output still includes columns for `LastName`, `Age`, and `Friends`. What's going on here? Notice that the custom formatter we just registered was registered for the mime type `"text/plain"`. The top-level formatter that's used when we call `display` requests output of mime type `"text/html"` and the nested objects are formatted using `"text/plain"`. It's the nested objects, not the top-level HTML table, that's using the custom formatter here.
With that in mind, we can make it even more concise by registering a formatter for `Person`: ``` Formatter<Person>.Register( fun person writer -> writer.Write(person.FirstName) , mimeType = "text/plain"); people ``` Of course, you might not want table output. To replace the default HTML table view, you can register a formatter for the `"text/html"` mime type. Let's do that, and write some HTML using PocketView.
github_jupyter
# BDF Introduction The Jupyter notebook for this demo can be found in: - docs/quick_start/demo/bdf_demo.ipynb - https://github.com/SteveDoyle2/pyNastran/tree/master/docs/quick_start/demo/bdf_demo.ipynb Import pyNastran ``` import os import pyNastran print (pyNastran.__file__) print (pyNastran.__version__) pkg_path = pyNastran.__path__[0] from pyNastran.bdf.bdf import BDF, read_bdf from pyNastran.utils import object_attributes, object_methods print("pkg_path = %s" % pkg_path) ``` ## Loading a BDF There are two ways to load a BDF; the long way or the short way. The short way instantiates the **```BDF```** class and the short way uses the **```read_bdf```** function. As this demo was written for the Jupyter Notebook, we'll use **``read_bdf``** and then mention the other method. The class-based method allows finer control over things like: - what cards should be loaded - OpenMDAO dynamic syntax support ### The class-based method ``` bdf_filename = os.path.abspath(os.path.join(pkg_path, '..', 'models', 'iSat', 'ISat_Launch_Sm_Rgd.dat')) print(bdf_filename) # create the BDF object bdf = BDF() # read the file from the GUI # don't cross-reference bdf.read_bdf(bdf_filename, xref=False) ``` ### The function-based method ``` bdf = read_bdf(bdf_filename, xref=False) ``` For simplicity of using the demo, we'll again use the ```read_bdf``` method ``` #bdf_filename = r'D:\work\pynastran_0.8.0_py27\models\iSat\ISat_Launch_Sm_Rgd.dat' bdf_filename = os.path.abspath(os.path.join(pkg_path, '..', 'models', 'iSat', 'ISat_Launch_Sm_Rgd.dat')) # read the file as a path bdf_xref = read_bdf(bdf_filename, xref=True) ``` We can use the generic object attributes/methods functions #### Some other very handy methods that will be used later by ```test_bdf``` ``` print(bdf.get_bdf_stats()) print("card_count = %s\n" % bdf.card_count) print("reject_count = %s" % bdf.reject_count) ``` ## Cross-referencing Cross-referencing a BDF allows improved usability of the **``BDF``** class. 
It comes with some negative side effects, but in general is a very useful thing. It dramatically minimizes the amount of code you need to write, greatly simplifies future operations, and is highly recommended. The major downside is it slows down the code. ### Without Cross-Referencing (xref=False) Here the raw values of the the data objects are returned to us ``` cquad = bdf.elements[1] print(cquad) nid1 = cquad.nodes[0] print("nid1 = %s" % nid1) n1 = bdf.nodes[nid1] cd4 = n1.cd c4 = bdf.coords[cd4] print("i (xref=False) = %s" % str(c4.i)) #print object_attributes(c4) ``` ### Cross-Referenced (xref=True) Here we can trace the referenced objects very easily. A cross-referenced attribute is indicated with the **``*_ref``** suffix: * ``cquad4_element.nodes`` : not cross referenced * ``cquad4_element.nodes_ref`` : cross referenced ``` print("i (xref=True) = %s" % bdf_xref.elements[1].nodes_ref[0].cd_ref.i) ``` So how is this done? ``` cquad.nodes_ref = [] cquad.nodes_ref.append(n1) print(cquad.nodes_ref[0]) ``` #### Let's show off the GRID card ``` # some Grid methods n1 = bdf_xref.nodes[1] print(n1) # the comment c1 = bdf_xref.nodes[1].comment c2 = bdf_xref.nodes[2].comment print("c1=%r" % c1) print("c2=%r" % c2) # get the position of a node # in the local cooordinate system print("xyz = %s" % n1.xyz) # in the global frame print("position = %s" % n1.get_position()) # in an arbitrary frame print("wrt5 = %s" % n1.get_position_wrt(bdf, 5)) print("wrt4 = %s" % n1.get_position_wrt(bdf, 4)) ``` Now let's modify the **``GRID``** card and write it out ``` n1 = bdf_xref.nodes[1] n1.xyz[1] = -7.5 print("repr = %s" % n1.repr_fields()) print("raw = %s" % n1.raw_fields()) #n1.xyz[1] = 100000000000. 
print("repr2 = %s" % n1.repr_fields()) print(n1) print(n1.write_card(size=8)) print(n1.write_card(size=16, is_double=False)) print(n1.write_card(size=16, is_double=True)) ``` ### Examples of xref on elements ``` eid100 = bdf_xref.elements[100] print(eid100) print("nodes = %s" % eid100.nodes) print("--node0--\n%s" % eid100.nodes_ref[0]) print("--cd--\n%s" % eid100.nodes_ref[0].cd) print("cd.cid = %s" % eid100.nodes_ref[0].cd_ref.cid) print("area = %s" % eid100.Area()) print("mass = %s" % eid100.Mass()) print("--pid--\n%s" % eid100.pid) print("pid.pid = %s" % eid100.pid_ref.pid) print("pid.Pid() = %s" % eid100.Pid()) print(eid100.pid_ref.mid1_ref) print("type = %s" % eid100.pid_ref.mid1_ref.type) print("nu12 = %s" % eid100.pid_ref.mid1_ref.nu12) print("mass = %s" % eid100.Mass()) ``` ## Write the modified deck Let's first switch to the desktop to make the file easy to find ``` import getpass name = getpass.getuser() os.chdir(os.path.join(r'C:\Users', name, 'Desktop')) pwd ``` There are two ways to write a deck - **``interspersed``** : alternate properties and elements (similar to how Patran writes decks) - **``not-interspersed (default)``** : much faster We can also use 8 or 16 character field width as well as double precision. Note that double precision only works for certain cards (e.g. ``GRID``, ``COORD``, ``DMIG``) and not much else. ``` bdf_xref.write_bdf('fem.bdf', interspersed=False, size=8, is_double=False) !tail -n 5 "fem.bdf" bdf_xref.write_bdf('fem.bdf', interspersed=True, size=16, is_double=False) !tail "fem.bdf" bdf_xref.write_bdf('fem.bdf', interspersed=True, size=16, is_double=True) !tail "fem.bdf" bdf_filename ```
github_jupyter
``` import os, numpy, warnings import pandas as pd os.environ['R_HOME'] = '/home/gdpoore/anaconda3/envs/tcgaAnalysisPythonR/lib/R' warnings.filterwarnings('ignore') %config InlineBackend.figure_format = 'retina' %reload_ext rpy2.ipython %%R require(ggplot2) require(snm) require(limma) require(edgeR) require(dplyr) require(edgeR) require(pvca) require(lme4) require(ggsci) require(cowplot) require(doMC) require(splitstackshape) numCores <- detectCores() registerDoMC(cores=numCores) %%R load("snmCfdnaShogunAndMetadata_Dec2_Final.RData") load("snmKrakenAndMetadataFiltered_Dec2_Final.RData") %%R # Load dependencies require(devtools) require(doMC) require(tibble) require(gbm) require(splitstackshape) require(reshape2) require(ggpubr) require(caret) # for model building require(pROC) # for AUC calculations require(purrr) # for functional programming using map() require(dplyr) # for data manipulation require(doMC) # for parallel computing require(gbm) # for machine learning require(tibble) # for df operations require(cowplot) # for plotting require(PRROC) # for precision-recall curves require(MLmetrics) # for multi-class learning require(caret) # for machine learning defaultGBMGrid <- expand.grid(interaction.depth = seq(1,3), n.trees = floor((1:3) * 50), shrinkage = 0.1, n.minobsinnode = 5) customGBMGrid <- expand.grid(interaction.depth = seq(1,3), n.trees = floor((1:3) * 50), shrinkage = 0.1, n.minobsinnode = 1) numKFold <- 4 numResampleIter <- 1 ml2DTs <- function(snmData, classOfInterest = "Lung Adenocarcinoma", cutPoint = 0.5, samplingSize = 20, caretTuneGrid = defaultGBMGrid){ metaTmp1 <- droplevels(metadataPSMatchedDPQCFiltered[(metadataPSMatchedDPQCFiltered$disease_type_consol %in% c("PRAD", "SKCM", "NSCLC")),]) tmp <- metaTmp1 tmp$disease_type_consol <- factor(ifelse(metaTmp1$disease_type_consol == classOfInterest, yes = classOfInterest, no = "Other")) metadataSimSampled <- as.data.frame(stratified(tmp, group = "disease_type_consol", size = samplingSize, 
keep.rownames = TRUE, replace = FALSE, bothSets = FALSE)) rownames(metadataSimSampled) <- metadataSimSampled$rn mlDataY <- metadataSimSampled mlDataX <- snmData[rownames(mlDataY),] set.seed(42) index <- createDataPartition(mlDataY$disease_type_consol, p = 0.7, list = FALSE) trainX <- mlDataX[index,] trainY <- mlDataY[index,]$disease_type_consol testX <- mlDataX[-index,] testY <- mlDataY[-index,]$disease_type_consol # print(testY) refactoredTrainY <- factor(gsub('([[:punct:]])|\\s+','',trainY)) refactoredTestY <- factor(gsub('([[:punct:]])|\\s+','',testY)) set.seed(42) ctrl <- trainControl(method = "repeatedcv", number = numKFold, repeats = numResampleIter, sampling = "up", summaryFunction = twoClassSummary, classProbs = TRUE, verboseIter = TRUE, savePredictions = TRUE, allowParallel=TRUE) mlModel <- train(x = trainX, y = refactoredTrainY, method = "gbm", preProcess = c("scale","center"), trControl = ctrl, verbose = TRUE, metric = "ROC", tuneGrid = customGBMGrid) positiveClass <- gsub(" ","", classOfInterest) negativeClass <- "Other" predProbs <- as.numeric(predict(mlModel, newdata = testX, type = "prob")[,positiveClass]) fg <- predProbs[refactoredTestY == positiveClass] bg <- predProbs[refactoredTestY == negativeClass] prroc_roc <- roc.curve(scores.class0 = fg, scores.class1 = bg, curve = T) prroc_pr <- pr.curve(scores.class0 = fg, scores.class1 = bg, curve = T, rand.compute=T) # par(mfrow = c(1,2)) plot(prroc_roc) plot(prroc_pr) # dev.off() predClass <- predict(mlModel, newdata = testX) confusionMatrix(table(predict(mlModel, newdata = testX, type="prob")[,positiveClass] >= cutPoint, refactoredTestY == positiveClass)) } #-----------------------------------------# # Machine learning #-----------------------------------------# # mlHvsC <- function(snmData){ # Load dependencies mlHvsC <- function(snmData){ numCores <- detectCores() registerDoMC(cores=numCores) defaultGBMGrid <- expand.grid(interaction.depth = seq(1,3), n.trees = floor((1:3) * 50), shrinkage = 0.1, 
n.minobsinnode = 5) customGBMGrid <- expand.grid(interaction.depth = seq(1,3), n.trees = floor((1:3) * 50), shrinkage = 0.1, n.minobsinnode = 1) caretTuneGrid <- defaultGBMGrid numKFold <- 4 numResampleIter <- 1 mlDataY <- metadataPSMatchedDPQCFiltered mlDataX <- snmData[rownames(mlDataY),] set.seed(42) index <- createDataPartition(mlDataY$HvsC, p = 0.7, list = FALSE) trainX <- mlDataX[index,] trainY <- mlDataY[index,]$HvsC testX <- mlDataX[-index,] testY <- mlDataY[-index,]$HvsC # print(testY) refactoredTrainY <- factor(gsub('([[:punct:]])|\\s+','',trainY)) refactoredTestY <- factor(gsub('([[:punct:]])|\\s+','',testY)) set.seed(42) ctrl <- trainControl(method = "repeatedcv", number = numKFold, repeats = numResampleIter, sampling = "up", summaryFunction = twoClassSummary, classProbs = TRUE, verboseIter = TRUE, savePredictions = TRUE, allowParallel=TRUE) mlModel <- train(x = trainX, y = refactoredTrainY, method = "gbm", preProcess = c("scale","center"), trControl = ctrl, verbose = TRUE, metric = "ROC", tuneGrid = defaultGBMGrid) positiveClass <- "Cancer" negativeClass <- "Control" predProbs <- as.numeric(predict(mlModel, newdata = testX, type = "prob")[,positiveClass]) fg <- predProbs[refactoredTestY == positiveClass] bg <- predProbs[refactoredTestY == negativeClass] prroc_roc <- roc.curve(scores.class0 = fg, scores.class1 = bg, curve = T) prroc_pr <- pr.curve(scores.class0 = fg, scores.class1 = bg, curve = T, rand.compute=T) plot(prroc_roc) plot(prroc_pr) predClass <- predict(mlModel, newdata = testX) print(confusionMatrix(data = predClass, reference = refactoredTestY, positive = positiveClass)) } #----------------------------------------------------- loocvDTs <- function(snmData, samplingSize = 15, DTs, caretTuneGrid = defaultGBMGrid, filenameString = paste(DTs,collapse = "__"), HvsCFlag = FALSE){ if(HvsCFlag){ metaTmpX <- droplevels(metadataPSMatchedDPQCFiltered[(metadataPSMatchedDPQCFiltered$disease_type_consol %in% DTs),]) metaTmpX$disease_type_consol <- 
metaTmpX$HvsC classes <- gsub(" ","",levels(metaTmpX$disease_type_consol)) } else{ metaTmpX <- droplevels(metadataPSMatchedDPQCFiltered[(metadataPSMatchedDPQCFiltered$disease_type_consol %in% DTs),]) classes <- gsub(" ","",DTs) } # Do LOOCV model building and testing multiClassSummaryStats <- list() multiClassSummaryStatsDist <- list() numKFold <- 4 numResampleIter <- 1 metaData <- metaTmpX snmData <- snmData # dataPSUniqueDecontamQC # iterSize <- 1 for(jj in 1:iterSize){ metadataSimSampled <- as.data.frame(stratified(metaData, group = "disease_type_consol", size = samplingSize, keep.rownames = TRUE, replace = FALSE, bothSets = FALSE)) rownames(metadataSimSampled) <- metadataSimSampled$rn mlDataY <- metadataSimSampled mlDataX <- snmData[rownames(mlDataY),] dim(mlDataY)[1] == dim(mlDataX)[1] # Sanity check # Create data partitions # set.seed(42) indexSuper <- 1:dim(mlDataY)[1] predProbs <- list() obsClass <- vector() predClass <- vector() varImpBestModelDF2OrderedNonzeroList <- list() for(ii in 1:length(indexSuper)){ print(sprintf("Iteration: %d/%d", ii, length(indexSuper))) index <- indexSuper[ii] # print(index) trainX <- mlDataX[-index,] trainY <- mlDataY[-index,]$disease_type_consol testX <- mlDataX[index,,drop=FALSE] testY <- mlDataY[index,,drop=FALSE]$disease_type_consol # print(testY) refactoredTrainY <- factor(gsub('([[:punct:]])|\\s+','',trainY)) refactoredTestY <- factor(gsub('([[:punct:]])|\\s+','',testY)) obsClass[ii] <- as.character(refactoredTestY) set.seed(42) ctrl <- trainControl(method = "repeatedcv", number = numKFold, repeats = numResampleIter, sampling = "up", summaryFunction = multiClassSummary, classProbs = TRUE, verboseIter = FALSE, savePredictions = TRUE, allowParallel=TRUE) mlModel <- train(x = trainX, y = refactoredTrainY, method = "gbm", preProcess = c("scale","center"), trControl = ctrl, verbose = FALSE, metric = "ROC", tuneGrid = caretTuneGrid) predProbs[ii] <- list(predict(mlModel, newdata = testX, type = "prob")) predClass[ii] <- 
as.character(predict(mlModel, newdata = testX, type = "raw")) varImpBestModelDF <- as.data.frame(varImp( mlModel$finalModel, scale = FALSE )) varImpBestModelDF2 <- rownames_to_column(varImpBestModelDF, "Taxa") varImpBestModelDF2Ordered <- varImpBestModelDF2[order(-varImpBestModelDF2$Overall),] colnames(varImpBestModelDF2Ordered)[2] <- "varImp" varImpBestModelDF2OrderedNonzero <- varImpBestModelDF2Ordered[varImpBestModelDF2Ordered$varImp != 0,] varImpBestModelDF2OrderedNonzeroList[[ii]] <- varImpBestModelDF2OrderedNonzero rm(mlModel) } loocvPreds <- cbind(obs = factor(obsClass, levels = classes), pred = factor(predClass, levels = classes), do.call(rbind,predProbs)) # multiClassSummaryStats <- multiClassSummary(loocvPreds, lev = classes) # print(multiClassSummaryStats) multiClassSummaryStats[[jj]] <- multiClassSummary(loocvPreds, lev = classes) print(multiClassSummaryStats[[jj]]) filenameROC <- paste0(filenameString,"__SHOGUN__ROC.png") filenamePR <- paste0(filenameString,"__SHOGUN__PR.png") filenameROCData <- paste0(filenameString,"__SHOGUN__Data__ROC.csv") filenamePRData <- paste0(filenameString,"__SHOGUN__Data__PR.csv") filenameSink <- paste0(filenameString,"__SHOGUN__CM.txt") predProbs <- loocvPreds[,DTs[1]] fg <- predProbs[loocvPreds$obs == DTs[1]] bg <- predProbs[loocvPreds$obs == DTs[2]] prroc_roc <- roc.curve(scores.class0 = fg, scores.class1 = bg, curve = T) prroc_pr <- pr.curve(scores.class0 = fg, scores.class1 = bg, curve = T, rand.compute=T) png(filename=filenameROC, width = 6, height = 4, units = 'in', res = 300) plot(prroc_roc) dev.off() png(filename=filenamePR, width = 6, height = 4, units = 'in', res = 300) plot(prroc_pr) dev.off() rocCurveData <- cbind(as.data.frame(prroc_roc$curve), DT1 = DTs[1], DT2 = DTs[2]) prCurveData <- cbind(as.data.frame(prroc_pr$curve), DT1 = DTs[1], DT2 = DTs[2]) write.table(prCurveData, sep=",", file = filenamePRData, col.names = FALSE) write.table(rocCurveData, sep=",", file = filenameROCData, col.names = FALSE) } 
print(confusionMatrix(loocvPreds$obs, loocvPreds$pred)) multiClassSummaryStatsDist <- data.frame(do.call(rbind, multiClassSummaryStats)) sink(filenameSink) print(print(confusionMatrix(loocvPreds$obs, loocvPreds$pred))) sink() } #----------------------------------------------------- ml2DTs <- function(snmData, classOfInterest = "Lung Adenocarcinoma", cutPoint = 0.5, samplingSize = 20, caretTuneGrid = defaultGBMGrid){ metaTmp1 <- droplevels(metadataPSMatchedDPQCFiltered[(metadataPSMatchedDPQCFiltered$disease_type_consol %in% c("PRAD", "SKCM", "NSCLC")),]) tmp <- metaTmp1 tmp$disease_type_consol <- factor(ifelse(metaTmp1$disease_type_consol == classOfInterest, yes = classOfInterest, no = "Other")) metadataSimSampled <- as.data.frame(stratified(tmp, group = "disease_type_consol", size = samplingSize, keep.rownames = TRUE, replace = FALSE, bothSets = FALSE)) rownames(metadataSimSampled) <- metadataSimSampled$rn mlDataY <- metadataSimSampled mlDataX <- snmData[rownames(mlDataY),] set.seed(42) index <- createDataPartition(mlDataY$disease_type_consol, p = 0.7, list = FALSE) trainX <- mlDataX[index,] trainY <- mlDataY[index,]$disease_type_consol testX <- mlDataX[-index,] testY <- mlDataY[-index,]$disease_type_consol # print(testY) refactoredTrainY <- factor(gsub('([[:punct:]])|\\s+','',trainY)) refactoredTestY <- factor(gsub('([[:punct:]])|\\s+','',testY)) set.seed(42) ctrl <- trainControl(method = "repeatedcv", number = numKFold, repeats = numResampleIter, sampling = "up", summaryFunction = twoClassSummary, classProbs = TRUE, verboseIter = TRUE, savePredictions = TRUE, allowParallel=TRUE) mlModel <- train(x = trainX, y = refactoredTrainY, method = "gbm", preProcess = c("scale","center"), trControl = ctrl, verbose = TRUE, metric = "ROC", tuneGrid = customGBMGrid) positiveClass <- gsub(" ","", classOfInterest) negativeClass <- "Other" predProbs <- as.numeric(predict(mlModel, newdata = testX, type = "prob")[,positiveClass]) fg <- predProbs[refactoredTestY == positiveClass] bg 
<- predProbs[refactoredTestY == negativeClass] prroc_roc <- roc.curve(scores.class0 = fg, scores.class1 = bg, curve = T) prroc_pr <- pr.curve(scores.class0 = fg, scores.class1 = bg, curve = T, rand.compute=T) # par(mfrow = c(1,2)) plot(prroc_roc) plot(prroc_pr) # dev.off() predClass <- predict(mlModel, newdata = testX) confusionMatrix(table(predict(mlModel, newdata = testX, type="prob")[,positiveClass] >= cutPoint, refactoredTestY == positiveClass)) } %%R hVsC4 <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 100, DTs = c("Control","SKCM", "PRAD", "NSCLC"), caretTuneGrid = defaultGBMGrid, HvsCFlag = TRUE) %%R hVsC_PRAD <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 69, DTs = c("PRAD","Control"), caretTuneGrid = defaultGBMGrid) %%R hVsC_NSCLC <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 69, DTs = c("NSCLC","Control"), caretTuneGrid = defaultGBMGrid) %%R hVsC_SKCM <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 69, DTs = c("SKCM","Control"), caretTuneGrid = defaultGBMGrid) %%R prad_nsclc <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 59, DTs = c("PRAD","NSCLC"), caretTuneGrid = defaultGBMGrid) %%R nsclc_skcm <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 25, DTs = c("NSCLC","SKCM"), caretTuneGrid = defaultGBMGrid) %%R prad_skcm <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 59, DTs = c("PRAD","SKCM"), caretTuneGrid = defaultGBMGrid) %%R DT3 <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 59, DTs = c("PRAD","NSCLC","SKCM"), caretTuneGrid = defaultGBMGrid) %%R DTH4 <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 69, DTs = c("Control","PRAD","NSCLC","SKCM"), caretTuneGrid = defaultGBMGrid) %%R DTH3 <- loocvDTs(snmData = snmShogundataPSUniqueDecontamDPQC, samplingSize = 69, DTs = c("Control","PRAD","NSCLC"), caretTuneGrid = defaultGBMGrid) ```
github_jupyter
# Install Transformers Library ``` !pip install transformers==3.0.2 import numpy as np import pandas as pd import torch import torch.nn as nn from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report import transformers from transformers import AutoModel, BertTokenizerFast # specify GPU device = torch.device("cuda") ``` # Load Dataset ``` from google.colab import drive drive.mount('/content/gdrive') df=pd.read_csv('gdrive/My Drive/Licenta/Data/politifact_strict_binarized.csv') # df=pd.read_csv('gdrive/My Drive/Licenta/Data/mafiascum_label_text.csv') # df=pd.read_csv('gdrive/My Drive/Licenta/Data/mafiascum_label_words.csv') df.head() print(df[:50]) df.shape # check class distribution df['veracity'].value_counts(normalize = True) ``` # Split train dataset into train, validation and test sets ``` train_text, temp_text, train_labels, temp_labels = train_test_split(df['statement'], df['veracity'], random_state=2018, test_size=0.3, stratify=df['veracity']) # we will use temp_text and temp_labels to create validation and test set val_text, test_text, val_labels, test_labels = train_test_split(temp_text, temp_labels, random_state=2018, test_size=0.5, stratify=temp_labels) ``` # Import BERT Model and BERT Tokenizer ``` # import BERT-base pretrained model bert = AutoModel.from_pretrained('bert-large-uncased') # Load the BERT tokenizer tokenizer = BertTokenizerFast.from_pretrained('bert-large-uncased') # sample data text = ["this is a bert model tutorial", "we will fine-tune a bert model"] # encode text sent_id = tokenizer.batch_encode_plus(text, padding=True, return_token_type_ids=False) # output print(sent_id) ``` # Tokenization ``` # get length of all the messages in the train set seq_len = [len(i.split()) for i in train_text] pd.Series(seq_len).hist(bins = 30) max_seq_len = 50 # tokenize and encode sequences in the training set tokens_train = tokenizer.batch_encode_plus( train_text.tolist(), max_length = max_seq_len, 
pad_to_max_length=True, truncation=True, return_token_type_ids=False ) # tokenize and encode sequences in the validation set tokens_val = tokenizer.batch_encode_plus( val_text.tolist(), max_length = max_seq_len, pad_to_max_length=True, truncation=True, return_token_type_ids=False ) # tokenize and encode sequences in the test set tokens_test = tokenizer.batch_encode_plus( test_text.tolist(), max_length = max_seq_len, pad_to_max_length=True, truncation=True, return_token_type_ids=False ) ``` # Convert Integer Sequences to Tensors ``` # for train set train_seq = torch.tensor(tokens_train['input_ids']) train_mask = torch.tensor(tokens_train['attention_mask']) train_y = torch.tensor(train_labels.tolist()) # for validation set val_seq = torch.tensor(tokens_val['input_ids']) val_mask = torch.tensor(tokens_val['attention_mask']) val_y = torch.tensor(val_labels.tolist()) # for test set test_seq = torch.tensor(tokens_test['input_ids']) test_mask = torch.tensor(tokens_test['attention_mask']) test_y = torch.tensor(test_labels.tolist()) ``` # Create DataLoaders ``` from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler #define a batch size batch_size = 48 # wrap tensors train_data = TensorDataset(train_seq, train_mask, train_y) # sampler for sampling the data during training train_sampler = RandomSampler(train_data) # dataLoader for train set train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=batch_size) # wrap tensors val_data = TensorDataset(val_seq, val_mask, val_y) # sampler for sampling the data during training val_sampler = SequentialSampler(val_data) # dataLoader for validation set val_dataloader = DataLoader(val_data, sampler = val_sampler, batch_size=batch_size) ``` # Freeze BERT Parameters ``` # freeze all the parameters for param in bert.parameters(): param.requires_grad = False ``` # Define Model Architecture ``` class BERT_Arch(nn.Module): def __init__(self, bert): super(BERT_Arch, self).__init__() 
self.bert = bert # dropout layer self.dropout = nn.Dropout(0.1) # relu activation function self.relu = nn.ReLU() # dense layer 1 self.fc1 = nn.Linear(1024,512) # dense layer 2 (Output layer) self.fc2 = nn.Linear(512,2) #softmax activation function self.softmax = nn.LogSoftmax(dim=1) #define the forward pass def forward(self, sent_id, mask): #pass the inputs to the model _, cls_hs = self.bert(sent_id, attention_mask=mask) x = self.fc1(cls_hs) x = self.relu(x) x = self.dropout(x) # output layer x = self.fc2(x) # apply softmax activation x = self.softmax(x) return x # pass the pre-trained BERT to our define architecture model = BERT_Arch(bert) # push the model to GPU model = model.to(device) # optimizer from hugging face transformers from transformers import AdamW # define the optimizer optimizer = AdamW(model.parameters(), lr = 4e-5) ``` # Find Class Weights ``` from sklearn.utils.class_weight import compute_class_weight #compute the class weights class_wts = compute_class_weight('balanced', np.unique(train_labels), train_labels) print(class_wts) # convert class weights to tensor weights= torch.tensor(class_wts,dtype=torch.float) weights = weights.to(device) # loss function cross_entropy = nn.NLLLoss(weight=weights) # number of training epochs epochs = 30 ``` # Fine-Tune BERT ``` # function to train the model def train(): model.train() total_loss, total_accuracy = 0, 0 # empty list to save model predictions total_preds=[] # iterate over batches for step,batch in enumerate(train_dataloader): # progress update after every 50 batches. 
if step % 50 == 0 and not step == 0: print(' Batch {:>5,} of {:>5,}.'.format(step, len(train_dataloader))) # push the batch to gpu batch = [r.to(device) for r in batch] sent_id, mask, labels = batch # clear previously calculated gradients model.zero_grad() # get model predictions for the current batch preds = model(sent_id, mask) # compute the loss between actual and predicted values loss = cross_entropy(preds, labels) # add on to the total loss total_loss = total_loss + loss.item() # backward pass to calculate the gradients loss.backward() # clip the the gradients to 1.0. It helps in preventing the exploding gradient problem torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0) # update parameters optimizer.step() # model predictions are stored on GPU. So, push it to CPU preds=preds.detach().cpu().numpy() # append the model predictions total_preds.append(preds) # compute the training loss of the epoch avg_loss = total_loss / len(train_dataloader) # predictions are in the form of (no. of batches, size of batch, no. of classes). # reshape the predictions in form of (number of samples, no. of classes) total_preds = np.concatenate(total_preds, axis=0) #returns the loss and predictions return avg_loss, total_preds # function for evaluating the model def evaluate(): print("\nEvaluating...") # deactivate dropout layers model.eval() total_loss, total_accuracy = 0, 0 # empty list to save the model predictions total_preds = [] # iterate over batches for step,batch in enumerate(val_dataloader): # Progress update every 50 batches. if step % 50 == 0 and not step == 0: # Calculate elapsed time in minutes. # elapsed = format_time(time.time() - t0) # Report progress. 
print(' Batch {:>5,} of {:>5,}.'.format(step, len(val_dataloader))) # push the batch to gpu batch = [t.to(device) for t in batch] sent_id, mask, labels = batch # deactivate autograd with torch.no_grad(): # model predictions preds = model(sent_id, mask) # compute the validation loss between actual and predicted values loss = cross_entropy(preds,labels) total_loss = total_loss + loss.item() preds = preds.detach().cpu().numpy() total_preds.append(preds) # compute the validation loss of the epoch avg_loss = total_loss / len(val_dataloader) # reshape the predictions in form of (number of samples, no. of classes) total_preds = np.concatenate(total_preds, axis=0) return avg_loss, total_preds ``` # Start Model Training ``` # set initial loss to infinite best_valid_loss = float('inf') # empty lists to store training and validation loss of each epoch train_losses=[] valid_losses=[] #for each epoch for epoch in range(epochs): print('\n Epoch {:} / {:}'.format(epoch + 1, epochs)) #train model train_loss, _ = train() #evaluate model valid_loss, _ = evaluate() #save the best model if valid_loss < best_valid_loss: best_valid_loss = valid_loss torch.save(model.state_dict(), 'saved_weights.pt') # append training and validation loss train_losses.append(train_loss) valid_losses.append(valid_loss) print(f'\nTraining Loss: {train_loss:.3f}') print(f'Validation Loss: {valid_loss:.3f}') ``` # Load Saved Model ``` #load weights of best model path = 'saved_weights.pt' model.load_state_dict(torch.load(path)) ``` # Get Predictions for Test Data ``` # get predictions for test data with torch.no_grad(): preds = model(test_seq.to(device), test_mask.to(device)) preds = preds.detach().cpu().numpy() # model's performance preds = np.argmax(preds, axis = 1) print(classification_report(test_y, preds)) # confusion matrix pd.crosstab(test_y, preds) ```
github_jupyter
```
# Import external resources
import json
from allennlp.common.util import import_submodules
from allennlp.models.archival import load_archive
from allennlp.predictors import Predictor
from collections import defaultdict
from typing import List

# Change the working directory to be the root of the Github repo
# so that the module's code can be found by AllenNLP
import os
os.chdir('../..')
os.getcwd()

# Register the project's custom AllenNLP components (dataset readers, models, predictors)
import_submodules('summarize')

# Load the extractive model (selects exactly 1 sentence)
# The overrides disable training-time config (metrics, initializer, input caps)
# and force single-sentence output via "model.max_sents": 1.
overrides = '{"model.metrics": [], "model.initializer": null, "dataset_reader.max_num_sentences": null, "model.max_words": null, "model.max_sents": 1}'
extractive_archive = load_archive('https://danieldeutsch.s3.amazonaws.com/summarize/experiments/deutsch2019/v1.1/extractive-step/extractive-model/model/topics/context/model.tar.gz', overrides=overrides)

# Load the extractive step model (selects 200 words as a preprocessing step)
# NOTE(review): this downloads the SAME archive URL as the extractive model above;
# only the overrides differ (no max_sents cap here). Confirm this is intentional
# rather than a copy-paste of the wrong URL.
overrides = '{"model.metrics": [], "model.initializer": null, "dataset_reader.max_num_sentences": null}'
extractive_step_archive = load_archive('https://danieldeutsch.s3.amazonaws.com/summarize/experiments/deutsch2019/v1.1/extractive-step/extractive-model/model/topics/context/model.tar.gz', overrides=overrides)

# Load the abstractive step model
overrides = '{"model.metrics": [], "model.initializer": null}'
abstractive_archive = load_archive('https://danieldeutsch.s3.amazonaws.com/summarize/experiments/deutsch2019/v1.1/abstractive-step/coverage/model/extractive-model/context/model.tar.gz', overrides=overrides)

# Wrap each archive in its predictor so we can call predict_json on raw inputs
extractive_predictor = Predictor.from_archive(extractive_archive, 'cloze-extractive-predictor')
extractive_step_predictor = Predictor.from_archive(extractive_step_archive, 'cloze-extractive-predictor')
abstractive_predictor = Predictor.from_archive(abstractive_archive, 'cloze-abstractive-predictor')

# Define the method to produce the summary
def _run_extractive_model(predictor: Predictor, document: List[str], topics: List[str], context: List[str]) -> List[str]:
    """Run an extractive predictor and return the selected document sentences.

    The predictor returns sentence indices ('predicted_indices'); the sentences
    are looked up in the (possibly re-tokenized) document echoed back in
    output['metadata']['document'] rather than the caller's original list.
    """
    output = predictor.predict_json({'document': document, 'topics': topics, 'context': context})
    indices = output['predicted_indices']
    document = output['metadata']['document']
    cloze = [document[index] for index in indices]
    return cloze

def run_extractive_model(document: List[str], topics: List[str], context: List[str]) -> List[str]:
    """Select exactly one sentence from the document (max_sents = 1 model)."""
    return _run_extractive_model(extractive_predictor, document, topics, context)

def run_extractive_step(document: List[str], topics: List[str], context: List[str]) -> List[str]:
    """Extractive preprocessing: select ~200 words of the document for the abstractive step."""
    return _run_extractive_model(extractive_step_predictor, document, topics, context)

def run_abstractive_step(document: List[str], topics: List[str], context: List[str]) -> str:
    """Generate the final cloze (fill-in) text from a preprocessed document."""
    output = abstractive_predictor.predict_json({'document': document, 'topics': topics, 'context': context})
    return output['cloze']

# Define the input data. The text should be pretokenized
topics = ['Barack Obama', 'Early life and career', 'Family and personal life']
document = [
    "Michelle Robinson and Barack Obama had been dating for a couple of years , and she was tired of his endless debates about whether marriage still meant anything as an institution .",
    "So when Obama launched into one of those discussions yet again over dinner at a fancy restaurant in 1991 , Robinson lit into her boyfriend , lecturing him on the need to get serious in their relationship .",
    "Then dessert came .",
    "On the plate was a box .",
    "Inside was an engagement ring .",
    "`` He said , 'That kind of shuts you up , does n't it ? ' `` Michelle Obama recounted years later .",
    "The couple married the following year .",
    "And today , Michelle , 43 , and Democratic presidential hopeful Barack Obama , 46 , will celebrate their 15th wedding anniversary .",
    "The marriage might never have happened .",
    "They met in 1989 when Obama spent his summer as a first-year law student at the Chicago law firm of Sidley & Austin , and Michelle Robinson was the lawyer assigned to be his adviser .",
    "Everybody at the firm had been buzzing about the smart , first-year Harvard Law School student , so she was expecting him to be `` nerdy , strange , off-putting . ''",
    "`` But I was charmed , '' she said .",
    "`` I was pleasantly surprised by who he turned out to be . ''",
    "Still , because of their professional relationship , Michelle Robinson tried to fix Obama up with her friends .",
    "Then , halfway through the summer , Obama asked her out .",
    "On their first date , they went to the Art Institute , strolled down Michigan Avenue and caught Spike Lee 's `` Do the Right Thing . ''",
    "`` It was fantastic , '' Michelle Obama said in 2004 .",
    "`` He was definitely putting on the charm . ... It worked .",
    "He swept me off my feet . ''",
    "Two years later came the proposal dinner at Gordon 's on Clark Street .",
    "And what was the dessert next to that engagement ring ?",
    "`` I do n't even remember , '' Michelle Obama conceded in 2004 .",
    "`` I do n't think I even ate it .",
    "I was so shocked and sort of a little embarrassed because he did sort of shut me up . ''",
]
context = [
    'In June 1989, Obama met Michelle Robinson when he was employed as a summer associate at the Chicago law firm of Sidley Austin .',
    'Robinson was assigned for three months as Obama\'s adviser at the firm, and she joined him at several group social functions but declined his initial requests to date.'
]

# Runs the extractive model which selects 1 sentence from the input document
run_extractive_model(document, topics, context)

# Runs the extractive preprocessing step that takes ~200 tokens from the document
preprocessed_data = run_extractive_step(document, topics, context)
print(json.dumps(preprocessed_data, indent=2))

# Run the abstractive step on the preprocessed data to generate the cloze
run_abstractive_step(preprocessed_data, topics, context)
```
github_jupyter
# 1) Data Preprocessing
---

```
import tensorflow as tf
print(tf.__version__)

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from sklearn.model_selection import train_test_split
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Dense, Input, GlobalAveragePooling1D ,GlobalMaxPooling1D
from tensorflow.keras.layers import Conv1D, MaxPool1D, Embedding
from tensorflow.keras.models import Model

# Load the SMS spam dataset; ISO-8859-1 because the file is not valid UTF-8
df = pd.read_csv('/content/spam.csv', encoding= 'ISO-8859-1')
df.head()

# delete garbage columns
df = df.drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis = 1)
df.head()

# rename columns
df.columns = ['labels', 'data']
df.head()

# create binary labels (0 and 1)
df['b_labels'] = df['labels'].map({'ham': 0, 'spam': 1})
y = df['b_labels'].values

# split the dataset
# NOTE(review): no stratify= and no random_state — class balance and
# reproducibility of the split are not guaranteed; confirm this is acceptable.
X_train, X_test, y_train, y_test = train_test_split(df['data'], y, test_size = 0.33)

# Convert sentences to sequences
max_vocab_size = 20000
tokenizer = Tokenizer(num_words = max_vocab_size)
tokenizer.fit_on_texts(X_train)
sequences_train = tokenizer.texts_to_sequences(X_train)
sequences_test = tokenizer.texts_to_sequences(X_test)

len(sequences_train[0])

# Check word index mapping (to check the number of words in vocabulary)
word2idx = tokenizer.word_index
V = len(word2idx)
print("Total number of unique tokens are %s" %V)

# pad sequences to get N * T matrix
data_train = pad_sequences(sequences_train)
print("Shape of data train tensor:", data_train.shape)

# Set the value of T to get sequence length
T = data_train.shape[1]
print(T)

# Pad the test set
data_test = pad_sequences(sequences_test, maxlen = T) # maxlen = T, to truncate longer sentences in test set
print('Shape of data test tensor:', data_test.shape)

data_train[0]
len(data_train[0])
```

# 2) Building The Model
---

```
# Create the model

# Choose embedding dimensionality
D = 20 # this is a hyper parameter, we can choose any word vector size that we want

# Input layer
i = Input(shape = (T,)) # input layer takes in sequences of integers, so shape is T

# Embedding layer
x = Embedding(V+1, D)(i) #This takes in sequences of integers and returns sequences
# of word vectors
# this will be an N * T * D array
# we want size of embedding to (V + 1) x D, because first word index starts from 1 and not 0

# first cnn layer
x = Conv1D(32, 3, activation = 'relu')(x)
x = MaxPool1D(3)(x)

# second cnn layer
x = Conv1D(64, 3, activation='relu')(x)
x = MaxPool1D()(x)  # default pool_size=2 here, unlike the explicit 3 above

# third cnn layer
x = Conv1D(128, 3, activation= 'relu')(x)
x = GlobalMaxPooling1D()(x)  # collapse the time dimension -> fixed-size vector

# dense layer — single sigmoid unit for binary (ham/spam) classification
x = Dense(1, activation= 'sigmoid')(x)

model = Model(i, x)

# compile the model
model.compile(optimizer='adam', loss = 'binary_crossentropy', metrics = ['accuracy'])

# Train the model
# NOTE(review): the test split is used as validation_data, so "val_*" metrics
# are measured on the test set — there is no held-out set after tuning.
r = model.fit(x=data_train, y = y_train, epochs = 5, validation_data=(data_test, y_test))

# Loss per iteration
import matplotlib.pyplot as plt
plt.plot(r.history['loss'], label = 'Loss')
plt.plot(r.history['val_loss'], label = 'Validation Loss')
plt.legend()
plt.show()

# accuracy per iteration
plt.plot(r.history['accuracy'], label = 'Accuracy')
plt.plot(r.history['val_accuracy'], label = 'Validation accuracy')
plt.legend()
plt.show()
```
github_jupyter
### Problem-1 In this problem we use the ColumnarStructure and boolean indexing to create a distance map of the HIV protease dimer. We will use C-beta atoms instead of C-alpha atoms. ``` from pyspark.sql import SparkSession from mmtfPyspark.io import mmtfReader from mmtfPyspark.utils import traverseStructureHierarchy, ColumnarStructure from mmtfPyspark import structureViewer import numpy as np from scipy.spatial.distance import pdist, squareform import matplotlib.pyplot as plt ``` #### Configure Spark ``` spark = SparkSession.builder.appName("Problem-1").getOrCreate() ``` ### Download an example structure Here we download an HIV protease structure with a bound ligand (Nelfinavir). ``` pdb = mmtfReader.download_full_mmtf_files(["1OHR"]) ``` Structures are represented as keyword-value pairs (tuples): * key: structure identifier (e.g., PDB ID) * value: MmtfStructure (structure data) In this case, we only have one structure, so we can use the first() method to extract the data. ``` structure = pdb.values().first() ``` ## Create a columnar structure from an MMTF structure Here we convert an MMTF structure to a columnar structure. By specifying the firstModel flag, we only retrieve data for the first model (this structure has only one model, anyways). ### TODO-1: create a ColumnarStructure ``` arrays = ... your code here ... ``` ### Get atom coordinates as numpy arrays ### TODO-2: get coordinates ``` x = ... your code here ... y = ... your code here ... z = ... your code here ... ``` ### Get entity types Entity types can be used to distinguish polymer from non-polymer groups and select specific components, e.g., all protein groups. 
The following entity types are available: * **Polymer groups** * PRO: protein * DNA: DNA * RNA: RNA * PSR: saccharide * **Non-polymer groups** * LGO: ligand organic * LGI: ligand inorganic * SAC: saccaride * WAT: water ``` entity_types = arrays.get_entity_types() entity_types ``` ### Get atom, group, and chain name arrays ``` atom_names = arrays.get_atom_names() atom_names group_names = arrays.get_group_names() group_names ``` ### Boolean array indexing Boolean indexing is an efficient way to access selected elements from numpy arrays. ### TODO-3: create a boolean index to select: * C-alpha atoms for glycine * C-beta atoms for all other amino acids This time, do the selection for the entire structure. ``` cb_idx = ... your code here ... ``` ### TODO-4: Print the atom names for the selected atoms ``` ... your code here ... ``` Then, we apply this index to get the coordinates for the selected atoms ``` xc = x[cb_idx] yc = y[cb_idx] zc = z[cb_idx] ``` #### Combine separate x, y, and z arrays and swap axes `[x0, x1, ..., xn],[y0, y1,...,yn],[z0, z1, ...,zn]` to `[x0, y0, z0],[x1, y1, z1], ..., [xn, yn, zn]` ``` coords = np.swapaxes(np.array([xc,yc,zc]), 0, 1) ``` #### Calculate distance map for the protein dimer ``` dist_matrix = squareform(pdist(coords), 'euclidean') plt.pcolor(dist_matrix, cmap='RdBu') plt.title('C-beta distance map') plt.gca().set_aspect('equal') plt.colorbar(); ``` #### Calculate distance map for the protein dimer Only consider distance <= 9. We use boolean indexing to set all distance > 9 to zero. ``` dist_matrix[dist_matrix > 9] = 0 plt.pcolor(dist_matrix, cmap='Greys') plt.title('C-beta distance map') plt.gca().set_aspect('equal') plt.colorbar(); spark.stop() ```
github_jupyter
## Iris Flower dataset classification

```
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

'''downlaod iris.csv from https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv'''

#Load Iris.csv into a pandas dataFrame.
iris = pd.read_csv("iris.csv")

# (Q) how many data-points and features?
print (iris.shape)

#(Q) What are the column names in our dataset?
print (iris.columns)

#(Q) How many data points for each class are present?
#(or) How many flowers for each species are present?
iris["species"].value_counts()

# balanced-dataset vs imbalanced datasets
#Iris is a balanced dataset as the number of data points for every class is 50.
```

# (3.2) 2-D Scatter Plot

```
#2-D scatter plot:
#ALWAYS understand the axis: labels and scale.
iris.plot(kind='scatter', x='sepal_length', y='sepal_width') ;
plt.show()

#cannot make much sense out it.
#What if we color the points by their class-label/flower-type.

# 2-D Scatter plot with color-coding for each flower type/class.
# Here 'sns' corresponds to seaborn.
# NOTE(review): FacetGrid's `size=` parameter is renamed `height=` in newer
# seaborn releases — update if this raises/warns on your version.
sns.set_style("whitegrid");
sns.FacetGrid(iris, hue="species", size=4) \
   .map(plt.scatter, "sepal_length", "sepal_width") \
   .add_legend();
plt.show();

# Notice that the blue points can be easily separated
# from red and green by drawing a line.
# But red and green data points cannot be easily separated.
# Can we draw multiple 2-D scatter plots for each combination of features?
# How many combinations exist? 4C2 = 6.
```

**Observation(s):**
1. Using sepal_length and sepal_width features, we can distinguish Setosa flowers from others.
2. Seperating Versicolor from Viginica is much harder as they have considerable overlap.

# (3.3) Pair-plot

```
# pairwise scatter plot: Pair-Plot
# Dis-advantages:
##Cannot be used when number of features are high.
##Cannot visualize higher dimensional patterns in 3-D and 4-D.
#Only possible to view 2D patterns.
plt.close();
sns.set_style("whitegrid");
sns.pairplot(iris, hue="species", size=3);
plt.show()
# NOTE: the diagonal elements are PDFs for each feature. PDFs are explained below.
```

**Observations**
1. petal_length and petal_width are the most useful features to identify various flower types.
2. While Setosa can be easily identified (linearly seperable), Virnica and Versicolor have some overlap (almost linearly seperable).
3. We can find "lines" and "if-else" conditions to build a simple model to classify the flower types.

# (3.4) Histogram, PDF, CDF

```
# What about 1-D scatter plot using just one feature?
#1-D scatter plot of petal-length
import numpy as np
iris_setosa = iris.loc[iris["species"] == "setosa"];
iris_virginica = iris.loc[iris["species"] == "virginica"];
iris_versicolor = iris.loc[iris["species"] == "versicolor"];

#print(iris_setosa["petal_length"])
# All y-values are zero: the points are spread along the x-axis only.
plt.plot(iris_setosa["petal_length"], np.zeros_like(iris_setosa['petal_length']), 'o')
plt.plot(iris_versicolor["petal_length"], np.zeros_like(iris_versicolor['petal_length']), 'o')
plt.plot(iris_virginica["petal_length"], np.zeros_like(iris_virginica['petal_length']), 'o')
plt.show()

#Disadvantages of 1-D scatter plot: Very hard to make sense as points
#are overlapping a lot.
#Are there better ways of visualizing 1-D scatter plots?

sns.FacetGrid(iris, hue="species", size=5) \
   .map(sns.distplot, "petal_length") \
   .add_legend();
plt.show();

sns.FacetGrid(iris, hue="species", size=5) \
   .map(sns.distplot, "petal_width") \
   .add_legend();
plt.show();

sns.FacetGrid(iris, hue="species", size=5) \
   .map(sns.distplot, "sepal_length") \
   .add_legend();
plt.show();

sns.FacetGrid(iris, hue="species", size=5) \
   .map(sns.distplot, "sepal_width") \
   .add_legend();
plt.show();

# Need for Cumulative Distribution Function (CDF)
# We can visually see what percentage of versicolor flowers have a
# petal_length of less than 5?
# How to construct a CDF?
# How to read a CDF?

#Plot CDF of petal_length
# np.histogram returns (counts per bin, bin edges); normalizing counts
# gives an empirical PDF, and its cumulative sum gives the CDF.
counts, bin_edges = np.histogram(iris_setosa['petal_length'], bins=10,
                                 density = True)
pdf = counts/(sum(counts))
print(pdf);
print(bin_edges);
cdf = np.cumsum(pdf)
plt.plot(bin_edges[1:],pdf);
plt.plot(bin_edges[1:], cdf)

counts, bin_edges = np.histogram(iris_setosa['petal_length'], bins=20,
                                 density = True)
pdf = counts/(sum(counts))
plt.plot(bin_edges[1:],pdf);

plt.show();

# Need for Cumulative Distribution Function (CDF)
# We can visually see what percentage of versicolor flowers have a
# petal_length of less than 1.6?
# How to construct a CDF?
# How to read a CDF?

#Plot CDF of petal_length
counts, bin_edges = np.histogram(iris_setosa['petal_length'], bins=10,
                                 density = True)
pdf = counts/(sum(counts))
print(pdf);
print(bin_edges)

#compute CDF
cdf = np.cumsum(pdf)
plt.plot(bin_edges[1:],pdf)
plt.plot(bin_edges[1:], cdf)
plt.show();

# Plots of CDF of petal_length for various types of flowers.
# Misclassification error if you use petal_length only.
counts, bin_edges = np.histogram(iris_setosa['petal_length'], bins=10,
                                 density = True)
pdf = counts/(sum(counts))
print(pdf);
print(bin_edges)
cdf = np.cumsum(pdf)
plt.plot(bin_edges[1:],pdf)
plt.plot(bin_edges[1:], cdf)

# virginica
counts, bin_edges = np.histogram(iris_virginica['petal_length'], bins=10,
                                 density = True)
pdf = counts/(sum(counts))
print(pdf);
print(bin_edges)
cdf = np.cumsum(pdf)
plt.plot(bin_edges[1:],pdf)
plt.plot(bin_edges[1:], cdf)

#versicolor
counts, bin_edges = np.histogram(iris_versicolor['petal_length'], bins=10,
                                 density = True)
pdf = counts/(sum(counts))
print(pdf);
print(bin_edges)
cdf = np.cumsum(pdf)
plt.plot(bin_edges[1:],pdf)
plt.plot(bin_edges[1:], cdf)

plt.show();
```

# (3.5) Mean, Variance and Std-dev

```
#Mean, Variance, Std-deviation,
print("Means:")
print(np.mean(iris_setosa["petal_length"]))
#Mean with an outlier.
print(np.mean(np.append(iris_setosa["petal_length"],50)));
print(np.mean(iris_virginica["petal_length"]))
print(np.mean(iris_versicolor["petal_length"]))

print("\nStd-dev:");
print(np.std(iris_setosa["petal_length"]))
print(np.std(iris_virginica["petal_length"]))
print(np.std(iris_versicolor["petal_length"]))
```

# (3.6) Median, Percentile, Quantile, IQR, MAD

```
#Median, Quantiles, Percentiles, IQR.
print("\nMedians:")
print(np.median(iris_setosa["petal_length"]))
#Median with an outlier — unlike the mean above, the median barely moves.
print(np.median(np.append(iris_setosa["petal_length"],50)));
print(np.median(iris_virginica["petal_length"]))
print(np.median(iris_versicolor["petal_length"]))

print("\nQuantiles:")
print(np.percentile(iris_setosa["petal_length"],np.arange(0, 100, 25)))
print(np.percentile(iris_virginica["petal_length"],np.arange(0, 100, 25)))
print(np.percentile(iris_versicolor["petal_length"], np.arange(0, 100, 25)))

print("\n90th Percentiles:")
print(np.percentile(iris_setosa["petal_length"],90))
print(np.percentile(iris_virginica["petal_length"],90))
print(np.percentile(iris_versicolor["petal_length"], 90))

from statsmodels import robust
print ("\nMedian Absolute Deviation")
print(robust.mad(iris_setosa["petal_length"]))
print(robust.mad(iris_virginica["petal_length"]))
print(robust.mad(iris_versicolor["petal_length"]))
```

# (3.7) Box plot and Whiskers

```
#Box-plot with whiskers: another method of visualizing the 1-D scatter plot more intuitively.
# The Concept of median, percentile, quantile.
# How to draw the box in the box-plot?
# How to draw whiskers: [no standard way] Could use min and max or use other complex statistical techniques.
# IQR like idea.

#NOTE: In the plot below, a technique called inter-quartile range is used in plotting the whiskers.
#Whiskers in the plot below do not correspond to the min and max values.

#Box-plot can be visualized as a PDF on the side-ways.
sns.boxplot(x='species',y='petal_length', data=iris)
plt.show()
```

# (3.8) Violin plots

```
# A violin plot combines the benefits of the previous two plots
#and simplifies them

# Denser regions of the data are fatter, and sparser ones thinner
#in a violin plot

sns.violinplot(x="species", y="petal_length", data=iris, size=8)
plt.show()
```
github_jupyter
```
import os
os.chdir('..')
from pathlib import Path
import json
```

# Paths

```
# COCO-format annotation files for the benign dataset splits
coco_train_path = Path('data')/'benign_data'/'coco_train.json'
coco_test_path = Path('data')/'benign_data'/'coco_test.json'
coco_eval_path = Path('data')/'benign_data'/'coco_eval.json'

data_dir = Path('data')/'benign_data'
eval_coco_path = data_dir/'coco_eval.json'
eval_img_dir = data_dir/'eval_imgs'

# Per-detector prediction files produced by the evaluation runs
eval_dir = Path('output')/'eval'
rcnn_annos_path = eval_dir/'rcnn'/'eval_annos_rcnn.json'
retina_annos_path = eval_dir/'retina'/'eval_annos_retina.json'
yolo_annos_path = eval_dir/'eval_annos_yolo_1.json'
```

---

```
with coco_test_path.open('r') as f:
    coco_test = json.load(f)
with coco_train_path.open('r') as f:
    coco_train = json.load(f)
with coco_eval_path.open('r') as f:
    coco_eval = json.load(f)

coco_train['categories']

# Add the 'iscrowd' field that pycocotools expects on every annotation.
# The train/test loops are commented out — presumably already patched once;
# only the eval file is (re)patched and written back here.
# for coco in (coco_test, coco_train):
#     for anno in coco['annotations']:
#         anno['iscrowd'] = 0
for anno in coco_eval['annotations']:
    anno['iscrowd'] = 0

# with coco_test_path.open('w') as f:
#     json.dump(coco_test, f)
# with coco_train_path.open('w') as f:
#     json.dump(coco_train, f)
with coco_eval_path.open('w') as f:
    json.dump(coco_eval, f)
```

# Viz

```
import os
os.chdir('..')
import detectron2_1
from detectron2.config import get_cfg
from pathlib import Path
from detectron2.engine import DefaultPredictor
import cv2
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import Visualizer
from PIL import Image

img_dir = Path('data')
img_path = img_dir/'samples'/'WechatIMG18.png'
save_path = img_dir/'samples'/'WechatIMG18_pred.png'

# Trained Faster R-CNN config + weights
model_dir = Path('output')
rcnn_dir = model_dir/'rcnn_2'
rcnn_cfg_path = rcnn_dir/'config.yaml'
rcnn_weights_path = rcnn_dir/'model_final.pth'

im = cv2.imread(str(img_path))

cfg = get_cfg()
cfg.merge_from_file(rcnn_cfg_path)
cfg.MODEL.WEIGHTS = str(rcnn_weights_path)
# Very low score threshold so that almost all candidate boxes are visualized
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.03

predictor = DefaultPredictor(cfg)
outputs = predictor(im)

# Register human-readable class names on the dataset's metadata catalog
benign_metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
MetadataCatalog.get(cfg.DATASETS.TRAIN[0]).thing_classes = ["box", "logo"]

# cv2 loads BGR; Visualizer expects RGB, hence the [:, :, ::-1] channel flip
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]))
out = v.draw_instance_predictions(outputs["instances"].to("cpu"))
Image.fromarray(out.get_image())

Image.fromarray(out.get_image()).save(save_path)
```

# Evaluation

```
import matplotlib.pyplot as plt
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab

# Standard COCO bbox evaluation: ground truth vs. YOLO predictions
cocoGt = COCO(eval_coco_path)
cocoDt = cocoGt.loadRes(str(yolo_annos_path))
cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
```
github_jupyter
# Using Ax for Human-in-the-loop Experimentation¶ While Ax can be used in as a fully automated service, generating and deploying candidates Ax can be also used in a trial-by-trial fashion, allowing for human oversight. Typically, human intervention in Ax is necessary when there are clear tradeoffs between multiple metrics of interest. Condensing multiple outcomes of interest into a single scalar quantity can be really challenging. Instead, it can be useful to specify an objective and constraints, and tweak these based on the information from the experiment. To facilitate this, Ax provides the following key features: 1. Constrained optimization 2. Interfaces for easily modifying optimization goals 3. Utilities for visualizing and deploying new trials composed of multiple optimizations. In this tutorial, we'll demonstrate how Ax enables users to explore these tradeoffs. With an understanding of the tradeoffs present in our data, we'll then make use of the constrained optimization utilities to generate candidates from multiple different optimization objectives, and create a conglomerate batch, with all of these candidates in together in one trial. ## Experiment Setup For this tutorial, we will assume our experiment has already been created. ``` from ax import Data, Metric, OptimizationConfig, Objective, OutcomeConstraint, ComparisonOp, load from ax.modelbridge.cross_validation import cross_validate from ax.modelbridge.factory import get_GPEI from ax.plot.diagnostic import tile_cross_validation from ax.plot.scatter import plot_multiple_metrics, tile_fitted from ax.utils.notebook.plotting import render, init_notebook_plotting import pandas as pd init_notebook_plotting() experiment = load('hitl_exp.json') ``` ### Initial Sobol Trial Bayesian Optimization experiments almost always begin with a set of random points. In this experiment, these points were chosen via a Sobol sequence, accessible via the `ModelBridge` factory. 
A collection of points run and analyzed together form a `BatchTrial`. A `Trial` object provides metadata pertaining to the deployment of these points, including details such as when they were deployed, and the current status of their experiment. Here, we see an initial experiment has finished running (COMPLETED status). ``` experiment.trials[0] experiment.trials[0].time_created # Number of arms in first experiment, including status_quo len(experiment.trials[0].arms) # Sample arm configuration experiment.trials[0].arms[0] ``` ## Experiment Analysis **Optimization Config** An important construct for analyzing an experiment is an OptimizationConfig. An OptimizationConfig contains an objective, and outcome constraints. Experiment's can have a default OptimizationConfig, but models can also take an OptimizationConfig as input independent of the default. **Objective:** A metric to optimize, along with a direction to optimize (default: maximize) **Outcome Constraint:** A metric to constrain, along with a constraint direction (<= or >=), as well as a bound. Let's start with a simple OptimizationConfig. By default, our objective metric will be maximized, but can be minimized by setting the `minimize` flag. Our outcome constraint will, by default, be evaluated as a relative percentage change. This percentage change is computed relative to the experiment's status quo arm. ``` experiment.status_quo objective_metric = Metric(name="metric_1") constraint_metric = Metric(name="metric_2") experiment.optimization_config = OptimizationConfig( objective=Objective(objective_metric), outcome_constraints=[ OutcomeConstraint(metric=constraint_metric, op=ComparisonOp.LEQ, bound=5), ] ) ``` **Data** Another critical piece of analysis is data itself! Ax data follows a standard format, shown below. This format is imposed upon the underlying data structure, which is a Pandas DataFrame. A key set of fields are required for all data, for use with Ax models. 
It's a good idea to double check our data before fitting models -- let's make sure all of our expected metrics and arms are present. ``` data = Data(pd.read_json('hitl_data.json')) data.df.head() data.df['arm_name'].unique() data.df['metric_name'].unique() ``` **Search Space** The final component necessary for human-in-the-loop optimization is a SearchSpace. A SearchSpace defines the feasible region for our parameters, as well as their types. Here, we have both parameters and a set of constraints on those parameters. Without a SearchSpace, our models are unable to generate new candidates. By default, the models will read the search space off of the experiment, when they are told to generate candidates. SearchSpaces can also be specified by the user at this time. Sometimes, the first round of an experiment is too restrictive--perhaps the experimenter was too cautious when defining their initial ranges for exploration! In this case, it can be useful to generate candidates from new, expanded search spaces, beyond that specified in the experiment. ``` experiment.search_space.parameters experiment.search_space.parameter_constraints ``` ### Model Fit Fitting BoTorch's GPEI will allow us to predict new candidates based on our first Sobol batch. Here, we make use of the default settings for GP-EI defined in the ModelBridge factory. ``` gp = get_GPEI( experiment=experiment, data=data, ) ``` We can validate the model fits using cross validation, shown below for each metric of interest. Here, our model fits leave something to be desired--the tail ends of each metric are hard to model. In this situation, there are three potential actions to take: 1. Increase the amount of traffic in this experiment, to reduce the measurement noise. 2. Increase the number of points run in the random batch, to assist the GP in covering the space. 3. Reduce the number of parameters tuned at one time. 
However, away from the tail effects, the fits do show a strong correlations, so we will proceed with candidate generation. ``` cv_result = cross_validate(gp) render(tile_cross_validation(cv_result)) ``` The parameters from the initial batch have a wide range of effects on the metrics of interest, as shown from the outcomes from our fitted GP model. ``` render(tile_fitted(gp, rel=True)) METRIC_X_AXIS = 'metric_1' METRIC_Y_AXIS = 'metric_2' render(plot_multiple_metrics( gp, metric_x=METRIC_X_AXIS, metric_y=METRIC_Y_AXIS, )) ``` ### Candidate Generation With our fitted GPEI model, we can optimize EI (Expected Improvement) based on any optimization config. We can start with our initial optimization config, and aim to simply maximize the playback smoothness, without worrying about the constraint on quality. ``` unconstrained = gp.gen( n=3, optimization_config=OptimizationConfig( objective=Objective(objective_metric), ) ) ``` Let's plot the tradeoffs again, but with our new arms. ``` render(plot_multiple_metrics( gp, metric_x=METRIC_X_AXIS, metric_y=METRIC_Y_AXIS, generator_runs_dict={ 'unconstrained': unconstrained, } )) ``` ### Change Objectives With our unconstrained optimization, we generate some candidates which are pretty promising with respect to our objective! However, there is a clear regression in our constraint metric, above our initial 5% desired constraint. Let's add that constraint back in. ``` constraint_5 = OutcomeConstraint(metric=constraint_metric, op=ComparisonOp.LEQ, bound=5) constraint_5_results = gp.gen( n=3, optimization_config=OptimizationConfig( objective=Objective(objective_metric), outcome_constraints=[constraint_5] ) ) ``` This yields a *GeneratorRun*, which contains points according to our specified optimization config, along with metadata about how the points were generated. Let's plot the tradeoffs in these new points. 
``` from ax.plot.scatter import plot_multiple_metrics render(plot_multiple_metrics( gp, metric_x=METRIC_X_AXIS, metric_y=METRIC_Y_AXIS, generator_runs_dict={ 'constraint_5': constraint_5_results } )) ``` It is important to note that the treatment of constraints in GP EI is probabilistic. The acquisition function weights our objective by the probability that each constraint is feasible. Thus, we may allow points with a very small probability of violating the constraint to be generated, as long as the chance of the points increasing our objective is high enough. You can see above that the point estimate for each point is significantly below a 5% increase in the constraint metric, but that there is uncertainty in our prediction, and the tail probabilities do include probabilities of small regressions beyond 5%. ``` constraint_1 = OutcomeConstraint(metric=constraint_metric, op=ComparisonOp.LEQ, bound=1) constraint_1_results = gp.gen( n=3, optimization_config=OptimizationConfig( objective=Objective(objective_metric), outcome_constraints=[constraint_1], ) ) render(plot_multiple_metrics( gp, metric_x=METRIC_X_AXIS, metric_y=METRIC_Y_AXIS, generator_runs_dict={ "constraint_1": constraint_1_results, } )) ``` Finally, let's view all three sets of candidates together. ``` render(plot_multiple_metrics( gp, metric_x=METRIC_X_AXIS, metric_y=METRIC_Y_AXIS, generator_runs_dict={ 'unconstrained': unconstrained, 'loose_constraint': constraint_5_results, 'tight_constraint': constraint_1_results, } )) ``` ## Creating a New Trial Having done the analysis and candidate generation for three different optimization configs, we can easily create a new `BatchTrial` which combines the candidates from these three different optimizations. Each set of candidates looks promising -- the point estimates are higher along both metric values than in the previous batch. However, there is still a good bit of uncertainty in our predictions. 
It is hard to choose between the different constraint settings without reducing this noise, so we choose to run a new trial with all three constraint settings. However, we're generally convinced that the tight constraint is too conservative. We'd still like to reduce our uncertainty in that region, but we'll only take one arm from that set. ``` # We can add entire generator runs, when constructing a new trial. trial = experiment.new_batch_trial().add_generator_run(unconstrained).add_generator_run(constraint_5_results) # Or, we can hand-pick arms. trial.add_arm(constraint_1_results.arms[0]) ``` The arms are combined into a single trial, along with the `status_quo` arm. Their generator can be accessed from the trial as well. ``` experiment.trials[1].arms ``` The original `GeneratorRuns` can be accessed from within the trial as well. This is useful for later analyses, allowing introspection of the `OptimizationConfig` used for generation (as well as other information, e.g. `SearchSpace` used for generation). ``` experiment.trials[1]._generator_run_structs ``` Here, we can see the unconstrained set-up used for our first set of candidates. ``` experiment.trials[1]._generator_run_structs[0].generator_run.optimization_config ```
github_jupyter
```
%load_ext autoreload
%autoreload 2
```

# FP16

In this notebook we are going to implement mixed precision floating points. By default, all computations are done in single-precision which means that all the floats (inputs, activations and weights) are 32-bit floats. If we could use 16-bit floats for each of these values, we would save half the space in RAM and this would enable us to double the size of our model and double the batch size (the first helping to get better results and the second to train quicker). However, half-precision floating points might lead to not-as-accurate results. Specifically, around 1 half-precision floating points can only represent 1, 1+2^-10, 1+2\*2^-10 ... which in standard notation are 1, 1.0009765625, 1.001953125 ... (for more information on the limitations of the encoding of half-precision floating point numbers click [here](https://en.wikipedia.org/wiki/Half-precision_floating-point_format)). There are some specific calculations where this lack of accuracy will impact our results. These are:

1. When updating the weights we basically do _w=w-lr*w.grad_ for each weight and usually _lr*w.grad_ is several orders of magnitude below *w*. When this happens (e.g. _w=1_ and _lr*w.grad=0.0001_) the update will have no effect.
2. Your gradients may be replaced by 0 because they are too low (underflow).
3. Activations or loss may hit nan/infinity (overflow) and training might more easily diverge.

To address these problems we will use a combination of different strategies. To take care of 1 and 3, we will use single-precision floating points for some parameters in the training. For 1, it's okay if *w* and *grad* are both half floats, but when we do the operation _w = w - lr * grad_, we need to compute it in FP32. To achieve this, we will keep a copy of the weights in FP32 precision (from now on, the master model) which we will update and then copy over to the original model.
When we copy the weights into the original model we will lose precision, but the updated weight will be kept in FP32 in the master model so that, when the updates add up to a value that can be represented in FP16, the original model can tell the difference (i.e. if the update is +0.0001, the new weight value is updated it will be 1.0001 and the original model will not be able to tell the difference but if it is updated five times the new weight value will be 1.0005 and the original model will incorporate it as 1.0005). For 3, we will simply keep our batchnorms in single-precision (so our activations are in single precision) and our loss in single-precision (done by converting the last output of the model to single precision before passing it to the loss). For 2, we will take a different approach, called gradient scaling. We multiply the loss by a scale factor to place the values in a scale that FP16 can handle with more precision. We will then calculate the gradients by backpropagation and, before updating the weights, we will rescale the gradients to the original scale by dividing by _scale_ (remember that, because of the solution proposed in 1, we will update the weights in FP32 in the master model). 
```
#export
from nb_004a import *

# --- Data setup: CIFAR-10 subset (two classes), standard channel statistics ---
DATA_PATH = Path('data')
PATH = DATA_PATH/'cifar10'

data_mean,data_std = map(tensor, ([0.491, 0.482, 0.447], [0.247, 0.243, 0.261]))
cifar_norm,cifar_denorm = normalize_funcs(data_mean, data_std)

train_tfms = [flip_lr(p=0.5), pad(padding=4), crop(size=32, row_pct=(0,1.), col_pct=(0,1.))]
valid_tfms = []
bs = 64

#export
def to_half(b):
    # Cast only the inputs (b[0]) to FP16; the targets (b[1]) are left untouched.
    return [b[0].half(), b[1]]

@dataclass
class DeviceDataLoader():
    # Wraps a plain DataLoader: moves each batch to `device`, applies `tfms`,
    # and optionally casts inputs to half precision (controlled by `half`).
    dl: DataLoader
    device: torch.device
    tfms: List[Callable]=None
    half: bool = False

    def __len__(self): return len(self.dl)

    def proc_batch(self,b):
        # Order matters: device transfer first, then transforms, then the
        # (optional) FP16 cast, so transforms run on-device in full precision.
        b = to_device(self.device,b)
        if self.tfms is not None: b = self.tfms(b)
        return to_half(b) if self.half else b

    def __iter__(self):
        self.gen = map(self.proc_batch, self.dl)
        return iter(self.gen)

    @classmethod
    def create(cls, *args, device=default_device,tfms=tfms, **kwargs):
        # NOTE(review): the default `tfms=tfms` captures a module-level `tfms`
        # (presumably exported by nb_004a) at class-definition time -- confirm
        # that global exists and is the intended default.
        return cls(DataLoader(*args, **kwargs), device=device, tfms=tfms, half=False)

import nb_002b
# Monkey-patch the earlier notebook's class so the rest of the library picks
# up this half-precision-aware version.
nb_002b.DeviceDataLoader = DeviceDataLoader

train_ds = FilesDataset.from_folder(PATH/'train', classes=['airplane','dog'])
valid_ds = FilesDataset.from_folder(PATH/'test', classes=['airplane','dog'])
data = DataBunch.create(train_ds, valid_ds, bs=bs, num_workers=0,
                        train_tfm=train_tfms, valid_tfm=valid_tfms, dl_tfms=cifar_norm)
len(data.train_dl), len(data.valid_dl)

model = Darknet([1, 2, 2, 2, 2], num_classes=2, nf=16)
learn = Learner(data, model)
learn.metrics = [accuracy]
sched = OneCycleScheduler(learn, 0.1, 5)
```

# FP16

## Utils

```
#export
def bn2float(module):
    # Recursively cast every batchnorm layer (and only those) back to FP32:
    # half-precision batchnorm statistics are numerically unstable.
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): module.float()
    for child in module.children(): bn2float(child)
    return module

def model2half(model):
    "Converts the model to half precision except the batchnorm layers"
    return bn2float(model.half())
```

Helper function to save the master model in FP32 with flat tensors (apparently it helps with performance)

```
#export
from torch._utils import _unflatten_dense_tensors
from torch.nn.utils import parameters_to_vector
```

Now we will implement the three changes we noted above. A summary of the steps we will follow is: 1. Compute the output with the FP16 model, then the loss 2. Back-propagate the gradients in half-precision 3. Copy the gradients in FP32 precision 4. Do the update on the master model (in FP32 precision) 5. Copy the master model in the FP16 model

```
def vector_to_parameters1(vec, parameters):
    # Inverse of parameters_to_vector: copy slices of the flat vector `vec`
    # back into each parameter tensor, in order.
    # NOTE(review): `_check_param_device` is not imported in this notebook
    # (only `_unflatten_dense_tensors` and `parameters_to_vector` are), so
    # calling this function would raise NameError; it appears to be unused
    # scratch code -- confirm before relying on it.
    if not isinstance(vec, torch.Tensor):
        raise TypeError('expected torch.Tensor, but got: {}' .format(torch.typename(vec)))
    param_device = None
    pointer = 0
    for param in parameters:
        param_device = _check_param_device(param, param_device)
        num_param = torch.prod(torch.LongTensor(list(param.size())))
        param.data.copy_(vec[pointer:pointer + num_param].view(param.size()).data)
        pointer += num_param

#export
def get_master(layer_groups:Collection[nn.Module], flat_master:bool=False) -> Tuple[List[List[Tensor]], List[List[Tensor]]]:
    "Returns two lists, one for the model parameters in FP16 and one for the master parameters in FP32"
    # Only trainable parameters are tracked; frozen ones need no master copy.
    model_params = [[param for param in lg.parameters() if param.requires_grad] for lg in layer_groups]
    if flat_master:
        # One flat FP32 tensor per layer group (faster copies).
        master_params = [parameters_to_vector([param.data.float() for param in lg]) for lg in model_params]
        master_params = [torch.nn.Parameter(mp, requires_grad=True) for mp in master_params]
        for mp in master_params:
            # Pre-allocate .grad so the optimizer step never sees None.
            if mp.grad is None: mp.grad = mp.new(*mp.size())
        return model_params, [[mp] for mp in master_params]
    else:
        # One detached FP32 clone per parameter, re-marked as requiring grad.
        master_params = [[param.clone().float().detach() for param in lg] for lg in model_params]
        for mp in master_params:
            for param in mp: param.requires_grad = True
        return model_params, master_params

def model_g2master_g(model_params:Sequence[Tensor], master_params:Sequence[Tensor], flat_master:bool=False):
    "Copies the model gradients to the master parameters for the optimizer step"
    if flat_master:
        for model_group,master_group in zip(model_params,master_params):
            master_group[0].grad.data.copy_(parameters_to_vector([p.grad.data.float() for p in model_group]))
    else:
        for model_group,master_group in zip(model_params,master_params):
            for model, master in zip(model_group, master_group):
                if model.grad is not None:
                    # Lazily allocate the master .grad the first time around.
                    if master.grad is None: master.grad = master.data.new(*master.data.size())
                    master.grad.data.copy_(model.grad.data)
                else: master.grad = None

def master2model(model_params:Sequence[Tensor], master_params:Sequence[Tensor], flat_master:bool=False):
    "Copy master parameters to model parameters"
    if flat_master:
        for model_group,master_group in zip(model_params,master_params):
            # Un-flatten the single master vector back into per-parameter shapes.
            for model, master in zip(model_group, _unflatten_dense_tensors(master_group[0].data, model_group)):
                model.data.copy_(master)
    else:
        for model_group,master_group in zip(model_params,master_params):
            for model, master in zip(model_group, master_group):
                model.data.copy_(master.data)
```

## MixedPrecision

```
#export
from torch._utils import _unflatten_dense_tensors
from torch.nn.utils import parameters_to_vector

@dataclass
class MixedPrecision(Callback):
    "Callback that handles mixed-precision training"
    learn:Learner
    loss_scale:float=512.   # multiplier applied to the loss to avoid gradient underflow
    flat_master:bool=False  # keep the FP32 master copy as one flat tensor per layer group

    def __post_init__(self): assert torch.backends.cudnn.enabled, "Mixed precision training requires cudnn."

    def on_train_begin(self, **kwargs):
        #Insures the dataloaders are in half precision.
        self.learn.data.train_dl.half = True
        if hasattr(self.learn.data, 'valid_dl') and self.learn.data.valid_dl is not None:
            self.learn.data.valid_dl.half = True
        #Get a copy of the model params in FP32
        self.model_params, self.master_params = get_master(self.learn.layer_groups, self.flat_master)
        #Changes the optimizer so that the optimization step is done in FP32.
        # Rebuild the inner optimizer over the master (FP32) params, preserving
        # the hyper-parameters the fastai wrapper exposes (mom/wd/beta, per-group lr).
        opt = self.learn.opt
        mom,wd,beta = opt.mom,opt.wd,opt.beta
        opt_params = [{'params': mp, 'lr': lr} for mp,lr in zip(self.master_params, self.learn.opt._lr)]
        self.learn.opt.opt = self.learn.opt_fn(opt_params)
        opt.mom,opt.wd,opt.beta = mom,wd,beta

    def on_loss_begin(self, last_output:Tensor, **kwargs) -> Tensor:
        #It's better to compute the loss in FP32, to avoid reduction overflow.
        return last_output.float()

    def on_backward_begin(self, last_loss:Rank0Tensor, **kwargs) -> Rank0Tensor:
        #To avoid gradient underflow, we scale the gradients
        return last_loss * self.loss_scale

    def on_backward_end(self, **kwargs):
        #Convert the gradients back to FP32 and divide them by the scale.
        model_g2master_g(self.model_params, self.master_params, self.flat_master)
        for group in self.master_params:
            for param in group: param.grad.div_(self.loss_scale)

    def on_step_end(self, **kwargs):
        #Zeros the gradients of the model since the optimizer is disconnected.
        self.learn.model.zero_grad()
        #Update the params from master to model.
        master2model(self.model_params, self.master_params, self.flat_master)

def mixed_precision(loss_scale:float=512., flat_master:bool=False, **kwargs):
    # Factory returning a partially-applied callback, for use in callback_fns.
    return partial(MixedPrecision, loss_scale=loss_scale, flat_master=flat_master, **kwargs)

cbs = [one_cycle_scheduler(0.1)]
model = Darknet([1, 2, 2, 2, 2], num_classes=2, nf=16)
model = model2half(model)
learn = Learner(data, model, metrics=accuracy, callback_fns=cbs)
mp_cb = MixedPrecision(learn, flat_master=True)
learn.fit(2, 1e-2, callbacks=mp_cb)
learn.model.layers[0][0].weight.type()
mp_cb.master_params[0][0].size(),mp_cb.master_params[0][0].type()
```

## to_fp16

```
def to_fp16(learn:Learner, loss_scale:float=512., flat_master:bool=False):
    "Transforms the learner in FP16 precision"
    learn.model = model2half(learn.model)
    learn.mp_cb = MixedPrecision(learn, loss_scale=loss_scale, flat_master=flat_master)
    learn.callbacks.append(learn.mp_cb)
# Attach as a method so any Learner can opt in with learn.to_fp16(...).
Learner.to_fp16 = to_fp16

model = Darknet([1, 2, 2, 2, 2], num_classes=2, nf=16)
learn = Learner(data, model, metrics=accuracy, callback_fns=cbs)
learn.to_fp16(flat_master=True)
learn.fit(2, 1e-2)
learn.mp_cb.master_params[0][0].size()
```

## Test with discriminative lrs

```
model = Darknet([1, 2, 2, 2, 2], num_classes=2, nf=16)
model = model2half(model)
learn = Learner(data, model, metrics=accuracy)
learn.split(lambda m: (m.layers[5],m.layers[9]))
cbs = [MixedPrecision(learn, flat_master=True), OneCycleScheduler(learn, 0.1)]
learn.fit(1, 1e-2, callbacks=cbs)
learn.model.layers[0][0].weight.type()
for master in cbs[0].master_params: print(master[0].size(),master[0].type())
```
github_jupyter
Peakcalling Peak Stats
================================================================

This notebook is for the analysis of outputs from the peakcalling pipeline relating to the quality of the peakcalling steps.

There are several stats that you want collected and graphed - you can click on the links below to find the jupyter **notebooks** where you can directly interact with the code or the **html** files that can be opened in your web browser. Stats you should be interested in are:

Quality of Bam files for Peakcalling
------------------------------------

- how many reads input: [notebook](./1_peakcalling_filtering_Report.ipynb) [html](./1_peakcalling_filtering_Report.html)
- how many reads removed at each step (numbers and percentages): [notebook](./1_peakcalling_filtering_Report.ipynb) [html](./1_peakcalling_filtering_Report.html)
- how many reads left after filtering: [notebook](./1_peakcalling_filtering_Report.ipynb) [html](./1_peakcalling_filtering_Report.html)
- how many reads mapping to each chromosome before filtering?: [notebook](./2_peakcalling_filtering_Report_reads_per_chr.ipynb) [html](./2_peakcalling_filtering_Report_reads_per_chr.html)
- how many reads mapping to each chromosome after filtering?: [notebook](./2_peakcalling_filtering_Report_reads_per_chr.ipynb) [html](./2_peakcalling_filtering_Report_reads_per_chr.html)
- X:Y reads ratio: [notebook](./2_peakcalling_filtering_Report_reads_per_chr.ipynb) [html](./2_peakcalling_filtering_Report_reads_per_chr.html)
- insert size distribution after filtering for PE reads: [notebook](./3_peakcalling_filtering_Report_insert_sizes.ipynb) [html](./3_peakcalling_filtering_Report_insert_sizes.html)
- samtools flags - check how many reads are in categories they shouldn't be: [notebook](./1_peakcalling_filtering_Report.ipynb) [html](./1_peakcalling_filtering_Report.html)
- Picard stats - check how many reads are in categories they shouldn't be

Peakcalling stats
-----------------

- Number of peaks called in each
sample: [notebook](./4_peakcalling_peakstats.ipynb) [html](./4_peakcalling_peakstats.html) - Number of reads in peaks: [notebook](./4_peakcalling_peakstats.ipynb) [html](./4_peakcalling_peakstats.html) - Size distribution of the peaks - Location of peaks - correlation of peaks between samples - other things? - IDR stats - What peak lists are the best This notebook takes the sqlite3 database created by CGAT peakcalling_pipeline.py and uses it for plotting the above statistics It assumes a file directory of: location of database = project_folder/csvdb location of this notebook = project_folder/notebooks.dir/ Firstly lets load all the things that might be needed This is where we are and when the notebook was run ``` !pwd !date ```
github_jupyter
``` %pylab inline import numpy as np import torch import os from torch import nn from torch import optim from torch.nn import functional as F from torch import autograd from torch.autograd import Variable import nibabel as nib from torch.utils.data.dataset import Dataset from torch.utils.data import dataloader from nilearn import plotting from ADNI_dataset import * from BRATS_dataset import * from ATLAS_dataset import * from Model_alphaWGAN import * ``` # Configuration ``` BATCH_SIZE=4 gpu = True workers = 4 LAMBDA= 10 _eps = 1e-15 Use_BRATS=False Use_ATLAS = False #setting latent variable sizes latent_dim = 1000 trainset = ADNIdataset(augmentation=True) train_loader = torch.utils.data.DataLoader(trainset,batch_size=BATCH_SIZE, shuffle=True,num_workers=workers) if Use_BRATS: #'flair' or 't2' or 't1ce' trainset = BRATSdataset(imgtype='flair') train_loader = torch.utils.data.DataLoader(trainset,batch_size = BATCH_SIZE, shuffle=True, num_workers=workers) if Use_ATLAS: trainset = ATLASdataset(augmentation=True) train_loader = torch.utils.data.DataLoader(trainset,batch_size=BATCH_SIZE, shuffle=True,num_workers=workers) def inf_train_gen(data_loader): while True: for _,images in enumerate(data_loader): yield images G = Generator(noise = latent_dim) CD = Code_Discriminator(code_size = latent_dim ,num_units = 4096) D = Discriminator(is_dis=True) E = Discriminator(out_class = latent_dim,is_dis=False) G.cuda() D.cuda() CD.cuda() E.cuda() g_optimizer = optim.Adam(G.parameters(), lr=0.0002) d_optimizer = optim.Adam(D.parameters(), lr=0.0002) e_optimizer = optim.Adam(E.parameters(), lr = 0.0002) cd_optimizer = optim.Adam(CD.parameters(), lr = 0.0002) def calc_gradient_penalty(model, x, x_gen, w=10): """WGAN-GP gradient penalty""" assert x.size()==x_gen.size(), "real and sampled sizes do not match" alpha_size = tuple((len(x), *(1,)*(x.dim()-1))) alpha_t = torch.cuda.FloatTensor if x.is_cuda else torch.Tensor alpha = alpha_t(*alpha_size).uniform_() x_hat = x.data*alpha + 
x_gen.data*(1-alpha) x_hat = Variable(x_hat, requires_grad=True) def eps_norm(x): x = x.view(len(x), -1) return (x*x+_eps).sum(-1).sqrt() def bi_penalty(x): return (x-1)**2 grad_xhat = torch.autograd.grad(model(x_hat).sum(), x_hat, create_graph=True, only_inputs=True)[0] penalty = w*bi_penalty(eps_norm(grad_xhat)).mean() return penalty ``` # Training ``` real_y = Variable(torch.ones((BATCH_SIZE, 1)).cuda(async=True)) fake_y = Variable(torch.zeros((BATCH_SIZE, 1)).cuda(async=True)) criterion_bce = nn.BCELoss() criterion_l1 = nn.L1Loss() criterion_mse = nn.MSELoss() g_iter = 1 d_iter = 1 cd_iter =1 TOTAL_ITER = 200000 gen_load = inf_train_gen(train_loader) for iteration in range(TOTAL_ITER): for p in D.parameters(): p.requires_grad = False for p in CD.parameters(): p.requires_grad = False for p in E.parameters(): p.requires_grad = True for p in G.parameters(): p.requires_grad = True ############################################### # Train Encoder - Generator ############################################### for iters in range(g_iter): G.zero_grad() E.zero_grad() real_images = gen_load.__next__() _batch_size = real_images.size(0) real_images = Variable(real_images,volatile=True).cuda(async=True) z_rand = Variable(torch.randn((_batch_size,latent_dim)),volatile=True).cuda() z_hat = E(real_images).view(_batch_size,-1) x_hat = G(z_hat) x_rand = G(z_rand) c_loss = -CD(z_hat).mean() d_real_loss = D(x_hat).mean() d_fake_loss = D(x_rand).mean() d_loss = -d_fake_loss-d_real_loss l1_loss =10* criterion_l1(x_hat,real_images) loss1 = l1_loss + c_loss + d_loss if iters<g_iter-1: loss1.backward() else: loss1.backward(retain_graph=True) e_optimizer.step() g_optimizer.step() g_optimizer.step() ############################################### # Train D ############################################### for p in D.parameters(): p.requires_grad = True for p in CD.parameters(): p.requires_grad = False for p in E.parameters(): p.requires_grad = False for p in G.parameters(): p.requires_grad = 
False for iters in range(d_iter): d_optimizer.zero_grad() real_images = gen_load.__next__() _batch_size = real_images.size(0) z_rand = Variable(torch.randn((_batch_size,latent_dim)),volatile=True).cuda() real_images = Variable(real_images,volatile=True).cuda(async=True) z_hat = E(real_images).view(_batch_size,-1) x_hat = G(z_hat) x_rand = G(z_rand) x_loss2 = -2*D(real_images).mean()+D(x_hat).mean()+D(x_rand).mean() gradient_penalty_r = calc_gradient_penalty(D,real_images.data, x_rand.data) gradient_penalty_h = calc_gradient_penalty(D,real_images.data, x_hat.data) loss2 = x_loss2+gradient_penalty_r+gradient_penalty_h loss2.backward(retain_graph=True) d_optimizer.step() ############################################### # Train CD ############################################### for p in D.parameters(): p.requires_grad = False for p in CD.parameters(): p.requires_grad = True for p in E.parameters(): p.requires_grad = False for p in G.parameters(): p.requires_grad = False for iters in range(cd_iter): cd_optimizer.zero_grad() z_rand = Variable(torch.randn((_batch_size,latent_dim)),volatile=True).cuda() gradient_penalty_cd = calc_gradient_penalty(CD,z_hat.data, z_rand.data) loss3 = -CD(z_rand).mean() - c_loss + gradient_penalty_cd loss3.backward(retain_graph=True) cd_optimizer.step() ############################################### # Visualization ############################################### if iteration % 10 == 0: print('[{}/{}]'.format(iteration,TOTAL_ITER), 'D: {:<8.3}'.format(loss2.data[0].cpu().numpy()), 'En_Ge: {:<8.3}'.format(loss1.data[0].cpu().numpy()), 'Code: {:<8.3}'.format(loss3.data[0].cpu().numpy()), ) feat = np.squeeze((0.5*real_images[0]+0.5).data.cpu().numpy()) feat = nib.Nifti1Image(feat,affine = np.eye(4)) plotting.plot_img(feat,title="X_Real") plotting.show() feat = np.squeeze((0.5*x_hat[0]+0.5).data.cpu().numpy()) feat = nib.Nifti1Image(feat,affine = np.eye(4)) plotting.plot_img(feat,title="X_DEC") plotting.show() feat = 
np.squeeze((0.5*x_rand[0]+0.5).data.cpu().numpy()) feat = nib.Nifti1Image(feat,affine = np.eye(4)) plotting.plot_img(feat,title="X_rand") plotting.show() ############################################### # Model Save ############################################### if (iteration+1)%500 ==0: torch.save(G.state_dict(),'./checkpoint/G_iter'+str(iteration+1+es)+'.pth') torch.save(D.state_dict(),'./checkpoint/D_iter'+str(iteration+1+es)+'.pth') torch.save(E.state_dict(),'./checkpoint/E_iter'+str(iteration+1+es)+'.pth') torch.save(CD.state_dict(),'./checkpoint/CD_iter'+str(iteration+1+es)+'.pth') ```
github_jupyter
This notebook is adapted from a lesson from the 2019 [KIPAC/StatisticalMethods course](https://github.com/KIPAC/StatisticalMethods), (c) 2019 Adam Mantz and Phil Marshall, licensed under the [GPLv2](LICENSE). # Generative Models and Probabilistic Graphical Models Goals: * Introduce generative models in the context of mocking data and inference * Introduce probabilistic graphical models as a tool for model visualization * Practice drawing some PGMs You've already seen Bayes Theorem and the role of the prior, sampling and posterior distributions. Fully specifying these requires us to write down (or at least approximate) _how the data set comes to exist,_ e.g. * physical processes happening out there in the Universe * instrumental effects and the measurement process * any computations done prior to calling the result a "data set" A **generative model** is formally the joint distribution of all our data and model parameters, $P(\mathrm{data},\mathrm{params}) = P(\mathrm{params}) P(\mathrm{data}|\mathrm{params})$ It encodes the modelling information needed for both inference and creating mock data. What are generative models useful for? * Performing inference: constructing the *sampling distribution* or *likelihood function* * Testing inference: does our analysis, run on mock data, recover the input model? * Checking inferences: do mock data generated from a fitted model resemble the real data? $P(\mathrm{data},\mathrm{params})$ is an abstract beast, but usually it factorizes in helpful ways. This is where **probabilistic graphical models** (PGMs) come in. A PGM is a sketch that encodes the conditional dependences within a generative model. PGMs are to inference what free-body diagrams are to kinematics. Everyone hates having to draw them, yet everyone makes fewer mistakes when they do. Let's do a simple example, first specifying a problem in words and then building the PGM. 
Here's an image (and a zoom-in): <table><tr width=90%> <td><img src="graphics/tour_cluster_image.png" height=300></td> <td><img src="graphics/tour_cluster_image_zoom.png" height=300></td> </tr></table> Our measurement is the number of counts in each pixel. Here is a generative model in words: * There's an object emitting light, whose properties are parametrized by $\theta$. * From $\theta$, we can determine the average flux falling on a given pixel $k$, $F_k$. * Given the exposure time of our observation, $T$ (and some conversion factors), $F_k$ determines the average number of counts expected, $\mu_k$. * The number of counts measured, $N_k$, is a Poisson draw, given the average $\mu_k$. Notice that the model was described in terms of conditional relationships. * $\theta \sim$ some prior For every pixel, $k$ * $F_k \Leftarrow \theta,k$ * $\mu_k \Leftarrow F_k,T$ * $N_k \sim \mathrm{Poisson}(\mu_k)$ NB: $\Leftarrow$ indicates a deterministic dependence, $\sim$ means "is distributed as". The PGM shows most of the same information, visually: <img src="graphics/pgms_pixelcounts.png"> Ingredients of a PGM (sometimes also called a *directed acyclic graph*): * **Nodes** (dots and circles) represent PDFs for parameters * **Edges** (arrows) represent conditional relationships * **Plates** (rectangles) represent repeated model components whose contents are conditionally independent Types of nodes: * **Circles** represent a PDF. This parameter is a *stochastic* ($\sim$) function of the parameters feeding into it. * **Points** represent a delta-function PDF. This parameter is a *deterministic* ($\Leftarrow$) function of the parameters feeding into it. * **Double circles** (or shading) indicate measured data. They are stochastic ($\sim$) in the context of generating mock data, but fixed in the context of parameter inference. Q: According to this PGM, how can we factorize $p(\theta,T,\{F_k, \mu_k, N_k\})$? 
<img src="graphics/pgms_pixelcounts.png"> What does all this imply in the context of 1. parameter inference? 2. mock data generation? The key here is that the PGM shows _conditional dependences_ - therefore, it also shows (by omission) where parameters are _conditionally independent_. That feature, plus the directional aspect, mean that the PGM is a map to the most logical sequence of steps (lines in code) for either generating mock data or evaluating the posterior density of real data. Q: How are these PGMs different, and what does the difference mean? <table><tr><td> <img src="graphics/pgms_pixelcounts.png"> </td><td> <img src="graphics/pgms_pixelcounts2.png"> </td></tr></table> In this case, some PDFs are delta functions, so we can straightforwardly marginalize over such _deterministic_ variables: $p(\theta,\{N_k\}) = $ $\quad \int dF_k\,d\mu_k\,dT\; p(\theta)p(T) \prod_k P(N_k|\mu_k)p(\mu_k|F_k,T)p(F_k|\theta)$ $= \underbrace{p(\theta)} ~ \underbrace{\prod_k P\left(N_k|\mu_k(\theta,T)\right)}$ $= \mathrm{prior}(\theta) ~\times~ (\mathrm{sampling~distribution~of~}\vec{N})$ We *could* have written/drawn everything without explicitly mentioning $F_k$ (or even $\mu_k$). Like all simplifications, this is sometimes helpful and sometimes a pitfall. ### Exercise <table width=60%><tr> <td><img src="graphics/pgms_a-c-d.png"></td> <td><img src="graphics/pgms_c-y-d.png"></td> </tr></table> * On your own, write down the probability expressions (factorization of the generative model) illustrated by these two graphs. * Then, discuss their meaning with your neighbor, and prepare to report back to the class. ### Take-home messages * Both simulation of mock data and inference from data require a model for how the Universe (or our computer) generates data. * PGMs are a helpful way of visualizing the conditional dependences of a model (how the probability expressions factorize). 
Note: the `daft` Python package can be useful for making PGMs programatically, though it's no substitute for paper. ### Exercise: linear regression Your data is a list of $\{x_k,y_k,\sigma_k\}$ triplets, where $\sigma_k$ is some estimate of the "error" on $y_k$. You think a linear model, $y(x)=a+bx$, might explain these data. To start exploring this idea, you decide to generate some simulated data, to compare with your real dataset. In the absence of any better information, assume that $\vec{x}$ and $\vec{\sigma}$ are (somehow) known precisely, and that the "error" on $y_k$ is Gaussian (mean of $a+bx_k$ and standard deviation $\sigma_k$). 1. Draw the PGM, and write down the corresponding probability expressions, for this problem. 2. What (unspecified) assumptions, if any, would you have to make to actually generate data? Which assumptions do you think are unlikely to hold in practice? Choose one (or more) of these assumptions and work out how to generalize the PGM/generative model to avoid making it. ## Bonus numerical exercise: Extending the linear regression exercise, simulate a few data sets, given some values (your choice) for the input parameters. The commented code below is a (crummy) starting point. ``` ''' import numpy as np import scipy.stats as st import matplotlib matplotlib.use('TkAgg') import matplotlib.pyplot as plt plt.rcParams['xtick.labelsize'] = 'x-large' plt.rcParams['ytick.labelsize'] = 'x-large' %matplotlib inline '''; """ # Choose some linear model parameters, somehow a = b = # Choose some x and sigma values... somehow n = 10 # Number of data points. Feel free to change. 
x = np.array([ sigma = np.array([ # Work out the values for any intermediate nodes in your PGM # generate the "observed" y values y = st.norm.rvs( """; """ # plot x, y and sigma in the usual way plt.rcParams['figure.figsize'] = (12.0, 5.0) plt.errorbar(x, y, yerr=sigma, fmt='none'); plt.plot(x, y, 'bo'); plt.xlabel('x', fontsize=14); plt.ylabel('y', fontsize=14); """; ``` ## Bonus exercise: Exoplanet transit photometry You've taken several images of a particular field, in order to record the transit of an exoplanet in front of a star (resulting in a temporary decrease in its brightness). Some kind of model, parametrized by $\theta$, describes the time series of the resulting flux. Before we get to measure a number of counts, however, each image is affected by time-specific variables, e.g. related to changing weather. To account for these, you've also measured a second star in the same field in every exposure. The assumption is that the average intrinsic flux of this second star is constant in time, so that it can be used to correct for photometric variations, putting the multiple measurements of the target star on the same scale. Draw a PGM and write down the corresponding probability expressions for this problem. Thanks to Anja von der Linden for inspiring (and then correcting) the above problem. Note: Sketchy solutions for the PGM-drawing exercises can be found with the corresponding material from DSFP Session 4.
github_jupyter
# 02 - Introduction to Python for Data Analysis by [Alejandro Correa Bahnsen](http://www.albahnsen.com/) & [Iván Torroledo](http://www.ivantorroledo.com/) version 1.2, Feb 2018 ## Part of the class [Machine Learning for Risk Management](https://github.com/albahnsen/ML_RiskManagement) This notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US). Special thanks goes to [Rick Muller](http://www.cs.sandia.gov/~rmuller/), Sandia National Laboratories ## Why Python? Python is the programming language of choice for many scientists to a large degree because it offers a great deal of power to analyze and model scientific data with relatively little overhead in terms of learning, installation or development time. It is a language you can pick up in a weekend, and use for the rest of one's life. The [Python Tutorial](http://docs.python.org/3/tutorial/) is a great place to start getting a feel for the language. To complement this material, I taught a [Python Short Course](http://www.wag.caltech.edu/home/rpm/python_course/) years ago to a group of computational chemists during a time that I was worried the field was moving too much in the direction of using canned software rather than developing one's own methods. I wanted to focus on what working scientists needed to be more productive: parsing output of other programs, building simple models, experimenting with object oriented programming, extending the language with C, and simple GUIs. I'm trying to do something very similar here, to cut to the chase and focus on what scientists need. In the last year or so, the [Jupyter Project](http://jupyter.org) has put together a notebook interface that I have found incredibly valuable. A large number of people have released very good IPython Notebooks that I have taken a huge amount of pleasure reading through. 
Some that I particularly like include: * Rick Muller [A Crash Course in Python for Scientists](http://nbviewer.jupyter.org/gist/rpmuller/5920182) * Rob Johansson's [excellent notebooks](http://jrjohansson.github.io/), including [Scientific Computing with Python](https://github.com/jrjohansson/scientific-python-lectures) and [Computational Quantum Physics with QuTiP](https://github.com/jrjohansson/qutip-lectures) lectures; * [XKCD style graphs in matplotlib](http://nbviewer.ipython.org/url/jakevdp.github.com/downloads/notebooks/XKCD_plots.ipynb); * [A collection of Notebooks for using IPython effectively](https://github.com/ipython/ipython/tree/master/examples/notebooks#a-collection-of-notebooks-for-using-ipython-effectively) * [A gallery of interesting IPython Notebooks](https://github.com/ipython/ipython/wiki/A-gallery-of-interesting-IPython-Notebooks) I find Jupyter notebooks an easy way both to get important work done in my everyday job, as well as to communicate what I've done, how I've done it, and why it matters to my coworkers. In the interest of putting more notebooks out into the wild for other people to use and enjoy, I thought I would try to recreate some of what I was trying to get across in the original Python Short Course, updated by 15 years of Python, Numpy, Scipy, Pandas, Matplotlib, and IPython development, as well as my own experience in using Python almost every day of this time. ## Why Python for Data Analysis? - Python is great for scripting and applications. - The `pandas` library offers improved library support. - Scraping, web APIs - Strong High Performance Computation support - Load balancing tasks - MPI, GPU - MapReduce - Strong support for abstraction - Intel MKL - HDF5 - Environment ## But we already know R ...Which is better? 
Hard to answer http://www.kdnuggets.com/2015/05/r-vs-python-data-science.html http://www.kdnuggets.com/2015/03/the-grammar-data-science-python-vs-r.html https://www.datacamp.com/community/tutorials/r-or-python-for-data-analysis https://www.dataquest.io/blog/python-vs-r/ http://www.dataschool.io/python-or-r-for-data-science/ ## What You Need to Install There are two branches of current releases in Python: the older-syntax Python 2, and the newer-syntax Python 3. This schizophrenia is largely intentional: when it became clear that some non-backwards-compatible changes to the language were necessary, the Python dev-team decided to go through a five-year (or so) transition, during which the new language features would be introduced and the old language was still actively maintained, to make such a transition as easy as possible. Nonetheless, I'm going to write these notes with Python 3 in mind, since this is the version of the language that I use in my day-to-day job, and am most comfortable with. With this in mind, these notes assume you have a Python distribution that includes: * [Python](http://www.python.org) version 3.5; * [Numpy](http://www.numpy.org), the core numerical extensions for linear algebra and multidimensional arrays; * [Scipy](http://www.scipy.org), additional libraries for scientific programming; * [Matplotlib](http://matplotlib.sf.net), excellent plotting and graphing libraries; * [IPython](http://ipython.org), with the additional libraries required for the notebook interface. * [Pandas](http://pandas.pydata.org/), Python version of R dataframe * [scikit-learn](http://scikit-learn.org), Machine learning library! A good, easy to install option that supports Mac, Windows, and Linux, and that has all of these packages (and much more) is the [Anaconda](https://www.continuum.io/). 
### Checking your installation You can run the following code to check the versions of the packages on your system: (in IPython notebook, press `shift` and `return` together to execute the contents of a cell) ``` import sys print('Python version:', sys.version) import IPython print('IPython:', IPython.__version__) import numpy print('numpy:', numpy.__version__) import scipy print('scipy:', scipy.__version__) import matplotlib print('matplotlib:', matplotlib.__version__) import pandas print('pandas:', pandas.__version__) import sklearn print('scikit-learn:', sklearn.__version__) ``` # I. Python Overview This is a quick introduction to Python. There are lots of other places to learn the language more thoroughly. I have collected a list of useful links, including ones to other learning resources, at the end of this notebook. If you want a little more depth, [Python Tutorial](http://docs.python.org/2/tutorial/) is a great place to start, as is Zed Shaw's [Learn Python the Hard Way](http://learnpythonthehardway.org/book/). The lessons that follow make use of the IPython notebooks. There's a good introduction to notebooks [in the IPython notebook documentation](http://ipython.org/notebook.html) that even has a [nice video](http://www.youtube.com/watch?v=H6dLGQw9yFQ#!) on how to use the notebooks. You should probably also flip through the [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html) in your copious free time. Briefly, notebooks have code cells (that are generally followed by result cells) and text cells. The text cells are the stuff that you're reading now. The code cells start with "In []:" with some number generally in the brackets. If you put your cursor in the code cell and hit Shift-Enter, the code will run in the Python interpreter and the result will print out in the output cell. You can then change things around and see whether you understand what's going on. 
If you need to know more, see the [IPython notebook documentation](http://ipython.org/notebook.html) or the [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html). ## Using Python as a Calculator Many of the things I used to use a calculator for, I now use Python for: ``` 2+2 (50-5*6)/4 ``` (If you're typing this into an IPython notebook, or otherwise using a notebook file, you hit shift-Enter to evaluate a cell.) In the last few lines, we have sped by a lot of things that we should stop for a moment and explore a little more fully. We've seen, however briefly, two different data types: **integers**, also known as *whole numbers* to the non-programming world, and **floating point numbers**, also known (incorrectly) as *decimal numbers* to the rest of the world. We've also seen the first instance of an **import** statement. Python has a huge number of libraries included with the distribution. To keep things simple, most of these variables and functions are not accessible from a normal Python interactive session. Instead, you have to import the name. For example, there is a **math** module containing many useful functions. To access, say, the square root function, you can either first from math import sqrt and then ``` sqrt(81) from math import sqrt sqrt(81) ``` or you can simply import the math library itself ``` import math math.sqrt(81) ``` You can define variables using the equals (=) sign: ``` radius = 20 pi = math.pi area = pi * radius ** 2 area ``` You can name a variable *almost* anything you want. It needs to start with an alphabetical character or "\_", can contain alphanumeric characters plus underscores ("\_"). 
Certain words, however, are reserved for the language: and, as, assert, break, class, continue, def, del, elif, else, except, exec, finally, for, from, global, if, import, in, is, lambda, not, or, pass, print, raise, return, try, while, with, yield Trying to define a variable using one of these will result in a syntax error: ``` return = 0 ``` The [Python Tutorial](http://docs.python.org/2/tutorial/introduction.html#using-python-as-a-calculator) has more on using Python as an interactive shell. The [IPython tutorial](http://ipython.org/ipython-doc/dev/interactive/tutorial.html) makes a nice complement to this, since IPython has a much more sophisticated interactive shell. ## Strings Strings are lists of printable characters, and can be defined using either single quotes ``` 'Hello, World!' ``` or double quotes ``` "Hello, World!" ``` Just like the other two data objects we're familiar with (ints and floats), you can assign a string to a variable ``` greeting = "Hello, World!" ``` The **print** statement is often used for printing character strings: ``` print(greeting) ``` But it can also print data types other than strings: ``` print("The area is " + area) print("The area is " + str(area)) ``` In the above snippet, the value stored in the variable "area" is converted into a string before being printed out. You can use the + operator to concatenate strings together: Don't forget the space between the strings, if you want one there. ``` statement = "Hello, " + "World!" print(statement) ``` If you have a lot of words to concatenate together, there are other, more efficient ways to do this. But this is fine for linking a few strings together. ## Lists Very often in a programming language, one wants to keep a group of similar items together. Python does this using a data type called **lists**. 
``` days_of_the_week = ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"] ``` You can access members of the list using the **index** of that item: ``` days_of_the_week[2] ``` Python lists, like C, but unlike Fortran, use 0 as the index of the first element of a list. Thus, in this example, the 0 element is "Sunday", 1 is "Monday", and so on. If you need to access the *n*th element from the end of the list, you can use a negative index. For example, the -1 element of a list is the last element: ``` days_of_the_week[-1] ``` You can add additional items to the list using the .append() command: ``` languages = ["Fortran","C","C++"] languages.append("Python") print(languages) ``` The **range()** command is a convenient way to make sequential lists of numbers: ``` list(range(10)) ``` Note that range(n) starts at 0 and gives the sequential list of integers less than n. If you want to start at a different number, use range(start,stop) ``` list(range(2,8)) ``` The lists created above with range have a *step* of 1 between elements. You can also give a fixed step size via a third command: ``` evens = list(range(0,20,2)) evens evens[3] ``` Lists do not have to hold the same data type. For example, ``` ["Today",7,99.3,""] ``` However, it's good (but not essential) to use lists for similar objects that are somehow logically connected. If you want to group different data types together into a composite data object, it's best to use **tuples**, which we will learn about below. You can find out how long a list is using the **len()** command: ``` help(len) len(evens) ``` ## Iteration, Indentation, and Blocks One of the most useful things you can do with lists is to *iterate* through them, i.e. to go through each element one at a time. To do this in Python, we use the **for** statement: ``` for day in days_of_the_week: print(day) ``` This code snippet goes through each element of the list called **days_of_the_week** and assigns it to the variable **day**. 
It then executes everything in the indented block (in this case only one line of code, the print statement) using those variable assignments. When the program has gone through every element of the list, it exits the block. (Almost) every programming language defines blocks of code in some way. In Fortran, one uses END statements (ENDDO, ENDIF, etc.) to define code blocks. In C, C++, and Perl, one uses curly braces {} to define these blocks. Python uses a colon (":"), followed by indentation level to define code blocks. Everything at a higher level of indentation is taken to be in the same block. In the above example the block was only a single line, but we could have had longer blocks as well: ``` for day in days_of_the_week: statement = "Today is " + day print(statement) ``` The **range()** command is particularly useful with the **for** statement to execute loops of a specified length: ``` for i in range(20): print("The square of ",i," is ",i*i) ``` ## Slicing Lists and strings have something in common that you might not suspect: they can both be treated as sequences. You already know that you can iterate through the elements of a list. You can also iterate through the letters in a string: ``` for letter in "Sunday": print(letter) ``` This is only occasionally useful. Slightly more useful is the *slicing* operation, which you can also use on any sequence. We already know that we can use *indexing* to get the first element of a list: ``` days_of_the_week[0] ``` If we want the list containing the first two elements of a list, we can do this via ``` days_of_the_week[0:2] ``` or simply ``` days_of_the_week[:2] ``` If we want the last items of the list, we can do this with negative slicing: ``` days_of_the_week[-2:] ``` which is somewhat logically consistent with negative indices accessing the last elements of the list. 
You can do: ``` workdays = days_of_the_week[1:6] print(workdays) ``` Since strings are sequences, you can also do this to them: ``` day = "Sunday" abbreviation = day[:3] print(abbreviation) ``` If we really want to get fancy, we can pass a third element into the slice, which specifies a step length (just like a third argument to the **range()** function specifies the step): ``` numbers = list(range(0,40)) evens = numbers[2::2] evens ``` Note that in this example I was even able to omit the second argument, so that the slice started at 2, went to the end of the list, and took every second element, to generate the list of even numbers less than 40. ## Booleans and Truth Testing We have now learned a few data types. We have integers and floating point numbers, strings, and lists to contain them. We have also learned about lists, a container that can hold any data type. We have learned to print things out, and to iterate over items in lists. We will now learn about **boolean** variables that can be either True or False. We invariably need some concept of *conditions* in programming to control branching behavior, to allow a program to react differently to different situations. If it's Monday, I'll go to work, but if it's Sunday, I'll sleep in. To do this in Python, we use a combination of **boolean** variables, which evaluate to either True or False, and **if** statements, that control branching based on boolean values. For example: ``` if day == "Sunday": print("Sleep in") else: print("Go to work") ``` (Quick quiz: why did the snippet print "Go to work" here? What is the variable "day" set to?) Let's take the snippet apart to see what happened. First, note the statement ``` day == "Sunday" ``` If we evaluate it by itself, as we just did, we see that it returns a boolean value, False. The "==" operator performs *equality testing*. If the two items are equal, it returns True, otherwise it returns False. 
In this case, it is comparing two variables, the string "Sunday", and whatever is stored in the variable "day", which, in this case, is the other string "Saturday". Since the two strings are not equal to each other, the truth test has the false value. The if statement that contains the truth test is followed by a code block (a colon followed by an indented block of code). If the boolean is true, it executes the code in that block. Since it is false in the above example, we don't see that code executed. The first block of code is followed by an **else** statement, which is executed if nothing else in the above if statement is true. Since the value was false, this code is executed, which is why we see "Go to work". You can compare any data types in Python: ``` 1 == 2 50 == 2*25 3 < 3.14159 1 == 1.0 1 != 0 1 <= 2 1 >= 1 ``` We see a few other boolean operators here, all of which should be self-explanatory. Less than, equality, non-equality, and so on. Particularly interesting is the 1 == 1.0 test, which is true, since even though the two objects are different data types (integer and floating point number), they have the same *value*. There is another boolean operator **is**, that tests whether two objects are the same object: ``` 1 is 1.0 ``` We can do boolean tests on lists as well: ``` [1,2,3] == [1,2,4] [1,2,3] < [1,2,4] ``` Finally, note that you can also string multiple comparisons together, which can result in very intuitive tests: ``` hours = 5 0 < hours < 24 ``` If statements can have **elif** parts ("else if"), in addition to if/else parts. 
For example: ``` if day == "Sunday": print("Sleep in") elif day == "Saturday": print("Do chores") else: print("Go to work") ``` Of course we can combine if statements with for loops, to make a snippet that is almost interesting: ``` for day in days_of_the_week: statement = "Today is " + day print(statement) if day == "Sunday": print(" Sleep in") elif day == "Saturday": print(" Do chores") else: print(" Go to work") ``` This is something of an advanced topic, but ordinary data types have boolean values associated with them, and, indeed, in early versions of Python there was not a separate boolean object. Essentially, anything that was a 0 value (the integer or floating point 0, an empty string "", or an empty list []) was False, and everything else was true. You can see the boolean value of any data object using the **bool()** function. ``` bool(1) bool(0) bool(["This "," is "," a "," list"]) ``` ## Code Example: The Fibonacci Sequence The [Fibonacci sequence](http://en.wikipedia.org/wiki/Fibonacci_number) is a sequence in math that starts with 0 and 1, and then each successive entry is the sum of the previous two. Thus, the sequence goes 0,1,1,2,3,5,8,13,21,34,55,89,... A very common exercise in programming books is to compute the Fibonacci sequence up to some number **n**. First I'll show the code, then I'll discuss what it is doing. ``` n = 10 sequence = [0,1] for i in range(2,n): # This is going to be a problem if we ever set n <= 2! sequence.append(sequence[i-1]+sequence[i-2]) print(sequence) ``` Let's go through this line by line. First, we define the variable **n**, and set it to the integer 10. **n** is the length of the sequence we're going to form, and should probably have a better variable name. We then create a variable called **sequence**, and initialize it to the list with the integers 0 and 1 in it, the first two elements of the Fibonacci sequence. 
We have to create these elements "by hand", since the iterative part of the sequence requires two previous elements. We then have a for loop over the list of integers from 2 (the next element of the list) to **n** (the length of the sequence). After the colon, we see a hash tag "#", and then a **comment** that if we had set **n** to some number less than 2 we would have a problem. Comments in Python start with #, and are good ways to make notes to yourself or to a user of your code explaining why you did what you did. Better than the comment here would be to test to make sure the value of **n** is valid, and to complain if it isn't; we'll try this later. In the body of the loop, we append to the list an integer equal to the sum of the two previous elements of the list. After exiting the loop (ending the indentation) we then print out the whole list. That's it! ## Functions We might want to use the Fibonacci snippet with different sequence lengths. We could cut and paste the code into another cell, changing the value of **n**, but it's easier and more useful to make a function out of the code. We do this with the **def** statement in Python: ``` def fibonacci(sequence_length): "Return the Fibonacci sequence of length *sequence_length*" sequence = [0,1] if sequence_length < 1: print("Fibonacci sequence only defined for length 1 or greater") return if 0 < sequence_length < 3: return sequence[:sequence_length] for i in range(2,sequence_length): sequence.append(sequence[i-1]+sequence[i-2]) return sequence ``` We can now call **fibonacci()** for different sequence_lengths: ``` fibonacci(2) fibonacci(12) ``` We've introduced several new features here. First, note that the function itself is defined as a code block (a colon followed by an indented block). This is the standard way that Python delimits things. Next, note that the first line of the function is a single string. 
This is called a **docstring**, and is a special kind of comment that is often available to people using the function through the python command line: ``` help(fibonacci) ``` If you define a docstring for all of your functions, it makes it easier for other people to use them, since they can get help on the arguments and return values of the function. Next, note that rather than putting a comment in about what input values lead to errors, we have some testing of these values, followed by a warning if the value is invalid, and some conditional code to handle special cases. ## Two More Data Structures: Tuples and Dictionaries Before we end the Python overview, I wanted to touch on two more data structures that are very useful (and thus very common) in Python programs. A **tuple** is a sequence object like a list or a string. It's constructed by grouping a sequence of objects together with commas, either without brackets, or with parentheses: ``` t = (1,2,'hi',9.0) t ``` Tuples are like lists, in that you can access the elements using indices: ``` t[1] ``` However, tuples are *immutable*, you can't append to them or change the elements of them: ``` t.append(7) t[1]=77 ``` Tuples are useful anytime you want to group different pieces of data together in an object, but don't want to create a full-fledged class (see below) for them. For example, let's say you want the Cartesian coordinates of some objects in your program. Tuples are a good way to do this: ``` ('Bob',0.0,21.0) ``` Again, it's not a necessary distinction, but one way to distinguish tuples and lists is that tuples are a collection of different things, here a name, and x and y coordinates, whereas a list is a collection of similar things, like if we wanted a list of those coordinates: ``` positions = [ ('Bob',0.0,21.0), ('Cat',2.5,13.1), ('Dog',33.0,1.2) ] ``` Tuples can be used when functions return more than one value. Say we wanted to compute the smallest x- and y-coordinates of the above list of objects. 
We could write: ``` def minmax(objects): minx = 1e20 # These are set to really big numbers miny = 1e20 for obj in objects: name,x,y = obj if x < minx: minx = x if y < miny: miny = y return minx,miny x,y = minmax(positions) print(x,y) ``` **Dictionaries** are an object called "mappings" or "associative arrays" in other languages. Whereas a list associates an integer index with a set of objects: ``` mylist = [1,2,9,21] ``` The index in a dictionary is called the *key*, and the corresponding dictionary entry is the *value*. A dictionary can use (almost) anything as the key. Whereas lists are formed with square brackets [], dictionaries use curly brackets {}: ``` ages = {"Rick": 46, "Bob": 86, "Fred": 21} print("Rick's age is ",ages["Rick"]) ``` There's also a convenient way to create dictionaries without having to quote the keys. ``` dict(Rick=46,Bob=86,Fred=20) ``` The **len()** command works on both tuples and dictionaries: ``` len(t) len(ages) ``` ## Conclusion of the Python Overview There is, of course, much more to the language than I've covered here. I've tried to keep this brief enough so that you can jump in and start using Python to simplify your life and work. My own experience in learning new things is that the information doesn't "stick" unless you try and use it for something in real life. You will no doubt need to learn more as you go. I've listed several other good references, including the [Python Tutorial](http://docs.python.org/2/tutorial/) and [Learn Python the Hard Way](http://learnpythonthehardway.org/book/). Additionally, now is a good time to start familiarizing yourself with the [Python Documentation](http://docs.python.org/2.7/), and, in particular, the [Python Language Reference](http://docs.python.org/2.7/reference/index.html). 
Tim Peters, one of the earliest and most prolific Python contributors, wrote the "Zen of Python", which can be accessed via the "import this" command: ``` import this ``` No matter how experienced a programmer you are, these are words to meditate on. # II. Numpy and Scipy [Numpy](http://numpy.org) contains core routines for doing fast vector, matrix, and linear algebra-type operations in Python. [Scipy](http://www.scipy.org) contains additional routines for optimization, special functions, and so on. Both contain modules written in C and Fortran so that they're as fast as possible. Together, they give Python roughly the same capability that the [Matlab](http://www.mathworks.com/products/matlab/) program offers. (In fact, if you're an experienced Matlab user, there is a [guide to Numpy for Matlab users](http://www.scipy.org/NumPy_for_Matlab_Users) just for you.) ## Making vectors and matrices Fundamental to both Numpy and Scipy is the ability to work with vectors and matrices. You can create vectors from lists using the **array** command: ``` import numpy as np import scipy as sp array = np.array([1,2,3,4,5,6]) array ``` size of the array ``` array.shape ``` To build matrices, you can either use the array command with lists of lists: ``` mat = np.array([[0,1],[1,0]]) mat ``` Add a column of ones to mat ``` mat2 = np.c_[mat, np.ones(2)] mat2 ``` size of a matrix ``` mat2.shape ``` You can also form empty (zero) matrices of arbitrary shape (including vectors, which Numpy treats as vectors with one row), using the **zeros** command: ``` np.zeros((3,3)) ``` There's also an **identity** command that behaves as you'd expect: ``` np.identity(4) ``` as well as a **ones** command. ## Linspace, matrix functions, and plotting The **linspace** command makes a linear array of points from a starting to an ending value. ``` np.linspace(0,1) ``` If you provide a third argument, it takes that as the number of points in the space. 
If you don't provide the argument, it gives a length 50 linear space. ``` np.linspace(0,1,11) ``` **linspace** is an easy way to make coordinates for plotting. Functions in the numpy library (all of which are imported into IPython notebook) can act on an entire vector (or even a matrix) of points at once. Thus, ``` x = np.linspace(0,2*np.pi) np.sin(x) ``` In conjunction with **matplotlib**, this is a nice way to plot things: ``` %matplotlib inline import matplotlib.pyplot as plt plt.style.use('ggplot') plt.plot(x,np.sin(x)) ``` ## Matrix operations Matrix objects act sensibly when multiplied by scalars: ``` 0.125*np.identity(3) ``` as well as when you add two matrices together. (However, the matrices have to be the same shape.) ``` np.identity(2) + np.array([[1,1],[1,2]]) ``` Something that confuses Matlab users is that the times (*) operator gives element-wise multiplication rather than matrix multiplication: ``` np.identity(2)*np.ones((2,2)) ``` To get matrix multiplication, you need the **dot** command: ``` np.dot(np.identity(2),np.ones((2,2))) ``` **dot** can also do dot products (duh!): ``` v = np.array([3,4]) np.sqrt(np.dot(v,v)) ``` as well as matrix-vector products. There are **determinant**, **inverse**, and **transpose** functions that act as you would suppose. Transpose can be abbreviated with ".T" at the end of a matrix object: ``` m = np.array([[1,2],[3,4]]) m.T np.linalg.inv(m) ``` There's also a **diag()** function that takes a list or a vector and puts it along the diagonal of a square matrix. ``` np.diag([1,2,3,4,5]) ``` We'll find this useful later on. ## Least squares fitting Very often we deal with some data that we want to fit to some sort of expected behavior. 
Say we have the following: ``` raw_data = """\ 3.1905781584582433,0.028208609537968457 4.346895074946466,0.007160804747670053 5.374732334047101,0.0046962988461934805 8.201284796573875,0.0004614473299618756 10.899357601713055,0.00005038370219939726 16.295503211991434,4.377451812785309e-7 21.82012847965739,3.0799922117601088e-9 32.48394004282656,1.524776208284536e-13 43.53319057815846,5.5012073588707224e-18""" ``` There's a section below on parsing CSV data. We'll steal the parser from that. For an explanation, skip ahead to that section. Otherwise, just assume that this is a way to parse that text into a numpy array that we can plot and do other analyses with. ``` data = [] for line in raw_data.splitlines(): words = line.split(',') data.append(words) data = np.array(data, dtype=np.float) data data[:, 0] plt.title("Raw Data") plt.xlabel("Distance") plt.plot(data[:,0],data[:,1],'bo') ``` Since we expect the data to have an exponential decay, we can plot it using a semi-log plot. ``` plt.title("Raw Data") plt.xlabel("Distance") plt.semilogy(data[:,0],data[:,1],'bo') ``` For a pure exponential decay like this, we can fit the log of the data to a straight line. The above plot suggests this is a good approximation. Given a function $$ y = Ae^{-ax} $$ $$ \log(y) = \log(A) - ax$$ Thus, if we fit the log of the data versus x, we should get a straight line with slope $a$, and an intercept that gives the constant $A$. There's a numpy function called **polyfit** that will fit data to a polynomial form. We'll use this to fit to a straight line (a polynomial of order 1) ``` params = sp.polyfit(data[:,0],np.log(data[:,1]),1) a = params[0] A = np.exp(params[1]) ``` Let's see whether this curve fits the data. ``` x = np.linspace(1,45) plt.title("Raw Data") plt.xlabel("Distance") plt.semilogy(data[:,0],data[:,1],'bo') plt.semilogy(x,A*np.exp(a*x),'b-') ``` If we have more complicated functions, we may not be able to get away with fitting to a simple polynomial. 
Consider the following data:

```
gauss_data = """\
-0.9902286902286903,1.4065274110372852e-19
-0.7566104566104566,2.2504438576596563e-18
-0.5117810117810118,1.9459459459459454
-0.31887271887271884,10.621621621621626
-0.250997150997151,15.891891891891893
-0.1463309463309464,23.756756756756754
-0.07267267267267263,28.135135135135133
-0.04426734426734419,29.02702702702703
-0.0015939015939017698,29.675675675675677
0.04689304689304685,29.10810810810811
0.0840994840994842,27.324324324324326
0.1700546700546699,22.216216216216214
0.370878570878571,7.540540540540545
0.5338338338338338,1.621621621621618
0.722014322014322,0.08108108108108068
0.9926849926849926,-0.08108108108108646"""

data = []
for line in gauss_data.splitlines():
    words = line.split(',')
    data.append(words)
# Use the builtin float: np.float was removed in NumPy 1.24.
data = np.array(data, dtype=float)

plt.plot(data[:,0],data[:,1],'bo')
```

This data looks more Gaussian than exponential. If we wanted to, we could use **polyfit** for this as well, but let's use the **curve_fit** function from Scipy, which can fit to arbitrary functions. You can learn more using help(curve_fit). First define a general Gaussian function to fit to.

```
def gauss(x,A,a):
    return A*np.exp(a*x**2)
```

Now fit to it using **curve_fit**:

```
from scipy.optimize import curve_fit

params,conv = curve_fit(gauss,data[:,0],data[:,1])
x = np.linspace(-1,1)
plt.plot(data[:,0],data[:,1],'bo')
A,a = params
plt.plot(x,gauss(x,A,a),'b-')
```

The **curve_fit** routine we just used is built on top of a very good general **minimization** capability in Scipy. You can learn more [at the scipy documentation pages](http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html). ## Monte Carlo and random numbers Many methods in scientific computing rely on Monte Carlo integration, where a sequence of (pseudo) random numbers are used to approximate the integral of a function. Python has good random number generators in the standard library.
The **random()** function gives pseudorandom numbers uniformly distributed between 0 and 1: ``` from random import random rands = [] for i in range(100): rands.append(random()) plt.plot(rands) ``` **random()** uses the [Mersenne Twister](http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/emt.html) algorithm, which is a highly regarded pseudorandom number generator. There are also functions to generate random integers, to randomly shuffle a list, and functions to pick random numbers from a particular distribution, like the normal distribution: ``` from random import gauss grands = [] for i in range(100): grands.append(gauss(0,1)) plt.plot(grands) ``` It is generally more efficient to generate a list of random numbers all at once, particularly if you're drawing from a non-uniform distribution. Numpy has functions to generate vectors and matrices of particular types of random distributions. ``` plt.plot(np.random.rand(100)) ``` # III. Introduction to Pandas ``` import pandas as pd import numpy as np ``` ## Series A Series is a one-dimensional array-like object containing an array of data and an associated array of data labels. The data can be any NumPy data type and the labels are the Series' index. Create a Series: ``` ser_1 = pd.Series([1, 1, 2, -3, -5, 8, 13]) ser_1 ``` Get the array representation of a Series: ``` ser_1.values ``` Index objects are immutable and hold the axis labels and metadata such as names and axis names. Get the index of the Series: ``` ser_1.index ``` Create a Series with a custom index: ``` ser_2 = pd.Series([1, 1, 2, -3, -5], index=['a', 'b', 'c', 'd', 'e']) ser_2 ``` Get a value from a Series: ``` ser_2[4] == ser_2['e'] ``` Get a set of values from a Series by passing in a list: ``` ser_2[['c', 'a', 'b']] ``` Get values great than 0: ``` ser_2[ser_2 > 0] ``` Scalar multiply: ``` ser_2 * 2 ``` Apply a numpy math function: ``` np.exp(ser_2) ``` A Series is like a fixed-length, ordered dict. 
Create a series by passing in a dict: ``` dict_1 = {'foo' : 100, 'bar' : 200, 'baz' : 300} ser_3 = pd.Series(dict_1) ser_3 ``` Re-order a Series by passing in an index (indices not found are NaN): ``` index = ['foo', 'bar', 'baz', 'qux'] ser_4 = pd.Series(dict_1, index=index) ser_4 ``` Check for NaN with the pandas method: ``` pd.isnull(ser_4) ``` Check for NaN with the Series method: ``` ser_4.isnull() ``` Series automatically aligns differently indexed data in arithmetic operations: ``` ser_3 + ser_4 ``` Name a Series: ``` ser_4.name = 'foobarbazqux' ``` Name a Series index: ``` ser_4.index.name = 'label' ser_4 ``` Rename a Series' index in place: ``` ser_4.index = ['fo', 'br', 'bz', 'qx'] ser_4 ``` ## DataFrame A DataFrame is a tabular data structure containing an ordered collection of columns. Each column can have a different type. DataFrames have both row and column indices and is analogous to a dict of Series. Row and column operations are treated roughly symmetrically. Columns returned when indexing a DataFrame are views of the underlying data, not a copy. To obtain a copy, use the Series' copy method. 
Create a DataFrame: ``` data_1 = {'state' : ['VA', 'VA', 'VA', 'MD', 'MD'], 'year' : [2012, 2013, 2014, 2014, 2015], 'pop' : [5.0, 5.1, 5.2, 4.0, 4.1]} df_1 = pd.DataFrame(data_1) df_1 df_2 = pd.DataFrame(data_1, columns=['year', 'state', 'pop']) df_2 ``` Like Series, columns that are not present in the data are NaN: ``` df_3 = pd.DataFrame(data_1, columns=['year', 'state', 'pop', 'unempl']) df_3 ``` Retrieve a column by key, returning a Series: ``` df_3['state'] ``` Retrive a column by attribute, returning a Series: ``` df_3.year ``` Retrieve a row by position: ``` df_3.iloc[0] ``` Update a column by assignment: ``` df_3['unempl'] = np.arange(5) df_3 ``` Assign a Series to a column (note if assigning a list or array, the length must match the DataFrame, unlike a Series): ``` unempl = pd.Series([6.0, 6.0, 6.1], index=[2, 3, 4]) df_3['unempl'] = unempl df_3 ``` Assign a new column that doesn't exist to create a new column: ``` df_3['state_dup'] = df_3['state'] df_3 ``` Delete a column: ``` del df_3['state_dup'] df_3 ``` Transpose the DataFrame: ``` df_3.T ``` Create a DataFrame from a nested dict of dicts (the keys in the inner dicts are unioned and sorted to form the index in the result, unless an explicit index is specified): ``` pop = {'VA' : {2013 : 5.1, 2014 : 5.2}, 'MD' : {2014 : 4.0, 2015 : 4.1}} df_4 = pd.DataFrame(pop) df_4 ``` Create a DataFrame from a dict of Series: ``` data_2 = {'VA' : df_4['VA'][1:], 'MD' : df_4['MD'][2:]} df_5 = pd.DataFrame(data_2) df_5 ``` Set the DataFrame index name: ``` df_5.index.name = 'year' df_5 ``` Set the DataFrame columns name: ``` df_5.columns.name = 'state' df_5 ``` Return the data contained in a DataFrame as a 2D ndarray: ``` df_5.values ``` If the columns are different dtypes, the 2D ndarray's dtype will accomodate all of the columns: ``` df_3.values ``` ## Reindexing Create a new object with the data conformed to a new index. Any missing values are set to NaN. 
``` df_3 ``` Reindexing rows returns a new frame with the specified index: ``` df_3.reindex(list(reversed(range(0, 6)))) ``` Reindex columns: ``` df_3.reindex(columns=['state', 'pop', 'unempl', 'year']) ``` ## Dropping Entries Drop rows from a Series or DataFrame: ``` df_7 = df_3.drop([0, 1]) df_7 df_7 = df_7.drop('unempl', axis=1) df_7 ``` ## Indexing, Selecting, Filtering Pandas supports indexing into a DataFrame. ``` df_3 ``` Select specified columns from a DataFrame: ``` df_3[['pop', 'unempl']] ``` Select a slice from a DataFrame: ``` df_3[:2] df_3.iloc[1:3] ``` Select from a DataFrame based on a filter: ``` df_3[df_3['pop'] > 5] ``` Select a slice of rows from a specific column of a DataFrame: ``` df_3.loc[0:2, 'pop'] df_3 ``` ## Arithmetic and Data Alignment Adding DataFrame objects results in the union of index pairs for rows and columns if the pairs are not the same, resulting in NaN for indices that do not overlap: ``` np.random.seed(0) df_8 = pd.DataFrame(np.random.rand(9).reshape((3, 3)), columns=['a', 'b', 'c']) df_8 np.random.seed(1) df_9 = pd.DataFrame(np.random.rand(9).reshape((3, 3)), columns=['b', 'c', 'd']) df_9 df_8 + df_9 ``` Set a fill value instead of NaN for indices that do not overlap: ``` df_10 = df_8.add(df_9, fill_value=0) df_10 ``` Like NumPy, pandas supports arithmetic operations between DataFrames and Series. 
Match the index of the Series on the DataFrame's columns, broadcasting down the rows: ``` ser_8 = df_10.iloc[0] df_11 = df_10 - ser_8 df_11 ``` Match the index of the Series on the DataFrame's columns, broadcasting down the rows and union the indices that do not match: ``` ser_9 = pd.Series(range(3), index=['a', 'd', 'e']) ser_9 df_11 - ser_9 ``` ## Function Application and Mapping NumPy ufuncs (element-wise array methods) operate on pandas objects: ``` df_11 = np.abs(df_11) df_11 ``` Apply a function on 1D arrays to each column: ``` df_11.apply(sum) ``` Apply a function on 1D arrays to each row: ``` df_11.apply(sum, axis=1) ``` Apply an element-wise Python function to a DataFrame: ``` def func_3(x): return '%.2f' %x df_11.applymap(func_3) ``` ## Sorting ``` df_12 = pd.DataFrame(np.arange(12).reshape((3, 4)), index=['three', 'one', 'two'], columns=['c', 'a', 'b', 'd']) df_12 ``` Sort a DataFrame by its index: ``` df_12.sort_index() ``` Sort a DataFrame by columns in descending order: ``` df_12.sort_index(axis=1, ascending=False) ``` Sort a DataFrame's values by column: ``` df_12.sort_values(by=['d', 'c']) ``` ## Summarizing and Computing Descriptive Statistics Unlike NumPy arrays, Pandas descriptive statistics automatically exclude missing data. NaN values are excluded unless the entire row or column is NA. ``` df_15 = pd.DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c']) df_15['cat1'] = (np.random.rand(10) * 3).round(0) df_15['cat2'] = (np.random.rand(10)).round(0) df_15 ``` ### Sum and Mean ``` df_15.sum() df_15.sum(axis=1) df_15.mean(axis=0) ``` ### Descriptive analysis ``` df_15['a'].describe() df_15['cat1'].value_counts() ``` ## Pivot tables ### group by cat1 and calculate mean ``` pd.pivot_table(df_15, index='cat1', aggfunc=np.mean) ```
---
### **Importing Libraries** <a id="head1"></a> ``` import numpy as np import pandas as pd import matplotlib.pyplot as plt from sklearn.model_selection import train_test_split from sklearn.decomposition import PCA from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score, confusion_matrix,classification_report from sklearn.ensemble import RandomForestClassifier from sklearn.linear_model import LogisticRegression from sklearn.tree import DecisionTreeClassifier from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler from sklearn.metrics import roc_curve from sklearn.metrics import precision_recall_curve from sklearn.compose import ColumnTransformer from sklearn.impute import SimpleImputer import pickle import warnings warnings.filterwarnings('ignore') ``` ### **Loading Data** <a id="head2"></a> ``` telco_customer = pd.read_csv("ChurnTrainDataset.csv") telco_customer ``` ### **Preprocessing** <a id="head5"></a> ``` # Encoding categorical data using cat codes for col in telco_customer.columns[telco_customer.dtypes == 'object']: if col!='churn': telco_customer[col]=telco_customer[col].astype('category').cat.codes # Fill Null Values of target column telco_customer['churn'] = telco_customer['churn'].fillna(telco_customer['churn'].mode()[0]) # Manual encoding the target variable a={'yes':1,'no':0} telco_customer['churn']=telco_customer['churn'].map(a) ``` ### **Seperate Features & Target Variable** <a id="head6"></a> ``` X = telco_customer.drop('churn',axis=1) y=telco_customer['churn'] cat_cols = X.select_dtypes(include=['int8']).columns.values num_cols = X.select_dtypes(include=['float64']).columns.values ``` ### **Train Test Split** <a id="head7"></a> ``` X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=17) ``` ## Feature Engineering pipeline ``` # Imputation Transformer to fill null values fill_null_col = ColumnTransformer([ 
('FillCat',SimpleImputer(strategy='most_frequent'),cat_cols), ('FillNumeric',SimpleImputer(strategy='median'),num_cols), ],remainder='passthrough') # Scaling scale_col = ColumnTransformer([ ('scale', StandardScaler(),slice(5,18)) ]) # Principal Component Analysis pca_col = ColumnTransformer([ ('PCA', PCA(n_components=10),slice(0,18)) ]) # # Model logistic=LogisticRegression() decision=DecisionTreeClassifier() random = RandomForestClassifier() ``` ### **Model Pipeline** <a id="head9"></a> ``` pipeline_model1 = Pipeline([('fill_null_col', fill_null_col), ('scale_col', scale_col), ('pca_col', pca_col), ('classifier1', logistic)]) pipeline_model2 = Pipeline([('fill_null_col', fill_null_col), ('scale_col', scale_col), ('pca_col', pca_col), ('classifier2', decision)]) pipeline_model3 = Pipeline([('fill_null_col', fill_null_col), ('scale_col', scale_col), ('pca_col', pca_col), ('classifier3', random)]) pipelines=[pipeline_model1,pipeline_model2,pipeline_model3] best_accuracy=0.0 best_classifier=0 best_pipeline="" pipe_dict={0:'Logistic Regression',1:'Decision Tree',2:'Random Forest'} # Fitting pipelines for pipe in pipelines: pipe.fit(X_train,y_train) for i,model in enumerate(pipelines): print("{} Test Accuracy : {}".format(pipe_dict[i],model.score(X_test,y_test))) for i,model in enumerate(pipelines): pred=model.predict(X_test) print("\n{} Confusion Matrix :\n {}".format(pipe_dict[i],confusion_matrix(y_test, pred))) print("\n{} Classification Report :\n {}".format(pipe_dict[i],classification_report(y_test,pred))) ``` ### Precision Recall Curve of all models ``` def precision_recall_curve_plots(p,r,n,y_test): fig, ax = plt.subplots(figsize=(6,6)) ax.plot(r, p, label=n) baseline = len(y_test[y_test==1]) / len(y_test) ax.plot([0, 1], [baseline, baseline], linestyle='--', label='Baseline') ax.set_xlabel('Recall') ax.set_ylabel('Precision') ax.legend(loc='center left'); # Precision Recall Curve For Logistic Regression probs = pipeline_model1.predict_proba(X_test)[:, 1] 
precision, recall, thresholds = precision_recall_curve(y_test, probs) name="Logisitic Regression" precision_recall_curve_plots(precision,recall,name,y_test) # Precision Recall Curve For Decisison Tree probs = pipeline_model2.predict_proba(X_test)[:, 1] precision, recall, thresholds = precision_recall_curve(y_test, probs) name="Decision Tree" precision_recall_curve_plots(precision,recall,name,y_test) # Precision Recall Curve For Random Forest probs = pipeline_model3.predict_proba(X_test)[:, 1] precision, recall, thresholds = precision_recall_curve(y_test, probs) name="Random Forest" precision_recall_curve_plots(precision,recall,name,y_test) ``` ### ROC Curve ``` probs = pipeline_model1.predict_proba(X_test)[:, 1] fpr1, tpr1, thresh1 = roc_curve(y_test, probs) probs = pipeline_model2.predict_proba(X_test)[:, 1] fpr2, tpr2, thresh2 = roc_curve(y_test, probs) probs = pipeline_model3.predict_proba(X_test)[:, 1] fpr3, tpr3, thresh3 = roc_curve(y_test, probs) random_probs = [0 for i in range(len(y_test))] p_fpr, p_tpr, _ = roc_curve(y_test, random_probs, pos_label=1) # plot roc curves plt.plot(fpr1, tpr1, linestyle='--',color='orange', label='Logistic Regression') plt.plot(fpr2, tpr2, linestyle='--',color='green', label='Decision Tree') plt.plot(fpr3, tpr3, linestyle='--',color='red', label='Random Forest') plt.plot(p_fpr, p_tpr, linestyle='--', color='blue') # title plt.title('ROC curve') # x label plt.xlabel('False Positive Rate') # y label plt.ylabel('True Positive rate') plt.legend(loc='best') plt.show(); for i,model in enumerate(pipelines): if model.score(X_test,y_test)>best_accuracy: best_accuracy=model.score(X_test,y_test) best_pipeline=model best_classifier=i print('Classifier with the best accuracy:{}'.format(pipe_dict[best_classifier])) ``` ### **Best Hyperparameters for Models Using GridSearchCV using a pipeline** <a id="head10"></a> ``` params = [{ 'classifier1__solver' : ['newton-cg', 'lbfgs', 'liblinear'], 'classifier1__penalty': ['l2','l1'] , 
'classifier1__C' :np.logspace(-4,4,20), }] hypertuned_model1 = GridSearchCV(estimator = pipeline_model1, param_grid = params, cv = 10 ,verbose = 1, n_jobs=-1) hypertuned_model1.fit(X_train,y_train) hypertuned_model1.best_params_ randomized_cv1 = Pipeline([('fill_null_col', fill_null_col), ('scale_col', scale_col), ('pca_col', pca_col), ('classifier1', LogisticRegression(solver='liblinear',penalty='l2',C=0.0001))]) randomized_cv1.fit(X_train,y_train) params = [{ "classifier2__max_depth":[2, 3, 5, 10, 20], "classifier2__min_samples_leaf":[5, 10, 20, 50, 100], "classifier2__criterion": ['gini', 'entropy'] }] hypertuned_model2 = GridSearchCV(estimator = pipeline_model2, param_grid = params, cv = 10 ,verbose = 1, n_jobs=-1) hypertuned_model2.fit(X_train,y_train) hypertuned_model2.best_params_ randomized_cv2 = Pipeline([('fill_null_col', fill_null_col), ('scale_col', scale_col), ('pca_col', pca_col), ('classifier2',DecisionTreeClassifier(criterion='entropy',max_depth=5,min_samples_leaf=20))]) randomized_cv2.fit(X_train,y_train) params = [{"classifier3__n_estimators": range(100, 501, 100), "classifier3__max_depth":range(10,30,10), "classifier3__min_samples_leaf":[1, 2, 4], "classifier3__min_samples_split": [2, 5, 10], }] hypertuned_model3 = GridSearchCV(estimator = pipeline_model3, param_grid = params, cv = 10 ,verbose = 1, n_jobs=-1) hypertuned_model3.fit(X_train,y_train) hypertuned_model3.best_params_ randomized_cv3 = Pipeline([('fill_null_col', fill_null_col), ('scale_col', scale_col), ('pca_col', pca_col), ('classifier3', RandomForestClassifier(max_depth=20,min_samples_leaf=1,min_samples_split=5,n_estimators=400,class_weight='balanced'))]) randomized_cv3.fit(X_train,y_train) loaded_model1=randomized_cv1 loaded_model2=randomized_cv2 ``` ### Saving Random Forest Model ``` filename = 'modelv1.pkl' pickle.dump(randomized_cv3, open(filename, 'wb')) ``` ### Load the model from disk ``` filename = 'modelv1.pkl' loaded_model3 = pickle.load(open(filename, 'rb')) result = 
loaded_model3.score(X_test, y_test) print(result) ``` ### Model Prediction After Hypertuning ``` def model_predict(loaded_model,X_test,y_test): y_pred_proba = loaded_model.predict_proba(X_test) y_pred= loaded_model.predict(X_test) print("Predcition Probability\n\n",y_pred_proba) print("\nPredcition\n\n",y_pred) acc = accuracy_score(y_test, y_pred) print("\nAccuracy=",acc) print("\nClassification Report\n\n",classification_report(y_test,y_pred)) print("\nConfusion Matrix\n",confusion_matrix(y_test, y_pred)) # Logistic Regression model_predict(loaded_model1,X_test,y_test) # Decision Tree model_predict(loaded_model2,X_test,y_test) # Random Forest model_predict(loaded_model3,X_test,y_test) ``` ### **Model Prediction After Thresholding**<a id="head11"></a> ``` def model_predict_with_threshold(loaded_model,X_test,threshold,y_test): y_pred_proba = loaded_model.predict_proba(X_test) y_pred= (loaded_model.predict_proba(X_test)[:,1] >= threshold).astype(int) print("Predcition Probability\n\n",y_pred_proba) print("\nPredcition\n\n",y_pred) acc = accuracy_score(y_test, y_pred) print("\nAccuracy=",acc) print("\nClassification Report\n\n",classification_report(y_test,y_pred)) print("\nConfusion Matrix\n",confusion_matrix(y_test, y_pred)) # Logistic Regression pred = loaded_model1.predict_proba(X_test)[:,1] precision, recall, thresholds = precision_recall_curve(y_test, pred) fscore = (2 * precision * recall) / (precision + recall) index = np.argmax(fscore) thresholdOpt = round(thresholds[index], ndigits = 4) fscoreOpt = round(fscore[index], ndigits = 4) print('Best Threshold: {} with F-Score: {}'.format(thresholdOpt, fscoreOpt)) model_predict_with_threshold(loaded_model1,X_test,thresholdOpt,y_test) # Decision Tree pred = loaded_model2.predict_proba(X_test)[:,1] precision, recall, thresholds = precision_recall_curve(y_test, pred) fscore = (2 * precision * recall) / (precision + recall) index = np.argmax(fscore) thresholdOpt = round(thresholds[index], ndigits = 4) fscoreOpt = 
round(fscore[index], ndigits = 4) print('Best Threshold: {} with F-Score: {}'.format(thresholdOpt, fscoreOpt)) model_predict_with_threshold(loaded_model2,X_test,thresholdOpt,y_test) # Random Forest pred = loaded_model3.predict_proba(X_test)[:,1] precision, recall, thresholds = precision_recall_curve(y_test, pred) fscore = (2 * precision * recall) / (precision + recall) index = np.argmax(fscore) thresholdOpt = round(thresholds[index], ndigits = 4) fscoreOpt = round(fscore[index], ndigits = 4) print('Best Threshold: {} with F-Score: {}'.format(thresholdOpt, fscoreOpt)) model_predict_with_threshold(loaded_model3,X_test,thresholdOpt,y_test) ```
---
# Exploring SQLAlchemy Joins a Touch First we need to setup our environment to answer the questions from the blog post * Setting up our ORM objects * Creating the tables in a SQLite database * Configuring and initializing a session for us to use for our exploration ``` from datetime import datetime from sqlalchemy import (Column, Integer, Numeric, String, DateTime, ForeignKey, Boolean, create_engine) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import sessionmaker, relationship, backref Base = declarative_base() class Cookie(Base): __tablename__ = 'cookies' cookie_id = Column(Integer, primary_key=True) cookie_name = Column(String(50), index=True) cookie_recipe_url = Column(String(255)) cookie_sku = Column(String(55)) quantity = Column(Integer()) unit_cost = Column(Numeric(12, 2)) def __repr__(self): return "Cookie(cookie_name='{self.cookie_name}', " \ "cookie_recipe_url='{self.cookie_recipe_url}', " \ "cookie_sku='{self.cookie_sku}', " \ "quantity={self.quantity}, " \ "unit_cost={self.unit_cost})".format(self=self) class User(Base): __tablename__ = 'users' user_id = Column(Integer(), primary_key=True) username = Column(String(15), nullable=False, unique=True) email_address = Column(String(255), nullable=False) phone = Column(String(20), nullable=False) password = Column(String(25), nullable=False) created_on = Column(DateTime(), default=datetime.now) updated_on = Column(DateTime(), default=datetime.now, onupdate=datetime.now) def __repr__(self): return "User(username='{self.username}', " \ "email_address='{self.email_address}', " \ "phone='{self.phone}', " \ "password='{self.password}')".format(self=self) class Order(Base): __tablename__ = 'orders' order_id = Column(Integer(), primary_key=True) user_id = Column(Integer(), ForeignKey('users.user_id')) shipped = Column(Boolean(), default=False) user = relationship("User", backref=backref('orders', order_by=order_id)) def __repr__(self): return "Order(user_id={self.user_id}, " \ 
"shipped={self.shipped})".format(self=self) class LineItem(Base): __tablename__ = 'line_items' line_item_id = Column(Integer(), primary_key=True) order_id = Column(Integer(), ForeignKey('orders.order_id')) cookie_id = Column(Integer(), ForeignKey('cookies.cookie_id')) quantity = Column(Integer()) extended_cost = Column(Numeric(12, 2)) order = relationship("Order", backref=backref('line_items', order_by=line_item_id)) cookie = relationship("Cookie", uselist=False) def __repr__(self): return "LineItems(order_id={self.order_id}, " \ "cookie_id={self.cookie_id}, " \ "quantity={self.quantity}, " \ "extended_cost={self.extended_cost})".format( self=self) # Connect do an in memory SQLite database engine = create_engine('sqlite:///:memory:') # Create our tables in that database Base.metadata.create_all(engine) # Configure our sessions to use the SQLite database engine Session = sessionmaker(bind=engine) # Initialize a session session = Session() ``` One of the readers of Essential SQLAlchemy sent me an email with more questions about how .join() works. In the example below, he wanted to know why join was only required for User, LineItem, and Cookie objects. Why isn't Order required? ``` query = session.query(Order.order_id, User.username, User.phone, Cookie.cookie_name, LineItem.quantity, LineItem.extended_cost) query = query.join(User).join(LineItem).join(Cookie) results = query.filter(User.username == 'cookiemon').all() ``` To answer that question, lets take a look at the SQL generated by the ORM for our query. ``` query = session.query(Order.order_id, User.username, User.phone, Cookie.cookie_name, LineItem.quantity, LineItem.extended_cost) print(query) ``` We can see that the FROM clause contains the Orders, Users, Cookies, and LineItems ORM objects \__tablename\__s for each object in the query. Also, notice the order is based on where they appeared in the SELECT clause. Just like in SQL, we need to define how the tables are related with JOIN clauses. 
These JOIN clauses need to follow the order of the relationships between the tables. This means we need to make sure that the table to the left of the JOIN clause has a relationship with the table in the .join() statement. This can be a bit confusing when we have chained .join() statements, as shown in the first example: the table in the prior .join() statement must have a relationship with the table in the current .join() statement being evaluated. Let's look at the SQL generated after all the .join() statements. ``` query = query.join(User).join(LineItem).join(Cookie) print(query) ``` We can see now that the FROM clause contains the JOIN clauses in the order we chained them into the query. So Order is the target of the first JOIN with User, which is why we didn't have to have a .join() for it.
---
## Selecting airport hubs for a new airline The problem of facility location is common to industries that have physical locations ("facilities") and need to identify the best new location as their business grows. There are numerous examples of this, ranging from chain stores and restaurants trying to open new locations, to placing public electric car charging stations in cities and along well-traveled routes. A common thread among these examples is that, generally, many locations have already been picked out and cannot be moved. A simple strategy for placing locations would be to make selections based on their total (or expected) number of served people. However, depending on how a location is defined, this approach might leave people in smaller cities or lower density areas completely unserved. For instance, if locations were defined as small contiguous regions and choices made based on their population, some large cities are likely to get multiple locations. Put another way, there may be redundancy among the people who are served. A more complex strategy is to use a clustering method applied to data where each potential location is encoded as an example weighted by the number of people at the location. Using k-medoids with k being the number of locations that would be open guarantees that exact locations are picked and can try to minimize the redundancy among those served. However, clustering approaches have the significant downside that, when placing a new facility, one would have to re-run the clustering algorithm with a larger number of clusters. Given that it is likely impossible to move currently existing facilities in these locations, this approach does not seem feasible. In contrast to traditional clustering approaches, submodular optimization provides an appealing framework for this task. 
Other notebooks in this repository go more in depth on the details of submodular optimization, but essentially, one can use a greedy approach that iteratives selects examples which minimize redundancy with those that have already been chosen. This property is convenient when you can't move previous facilities. Let's say you're a new airline and you want to know where to put your hubs. You might want these hubs to be at locations that service the most routes so that you can minimize the distance an airplane has to fly to return to a hub. Initially, you might suspect that putting these hubs at the biggest cities is likely the best choice. However, since this notebook is being written as part of a tutorial on submodular optimization, you also suspect that submodular optimization might yield a better selection. ``` %pylab inline numpy.random.seed(0) import pandas import seaborn; seaborn.set_style('whitegrid') ``` ### Loading the Data We will be using two tables of data from openflights.org for this example. The first table contains information about airports around the world and the second table contains information about recorded routes. A limitation of this data is that it was updated most recently in 2014. Let's first load up the airport table. Because we are focusing on the United States, we will filter out the airports from other parts of the world. This table includes a lot of information about each airport, but the only things we really need are the ID and the name, for human interpretability. ``` names = 'Airport ID', 'Name', 'City', 'Country', 'IATA', 'ICAO', 'Latitude', 'Longitude', 'Altitude', 'Timezone', 'DST', 'Tz', 'Type', 'Source' airports = pandas.read_csv("airports.csv", header=None, names=names) airports = airports[airports['Country'] == 'United States'] airports.head() ``` Next, let's load up the routes. 
These routes include a source airport, which is where the flight began, and a destination airport, which is where the plane was flying to, as well as some other information that we can ignore. ``` names = 'Airline', 'Airline ID', 'Source ', 'Source ID', 'Destination', 'Destination ID', 'Codeshare', 'Stops', 'Equipment' routes = pandas.read_csv("routes.csv", header=None, names=names) routes = routes.replace("\\N", numpy.nan).dropna() routes['Source ID'] = routes['Source ID'].astype(int) routes['Destination ID'] = routes['Destination ID'].astype(int) routes = routes.dropna() routes.head() ``` ### Airport Visualization We can start off by visualizing the location of each airport in our data set. There are a few airports that are in other countries that we can safely ignore because they are unlikely to be commercial. ``` plt.figure(figsize=(14, 7)) plt.scatter(airports['Longitude'], airports['Latitude'], s=2, color='c') plt.xlim(-200, -50) plt.ylim(10, 80) plt.axis('off') plt.show() ``` That shape looks familiar. Next, we can perform two table joins in order to get the latitude and longitude of the sorce and destination airports for each flight. This will let us visualize the routes. ``` routes_reduced = routes[['Source ID', 'Destination ID']].drop_duplicates() airports_reduced = airports[['Airport ID', 'Latitude', 'Longitude']] routes_merged = pandas.merge(routes_reduced, airports_reduced, left_on='Source ID', right_on='Airport ID') routes_merged = pandas.merge(routes_merged, airports_reduced, left_on='Destination ID', right_on='Airport ID', suffixes=('_source', '_destination')) routes_merged = routes_merged.drop(['Airport ID_destination', 'Airport ID_source'], axis=1) routes_merged.head() ``` Now let's add the routes to the plot as thin lines. 
``` plt.figure(figsize=(14, 7)) plt.scatter(airports['Longitude'], airports['Latitude'], s=2, color='c') for i, (_, _, la_x, lo_x, la_y, lo_y) in routes_merged.iterrows(): plt.plot([lo_x, lo_y], [la_x, la_y], color='k', linewidth=0.15) plt.xlim(-200, -50) plt.ylim(10, 80) plt.axis('off') plt.show() ``` We can already see some hubs forming. It looks like Atlanta is a clear hub near the east coast, Chicago is a clear hub north of it, Houston is a hub in the south, and Los Angeles is a clear hub on the west coast. ### Airport selection using the most routes As a baseline, let's see how many routes are covered by those airports that are connected to the most routes. In this case, we're saying that an airport covers a route if it is either the source or the destination of the routes in the data set. We can do this by creating a matrix where each row and column are airports and a 1 indicates that there is a route connected the two airports. This will result in a square symmetric matrix. ``` n = len(airports) mapping = {airport: i for i, airport in enumerate(airports['Airport ID'])} route_map = numpy.zeros((n, n)) for _, (source, destination) in routes_reduced.iterrows(): if source in mapping and destination in mapping: x, y = mapping[source], mapping[destination] route_map[x, y] = 1 route_map[y, x] = 1 route_map.sum() ``` Looks like there are 3404 total routes in our data base. What are the 6 airports connected to the most routes? ``` airports_w_routes = airports.copy() airports_w_routes['# Routes'] = [route_map[mapping[airport]].sum() for airport in airports['Airport ID']] airports_w_routes.sort_values("# Routes", ascending=False).head(6) ``` These airports are fairly expected. Atlanta is the busiest airport in the United States, so it's not surprising that it comes up first. Further, Delta has hubs in Atlanta and Minneapolis-St Paul (among other cities), United has hubs in Chicago, Denver, and Houston. Lastly, Dallas is a hub for American. 
Thus, all of the top 6 airports here currently serve as hubs for some airline. What do these airports look like if we plotted their location as well as all the routes that are connected to them? ``` airports_ = airports_w_routes.sort_values("# Routes", ascending=False).head(6) airport_idxs = airports_['Airport ID'].values d = {} plt.figure(figsize=(14, 7)) plt.scatter(airports['Longitude'], airports['Latitude'], s=2, color='c') plt.scatter(airports_['Longitude'], airports_['Latitude'], color='r') for i, (sid, did, la_x, lo_x, la_y, lo_y) in routes_merged.iterrows(): if (sid, did) in d: continue if int(sid) in airport_idxs or int(did) in airport_idxs: d[(sid, did)] = True plt.plot([lo_x, lo_y], [la_x, la_y], color='r', linewidth=0.15) plt.xlim(-200, -50) plt.ylim(10, 80) plt.axis('off') plt.show() ``` The selected airports seem to be concentrated in the mid-west and east. This makes sense, because airports in the middle of the country are well suited for supporting flights to either coast. However, you'll notice that these airports do not serve any of the airports in Alaska or lie at all on the west coast. Let's evaluate this choice of airports by summing the total number of unique airports that they are connected to. This means that if both Atlanta and Chicago have flights to Seattle that the pair only gets 1 point. ``` most_routes = numpy.array([mapping[airport] for airport in airport_idxs[:6]]) route_map[most_routes].max(axis=0).sum() ``` It looks like this set of airport hubs service 256 unique airports across the United States. Can we do better with submodular optimization? ### Airport selection by optimizing a facility location function There are many submodular functions that one could optimize for this problem. However, the problem of placing facilities is such a canonical use of submodular optimization that there is a function named the "facility location" function. 
The function takes the form \begin{equation} f(X) = \sum\limits_{x} \max\limits_{y} \phi(x, y) \end{equation} where $x$ is an example from your data set, or potential locations in this case, $y$ is a facility that has already been selected, and $\phi$ is a similarity measure. One potential similarity measure is the physical distance between $x$ and $y$. Now let's use facility location to select the set of six airports. We can directly use the route map matrix that we made earlier as a precomputed similarity matrix, with airports having a route between them being "similar" and those not having routes connecting them being "dissimilar." ``` from apricot import FacilityLocationSelection model = FacilityLocationSelection(6, pairwise_func='precomputed') model.fit(route_map) airports_w_routes.iloc[model.ranking].head(6) ``` Naturally, this approach will select Atlanta first, due in part to its greedy nature which begins by selecting the single most representative example. In this case, the most representative airport is the one connected to the most other airports. The two major differences between selecting the airports with the most routes and optimizing a facility location function are differences in ranking, with Denver being selected this time ahead of Chicago, and the inclusion of Bethel and Seattle instead of Houston and Minneapolis-St. Paul. The inclusion of Bethel is interesting because, by itself, it is connected to few routes in comparison to the other selected airports. However, each of these routes is connected to a new airport, whereas other airports are connected to airports that likely already have flights to Atlanta, Denver, or Chicago. Finally, this approach selects Seattle to be a hub, which is so obviously correct that it needs no further elaboration here. What do these airports look like on the map, along with the routes connected to them? 
``` airports_ = airports_w_routes.iloc[model.ranking] airport_idxs = airports_['Airport ID'].values d = {} plt.figure(figsize=(14, 7)) plt.scatter(airports['Longitude'], airports['Latitude'], s=2, color='c') plt.scatter(airports_['Longitude'], airports_['Latitude'], color='m') for i, (sid, did, la_x, lo_x, la_y, lo_y) in routes_merged.iterrows(): if (sid, did) in d: continue if int(sid) in airport_idxs or int(did) in airport_idxs: d[(sid, did)] = True plt.plot([lo_x, lo_y], [la_x, la_y], color='m', linewidth=0.15) plt.xlim(-200, -50) plt.ylim(10, 80) plt.axis('off') plt.show() ``` The airports appear to be more evenly spaced across the US. The largest visible difference is more activity on the west coast, particularly the inclusion of more airports from Alaska. ``` most_routes = numpy.array([mapping[airport] for airport in airport_idxs[:6]]) route_map[most_routes].max(axis=0).sum() ``` Looks like we're up to 270 from the 256 before. While it may not seem like much initially, keep in mind that there are many routes that most large airports will be connected to. For instance, Atlanta alone is connected to more than half of the airports that this set of hubs is connected to. ### Concluding remarks The example here is clearly a toy example. In reality, one would want to factor in many more aspects of each location before deciding on hubs, such as the price of setting up a hub at each airport, the population served, the number of flights that they could reasonably fly out of each airport, and international or shipping routes. Further, the number of airports connected to a set of hubs might not be the only metric one would use. However, it is a simple example of submodular optimization for placing facilities.
github_jupyter
<a href="https://colab.research.google.com/github/TarrySingh/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials/blob/master/TensorFlow_2_0_%2B_Keras_Crash_Course.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ``` !pip install tensorflow==2.0.0 import tensorflow as tf print(tf.__version__) ``` # TensorFlow 2.0 + Keras Overview for Deep Learning Researchers *@fchollet, October 2019* --- **This document serves as an introduction, crash course, and quick API reference for TensorFlow 2.0.** --- TensorFlow and Keras were both released over four years ago (March 2015 for Keras and November 2015 for TensorFlow). That's a long time in deep learning years! In the old days, TensorFlow 1.x + Keras had a number of known issues: - Using TensorFlow meant manipulating static computation graphs, which would feel awkward and difficult to programmers used to imperative styles of coding. - While the TensorFlow API was very powerful and flexible, it lacked polish and was often confusing or difficult to use. - While Keras was very productive and easy to use, it would often lack flexibility for research use cases. --- ### TensorFlow 2.0 is an extensive redesign of TensorFlow and Keras that takes into account over four years of user feedback and technical progress. It fixes the issues above in a big way. ### It's a machine learning platform from the future. --- TensorFlow 2.0 is built on the following key ideas: - Let users run their computation eagerly, like they would in Numpy. This makes TensorFlow 2.0 programming intuitive and Pythonic. - Preserve the considerable advantages of compiled graphs (for performance, distribution, and deployment). This makes TensorFlow fast, scalable, and production-ready. - Leverage Keras as its high-level deep learning API, making TensorFlow approachable and highly productive. 
- Extend Keras into a spectrum of workflows ranging from the very high-level (easier to use, less flexible) to the very low-level (requires more expertise, but provides great flexibility). # Part 1: TensorFlow basics ## Tensors This is a [constant](https://www.tensorflow.org/api_docs/python/tf/constant) tensor: ``` x = tf.constant([[5, 2], [1, 3]]) print(x) ``` You can get its value as a Numpy array by calling `.numpy()`: ``` x.numpy() ``` Much like a Numpy array, it features the attributes `dtype` and `shape`: ``` print('dtype:', x.dtype) print('shape:', x.shape) ``` A common way to create constant tensors is via `tf.ones` and `tf.zeros` (just like `np.ones` and `np.zeros`): ``` print(tf.ones(shape=(2, 1))) print(tf.zeros(shape=(2, 1))) ``` ## Random constant tensors This is all pretty [normal](https://www.tensorflow.org/api_docs/python/tf/random/normal): ``` tf.random.normal(shape=(2, 2), mean=0., stddev=1.) ``` And here's an integer tensor with values drawn from a random [uniform](https://www.tensorflow.org/api_docs/python/tf/random/uniform) distribution: ``` tf.random.uniform(shape=(2, 2), minval=0, maxval=10, dtype='int32') ``` ## Variables [Variables](https://www.tensorflow.org/guide/variable) are special tensors used to store mutable state (like the weights of a neural network). You create a Variable using some initial value. ``` initial_value = tf.random.normal(shape=(2, 2)) a = tf.Variable(initial_value) print(a) ``` You update the value of a Variable by using the methods `.assign(value)`, or `.assign_add(increment)` or `.assign_sub(decrement)`: ``` new_value = tf.random.normal(shape=(2, 2)) a.assign(new_value) for i in range(2): for j in range(2): assert a[i, j] == new_value[i, j] added_value = tf.random.normal(shape=(2, 2)) a.assign_add(added_value) for i in range(2): for j in range(2): assert a[i, j] == new_value[i, j] + added_value[i, j] ``` ## Doing math in TensorFlow You can use TensorFlow exactly like you would use Numpy. 
The main difference is that your TensorFlow code can run on GPU and TPU. ``` a = tf.random.normal(shape=(2, 2)) b = tf.random.normal(shape=(2, 2)) c = a + b d = tf.square(c) e = tf.exp(d) ``` ## Computing gradients with `GradientTape` Oh, and there's another big difference with Numpy: you can automatically retrieve the gradient of any differentiable expression. Just open a [`GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape), start "watching" a tensor via `tape.watch()`, and compose a differentiable expression using this tensor as input: ``` a = tf.random.normal(shape=(2, 2)) b = tf.random.normal(shape=(2, 2)) with tf.GradientTape() as tape: tape.watch(a) # Start recording the history of operations applied to `a` c = tf.sqrt(tf.square(a) + tf.square(b)) # Do some math using `a` # What's the gradient of `c` with respect to `a`? dc_da = tape.gradient(c, a) print(dc_da) ``` By default, variables are watched automatically, so you don't need to manually `watch` them: ``` a = tf.Variable(a) with tf.GradientTape() as tape: c = tf.sqrt(tf.square(a) + tf.square(b)) dc_da = tape.gradient(c, a) print(dc_da) ``` Note that you can compute higher-order derivatives by nesting tapes: ``` with tf.GradientTape() as outer_tape: with tf.GradientTape() as tape: c = tf.sqrt(tf.square(a) + tf.square(b)) dc_da = tape.gradient(c, a) d2c_da2 = outer_tape.gradient(dc_da, a) print(d2c_da2) ``` ## An end-to-end example: linear regression So far you've learned that TensorFlow is a Numpy-like library that is GPU or TPU accelerated, with automatic differentiation. Time for an end-to-end example: let's implement a linear regression, the FizzBuzz of Machine Learning. For the sake of demonstration, we won't use any of the higher-level Keras components like `Layer` or `MeanSquaredError`. Just basic ops. 
``` input_dim = 2 output_dim = 1 learning_rate = 0.01 # This is our weight matrix w = tf.Variable(tf.random.uniform(shape=(input_dim, output_dim))) # This is our bias vector b = tf.Variable(tf.zeros(shape=(output_dim,))) def compute_predictions(features): return tf.matmul(features, w) + b def compute_loss(labels, predictions): return tf.reduce_mean(tf.square(labels - predictions)) def train_on_batch(x, y): with tf.GradientTape() as tape: predictions = compute_predictions(x) loss = compute_loss(y, predictions) # Note that `tape.gradient` works with a list as well (w, b). dloss_dw, dloss_db = tape.gradient(loss, [w, b]) w.assign_sub(learning_rate * dloss_dw) b.assign_sub(learning_rate * dloss_db) return loss ``` Let's generate some artificial data to demonstrate our model: ``` import numpy as np import random import matplotlib.pyplot as plt %matplotlib inline # Prepare a dataset. num_samples = 10000 negative_samples = np.random.multivariate_normal( mean=[0, 3], cov=[[1, 0.5],[0.5, 1]], size=num_samples) positive_samples = np.random.multivariate_normal( mean=[3, 0], cov=[[1, 0.5],[0.5, 1]], size=num_samples) features = np.vstack((negative_samples, positive_samples)).astype(np.float32) labels = np.vstack((np.zeros((num_samples, 1), dtype='float32'), np.ones((num_samples, 1), dtype='float32'))) plt.scatter(features[:, 0], features[:, 1], c=labels[:, 0]) ``` Now let's train our linear regression by iterating over batch-by-batch over the data and repeatedly calling `train_on_batch`: ``` # Shuffle the data. 
indices = np.random.permutation(len(features)) features = features[indices] labels = labels[indices] # Create a tf.data.Dataset object for easy batched iteration dataset = tf.data.Dataset.from_tensor_slices((features, labels)) dataset = dataset.shuffle(buffer_size=1024).batch(256) for epoch in range(10): for step, (x, y) in enumerate(dataset): loss = train_on_batch(x, y) print('Epoch %d: last batch loss = %.4f' % (epoch, float(loss))) ``` Here's how our model performs: ``` predictions = compute_predictions(features) plt.scatter(features[:, 0], features[:, 1], c=predictions[:, 0] > 0.5) ``` ## Making it fast with `tf.function` But how fast is our current code running? ``` import time t0 = time.time() for epoch in range(20): for step, (x, y) in enumerate(dataset): loss = train_on_batch(x, y) t_end = time.time() - t0 print('Time per epoch: %.3f s' % (t_end / 20,)) ``` Let's compile the training function into a static graph. Literally all we need to do is add the `tf.function` decorator on it: ``` @tf.function def train_on_batch(x, y): with tf.GradientTape() as tape: predictions = compute_predictions(x) loss = compute_loss(y, predictions) dloss_dw, dloss_db = tape.gradient(loss, [w, b]) w.assign_sub(learning_rate * dloss_dw) b.assign_sub(learning_rate * dloss_db) return loss ``` Let's try this again: ``` t0 = time.time() for epoch in range(20): for step, (x, y) in enumerate(dataset): loss = train_on_batch(x, y) t_end = time.time() - t0 print('Time per epoch: %.3f s' % (t_end / 20,)) ``` 40% reduction, neat. In this case we used a trivially simple model; in general the bigger the model the greater the speedup you can get by leveraging static graphs. Remember: eager execution is great for debugging and printing results line-by-line, but when it's time to scale, static graphs are a researcher's best friends. # Part 2: The Keras API Keras is a Python API for deep learning. 
It has something for everyone: - If you're an engineer, Keras provides you with reusable blocks such as layers, metrics, training loops, to support common use cases. It provides a high-level user experience that's accessible and productive. - If you're a researcher, you may prefer not to use these built-in blocks such as layers and training loops, and instead create your own. Of course, Keras allows you to do this. In this case, Keras provides you with templates for the blocks you write, it provides you with structure, with an API standard for things like Layers and Metrics. This structure makes your code easy to share with others and easy to integrate in production workflows. - The same is true for library developers: TensorFlow is a large ecosystem. It has many different libraries. In order for different libraries to be able to talk to each other and share components, they need to follow an API standard. That's what Keras provides. Crucially, Keras brings high-level UX and low-level flexibility together fluently: you no longer have on one hand, a high-level API that's easy to use but inflexible, and on the other hand a low-level API that's flexible but only approachable by experts. Instead, you have a spectrum of workflows, from the very high-level to the very low-level. Workflows that are all compatible because they're built on top of the same concepts and objects. ![Spectrum of Keras workflows](https://keras-dev.s3.amazonaws.com/tutorials-img/spectrum-of-workflows.png) ## The base `Layer` class The first class you need to know is [`Layer`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer). Pretty much everything in Keras derives from it. A Layer encapsulates a state (weights) and some computation (defined in the `call` method). 
``` from tensorflow.keras.layers import Layer class Linear(Layer): """y = w.x + b""" def __init__(self, units=32, input_dim=32): super(Linear, self).__init__() w_init = tf.random_normal_initializer() self.w = tf.Variable( initial_value=w_init(shape=(input_dim, units), dtype='float32'), trainable=True) b_init = tf.zeros_initializer() self.b = tf.Variable( initial_value=b_init(shape=(units,), dtype='float32'), trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b # Instantiate our layer. linear_layer = Linear(4, 2) ``` A layer instance works like a function. Let's call it on some data: ``` y = linear_layer(tf.ones((2, 2))) assert y.shape == (2, 4) ``` The `Layer` class takes care of tracking the weights assigned to it as attributes: ``` # Weights are automatically tracked under the `weights` property. assert linear_layer.weights == [linear_layer.w, linear_layer.b] ``` Note that's also a shortcut method for creating weights: `add_weight`. Instead of doing ```python w_init = tf.random_normal_initializer() self.w = tf.Variable(initial_value=w_init(shape=shape, dtype='float32')) ``` You would typically do: ```python self.w = self.add_weight(shape=shape, initializer='random_normal') ``` It’s good practice to create weights in a separate `build` method, called lazily with the shape of the first inputs seen by your layer. Here, this pattern prevents us from having to specify input_dim in the constructor: ``` class Linear(Layer): """y = w.x + b""" def __init__(self, units=32): super(Linear, self).__init__() self.units = units def build(self, input_shape): self.w = self.add_weight(shape=(input_shape[-1], self.units), initializer='random_normal', trainable=True) self.b = self.add_weight(shape=(self.units,), initializer='random_normal', trainable=True) def call(self, inputs): return tf.matmul(inputs, self.w) + self.b # Instantiate our lazy layer. linear_layer = Linear(4) # This will also call `build(input_shape)` and create the weights. 
y = linear_layer(tf.ones((2, 2))) assert len(linear_layer.weights) == 2 ``` ## Trainable and non-trainable weights Weights created by layers can be either trainable or non-trainable. They're exposed in `trainable_weights` and `non_trainable_weights`. Here's a layer with a non-trainable weight: ``` from tensorflow.keras.layers import Layer class ComputeSum(Layer): """Returns the sum of the inputs.""" def __init__(self, input_dim): super(ComputeSum, self).__init__() # Create a non-trainable weight. self.total = tf.Variable(initial_value=tf.zeros((input_dim,)), trainable=False) def call(self, inputs): self.total.assign_add(tf.reduce_sum(inputs, axis=0)) return self.total my_sum = ComputeSum(2) x = tf.ones((2, 2)) y = my_sum(x) print(y.numpy()) # [2. 2.] y = my_sum(x) print(y.numpy()) # [4. 4.] assert my_sum.weights == [my_sum.total] assert my_sum.non_trainable_weights == [my_sum.total] assert my_sum.trainable_weights == [] ``` ## Recursively composing layers Layers can be recursively nested to create bigger computation blocks. Each layer will track the weights of its sublayers (both trainable and non-trainable). ``` # Let's reuse the Linear class # with a `build` method that we defined above. class MLP(Layer): """Simple stack of Linear layers.""" def __init__(self): super(MLP, self).__init__() self.linear_1 = Linear(32) self.linear_2 = Linear(32) self.linear_3 = Linear(10) def call(self, inputs): x = self.linear_1(inputs) x = tf.nn.relu(x) x = self.linear_2(x) x = tf.nn.relu(x) return self.linear_3(x) mlp = MLP() # The first call to the `mlp` object will create the weights. y = mlp(tf.ones(shape=(3, 64))) # Weights are recursively tracked. assert len(mlp.weights) == 6 ``` ## Built-in layers Keras provides you with a [wide range of built-in layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers/), so that you don't have to implement your own layers all the time. 
- Convolution layers - Transposed convolutions - Separateable convolutions - Average and max pooling - Global average and max pooling - LSTM, GRU (with built-in cuDNN acceleration) - BatchNormalization - Dropout - Attention - ConvLSTM2D - etc. Keras follows the principles of exposing good default configurations, so that layers will work fine out of the box for most use cases if you leave keyword arguments to their default value. For instance, the `LSTM` layer uses an orthogonal recurrent matrix initializer by default, and initializes the forget gate bias to one by default. ## The `training` argument in `call` Some layers, in particular the `BatchNormalization` layer and the `Dropout` layer, have different behaviors during training and inference. For such layers, it is standard practice to expose a `training` (boolean) argument in the `call` method. By exposing this argument in `call`, you enable the built-in training and evaluation loops (e.g. `fit`) to correctly use the layer in training and inference. ``` from tensorflow.keras.layers import Layer class Dropout(Layer): def __init__(self, rate): super(Dropout, self).__init__() self.rate = rate def call(self, inputs, training=None): if training: return tf.nn.dropout(inputs, rate=self.rate) return inputs class MLPWithDropout(Layer): def __init__(self): super(MLPWithDropout, self).__init__() self.linear_1 = Linear(32) self.dropout = Dropout(0.5) self.linear_3 = Linear(10) def call(self, inputs, training=None): x = self.linear_1(inputs) x = tf.nn.relu(x) x = self.dropout(x, training=training) return self.linear_3(x) mlp = MLPWithDropout() y_train = mlp(tf.ones((2, 2)), training=True) y_test = mlp(tf.ones((2, 2)), training=False) ``` ## A more Functional way of defining models To build deep learning models, you don't have to use object-oriented programming all the time. 
Layers can also be composed functionally, like this (we call it the "Functional API"): ``` # We use an `Input` object to describe the shape and dtype of the inputs. # This is the deep learning equivalent of *declaring a type*. # The shape argument is per-sample; it does not include the batch size. # The functional API focused on defining per-sample transformations. # The model we create will automatically batch the per-sample transformations, # so that it can be called on batches of data. inputs = tf.keras.Input(shape=(16,)) # We call layers on these "type" objects # and they return updated types (new shapes/dtypes). x = Linear(32)(inputs) # We are reusing the Linear layer we defined earlier. x = Dropout(0.5)(x) # We are reusing the Dropout layer we defined earlier. outputs = Linear(10)(x) # A functional `Model` can be defined by specifying inputs and outputs. # A model is itself a layer like any other. model = tf.keras.Model(inputs, outputs) # A functional model already has weights, before being called on any data. # That's because we defined its input shape in advance (in `Input`). assert len(model.weights) == 4 # Let's call our model on some data. y = model(tf.ones((2, 16))) assert y.shape == (2, 10) ``` The Functional API tends to be more concise than subclassing, and provides a few other advantages (generally the same advantages that functional, typed languages provide over untyped OO development). However, it can only be used to define DAGs of layers -- recursive networks should be defined as `Layer` subclasses instead. Key differences between models defined via subclassing and Functional models are explained in [this blog post](https://medium.com/tensorflow/what-are-symbolic-and-imperative-apis-in-tensorflow-2-0-dfccecb01021). Learn more about the Functional API [here](https://www.tensorflow.org/alpha/guide/keras/functional). In your research workflows, you may often find yourself mix-and-matching OO models and Functional models. 
For models that are simple stacks of layers with a single input and a single output, you can also use the `Sequential` class which turns a list of layers into a `Model`: ``` from tensorflow.keras import Sequential model = Sequential([Linear(32), Dropout(0.5), Linear(10)]) y = model(tf.ones((2, 16))) assert y.shape == (2, 10) ``` ## Loss classes Keras features a wide range of built-in loss classes, like `BinaryCrossentropy`, `CategoricalCrossentropy`, `KLDivergence`, etc. They work like this: ``` bce = tf.keras.losses.BinaryCrossentropy() y_true = [0., 0., 1., 1.] # Targets y_pred = [1., 1., 1., 0.] # Predictions loss = bce(y_true, y_pred) print('Loss:', loss.numpy()) ``` Note that loss classes are stateless: the output of `__call__` is only a function of the input. ## Metric classes Keras also features a wide range of built-in metric classes, such as `BinaryAccuracy`, `AUC`, `FalsePositives`, etc. Unlike losses, metrics are stateful. You update their state using the `update_state` method, and you query the scalar metric result using `result`: ``` m = tf.keras.metrics.AUC() m.update_state([0, 1, 1, 1], [0, 1, 0, 0]) print('Intermediate result:', m.result().numpy()) m.update_state([1, 1, 1, 1], [0, 1, 1, 0]) print('Final result:', m.result().numpy()) ``` The internal state can be cleared with `metric.reset_states`. 
You can easily roll out your own metrics by subclassing the `Metric` class: - Create the state variables in `__init__` - Update the variables given `y_true` and `y_pred` in `update_state` - Return the metric result in `result` - Clear the state in `reset_states` Here's a quick implementation of a `BinaryTruePositives` metric as a demonstration: ``` class BinaryTruePositives(tf.keras.metrics.Metric): def __init__(self, name='binary_true_positives', **kwargs): super(BinaryTruePositives, self).__init__(name=name, **kwargs) self.true_positives = self.add_weight(name='tp', initializer='zeros') def update_state(self, y_true, y_pred, sample_weight=None): y_true = tf.cast(y_true, tf.bool) y_pred = tf.cast(y_pred, tf.bool) values = tf.logical_and(tf.equal(y_true, True), tf.equal(y_pred, True)) values = tf.cast(values, self.dtype) if sample_weight is not None: sample_weight = tf.cast(sample_weight, self.dtype) values = tf.multiply(values, sample_weight) self.true_positives.assign_add(tf.reduce_sum(values)) def result(self): return self.true_positives def reset_states(self): self.true_positives.assign(0) m = BinaryTruePositives() m.update_state([0, 1, 1, 1], [0, 1, 0, 0]) print('Intermediate result:', m.result().numpy()) m.update_state([1, 1, 1, 1], [0, 1, 1, 0]) print('Final result:', m.result().numpy()) ``` ## Optimizer classes & a quick end-to-end training loop You don't normally have to define by hand how to update your variables during gradient descent, like we did in our initial linear regression example. You would usually use one of the built-in Keras optimizers, like `SGD`, `RMSprop`, or `Adam`. Here's a simple MNIST example that brings together loss classes, metric classes, and optimizers. ``` from tensorflow.keras import layers # Prepare a dataset. 
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() x_train = x_train[:].reshape(60000, 784).astype('float32') / 255 dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) dataset = dataset.shuffle(buffer_size=1024).batch(64) # Instantiate a simple classification model model = tf.keras.Sequential([ layers.Dense(256, activation=tf.nn.relu), layers.Dense(256, activation=tf.nn.relu), layers.Dense(10) ]) # Instantiate a logistic loss function that expects integer targets. loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Instantiate an accuracy metric. accuracy = tf.keras.metrics.SparseCategoricalAccuracy() # Instantiate an optimizer. optimizer = tf.keras.optimizers.Adam() # Iterate over the batches of the dataset. for step, (x, y) in enumerate(dataset): # Open a GradientTape. with tf.GradientTape() as tape: # Forward pass. logits = model(x) # Loss value for this batch. loss_value = loss(y, logits) # Get gradients of loss wrt the weights. gradients = tape.gradient(loss_value, model.trainable_weights) # Update the weights of our linear layer. optimizer.apply_gradients(zip(gradients, model.trainable_weights)) # Update the running accuracy. accuracy.update_state(y, logits) # Logging. 
if step % 100 == 0: print('Step:', step) print('Loss from last step: %.3f' % loss_value) print('Total running accuracy so far: %.3f' % accuracy.result()) ``` We can reuse our `SparseCategoricalAccuracy` metric instance to implement a testing loop: ``` x_test = x_test[:].reshape(10000, 784).astype('float32') / 255 test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)) test_dataset = test_dataset.batch(128) accuracy.reset_states() # This clears the internal state of the metric for step, (x, y) in enumerate(test_dataset): logits = model(x) accuracy.update_state(y, logits) print('Final test accuracy: %.3f' % accuracy.result()) ``` ## The `add_loss` method Sometimes you need to compute loss values on the fly during a forward pass (especially regularization losses). Keras allows you to compute loss values at any time, and to recursively keep track of them via the `add_loss` method. Here's an example of a layer that adds a sparsity regularization loss based on the L2 norm of the inputs: ``` from tensorflow.keras.layers import Layer class ActivityRegularization(Layer): """Layer that creates an activity sparsity regularization loss.""" def __init__(self, rate=1e-2): super(ActivityRegularization, self).__init__() self.rate = rate def call(self, inputs): # We use `add_loss` to create a regularization loss # that depends on the inputs. 
self.add_loss(self.rate * tf.reduce_sum(tf.square(inputs))) return inputs ``` Loss values added via `add_loss` can be retrieved in the `.losses` list property of any `Layer` or `Model`: ``` from tensorflow.keras import layers class SparseMLP(Layer): """Stack of Linear layers with a sparsity regularization loss.""" def __init__(self, output_dim): super(SparseMLP, self).__init__() self.dense_1 = layers.Dense(32, activation=tf.nn.relu) self.regularization = ActivityRegularization(1e-2) self.dense_2 = layers.Dense(output_dim) def call(self, inputs): x = self.dense_1(inputs) x = self.regularization(x) return self.dense_2(x) mlp = SparseMLP(1) y = mlp(tf.ones((10, 10))) print(mlp.losses) # List containing one float32 scalar ``` These losses are cleared by the top-level layer at the start of each forward pass -- they don't accumulate. So `layer.losses` always contain only the losses created during the last forward pass. You would typically use these losses by summing them before computing your gradients when writing a training loop. ``` # Losses correspond to the *last* forward pass. mlp = SparseMLP(1) mlp(tf.ones((10, 10))) assert len(mlp.losses) == 1 mlp(tf.ones((10, 10))) assert len(mlp.losses) == 1 # No accumulation. # Let's demonstrate how to use these losses in a training loop. # Prepare a dataset. (x_train, y_train), _ = tf.keras.datasets.mnist.load_data() dataset = tf.data.Dataset.from_tensor_slices( (x_train.reshape(60000, 784).astype('float32') / 255, y_train)) dataset = dataset.shuffle(buffer_size=1024).batch(64) # A new MLP. mlp = SparseMLP(10) # Loss and optimizer. loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) optimizer = tf.keras.optimizers.SGD(learning_rate=0.1) for step, (x, y) in enumerate(dataset): with tf.GradientTape() as tape: # Forward pass. logits = mlp(x) # External loss value for this batch. loss = loss_fn(y, logits) # Add the losses created during the forward pass. 
loss += sum(mlp.losses) # Get gradients of loss wrt the weights. gradients = tape.gradient(loss, mlp.trainable_weights) # Update the weights of our linear layer. optimizer.apply_gradients(zip(gradients, mlp.trainable_weights)) # Logging. if step % 100 == 0: print('Loss at step %d: %.3f' % (step, loss)) ``` ## A detailed end-to-end example: a Variational AutoEncoder (VAE) If you want to take a break from the basics and look at a slightly more advanced example, check out this [Variational AutoEncoder](https://www.tensorflow.org/guide/keras/custom_layers_and_models#putting_it_all_together_an_end-to-end_example) implementation that demonstrates everything you've learned so far: - Subclassing `Layer` - Recursive layer composition - Loss classes and metric classes - `add_loss` - `GradientTape` ## Using built-in training loops It would be a bit silly if you had to write your own low-level training loops every time for simple use cases. Keras provides you with a built-in training loop on the `Model` class. If you want to use it, either subclass from the `Model` class, or create a `Functional` or `Sequential` model. To demonstrate it, let's reuse the MNIST setup from above: ``` # Prepare a dataset. (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() x_train = x_train.reshape(60000, 784).astype('float32') / 255 dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) dataset = dataset.shuffle(buffer_size=1024).batch(64) # Instantiate a simple classification model model = tf.keras.Sequential([ layers.Dense(256, activation=tf.nn.relu), layers.Dense(256, activation=tf.nn.relu), layers.Dense(10) ]) # Instantiate a logistic loss function that expects integer targets. loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Instantiate an accuracy metric. accuracy = tf.keras.metrics.SparseCategoricalAccuracy() # Instantiate an optimizer. 
optimizer = tf.keras.optimizers.Adam() ``` First, call `compile` to configure the optimizer, loss, and metrics to monitor. ``` model.compile(optimizer=optimizer, loss=loss, metrics=[accuracy]) ``` Then we call `fit` on our model to pass it the data: ``` model.fit(dataset, epochs=3) ``` Done! **Note:** When you use `fit`, by default execution uses static graphs, so you don't need to add any `tf.function` decorators to your model or your layers. Now let's test it: ``` x_test = x_test[:].reshape(10000, 784).astype('float32') / 255 test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)) test_dataset = test_dataset.batch(128) loss, acc = model.evaluate(test_dataset) print('loss: %.3f - acc: %.3f' % (loss, acc)) ``` Note that you can also monitor your loss and metrics on some validation data during `fit`. Also, you can call `fit` directly on Numpy arrays, so no need for the dataset conversion: ``` (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() x_train = x_train.reshape(60000, 784).astype('float32') / 255 num_val_samples = 10000 x_val = x_train[-num_val_samples:] y_val = y_train[-num_val_samples:] x_train = x_train[:-num_val_samples] y_train = y_train[:-num_val_samples] # Instantiate a simple classification model model = tf.keras.Sequential([ layers.Dense(256, activation=tf.nn.relu), layers.Dense(256, activation=tf.nn.relu), layers.Dense(10) ]) # Instantiate a logistic loss function that expects integer targets. loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Instantiate an accuracy metric. accuracy = tf.keras.metrics.SparseCategoricalAccuracy() # Instantiate an optimizer. 
optimizer = tf.keras.optimizers.Adam() model.compile(optimizer=optimizer, loss=loss, metrics=[accuracy]) model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=3, batch_size=64) ``` ## Callbacks One of the neat features of `fit` (besides built-in support for sample weighting and class weighting) is that you can easily customize what happens during training and evaluation by using [callbacks](https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/). A callback is an object that is called at different points during training (e.g. at the end of every batch or at the end of every epoch) and takes actions, such as saving a model, mutating variables on the model, loading a checkpoint, stopping training, etc. There's a bunch of built-in callback available, like `ModelCheckpoint` to save your models after each epoch during training, or `EarlyStopping`, which interrupts training when your validation metrics start stalling. And you can easily [write your own callbacks](https://www.tensorflow.org/guide/keras/custom_callback). ``` # Instantiate a simple classification model model = tf.keras.Sequential([ layers.Dense(256, activation=tf.nn.relu), layers.Dense(256, activation=tf.nn.relu), layers.Dense(10) ]) # Instantiate a logistic loss function that expects integer targets. loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) # Instantiate an accuracy metric. accuracy = tf.keras.metrics.SparseCategoricalAccuracy() # Instantiate an optimizer. optimizer = tf.keras.optimizers.Adam() model.compile(optimizer=optimizer, loss=loss, metrics=[accuracy]) # Instantiate some callbacks callbacks = [tf.keras.callbacks.EarlyStopping(), tf.keras.callbacks.ModelCheckpoint(filepath='my_model.keras', save_best_only=True)] model.fit(x_train, y_train, validation_data=(x_val, y_val), epochs=30, batch_size=64, callbacks=callbacks) ``` # Parting words I hope this guide has given you a good overview of what's possible with TensorFlow 2.0 and Keras! 
Remember that TensorFlow and Keras don't represent a single workflow. It's a spectrum of workflows, each with its own trade-off between usability and flexibility. For instance, you've noticed that it's much easier to use `fit` than to write a custom training loop, but `fit` doesn't give you the same level of granular control for research use cases. So use the right tool for the job! A core principle of Keras is "progressive disclosure of complexity": it's easy to get started, and you can gradually dive into workflows where you write more and more logic from scratch, providing you with complete control. This applies to both model definition, and model training. ![Model definition: spectrum of workflows](https://keras-dev.s3.amazonaws.com/tutorials-img/model-building-spectrum.png) ![Model training: spectrum of workflows](https://keras-dev.s3.amazonaws.com/tutorials-img/model-training-spectrum.png) ## What to learn next Next, there are many more topics you may be interested in: - [Saving and serialization](https://www.tensorflow.org/guide/keras/save_and_serialize) - [Distributed training on multiple GPUS](https://www.tensorflow.org/guide/distributed_training) - [Exporting models to TFLite for deployment on Android or embedded systems](https://www.tensorflow.org/lite/convert/python_api#converting_a_keras_model_) - [Exporting models to TensorFlow.js for deployment in the browser](https://www.tensorflow.org/js/tutorials/conversion/import_keras)
github_jupyter
# GLM: Robust Linear Regression Author: [Thomas Wiecki](https://twitter.com/twiecki) This tutorial first appeard as a post in small series on Bayesian GLMs on my blog: 1. [The Inference Button: Bayesian GLMs made easy with PyMC3](http://twiecki.github.com/blog/2013/08/12/bayesian-glms-1/) 2. [This world is far from Normal(ly distributed): Robust Regression in PyMC3](http://twiecki.github.io/blog/2013/08/27/bayesian-glms-2/) 3. [The Best Of Both Worlds: Hierarchical Linear Regression in PyMC3](http://twiecki.github.io/blog/2014/03/17/bayesian-glms-3/) In this blog post I will write about: - How a few outliers can largely affect the fit of linear regression models. - How replacing the normal likelihood with Student T distribution produces robust regression. - How this can easily be done with `PyMC3` and its new `glm` module by passing a `family` object. This is the second part of a series on Bayesian GLMs (click [here for part I about linear regression](http://twiecki.github.io/blog/2013/08/12/bayesian-glms-1/)). In this prior post I described how minimizing the squared distance of the regression line is the same as maximizing the likelihood of a Normal distribution with the mean coming from the regression line. This latter probabilistic expression allows us to easily formulate a Bayesian linear regression model. This worked splendidly on simulated data. The problem with simulated data though is that it's, well, simulated. In the real world things tend to get more messy and assumptions like normality are easily violated by a few outliers. Lets see what happens if we add some outliers to our simulated data from the last post. Again, import our modules. ``` %matplotlib inline import pymc3 as pm import matplotlib.pyplot as plt import numpy as np import theano ``` Create some toy data but also add some outliers. 
``` size = 100 true_intercept = 1 true_slope = 2 x = np.linspace(0, 1, size) # y = a + b*x true_regression_line = true_intercept + true_slope * x # add noise y = true_regression_line + np.random.normal(scale=.5, size=size) # Add outliers x_out = np.append(x, [.1, .15, .2]) y_out = np.append(y, [8, 6, 9]) data = dict(x=x_out, y=y_out) ``` Plot the data together with the true regression line (the three points in the upper left corner are the outliers we added). ``` fig = plt.figure(figsize=(7, 7)) ax = fig.add_subplot(111, xlabel='x', ylabel='y', title='Generated data and underlying model') ax.plot(x_out, y_out, 'x', label='sampled data') ax.plot(x, true_regression_line, label='true regression line', lw=2.) plt.legend(loc=0); ``` ## Robust Regression Lets see what happens if we estimate our Bayesian linear regression model using the `glm()` function as before. This function takes a [`Patsy`](http://patsy.readthedocs.org/en/latest/quickstart.html) string to describe the linear model and adds a Normal likelihood by default. ``` with pm.Model() as model: pm.glm.GLM.from_formula('y ~ x', data) trace = pm.sample(2000, cores=2) ``` To evaluate the fit, I am plotting the posterior predictive regression lines by taking regression parameters from the posterior distribution and plotting a regression line for each (this is all done inside of `plot_posterior_predictive()`). ``` plt.figure(figsize=(7, 5)) plt.plot(x_out, y_out, 'x', label='data') pm.plot_posterior_predictive_glm(trace, samples=100, label='posterior predictive regression lines') plt.plot(x, true_regression_line, label='true regression line', lw=3., c='y') plt.legend(loc=0); ``` As you can see, the fit is quite skewed and we have a fair amount of uncertainty in our estimate as indicated by the wide range of different posterior predictive regression lines. Why is this? The reason is that the normal distribution does not have a lot of mass in the tails and consequently, an outlier will affect the fit strongly. 
A Frequentist would estimate a [Robust Regression](http://en.wikipedia.org/wiki/Robust_regression) and use a non-quadratic distance measure to evaluate the fit. But what's a Bayesian to do? Since the problem is the light tails of the Normal distribution we can instead assume that our data is not normally distributed but instead distributed according to the [Student T distribution](http://en.wikipedia.org/wiki/Student%27s_t-distribution) which has heavier tails as shown next (I read about this trick in ["The Kruschke"](http://www.indiana.edu/~kruschke/DoingBayesianDataAnalysis/), aka the puppy-book; but I think [Gelman](http://www.stat.columbia.edu/~gelman/book/) was the first to formulate this). Lets look at those two distributions to get a feel for them. ``` normal_dist = pm.Normal.dist(mu=0, sigma=1) t_dist = pm.StudentT.dist(mu=0, lam=1, nu=1) x_eval = np.linspace(-8, 8, 300) plt.plot(x_eval, theano.tensor.exp(normal_dist.logp(x_eval)).eval(), label='Normal', lw=2.) plt.plot(x_eval, theano.tensor.exp(t_dist.logp(x_eval)).eval(), label='Student T', lw=2.) plt.xlabel('x') plt.ylabel('Probability density') plt.legend(); ``` As you can see, the probability of values far away from the mean (0 in this case) are much more likely under the `T` distribution than under the Normal distribution. To define the usage of a T distribution in `PyMC3` we can pass a family object -- `T` -- that specifies that our data is Student T-distributed (see `glm.families` for more choices). Note that this is the same syntax as `R` and `statsmodels` use. ``` with pm.Model() as model_robust: family = pm.glm.families.StudentT() pm.glm.GLM.from_formula('y ~ x', data, family=family) trace_robust = pm.sample(2000, cores=2) plt.figure(figsize=(7, 5)) plt.plot(x_out, y_out, 'x') pm.plot_posterior_predictive_glm(trace_robust, label='posterior predictive regression lines') plt.plot(x, true_regression_line, label='true regression line', lw=3., c='y') plt.legend(); ``` There, much better! 
The outliers barely influence our estimate at all, because the Student T likelihood assigns far more probability to extreme values than the Normal distribution would, so the model does not need to shift the regression line to accommodate them.
github_jupyter
# Live Data The 'Getting Started' guide has up until this point demonstrated how HoloViews objects can wrap your data and be given a rich, useful representation. All of the visualizations assumed that the data was already available in memory so that it could be used to construct the appropriate object, and all of the resulting visualizations can be viewed in static HTML pages, no longer requiring Python when users interact with them. In many important scenarios, the assumption that the data is immediately available in memory does not hold. The data of interest may exist on some remote server, making it unavailable locally until it is fetched. In other situations, the data may exist on the local disk, but be too large to fit into memory. Perhaps the data doesn't even exist yet: it may be the result of some computation yet to be performed or the outcome of some live process with the corresponding measurement not yet made. All these scenarios are examples of *live data* that can be made available to HoloViews using the appropriate Python process. In this section, we will see how HoloViews allows you to build visualizations that can update dynamically to newly available data and that can respond to live user interaction. <p><center><div class="alert alert-info" role="alert"><b>Note: </b>To work with live data, you need a live Python server, not a static web site, which is why the outputs shown below are GIF animations. 
If you run this notebook yourself, you will be able to try out your own interactions and compare them to the displayed GIF animations.</div></center></p> ## A computational process Let us start by importing NumPy and HoloViews and setting some suitable defaults for the ``Curve`` element we will be using (disabling axes and grid lines): ``` import holoviews as hv import numpy as np hv.extension('bokeh') %opts Curve [show_grid=False xaxis=None yaxis=None] ``` There are many possible examples of live data, including financial data feeds, real-time scientific measurements, and sophisticated numerical simulations. Here we will consider the path traced by two very simple equations: $$x_{n+1} = \sin(ay_n) + c \cos(ax_n)$$ $$y_{n+1} = \sin(bx_n) + d \cos(by_n)$$ These equations define the 'Clifford Attractor' described in the book "Chaos In Wonderland" by [Cliff Pickover](https://en.wikipedia.org/wiki/Clifford_A._Pickover). Now let's write a simple Python function to iterate these two equations starting from position ``(x0,y0)``: ``` def clifford(a,b,c,d,x0,y0): xn,yn = x0,y0 coords = [(x0,y0)] for i in range(10000): x_n1 = np.sin(a*yn) + c*np.cos(a*xn) y_n1 = np.sin(b*xn) + d*np.cos(b*yn) xn,yn = x_n1,y_n1 coords.append((xn,yn)) return coords ``` If we run this function now, we'll get a list of 10000 tuples, which won't be very informative. 
The ``Curve`` element accepts the output of our ``clifford`` function, making it trivial to define a function that when called gives us a visualization: ``` def clifford_attractor(a,b,c,d): return hv.Curve(clifford(a,b,c,d,x0=0,y0=0)) ``` We can then view the output for some combination of values for ``a,b,c,d``, starting from the origin: ``` %%opts Curve (line_width=0.03 color='red') clifford_attractor(a =-1.5, b=1.5, c=1, d=0.75 ) ``` This HoloViews element gives us a snapshot for the four chosen values, but what we really would like to do is to interact with the four-dimensional parameter space directly, even though that parameter space is too large to compute all possible combinations feasibly. ## Live parameter exploration To dynamically explore these parameters, we can start by declaring a ``DynamicMap``, passing in our function instead of the dictionary of ``Image`` elements we saw in the [Introduction](1-Introduction.ipynb). We declare the four arguments of our function as ``kdims``: ``` dmap = hv.DynamicMap(clifford_attractor, kdims=['a','b','c','d']) dmap ``` As you can see from the error message, HoloViews does not yet have the information needed to give us a visualization--it has no way to guess any value to use for the 'a','b','c', and 'd' dimensions. Since we know what suitable values look like, we can easily specify appropriate ranges using the ``redim`` method: ``` %%opts Curve (line_width=0.03 color='green') # When run live, this cell's output should match the behavior of the GIF below dmap.redim.range(a=(-1.5,-1),b=(1.5,2),c=(1,1.2),d=(0.75,0.8)) ``` <img src='https://assets.holoviews.org/gifs/guides/getting_started/5-Live_Data/live_data_1.gif'> These ranges supplied with ``redim.range`` are semantic specifications of suitable values for each of the parameters and they are used to define suitable ranges for the interactive sliders above. 
Note how the HoloViews options system described in the [Customization section](2-Customization.ipynb) continues to work with the ``DynamicMap``. ## Live interaction The live visualizations above are indistinguishable from standard HoloViews visualization, apart from the speed and memory usage. With a live Python server and the Bokeh backend, HoloViews can also be used to build highly customized interactive displays using ``DynamicMap`` and the *streams system*. A HoloViews stream is simply a parameter of a corresponding stream class configured to track some variable that reflects a user interaction. For instance, let's write a function that accepts an initial ``x`` and ``y`` value and computes a more complex version of the above plot, showing the ``x``,``y`` point as a dot along with a line segment indicating the first step taken when computing the attractor and some text indicating the starting point: ``` def interactive_clifford(a,b,c,d,x=0,y=0): coords = clifford(a,b,c,d,x0=x,y0=y) return (hv.Curve(coords) * hv.Points(coords[0]) * hv.Curve(coords[:2], group='Init') * hv.Text(-0.75,1.35, 'x:{x:.2f} y:{y:.2f}'.format(x=coords[0][0],y=coords[0][1]))) ``` All we have done is create an ``Overlay`` as described in the [Introduction](1-Introduction.ipynb) containing our Clifford attractor curve and a few other HoloViews elements parameterized accordingly, including ``Points`` and the ``Text`` annotation. Passing this function to ``DynamicMap`` together with a `PointerXY` stream that grabs the x,y locations of the mouse (in data space) creates an explorable visualization that you can interact with directly. 
The plot now shows the attractor (in blue) and the starting point and first step (in red), with the starting point following the mouse position: ``` from holoviews.streams import PointerXY %%opts Curve (line_width=0.03 color='blue') Points (color='red' size=10) Curve.Init (color='red' line_width=2) # When run live, this cell's output should match the behavior of the GIF below dmap = hv.DynamicMap(interactive_clifford, kdims=['a','b','c','d'], streams=[PointerXY(x=0,y=0)]) dmap.redim.range(a=(-1.4,-1),b=(1.6,1.8),c=(1,1.5),d=(0.7,0.8)) ``` <img src='https://assets.holoviews.org/gifs/guides/getting_started/5-Live_Data/live_data_2.gif'></img> By exploring with the mouse, see if you can find the fixed-point location (where the next step maps you to the same position) located at ``x=0.18,y=0.65`` with parameters ``a=-1.4, b=1.6, c=1`` and ``d=0.7``. To learn more about the streams system please consult the [user guide](../user_guide/06-Live_Data.ipynb) and check out our [Streams gallery](../reference/index.html#streams). ## Tradeoffs using live data ``DynamicMap`` and ``Streams`` allow specification of exciting, dynamic visualizations that let you build full-featured interactive applications and simulations with very little code (particularly when combined with a declarative widget library like [ParamNB](https://github.com/ioam/paramnb) or [ParamBokeh](https://github.com/ioam/parambokeh)). The way these dynamic visualizations work is that HoloViews runs JavaScript in your browser, which then communicates with a running Python server process that may be running in the Jupyter notebook server or in the [Bokeh server](http://bokeh.pydata.org/en/latest/docs/user_guide/server.html). This Python process may be running locally on your machine or on some remote internet or local-network server. Regardless of where it is running, this Python process executes the callback you supply to ``DynamicMap``, allowing HoloViews to update your visualization whenever the parameters change. 
This architecture is powerful and fully general, as you can always make static content in memory into dynamic output generated by a function (see the [User Guide](../user_guide/06-Live_Data.ipynb) to learn more). Using live data is not always recommended, however, because using purely static content also has some important advantages: ### Reasons to use live data * Your data is inherently coming from a live source and your visualization needs to reflect this in real time. * You wish to explore a large parameter space and statically sampling this space adequately is prohibitive in memory or computation time. * Your data is too big to fit in memory and you only need to explore a portion of it that you can stream in from disk. * You want an open-ended visualization that keeps updating indefinitely. ### Reasons to use static data * You wish to archive or record your visualization in such a way that it exists independently of code execution in a potentially changing codebase. * You wish to share visualizations in a static HTML file that does not require running a live server (e.g a file that can be e-mailed and immediately viewed or placed on an HTML server). The general recommendation is to visualize your data with ``HoloMap`` (as in the introduction to this guide) when you have a small amount of data (typically a few megabytes) that can be quickly computed and can reasonably be embedded into an HTML file. Otherwise, you can use ``DynamicMap`` that you can sample from to generate a ``HoloMap`` from when you wish to share your results (see the [user guide](../user_guide/06-Live_Data.ipynb) for more information on how to turn your ``DynamicMap`` objects into ``HoloMap``s). Now that you have explored the basic capabilities of HoloViews, you should try it out on your own data, guided by the [user guide](../user_guide/) and following examples in the component [reference gallery](../reference/) and other demos in the [gallery](../gallery/).
github_jupyter
# SIT742: Modern Data Science **(Week 06: Big Data Platform (I))** --- - Materials in this module include resources collected from various open-source online repositories. - You are free to use, change and distribute this package. - If you found any issue/bug for this document, please submit an issue at [tulip-lab/sit742](https://github.com/tulip-lab/sit742/issues) Prepared by **SIT742 Teaching Team** --- ## Session 6A - A Touch of Apache Spark --- This lab session will teach you how to use [Apache Spark](http://spark.apache.org/), a framework for large-scale data processing, within a notebook. You may need to do the following before running this notebook in Cloud platform such as Google Colab, DataBricks, or IBM Cloud: - Create one cluster to run your tasks and the following lab sessions - Install the necessary packages such as * matplotlib * test_helper * pyzmq * pandas * hashlib - Import all related lab sessions into Workspace - Import all needed data into Tables or S3, and note down the address of each file. ## Content Part A: Apache Spark * [Basic notebook usage and Python integration](#note) * [PySpark](#pyspark) * [RDD Transformations and Actions](#rdd) * [Lambda functions](#lambda) * [Additional RDD actions](#actions) * [Additional RDD transformations](#transfoormations) * [Caching RDDs and storage options](#cache) * [Debugging Spark applications and lazy evaluation](#debug) Part B: Test Spark functionality * [Check Spark Functionality](#spark) * [Check class Testing Functionality](#test) * [Check Plotting](#plot) --- ## Introduction ## Many traditional frameworks were designed to be run on a single computer. However, many datasets today are too large to be stored on a single computer, and even when a dataset can be stored on one computer (such as the datasets in this tutorial), the dataset can often be processed much more quickly using multiple computers. 
Spark has efficient implementations of a number of transformations and actions that can be composed together to perform data processing and analysis. Spark excels at distributing these operations across a cluster while abstracting away many of the underlying implementation details. Spark has been designed with a focus on scalability and efficiency. With Spark you can begin developing your solution on your laptop, using a small dataset, and then use that same code to process terabytes or even petabytes across a distributed cluster. The following transformations will be covered: * `map()`, `mapPartitions()`, `mapPartitionsWithIndex()`, `filter()`, `flatMap()`, `reduceByKey()`, `groupByKey()` The following actions will be covered: * `first()`, `take()`, `takeSample()`, `takeOrdered()`, `collect()`, `count()`, `countByValue()`, `reduce()`, `top()` Also covered: * `cache()`, `unpersist()`, `id()`, `setName()` Note that, for reference, you can look up the details of these methods in [Spark's Python API](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) After that you will test that the virtual machine (VM) is functioning properly. You will not need to solve any problems to complete this lab. You can run a cell by pressing "shift-enter", which will compute the current cell and advance to the next cell, or by clicking in a cell and pressing "control-enter", which will compute the current cell and remain in that cell. <a id = "note"></a> ## Part A: Apache Spark ## ### **1. Notebook usage and [Python](https://docs.python.org/3.8/) integration ** ### #### **(1a) Notebook usage** A notebook is comprised of a linear sequence of cells. These cells can contain either markdown or code, but we won't mix both in one cell. When a markdown cell is executed it renders formatted text, images, and links just like HTML in a normal webpage. The text you are reading right now is part of a markdown cell. 
Python code cells allow you to execute arbitrary Python commands just like in any Python shell. Place your cursor inside the cell below, and press "Shift" + "Enter" to execute the code and advance to the next cell. You can also press "Ctrl" + "Enter" to execute the code and remain in the cell. These commands work the same in both markdown and code cells. ``` # This is a Python cell. You can run normal Python code here... print ('The sum of 1 and 1 is {0}'.format(1+1)) # Here is another Python cell, this time with a variable (x) declaration and an if statement: x = 42 if x > 40: print ('The sum of 1 and 2 is {0}'.format(1+2)) ``` #### **(1b) Notebook state** As you work through a notebook it is important that you run all of the code cells. The notebook is stateful, which means that variables and their values are retained until the notebook is detached (in Databricks Cloud) or the kernel is restarted (in IPython notebooks). If you do not run all of the code cells as you proceed through the notebook, your variables will not be properly initialized and later code might fail. You will also need to rerun any cells that you have modified in order for the changes to be available to other cells. ``` # This cell relies on x being defined already. # If we didn't run the cells from part (1a) this code would fail. print (x * 2) ``` #### **(1c) Library imports** We can import standard Python libraries ([modules](https://docs.python.org/3.8/tutorial/modules.html)) the usual way. An `import` statement will import the specified module. In this tutorial and future labs, we will provide any imports that are necessary. ``` # Import the regular expression library import re m = re.search('(?<=abc)def', 'abcdef') m.group(0) # Import the datetime library import datetime print ('This was last run on: {0}'.format(datetime.datetime.now())) ``` <a id = "pyspark"></a> ### **2. 
Introduction to [pySpark API](https://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) running in the browser** #### **Spark Context** In Spark, communication occurs between a driver and executors. The driver has Spark jobs that it needs to run and these jobs are split into tasks that are submitted to the executors for completion. The results from these tasks are delivered back to the driver. In section 1, we saw that normal python code can be executed via cells. When using Databricks Cloud this code gets executed in the Spark driver's Java Virtual Machine (JVM) and not in an executor's JVM, and when using an IPython notebook it is executed within the kernel associated with the notebook. Since no Spark functionality is actually being used, no tasks are launched on the executors. In order to use Spark functionality and its API we will need to use a `SparkContext`. When running Spark, you start a new Spark application by creating a [SparkContext](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext). When the `SparkContext` is created, it asks the master for some cores to use to do work. The master sets these cores aside just for you; they won't be used for other applications. When using Databricks Cloud or the virtual machine provisioned for this class, the `SparkContext` is created for you automatically as `sc`. #### **(2a) Example Cluster** The diagram below shows an example cluster, where the cores allocated for an application are outlined in purple. ![executors](http://spark-mooc.github.io/web-assets/images/executors.png) At a high level, every Spark application consists of a driver program that launches various parallel operations on executor Java Virtual Machines (JVMs) running either in a cluster or locally on the same machine. When running a cluster, such as ...In Databricks Cloud, "Databricks Shell" is the driver program. When running locally, "PySparkShell" is the driver program. 
In all cases, this driver program contains the main loop for the program and creates distributed datasets on the cluster, then applies operations (transformations & actions) to those datasets. Driver programs access Spark through a SparkContext object, which represents a connection to a computing cluster. A Spark context object (`sc`) is the main entry point for Spark functionality. A Spark context can be used to create Resilient Distributed Datasets (RDDs) on a cluster. ``` !apt-get install openjdk-8-jdk-headless -qq > /dev/null !wget -q https://archive.apache.org/dist/spark/spark-2.4.0/spark-2.4.0-bin-hadoop2.7.tgz !tar xf spark-2.4.0-bin-hadoop2.7.tgz !pip install -q findspark import os os.environ["JAVA_HOME"] = "/usr/lib/jvm/java-8-openjdk-amd64" os.environ["SPARK_HOME"] = "/content/spark-2.4.0-bin-hadoop2.7" import findspark findspark.init() from pyspark import SparkContext sc = SparkContext.getOrCreate() ``` ##### ** Predefined Variables** Notebooks already have some of the most useful Apache Spark variables that you’re going to need: - sc - sqlContext ``` # A SQLContext is also already created for you. # Do not create another or unspecified behavior may occur. # As you can see below, the sqlContext provided is a HiveContext. #For the difference between the HiveContext and sqlContext, please refer the URL #https://stackoverflow.com/questions/33666545/what-is-the-difference-between-apache-spark-sqlcontext-vs-hivecontext from pyspark.sql import SQLContext sqlContext = SQLContext(sc) sqlContext # A Spark Context is already created for you. # Do not create another or unspecified behavior may occur. 
#The sc command will show SparkContext Information including the Version, Master name and APP sc # Run this to check Spark Context sc is already loaded # Display the type of the Spark Context sc print("The sc type is ",type(sc),"\nThe sqlContext type is ",type(sqlContext)) ``` #### **(2b) `SparkContext` attributes** You can use Python's [dir()](https://docs.python.org/2/library/functions.html?highlight=dir#dir) function to get a list of all the attributes (including methods) accessible through the `sc` object. ``` # List sc's attributes dir(sc) ``` #### **(2c) Getting help** Alternatively, you can use Python's [help()](https://docs.python.org/2/library/functions.html?highlight=help#help) function to get an easier to read list of all the attributes, including examples, that the `sc` object has. ``` # Use help to obtain more detailed information #Please note that xrange() is replaced by the range() in Python 3 help(sc) # After reading the help we've decided we want to use sc.version to see what version of Spark we are running sc.version # Help can be used on any Python object help(map) ```
We will perform several exercises to obtain a better understanding of RDDs: * Create a Python collection of 10,000 integers * Create a Spark base RDD from that collection * Subtract one from each value using `map` * Perform action `collect` to view results * Perform action `count` to view counts * Apply transformation `filter` and view results with `collect` * Learn about lambda functions * Explore how lazy evaluation works and the debugging challenges that it introduces #### **(3a) Create a Python collection of integers in the range of 1 .. 10000** In Python 3, xrange() is removed. We will use the [range()](https://docs.python.org/3.8/library/functions.html?highlight=xrange#func-range) function to create a constructor to represent an immutable sequence of numbers. ``` data = range(1, 10001) # Data is just a normal Python range # Obtain data's first element data[0] # We can check the size of the list using the len() function len(data) ``` #### **(3b) Distributed data and using a collection to create an RDD** In Spark, datasets are represented as a list of entries, where the list is broken up into many different partitions that are each stored on a different machine. Each partition holds a unique subset of the entries in the list. Spark calls datasets that it stores "Resilient Distributed Datasets" (RDDs). One of the defining features of Spark, compared to other data analytics frameworks (e.g., Hadoop), is that it stores data in memory rather than on disk. This allows Spark applications to run much more quickly, because they are not slowed down by needing to read data from disk. The figure below illustrates how Spark breaks a list of data entries into partitions that are each stored in memory on a worker. ![partitions](http://spark-mooc.github.io/web-assets/images/partitions.png) To create the RDD, we use `sc.parallelize()`, which tells Spark to create a new set of input data based on data that is passed in. In this example, we will provide a `range`.
The second argument to the [sc.parallelize()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.SparkContext.parallelize) method tells Spark how many partitions to break the data into when it stores the data in memory (we'll talk more about this later in this tutorial). Note that for better performance when using `parallelize`, `range()` is recommended if the input represents a range. This is the reason why we used `range()` in 3a. There are many different types of RDDs. The base class for RDDs is [pyspark.RDD](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD) and other RDDs subclass `pyspark.RDD`. Since the other RDD types inherit from `pyspark.RDD` they have the same APIs and are functionally identical. We'll see that `sc.parallelize()` generates a `pyspark.rdd.PipelinedRDD` when its input is an `range`, and a `pyspark.RDD` when its input is a `range`. After we generate RDDs, we can view them in the "Storage" tab of the web UI. You'll notice that new datasets are not listed until Spark needs to return a result due to an action being executed. This feature of Spark is called "lazy evaluation". This allows Spark to avoid performing unnecessary calculations. 
``` # Parallelize data including 10000 using 8 partitions # This operation is a transformation of data into an RDD # Spark uses lazy evaluation, so no Spark jobs are run at this point rangeRDD = sc.parallelize(data, 8) # Let's view help on parallelize help(sc.parallelize) # Let's see what type sc.parallelize() returned print ('type of rangeRDD: {0}'.format(type(rangeRDD))) # Each RDD gets a unique ID print ('rangeRDD id: {0}'.format(rangeRDD.id())) # We can name each newly created RDD using the setName() method rangeRDD.setName('My first RDD') # Let's view the lineage (the set of transformations) of the RDD using toDebugString() print (rangeRDD.toDebugString()) # Let's use help to see what methods we can call on this RDD help(rangeRDD) # Let's see how many partitions the RDD will be split into by using getNumPartitions() rangeRDD.getNumPartitions() print(rangeRDD.getNumPartitions()) ``` #### **(3c): Subtract one from each value using `map`** So far, we've created a distributed dataset that is split into many partitions, where each partition is stored on a single machine in our cluster. Let's look at what happens when we do a basic operation on the dataset. Many useful data analysis operations can be specified as "do something to each item in the dataset". These data-parallel operations are convenient because each item in the dataset can be processed individually: the operation on one entry doesn't affect the operations on any of the other entries. Therefore, Spark can parallelize the operation. `map(f)`, the most common Spark transformation, is one such example: it applies a function `f` to each item in the dataset, and outputs the resulting dataset. When you run `map()` on a dataset, a single *stage* of tasks is launched. A *stage* is a group of tasks that all perform the same computation, but on different input data. One task is launched for each partition, as shown in the example below. A task is a unit of execution that runs on a single machine.
When we run `map(f)` within a partition, a new *task* applies `f` to all of the entries in a particular partition, and outputs a new partition. In this example figure, the dataset is broken into four partitions, so four `map()` tasks are launched. ![tasks](http://spark-mooc.github.io/web-assets/images/tasks.png) The figure below shows how this would work on the smaller data set from the earlier figures. Note that one task is launched for each partition. ![foo](http://spark-mooc.github.io/web-assets/images/map.png) When applying the `map()` transformation, each item in the parent RDD will map to one element in the new RDD. So, if the parent RDD has twenty elements, the new RDD will also have twenty items. Now we will use `map()` to subtract one from each value in the base RDD we just created. First, we define a Python function called `sub()` that will subtract one from the input integer. Second, we will pass each item in the base RDD into a `map()` transformation that applies the `sub()` function to each element. And finally, we print out the RDD transformation hierarchy using `toDebugString()`. ``` # Create sub function to subtract 1 def sub(value): """"Subtracts one from `value`. Args: value (int): A number. Returns: int: `value` minus one. """ return (value - 1) # Transform rangeRDD through map transformation using sub function # Because map is a transformation and Spark uses lazy evaluation, no jobs, stages, # or tasks will be launched when we run this code. subRDD = rangeRDD.map(sub) # Let's see the RDD transformation hierarchy print (subRDD.toDebugString()) ``` #### ** (3d) Perform action `collect` to view results ** To see a list of elements decremented by one, we need to create a new list on the driver from the the data distributed in the executor nodes. To do this we call the `collect()` method on our RDD. `collect()` is often used after a filter or other operation to ensure that we are only returning a *small* amount of data to the driver. 
This is done because the data returned to the driver must fit into the driver's available memory. If not, the driver will crash. The `collect()` method is the first action operation that we have encountered. Action operations cause Spark to perform the (lazy) transformation operations that are required to compute the RDD returned by the action. In our example, this means that tasks will now be launched to perform the `parallelize`, `map`, and `collect` operations. In this example, the dataset is broken into four partitions, so four `collect()` tasks are launched. Each task collects the entries in its partition and sends the result to the SparkContext, which creates a list of the values, as shown in the figure below. ![collect](http://spark-mooc.github.io/web-assets/images/collect.png) The above figures showed what would happen if we ran `collect()` on a small example dataset with just four partitions. Now let's run `collect()` on `subRDD`. ``` # Let's collect the data print (subRDD.collect()) ``` #### ** (3d) Perform action `count` to view counts ** One of the most basic jobs that we can run is the `count()` job which will count the number of elements in an RDD using the `count()` action. Since `map()` creates a new RDD with the same number of elements as the starting RDD, we expect that applying `count()` to each RDD will return the same result. Note that because `count()` is an action operation, if we had not already performed an action with `collect()`, then Spark would now perform the transformation operations when we executed `count()`. Each task counts the entries in its partition and sends the result to your SparkContext, which adds up all of the counts. The figure below shows what would happen if we ran `count()` on a small example dataset with just four partitions. 
![count](http://spark-mooc.github.io/web-assets/images/count.png) ``` print (rangeRDD.count()) print (subRDD.count()) ``` #### ** (3e) Apply transformation `filter` and view results with `collect` ** Next, we'll create a new RDD that only contains the values less than ten by using the `filter(f)` data-parallel operation. The `filter(f)` method is a transformation operation that creates a new RDD from the input RDD by applying filter function `f` to each item in the parent RDD and only passing those elements where the filter function returns `True`. Elements that do not return `True` will be dropped. Like `map()`, filter can be applied individually to each entry in the dataset, so is easily parallelized using Spark. The figure below shows how this would work on the small four-partition dataset. ![filter](http://spark-mooc.github.io/web-assets/images/filter.png) To filter this dataset, we'll define a function called `ten()`, which returns `True` if the input is less than 10 and `False` otherwise. This function will be passed to the `filter()` transformation as the filter function `f`. To view the filtered list of elements less than ten, we need to create a new list on the driver from the distributed data on the executor nodes. We use the `collect()` method to return a list that contains all of the elements in this filtered RDD to the driver program. ``` # Define a function to filter a single value def ten(value): """Return whether value is below ten. Args: value (int): A number. Returns: bool: Whether `value` is less than ten. 
""" if (value < 10): return True else: return False # The ten function could also be written concisely as: def ten(value): return value < 10 # Pass the function ten to the filter transformation # Filter is a transformation so no tasks are run filteredRDD = subRDD.filter(ten) # View the results using collect() # Collect is an action and triggers the filter transformation to run print (filteredRDD.collect()) def number(value): if (value > 10): return True else: return False filterlessRDD = subRDD.filter(number) print (filteredRDD) print (filterlessRDD.collect()) ``` <a id = "lambda"></a> ### ** 4. Lambda Functions ** #### ** (4a) Using Python `lambda()` functions ** Python supports the use of small one-line anonymous functions that are not bound to a name at runtime. Borrowed from LISP, these `lambda` functions can be used wherever function objects are required. They are syntactically restricted to a single expression. Remember that `lambda` functions are a matter of style and using them is never required - semantically, they are just syntactic sugar for a normal function definition. You can always define a separate normal function instead, but using a `lambda()` function is an equivalent and more compact form of coding. Ideally you should consider using `lambda` functions where you want to encapsulate non-reusable code without littering your code with one-line functions. Here, instead of defining a separate function for the `filter()` transformation, we will use an inline `lambda()` function. ``` lambdaRDD = subRDD.filter(lambda x: x < 10) lambdaRDD.collect() # Let's collect the even values less than 10 evenRDD = lambdaRDD.filter(lambda x: x % 2 == 0) evenRDD.collect() ``` <a id = "actions"></a> ### ** 5. 
Additional RDD actions ** #### ** (5a) Other common actions ** Let's investigate the additional actions: [first()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.first), [take()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.take), [top()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.top), [takeOrdered()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.takeOrdered), and [reduce()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduce) One useful thing to do when we have a new dataset is to look at the first few entries to obtain a rough idea of what information is available. In Spark, we can do that using the `first()`, `take()`, `top()`, and `takeOrdered()` actions. Note that for the `first()` and `take()` actions, the elements that are returned depend on how the RDD is *partitioned*. Instead of using the `collect()` action, we can use the `take(n)` action to return the first n elements of the RDD. The `first()` action returns the first element of an RDD, and is equivalent to `take(1)`. The `takeOrdered()` action returns the first n elements of the RDD, using either their natural order or a custom comparator. The key advantage of using `takeOrdered()` instead of `first()` or `take()` is that `takeOrdered()` returns a deterministic result, while the other two actions may return differing results, depending on the number of partions or execution environment. `takeOrdered()` returns the list sorted in *ascending order*. The `top()` action is similar to `takeOrdered()` except that it returns the list in *descending order.* The `reduce()` action reduces the elements of a RDD to a single value by applying a function that takes two parameters and returns a single value. The function should be commutative and associative, as `reduce()` is applied at the partition level and then again to aggregate results from partitions. 
If these rules don't hold, the results from `reduce()` will be inconsistent. Reducing locally at partitions makes `reduce()` very efficient. ``` # Let's get the first element print (filteredRDD.first()) # The first 4 print (filteredRDD.take(4)) # Note that it is ok to take more elements than the RDD has print (filteredRDD.take(12)) # Retrieve the three smallest elements print (filteredRDD.takeOrdered(3)) # Retrieve the five largest elements print (filteredRDD.top(5)) # Pass a lambda function to takeOrdered to reverse the order filteredRDD.takeOrdered(4, lambda s: -s) # Obtain Python's add function from operator import add # Efficiently sum the RDD using reduce print (filteredRDD.reduce(add)) # Sum using reduce with a lambda function print (filteredRDD.reduce(lambda a, b: a + b)) # Note that subtraction is not both associative and commutative print (filteredRDD.reduce(lambda a, b: a - b)) print (filteredRDD.repartition(4).reduce(lambda a, b: a - b)) # While addition is print (filteredRDD.repartition(4).reduce(lambda a, b: a + b)) ``` #### ** (5b) Advanced actions ** Here are two additional actions that are useful for retrieving information from an RDD: [takeSample()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.takeSample) and [countByValue()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.countByValue) The `takeSample()` action returns an array with a random sample of elements from the dataset. It takes in a `withReplacement` argument, which specifies whether it is okay to randomly pick the same item multiple times from the parent RDD (so when `withReplacement=True`, you can get the same item back multiple times). It also takes an optional `seed` parameter that allows you to specify a seed value for the random number generator, so that reproducible results can be obtained. The `countByValue()` action returns the count of each unique value in the RDD as a dictionary that maps values to counts. 
``` # takeSample reusing elements print (filteredRDD.takeSample(withReplacement=True, num=6)) # takeSample without reuse print (filteredRDD.takeSample(withReplacement=False, num=6)) # Set seed for predictability print (filteredRDD.takeSample(withReplacement=False, num=6, seed=500)) # Try reruning this cell and the cell above -- the results from this cell will remain constant # Use ctrl-enter to run without moving to the next cell # Create new base RDD to show countByValue repetitiveRDD = sc.parallelize([1, 2, 3, 1, 2, 3, 1, 2, 1, 2, 3, 3, 3, 4, 5, 4, 6]) print (repetitiveRDD.countByValue()) ``` <a id = "transformations"></a> ### ** 6. Additional RDD transformations ** #### ** (6a) `flatMap` ** When performing a `map()` transformation using a function, sometimes the function will return more (or less) than one element. We would like the newly created RDD to consist of the elements outputted by the function. Simply applying a `map()` transformation would yield a new RDD made up of iterators. Each iterator could have zero or more elements. Instead, we often want an RDD consisting of the values contained in those iterators. The solution is to use a [flatMap()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.flatMap) transformation, `flatMap()` is similar to `map()`, except that with `flatMap()` each input item can be mapped to zero or more output elements. To demonstrate `flatMap()`, we will first emit a word along with its plural, and then a range that grows in length with each subsequent operation. 
``` # Let's create a new base RDD to work from wordsList = ['cat', 'elephant', 'rat', 'rat', 'cat'] wordsRDD = sc.parallelize(wordsList, 4) # Use map singularAndPluralWordsRDDMap = wordsRDD.map(lambda x: (x, x + 's')) # Use flatMap singularAndPluralWordsRDD = wordsRDD.flatMap(lambda x: (x, x + 's')) # View the results print (singularAndPluralWordsRDDMap.collect()) print (singularAndPluralWordsRDD.collect()) # View the number of elements in the RDD print (singularAndPluralWordsRDDMap.count()) print (singularAndPluralWordsRDD.count()) simpleRDD = sc.parallelize([2, 3, 4]) print (simpleRDD.map(lambda x: range(1, x)).collect()) print (simpleRDD.flatMap(lambda x: range(1, x)).collect()) ``` #### ** (6b) `groupByKey` and `reduceByKey` ** Let's investigate the additional transformations: [groupByKey()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.groupByKey) and [reduceByKey()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.reduceByKey). Both of these transformations operate on pair RDDs. A pair RDD is an RDD where each element is a pair tuple (key, value). For example, `sc.parallelize([('a', 1), ('a', 2), ('b', 1)])` would create a pair RDD where the keys are 'a', 'a', 'b' and the values are 1, 2, 1. The `reduceByKey()` transformation gathers together pairs that have the same key and applies a function to two associated values at a time. `reduceByKey()` operates by applying the function first within each partition on a per-key basis and then across the partitions. While both the `groupByKey()` and `reduceByKey()` transformations can often be used to solve the same problem and will produce the same answer, the `reduceByKey()` transformation works much better for large distributed datasets. This is because Spark knows it can combine output with a common key on each partition *before* shuffling (redistributing) the data across nodes. 
Only use `groupByKey()` if the operation would not benefit from reducing the data before the shuffle occurs. Look at the diagram below to understand how `reduceByKey` works. Notice how pairs on the same machine with the same key are combined (by using the lambda function passed into reduceByKey) before the data is shuffled. Then the lambda function is called again to reduce all the values from each partition to produce one final result. ![reduceByKey() figure](http://spark-mooc.github.io/web-assets/images/reduce_by.png) On the other hand, when using the `groupByKey()` transformation - all the key-value pairs are shuffled around, causing a lot of unnecessary data to be transferred over the network. To determine which machine to shuffle a pair to, Spark calls a partitioning function on the key of the pair. Spark spills data to disk when there is more data shuffled onto a single executor machine than can fit in memory. However, it flushes out the data to disk one key at a time, so if a single key has more key-value pairs than can fit in memory an out of memory exception occurs. This will be more gracefully handled in a later release of Spark so that the job can still proceed, but should still be avoided. When Spark needs to spill to disk, performance is severely impacted. ![groupByKey() figure](http://spark-mooc.github.io/web-assets/images/group_by.png) As your dataset grows, the difference in the amount of data that needs to be shuffled, between the `reduceByKey()` and `groupByKey()` transformations, becomes increasingly exaggerated. Here are more transformations to prefer over `groupByKey()`: + [combineByKey()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.combineByKey) can be used when you are combining elements but your return type differs from your input value type.
+ [foldByKey()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.foldByKey) merges the values for each key using an associative function and a neutral "zero value". Now let's go through a simple `groupByKey()` and `reduceByKey()` example. ``` pairRDD = sc.parallelize([('a', 1), ('a', 2), ('b', 1)]) # mapValues only used to improve format for printing print(pairRDD.groupByKey().mapValues(lambda x: list(x)).collect()) # Using mapValues, which is recommended when the key doesn't change print(pairRDD.groupByKey().mapValues(lambda x: sum(x)).collect()) # reduceByKey is more efficient / scalable print(pairRDD.reduceByKey(add).collect()) ``` #### ** (6c) Advanced transformations ** [Optional] Let's investigate the advanced transformations: [mapPartitions()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.mapPartitions) and [mapPartitionsWithIndex()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.mapPartitionsWithIndex) The `mapPartitions()` transformation uses a function that takes in an iterator (to the items in that specific partition) and returns an iterator. The function is applied on a partition by partition basis. The `mapPartitionsWithIndex()` transformation uses a function that takes in a partition index (think of this like the partition number) and an iterator (to the items in that specific partition). For every partition (index, iterator) pair, the function returns a tuple of the same partition index number and an iterator of the transformed items in that partition.
``` # mapPartitions takes a function that takes an iterator and returns an iterator print (wordsRDD.collect()) itemsRDD = wordsRDD.mapPartitions(lambda iterator: [','.join(iterator)]) print (itemsRDD.collect()) itemsByPartRDD = wordsRDD.mapPartitionsWithIndex(lambda index, iterator: [(index, list(iterator))]) # We can see that three of the (partitions) workers have one element and the fourth worker has two # elements, although things may not bode well for the rat... print (itemsByPartRDD.collect()) # Rerun without returning a list (acts more like flatMap) itemsByPartRDD = wordsRDD.mapPartitionsWithIndex(lambda index, iterator: (index, list(iterator))) print (itemsByPartRDD.collect()) ``` <a id = "cache"></a> ### ** 7. Caching RDDs and storage options ** #### ** (7a) Caching RDDs ** For efficiency Spark keeps your RDDs in memory. By keeping the contents in memory, Spark can quickly access the data. However, memory is limited, so if you try to keep too many RDDs in memory, Spark will automatically delete RDDs from memory to make space for new RDDs. If you later refer to one of the RDDs, Spark will automatically recreate the RDD for you, but that takes time. So, if you plan to use an RDD more than once, then you should tell Spark to cache that RDD. You can use the `cache()` operation to keep the RDD in memory. However, if you cache too many RDDs and Spark runs out of memory, it will delete the least recently used (LRU) RDD first. Again, the RDD will be automatically recreated when accessed. You can check if an RDD is cached by using the `is_cached` attribute, and you can see your cached RDD in the "Storage" section of the Spark web UI. If you click on the RDD's name, you can see more information about where the RDD is stored. 
``` # Name the RDD filteredRDD.setName('My Filtered RDD') # Cache the RDD filteredRDD.cache() # Is it cached print (filteredRDD.is_cached) ``` #### ** (7b) Unpersist and storage options ** Spark automatically manages the RDDs cached in memory and will save them to disk if it runs out of memory. For efficiency, once you are finished using an RDD, you can optionally tell Spark to stop caching it in memory by using the RDD's `unpersist()` method to inform Spark that you no longer need the RDD in memory. You can see the set of transformations that were applied to create an RDD by using the `toDebugString()` method, which will provide storage information, and you can directly query the current storage information for an RDD using the `getStorageLevel()` operation. ** Advanced: ** Spark provides many more options for managing how RDDs are stored in memory or even saved to disk. You can explore the API for RDD's [persist()](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.RDD.persist) operation using Python's [help()](https://docs.python.org/2/library/functions.html?highlight=help#help) command. The `persist()` operation, optionally, takes a pySpark [StorageLevel](http://spark.apache.org/docs/latest/api/python/pyspark.html#pyspark.StorageLevel) object. ``` # Note that toDebugString also provides storage information print (filteredRDD.toDebugString()) # If we are done with the RDD we can unpersist it so that its memory can be reclaimed filteredRDD.unpersist() # Storage level for a non cached RDD print (filteredRDD.getStorageLevel()) filteredRDD.cache() # Storage level for a cached RDD print (filteredRDD.getStorageLevel()) ``` <a id = "debug"></a> ### ** 8. Debugging Spark applications and lazy evaluation ** #### ** How Python is Executed in Spark ** Internally, Spark executes using a Java Virtual Machine (JVM). pySpark runs Python code in a JVM using [Py4J](http://py4j.sourceforge.net). 
Py4J enables Python programs running in a Python interpreter to dynamically access Java objects in a Java Virtual Machine. Methods are called as if the Java objects resided in the Python interpreter and Java collections can be accessed through standard Python collection methods. Py4J also enables Java programs to call back Python objects. Because pySpark uses Py4J, coding errors often result in a complicated, confusing stack trace that can be difficult to understand. In the following section, we'll explore how to understand stack traces. #### ** (8a) Challenges with lazy evaluation using transformations and actions ** Spark's use of lazy evaluation can make debugging more difficult because code is not always executed immediately. To see an example of how this can happen, let's first define a broken filter function. Next we perform a `filter()` operation using the broken filtering function. No error will occur at this point due to Spark's use of lazy evaluation. The `filter()` method will not be executed *until* an action operation is invoked on the RDD. We will perform an action by using the `collect()` method to return a list that contains all of the elements in this RDD. ``` def brokenTen(value): """Incorrect implementation of the ten function. Note: The `if` statement checks an undefined variable `val` instead of `value`. Args: value (int): A number. Returns: bool: Whether `value` is less than ten. Raises: NameError: The function references `val`, which is not available in the local or global namespace, so a `NameError` is raised. """ if (val < 10): return True else: return False brokenRDD = subRDD.filter(brokenTen) # Now we'll see the error brokenRDD.collect() ``` #### ** (8b) Finding the bug ** When the `filter()` method is executed, Spark evaluates the RDD by executing the `parallelize()` and `filter()` methods. Since our `filter()` method has an error in the filtering function `brokenTen()`, an error occurs. 
Scroll through the output "Py4JJavaError Traceback (most recent call last)" part of the cell and first you will see that the line that generated the error is the `collect()` method line. There is *nothing wrong with this line*. However, it is an action and that caused other methods to be executed. Continue scrolling through the Traceback and you will see the following error line: NameError: global name 'val' is not defined Looking at this error line, we can see that we used the wrong variable name in our filtering function `brokenTen()`. #### ** (8c) Moving toward expert style ** As you are learning Spark, I recommend that you write your code in the form: RDD.transformation1() RDD.action1() RDD.transformation2() RDD.action2() Using this style will make debugging your code much easier as it makes errors easier to localize - errors in your transformations will occur when the next action is executed. Once you become more experienced with Spark, you can write your code with the form: RDD.transformation1().transformation2().action() We can also use `lambda()` functions instead of separately defined functions when their use improves readability and conciseness. ``` # Cleaner code through lambda use subRDD.filter(lambda x: x < 10).collect() # Even better by moving our chain of operators into a single line. sc.parallelize(data).map(lambda y: y - 1).filter(lambda x: x < 10).collect() ``` #### ** (8d) Readability and code style ** To make the expert coding style more readable, enclose the statement in parentheses and put each method, transformation, or action on a separate line. ``` # Final version (sc .parallelize(data) .map(lambda y: y - 1) .filter(lambda x: x < 10) .collect()) ``` ## ** Part B: Test Spark functionality ** <a id = "spark"></a> ### 9. 
Spark Functionality #### ** (9a) Parallelize, filter, and reduce ** ``` # Check that Spark is working largeRange = sc.parallelize(range(100000)) reduceTest = largeRange.reduce(lambda a, b: a + b) filterReduceTest = largeRange.filter(lambda x: x % 7 == 0).sum() print (reduceTest) print (filterReduceTest) # If the Spark jobs don't work properly these will raise an AssertionError assert reduceTest == 4999950000 assert filterReduceTest == 714264285 ``` #### ** (9b) Loading a text file ** You can upload your file to Amazon S3 or other online storage so that the Spark cluster can access it. In DataBricks, you can check this URL for information: - https://docs.databricks.com/user-guide/advanced/filestore.html You can import the data through Tables, Create Table, Data Import functions. Please remember to note down the prompt address, such as: - /FileStore/tables/dnsmv70v1491290162600/shakespeare.txt If the file uploading is properly done, you can access the file via Web browser using this URL: - https://community.cloud.databricks.com/files/my-stuff/my-file.txt?o=###### Replace ###### with your community edition ID. Such as - https://community.cloud.databricks.com/files/tables/dnsmv70v1491290162600/shakespeare.txt?o=48280648682457 ``` # You can check the content of a specified folder on DataBricks' DBFS. 
# display(dbutils.fs.ls("/FileStore/tables")) !pip install wget import wget link_to_data = 'https://github.com/tuliplab/mds/raw/master/Jupyter/data/shakespeare.txt' DataSet = wget.download(link_to_data) # Check loading data with sc.textFile #import os.path #baseDir = os.path.join('data') #inputPath = os.path.join('cs100', 'lab1', 'shakespeare.txt') #fileName = os.path.join(baseDir, inputPath) # Replace this address by your own filestore address rawData = sc.textFile('shakespeare.txt') shakespeareCount = rawData.count() print(shakespeareCount) # If the text file didn't load properly an AssertionError will be raised assert shakespeareCount == 122395 ``` <a id = "test"></a> ### ** 10. Check plotting ** #### ** (10a) Our first plot ** After executing the code cell below, you should see a plot with 50 blue circles. The circles should start at the bottom left and end at the top right. ``` # Check matplotlib plotting import matplotlib.pyplot as plt import matplotlib.cm as cm from math import log # %matplotlib inline # function for generating plot layout def preparePlot(xticks, yticks, figsize=(10.5, 6), hideLabels=False, gridColor='#999999', gridWidth=1.0): plt.close() fig, ax = plt.subplots(figsize=figsize, facecolor='white', edgecolor='white') ax.axes.tick_params(labelcolor='#999999', labelsize='10') for axis, ticks in [(ax.get_xaxis(), xticks), (ax.get_yaxis(), yticks)]: axis.set_ticks_position('none') axis.set_ticks(ticks) axis.label.set_color('#999999') if hideLabels: axis.set_ticklabels([]) plt.grid(color=gridColor, linewidth=gridWidth, linestyle='-') map(lambda position: ax.spines[position].set_visible(False), ['bottom', 'top', 'left', 'right']) return fig, ax # generate layout and plot data x = range(1, 50) y = [log(x1 ** 2) for x1 in x] fig, ax = preparePlot(range(5, 60, 10), range(0, 12, 1)) plt.scatter(x, y, s=14**2, c='#d6ebf2', edgecolors='#8cbfd0', alpha=0.75) ax.set_xlabel(r'$range(1, 50)$'), ax.set_ylabel(r'$\log_e(x^2)$') display(fig) pass ```
github_jupyter
``` import pyscisci.all as pyscisci import os import networkx as nx import pandas as pd import numpy as np import matplotlib.pylab as plt try: import seaborn as sns sns.set_style('white') from cdlib import algorithms from clusim.clustering import Clustering except: print('This example requires the optional packages seaborn, cdlib and clusim.') print('Please run:') print('pip install seaborn cdlib clusim') %matplotlib inline # set this path to where the APS database will be stored path2aps = '/home/ajgates/APS' path2aps = '/Volumes/GatesExpansionDrive/DataSets/APS/APS2019' myaps = pyscisci.APS(path2aps, keep_in_memory=False) # override the default APS data with the author disambiguation myaps.set_new_data_path('paa', 'publicationauthoraffiliation_supp2010') # we need the linkages between publications and authors paa = myaps.paa # and we need the linkages between publications and their references pub2ref = myaps.pub2ref # finally, we need temporal career information so load the year mapping pub2year = myaps.pub2year # and add the years to the paa paa['Year'] = [pub2year.get(pid, None) for pid in paa['PublicationId'].values] # Lets take the publication career of Giorgio Parisi parisi_paa = paa[paa['FullName'] == 'giorgio..parisi'] parisi_paa.drop_duplicates(subset=['PublicationId'], inplace=True) parisi_paa.nunique() career_topics, cocite_network = pyscisci.career_cociting_network_topics(parisi_paa, pub2ref, randomize=42, return_network=True, show_progress=False) print(nx.info(cocite_network)) topic_community = nx.get_node_attributes(cocite_network, 'TopicCommunity') louvain_communities = Clustering().from_membership_list([topic_community[i] for i in range(len(topic_community))]) pos = nx.drawing.nx_agraph.graphviz_layout(cocite_network, prog='sfdp',) fig, ax = plt.subplots(1,1,figsize=(12,12)) nx.draw_networkx_edges(cocite_network, pos, edgelist=cocite_network.edges(), edge_color = '0.4', alpha=0.2, ax=ax) for c in louvain_communities.clusters: 
nx.draw_networkx_nodes(cocite_network.subgraph(louvain_communities.clu2elm_dict[c]), pos, node_size= 100, node_color=[c]*len(louvain_communities.clu2elm_dict[c]), cmap = sns.color_palette("Set2", as_cmap=True), edgecolors='0.2', # node outline vmin = 0, vmax = louvain_communities.n_clusters, label = str(c), ax = ax) plt.legend() plt.show() ncomm = louvain_communities.n_clusters community_connectivity = np.zeros((ncomm, ncomm)) cociting_adjmat = nx.to_scipy_sparse_matrix(cocite_network) for ic, jc in itertools.product(range(ncomm), repeat=2): idx = np.sort(list(louvain_communities.clu2elm_dict[ic])) jdx = np.sort(list(louvain_communities.clu2elm_dict[jc])) community_connectivity[ic,jc] = cociting_adjmat[idx][:,jdx].sum() community_connectivity = community_connectivity/community_connectivity.sum(axis=0) fig, ax = plt.subplots(1,1,figsize=(6,6)) ax.imshow(community_connectivity, cmap='YlOrRd', vmin=0, vmax=1) plt.show() cmap = sns.color_palette("Set2", as_cmap=True) fig, ax = plt.subplots(1,1,figsize=(10, 4)) for y, topic, degree in career_topics[['Year', 'TopicCommunity', 'Degree']].values: ax.scatter(y, degree, c=[topic], cmap=cmap, vmin=0, alpha=0.8, vmax=louvain_communities.n_clusters, zorder=5) ax.plot([y,y], [0,degree], color='k', lw=0.25, zorder=4) plt.show() ```
github_jupyter
```
%pylab inline
```

# Shift-Ciphers

A shift-cipher, or ROT cipher, is a pretty simple encryption. First we convert all the letters into digits, which is a function $f : \{ a \to 0, b \to 1, \ldots \}$, the inverse of this is $f^{-1} : \{0 \to a, 1 \to b, \ldots \}$. First we convert all the letters to digits, then add a number $x \mod 26$, and then convert it back into letters. If the shift is 3, it is called the Caesar cipher.

Instead of using 0 to 25, we will use the ASCII table. We can convert from a letter to a number with `ord(x)`, and back with `chr(x)`. This also means we will do $x \mod 127$.

![ASCII Table](ASCII-table.jpg)

```
def rot_n(string, n):
    # Fix: the original had a stray line-continuation backslash after the
    # colon, which glued the next comment onto the def line (SyntaxError).
    # 1. split the string into characters
    # 2. convert the character to the ASCII number with ord(x)
    # 3. add the shift modulo 127
    # 4. convert it back into a character with chr(x)
    # 5. concat the list of characters into a string
    return ''.join([chr((ord(c) + n) % 127) for c in string])

rot_n('The big brown fox jumps over the lazy dog.', 3)
```

The reverse is easy, just use $n=-3$. Because we do a modulo, we can't use a shift greater than 5 if we also want to use this function to decrypt. For higher shifts, we need an additional decrypt function.

```
rot_n('Wkh#elj#eurzq#ir{#mxpsv#ryhu#wkh#od}|#grj1', -3)
```

However, this cipher is easily cracked by applying a letter frequency analysis.

1. Perform a letter frequency analysis on an English dictionary.
2. Notice that `e` has the highest frequency.
3. Perform a letter frequency analysis on the cipher text.
4. The letter with the highest frequency is _probably_ `e`, so calculate the difference and you got the shift.

(This only works under the assumption it is English, but you get the idea.)

# Van Eck's Sequence

Let $a_0 = 0$. Then for $n \geq 0$, if there exists an $m < n$ such that $a_m = a_n$, take the largest such $m$ and set $a_{n+1}=n-m$; otherwise $a_{n+1}=0$.

In normal language: if you see a new number, then $a_{n+1} = 0$.
If you have a number that is already written down, count how many steps away it occurred, and set $a_{n+1}$ equal to the number of steps.

# Helpers

Some functions that aid in making Python's list indexing like `X[-1:]` stuff more readable.

```
"""
Returns the first element in the list X, as a single element.
"""
def head(X):
    if len(X) > 0:
        return X[0]
    return X

"""
Returns the entire list, except the last element.
"""
def tail(X):
    if len(X) > 0:
        return X[:-1]
    return []

"""
Returns the last element in the list X, as a single element.
"""
def last(X):
    if len(X) > 0:
        return head(X[::-1])
    return []

"""
Reverses a list.
"""
def reverse(X):
    return X[::-1]

"""
Returns the index of the element in the list.
Returns -1 if the element is not contained in the list.
"""
def index(x,X):
    if x in X:
        return X.index(x)
    return -1

"""
Returns True when the element x occurs in the list X.
Fix: this helper was called by the generators below but was never defined
anywhere in the notebook, so vaneck/vaneck2/vaneck3 raised a NameError.
"""
def contains(x, X):
    return x in X
```

# Van Eck Sequence Generator

```
def vaneck(n):
    L = [0]
    for _ in range(n):
        # is the last element in the sequence?
        if contains(last(L), tail(L)):
            # index of the last element in the sequence.
            m = len(L) - 1
            # index of the previous occurrence
            i = len(L) - index(last(L), reverse(tail(L))) - 2
            # add number of steps back to find the last element in the sequence
            L.append(m - i)
        else:
            L.append(0)
    return L

vaneck(10)
```

We can do some simplification. Let $X = \textrm{len(X)}$, and $a = \textrm{index(last(L), reverse(tail(L)))}$. Then $m=X-1$, and likewise $i=X-a-2$. This can be simplified:

$$
d=m-i \\
d = X-1-(X-a-2) \\
d = a + 1.
$$

```
def vaneck2(n):
    L = [0]
    for _ in range(n):
        if contains(last(L), tail(L)):
            L.append(index(last(L), reverse(tail(L))) + 1)
        else:
            L.append(0)
    return L

vaneck2(10)
```

Here we get rid of the `index` function.

```
def vaneck3(n):
    L = [0]
    for _ in range(n):
        if contains(last(L), tail(L)):
            L.append(reverse(tail(L)).index(last(L)) + 1)
        else:
            L.append(0)
    return L

vaneck3(10)
```

Here we get rid of the `head`, `tail`, `last`, and `reverse` functions.
``` def vaneck4(n): L = [0] for _ in range(n): if L[::-1][0] in set(L[:-1]): L.append(L[:-1][::-1].index(L[::-1][0]) + 1) else: L.append(0) return L vaneck4(10) ``` Here we rewrite it as a ternary expression. ``` def vaneck5(n): L = [0] for _ in range(n): L.append(L[:-1][::-1].index(L[::-1][0]) + 1 if L[::-1][0] in set(L[:-1]) else 0) return L vaneck5(10) ``` And now it is fucking unreadable, but it works anyway. # ROT with a rotating shift determined by Van Eck's sequence Now instead of a fixed shift, we will use the Van Eck's sequence to indicate the shift. Note that a decrypt function is a little bit more difficult, and it's not implemented here. ``` def rot_vaneck(text): X = np.array([ord(c) for c in text]) seq = vaneck5(len(X)-1) X = (X + seq) % 127 return ''.join([chr(d) for d in X]) rot_vaneck('The big brown fox jumps over the lazy dog.') ``` # Distribution of Van Eck's Sequence Here we look at the distribution of Van Eck's sequence. ``` plt.figure(figsize=(20,10)) N = 100 plot(vaneck5(N), c='black'); %%time plt.figure(figsize=(20,10)) N = 2500 scatter(range(N+1), vaneck5(N), marker="x", c='black') title('Distribution of Van Eck\'s Sequence (N={})'.format(N)) xlabel('Index') ylabel('Distance') ``` # Distribution of zero's Because there are a lot of zero's in the sequence, the shift that is applied to ciphering will also be $0$. This means that an $x$ percentage of characters will not be encrypted. We can look at the distribution of zeros for various $N$'s. ``` import pandas as pd N = [10, 100, 1000, 10000] X = [] for n in N: # calculate the fraction by counting all the zeros # and dividing through n. fraction = len([x for x in vaneck5(n) if x == 0]) / n X.append(fraction) df = pd.DataFrame(X,N) df.columns = ['Fraction of zeros (%)'] df ``` Here we can see that if we encrypt 10 characters, only 50% will be encrypted. If the message is 100 characters, then only 25% will be encrypted. 
Now you can argue that you can add another shift, for all the zeros, but this is susceptible to the same vulnerability as the original Ceasar Cipher. # Initial starting condition Originally the sequence starts by writing down a zero. But it is also possible to start at another number. To do this, we first modify the `vaneck5` function to accept a different starting parameter: ``` def vaneck5(n, a=0): L = [a] for _ in range(n): L.append(L[:-1][::-1].index(L[::-1][0]) + 1 if L[::-1][0] in set(L[:-1]) else 0) return L for i in range(0, 25): print('a={} => {}'.format(i, vaneck5(20, i))) ``` Well, this doesn't seem to get rid of the problem where we have a lot of zeros. It _seems_ it just shifts the sequence by a bit. # Another idea If the zeros are a problem, then just delete them, and we are done! ``` def vaneck_zeroless(n, a=0): L = [a] for _ in range(n): L.append(L[:-1][::-1].index(L[::-1][0]) + 1 if L[::-1][0] in set(L[:-1]) else 0) return [l for l in L if l != 0] %%time plt.figure(figsize=(20,10)) N = 100 L = vaneck_zeroless(N) scatter(range(len(L)), L, marker="x", c='black') title('Distribution of Van Eck\'s Sequence wwithout zeros (N={})'.format(N)) xlabel('Index') ylabel('Distance') ``` A problem with this is, for any given $N$, we have no clue how big the resulting sequence is. So we cannot easily generate a sequence which has the same size as the text.
github_jupyter
This notebook first displays the location of PROMICE AWSs and calculated the annual velocity based on the GPS record. Then it will extract the satellite pixel values and MODIS albedo prodcut at each AWS site. Results will be saved in csv files under the promice folder. Users should change the size of spatial window when extracting the pixel values. ``` import geemap import ee import pandas as pd import utm import numpy as np import plotly.express as px ``` # PROMICE ``` df = pd.read_csv(r'promice/promice.csv') df['Longitude'] = df['Longitude'] * -1 df['velocity(m/y)'] = df['Longitude'] # just create a new column by copying longitude i = 0 for station in df.Station: # url = df.iloc[i]['url'] url = df.url[i] dfs = pd.read_table(url, sep=r'\s{1,}', engine='python') dfs = dfs[(dfs['Albedo_theta<70d'] > 0) & (dfs['LatitudeGPS_HDOP<1(degN)'] > 0) & (dfs['LatitudeGPS_HDOP<1(degN)'] >0)] dfs['LongitudeGPS_HDOP<1(degW)'] = dfs['LongitudeGPS_HDOP<1(degW)'] * -1 lat = dfs['LatitudeGPS_HDOP<1(degN)'] lon = dfs['LongitudeGPS_HDOP<1(degW)'] utmx, utmy, utmzoneNum, utmzoneLetter = utm.from_latlon(lat.values, lon.values) dist = np.sqrt((utmx[0] - utmx[-1])**2 + (utmy[0] - utmy[-1])**2) / (dfs.DayOfCentury.tail(1).values - dfs.DayOfCentury.head(1).values) * 365 df.at[i, 'velocity(m/y)'] = np.around(dist, 2) print('The station is: %s lat: %f, lon: %f' % (df.Station[i], lat.mean(), lon.mean()) ) print("the annual average ice flow rate is %.2f m\N{DOT OPERATOR}a\u207B\N{SUPERSCRIPT ONE}" %dist) i += 1 fig = px.scatter_mapbox(df, lat=df.Latitude, lon=df.Longitude, # color="", # which column to use to set the color of markers hover_name="Station", hover_data=["m.a.s.l", "velocity(m/y)"], zoom=2, width=650, height=500, center=dict( lat=72.603506, lon=-41.352658 )) # column added to hover information) # fig.update_layout(mapbox_style="stamen-terrain") # fig.update_layout(mapbox_style="open-street-map") fig.update_layout( mapbox_style="white-bg", mapbox_layers=[ { "below": 'traces', 
"sourcetype": "raster", "sourceattribution": "United States Geological Survey", "source": [ "https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer/tile/{z}/{y}/{x}" ] } ]) fig.update_layout(margin={"r":0,"t":0,"l":0,"b":0}) fig.show() # fig.write_html(r'C:\Users\au686295\Documents\GitHub\personal\shunan.feng\assets\interactive_figure\promice.html') ``` # GEE ``` Map = geemap.Map() Map ``` ## Landsat and Sentinel ``` rmaCoefficients = { 'itcpsL7': ee.Image.constant([-0.0084, -0.0065, 0.0022, -0.0768, -0.0314, -0.0022]), 'slopesL7': ee.Image.constant([1.1017, 1.0840, 1.0610, 1.2100, 1.2039, 1.2402]), 'itcpsS2': ee.Image.constant([0.0210, 0.0167, 0.0155, -0.0693, -0.0039, -0.0112]), 'slopesS2': ee.Image.constant([1.0849, 1.0590, 1.0759, 1.1583, 1.0479, 1.0148]) }; #rma # Function to get and rename bands of interest from OLI. def renameOli(img): return img.select( ['SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B6', 'SR_B7'], #'QA_PIXEL', 'QA_RADSAT' ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) #'QA_PIXEL', 'QA_RADSAT' # Function to get and rename bands of interest from ETM+, TM. def renameEtm(img): return img.select( ['SR_B1', 'SR_B2', 'SR_B3', 'SR_B4', 'SR_B5', 'SR_B7'], #, 'QA_PIXEL', 'QA_RADSAT' ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) #, 'QA_PIXEL', 'QA_RADSAT' # Function to get and rename bands of interest from Sentinel 2. 
def renameS2(img): return img.select( ['B2', 'B3', 'B4', 'B8', 'B11', 'B12', 'QA60', 'SCL'], ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2', 'QA60', 'SCL'] ) def oli2oli(img): return img.select(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) \ .toFloat() def etm2oli(img): return img.select(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) \ .multiply(rmaCoefficients["slopesL7"]) \ .add(rmaCoefficients["itcpsL7"]) \ .toFloat() # .round() \ # .toShort() # .addBands(img.select('pixel_qa')) def s22oli(img): return img.select(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) \ .multiply(rmaCoefficients["slopesS2"]) \ .add(rmaCoefficients["itcpsS2"]) \ .toFloat() # .round() \ # .toShort() # convert to Int16 # .addBands(img.select('pixel_qa')) def imRangeFilter(image): maskMax = image.lt(1) maskMin = image.gt(0) return image.updateMask(maskMax).updateMask(maskMin) ''' Cloud mask for Landsat data based on fmask (QA_PIXEL) and saturation mask based on QA_RADSAT. Cloud mask and saturation mask by sen2cor. Codes provided by GEE official. ''' # the Landsat 8 Collection 2 def maskL8sr(image): # Bit 0 - Fill # Bit 1 - Dilated Cloud # Bit 2 - Cirrus # Bit 3 - Cloud # Bit 4 - Cloud Shadow qaMask = image.select('QA_PIXEL').bitwiseAnd(int('11111', 2)).eq(0) saturationMask = image.select('QA_RADSAT').eq(0) # Apply the scaling factors to the appropriate bands. # opticalBands = image.select('SR_B.').multiply(0.0000275).add(-0.2) # thermalBands = image.select('ST_B.*').multiply(0.00341802).add(149.0) # Replace the original bands with the scaled ones and apply the masks. 
#image.addBands(opticalBands, {}, True) \ maybe not available in python api return image.select('SR_B.').multiply(0.0000275).add(-0.2) \ .updateMask(qaMask) \ .updateMask(saturationMask) # the Landsat 4, 5, 7 Collection 2 def maskL457sr(image): # Bit 0 - Fill # Bit 1 - Dilated Cloud # Bit 2 - Unused # Bit 3 - Cloud # Bit 4 - Cloud Shadow qaMask = image.select('QA_PIXEL').bitwiseAnd(int('11111', 2)).eq(0) saturationMask = image.select('QA_RADSAT').eq(0) # Apply the scaling factors to the appropriate bands. # opticalBands = image.select('SR_B.') # opticalBands = image.select('SR_B.').multiply(0.0000275).add(-0.2) # thermalBand = image.select('ST_B6').multiply(0.00341802).add(149.0) # Replace the original bands with the scaled ones and apply the masks. return image.select('SR_B.').multiply(0.0000275).add(-0.2) \ .updateMask(qaMask) \ .updateMask(saturationMask) # # Function to mask clouds using the Sentinel-2 QA band # @param {ee.Image} image Sentinel-2 image # @return {ee.Image} cloud masked Sentinel-2 image # def maskS2sr(image): qa = image.select('QA60') # Bits 10 and 11 are clouds and cirrus, respectively. cloudBitMask = 1 << 10 cirrusBitMask = 1 << 11 # Bits 1 is saturated or defective pixel not_saturated = image.select('SCL').neq(1) # Both flags should be set to zero, indicating clear conditions. mask = qa.bitwiseAnd(cloudBitMask).eq(0) \ .And(qa.bitwiseAnd(cirrusBitMask).eq(0)) return image.updateMask(mask).updateMask(not_saturated).divide(10000) # Define function to prepare OLI images. def prepOli(img): orig = img img = maskL8sr(img) img = renameOli(img) img = oli2oli(img) img = imRangeFilter(img) # img = addAlbedo(img) return ee.Image(img.copyProperties(orig, orig.propertyNames())) # Define function to prepare ETM+/TM images. 
def prepEtm(img): orig = img img = maskL457sr(img) img = renameEtm(img) img = etm2oli(img) img = imRangeFilter(img) # img = addAlbedo(img) return ee.Image(img.copyProperties(orig, orig.propertyNames())) # Define function to prepare S2 images. def prepS2(img): orig = img img = renameS2(img) img = maskS2sr(img) img = s22oli(img) img = imRangeFilter(img) # img = addAlbedo(img) return ee.Image(img.copyProperties(orig, orig.propertyNames()).set('SATELLITE', 'SENTINEL_2')) # https://developers.google.com/earth-engine/tutorials/community/intro-to-python-api-guiattard by https://github.com/guiattard def ee_array_to_df(arr, list_of_bands): """Transforms client-side ee.Image.getRegion array to pandas.DataFrame.""" df = pd.DataFrame(arr) # Rearrange the header. headers = df.iloc[0] df = pd.DataFrame(df.values[1:], columns=headers) # Remove rows without data inside. df = df[['longitude', 'latitude', 'time', *list_of_bands]]#.dropna() # Convert the data to numeric values. for band in list_of_bands: df[band] = pd.to_numeric(df[band], errors='coerce') # Convert the time field into a datetime. df['datetime'] = pd.to_datetime(df['time'], unit='ms') # Keep the columns of interest. 
df = df[['time','datetime', *list_of_bands]] return df for i in range(len(df.Station)): stationName = df.Station[i] url = df.url[i] url = df.url[i] dfall = pd.read_table(url, sep=r'\s{1,}', engine='python') dfs = dfall[['Year', 'MonthOfYear', 'DayOfMonth', 'Albedo_theta<70d', 'LatitudeGPS_HDOP<1(degN)', 'LongitudeGPS_HDOP<1(degW)']] dfs = dfs[(dfs['Albedo_theta<70d'] > 0)] dfs = dfs.replace(-999, np.nan) # dfs = dfs.interpolate(method='bfill') dfs['LatitudeGPS_HDOP<1(degN)'] = dfs['LatitudeGPS_HDOP<1(degN)'].interpolate(limit_direction='both') dfs['LongitudeGPS_HDOP<1(degW)'] = dfs['LongitudeGPS_HDOP<1(degW)'].interpolate(limit_direction='both') dfs['lat'] = dfs['LatitudeGPS_HDOP<1(degN)'] dfs['lon'] = dfs['LongitudeGPS_HDOP<1(degW)'] * -1 dfs['time'] = pd.to_datetime(dict(year=dfs.Year, month=dfs.MonthOfYear, day = dfs.DayOfMonth)) # utmx, utmy, utmzoneNum, utmzoneLetter = utm.from_latlon(dfs.lat.values, dfs.lon.values) # dist = np.sqrt((utmx[0] - utmx[-1])**2 + (utmy[0] - utmy[-1])**2) / (dfs.Year.tail(1).values - dfs.Year.head(1).values) print('The station is: %s' %df.Station[i]) print('start from: %s end on: %s' % (dfs.time.head(1).values, dfs.time.tail(1).values)) # print("the annual average ice flow rate is %.2f m\N{DOT OPERATOR}a\u207B\N{SUPERSCRIPT ONE}" %dist) dfsYear = dfs.groupby(['Year']).mean() dfsYear.reset_index(inplace=True) ''' This part could help examine the annual ice velocity calculated from promice data. 
''' # for j in range(len(dfsYear)): # # aoi = ee.Geometry.Point([dfsYear.lon[i], dfsYear.lat[i]]).buffer(300) # # Map.addLayer(aoi, {}, str(dfsYear.Year[i])) # utmx, utmy, utmzoneNum, utmzoneLetter = utm.from_latlon(dfsYear.lat[j], dfsYear.lon[j]) # dist = np.sqrt((utmx - utmx)**2 + (utmy - utmy)**2) / (dfsYear.Year.tail(1).values - dfs.Year.head(1).values) # print('year: %d, coordinates:(%f, %f)' %(dfsYear.Year[j], dfsYear.lon[j], dfsYear.lat[j])) # print("the average ice flow rate is %.2f m\N{DOT OPERATOR}a\u207B\N{SUPERSCRIPT ONE}" %dist) for j in range(len(dfsYear)): aoi = ee.Geometry.Point([dfsYear.lon[j], dfsYear.lat[j]]) Map.addLayer(aoi, {}, str(dfsYear.Year[j])) date_start = str(dfsYear.Year[j]) + '-' + str(1) + '-' + str(1) date_end = str(dfsYear.Year[j]) + '-' + str(12) + '-' + str(31) # print(date_start) # create filter for image collection colFilter = ee.Filter.And( ee.Filter.geometry(aoi), # filterbounds not available on python api https://github.com/google/earthengine-api/issues/83 ee.Filter.date(date_start, date_end) # ee.Filter.calendarRange(5, 9, 'month'), # ee.Filter.lt('CLOUD_COVER', 50) ) s2colFilter = ee.Filter.And( ee.Filter.geometry(aoi), # filterbounds not available on python api https://github.com/google/earthengine-api/issues/83 ee.Filter.date(date_start, date_end), # ee.Filter.calendarRange(5, 9, 'month'), ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 50) ) oliCol = ee.ImageCollection('LANDSAT/LC08/C02/T1_L2') \ .filter(colFilter) \ .map(prepOli) \ .select(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) etmCol = ee.ImageCollection('LANDSAT/LE07/C02/T1_L2') \ .filter(colFilter) \ .map(prepEtm) \ .select(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) tmCol = ee.ImageCollection('LANDSAT/LT05/C02/T1_L2') \ .filter(colFilter) \ .map(prepEtm) \ .select(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) tm4Col = ee.ImageCollection('LANDSAT/LT04/C02/T1_L2') \ .filter(colFilter) \ .map(prepEtm) \ .select(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 
'SWIR2']) s2Col = ee.ImageCollection('COPERNICUS/S2_SR') \ .filter(s2colFilter) \ .map(prepS2) \ .select(['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) # landsatCol = etmCol.merge(tmCol) landsatCol = oliCol.merge(etmCol).merge(tmCol).merge(tm4Col) multiSat = landsatCol.merge(s2Col).sort('system:time_start', True) # // Sort chronologically in descending order. if multiSat.size().getInfo()==0: continue pointValue = multiSat.getRegion(aoi, 90).getInfo() # The number e.g. 500 is the buffer size dfpoint = ee_array_to_df(pointValue, ['Blue', 'Green', 'Red', 'NIR', 'SWIR1', 'SWIR2']) pointValueFile = 'promice/multiSat90m/' + stationName.replace("*", "-") + '.csv' # if os.path.exists(pointValueFile): if j==0: dfpoint.to_csv(pointValueFile, mode='w', index=False, header=True) else: dfpoint.to_csv(pointValueFile, mode='a', index=False, header=False) ``` ## MODIS ``` for i in range(len(df.Station)): stationName = df.Station[i] url = df.url[i] dfall = pd.read_table(url, sep=r'\s{1,}', engine='python') dfs = dfall[['Year', 'MonthOfYear', 'DayOfMonth', 'Albedo_theta<70d', 'LatitudeGPS_HDOP<1(degN)', 'LongitudeGPS_HDOP<1(degW)']] dfs = dfs[(dfs['Albedo_theta<70d'] > 0)] dfs = dfs.replace(-999, np.nan) # dfs = dfs.interpolate(method='bfill') dfs['LatitudeGPS_HDOP<1(degN)'] = dfs['LatitudeGPS_HDOP<1(degN)'].interpolate(limit_direction='both') dfs['LongitudeGPS_HDOP<1(degW)'] = dfs['LongitudeGPS_HDOP<1(degW)'].interpolate(limit_direction='both') dfs['lat'] = dfs['LatitudeGPS_HDOP<1(degN)'] dfs['lon'] = dfs['LongitudeGPS_HDOP<1(degW)'] * -1 dfs['time'] = pd.to_datetime(dict(year=dfs.Year, month=dfs.MonthOfYear, day = dfs.DayOfMonth)) # utmx, utmy, utmzoneNum, utmzoneLetter = utm.from_latlon(dfs.lat.values, dfs.lon.values) # dist = np.sqrt((utmx[0] - utmx[-1])**2 + (utmy[0] - utmy[-1])**2) / (dfs.Year.tail(1).values - dfs.Year.head(1).values) print('The station is: %s' %df.Station[i]) print('start from: %s end on: %s' % (dfs.time.head(1).values, dfs.time.tail(1).values)) # 
print("the annual average ice flow rate is %.2f m\N{DOT OPERATOR}a\u207B\N{SUPERSCRIPT ONE}" %dist) dfsYear = dfs.groupby(['Year']).mean() dfsYear.reset_index(inplace=True) ''' This part could help examine the annual ice velocity calculated from promice data. ''' # for j in range(len(dfsYear)): # # aoi = ee.Geometry.Point([dfsYear.lon[i], dfsYear.lat[i]]).buffer(300) # # Map.addLayer(aoi, {}, str(dfsYear.Year[i])) # utmx, utmy, utmzoneNum, utmzoneLetter = utm.from_latlon(dfsYear.lat[j], dfsYear.lon[j]) # dist = np.sqrt((utmx - utmx)**2 + (utmy - utmy)**2) / (dfsYear.Year.tail(1).values - dfs.Year.head(1).values) # print('year: %d, coordinates:(%f, %f)' %(dfsYear.Year[j], dfsYear.lon[j], dfsYear.lat[j])) # print("the average ice flow rate is %.2f m\N{DOT OPERATOR}a\u207B\N{SUPERSCRIPT ONE}" %dist) for j in range(len(dfsYear)): # aoi = ee.Geometry.Point([dfsYear.lon[i], dfsYear.lat[i]]).buffer(300) aoi = ee.Geometry.Point([dfsYear.lon[j], dfsYear.lat[j]]) Map.addLayer(aoi, {}, str(dfsYear.Year[j])) date_start = str(dfsYear.Year[j]) + '-' + str(1) + '-' + str(1) date_end = str(dfsYear.Year[j]) + '-' + str(12) + '-' + str(31) # print(date_start) # create filter for image collection colFilter = ee.Filter.And( # ee.Filter.bounds(aoi), # ee.Filter.intersects('.geo', aoi), ee.Filter.geometry(aoi), ee.Filter.date(date_start, date_end) ) # def maskMODIS(image): # # 150 Cloud # # 151 Cloud detected as snow # qa = image.select('Snow_Albedo_Daily_Tile_Class') # cloudMask = qa.neq(150) # # cloudFalseMask = qa.neq(151) # return image.updateMask(cloudMask)#.updateMask(cloudFalseMask) # MOD10A1.006 Terra Snow Cover Daily Global 500m modisCol = ee.ImageCollection('MODIS/006/MOD10A1').select(['Snow_Albedo_Daily_Tile', 'Snow_Albedo_Daily_Tile_Class']) \ .filter(colFilter)#.map(maskMODIS) # if multiSat.size().getInfo()==0: # continue pointValue = modisCol.getRegion(aoi, 500).getInfo() # 300 is the buffer radius dfpoint = ee_array_to_df(pointValue, ['Snow_Albedo_Daily_Tile']) 
pointValueFile = 'promice/modis500m/' + stationName.replace("*", "-") + '.csv' # if os.path.exists(pointValueFile): if j==0: dfpoint.to_csv(pointValueFile, mode='w', index=False) else: dfpoint.to_csv(pointValueFile, mode='a', index=False, header=False) # pointValueFile = 'promice/' + stationName + '_' + str(dfsYear.Year[i]) + '.csv' # all_list = all_list.map(func_bbz) # work_dir = os.path.expanduser('~/Downloads') # out_csv = os.path.join(work_dir, 'landsat.csv') # out_csv = os.path.join('landsat.csv') # geemap.extract_values_to_points(aoi, multiSat, out_csv) ```
github_jupyter
# Time Series Analysis San Francisco International Airport (IATA code: SFO) is located south of San Francisco downtown and it’s a very important air transportation hub for both domestic and international flights. It is equipped with four asphalt runways – two perpendicular pairs. This airport is one of the main hubs for the United Airline which generates relatively a big chunk of airport's traffic. It is also worth to check airport's annual report to learn more about the operational side of the airport: https://s3.amazonaws.com/media.flysfo.com/2017_Annual_Report.pdf ``` import numpy as np # linear algebra import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import seaborn as sns # data visualisation import matplotlib.pyplot as plt # data visualisation import datetime as dt # working with time data import warnings warnings.filterwarnings('ignore') PAX_raw = pd.read_csv("./data/air_traffic/air-traffic-passenger-statistics.csv") PAX = PAX_raw.copy() PAX.head() PAX.loc[:,"Activity Period"] = pd.to_datetime(PAX.loc[:,"Activity Period"].astype(str), format="%Y%m") PAX.loc[:,"Year"] = PAX["Activity Period"].dt.year PAX.loc[:,"Month"] = PAX["Activity Period"].dt.month time_begin = PAX.loc[:,"Activity Period"].min() time_end = PAX.loc[:,"Activity Period"].max() time_range = time_end-time_begin print("First date: ", str(time_begin)[:11]) print("Last date: ", str(time_end)[:11]) print("Time range in days:", time_range.days) print("Time range in months:", time_range/np.timedelta64(1,"M")) ``` # SARIMAX Preparation ``` TS1 = PAX.groupby("Activity Period")["Passenger Count"].sum().to_frame() f, ax1 = plt.subplots(1,1,figsize=(15,5)) TS1.plot(ax=ax1) ax1.set_xlabel("Date") ax1.set_ylabel("Passenger Count") plt.grid(True) ``` The Augmented Dickey-Fuller test can be used to test for stationarity of our time series. The null hypothesis of the test is that the time series is not stationary (has some time-dependent structure). 
- Null Hypothesis (H0): if failed to be rejected (high p-value) means it is non-stationary
- Alternative Hypothesis (H1): if H0 is rejected (low p-value) means it is stationary

```
from statsmodels.tsa.stattools import adfuller
results = adfuller(TS1["Passenger Count"])
print('ADF Statistic: %f' % results[0])
print('p-value: %f' % results[1])
```

No surprise. P-value is 0.99 (we don't reject H0) - time series is not stationary. To better understand the time series behaviour I will decompose it into trend, seasonality and residuals.

```
from statsmodels.tsa.seasonal import seasonal_decompose
plt.rcParams['figure.figsize'] = 20, 10
# Additive decomposition
decomposed_add = seasonal_decompose(TS1, model="additive",freq=10)
add = decomposed_add.plot()
plt.show()
TS1_diff = TS1.diff().dropna()
plt.figure(figsize=(12,5))
ax1 = TS1_diff["Passenger Count"].plot()
ax1.set_xlabel("Date")
ax1.set_ylabel("Passenger Count 12-months Difference")
plt.grid(True)
plt.show()
results = adfuller(TS1_diff["Passenger Count"])
print('ADF Statistic: %f' % results[0])
print("P-value of a test is: {}".format(results[1]))
```

The p-value of 0.0397 is small enough to reject the null hypothesis that the time series is non-stationary. However, the plot above still shows seasonality. Let's see what the result of the same test would be if we take a second difference. Please note that you have to be aware of the risk of so-called overdifferencing the time series.

```
results = adfuller(TS1.diff().diff().dropna()["Passenger Count"])
print('ADF Statistic: %f' % results[0])
print("P-value of a test is: {}".format(results[1]))
```

Indeed the p-value of the test dropped even more. However, often the simplest solutions work the best - therefore I will stay with a simple single difference. AutoCorrelation Plot (ACF) shows a correlation between "a signal" (our value) with its delayed occurrence (observation) in a function of time delta between them (so called lag).
The Partial AutoCorrelation Function (PACF) plot is similar to the ACF, but the effect of the shorter lags is removed.
``` from statsmodels.tsa.statespace.sarimax import SARIMAX model2 = SARIMAX(TS1, order=(2,1,1), seasonal_order=(0,1,0,12)) results = model2.fit() results.summary() ``` Summary explanations: - Ljung-Box (Q) - This test is used to check for the lack of fit of a time series model. The null hypothesis here is there are no correlations in the residuals. In our case we reject the null hypothesis what means that residuals are somehow correlated. - Jarque-Bera (JB) - This is a test for normality of distributon. It is basing on values of skewness and kurtosis where for the normal distibution these values should be respectively 0 and 3. The null hypothesis of the test is that the sample comes from the normal distribution. Ander this hypothesis the chi-squared distribution with 2 degrees of freedom can be used. In our case JB statistics is 8.76 which gives probability of 0.01 - meaning that we don't reject the null hypothesis. - Heteroskedasticity (H) - Checks if the variance is constant When looking at the diagnostic plots below we are interested in: - Standardized residual - there are no obvious patterns in the residuals - Histogram plus kde estimate - KDE should be very similar to the normal distribution - Normal Q-Q - points should lie on the straight line - Correlogram - 95% of correlations for lag greater than one should not be significant ``` plt.rcParams['figure.figsize'] = 12, 8 plot = results.plot_diagnostics() # Create SARIMA mean forecast forecast = results.get_forecast(steps=48) lower = forecast.conf_int()["lower Passenger Count"] upper = forecast.conf_int()["upper Passenger Count"] # Plot mean SARIMA predictions fig,ax = plt.subplots(1,1,figsize=(20,10)) plt.plot(TS1, label='original') plt.plot(forecast.predicted_mean, label='SARIMAX', c="r") plt.fill_between(forecast.conf_int().index, lower,upper, color='pink') plt.xlabel('Date') plt.ylabel('No of passengers') plt.legend() plt.show() ``` The plot above shows predictions using SARIMAX(2,1,1)x(0,1,0,12). 
The red line indicates the mean and the pink area is bounded by 95% confidence intervals. According to this prediction in 2022 a number of passengers can reach 8 millions at the peak time. -------------- # Machine Learning Forecast In this secion we will focus on the Machine Learning approach to forecasting time series. First we will prepare our data - input(X) and output(y) variables. ``` from sklearn.svm import SVR, SVR from sklearn.kernel_ridge import KernelRidge from sklearn.model_selection import KFold, train_test_split, TimeSeriesSplit from sklearn.metrics import mean_squared_error def tsplit(X,y,model): tscv = TimeSeriesSplit(n_splits=3) fig,ax = plt.subplots(3, figsize=(15,8)) axis = 0 for train_index, test_index in tscv.split(X): #splitting data into training and test sets X_train, X_test = X.iloc[train_index,:], X.iloc[test_index,:] y_train, y_test = y.iloc[train_index,:], y.iloc[test_index,:] #fitting model model.fit(X_train,y_train.values.ravel()) #predicting predictions = model.predict(X_test) #printing results print("MSE for split {0}:".format(axis+1)) print(mean_squared_error(y_test,predictions)) #ax[axis].plot(X_train.index, y_train) # needs fixing ax[axis].plot(list(X_test.index), predictions) ax[axis].plot(list(X_test.index), y_test) axis += 1 return(None) TS1["Year"] = TS1.index.year TS1["Month"] = TS1.index.month TS1.head() X = TS1[["Year","Month"]] y = TS1[["Passenger Count"]] ``` Let's look at the data again... ``` plt.figure(figsize=(10,5)) plt.scatter(TS1["Year"],TS1["Passenger Count"],c=TS1["Month"]) plt.legend() plt.show() plt.figure(figsize=(10,5)) plt.scatter(TS1["Month"],TS1["Passenger Count"],c=TS1["Year"]) plt.legend() plt.show() ``` They are non-stationary what will cause problems as in SARIMAX approach. ``` X2 = TS1[["Year","Month"]][1:] y2 = np.log(TS1[["Passenger Count"]]).diff().dropna() X2.head() ``` Data for machine learning should be scaled. Scaling data with standard scaler. 
``` from sklearn.preprocessing import StandardScaler sc_x = StandardScaler() sc_y = StandardScaler() # Scale x and y (two scale objects) X2_scaled = pd.DataFrame(sc_x.fit_transform(X2)) y2_scaled = pd.DataFrame(sc_y.fit_transform(y2)) ``` ### Ridge Regression ``` from sklearn.model_selection import GridSearchCV from sklearn.kernel_ridge import KernelRidge rdg = KernelRidge(kernel='rbf') parameters = {'alpha':np.arange(0.005,0.02,0.005), 'gamma':np.arange(0.001,0.01,0.001)} tscv = TimeSeriesSplit(n_splits=3) rdg_gs = GridSearchCV(rdg, parameters, cv=tscv, verbose=0, scoring='neg_mean_squared_error') rdg_gs.fit(X2_scaled, y2_scaled) rdg_gs.best_score_ best_rdg = rdg_gs.best_estimator_ print(best_rdg) tsplit(X2_scaled,y2_scaled,best_rdg) ``` ### SVR Prediction ``` svr = SVR() parameters = {'kernel':['rbf','poly'], 'C':np.arange(0.2,0.8,0.1), 'gamma':np.arange(0.2,1.2,0.02), 'degree':[3,4,5]} tscv = TimeSeriesSplit(n_splits=3) reg = GridSearchCV(svr, parameters, cv=tscv, verbose=0, scoring='neg_mean_squared_error') reg.fit(X2_scaled, y2_scaled.values.ravel()) reg.best_score_ best_svr = reg.best_estimator_ print(best_svr) tsplit(X2_scaled,y2_scaled,best_svr) ``` ### Regressor ``` from sklearn.neural_network import MLPRegressor from sklearn.model_selection import TimeSeriesSplit mlp = MLPRegressor(max_iter=600) parameters = {'hidden_layer_sizes':np.arange(800,1400,50),'alpha':[0.0001,0.0002], 'momentum':[0.85,0.9,0.95]} tscv = TimeSeriesSplit(n_splits=3) reg = GridSearchCV(mlp, parameters, cv=tscv, verbose=0, scoring='neg_mean_squared_error') reg.fit(X2_scaled, y2_scaled.values.ravel()) reg.best_score_ best_mlp = reg.best_estimator_ print(best_mlp) tsplit(X2_scaled,y2_scaled,best_mlp) ```
github_jupyter
# 23. Natural Language for Communication **23.1** \[washing-clothes-exercise\]Read the following text once for understanding, and remember as much of it as you can. There will be a test later. > The procedure is actually quite simple. First you arrange things into different groups. Of course, one pile may be sufficient depending on how much there is to do. If you have to go somewhere else due to lack of facilities that is the next step, otherwise you are pretty well set. It is important not to overdo things. That is, it is better to do too few things at once than too many. In the short run this may not seem important but complications can easily arise. A mistake is expensive as well. At first the whole procedure will seem complicated. Soon, however, it will become just another facet of life. It is difficult to foresee any end to the necessity for this task in the immediate future, but then one can never tell. After the procedure is completed one arranges the material into different groups again. Then they can be put into their appropriate places. Eventually they will be used once more and the whole cycle will have to be repeated. However, this is part of life. **23.2** An *HMM grammar* is essentially a standard HMM whose state variable is $N$ (nonterminal, with values such as $Det$, $Adjective$, $Noun$ and so on) and whose evidence variable is $W$ (word, with values such as $is$, $duck$, and so on). The HMM model includes a prior ${\textbf{P}}(N_0)$, a transition model ${\textbf{P}}(N_{t+1}|N_t)$, and a sensor model ${\textbf{P}}(W_t|N_t)$. Show that every HMM grammar can be written as a PCFG. \[Hint: start by thinking about how the HMM prior can be represented by PCFG rules for the sentence symbol. 
You may find it helpful to illustrate for the particular HMM with values $A$, $B$ for $N$ and values $x$, $y$ for $W$.\] **23.3** Consider the following PCFG for simple verb phrases: > 0.1: VP $\rightarrow$ Verb > 0.2: VP $\rightarrow$ Copula Adjective > 0.5: VP $\rightarrow$ Verb the Noun > 0.2: VP $\rightarrow$ VP Adverb > 0.5: Verb $\rightarrow$ is > 0.5: Verb $\rightarrow$ shoots > 0.8: Copula $\rightarrow$ is > 0.2: Copula $\rightarrow$ seems > 0.5: Adjective $\rightarrow$ **unwell** > 0.5: Adjective $\rightarrow$ **well** > 0.5: Adverb $\rightarrow$ **well** > 0.5: Adverb $\rightarrow$ **badly** > 0.6: Noun $\rightarrow$ **duck** > 0.4: Noun $\rightarrow$ **well** 1. Which of the following have a nonzero probability as a VP? (i) shoots the duck well well well(ii) seems the well well(iii) shoots the unwell well badly 2. What is the probability of generating “is well well”? 3. What types of ambiguity are exhibited by the phrase in (b)? 4. Given any PCFG, is it possible to calculate the probability that the PCFG generates a string of exactly 10 words? **23.4** Consider the following simple PCFG for noun phrases: > 0.6: NP $\rightarrow$ Det\ AdjString\ Noun > 0.4: NP $\rightarrow$ Det\ NounNounCompound > 0.5: AdjString $\rightarrow$ Adj\ AdjString > 0.5: AdjString $\rightarrow$ $\Lambda$ > 1.0: NounNounCompound $\rightarrow$ Noun > 0.8: Det $\rightarrow$ **the** > 0.2: Det $\rightarrow$ **a** > 0.5: Adj $\rightarrow$ **small** > 0.5: Adj $\rightarrow$ **green** > 0.6: Noun $\rightarrow$ **village** > 0.4: Noun $\rightarrow$ **green** where $\Lambda$ denotes the empty string. 1. What is the longest NP that can be generated by this grammar? (i) three words(ii) four words(iii) infinitely many words 2. Which of the following have a nonzero probability of being generated as complete NPs? (i) a small green village(ii) a green green green(iii) a small village green 3. What is the probability of generating “the green green”? 4. 
What types of ambiguity are exhibited by the phrase in (c)? 5. Given any PCFG and any finite word sequence, is it possible to calculate the probability that the sequence was generated by the PCFG? **23.5** Outline the major differences between Java (or any other computer language with which you are familiar) and English, commenting on the “understanding” problem in each case. Think about such things as grammar, syntax, semantics, pragmatics, compositionality, context-dependence, lexical ambiguity, syntactic ambiguity, reference finding (including pronouns), background knowledge, and what it means to “understand” in the first place. **23.6** This exercise concerns grammars for very simple languages. 1. Write a context-free grammar for the language $a^n b^n$. 2. Write a context-free grammar for the palindrome language: the set of all strings whose second half is the reverse of the first half. 3. Write a context-sensitive grammar for the duplicate language: the set of all strings whose second half is the same as the first half. **23.7** Consider the sentence “Someone walked slowly to the supermarket” and a lexicon consisting of the following words: $Pronoun \rightarrow \textbf{someone} \quad Verb \rightarrow \textbf{walked}$ $Adv \rightarrow \textbf{slowly} \quad Prep \rightarrow \textbf{to}$ $Article \rightarrow \textbf{the} \quad Noun \rightarrow \textbf{supermarket}$ Which of the following three grammars, combined with the lexicon, generates the given sentence? Show the corresponding parse tree(s). 
| $\quad\quad\quad\quad (A):\quad\quad\quad\quad$ | $\quad\quad\quad\quad(B):\quad\quad\quad\quad$ | $\quad\quad\quad\quad(C):\quad\quad\quad\quad$ | | --- | --- | --- | | $S\rightarrow NP\space VP$ | $S\rightarrow NP\space VP$ | $S\rightarrow NP\space VP$ | | $NP\rightarrow Pronoun$ | $NP\rightarrow Pronoun$ | $NP\rightarrow Pronoun$ | | $NP\rightarrow Article\space Noun $ | $NP\rightarrow Noun$ | $NP\rightarrow Article\space NP$ | | $VP\rightarrow VP\space PP$ | $NP\rightarrow Article\space NP$ | $VP\rightarrow Verb\space Adv$ | | $VP\rightarrow VP\space Adv\space Adv$ | $VP\rightarrow Verb\space Vmod$ | $Adv\rightarrow Adv\space Adv$ | | $VP\rightarrow Verb$ | $Vmod\rightarrow Adv\space Vmod$ | $Adv\rightarrow PP$ | | $PP\rightarrow Prep\space NP$ | $Vmod\rightarrow Adv$ | $PP\rightarrow Prep\space NP$ | | $NP\rightarrow Noun$ | $Adv\rightarrow PP$ | $NP\rightarrow Noun$ | | $\quad$ | $PP\rightarrow Prep\space NP$ | $\quad$ | For each of the preceding three grammars, write down three sentences of English and three sentences of non-English generated by the grammar. Each sentence should be significantly different, should be at least six words long, and should include some new lexical entries (which you should define). Suggest ways to improve each grammar to avoid generating the non-English sentences. **23.8** Collect some examples of time expressions, such as “two o’clock,” “midnight,” and “12:46.” Also think up some examples that are ungrammatical, such as “thirteen o’clock” or “half past two fifteen.” Write a grammar for the time language. **23.9** Some linguists have argued as follows: > Children learning a language hear only *positive > examples* of the language and no *negative > examples*. Therefore, the hypothesis that “every possible > sentence is in the language” is consistent with all the observed > examples. Moreover, this is the simplest consistent hypothesis. 
> Furthermore, all grammars for languages that are supersets of the true > language are also consistent with the observed data. Yet children do > induce (more or less) the right grammar. It follows that they begin > with very strong innate grammatical constraints that rule out all of > these more general hypotheses *a priori*. Comment on the weak point(s) in this argument from a statistical learning viewpoint. **23.10** \[chomsky-form-exercise\] In this exercise you will transform $\large \varepsilon_0$ into Chomsky Normal Form (CNF). There are five steps: (a) Add a new start symbol, (b) Eliminate $\epsilon$ rules, (c) Eliminate multiple words on right-hand sides, (d) Eliminate rules of the form (${\it X}$ ${{\;}}\rightarrow{{\;}}$${\it Y}$), (e) Convert long right-hand sides into binary rules. 1. The start symbol, $S$, can occur only on the left-hand side in CNF. Replace ${\it S}$ everywhere by a new symbol ${\it S'}$ and add a rule of the form ${\it S}$ ${{\;}}\rightarrow{{\;}}$${\it S'}$. 2. The empty string, $\epsilon$ cannot appear on the right-hand side in CNF. $\large \varepsilon_0$ does not have any rules with $\epsilon$, so this is not an issue. 3. A word can appear on the right-hand side in a rule only of the form (${\it X}$ ${{\;}}\rightarrow{{\;}}$*word*). Replace each rule of the form (${\it X}$ ${{\;}}\rightarrow{{\;}}$…*word* …) with (${\it X}$ ${{\;}}\rightarrow{{\;}}$…${\it W'}$ …) and (${\it W'}$ ${{\;}}\rightarrow{{\;}}$*word*), using a new symbol ${\it W'}$. 4. A rule (${\it X}$ ${{\;}}\rightarrow{{\;}}$${\it Y}$) is not allowed in CNF; it must be (${\it X}$ ${{\;}}\rightarrow{{\;}}$${\it Y}$ ${\it Z}$) or (${\it X}$ ${{\;}}\rightarrow{{\;}}$*word*). Replace each rule of the form (${\it X}$ ${{\;}}\rightarrow{{\;}}$${\it Y}$) with a set of rules of the form (${\it X}$ ${{\;}}\rightarrow{{\;}}$…), one for each rule (${\it Y}$ ${{\;}}\rightarrow{{\;}}$…), where (…) indicates one or more symbols. 5. 
Replace each rule of the form (${\it X}$ ${{\;}}\rightarrow{{\;}}$${\it Y}$ ${\it Z}$ …) with two rules, (${\it X}$ ${{\;}}\rightarrow{{\;}}$${\it Y}$ ${\it Z'}$) and (${\it Z'}$ ${{\;}}\rightarrow{{\;}}$${\it Z}$ …), where ${\it Z'}$ is a new symbol. Show each step of the process and the final set of rules. **23.11** Consider the following toy grammar: > $S \rightarrow NP\space VP$ > $NP \rightarrow Noun$ > $NP \rightarrow NP\space and\space NP$ > $NP \rightarrow NP\space PP$ > $VP \rightarrow Verb$ > $VP \rightarrow VP\space and \space VP$ > $VP \rightarrow VP\space PP$ > $PP \rightarrow Prep\space NP$ > $Noun \rightarrow Sally\space; pools\space; streams\space; swims$ > $Prep \rightarrow in$ > $Verb \rightarrow pools\space; streams\space; swims$ 1. Show all the parse trees in this grammar for the sentence “Sally swims in streams and pools.” 2. Show all the table entries that would be made by a (non-probabalistic) CYK parser on this sentence. **23.12** \[exercise-subj-verb-agree\] Using DCG notation, write a grammar for a language that is just like $\large \varepsilon_1$, except that it enforces agreement between the subject and verb of a sentence and thus does not generate ungrammatical sentences such as “I smells the wumpus.” **23.13** Consider the following PCFG: > $S \rightarrow NP \space VP[1.0] $ > $NP \rightarrow \textit{Noun}[0.6] \space|\space \textit{Pronoun}[0.4] $ > $VP \rightarrow \textit{Verb} \space NP[0.8] \space|\space \textit{Modal}\space \textit{Verb}[0.2]$ > $\textit{Noun} \rightarrow \textbf{can}[0.1] \space|\space \textbf{fish}[0.3] \space|\space ...$ > $\textit{Pronoun} \rightarrow \textbf{I}[0.4] \space|\space ...$ > $\textit{Verb} \rightarrow \textbf{can}[0.01] \space|\space \textbf{fish}[0.1] \space|\space ...$ > $\textit{Modal} \rightarrow \textbf{can}[0.3] \space|\space ...$ The sentence “I can fish” has two parse trees with this grammar. 
Show the two trees, their prior probabilities, and their conditional probabilities, given the sentence. **23.14** An augmented context-free grammar can represent languages that a regular context-free grammar cannot. Show an augmented context-free grammar for the language $a^nb^nc^n$. The allowable values for augmentation variables are 1 and $SUCCESSOR(n)$, where $n$ is a value. The rule for a sentence in this language is $$S(n) {{{{\;}}\rightarrow{{\;}}}}A(n) {{\;}}B(n) {{\;}}C(n) \ .$$ Show the rule(s) for each of ${\it A}$, ${\it B}$, and ${\it C}$. **23.15** Augment the $\large \varepsilon_1$ grammar so that it handles article–noun agreement. That is, make sure that “agents” and “an agent” are ${\it NP}$s, but “agent” and “an agents” are not. **23.16** Consider the following sentence (from *The New York Times,* July 28, 2008): > Banks struggling to recover from multibillion-dollar loans on real > estate are curtailing loans to American businesses, depriving even > healthy companies of money for expansion and hiring. 1. Which of the words in this sentence are lexically ambiguous? 2. Find two cases of syntactic ambiguity in this sentence (there are more than two.) 3. Give an instance of metaphor in this sentence. 4. Can you find semantic ambiguity? **23.17** \[washing-clothes2-exercise\] Without looking back at Exercise [washing-clothes-exercise](#/), answer the following questions: 1. What are the four steps that are mentioned? 2. What step is left out? 3. What is “the material” that is mentioned in the text? 4. What kind of mistake would be expensive? 5. Is it better to do too few things or too many? Why? **23.18** Select five sentences and submit them to an online translation service. Translate them from English to another language and back to English. Rate the resulting sentences for grammaticality and preservation of meaning. Repeat the process; does the second round of iteration give worse results or the same results? 
Does the choice of intermediate language make a difference to the quality of the results? If you know a foreign language, look at the translation of one paragraph into that language. Count and describe the errors made, and conjecture why these errors were made. **23.19** The $D_i$ values for the sentence in Figure [mt-alignment-figure](#/) sum to 0. Will that be true of every translation pair? Prove it or give a counterexample. **23.20** (Adapted from [@Knight:1999].) Our translation model assumes that, after the phrase translation model selects phrases and the distortion model permutes them, the language model can unscramble the permutation. This exercise investigates how sensible that assumption is. Try to unscramble these proposed lists of phrases into the correct order: 1. have, programming, a, seen, never, I, language, better 2. loves, john, mary 3. is the, communication, exchange of, intentional, information brought, by, about, the production, perception of, and signs, from, drawn, a, of, system, signs, conventional, shared 4. created, that, we hold these, to be, all men, truths, are, equal, self-evident Which ones could you do? What type of knowledge did you draw upon? Train a bigram model from a training corpus, and use it to find the highest-probability permutation of some sentences from a test corpus. Report on the accuracy of this model. **23.21** Calculate the most probable path through the HMM in Figure [sr-hmm-figure](#/) for the output sequence $[C_1,C_2,C_3,C_4,C_4,C_6,C_7]$. Also give its probability. **23.22** We forgot to mention that the text in Exercise [washing-clothes-exercise](#/) is entitled “Washing Clothes.” Reread the text and answer the questions in Exercise [washing-clothes2-exercise](#/). Did you do better this time? Bransford and Johnson [@Bransford+Johnson:1973] used this text in a controlled experiment and found that the title helped significantly. What does this tell you about how language and memory works?
github_jupyter
``` import os import torch os.environ['CUDA_VISIBLE_DEVICES'] = '0,1' from datetime import datetime from models.handler import train, test, validate import pandas as pd from models.base_model import Model import numpy as np import json from data_loader.forecast_dataloader import ForecastDataset import torch.utils.data as torch_data import torch.nn as nn import time class Args: def __init__(self): self.train = True self.evaluate = True self.dataset = 'ECG_data' self.window_size = 12 self.horizon = 3 self.train_length = 7 self.valid_length = 2 self.test_length = 1 self.epoch = 50 self.lr = 1e-4 self.multi_layer = 5 self.device = 'cpu' self.validate_freq = 1 self.batch_size = 32 self.norm_method = 'z_score' self.optimizer = 'RMSProp' self.early_stop = False self.exponential_decay_step = 5 self.decay_rate = 0.5 self.dropout_rate = 0.5 self.leakyrelu_rate = 0.2 args = Args() print(f'Training configs: {args}') data_file = os.path.join('dataset', args.dataset + '.csv') result_train_file = os.path.join('output', args.dataset, 'train') result_test_file = os.path.join('output', args.dataset, 'test') if not os.path.exists(result_train_file): os.makedirs(result_train_file) if not os.path.exists(result_test_file): os.makedirs(result_test_file) data = pd.read_csv(data_file).values # split data train_ratio = args.train_length / (args.train_length + args.valid_length + args.test_length) valid_ratio = args.valid_length / (args.train_length + args.valid_length + args.test_length) test_ratio = 1 - train_ratio - valid_ratio train_data = data[:int(train_ratio * len(data))] valid_data = data[int(train_ratio * len(data)):int((train_ratio + valid_ratio) * len(data))] test_data = data[int((train_ratio + valid_ratio) * len(data)):] torch.manual_seed(0) result_file = result_train_file node_cnt = train_data.shape[1] model = Model(node_cnt, 2, args.window_size, args.multi_layer, horizon=args.horizon) model.to(args.device) if len(train_data) == 0: raise Exception('Cannot organize enough 
training data') if len(valid_data) == 0: raise Exception('Cannot organize enough validation data') if args.norm_method == 'z_score': train_mean = np.mean(train_data, axis=0) train_std = np.std(train_data, axis=0) normalize_statistic = {"mean": train_mean.tolist(), "std": train_std.tolist()} elif args.norm_method == 'min_max': train_min = np.min(train_data, axis=0) train_max = np.max(train_data, axis=0) normalize_statistic = {"min": train_min.tolist(), "max": train_max.tolist()} else: normalize_statistic = None if normalize_statistic is not None: with open(os.path.join(result_file, 'norm_stat.json'), 'w') as f: json.dump(normalize_statistic, f) if args.optimizer == 'RMSProp': my_optim = torch.optim.RMSprop(params=model.parameters(), lr=args.lr, eps=1e-08) else: my_optim = torch.optim.Adam(params=model.parameters(), lr=args.lr, betas=(0.9, 0.999)) my_lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer=my_optim, gamma=args.decay_rate) train_set = ForecastDataset(train_data, window_size=args.window_size, horizon=args.horizon, normalize_method=args.norm_method, norm_statistic=normalize_statistic) valid_set = ForecastDataset(valid_data, window_size=args.window_size, horizon=args.horizon, normalize_method=args.norm_method, norm_statistic=normalize_statistic) train_loader = torch_data.DataLoader(train_set, batch_size=args.batch_size, drop_last=False, shuffle=True, num_workers=0) valid_loader = torch_data.DataLoader(valid_set, batch_size=args.batch_size, shuffle=False, num_workers=0) forecast_loss = nn.MSELoss(reduction='mean').to(args.device) total_params = 0 for name, parameter in model.named_parameters(): if not parameter.requires_grad: continue param = parameter.numel() total_params += param print(f"Total Trainable Params: {total_params}") best_validate_mae = np.inf validate_score_non_decrease_count = 0 performance_metrics = {} for epoch in range(args.epoch): epoch_start_time = time.time() model.train() loss_total = 0 cnt = 0 for i, (inputs, target) in 
enumerate(train_loader): inputs = inputs.to(args.device) target = target.to(args.device) model.zero_grad() forecast, _ = model(inputs) loss = forecast_loss(forecast, target) cnt += 1 loss.backward() my_optim.step() loss_total += float(loss) break print('| end of epoch {:3d} | time: {:5.2f}s | train_total_loss {:5.4f}'.format(epoch, ( time.time() - epoch_start_time), loss_total / cnt)) break # save_model(model, result_file, epoch) if (epoch+1) % args.exponential_decay_step == 0: my_lr_scheduler.step() if (epoch + 1) % args.validate_freq == 0: is_best_for_now = False print('------ validate on data: VALIDATE ------') performance_metrics = \ validate(model, valid_loader, args.device, args.norm_method, normalize_statistic, node_cnt, args.window_size, args.horizon, result_file=result_file) if best_validate_mae > performance_metrics['mae']: best_validate_mae = performance_metrics['mae'] is_best_for_now = True validate_score_non_decrease_count = 0 else: validate_score_non_decrease_count += 1 # save model # if is_best_for_now: # save_model(model, result_file) # early stop if args.early_stop and validate_score_non_decrease_count >= args.early_stop_step: break rnn = nn.GRU(10, 20, 2) input = torch.randn(5, 3, 10) h0 = torch.randn(2, 3, 20) output, hn = rnn(input, h0) input.size(), output.size(), hn.size() ```
github_jupyter
Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the MIT License. ![Impressions](https://PixelServer20190423114238.azurewebsites.net/api/impressions/MachineLearningNotebooks/how-to-use-azureml/automated-machine-learning/manymodels/02_Training/02_Training_Pipeline.png) # Training Pipeline - Automated ML _**Training many models using Automated Machine Learning**_ --- This notebook demonstrates how to train and register 11,973 models using Automated Machine Learning. We will utilize the AutoMLPipelineBuilder to parallelize the process of training 11,973 models. For this notebook we are using an orange juice sales dataset to predict the orange juice quantity for each brand and each store. For more information about the data refer to the Data Preparation Notebook. <span style="color:red"><b>NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 20 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429). </b></span> <span style="color:red"><b> Please ensure you have the latest version of the SDK to ensure AutoML dependencies are consistent.</b></span> ``` # !pip install --upgrade azureml-sdk # !pip install --upgrade azureml-train-automl ``` Install the azureml-contrib-automl-pipeline-steps package that is needed for many models. ``` # !pip install azureml-contrib-automl-pipeline-steps ``` ### Prerequisites At this point, you should have already: 1. Created your AML Workspace using the [00_Setup_AML_Workspace notebook](../../00_Setup_AML_Workspace.ipynb) 2. 
Run [01_Data_Preparation.ipynb](../../01_Data_Preparation.ipynb) to create the dataset ## 1.0 Set up workspace, datastore, experiment ``` import sys sys.path.append("../../") import azureml.core from azureml.core import Workspace, Datastore import pandas as pd import os from utils.env_variables import Env from utils.aml_workspace import Connect e=Env() # set up workspace ws = Connect().authenticate() # Take a look at Workspace ws.get_details() # set up datastores dstore = Datastore.get(ws, e.blob_datastore_name) use_tabular = False output = {} output['SDK version'] = azureml.core.VERSION output['Subscription ID'] = ws.subscription_id output['Workspace'] = ws.name output['Resource Group'] = ws.resource_group output['Location'] = ws.location output['Default datastore name'] = dstore.name pd.set_option('display.max_colwidth', -1) outputDf = pd.DataFrame(data = output, index = ['']) ``` ### Choose an experiment ``` from azureml.core import Experiment experiment = Experiment(ws, e.experiment_name) print('Experiment name: ' + experiment.name) ``` ## 2.0 Call the registered filedataset We use 11,973 datasets and AutoMLPipelineBuilder to build 11,973 time-series to predict the quantity of each store brand. Each dataset represents a brand's 2 years orange juice sales data that contains 7 columns and 122 rows. You will need to register the datasets in the Workspace first. The Data Preparation notebook demonstrates how to register two datasets to the workspace. The registered 'oj_data_small' file dataset contains the first 10 csv files and 'oj_data' contains all 11,973 csv files. You can choose to pass either filedatasets_10_models_input or filedatasets_all_models_inputs in the AutoMLPipelineBuilder. We recommend to **start with filedatasets_10_models** and make sure everything runs successfully, then scale up to filedatasets_all_models. ### Option A You can now use Tabular reads of the CSV/Parquet files instead of having to use a File Data Sets. 
### Option B Using named file data sets ``` from azureml.core import Dataset if use_tabular: ds_name_small = "oj_sales_data_train" input_ds_small = Dataset.Tabular.from_delimited_files( path=dstore.path(ds_name_small + "/"), validate=False ) inference_name_small = "oj_sales_data_inference" inference_ds_small = Dataset.Tabular.from_delimited_files( path=dstore.path(inference_name_small + "/"), validate=False ) else: filedst_10_models = Dataset.get_by_name(ws, name="manymodels_train") filedst_10_models_input = filedst_10_models.as_named_input('train_10_models') ``` ### Option B ## 3.0 Build the training pipeline Now that the dataset, WorkSpace, and datastore are set up, we can put together a pipeline for training. ### Choose a compute target Currently AutoMLPipelineBuilder only supports AMLCompute. You can change to a different compute cluster if one fails. This is the compute target we will pass into our AutoMLPipelineBuilder. ``` from azureml.core.compute import AmlCompute from azureml.core.compute import ComputeTarget # Choose a name for your cluster. amlcompute_cluster_name = e.compute_name found = False # Check if this compute target already exists in the workspace. cts = ws.compute_targets if amlcompute_cluster_name in cts and cts[amlcompute_cluster_name].type == 'AmlCompute': found = True print('Found existing compute target.') compute = cts[amlcompute_cluster_name] if not found: print('Creating a new compute target...') provisioning_config = AmlCompute.provisioning_configuration(vm_size=e.vm_size, min_nodes=e.min_nodes, max_nodes=e.max_nodes) # Create the cluster. compute = ComputeTarget.create(ws, amlcompute_cluster_name, provisioning_config) print('Checking cluster status...') # Can poll for a minimum number of nodes and for a specific timeout. # If no min_node_count is provided, it will use the scale settings for the cluster. 
compute.wait_for_completion(show_output = True, min_node_count = None, timeout_in_minutes = 20) # For a more detailed view of current AmlCompute status, use get_status(). ``` ## Train This dictionary defines the [AutoML settings](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.automlconfig.automlconfig?view=azure-ml-py#parameters), for this forecasting task we add the name of the time column and the maximum forecast horizon. |Property|Description| |-|-| |**task**|forecasting| |**primary_metric**|This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>spearman_correlation</i><br><i>normalized_root_mean_squared_error</i><br><i>r2_score</i><br><i>normalized_mean_absolute_error</i>| |**blocked_models**|Models in blocked_models won't be used by AutoML. All supported models can be found at [here](https://docs.microsoft.com/en-us/python/api/azureml-train-automl-client/azureml.train.automl.constants.supportedmodels.forecasting?view=azure-ml-py).| |**iterations**|Number of models to train. This is optional but provides customer with greater control.| |**iteration_timeout_minutes**|Maximum amount of time in minutes that the model can train. This is optional and depends on the dataset. We ask customer to explore a bit to get approximate times for training the dataset. For OJ dataset we set it 20 minutes| |**experiment_timeout_hours**|Maximum amount of time in hours that the experiment can take before it terminates.| |**label_column_name**|The name of the label column.| |**n_cross_validations**|Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way.| |**enable_early_stopping**|Flag to enable early termination if the score is not improving in the short term.| |**time_column_name**|The name of your time column.| |**max_horizon**|The number of periods out you would like to predict past your training data. 
Periods are inferred from your data.| |**grain_column_names**|The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp.| |**partition_column_names**|The names of columns used to group your models. For timeseries, the groups must not split up individual time-series. That is, each group must contain one or more whole time-series.| |**track_child_runs**|Flag to disable tracking of child runs. Only best run (metrics and model) is tracked if the flag is set to False.| |**pipeline_fetch_max_batch_size**|Determines how many pipelines (training algorithms) to fetch at a time for training, this helps reduce throttling when training at large scale.| ``` import logging from azureml.train.automl.runtime._many_models.many_models_parameters import (ManyModelsTrainParameters,) partition_column_names = [e.primary_partition, e.secondary_partition] automl_settings = { "task": "forecasting", "primary_metric": "normalized_root_mean_squared_error", "iteration_timeout_minutes": 10, # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value "iterations": 5, "experiment_timeout_hours": 0.25, "label_column_name": e.label_column_name, "n_cross_validations": 3, "time_column_name": "DateTime", # e.timestamp_column, #"drop_column_names": "Revenue", "max_horizon": 6, "grain_column_names": partition_column_names, "track_child_runs": False, } mm_paramters = ManyModelsTrainParameters( automl_settings=automl_settings, partition_column_names=partition_column_names ) ``` ### Build many model training steps AutoMLPipelineBuilder is used to build the many models train step. You will need to determine the number of workers and nodes appropriate for your use case. The process_count_per_node is based off the number of cores of the compute VM. The node_count will determine the number of master nodes to use, increasing the node count will speed up the training process. 
* <b>experiment</b>: Current experiment. * <b>automl_settings</b>: AutoML settings dictionary. * <b>train_data</b>: Train dataset. * <b>compute_target</b>: Compute target for training. * <b>partition_column_names</b>: Partition column names. * <b>node_count</b>: The number of compute nodes to be used for running the user script. We recommend to start with 3 and increase the node_count if the training time is taking too long. * <b>process_count_per_node</b>: The number of processes per node. * <b>run_invocation_timeout</b>: The run() method invocation timeout in seconds. The timeout should be set to maximum training time of one AutoML run(with some buffer), by default it's 60 seconds. * <b>output_datastore</b>: Output datastore to output the training results. * <b>train_env(Optional)</b>: Optionally can provide train environment definition to use for training. <span style="color:red"><b>NOTE: There are limits on how many runs we can do in parallel per workspace, and we currently recommend to set the parallelism to maximum of 320 runs per experiment per workspace. If users want to have more parallelism and increase this limit they might encounter Too Many Requests errors (HTTP 429). 
</b></span> ``` # !pip install azureml.contrib.automl.pipeline.steps from azureml.core import Environment env = Environment.get(ws, "AzureML-AutoML", label="Latest") env.environment_variables = {'AZUREML_OUTPUT_UPLOAD_TIMEOUT_SEC':'7200'} from azureml.contrib.automl.pipeline.steps import AutoMLPipelineBuilder if use_tabular: train_steps = AutoMLPipelineBuilder.get_many_models_train_steps( experiment=experiment, train_data=input_ds_small, compute_target=compute, node_count=2, process_count_per_node=8, run_invocation_timeout=920, train_pipeline_parameters=mm_paramters, output_datastore=dstore) else: train_steps = AutoMLPipelineBuilder.get_many_models_train_steps( experiment=experiment, train_data=filedst_10_models_input, compute_target=compute, node_count=2, process_count_per_node=8, run_invocation_timeout=920, train_pipeline_parameters=mm_paramters, output_datastore=dstore) ``` ## 4.0 Run the training pipeline ### Submit the pipeline to run Next we submit our pipeline to run. The whole training pipeline takes about 1h 11m using a STANDARD_D16S_V3 VM with our current AutoMLPipelineBuilder setting. ``` os.environ.get("BUILDID") from azureml.pipeline.core import Pipeline from azureml.widgets import RunDetails pipelineName = e.experiment_name pipeline = Pipeline(workspace=ws, steps=train_steps) # run = experiment.submit(pipeline, tags={"BuildId": os.environ.get("BUILDID"), "ComputeName": e.vm_size}) pipeline_run = Experiment(ws, pipelineName).submit(pipeline, tags={"BuildId": os.environ.get("BUILDID"), "ComputeName": e.vm_size}) RunDetails(pipeline_run).show() ``` You can run the following command if you'd like to monitor the training process in jupyter notebook. It will stream logs live while training. **Note**: This command may not work for Notebook VM, however it should work on your local laptop. ``` #run.wait_for_completion(show_output=True) pipeline_run.wait_for_completion(show_output=True) ``` Successfully trained, registered Automated ML models. 
## 5.0 Review outputs of the training pipeline The training pipeline will train and register models to the Workspace. You can review trained models in the Azure Machine Learning Studio under 'Models'. If there are any issues with training, you can go to 'many-models-training' run under the pipeline run and explore logs under 'Logs'. You can look at the stdout and stderr output under logs/user/worker/<ip> for more details ## 6.0 Get list of AutoML runs along with registered model names and tags The following code snippet will iterate through all the automl runs for the experiment and list the details. **Framework** - AutoML, **Dataset** - input data set, **Run** - AutoML run id, **Status** - AutoML run status, **Model** - Registered model name, **Tags** - Tags for model, **StartTime** - Start time, **EndTime** - End time, **ErrorType** - ErrorType, **ErrorCode** - ErrorCode, **ErrorMessage** - Error Message ``` from scripts.helper import get_training_output import os training_results_name = "training_results" training_output_name = "many_models_training_output" #training_file = get_training_output(run, training_results_name, training_output_name) training_file = get_training_output(pipeline_run, training_results_name, training_output_name) all_columns = ["Framework", "Dataset", "Run", "Status", "Model", "Tags", "StartTime", "EndTime" , "ErrorType", "ErrorCode", "ErrorMessage" ] df = pd.read_csv(training_file, delimiter=" ", header=None, names=all_columns) training_csv_file = "training.csv" df.to_csv(training_csv_file) print("Training output has", df.shape[0], "rows. Please open", os.path.abspath(training_csv_file), "to browse through all the output.") df.head(5) ``` ## 7.0 Publish and schedule the pipeline (Optional) ### 7.1 Publish the pipeline Once you have a pipeline you're happy with, you can publish a pipeline so you can call it programmatically later on. 
See this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-create-your-first-pipeline#publish-a-pipeline) for additional information on publishing and calling pipelines. ``` from azureml.pipeline.core import PipelineEndpoint pipelineEndpointName = 'automl_train_many_models' published_pipeline = pipeline.publish(name = 'automl_train_many_models', description = 'train many models', version = '1', continue_on_step_failure = False) if pipelineEndpointName in str(PipelineEndpoint.list(ws)): # Add a new Version to an existing Endpoint pipeline_endpoint = PipelineEndpoint.get(workspace = ws, name = pipelineEndpointName) pipeline_endpoint.add_default(published_pipeline) else: # Create a new Endpoint pipeline_endpoint = PipelineEndpoint.publish(workspace = ws, name = pipelineEndpointName, pipeline = published_pipeline, description = "") print(pipeline_run.id) ``` ### 7.2 Schedule the pipeline You can also [schedule the pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/how-to-schedule-pipelines) to run on a time-based or change-based schedule. This could be used to automatically retrain models every month or based on another trigger such as data drift. ``` # from azureml.pipeline.core import Schedule, ScheduleRecurrence # training_pipeline_id = published_pipeline.id # recurrence = ScheduleRecurrence(frequency="Month", interval=1, start_time="2020-01-01T09:00:00") # recurring_schedule = Schedule.create(ws, name="automl_training_recurring_schedule", # description="Schedule Training Pipeline to run on the first day of every month", # pipeline_id=training_pipeline_id, # experiment_name=experiment.name, # recurrence=recurrence) ``` ## 8.0 Bookkeeping of workspace (Optional) ### 8.1 Cancel any runs that are running To cancel any runs that are still running in a given experiment. 
``` # from scripts.helper import cancel_runs_in_experiment # failed_experiment = 'Please modify this and enter the experiment name' # # Please note that the following script cancels all the currently running runs in the experiment # cancel_runs_in_experiment(ws, failed_experiment) ```
github_jupyter
``` import os, sys sys.path.append("../") import matplotlib import matplotlib.pyplot as plt import matplotlib.pylab as pylab from matplotlib import gridspec import numpy as np from scipy.optimize import minimize from scipy.stats import chi2 from tqdm import * from grf.grf import FIRAS from grf.units import * from grf.pk_interp import PowerSpectrumGridInterpolator from IPython.display import set_matplotlib_formats set_matplotlib_formats('retina') %matplotlib inline %load_ext autoreload %autoreload 2 ``` # FIRAS data checks and plot ``` # Plot parameters from plot_params import params pylab.rcParams.update(params) cols_default = plt.rcParams['axes.prop_cycle'].by_key()['color'] # Where to save plots plots_dir = "../paper/draft-letter/plots/" ``` ## FIRAS data checks ``` pspec = PowerSpectrumGridInterpolator("nonlin_matter_bj") firas = FIRAS(pspec) chi2_null = minimize(firas.chi2_FIRAS,x0=[2.725],args=(0, np.ones_like(firas.omega_FIRAS)), method='Powell') chi2_null.x ndof = len(firas.d) - 1 print('chi2 is', chi2_null.fun) print('chi2 / ndof is', chi2_null.fun / ndof) chi2.ppf(q=0.95, df=ndof) # Amazing! (firas.d - firas.B_CMB(firas.omega_FIRAS, chi2_null.x) / (1e6 * Jy)) * 1e3 / firas.resid # Get disappearance probabilities for benchmark point p1 = 1 - firas.P_tot_perturb(firas.omega_FIRAS, 6e-6, 4e-15 * eV)[2] ``` ## FIRAS blackbody plot (Fig. 
4) ``` gs = gridspec.GridSpec(2, 1, height_ratios=[2.8, 1], hspace=0.04) # Top plot ax0 = plt.subplot(gs[0]) line2 = ax0.errorbar(firas.df['freq'], firas.d, yerr=firas.df['uncert'] / 1e3, c=cols_default[0], fmt='none', capsize=3, capthick=1, elinewidth=1.5, label='COBE/FIRAS data') line1, = ax0.plot(firas.df['freq'], firas.B_CMB(firas.omega_FIRAS, chi2_null.x) / (1e6 * Jy), c='k', ls='-', label='Blackbody $T_{\mathrm{CMB}} = 2.725\,\mathrm{K}$') ax0.set_xlim(np.min(firas.df['freq']), np.max(firas.df['freq'])) ax0.set_ylim(-50, 420) ax0.set_ylabel(r"Intensity\,[MJy\,sr$^{-1}$]") line3, = ax0.plot([],[], c=cols_default[1], ls=':', label=r'$m_{A^\prime}=4\times 10^{-15}$\,eV, $\epsilon=6\times 10^{-6}$') ax0.set_ylim(-100, 410) first_legend = plt.legend(handles=[line1], loc='upper right', fontsize=14) # Add the legend manually to the current Axes. plt.gca().add_artist(first_legend) # Create another legend for the second line. plt.legend(handles=[line2, line3], loc='lower left', fontsize=14, handlelength=1.8) # Bottom plot ax1 = plt.subplot(gs[1]) ax1.errorbar(firas.df['freq'], firas.d - firas.B_CMB(firas.omega_FIRAS, chi2_null.x) / (1e6 * Jy), yerr=firas.df['uncert'] / 1e3, c=cols_default[0], fmt='none', capsize=3, capthick=1, elinewidth=1.5) ax1.plot(firas.df['freq'], p1 * firas.B_CMB(firas.omega_FIRAS, chi2_null.x) / (1e6 * Jy) - firas.B_CMB(firas.omega_FIRAS, chi2_null.x) / (1e6 * Jy), c=cols_default[1], ls=':') ax1.axhline(0., c='k', ls='-', lw=0.5) ax1.set_xlim(np.min(firas.df['freq']), np.max(firas.df['freq'])) ax1.set_ylim(-0.15, 0.15) ax1.set_ylabel("Residuals", labelpad=0.1) ax1.set_xlabel(r"$\nu\,[\mathrm{cm}^{-1}]$") plt.suptitle(r"\bf{COBE/FIRAS CMB spectrum}", y=0.94, fontsize=18) plt.tight_layout() plt.savefig(plots_dir + "firas_bb.pdf", bbox_inches='tight') ```
github_jupyter
``` import os os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "1" import tensorflow as tf tf_config = tf.ConfigProto() tf_config.gpu_options.allow_growth = True sess = tf.Session(config=tf_config) import keras import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from nltk.tokenize import word_tokenize from keras.preprocessing.text import text_to_word_sequence, Tokenizer from keras.preprocessing.sequence import pad_sequences import matplotlib as mpl %matplotlib inline from matplotlib import pyplot as plt from keras.utils import plot_model from IPython.display import Image keras.__version__ tf.__version__ ``` ### Hyperparameters ``` DATA_FILE_PATH = 'data/quora_duplicate_questions.tsv' EMB_DIR = '/collection/hpallika/deeplearning/research_work/word_embeddings/glove.6B.50d.txt' MAX_VOCAB_SIZE = 30000 MAX_SENT_LEN = 15 EMBEDDING_DIM = 50 BATCH_SIZE = 32 N_EPOCHS = 10 df_questions = pd.read_table(DATA_FILE_PATH, sep='\t', nrows=100000) print('Dataset size:', df_questions.shape) df_questions.head(3) # Converting all questions to string df_questions['question1'] = df_questions['question1'].apply(lambda x: str(x)) df_questions['question2'] = df_questions['question2'].apply(lambda x: str(x)) sent_len = lambda x:len(x) df_questions['q1_length'] = df_questions.question1.apply(sent_len) df_questions['q2_length'] = df_questions.question2.apply(sent_len) df_questions[df_questions['q1_length']<10]['question1'].head(10) # Questions having lesser than 10 characters can be discarded. 
indices = set(df_questions[df_questions['q1_length']<10].index).union(df_questions[df_questions['q2_length']<10].index) # Can drop the character count columns - to save memory df_questions.drop(['q1_length','q2_length'], inplace=True, axis=1) df_questions.drop(indices, inplace=True) df_questions.reset_index() df_questions.is_duplicate.value_counts() word_count = lambda x:len(x.split()) # Word count for each question df_questions['q1_wc'] = df_questions.question1.apply(word_count) df_questions['q2_wc'] = df_questions.question2.apply(word_count) p = 80.0 print('Question-1 :{} % of the sentences have a length less than or equal to {}'.format(p, np.percentile(df_questions['q1_wc'], 80))) print('Question-2 :{} % of the sentences have a length less than or equal to {}'.format(p, np.percentile(df_questions['q2_wc'], 80))) ``` Better to use NLTK tokenizer first and then Keras word to indices <br> Keras: 'what is this?' -> ['what', 'is', 'this?'] ``` %%time question_list = list(df_questions['question1']) + list(df_questions['question2']) question_list = [' '.join(word_tokenize(q)[:MAX_SENT_LEN]) for q in question_list] question_list[:10] # Data is noisy with many unwanted characters # Filters - removed '?' 
tokenizer = Tokenizer(num_words=MAX_VOCAB_SIZE, filters='!"#$%&()*+,-./:;<=>@[\\]^_`{|}~\t\n') tokenizer.fit_on_texts(question_list) print("Number of words in vocabulary:", len(tokenizer.word_index)) # Limit vocab and idx-word dictionary word_index = word_index = {k: v for k, v in tokenizer.word_index.items() if v < MAX_VOCAB_SIZE} idx_to_word = dict((v,k) for k,v in word_index.items()) X = tokenizer.texts_to_sequences(question_list) X = pad_sequences(X, maxlen=MAX_SENT_LEN, padding='post', truncating='post') X_q1 = X[:len(X)//2] X_q2 = X[len(X)//2:] del X X_q1[:3] X_train_q1, X_test_q1, X_train_q2, X_test_q2, y_train, y_test = train_test_split(X_q1, X_q2, df_questions['is_duplicate'], random_state=10, test_size=0.1) ``` ### Embedding Matrix ``` # Load GloVe word embeddings # Download Link: https://nlp.stanford.edu/projects/glove/ print("[INFO]: Reading Word Embeddings ...") # Data path embeddings = {} f = open(EMB_DIR) for line in f: values = line.split() word = values[0] vector = np.asarray(values[1:], dtype='float32') embeddings[word] = vector f.close() # Create an embedding matrix containing only the word's in our vocabulary # If the word does not have a pre-trained embedding, then randomly initialize the embedding embeddings_matrix = np.random.uniform(-0.05, 0.05, size=(len(word_index)+1, EMBEDDING_DIM)) # +1 is because the matrix indices start with 0 for word, i in word_index.items(): # i=0 is the embedding for the zero padding try: embeddings_vector = embeddings[word] except KeyError: embeddings_vector = None if embeddings_vector is not None: embeddings_matrix[i] = embeddings_vector del embeddings ``` ### CNN with Keras Model API Also called Graph/Functional API ``` from keras.models import Model from keras.layers import Layer, Input, Dense, Concatenate, Conv2D, Reshape, MaxPooling1D, Flatten, BatchNormalization, Activation, Dropout, Embedding # Bigram and trigram filters bi_filter_size = 2 tri_filter_size = 3 num_filters = 20 ``` #### Question 1 
Computational Graph ``` input_1 = Input(shape=(MAX_SENT_LEN, ), name='q1_input') # Common embedding lookup layer emb_look_up = Embedding(input_dim=MAX_VOCAB_SIZE, output_dim=EMBEDDING_DIM, weights = [embeddings_matrix], trainable=False, mask_zero=False, name='q_embedding_lookup') emb_1 = emb_look_up(input_1) # Need to be reshaped because the CONV layer assumes 1 dimnesion as num of channels emb_1 = Reshape(target_shape=(1, MAX_SENT_LEN, EMBEDDING_DIM), name='q1_embedding_reshape')(emb_1) # Convolutional Layers conv_1_bi = Conv2D(filters=num_filters, kernel_size=(bi_filter_size, EMBEDDING_DIM), padding='valid', activation='relu', data_format='channels_first', name='q1_bigram_conv')(emb_1) conv_1_tri = Conv2D(filters=num_filters, kernel_size=(tri_filter_size, EMBEDDING_DIM), padding='valid', activation='relu', data_format='channels_first', name='q1_trigram_conv')(emb_1) # Remove channel dimension before max-pooling operation bi_out_timesteps = MAX_SENT_LEN - bi_filter_size + 1 tri_out_timesteps = MAX_SENT_LEN - tri_filter_size + 1 conv_1_bi = Reshape(target_shape=(bi_out_timesteps, num_filters), name='q1_bigram_conv_reshape')(conv_1_bi) # (MAX_SENT_LEN - bi_filter_size + 1, num_filters) conv_1_tri = Reshape(target_shape=(tri_out_timesteps, num_filters), name='q1_trigram_conv_reshape')(conv_1_tri) # Max-pooling Layer # Pool across timesteps to get 1 feature per filter, i.e., each filter captures 1 feature about the sentence/question max_pool_1_bi = MaxPooling1D(pool_size = bi_out_timesteps, name='q1_bigram_maxpool')(conv_1_bi) max_pool_1_tri = MaxPooling1D(pool_size = tri_out_timesteps, name='q1_trigram_maxpool')(conv_1_tri) # Merge the features learnt by bi and tri filters merged_1 = Concatenate(name='q1_maxpool_concat')([max_pool_1_bi, max_pool_1_tri]) # Inputs dropped out randomly so that there is no heavy dependence on specific features for prediction dropout_1 = Dropout(rate=0.2, name='q1_dropout')(merged_1) flatten_1 = Flatten(name='q1_flatten')(dropout_1) ``` 
#### Question 2 Computational Graph ``` input_2 = Input(shape=(MAX_SENT_LEN, ), name='q2_input') emb_2 = emb_look_up(input_2) # Need to be reshaped because the CONV layer assumes 1 dimnesion as num of channels emb_2 = Reshape((1, MAX_SENT_LEN, EMBEDDING_DIM), name='q2_embedding_reshape')(emb_2) # Convolutional Layers conv_2_bi = Conv2D(filters=num_filters, kernel_size=(bi_filter_size, EMBEDDING_DIM), padding='valid', activation='relu', data_format='channels_first', name='q2_bigram_conv')(emb_2) conv_2_tri = Conv2D(filters=num_filters, kernel_size=(tri_filter_size, EMBEDDING_DIM), padding='valid', activation='relu', data_format='channels_first', name='q2_trigram_conv')(emb_2) # Remove channel dimension before max-pooling operation conv_2_bi = Reshape((bi_out_timesteps, num_filters), name='q2_bigram_conv_reshape')(conv_2_bi) # (MAX_SENT_LEN - bi_filter_size + 1, num_filters) conv_2_tri = Reshape((tri_out_timesteps, num_filters), name='q2_trigram_conv_reshape')(conv_2_tri) # Max-pooling Layer # Pool across timesteps to get 1 feature per filter, i.e., each filter captures 1 feature about the sentence/question max_pool_2_bi = MaxPooling1D(pool_size = bi_out_timesteps, name='q2_bigram_maxpool')(conv_2_bi) max_pool_2_tri = MaxPooling1D(pool_size = tri_out_timesteps, name='q2_trigram_maxpool')(conv_2_tri) # Merge the features learnt by bi and tri filters merged_2 = Concatenate(name='q2_maxpool_flatten')([max_pool_2_bi, max_pool_2_tri]) # Inputs dropped out randomly so that there is no heavy dependence on specific features for prediction dropout_2 = Dropout(rate=0.2, name='q2_dropout')(merged_2) flatten_2 = Flatten(name='q2_flatten')(dropout_2) ``` #### Merge outputs of Q1 and Q2 ``` # With batch-normalization, the output of a previous layer is mu-sigma normalized, # before it is fed into the next layer. # For feed-forward networks, batch-normalization is carried out # after/before applying RELU activation (?) 
# https://www.reddit.com/r/MachineLearning/comments/67gonq/d_batch_normalization_before_or_after_relu/ merged = Concatenate(name='q1_q2_concat')([flatten_1, flatten_2]) # Dense layers dense_1 = Dense(units=10, name='q1_q2_dense')(merged) bn_1 = BatchNormalization(name='batchnorm')(dense_1) relu_1 = Activation(activation='relu', name='relu_activation')(bn_1) dense_1_dropout = Dropout(0.2, name='dense_dropout')(relu_1) output_prob = Dense(units=1, activation='sigmoid', name='output_layer')(dense_1_dropout) model = Model(inputs=[input_1, input_2], outputs=output_prob, name='text_pair_cnn') model.summary() model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy']) model.fit(x = [X_train_q1, X_train_q2], y = y_train, batch_size=BATCH_SIZE, epochs=N_EPOCHS, validation_data=([X_test_q1, X_test_q2], y_test)) plot_model(model, to_file='text_pair_cnn_classifier.png', show_layer_names=True) Image('text_pair_cnn_classifier.png') ``` #### Notes - This model does not consider interaction between sentences - We could include some attention mechanisms - CNN Siamese Architectures - Design and input hand-crafted features along with CNN learnt features at the dense layer ``` # This custom layer is required if you want to set mask_zero to be True at the embedding layer # It passes inputs through unchanged but stops the mask from propagating to later layers class NonMasking(Layer): def __init__(self, **kwargs): self.supports_masking = True super(NonMasking, self).__init__(**kwargs) def build(self, input_shape): # delegate to the base class so the layer is marked as built super(NonMasking, self).build(input_shape) def compute_mask(self, input, input_mask=None): # do not pass the mask to the next layers return None def call(self, x, mask=None): return x def compute_output_shape(self, input_shape): return input_shape emb_1 = NonMasking(name='emb_masking_q1')(emb_1) emb_2 = NonMasking(name='emb_masking_q2')(emb_2) ```
github_jupyter
<a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a> $ \newcommand{\bra}[1]{\langle #1|} $ $ \newcommand{\ket}[1]{|#1\rangle} $ $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $ $ \newcommand{\dot}[2]{ #1 \cdot #2} $ $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $ $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $ $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $ $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $ $ \newcommand{\mypar}[1]{\left( #1 \right)} $ $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $ $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $ $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $ $ \newcommand{\onehalf}{\frac{1}{2}} $ $ \newcommand{\donehalf}{\dfrac{1}{2}} $ $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $ $ \newcommand{\vzero}{\myvector{1\\0}} $ $ \newcommand{\vone}{\myvector{0\\1}} $ $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $ $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $ $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $ $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $ $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $ $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $ $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $ $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $ $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $ $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $ $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $ $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $ $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $ 
$ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $ $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $ <font style="font-size:28px;" align="left"><b><font color="blue"> Solutions for </font>Two Probabilistic Bits </b></font> <br> _prepared by Abuzer Yakaryilmaz_ <br><br> <a id="task2"></a> <h3> Task 2 </h3> Find the probabilistic state of the composite system. <i> Rule 1: Tensor product distributes over addition in the same way as the distribution of multiplication over addition. Rule 2: $ \big( 0.3 \pstate{1} \big) \otimes \big( 0.7 \pstate{0} \big) = (0.3 \cdot 0.7) \big( \pstate{1} \otimes \pstate{0} \big) = 0.21 \pstate{10} $. </i> <h3>Solution</h3> The probabilistic state of the composite system is $ \big( 0.2 \pstate{0} + 0.8 \pstate{1} \big) \otimes \big( 0.6 \pstate{0} + 0.4 \pstate{1} \big) $. $ \big( 0.2 \pstate{0} + 0.8 \pstate{1} \big) \otimes \big( 0.6 \pstate{0} + 0.4 \pstate{1} \big) = $ $ 0.12 \big( \pstate{0} \otimes \pstate{0} \big) + 0.08 \big( \pstate{0} \otimes \pstate{1} \big) + 0.48 \big( \pstate{1} \otimes \pstate{0} \big) + 0.32 \big( \pstate{1} \otimes \pstate{1} \big) = $ $ 0.12 \pstate{00} + 0.08 \pstate{01} + 0.48 \pstate{10} + 0.32 \pstate{11}. $ <a id="task3"></a> <h3> Task 3 </h3> Find the probabilistic state of the composite system by calculating this tensor product $ \myvector{0.2 \\ 0.8} \otimes \myvector{0.6 \\ 0.4 } $. <h3>Solution</h3> $ \myvector{0.2 \\ 0.8} \otimes \myvector{0.6 \\ 0.4 } = \myvector{ 0.2 \myvector{0.6 \\ 0.4} \\ 0.8 \myvector{0.6 \\ 0.4} } = \myvector{0.12 \\ 0.08 \\ 0.48 \\ 0.32} $. <a id="task4"></a> <h3> Task 4 </h3> Find the vector representations of $ \pstate{00} $, $ \pstate{01} $, $\pstate{10}$, and $ \pstate{11} $. 
<i>The vector representation of $ \pstate{ab} $ is $ \pstate{a} \otimes \pstate{b} $ for $ a,b \in \{0,1\} $.</i> <h3>Solution</h3> $ \pstate{00} = \pstate{0} \otimes \pstate{0} = \myvector{1 \\ 0} \otimes \myvector{1 \\ 0} = \myvector{1 \myvector{1 \\ 0} \\ 0 \myvector{1 \\ 0} } = \myvector{1 \\ 0 \\ 0 \\ 0} $. $ \pstate{01} = \pstate{0} \otimes \pstate{1} = \myvector{1 \\ 0} \otimes \myvector{0 \\ 1} = \myvector{1 \myvector{0 \\ 1} \\ 0 \myvector{0 \\ 1} } = \myvector{0 \\ 1 \\ 0 \\ 0} $. $ \pstate{10} = \pstate{1} \otimes \pstate{0} = \myvector{0 \\ 1} \otimes \myvector{1 \\ 0} = \myvector{0 \myvector{1 \\ 0} \\ 1 \myvector{1 \\ 0} } = \myvector{0 \\ 0 \\ 1 \\ 0} $. $ \pstate{11} = \pstate{1} \otimes \pstate{1} = \myvector{0 \\ 1} \otimes \myvector{0 \\ 1} = \myvector{0 \myvector{0 \\ 1} \\ 1 \myvector{0 \\ 1} } = \myvector{0 \\ 0 \\ 0 \\ 1} $.
github_jupyter
# Iterating with .iterrows() In the video, we discussed that .iterrows() returns each DataFrame row as a tuple of (index, pandas Series) pairs. But, what does this mean? Let's explore with a few coding exercises. A pandas DataFrame has been loaded into your session called pit_df. This DataFrame contains the stats for the Major League Baseball team named the Pittsburgh Pirates (abbreviated as 'PIT') from the year 2008 to the year 2012. It has been printed into your console for convenience. ``` import pandas as pd pit_df = pd.read_csv('pit.csv') pit_df # Iterate over pit_df and print each row for i,row in pit_df.iterrows(): print(row) # Iterate over pit_df and print each index variable and then each row for i,row in pit_df.iterrows(): print(i) print(row) print(type(row)) # Use one variable instead of two to store the result of .iterrows() for row_tuple in pit_df.iterrows(): print(row_tuple) # Print the row and type of each row for row_tuple in pit_df.iterrows(): print(row_tuple) print(type(row_tuple)) ``` # Run differentials with .iterrows() You've been hired by the San Francisco Giants as an analyst—congrats! The team's owner wants you to calculate a metric called the run differential for each season from the year 2008 to 2012. This metric is calculated by subtracting the total number of runs a team allowed in a season from the team's total number of runs scored in a season. 'RS' means runs scored and 'RA' means runs allowed. The below function calculates this metric: ```python def calc_run_diff(runs_scored, runs_allowed): run_diff = runs_scored - runs_allowed return run_diff ``` A DataFrame has been loaded into your session as giants_df and printed into the console. Let's practice using .iterrows() to add a run differential column to this DataFrame. 
``` import pandas as pd giants_df = pd.read_csv('giants.csv') def calc_run_diff(runs_scored, runs_allowed): run_diff = runs_scored - runs_allowed return run_diff # Create an empty list to store run differentials run_diffs = [] # Write a for loop and collect runs allowed and runs scored for each row for i,row in giants_df.iterrows(): runs_scored = row['RS'] runs_allowed = row['RA'] # Use the provided function to calculate run_diff for each row run_diff = calc_run_diff(runs_scored, runs_allowed) # Append each run differential to the output list run_diffs.append(run_diff) giants_df['RD'] = run_diffs print(giants_df) ``` # Iterating with .itertuples() Remember, .itertuples() returns each DataFrame row as a special data type called a namedtuple. You can look up an attribute within a namedtuple with a special syntax. Let's practice working with namedtuples. A pandas DataFrame has been loaded into your session called rangers_df. This DataFrame contains the stats ('Team', 'League', 'Year', 'RS', 'RA', 'W', 'G', and 'Playoffs') for the Major League baseball team named the Texas Rangers (abbreviated as 'TEX'). ``` rangers_df = pd.read_csv('rangers.csv') rangers_df # Loop over the DataFrame and print each row for row_tuple in rangers_df.itertuples(): print(row_tuple) # Loop over the DataFrame and print each row's Index, Year and Wins (W) for row in rangers_df.itertuples(): i = row.Index year = row.Year wins = row.W print(i, year, wins) # Loop over the DataFrame and print each row's Index, Year and Wins (W) for row in rangers_df.itertuples(): i = row.Index year = row.Year wins = row.W # Check if rangers made Playoffs (1 means yes; 0 means no) if row.Playoffs == 1: print(i, year, wins) ``` # Run differentials with .itertuples() The New York Yankees have made a trade with the San Francisco Giants for your analyst contract— you're a hot commodity! Your new boss has seen your work with the Giants and now wants you to do something similar with the Yankees data. 
He'd like you to calculate run differentials for the Yankees from the year 1962 to the year 2012 and find which season they had the best run differential. You've remembered the function you used when working with the Giants and quickly write it down: ```python def calc_run_diff(runs_scored, runs_allowed): run_diff = runs_scored - runs_allowed return run_diff ``` Let's use .itertuples() to loop over the yankees_df DataFrame (which has been loaded into your session) and calculate run differentials. ``` yankees_df = pd.read_csv('yankees.csv') yankees_df def calc_run_diff(runs_scored, runs_allowed): run_diff = runs_scored - runs_allowed return run_diff run_diffs = [] # Loop over the DataFrame and calculate each row's run differential for row in yankees_df.itertuples(): runs_scored = row.RS runs_allowed = row.RA run_diff = calc_run_diff(runs_scored, runs_allowed) run_diffs.append(run_diff) # Append new column yankees_df['RD'] = run_diffs print(yankees_df) ``` # Analyzing baseball stats with .apply() The Tampa Bay Rays want you to analyze their data. They'd like the following metrics: The sum of each column in the data The total amount of runs scored in a year ('RS' + 'RA' for each year) The 'Playoffs' column in text format rather than using 1's and 0's The below function can be used to convert the 'Playoffs' column to text: ```python def text_playoffs(num_playoffs): if num_playoffs == 1: return 'Yes' else: return 'No' ``` Use .apply() to get these metrics. A DataFrame (rays_df) has been loaded and printed to the console. This DataFrame is indexed on the 'Year' column. 
``` rays_df = pd.read_csv('rays.csv') rays_df rays_df.set_index('Year') # Gather sum of all columns stat_totals = rays_df.apply(sum, axis=0) print(stat_totals) # Gather total runs scored in all games per year total_runs_scored = rays_df[['RS', 'RA']].apply(sum, axis=1) print(total_runs_scored) def text_playoffs(num_playoffs): if num_playoffs == 1: return 'Yes' else: return 'No' # Convert numeric playoffs to text by applying text_playoffs() textual_playoffs = rays_df.apply(lambda row: text_playoffs(row['Playoffs']), axis=1) print(textual_playoffs) ``` # Settle a debate with .apply() Word has gotten to the Arizona Diamondbacks about your awesome analytics skills. They'd like for you to help settle a debate amongst the managers. One manager claims that the team has made the playoffs every year they have had a win percentage of 0.50 or greater. Another manager says this is not true. Let's use the below function and the .apply() method to see which manager is correct. ```python def calc_win_perc(wins, games_played): win_perc = wins / games_played return np.round(win_perc,2) ``` A DataFrame named dbacks_df has been loaded into your session. ``` dbacks_df = pd.read_csv('dbacks.csv') dbacks_df # Display the first five rows of the DataFrame print(dbacks_df.head()) import numpy as np def calc_win_perc(wins, games_played): win_perc = wins / games_played return np.round(win_perc,2) # Create a win percentage Series win_percs = dbacks_df.apply(lambda row: calc_win_perc(row['W'], row['G']), axis=1) print(win_percs, '\n') # Append a new column to dbacks_df dbacks_df['WP'] = win_percs print(dbacks_df, '\n') # Display dbacks_df where WP is greater than 0.50 print(dbacks_df[dbacks_df['WP'] >= 0.50]) ``` # Replacing .iloc with underlying arrays Now that you have a better grasp on a DataFrame's internals let's update one of your previous analyses to leverage a DataFrame's underlying arrays. 
You'll revisit the win percentage calculations you performed row by row with the .iloc method: ```python def calc_win_perc(wins, games_played): win_perc = wins / games_played return np.round(win_perc,2) win_percs_list = [] for i in range(len(baseball_df)): row = baseball_df.iloc[i] wins = row['W'] games_played = row['G'] win_perc = calc_win_perc(wins, games_played) win_percs_list.append(win_perc) baseball_df['WP'] = win_percs_list ``` Let's update this analysis to use arrays instead of the .iloc method. A DataFrame (baseball_df) has been loaded into your session. ``` import numpy as np import pandas as pd baseball_df = pd.read_csv('baseball_stats.csv') baseball_df.head() def calc_win_perc(wins, games_played): win_perc = wins / games_played return np.round(win_perc,2) win_percs_list = [] for i in range(len(baseball_df)): row = baseball_df.iloc[i] wins = row['W'] games_played = row['G'] win_perc = calc_win_perc(wins, games_played) win_percs_list.append(win_perc) baseball_df['WP'] = win_percs_list # Use the W array and G array to calculate win percentages win_percs_np = calc_win_perc(baseball_df['W'], baseball_df['G']) # Use the W array and G array to calculate win percentages win_percs_np = calc_win_perc(baseball_df['W'].values, baseball_df['G'].values) # Append a new column to baseball_df that stores all win percentages baseball_df['WP'] = win_percs_np print(baseball_df.head()) ``` # Bringing it all together: Predict win percentage A pandas DataFrame (baseball_df) has been loaded into your session. For convenience, a dictionary describing each column within baseball_df has been printed into your console. You can reference these descriptions throughout the exercise. 
You'd like to attempt to predict a team's win percentage for a given season by using the team's total runs scored in a season ('RS') and total runs allowed in a season ('RA') with the following function: ```python def predict_win_perc(RS, RA): prediction = RS ** 2 / (RS ** 2 + RA ** 2) return np.round(prediction, 2) ``` Let's compare the approaches you've learned to calculate a predicted win percentage for each season (or row) in your DataFrame. ``` win_perc_preds_loop = [] def predict_win_perc(RS, RA): prediction = RS ** 2 / (RS ** 2 + RA ** 2) return np.round(prediction, 2) # Use a loop and .itertuples() to collect each row's predicted win percentage for row in baseball_df.itertuples(): runs_scored = row.RS runs_allowed = row.RA win_perc_pred = predict_win_perc(runs_scored, runs_allowed) win_perc_preds_loop.append(win_perc_pred) # Apply predict_win_perc to each row of the DataFrame win_perc_preds_apply = baseball_df.apply(lambda row: predict_win_perc(row['RS'], row['RA']), axis=1) # Calculate the win percentage predictions using NumPy arrays win_perc_preds_np = predict_win_perc(baseball_df['RS'].values, baseball_df['RA'].values) baseball_df['WP_preds'] = win_perc_preds_np print(baseball_df.head()) ```
github_jupyter
# Power Outages This project uses major power outage data in the continental U.S. from January 2000 to July 2016. Here, a major power outage is defined as a power outage that impacted at least 50,000 customers or caused an unplanned firm load loss of atleast 300MW. Interesting questions to consider include: - Where and when do major power outages tend to occur? - What are the characteristics of major power outages with higher severity? Variables to consider include location, time, climate, land-use characteristics, electricity consumption patterns, economic characteristics, etc. What risk factors may an energy company want to look into when predicting the location and severity of its next major power outage? - What characteristics are associated with each category of cause? - How have characteristics of major power outages changed over time? Is there a clear trend? ### Getting the Data The data is downloadable [here](https://engineering.purdue.edu/LASCI/research-data/outages/outagerisks). A data dictionary is available at this [article](https://www.sciencedirect.com/science/article/pii/S2352340918307182) under *Table 1. Variable descriptions*. ### Cleaning and EDA - Note that the data is given as an Excel file rather than a CSV. Open the data in Excel or another spreadsheet application and determine which rows and columns of the Excel spreadsheet should be ignored when loading the data in pandas. - Clean the data. - The power outage start date and time is given by `OUTAGE.START.DATE` and `OUTAGE.START.TIME`. It would be preferable if these two columns were combined into one datetime column. Combine `OUTAGE.START.DATE` and `OUTAGE.START.TIME` into a new datetime column called `OUTAGE.START`. Similarly, combine `OUTAGE.RESTORATION.DATE` and `OUTAGE.RESTORATION.TIME` into a new datetime column called `OUTAGE.RESTORATION`. - Understand the data in ways relevant to your question using univariate and bivariate analysis of the data as well as aggregations. 
*Hint 1: pandas can load multiple filetypes: `pd.read_csv`, `pd.read_excel`, `pd.read_html`, `pd.read_json`, etc.*

*Hint 2: `pd.to_datetime` and `pd.to_timedelta` will be useful here.*

*Tip: To visualize geospatial data, consider [Folium](https://python-visualization.github.io/folium/) or another geospatial plotting library.*

### Assessment of Missingness
- Assess the missingness of a column that is not missing by design.

### Hypothesis Test
Find a hypothesis test to perform. You can use the questions at the top of the notebook for inspiration.

# Summary of Findings

### Introduction
In this project, we explore the major power outage data in the continental U.S. from January 2000 to July 2016. The data set shows the number of outages in each state and describes the reasons they happen. This data set also includes variables such as how many customers are affected by each outage and the duration of each outage. Specifically, we want to find out whether the duration is longer if the outage starts at night. Thus, we set up a hypothesis test for this question and check whether there is significant evidence to reject our null hypothesis.

### Cleaning and EDA
At first glance at this dataset, we realized the necessity of doing data cleaning. For example, the column names are meaningless and the first few rows are either messy or contain all NaN values. From our observation, we found that the variable names and the units of the variables are provided in those rows. We also reformatted the date and time columns into a single column containing both the date and the time, and converted them to datetime objects.

In exploratory data analysis, we first drew a scatterplot of the peak power loss in MW over the years and a barplot of the counts of the cause of each outage by category over the years. Then, we visualized the duration of outages with a boxplot and a histogram and concluded that most of the durations are not high.
We also explored the number of customers affected by outages. Later, we carried out an OLS model to check whether TOTAL.CUSTOMERS and CUSTOMERS.AFFECTED have a strong correlation, from which we surprisingly discovered that the correlation between these two variables is very low.

### Assessment of Missingness
Dependence: We selected the column OUTAGE.START, which contains the starting time of each outage. We compared this column with the YEAR column to see if OUTAGE.START is NMAR or MAR. We observed that the null values in OUTAGE.START existed mostly before the year 2007. Therefore, the missing values of OUTAGE.START might be MAR. We ran this simulation 1000 times and got a p-value of 0.00; comparing it with the significance level of 0.05, we conclude that there is significant evidence that OUTAGE.START is MAR, which implies its missingness is dependent on YEAR.

Nondependence: We selected the column OUTAGE.DURATION, which indicates the duration of each outage. We compared this column with U.S.STATE to check whether the outage duration is missing more often in specific US states. By only looking at the columns and missing values, we believe that the missing values have nothing to do with different US states. Thus, we performed a permutation test to compare the TVD between the null values of OUTAGE.DURATION and the (shuffled / non-shuffled) columns of U.S.STATE. After running this simulation 1000 times and getting a p-value of 0.251, we concluded that the missingness of OUTAGE.DURATION does not depend on U.S.STATE.

### Hypothesis Test
Our test hypotheses are:
H0: Average power outage duration is equal when it occurs during the day and night.
H1: Average power outage duration is longer when it occurs during the night (8 pm to 4 am).

We set the significance level to 0.05 to decide whether we should reject our null hypothesis.
We perform a hypothesis test by computing 1000 test statistics in which we randomly shuffle the day and night labels. We obtain a p-value of 0.127 and, comparing it with the significance level of 0.05, we fail to reject the null hypothesis and conclude that there is not enough evidence to say that the average power outage duration is longer when it occurs during the night.

# Code

```
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
%matplotlib inline
%config InlineBackend.figure_format = 'retina' # Higher resolution figures
```

### Cleaning and EDA

It is vital for us to do data cleaning before performing EDA and further testing since the original data contains invalid column names and some invalid rows.

```
#Read in Excel file
df = pd.read_excel('outage.xlsx')
df.head()
# Get all the variables
variables = df.loc[4]
# Set the column names to variables
df.columns = variables
# Extract the units for the variables
units = df.loc[5].values[1:]
# Drop the useless columns
df = df.drop([0, 1, 2, 3, 4, 5])
df = df.drop(columns = 'variables')
# Reset the index
df = df.reset_index(drop = True)
# Combine OUTAGE START DATE and OUTAGE START TIME to one column
df['OUTAGE.START'] = pd.to_datetime(df['OUTAGE.START.DATE'].apply(lambda x: str(x)) + " " + df['OUTAGE.START.TIME'].apply(lambda x: str(x)), errors='coerce')
# Combine OUTAGE RESTORATION DATE and OUTAGE RESTORATION TIME to one column
df['OUTAGE.RESTORATION'] = pd.to_datetime(df['OUTAGE.RESTORATION.DATE'].apply(lambda x: str(x)) + " " + df['OUTAGE.RESTORATION.TIME'].apply(lambda x: str(x)), errors='coerce')
# Drop the original columns
df = df.drop(columns = ['OUTAGE.START.DATE', 'OUTAGE.START.TIME', 'OUTAGE.RESTORATION.DATE','OUTAGE.RESTORATION.TIME' ])
df.head(10)
```

We now draw a boxplot to see how long the outage durations usually were.
```
# Boxplot for outage duration
sns.boxplot(x = df['OUTAGE.DURATION'])
```

From the boxplot above, we can see that most of the data cluster at the left end of the plot. However, the range of the data is significantly large due to the outliers. To get a better look at what the distribution of the data looks like without these outliers, we now plot a histogram of the same dataset without them.

```
# Histogram omitting outliers
sns.histplot(data=df[df['OUTAGE.DURATION'] < 25000], x="OUTAGE.DURATION")
```

The histogram clearly shows a right-skewed distribution, which demonstrates that the duration of an outage is usually not that long.

Noticing that many of the outage durations are not long, we would like to see how these outages affect the customers.

```
# Boxplot for customer effect
sns.boxplot(x = df['CUSTOMERS.AFFECTED'])
```

Again, the boxplot above shows that there are a lot of outliers, with most of the dataset clustered on the left-hand side of the boxplot. Thus, we plot a histogram for the same dataset but omitting the outliers.

```
# Histogram omitting outliers
sns.histplot(data=df[df['CUSTOMERS.AFFECTED'] < 10000], x="CUSTOMERS.AFFECTED")
```

Again, we can see a right-skewed distribution in this histogram, which implies that the number of customers affected by an outage was usually not significantly large.

Digging deeper, we performed ordinary least squares to see if TOTAL.CUSTOMERS and CUSTOMERS.AFFECTED have some correlation.
OLS estimates a multi-variate regression model and provides a variety of fit-statistics.

```
# Import OLS statsmodel
import statsmodels.api as sm
# Drop nan values in both TOTAL.CUSTOMERS and CUSTOMERS.AFFECTED columns
cleaned_df = df.dropna(subset=['TOTAL.CUSTOMERS', 'CUSTOMERS.AFFECTED'])
Y = np.array(cleaned_df['CUSTOMERS.AFFECTED'],dtype=float)
X = np.array(cleaned_df['TOTAL.CUSTOMERS'], dtype=float)
X = sm.add_constant(X)
model = sm.OLS(Y,X)
results = model.fit()
print(results.summary())
```

We find that these two variables have very little correlation, with an R-squared of 0.028, which is quite surprising since we originally believed that if there are more total customers, then the number of customers affected by an outage should be higher.

Below, we plotted a scatterplot to check how the peak demand loss varies over the years and found that the majority of the outages are relatively small in comparison to a few rare dramatic events, which matches our expectation.

```
eda1 = df[["OUTAGE.START", "DEMAND.LOSS.MW", 'CAUSE.CATEGORY', 'CLIMATE.REGION']]
dims = (20, 15)
fig, ax = plt.subplots(figsize=dims)
sns.scatterplot(data = eda1, x="OUTAGE.START", y="DEMAND.LOSS.MW" , hue = 'CAUSE.CATEGORY', style="CLIMATE.REGION", size = "DEMAND.LOSS.MW", ax=ax).set_title('Peak Demand loss vs Time')
```

We then drew a barplot of the frequency of power outages by category over the years to check whether there are any interesting trends, and we indeed found something very interesting: before 2011, severe weather was the main cause, and after 2010, intentional attacks became another main cause.
```
eda2 = df[["OUTAGE.START", 'CAUSE.CATEGORY']]
eda2 = eda2.dropna()
eda2['year'] = eda2["OUTAGE.START"].dt.year
eda2['count'] = eda2.groupby(['CAUSE.CATEGORY', 'year'])['CAUSE.CATEGORY'].transform('count')
dims = (20, 15)
fig, ax = plt.subplots(figsize=dims)
sns.barplot(data = eda2, x="year", y="count", hue="CAUSE.CATEGORY", ax=ax).set_title('What causes a Power Outage')
```

### Assessment of Missingness

```
# check how many columns have missingness over the whole dataframe
df_missingness = df.loc[:, df.isnull().any()]
len(df_missingness)
# enumerate all columns with missing values
df_missingness.columns
# check the missingness in column 'OUTAGE.START'
time_missingness = df.loc[pd.isnull(df['OUTAGE.START'])]
len(time_missingness)
# check the missingness in column 'YEAR'
time_missingness = df.loc[pd.isnull(df['YEAR'])]
len(time_missingness)
```

As we found that we only had 9 missing values for the 'OUTAGE.START' column, which indicates the starting time, we decided to just drop them for our analysis. Here, we believe it is MAR, as the year of the entry might help better predict whether the column is missing any values. The reason for that, we speculate, is that even though the missing entries (0.6%) are a small portion of the column's values and the missingness really does seem to depend on the year, it is still good to check numerically how likely this event is. We make the following hypotheses:

# H0: The null values occur equally frequently before 2007 (2000 - 2006) and after 2007
# H1: The null values occur mainly before 2007 (2000 - 2006)
# (with significance level = 0.05)

We randomly permute the YEAR column and count null values to check how many null values would normally land before 2007.
``` miss1 = df[['YEAR', 'OUTAGE.START']] miss1['time_isnull'] = miss1['OUTAGE.START'].isnull() miss1 n_repetitions = 1000 obs = len(df.loc[pd.isnull(df['OUTAGE.START'])]) miss1['YEAR'] = miss1['YEAR'].astype(int) before_2007 = [] for _ in range(n_repetitions): # shuffle the Year shuffled_years = ( miss1['YEAR'] .sample(replace=False, frac=1) .reset_index(drop=True) ) # put them in a table shuffled = ( miss1 .assign(**{'Shuffled Year': shuffled_years}) ) # compute the count of null that belong to year before 2007 count = sum((shuffled['Shuffled Year']<2007) & shuffled['time_isnull']) # add it to the list of results before_2007.append(count) #: visualize pd.Series(before_2007).plot(kind='hist', density=True, alpha=0.8) plt.scatter(obs, 0.01, color='red', s=40, zorder=2); p_value = np.count_nonzero(np.array(before_2007) >= obs) / n_repetitions p_value ``` From the graph, we can conclude that this is two is highly likely dependent as p-value is 0 when we only ran 1000 simulations. ``` # Then, we check the missingness in column 'OUTAGE.DURATION' time_missingness = df.loc[pd.isnull(df['OUTAGE.DURATION'])] len(time_missingness) ``` As we found that we had 58 missing values for the 'OUTAGE.DURATION' column, which indicates the duration of each outage. Here, we believe that the missingness of OUTAGE.DURATION has nothing to do with the column 'U.S.STATE'. Specifically saying, we are checking if the outage duration is missing more in specific US States. Below, we run permuation check to verify our claim. 
``` #CHECK FOR NO DEPDENDENCE in column 'OUTAGE.DURATION' def perm4missing(df, col, N): dist = df.assign(is_null = df['OUTAGE.DURATION'].isnull()).pivot_table(index = 'is_null', columns = col, aggfunc='size').apply(lambda x:x/x.sum(), axis=1) #dist.T.plot(kind='bar') tvds = [] #tvds array #simulations for _ in range(N): #shuffle the col column shuffled_col = df[col].sample(replace=False, frac=1).reset_index(drop=True) #put into a new table with shuffled values shuffled = df.assign(**{col: shuffled_col, 'is_null':df['OUTAGE.DURATION'].isnull()}) #compute TVD shuffled = shuffled.pivot_table(index='is_null', columns = col, aggfunc='size').apply(lambda x:x/x.sum(), axis=1) #shuffled.T.plot(kind='bar') tvd=shuffled.diff().iloc[-1].abs().sum()/2 #append to array tvds.append(tvd) #calculate observed value obs = dist.diff().iloc[-1].abs().sum()/2 #calculate pvalue pval = np.mean(tvds >= obs) #VISUALIZE PVALUE plot2 = plt.scatter(obs,0,color='red', s=100) plot = pd.Series(tvds).plot(kind='hist', density=True, alpha=.8, title = 'P-Value: Is OUTAGE.DURATION missingness dependent on U.S._STATE') return pval perm4missing(df, 'U.S._STATE', 1000) ``` From the graph, we can conclude that OUTAGE.DURATION is highly not dependent to U.S.STATE as p-value is 0.251 (>0.05) when we ran 1000 simulations. We can also check which other variables were dependent or not dependent to the missingness of OUTAGE.DURATION. ``` # get the column names of all, except outage columns = df.columns.tolist() columns.remove('OUTAGE.DURATION') result = {} for col in columns: res = perm4missing(df, col, 1000) result[col] = res ``` As we can see from below, many other variables were also not dependent to the missingness of OUTAGE.DURATION. 
``` # print out all column with p-value larger than 0.05 which indicates nondependence for col, p in result.items(): if p > 0.05: print(col + ' has p-value ' + str(p)) ``` ### Hypothesis Test We set up a hypothesis test with a significance level of 0.05 saying that: # H0: Average power outages duration is equal when it occurs during the day and night. # H1: Average power outages duration is longer when it occurs during the night. (8 pm to 4 am) ``` # Drop nan values in OUTAGE.START and OUTAGE.DURATION data = df[['OUTAGE.START', 'OUTAGE.DURATION']].dropna() # Adding a new column with the outage started hour data['Time_of_day'] = data['OUTAGE.START'].dt.hour # Convert the type of year to int data['OUTAGE.DURATION'] = data['OUTAGE.DURATION'].astype('int') data # function to convert hour into 'time of the day category' def convertTime(x): if (x <= 4) or (x >= 20): return 'Night' else: return 'Day' # convert hour into 'time of the day category' data['Time_of_day'] = data['Time_of_day'].apply(convertTime) data n_repetitions = 1000 obs = np.diff(data.groupby('Time_of_day')['OUTAGE.DURATION'].mean())[0] differences = [] for _ in range(n_repetitions): # shuffle the Time_of_day shuffled_labels = ( data['Time_of_day'] .sample(replace=False, frac=1) .reset_index(drop=True) ) # put them in a table shuffled = ( data .assign(**{'Shuffled Labels': shuffled_labels}) ) # compute the count of null that belong to year before 2007 dif = np.diff(shuffled.groupby('Shuffled Labels')['OUTAGE.DURATION'].mean())[0] # add it to the list of results differences.append(dif) #: visualize pd.Series(differences).plot(kind='hist', density=True, alpha=0.8) plt.scatter(obs, 0.0, color='red', s=40, zorder=2); p_value = np.count_nonzero(np.array(differences) >= obs) / n_repetitions p_value ``` p-value is bigger than 0.05, hence we fail to reject the null hypothesis
github_jupyter
# Web Scraping using Selenium and Beautiful Soup Selenium is a browser automation tool that can not only be used for testing, but also for many other purposes. It's especially useful because using it we can also scrape data that are client side rendered. Installation: ``` !pip install selenium ``` or ``` !conda install selenium ``` To use Selenuim a WebDriver for your favorite web browser must also be installed. The Firefox WebDriver(GeckoDriver) can be installed by going to [this page](https://github.com/mozilla/geckodriver/releases/) and downloading the appropriate file for your operating system. After the download has finished the file has to be extracted. Now the file can either be [added to path](https://www.architectryan.com/2018/03/17/add-to-the-path-on-windows-10/) or copied into the working directory. I chose to copy it to my working directory because I’m not using it that often. Importing: ``` from selenium import webdriver from selenium.webdriver.common.keys import Keys from selenium.webdriver.support.ui import WebDriverWait from bs4 import BeautifulSoup import pandas as pd import re import os # website url base_url = "https://programmingwithgilbert.firebaseapp.com/" videos_url = "https://programmingwithgilbert.firebaseapp.com/videos/keras-tutorials" ``` Trying to load the data using urllib. 
This won't get any data because it can't load data which is loaded after the document.onload function ``` import urllib.request page = urllib.request.urlopen(videos_url) soup = BeautifulSoup(page, 'html.parser') soup ``` Now we can create a firefox session and navigate to the base url of the video section ``` # Firefox session driver = webdriver.Firefox() driver.get(videos_url) driver.implicitly_wait(100) ``` To navigate to the specifc pages we nned to get the buttons which a a text of "Watch" and then navigate to each side, scrape the data, save it and go back to the main page ``` num_links = len(driver.find_elements_by_link_text('Watch')) code_blocks = [] for i in range(num_links): # navigate to link button = driver.find_elements_by_class_name("btn-primary")[i] button.click() # get soup element = WebDriverWait(driver, 10).until(lambda x: x.find_element_by_id('iframe_container')) tutorial_soup = BeautifulSoup(driver.page_source, 'html.parser') tutorial_code_soup = tutorial_soup.find_all('div', attrs={'class': 'code-toolbar'}) tutorial_code = [i.getText() for i in tutorial_code_soup] code_blocks.append(tutorial_code) # go back to initial page driver.execute_script("window.history.go(-1)") code_blocks code_blocks[1] ``` After scraping all the needed data we can close the browser session and save the results into .txt files ``` driver.quit() for i, tutorial_code in enumerate(code_blocks): with open('code_blocks{}.txt'.format(i), 'w') as f: for code_block in tutorial_code: f.write(code_block+"\n") ```
github_jupyter
``` from utils import * import tensorflow as tf from sklearn.cross_validation import train_test_split import time trainset = sklearn.datasets.load_files(container_path = 'data', encoding = 'UTF-8') trainset.data, trainset.target = separate_dataset(trainset,1.0) print (trainset.target_names) print (len(trainset.data)) print (len(trainset.target)) ONEHOT = np.zeros((len(trainset.data),len(trainset.target_names))) ONEHOT[np.arange(len(trainset.data)),trainset.target] = 1.0 train_X, test_X, train_Y, test_Y, train_onehot, test_onehot = train_test_split(trainset.data, trainset.target, ONEHOT, test_size = 0.2) concat = ' '.join(trainset.data).split() vocabulary_size = len(list(set(concat))) data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size) print('vocab from size: %d'%(vocabulary_size)) print('Most common words', count[4:10]) print('Sample data', data[:10], [rev_dictionary[i] for i in data[:10]]) GO = dictionary['GO'] PAD = dictionary['PAD'] EOS = dictionary['EOS'] UNK = dictionary['UNK'] def embed_seq(inputs, vocab_size=None, embed_dim=None, zero_pad=False, scale=False): lookup_table = tf.get_variable('lookup_table', dtype=tf.float32, shape=[vocab_size, embed_dim]) if zero_pad: lookup_table = tf.concat((tf.zeros([1, embed_dim]), lookup_table[1:, :]), axis=0) outputs = tf.nn.embedding_lookup(lookup_table, inputs) if scale: outputs = outputs * (embed_dim ** 0.5) return outputs def learned_positional_encoding(inputs, embed_dim, zero_pad=False, scale=False): T = inputs.get_shape().as_list()[1] outputs = tf.range(T) outputs = tf.expand_dims(outputs, 0) outputs = tf.tile(outputs, [tf.shape(inputs)[0], 1]) return embed_seq(outputs, T, embed_dim, zero_pad=zero_pad, scale=scale) def layer_norm(inputs, epsilon=1e-8): mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True) normalized = (inputs - mean) / (tf.sqrt(variance + epsilon)) params_shape = inputs.get_shape()[-1:] gamma = tf.get_variable('gamma', params_shape, tf.float32, 
tf.ones_initializer()) beta = tf.get_variable('beta', params_shape, tf.float32, tf.zeros_initializer()) return gamma * normalized + beta def pointwise_feedforward(inputs, num_units=[None, None], activation=None): outputs = tf.layers.conv1d(inputs, num_units[0], kernel_size=1, activation=activation) outputs = tf.layers.conv1d(outputs, num_units[1], kernel_size=1, activation=None) outputs += inputs outputs = layer_norm(outputs) return outputs class Model: def __init__(self, dict_size, dimension_input, dimension_output, seq_len, learning_rate, num_heads=8, attn_windows=range(1, 6)): self.size_layer = dimension_input self.num_heads = num_heads self.seq_len = seq_len self.X = tf.placeholder(tf.int32, [None, seq_len]) self.Y = tf.placeholder(tf.float32, [None, dimension_output]) encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, dimension_input], -1, 1)) feed = tf.nn.embedding_lookup(encoder_embeddings, self.X) for i, win_size in enumerate(attn_windows): with tf.variable_scope('attn_masked_window_%d' % win_size): feed = self.multihead_attn(feed, self.window_mask(win_size)) feed += learned_positional_encoding(feed, dimension_input) with tf.variable_scope('multihead'): feed = self.multihead_attn(feed, None) with tf.variable_scope('pointwise'): feed = pointwise_feedforward(feed, num_units=[4*dimension_input, dimension_input], activation=tf.nn.relu) self.logits = tf.layers.dense(feed, dimension_output)[:,-1] self.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits = self.logits, labels = self.Y)) self.optimizer = tf.train.AdamOptimizer(learning_rate = learning_rate).minimize(self.cost) self.correct_pred = tf.equal(tf.argmax(self.logits, 1), tf.argmax(self.Y, 1)) self.accuracy = tf.reduce_mean(tf.cast(self.correct_pred, tf.float32)) def multihead_attn(self, inputs, masks): T_q = T_k = inputs.get_shape().as_list()[1] Q_K_V = tf.layers.dense(inputs, 3*self.size_layer, tf.nn.relu) Q, K, V = tf.split(Q_K_V, 3, -1) Q_ = tf.concat(tf.split(Q, 
self.num_heads, axis=2), axis=0) K_ = tf.concat(tf.split(K, self.num_heads, axis=2), axis=0) V_ = tf.concat(tf.split(V, self.num_heads, axis=2), axis=0) align = tf.matmul(Q_, tf.transpose(K_, [0,2,1])) align = align / np.sqrt(K_.get_shape().as_list()[-1]) if masks is not None: paddings = tf.fill(tf.shape(align), float('-inf')) align = tf.where(tf.equal(masks, 0), paddings, align) align = tf.nn.softmax(align) outputs = tf.matmul(align, V_) outputs = tf.concat(tf.split(outputs, self.num_heads, axis=0), axis=2) outputs += inputs return layer_norm(outputs) def window_mask(self, h_w): masks = np.zeros([self.seq_len, self.seq_len]) for i in range(self.seq_len): if i < h_w: masks[i, :i+h_w+1] = 1. elif i > self.seq_len - h_w - 1: masks[i, i-h_w:] = 1. else: masks[i, i-h_w:i+h_w+1] = 1. masks = tf.convert_to_tensor(masks) return tf.tile(tf.expand_dims(masks,0), [tf.shape(self.X)[0]*self.num_heads, 1, 1]) embedded_size = 128 dimension_output = len(trainset.target_names) learning_rate = 1e-3 maxlen = 50 batch_size = 128 tf.reset_default_graph() sess = tf.InteractiveSession() model = Model(vocabulary_size+4,embedded_size,dimension_output,maxlen,learning_rate) sess.run(tf.global_variables_initializer()) EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 5, 0, 0, 0 while True: lasttime = time.time() if CURRENT_CHECKPOINT == EARLY_STOPPING: print('break epoch:%d\n'%(EPOCH)) break train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0 for i in range(0, (len(train_X) // batch_size) * batch_size, batch_size): batch_x = str_idx(train_X[i:i+batch_size],dictionary,maxlen) acc, loss, _ = sess.run([model.accuracy, model.cost, model.optimizer], feed_dict = {model.X : batch_x, model.Y : train_onehot[i:i+batch_size]}) train_loss += loss train_acc += acc for i in range(0, (len(test_X) // batch_size) * batch_size, batch_size): batch_x = str_idx(test_X[i:i+batch_size],dictionary,maxlen) acc, loss = sess.run([model.accuracy, model.cost], feed_dict = {model.X : batch_x, model.Y : 
test_onehot[i:i+batch_size]}) test_loss += loss test_acc += acc train_loss /= (len(train_X) // batch_size) train_acc /= (len(train_X) // batch_size) test_loss /= (len(test_X) // batch_size) test_acc /= (len(test_X) // batch_size) if test_acc > CURRENT_ACC: print('epoch: %d, pass acc: %f, current acc: %f'%(EPOCH,CURRENT_ACC, test_acc)) CURRENT_ACC = test_acc CURRENT_CHECKPOINT = 0 else: CURRENT_CHECKPOINT += 1 print('time taken:', time.time()-lasttime) print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'%(EPOCH,train_loss, train_acc,test_loss, test_acc)) EPOCH += 1 logits = sess.run(model.logits, feed_dict={model.X:str_idx(test_X,dictionary,maxlen)}) print(metrics.classification_report(test_Y, np.argmax(logits,1), target_names = trainset.target_names)) ```
github_jupyter
<img align="right" src="images/tf-small.png" width="128"/> <img align="right" src="images/etcbc.png"/> <img align="right" src="images/dans-small.png"/> You might want to consider the [start](search.ipynb) of this tutorial. Short introductions to other TF datasets: * [Dead Sea Scrolls](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/lorentz2020/dss.ipynb), * [Old Babylonian Letters](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/lorentz2020/oldbabylonian.ipynb), or the * [Q'uran](https://nbviewer.jupyter.org/github/annotation/tutorials/blob/master/lorentz2020/quran.ipynb) # Trees The textual objects of the BHSA text are syntactic, but they are not syntax trees. The BHSA is the result of a data-driven parsing strategy with occasional human decisions. It results in functional objects such as sentences, clauses, and phrases, which are built from chunks called sentence-atoms, clause-atoms, and phrase-atoms. There is no deeper nesting of clauses within phrases, or even clauses within clauses or phrases within phrases. Instead, whenever objects are linguistically nested, there is an edge called `mother` between the objects in question. For people who prefer to think in trees, we have unwrapped the `mother` relationship between clauses and made tree structures out of the data. The whole generation process of trees, including the quirks underway, is documented in the notebook [trees.ipynb](https://nbviewer.jupyter.org/github/etcbc/trees/blob/master/programs/trees.ipynb). You see it done there for version 2017. We have used an ordinary Python program to generate trees for all versions of the BHSA: [alltrees.py](https://github.com/etcbc/trees/blob/master/programs/alltrees.py) Those trees are available as a feature on sentence nodes, and you can load those features alongside the BHSA data. Here we show some examples of what you can do with it.
``` %load_ext autoreload %autoreload 2 ``` # Incantation The ins and outs of installing Text-Fabric, getting the corpus, and initializing a notebook are explained in the [start tutorial](start.ipynb). ``` from utils import structure, layout from tf.app import use ``` Note that we load the trees module. We also load the morphology of Open Scriptures for example usage later on. ``` A = use("bhsa", mod="etcbc/trees/tf,etcbc/bridging/tf", hoist=globals()) ``` We first inspect the nature of these features, lets pick the first, last and middle sentence of the Hebrew Bible ``` sentences = F.otype.s("sentence") examples = (sentences[0], sentences[len(sentences) // 2], sentences[-1]) ``` We examine feature `tree`: ``` for s in examples: print(F.tree.v(s)) ``` Now `treen`: ``` for s in examples: print(F.treen.v(s)) ``` The structure of the trees is the same, but `treen` has numbers between braces in the tags of the nodes. These numbers are the Text-Fabric nodes of the sentences, clauses and phrases that the nodes of the tree correspond to. ## Using trees These strings are not very pleasant to the eye. For one thing, we see numbers instead of words. They also seem a bit unwieldy to integrate with the usual text-fabric business. But nothing is farther from the truth. We show how to * produce a multiline view * see the words (in several representations) * add a gloss * add morphological data from an other project (**Open Scriptures**) Honesty compels us to note that we make use of a bunch of auxiliary functions in an accompanying `utils` pacckage: ``` passage = ("Job", 3, 16) passageStr = "{} {}:{}".format(*passage) verse = T.nodeFromSection(passage) sentence = L.d(verse, otype="sentence")[0] firstSlot = L.d(sentence, otype="word")[0] stringTree = F.tree.v(sentence) print(f"{passageStr} - first word = {firstSlot}\n\ntree =\n{stringTree}") ``` ## Parsing Key to effective manipulation of tree strings is to parse them into tree structures: lists of lists. 
Here we use the generic utility `structure()`: ``` tree = structure(stringTree) tree ``` ## Apply layout Having the real tree structure in hand, we can layout it in all kinds of ways. We use the generic utility `layout()` to display it a bit more friendly and to replace the numbers by real Text-Fabric slot numbers: ``` print(layout(tree, firstSlot, str)) ``` That opens up the way to get the words in. The third argument of `layout()` above is `str`, which is a function that is applied to the slot numbers. It returns those numbers as string, and this is what ends up in the layout. ## Fillin the words We can pass any function, why not the function that looks up the word? Remember that `F.g_word_utf8.v` is a function that returns the full Hebrew word given a slot node. ``` print(layout(tree, firstSlot, F.g_word_utf8.v)) ``` ## Add a gloss ``` def gloss(n): lexNode = L.u(n, otype="lex")[0] return f'{F.g_word_utf8.v(n)} "{F.gloss.v(lexNode)}"' print(layout(tree, firstSlot, gloss)) ``` ## Morphology In 2018 I compared the morphology of Open Scriptures with that of the BHSA. See [brdiging](https://nbviewer.jupyter.org/github/ETCBC/bridging/blob/master/programs/BHSAbridgeOSM.ipynb). As a by-product I saved their morphology as a Text-Fabric feature on words. So we can add it to our trees. We also show the nesting depth in the resulting tree. ``` def osmPhonoGloss(n): lexNode = L.u(n, otype="lex")[0] return ( f'({F.osm.v(n)}) {F.g_word_utf8.v(n)} [{F.phono.v(n)}] "{F.gloss.v(lexNode)}"' ) print(layout(tree, firstSlot, osmPhonoGloss, withLevel=True)) ``` ## Taking it further We saw how the fact that we have slot numbers in our tree structures opens up all kinds of possibilities for further processing. However, so far, we have only made use of slot nodes. What if we want to draw in side information for the non-terminal nodes? That is where the feature `treen` comes in. 
It has node information for all non-terminals between braces, so it is fairly easy to write new `structure()` and `layout()` functions that exploit them. # All steps * **[start](start.ipynb)** your first step in mastering the bible computationally * **[display](display.ipynb)** become an expert in creating pretty displays of your text structures * **[search](search.ipynb)** turbo charge your hand-coding with search templates * **[exportExcel](exportExcel.ipynb)** make tailor-made spreadsheets out of your results * **[share](share.ipynb)** draw in other people's data and let them use yours * **[export](export.ipynb)** export your dataset as an Emdros database * **[annotate](annotate.ipynb)** annotate plain text by means of other tools and import the annotations as TF features * **[map](map.ipynb)** map somebody else's annotations to a new version of the corpus * **[volumes](volumes.ipynb)** work with selected books only * **trees** work with the BHSA data as syntax trees CC-BY Dirk Roorda
github_jupyter
``` from google.colab import drive drive.mount('/content/gdrive') ``` Model for RAVDESS dataset using 1d Convolutions ``` import librosa def noise(data): """ Adding White Noise. """ # you can take any distribution from https://docs.scipy.org/doc/numpy-1.13.0/reference/routines.random.html noise_amp = 0.005*np.random.uniform()*np.amax(data) data = data.astype('float64') + noise_amp * np.random.normal(size=data.shape[0]) return data from IPython.display import Audio X, sample_rate = librosa.load('ravdess_data/Actor_01/03-01-01-01-01-01-01.wav', res_type='kaiser_fast') X1 = noise(X) plt.figure(figsize=(14, 3)) librosa.display.waveplot(X, sr=sample_rate) import librosa.display plt.figure(figsize=(14, 3)) librosa.display.waveplot(X1, sr=sample_rate) ``` Loading the RAVDESS speech actors data ``` import os import time import librosa lst_saral = [] lst_transpose = [] clear_lst = [] start_time = time.time() for folder in os.listdir('ravdess_data/'): for file in os.listdir(f'{"ravdess_data/"}{folder}'): try: X, sample_rate = librosa.load(f'{"ravdess_data/"}{folder}{"/"}{file}', res_type='kaiser_fast',duration=2.5,sr=22050*2,offset=0.5) mfccs = librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=20) #Excluding the Calm Data file = int(file[7:8])-1 if file>0: file-=1 arr = mfccs, file lst.append(arr) except ValueError: continue print("--- Data loaded. 
Loading time: %s seconds ---" % (time.time() - start_time)) ravdess_X, ravdess_y = zip(*lst_transpose) # import pandas as pd # lst = [] # df = pd.DataFrame(columns=['feature']) # labels = [] # bookmark=0 # for folder in os.listdir('ravdess_data/'): # for file in os.listdir(f'{"ravdess_data/"}{folder}'): # X, sample_rate = librosa.load(f'{"ravdess_data/"}{folder}{"/"}{file}', res_type='kaiser_fast',) # sample_rate = np.array(sample_rate) # mfccs = librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13) # feature = mfccs # file = int(file[7:8])-1 # if file>0: # file-=1 # #[float(i) for i in feature] # #feature1=feature[:135] # labels.append(file) # df.loc[bookmark] = [feature] # bookmark=bookmark+1 df3 = pd.DataFrame(df['feature'].values.tolist()) df3 = df3.fillna(0) features = df3.iloc[:, :-1] # ravdess_X, ravdess_y = drop_calm(ravdess_X, ravdess_y) ravdess_X = np.asarray(features) ravdess_y = np.asarray(labels) ravdess_X = np.expand_dims(ravdess_X, axis=2) ravdess_train_X, ravdess_valid_X, ravdess_train_y, ravdess_valid_y = train_test_split(ravdess_X,ravdess_y,test_size=0.2,random_state=42) from keras.models import Sequential from keras.layers import LSTM, Dense import numpy as np # Expected input batch shape: (batch_size, timesteps, data_dim) # Note that we have to provide the full batch_input_shape since the network is stateful. # the sample of index i in batch k is the follow-up for the sample i in batch k-1. 
model_ravdess = Sequential() model_ravdess.add(LSTM(32)) model_ravdess.add(BatchNormalization()) model_ravdess.add(Dense(16, activation='softmax')) model_ravdess.add(Dense(7, activation='softmax')) opt = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) import keras import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense, Embedding from keras.utils import to_categorical from keras.layers import Input, Flatten, Dropout, Activation, BatchNormalization from keras.layers import Conv1D, MaxPooling1D from keras.models import Model from keras.callbacks import ModelCheckpoint from sklearn.model_selection import train_test_split model_ravdess = Sequential() model_ravdess.add(Conv1D(32, 5,strides=2,padding='same', input_shape=(215,1))) model_ravdess.add(Activation('relu')) model_ravdess.add(BatchNormalization()) # model_ravdess.add(MaxPooling1D(pool_size=(8))) model_ravdess.add(Conv1D(64, 5,strides=2,padding='same',)) model_ravdess.add(Activation('relu')) model_ravdess.add(BatchNormalization()) model_ravdess.add(Flatten()) model_ravdess.add(Dense(7)) model_ravdess.add(Activation('softmax')) # opt = keras.optimizers.rmsprop(lr=0.0001, rho=0.9, epsilon=None, decay=0.0) opt = keras.optimizers.Adam(lr=0.00005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) model_ravdess.summary() np.any(np.isnan(ravdess_X)) model_ravdess.compile(loss='sparse_categorical_crossentropy', optimizer=opt, metrics=['accuracy']) cnnhistory_ravdess=model_ravdess.fit(ravdess_train_X, ravdess_train_y, batch_size=16, epochs=10, validation_data=(ravdess_valid_X, ravdess_valid_y)) model_ravdess.save('ravdess_clean_model.h5') plt.plot(cnnhistory_ravdess.history['acc']) plt.plot(cnnhistory_ravdess.history['val_acc']) plt.title('model accuracy') plt.ylabel('acc') plt.xlabel('epoch') plt.legend(['train', 'test'], 
loc='upper left') plt.show() import pandas as pd test = [] bookmark=0 for file in os.listdir(f'{"test_data/"}'): X, sample_rate = librosa.load(f'{"test_data/"}{file}', res_type='kaiser_fast',duration=2.5,sr=22050*2,offset=0.5) sample_rate = np.array(sample_rate) mfccs = librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13) feature = mfccs # file = int(file[7:8])-1 # if file>0: # file-=1 #[float(i) for i in feature] #feature1=feature[:135] file = 0 test.append((feature,file)) test_X, test_y = zip(*test) test_X = np.asarray(test_X) test_X.shape test_X = test_X[:,:215] test_X.shape test_X = np.expand_dims(test_X,axis=2) pred1=model_ravdess.predict_classes(test_X) pred1 # Emotion (0 = Neutral, 1 = happy, 2 = sad, 3 = angry, 4 = fearful, 5 = disgust, 6 = surprised). ``` Model for TESS dataset using 1d Convolutions Ensemble Learning on RAVDESS model and TESS model (Conv1d) ``` !cp 'gdrive/My Drive/Filtered Data/41.wav' test_data test = np.array([]) labels = [] i =0 t = pd.DataFrame() for file in os.listdir('test_data'): X, sample_rate = librosa.load(f'{"test_data/"}{file}', res_type='kaiser_fast',duration=2.5,sr=22050*2,offset=0.5) mfccs = librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=13) result = np.zeros((13,216)) result[:mfccs.shape[0],:mfccs.shape[1]] = mfccs result.shape arr = mfccs.reshape((2,13,216)) arr.shape test_X, test_y = zip(*test) test_X = np.asarray(test_X) test_X = np.expand_dims(test_X,axis=2) pred1=model_ravdess.predict_proba(test_X) pred2=model_tess.predict_proba(test_X) finalpred=(pred1*0.1+pred2*0.9) import keras import numpy as np import matplotlib.pyplot as plt import tensorflow as tf from keras.preprocessing import sequence from keras.models import Sequential from keras.layers import Dense, Embedding from keras.utils import to_categorical from keras.layers import Input, Flatten, Dropout, Activation, BatchNormalization from keras.layers import Conv2D, MaxPooling1D from keras.models import Model from keras.callbacks import ModelCheckpoint from 
sklearn.model_selection import train_test_split model_tess = Sequential() model_tess.add(Conv2D(32, 5,strides=2,padding='same', input_shape=(13,216,1))) model_tess.add(Activation('relu')) model_tess.add(BatchNormalization()) # model_tess.add(MaxPooling1D(pool_size=(8))) model_tess.add(Conv2D(64, 5,strides=2,padding='same',)) model_tess.add(Activation('relu')) model_tess.add(BatchNormalization()) model_tess.add(Conv2D(64, 5,strides=2,padding='same',)) model_tess.add(Activation('relu')) model_tess.add(BatchNormalization()) model_tess.add(Flatten()) model_tess.add(Dense(7)) model_tess.add(Activation('softmax')) # opt = keras.optimizers.rmsprop(lr=0.00005, rho=0.9, epsilon=None, decay=0.0) opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False) model_tess.summary() ```
github_jupyter
# Solución del Reto Equipo 9 ``` # Importar librerías import pandas as pd import matplotlib.pyplot as plt import seaborn as sns from collections import Counter # Asignar 'data' al documento CSV data = pd.read_csv("covid19_tweets.csv") # Followers # Max número de seguidores: 13 892 840 sub_df_followers = data[data['user_followers'] > 12000000] sub_df_followers[['user_name','user_followers','hashtags']].head() # Usuarios con más seguidores counter1 = Counter(sub_df_followers.user_name) frequencies1 = [] for user, count in counter1.most_common(5): print ('%s: %7d' % (user, count)) for i in range(count+1): frequencies1.append(user) plt.hist(frequencies1, histtype ='bar', color = 'pink') plt.title('Número de tweets de los top 5 más seguidos \n', fontweight ="bold") plt.ylabel('Número de tweets') plt.show() # Counter para la sub dataframe que muestra los hashtags más comunes counter2 = Counter(sub_df_followers.hashtags) frequencies2 = [] # Imprimir ordenados los top 5 usuarios con más seguidores for hashtag, count in counter2.most_common(5): print ('%s: %7d' % (hashtag, count)) # Agregarlos a una lista para graficar el histograma después for i in range(count+1): frequencies2.append(hashtag) plt.hist(frequencies2, histtype ='bar', color = 'pink') plt.title('Hashtags usados por los usuarios con más seguidores \n', fontweight ="bold") plt.ylabel('Número de veces usado') plt.show() # Likes # Max número de likes: 2 047 197 # Sub dataframe de followers que sean mayores a 1 000 000 sub_df_likes = data[data['user_favourites'] > 1000000] # Sub dataframe de los más seguidos con su respectivo username, followers y hashtags sub_df_likes[['user_name','user_favourites','hashtags']] counter3 = Counter(sub_df_likes.user_name) frequencies3 = [] for user, count in counter3.most_common(5): print ('%s: %7d' % (user, count)) for i in range(count+1): frequencies3.append(user) plt.hist(frequencies3, histtype ='bar', color = 'pink') plt.title('Tweets con más likes \n', fontweight ="bold") 
plt.ylabel('Número de tweets') plt.show() # Likes # Max número de likes: 2 047 197 # Sub dataframe de followers que sean mayores a 1 000 000 sub_df_likes = data[data['user_favourites'] > 1000000] # Sub dataframe de los más seguidos con su respectivo username, followers y hashtags sub_df_likes[['user_name','date']] counter4 = Counter(data.user_verified) for user, count in counter4.most_common(5): print ('%s: %7d' % (user, count)) falses = 65082 trues = 9354 frequencies4 = [] for i in range(falses+1): frequencies4.append("No verificados") for i in range(trues+1): frequencies4.append("Verificados") plt.hist(frequencies4, histtype ='bar', color = 'pink') plt.title('Verified users \n', fontweight ="bold") plt.ylabel('Número de usuarios') plt.show() # Verified sub_df_verified = data[data['user_verified'] == True] sub_df_verified[['user_name','user_favourites','text']].head() verified_liked = sub_df_verified[sub_df_verified['user_favourites'] > 300000] verified_liked[['user_name','user_favourites', 'text']] counter5 = Counter(verified_liked.user_name) frequencies5 = [] for user, count in counter5.most_common(5): print ('%s: %7d' % (user, count)) for i in range(count+1): frequencies5.append(user) plt.hist(frequencies5, histtype ='bar', color = 'pink') plt.title('Tweets de usuarios verificados con más likes \n', fontweight ="bold") plt.ylabel('Número de tweets') plt.show() # Non Verified sub_df_nonverified = data[data['user_verified'] == False] sub_df_nonverified[['user_name','user_favourites','text']].head() nonverified_liked = sub_df_nonverified[sub_df_nonverified['user_favourites'] > 700000] nonverified_liked[['user_name','user_favourites', 'text']] counter6 = Counter(nonverified_liked.user_name) frequencies6 = [] for user, count in counter6.most_common(5): print ('%s: %7d' % (user, count)) for i in range(count+1): frequencies6.append(user) plt.hist(frequencies6, histtype ='bar', color = 'pink') plt.title('Tweets con más likes de usuarios no verificados \n', fontweight 
="bold") plt.ylabel('Número de tweets') plt.show() followers = data['user_followers'] likes = data['user_favourites'] columns = [followers, likes] fig = plt.figure(figsize =(10, 7)) plt.boxplot(columns) plt.title('Box and whiskers de seguidores y likes \n', fontweight ="bold") plt.xlabel('Seguidores Likes') plt.show() # Mostrar la figura pearsoncorr = data.corr(method ='pearson') sns.heatmap(pearsoncorr,xticklabels = pearsoncorr.columns, yticklabels = pearsoncorr.columns,cmap = 'pink',annot=True) # Ubicar las 10 localidades más repetidas df_data = pd.DataFrame(data) locations = df_data['user_location'].value_counts() locations[0:10] # Graficar las 10 localidades más repetidas fig1, ax1 = plt.subplots() locations[0:10].plot(kind='pie',title="Locations with most of the tweet accounts") # Tabla dinámica con índice source para determinar el tamaño de cada una sources = data.pivot_table(index=['source'], aggfunc='size') sources.sort_values() # Graficar fuentes más usadas por usuarios nameLabels_x = ["Web App", "Android", "iPhone","TweetDeck","Hootsuite Inc."] numberSources_y = [] numberSources_y.append(sources["Twitter Web App"]) numberSources_y.append(sources["Twitter for Android"]) numberSources_y.append(sources["Twitter for iPhone"]) numberSources_y.append(sources["TweetDeck"]) numberSources_y.append(sources["Hootsuite Inc."]) plt.bar(nameLabels_x, numberSources_y, color='turquoise') plt.title("Tweet sources") plt.xlabel("Most common sources") plt.ylabel("Users") plt.show() ```
github_jupyter
# Chapter 11: Classes & Instances (Review Questions) The questions below assume that you have read the [first <img height="12" style="display: inline-block" src="../static/link/to_nb.png">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/11_classes/00_content.ipynb), [second <img height="12" style="display: inline-block" src="../static/link/to_nb.png">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/11_classes/02_content.ipynb), [third <img height="12" style="display: inline-block" src="../static/link/to_nb.png">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/11_classes/03_content.ipynb), and [fourth <img height="12" style="display: inline-block" src="../static/link/to_nb.png">](https://nbviewer.jupyter.org/github/webartifex/intro-to-python/blob/develop/11_classes/04_content.ipynb) part of Chapter 11. Be concise in your answers! Most questions can be answered in *one* sentence. ## Essay Questions **Q1**: How are **classes** a way to manage the **state** in a big program? How should we think of classes conceptually? < your answer > **Q2**: What do we mean with **instantiation**? How do **instances** relate to **classes**? < your answer > **Q3:** What is an **implementation detail**? Name two different examples of implementation details regarding the `Vector` and `Matrix` classes! < your answer > **Q4**: How are **instance methods** different from **class methods**? How do **special methods** fit into the picture? < your answer > **Q5**: How do **mutability** and **immutability** come into play when designing a user-defined data type? < your answer > **Q6**: Explain the concept of **method chaining**! < your answer > **Q7**: How can we implement **operator overloading** for a user-defined data type? When do we need to user *reverse* special methods? < your answer > ## True / False Questions Motivate your answer with *one short* sentence! 
**Q8**: An instance's **text representation** is a `bytes` object with a special encoding. < your answer > **Q9**: Computed **properties** are special kinds of **instance methods**. < your answer > **Q10**: **Sequence emulation** means designing a user-defined data type around the built-in `list` or `tuple` types. < your answer > **Q11**: The **Python Data Model** can be regarded as the "Theory" or "Mental Model" behind the Python language. < your answer > **Q12**: **Polymorphism** means that two instances have the same data type, be it a built-in or user-defined one. < your answer > **Q13**: **Number emulation** means that two instances of the same user-defined data type can be added together. < your answer > **Q14**: **Packages** are a good place to collect all the code to be reused in a data science project, for example, across different Jupyter notebooks. < your answer >
github_jupyter
# Mount Drive ``` from google.colab import drive drive.mount('/content/drive') !pip install -U -q PyDrive !pip install httplib2==0.15.0 import os from pydrive.auth import GoogleAuth from pydrive.drive import GoogleDrive from pydrive.files import GoogleDriveFileList from google.colab import auth from oauth2client.client import GoogleCredentials from getpass import getpass import urllib # 1. Authenticate and create the PyDrive client. auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) # Cloning CLIPPER to access modules. if 'CLIPPER' not in os.listdir(): cmd_string = 'git clone https://github.com/PAL-ML/CLIPPER.git' os.system(cmd_string) ``` # Installation ## Install multi label metrics dependencies ``` ! pip install scikit-learn==0.24 ``` ## Install CLIP dependencies ``` import subprocess CUDA_version = [s for s in subprocess.check_output(["nvcc", "--version"]).decode("UTF-8").split(", ") if s.startswith("release")][0].split(" ")[-1] print("CUDA version:", CUDA_version) if CUDA_version == "10.0": torch_version_suffix = "+cu100" elif CUDA_version == "10.1": torch_version_suffix = "+cu101" elif CUDA_version == "10.2": torch_version_suffix = "" else: torch_version_suffix = "+cu110" ! pip install torch==1.7.1{torch_version_suffix} torchvision==0.8.2{torch_version_suffix} -f https://download.pytorch.org/whl/torch_stable.html ftfy regex ! pip install ftfy regex ! 
wget https://openaipublic.azureedge.net/clip/bpe_simple_vocab_16e6.txt.gz -O bpe_simple_vocab_16e6.txt.gz !pip install git+https://github.com/Sri-vatsa/CLIP # using this fork because of visualization capabilities ``` ## Install clustering dependencies ``` !pip -q install umap-learn>=0.3.7 ``` ## Install dataset manager dependencies ``` !pip install wget ``` # Imports ``` # ML Libraries import tensorflow as tf import tensorflow_hub as hub import torch import torch.nn as nn import torchvision.models as models import torchvision.transforms as transforms import keras # Data processing import PIL import base64 import imageio import pandas as pd import numpy as np import json from PIL import Image import cv2 import imgaug.augmenters as iaa # Plotting import seaborn as sns import matplotlib.pyplot as plt from IPython.core.display import display, HTML from matplotlib import cm # Models import clip # Datasets import tensorflow_datasets as tfds # Misc import progressbar import logging from abc import ABC, abstractmethod import time import urllib.request import os import itertools from tqdm import tqdm # Modules from CLIPPER.code.ExperimentModules import embedding_models from CLIPPER.code.ExperimentModules import simclr_data_augmentations from CLIPPER.code.ExperimentModules.dataset_manager import DatasetManager from CLIPPER.code.ExperimentModules.clip_few_shot import CLIPFewShotClassifier from CLIPPER.code.ExperimentModules.utils import (save_npy, load_npy, get_folder_id, create_expt_dir, save_to_drive, load_all_from_drive_folder, download_file_by_name, delete_file_by_name) logging.getLogger('googleapicliet.discovery_cache').setLevel(logging.ERROR) ``` # Initialization & Constants **Edited** ``` dataset_name = 'CIFAR10' folder_name = "CIFAR10-Embeddings-28-02-21" # Change parentid to match that of experiments root folder in gdrive parentid = '1bK72W-Um20EQDEyChNhNJthUNbmoSEjD' # Filepaths # train_labels_filename = "train_labels.npz" test_labels_filename = "test_labels.npz" # 
train_embeddings_filename_suffix = "_embeddings_train.npz" test_embeddings_filename_suffix = "_embeddings_test.npz" # Initialize sepcific experiment folder in drive folderid = create_expt_dir(drive, parentid, folder_name) ``` # Load data ``` def get_ndarray_from_drive(drive, folderid, filename): download_file_by_name(drive, folderid, filename) return np.load(filename)['data'] # train_labels = get_ndarray_from_drive(drive, folderid, train_labels_filename) test_labels = get_ndarray_from_drive(drive, folderid, test_labels_filename) dm = DatasetManager() test_data_generator = dm.load_dataset('cifar10', split='test') class_names = dm.get_class_names() ``` # Create label dictionary ``` unique_labels = np.unique(test_labels) print(len(unique_labels)) label_dictionary = {la:[] for la in unique_labels} for i in range(len(test_labels)): la = test_labels[i] label_dictionary[la].append(i) ``` # CLIP zero shot eval ## Function definitions ``` def start_progress_bar(bar_len): widgets = [ ' [', progressbar.Timer(format= 'elapsed time: %(elapsed)s'), '] ', progressbar.Bar('*'),' (', progressbar.ETA(), ') ', ] pbar = progressbar.ProgressBar( max_value=bar_len, widgets=widgets ).start() return pbar def prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, test_labels, shuffle=False ): eval_indices = [] train_indices = [] wi_y = [] eval_y = [] label_dictionary = {la:label_dictionary[la] for la in label_dictionary if len(label_dictionary[la]) >= (num_shot+num_eval)} unique_labels = list(label_dictionary.keys()) pbar = start_progress_bar(num_episodes) for s in range(num_episodes): # Setting random seed for replicability np.random.seed(s) _train_indices = [] _eval_indices = [] selected_labels = np.random.choice(unique_labels, size=num_ways, replace=False) for la in selected_labels: la_indices = label_dictionary[la] select = np.random.choice(la_indices, size = num_shot+num_eval, replace=False) tr_idx = list(select[:num_shot]) ev_idx = list(select[num_shot:]) 
_train_indices = _train_indices + tr_idx _eval_indices = _eval_indices + ev_idx if shuffle: np.random.shuffle(_train_indices) np.random.shuffle(_eval_indices) train_indices.append(_train_indices) eval_indices.append(_eval_indices) _wi_y = test_labels[_train_indices] _eval_y = test_labels[_eval_indices] wi_y.append(_wi_y) eval_y.append(_eval_y) pbar.update(s+1) return train_indices, eval_indices, wi_y, eval_y def embed_images( embedding_model, train_indices, num_augmentations, trivial=False ): def augment_image(image, num_augmentations, trivial): """ Perform SimCLR augmentations on the image """ if np.max(image) > 1: image = image/255 augmented_images = [image] # augmentations = iaa.Sequential([ # iaa.Affine( # translate_percent={'x':(-0.1, 0.1), 'y':(-0.1, 0.1)}, # rotate=(-15, 15), # shear=(-15, 15), # ), # iaa.Fliplr(0.5) # ]) def _run_filters(image): width = image.shape[1] height = image.shape[0] image_aug = simclr_data_augmentations.random_crop_with_resize( image, height, width ) image_aug = tf.image.random_flip_left_right(image_aug) image_aug = simclr_data_augmentations.random_color_jitter(image_aug) image_aug = simclr_data_augmentations.random_blur( image_aug, height, width ) image_aug = tf.reshape(image_aug, [image.shape[0], image.shape[1], 3]) image_aug = tf.clip_by_value(image_aug, 0., 1.) 
return image_aug.numpy() for _ in range(num_augmentations): # aug_image = augmentations(image=image) if trivial: aug_image = image else: aug_image = _run_filters(image) augmented_images.append(aug_image) augmented_images = np.stack(augmented_images) return augmented_images embedding_model.load_model() unique_indices = np.unique(np.array(train_indices)) ds = dm.load_dataset('cifar10', split='test') embeddings = [] IMAGE_IDX = 'image' pbar = start_progress_bar(unique_indices.size+1) num_done=0 for idx, item in enumerate(ds): if idx in unique_indices: image = item[IMAGE_IDX] if num_augmentations > 0: aug_images = augment_image(image, num_augmentations, trivial) else: aug_images = image processed_images = embedding_model.preprocess_data(aug_images) embedding = embedding_model.embed_images(processed_images) embeddings.append(embedding) num_done += 1 pbar.update(num_done+1) if idx == unique_indices[-1]: break embeddings = np.stack(embeddings) return unique_indices, embeddings def evaluate_model_for_episode( model, eval_x, eval_y, label_mapping, filtered_classes, metrics=['hamming', 'jaccard', 'subset_accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'], multi_label=True ): zs_weights = model.zeroshot_classifier(filtered_classes) logits = model.predict_scores(eval_x, zs_weights).tolist() pred_y = model.predict_label(eval_x, zs_weights) pred_y = [label_mapping[l] for l in pred_y] met = model.evaluate_single_label_metrics( eval_x, eval_y, label_mapping, zs_weights, metrics ) return pred_y, met, logits def get_label_mapping_n_class_names(eval_y, class_names): label_mapping = {} unique_labels = np.unique(eval_y) filtered_classes = [class_names[x] for x in unique_labels] num_classes = len(unique_labels) for c in range(num_classes): label_mapping[c] = unique_labels[c] return label_mapping, filtered_classes # chenni change def run_episode_through_model( 
indices_and_embeddings, train_indices, eval_indices, wi_y, eval_y, class_names, num_augmentations=0, train_epochs=None, train_batch_size=5, metrics=['hamming', 'jaccard', 'subset_accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'], embeddings=None, multi_label=True ): metrics_values = {m:[] for m in metrics} indices_and_embeddings eval_x = embeddings[eval_indices] ep_logits = [] label_mapping, filtered_classes = get_label_mapping_n_class_names(eval_y, class_names) clip_fs_parameters = { "num_classes": num_ways, "input_dims": eval_x.shape[-1], "multi_label": multi_label } clip_fs_cls = CLIPFewShotClassifier(clip_fs_parameters) pred_labels, metrics_values, logits = evaluate_model_for_episode( clip_fs_cls, eval_x, eval_y, label_mapping, filtered_classes, metrics=metrics, multi_label=False ) ep_logits = logits #cc return metrics_values, ep_logits def run_evaluations( indices_and_embeddings, train_indices, eval_indices, wi_y, eval_y, num_episodes, num_ways, class_names, verbose=True, normalize=True, train_epochs=None, train_batch_size=5, metrics=['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'], embeddings=None, num_augmentations=0, multi_label=True ): metrics_values = {m:[] for m in metrics} all_logits = [] if verbose: pbar = start_progress_bar(num_episodes) for idx_ep in range(num_episodes): _train_indices = train_indices[idx_ep] _eval_indices = eval_indices[idx_ep] _wi_y = [label for label in wi_y[idx_ep]] _eval_y = [label for label in eval_y[idx_ep]] met, ep_logits = run_episode_through_model( indices_and_embeddings, _train_indices, _eval_indices, _wi_y, _eval_y, class_names, num_augmentations=num_augmentations, train_epochs=train_epochs, train_batch_size=train_batch_size, embeddings=embeddings, metrics=metrics, multi_label=multi_label ) 
all_logits.append(ep_logits) for m in metrics: metrics_values[m].append(met[m]) if verbose: pbar.update(idx_ep+1) return metrics_values, all_logits def get_best_metric(mt, metric_name, optimal='max'): if optimal=='max': opt_value = np.max(np.mean(np.array(mt[metric_name]), axis=0)) if optimal=='min': opt_value = np.min(np.mean(np.array(mt[metric_name]), axis=0)) return opt_value ``` # 5 way 5 shot ## Picking indices ``` num_ways = 5 num_shot = 5 num_eval = 15 shuffle = False num_episodes = 100 train_indices, eval_indices, wi_y, eval_y = prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, test_labels, shuffle ) embedding_model = embedding_models.CLIPEmbeddingWrapper() num_augmentations = 0 trivial=False indices, embeddings = embed_images( embedding_model, train_indices, num_augmentations, trivial=trivial ) ``` ## CLIP ``` clip_embeddings_test_fn = "clip" + test_embeddings_filename_suffix clip_embeddings_test = get_ndarray_from_drive(drive, folderid, clip_embeddings_test_fn) import warnings warnings.filterwarnings('ignore') if trivial: #cc results_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_zs_metrics_with_logits.json" else: #cc results_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_zs_metrics_with_logits.json" auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) download_file_by_name(drive, folderid, results_filename) if results_filename in os.listdir(): with open(results_filename, 'r') as f: #cc json_loaded = json.load(f) #cc clip_metrics_over_train_epochs = json_loaded['metrics'] #cc logits_over_train_epochs = json_loaded["logits"] else: clip_metrics_over_train_epochs = [] logits_over_train_epochs = [] train_epochs_arr = [0] multi_label=False # metrics_vals = ['hamming', 'jaccard', 'f1_score'] # ['accuracy', 
'f1_score'] for idx, train_epochs in enumerate(train_epochs_arr): if idx < len(clip_metrics_over_train_epochs): continue print(train_epochs) #cc clip_metrics_thresholds, all_logits = run_evaluations( (indices, embeddings), train_indices, eval_indices, wi_y, eval_y, num_episodes, num_ways, class_names, train_epochs=train_epochs, num_augmentations=num_augmentations, embeddings=clip_embeddings_test ) clip_metrics_over_train_epochs.append(clip_metrics_thresholds) #cc logits_over_train_epochs.append(all_logits) #cc fin_list = [] #cc the whole for loop for a1 in wi_y: fin_a1_list = [] for a2 in a1: new_val = str(a2) fin_a1_list.append(new_val) fin_list.append(fin_a1_list) with open(results_filename, 'w') as f: #cc results = {'metrics': clip_metrics_over_train_epochs, "logits": logits_over_train_epochs, "true_labels": fin_list} json.dump(results, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) #delete_file_by_name(drive, folderid, results_filename) #save_to_drive(drive, folderid, results_filename) if trivial: PLOT_DIR = "NewMetrics_clip_zs_Sigmoid_MiniImagenet" + "_0t" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_trivial_plots" else: PLOT_DIR = "NewMetrics_clip_zs_Sigmoid_MiniImagenet" + "_0t" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_plots" os.mkdir(PLOT_DIR) # chenni change whole block all_metrics = ['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'] final_dict = {} for ind_metric in all_metrics: vals = [] final_array = [] for mt in clip_metrics_over_train_epochs: ret_val = get_best_metric(mt,ind_metric,"max") vals.append(ret_val) final_array.append(vals) final_dict[ind_metric] = final_array if trivial: graph_filename = 
"new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_zs_metrics_graphs.json" else: graph_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_zs_metrics_graphs.json" with open(graph_filename, 'w') as f: json.dump(final_dict, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, graph_filename) save_to_drive(drive, folderid, graph_filename) zip_dirname = PLOT_DIR + ".zip" zip_source = PLOT_DIR ! zip -r $zip_dirname $zip_source auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) save_to_drive(drive, folderid, zip_dirname) ``` # 10 way 5 shot ## Picking indices ``` num_ways = 10 num_shot = 5 num_eval = 5 shuffle = False num_episodes = 100 train_indices, eval_indices, wi_y, eval_y = prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, test_labels, shuffle ) embedding_model = embedding_models.CLIPEmbeddingWrapper() num_augmentations = 0 trivial=False indices, embeddings = embed_images( embedding_model, train_indices, num_augmentations, trivial=trivial ) ``` ## CLIP ``` clip_embeddings_test_fn = "clip" + test_embeddings_filename_suffix clip_embeddings_test = get_ndarray_from_drive(drive, folderid, clip_embeddings_test_fn) import warnings warnings.filterwarnings('ignore') if trivial: #cc results_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_zs_metrics_with_logits.json" else: #cc results_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_metrics_clip_zs_with_logits.json" auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) 
download_file_by_name(drive, folderid, results_filename) if results_filename in os.listdir(): with open(results_filename, 'r') as f: #cc json_loaded = json.load(f) #cc clip_metrics_over_train_epochs = json_loaded['metrics'] #cc logits_over_train_epochs = json_loaded["logits"] else: clip_metrics_over_train_epochs = [] #cc logits_over_train_epochs = [] train_epochs_arr = [0] multi_label=False # metrics_vals = ['hamming', 'jaccard', 'f1_score'] # ['accuracy', 'f1_score'] for idx, train_epochs in enumerate(train_epochs_arr): if idx < len(clip_metrics_over_train_epochs): continue print(train_epochs) #cc clip_metrics_thresholds, all_logits = run_evaluations( (indices, embeddings), train_indices, eval_indices, wi_y, eval_y, num_episodes, num_ways, class_names, train_epochs=train_epochs, num_augmentations=num_augmentations, embeddings=clip_embeddings_test ) clip_metrics_over_train_epochs.append(clip_metrics_thresholds) #cc logits_over_train_epochs.append(all_logits) #cc fin_list = [] #cc the whole for loop for a1 in wi_y: fin_a1_list = [] for a2 in a1: new_val = str(a2) fin_a1_list.append(new_val) fin_list.append(fin_a1_list) with open(results_filename, 'w') as f: #cc results = {'metrics': clip_metrics_over_train_epochs, "logits": logits_over_train_epochs, "true_labels": fin_list} json.dump(results, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, results_filename) save_to_drive(drive, folderid, results_filename) if trivial: PLOT_DIR = "NewMetrics_clip_zs_Sigmoid_MiniImagenet" + "_0t" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_trivial_plots" else: PLOT_DIR = "NewMetrics_clip_zs_Sigmoid_MiniImagenet" + "_0t" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_plots" os.mkdir(PLOT_DIR) all_metrics = ['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 
'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'] final_dict = {} for ind_metric in all_metrics: vals = [] final_array = [] for mt in clip_metrics_over_train_epochs: ret_val = get_best_metric(mt,ind_metric,"max") vals.append(ret_val) final_array.append(vals) final_dict[ind_metric] = final_array if trivial: graph_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_zs_metrics_graphs.json" else: graph_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_zs_metrics_graphs.json" with open(graph_filename, 'w') as f: json.dump(final_dict, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, graph_filename) save_to_drive(drive, folderid, graph_filename) zip_dirname = PLOT_DIR + ".zip" zip_source = PLOT_DIR ! zip -r $zip_dirname $zip_source auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) save_to_drive(drive, folderid, zip_dirname) ``` # 5 way 1 shot ## Picking indices ``` num_ways = 5 num_shot = 1 num_eval = 19 shuffle = False num_episodes = 100 train_indices, eval_indices, wi_y, eval_y = prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, test_labels, shuffle ) embedding_model = embedding_models.CLIPEmbeddingWrapper() num_augmentations = 0 trivial=False indices, embeddings = embed_images( embedding_model, train_indices, num_augmentations, trivial=trivial ) ``` ## CLIP ``` clip_embeddings_test_fn = "clip" + test_embeddings_filename_suffix clip_embeddings_test = get_ndarray_from_drive(drive, folderid, clip_embeddings_test_fn) import warnings warnings.filterwarnings('ignore') if trivial: #cc results_filename = 
"new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_zs_metrics_with_logits.json" else: #cc results_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_zs_metrics_with_logits.json" auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) download_file_by_name(drive, folderid, results_filename) if results_filename in os.listdir(): with open(results_filename, 'r') as f: #cc json_loaded = json.load(f) #cc clip_metrics_over_train_epochs = json_loaded['metrics'] #cc logits_over_train_epochs = json_loaded["logits"] else: clip_metrics_over_train_epochs = [] #cc logits_over_train_epochs = [] train_epochs_arr = [0] multi_label=False # metrics_vals = ['hamming', 'jaccard', 'f1_score'] # ['accuracy', 'f1_score'] for idx, train_epochs in enumerate(train_epochs_arr): if idx < len(clip_metrics_over_train_epochs): continue print(train_epochs) #cc clip_metrics_thresholds, all_logits = run_evaluations( (indices, embeddings), train_indices, eval_indices, wi_y, eval_y, num_episodes, num_ways, class_names, train_epochs=train_epochs, num_augmentations=num_augmentations, embeddings=clip_embeddings_test ) clip_metrics_over_train_epochs.append(clip_metrics_thresholds) #cc logits_over_train_epochs.append(all_logits) #cc fin_list = [] #cc the whole for loop for a1 in wi_y: fin_a1_list = [] for a2 in a1: new_val = str(a2) fin_a1_list.append(new_val) fin_list.append(fin_a1_list) with open(results_filename, 'w') as f: #cc results = {'metrics': clip_metrics_over_train_epochs, "logits": logits_over_train_epochs, "true_labels": fin_list} json.dump(results, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, results_filename) save_to_drive(drive, folderid, results_filename) if 
trivial: PLOT_DIR = "NewMetrics_clip_zs_Sigmoid_MiniImagenet" + "_0t" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_trivial_plots" else: PLOT_DIR = "NewMetrics_clip_zs_Sigmoid_MiniImagenet" + "_0t" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_plots" os.mkdir(PLOT_DIR) all_metrics = ['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'] final_dict = {} for ind_metric in all_metrics: vals = [] final_array = [] for mt in clip_metrics_over_train_epochs: ret_val = get_best_metric(mt,ind_metric,"max") vals.append(ret_val) final_array.append(vals) final_dict[ind_metric] = final_array if trivial: graph_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_zs_metrics_graphs.json" else: graph_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_zs_metrics_graphs.json" with open(graph_filename, 'w') as f: json.dump(final_dict, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, graph_filename) save_to_drive(drive, folderid, graph_filename) zip_dirname = PLOT_DIR + ".zip" zip_source = PLOT_DIR ! 
zip -r $zip_dirname $zip_source auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) save_to_drive(drive, folderid, zip_dirname) ``` # 10 way 1 shot ## Picking indices ``` num_ways = 10 num_shot = 1 num_eval = 10 shuffle = False num_episodes = 100 train_indices, eval_indices, wi_y, eval_y = prepare_indices( num_ways, num_shot, num_eval, num_episodes, label_dictionary, test_labels, shuffle ) embedding_model = embedding_models.CLIPEmbeddingWrapper() num_augmentations = 0 trivial=False indices, embeddings = embed_images( embedding_model, train_indices, num_augmentations, trivial=trivial ) ``` ## CLIP ``` clip_embeddings_test_fn = "clip" + test_embeddings_filename_suffix clip_embeddings_test = get_ndarray_from_drive(drive, folderid, clip_embeddings_test_fn) import warnings warnings.filterwarnings('ignore') if trivial: #cc results_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_zs_metrics_with_logits.json" else: #cc results_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_zs_metrics_with_logits.json" auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) download_file_by_name(drive, folderid, results_filename) if results_filename in os.listdir(): with open(results_filename, 'r') as f: #cc json_loaded = json.load(f) #cc clip_metrics_over_train_epochs = json_loaded['metrics'] #cc logits_over_train_epochs = json_loaded["logits"] else: clip_metrics_over_train_epochs = [] #cc logits_over_train_epochs = [] train_epochs_arr = [0] multi_label=False # metrics_vals = ['hamming', 'jaccard', 'f1_score'] # ['accuracy', 'f1_score'] for idx, train_epochs in enumerate(train_epochs_arr): if idx < len(clip_metrics_over_train_epochs): continue print(train_epochs) #cc clip_metrics_thresholds, 
all_logits = run_evaluations( (indices, embeddings), train_indices, eval_indices, wi_y, eval_y, num_episodes, num_ways, class_names, train_epochs=train_epochs, num_augmentations=num_augmentations, embeddings=clip_embeddings_test ) clip_metrics_over_train_epochs.append(clip_metrics_thresholds) #cc logits_over_train_epochs.append(all_logits) #cc fin_list = [] #cc the whole for loop for a1 in wi_y: fin_a1_list = [] for a2 in a1: new_val = str(a2) fin_a1_list.append(new_val) fin_list.append(fin_a1_list) with open(results_filename, 'w') as f: #cc results = {'metrics': clip_metrics_over_train_epochs, "logits": logits_over_train_epochs, "true_labels": fin_list} json.dump(results, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, results_filename) save_to_drive(drive, folderid, results_filename) if trivial: PLOT_DIR = "NewMetrics_clip_zs_Sigmoid_MiniImagenet" + "_0t" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_trivial_plots" else: PLOT_DIR = "NewMetrics_clip_zs_Sigmoid_MiniImagenet" + "_0t" + str(num_ways) + "w" + str(num_shot) + "s" + str(num_augmentations) + "a_plots" os.mkdir(PLOT_DIR) all_metrics = ['accuracy', 'ap', 'map', 'c_f1', 'o_f1', 'c_precision', 'o_precision', 'c_recall', 'o_recall', 'top1_accuracy', 'top5_accuracy', 'classwise_accuracy', 'c_accuracy'] final_dict = {} for ind_metric in all_metrics: vals = [] final_array = [] for mt in clip_metrics_over_train_epochs: ret_val = get_best_metric(mt,ind_metric,"max") vals.append(ret_val) final_array.append(vals) final_dict[ind_metric] = final_array if trivial: graph_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_trivial_clip_zs_metrics_graphs.json" else: graph_filename = "new_metrics"+dataset_name+"_0t"+str(num_ways)+"w"+str(num_shot)+"s"+str(num_augmentations)+"a_clip_zs_metrics_graphs.json" with 
open(graph_filename, 'w') as f: json.dump(final_dict, f) auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) delete_file_by_name(drive, folderid, graph_filename) save_to_drive(drive, folderid, graph_filename) zip_dirname = PLOT_DIR + ".zip" zip_source = PLOT_DIR ! zip -r $zip_dirname $zip_source auth.authenticate_user() gauth = GoogleAuth() gauth.credentials = GoogleCredentials.get_application_default() drive = GoogleDrive(gauth) save_to_drive(drive, folderid, zip_dirname) ```
github_jupyter
# IBM Watson OpenScale Lab instructions This notebook should be run in a Watson Studio project, using the **Default Spark Python 3.6** runtime environment. **If you are viewing this in Watson Studio and do not see `Python 3.6 with Spark` in the upper right corner of your screen, please update the runtime now.** It requires service credentials for the following Cloud services: * IBM Watson OpenScale * Watson Machine Learning If you have a paid Cloud account, you may also provision a **Databases for PostgreSQL** or **Db2 Warehouse** service to take full advantage of integration with Watson Studio and continuous learning services. If you choose not to provision this paid service, you can use the free internal PostgreSQL storage with OpenScale, but will not be able to configure continuous learning for your model. The notebook will train, create and deploy a German Credit Risk model, configure OpenScale to monitor that deployment, and inject seven days' worth of historical records and measurements for viewing in the OpenScale Insights dashboard. ## Test Spark ``` try: from pyspark.sql import SparkSession except: print('Error: Spark runtime is missing. If you are using Watson Studio change the notebook runtime to Spark.') raise ``` ## Package installation ``` !rm -rf $PIP_BUILD !pip install psycopg2-binary | tail -n 1 !pip install --upgrade watson-machine-learning-client --no-cache | tail -n 1 !pip install --upgrade ibm-ai-openscale --no-cache | tail -n 1 !pip install --upgrade numpy --no-cache | tail -n 1 !pip install --upgrade lime --no-cache | tail -n 1 !pip install --upgrade SciPy --no-cache | tail -n 1 ``` # Provision services and configure credentials If you have not already, provision an instance of IBM Watson OpenScale using the [OpenScale link in the Cloud catalog](https://cloud.ibm.com/catalog/services/watson-openscale). Your Cloud API key can be generated by going to the [**Users** section of the Cloud console](https://cloud.ibm.com/iam#/users). 
* From that page, click your name, scroll down to the **API Keys** section, and click **Create an IBM Cloud API key**. * Give your key a name and click **Create**, then copy the created key and paste it below. Alternately, from the [IBM Cloud CLI](https://console.bluemix.net/docs/cli/reference/ibmcloud/download_cli.html#install_use) : ```bash ibmcloud login --sso ibmcloud iam api-key-create 'my_key' ``` ``` CLOUD_API_KEY = "****" ``` Get the Watson OpenScale GUID using the [IBM Cloud CLI](https://console.bluemix.net/docs/cli/reference/ibmcloud/download_cli.html#install_use) : ```bash ibmcloud resource service-instance <Watson_OpenScale_instance_name> ``` ``` AIOS_GUID = "****" AIOS_CREDENTIALS = { "instance_guid": AIOS_GUID, "apikey": CLOUD_API_KEY, "url": "https://api.aiopenscale.cloud.ibm.com" } ``` Next you will need credentials for Watson Machine Learning. If you already have a WML instance, you may use credentials for it. To provision a new Lite instance of WML, use the [IBM Cloud catalog](https://cloud.ibm.com/catalog/services/machine-learning): * Give your service a name, and click **Create**. * Once your instance is created, click the **Service Credentials** link on the left side of the screen. * Click the **New credential** button, give your credentials a name, and click **Add**. * Your new credentials can be accessed by clicking the **View credentials** button. * Copy and paste your WML credentials into the cell below. ``` WML_CREDENTIALS = { "apikey": "key", "iam_apikey_description": "description", "iam_apikey_name": "auto-generated-apikey", "iam_role_crn": "crn:v1:bluemix:public:iam::::serviceRole:Writer", "iam_serviceid_crn": "crn:v1:bluemix:public:iam-identity::", "instance_id": "instance_id", "password": "password", "url": "https://us-south.ml.cloud.ibm.com", "username": "username" } ``` This lab can use Databases for PostgreSQL, Db2 Warehouse, or a free internal verison of PostgreSQL to create a datamart for OpenScale. 
**If you have previously configured OpenScale**, it will use your existing datamart, and not interfere with any models you are currently monitoring. In that case, do **not** fill out or run the `DB_CREDENTIALS` cell below; instead, uncomment the `DB_CREDENTIALS = None` cell and run it. **If you do not have a paid Cloud account or would prefer not to provision this paid service**, you may use the free internal PostgreSQL service with OpenScale. In that case as well, skip the `DB_CREDENTIALS` cell below, and uncomment and run the `DB_CREDENTIALS = None` cell instead. **To provision a new instance of Db2 Warehouse**: * Locate [Db2 Warehouse in the Cloud catalog](https://cloud.ibm.com/catalog/services/db2-warehouse) * Give your service a name, and click **Create**. * Once your instance is created, click the **Service Credentials** link on the left side of the screen. * Click the **New credential** button, give your credentials a name, and click **Add**. * Your new credentials can be accessed by clicking the **View credentials** button. * Copy and paste your Db2 Warehouse credentials into the cell below. **To provision a new instance of Databases for PostgreSQL**: * Locate [Databases for PostgreSQL in the Cloud catalog](https://cloud.ibm.com/catalog/services/databases-for-postgresql) * Give your service a name, and click **Create**. * Once your instance is created, click the **Service Credentials** link on the left side of the screen. * Click the **New credential** button, give your credentials a name, and click **Add**. * Your new credentials can be accessed by clicking the **View credentials** button. * Copy and paste your Databases for PostgreSQL credentials into the cell below.
IF you have previously configured OpenScale and are using an existing datamart OR would prefer not to provision the paid service, uncomment the cell below to set `DB_CREDENTIALS = NONE` AND do not run or comment out the cell where `DB_CREDENTIALS` are set. ``` DB_CREDENTIALS = None ``` Comment out the cell below if you have set `DB_CREDENTIALS = None`. ``` #DB_CREDENTIALS = { "hostname": "***", "password": "***", "https_url": "***", "port": 50000, "ssldsn": "***", "host": "***", "jdbcurl": "***", "uri": "***", "db": "***", "dsn": "****", "username": "***", "ssljdbcurl": "****" } ``` __If you previously configured OpenScale to use the free internal version of PostgreSQL, you can switch to a new datamart using a paid database service.__ If you would like to delete the internal PostgreSQL configuration and create a new one using service credentials supplied in the cell above, set the __KEEP_MY_INTERNAL_POSTGRES__ variable below to __False__ below. In this case, the notebook will remove your existing internal PostgreSQL datamart and create a new one with the supplied credentials. __*NO DATA MIGRATION WILL OCCUR.*__ ``` KEEP_MY_INTERNAL_POSTGRES = True ``` # Run the notebook At this point, the notebook is ready to run. You can either run the cells one at a time, or click the **Kernel** option above and select **Restart and Run All** to run all the cells. 
# Load and explore data ## Load the training data from github ``` !rm german_credit_data_biased_training.csv !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/german_credit_data_biased_training.csv from pyspark.sql import SparkSession import pandas as pd import json spark = SparkSession.builder.getOrCreate() pd_data = pd.read_csv("german_credit_data_biased_training.csv", sep=",", header=0) df_data = spark.read.csv(path="german_credit_data_biased_training.csv", sep=",", header=True, inferSchema=True) df_data.head() ``` ## Explore data ``` df_data.printSchema() print("Number of records: " + str(df_data.count())) ``` # Create a model ``` spark_df = df_data (train_data, test_data) = spark_df.randomSplit([0.8, 0.2], 24) MODEL_NAME = "Spark German Risk Model - Final" DEPLOYMENT_NAME = "Spark German Risk Deployment - Final" print("Number of records for training: " + str(train_data.count())) print("Number of records for evaluation: " + str(test_data.count())) spark_df.printSchema() from pyspark.ml.feature import OneHotEncoder, StringIndexer, IndexToString, VectorAssembler from pyspark.ml.evaluation import BinaryClassificationEvaluator from pyspark.ml import Pipeline, Model si_CheckingStatus = StringIndexer(inputCol = 'CheckingStatus', outputCol = 'CheckingStatus_IX') si_CreditHistory = StringIndexer(inputCol = 'CreditHistory', outputCol = 'CreditHistory_IX') si_LoanPurpose = StringIndexer(inputCol = 'LoanPurpose', outputCol = 'LoanPurpose_IX') si_ExistingSavings = StringIndexer(inputCol = 'ExistingSavings', outputCol = 'ExistingSavings_IX') si_EmploymentDuration = StringIndexer(inputCol = 'EmploymentDuration', outputCol = 'EmploymentDuration_IX') si_Sex = StringIndexer(inputCol = 'Sex', outputCol = 'Sex_IX') si_OthersOnLoan = StringIndexer(inputCol = 'OthersOnLoan', outputCol = 'OthersOnLoan_IX') si_OwnsProperty = StringIndexer(inputCol = 'OwnsProperty', outputCol = 'OwnsProperty_IX') si_InstallmentPlans = 
StringIndexer(inputCol = 'InstallmentPlans', outputCol = 'InstallmentPlans_IX') si_Housing = StringIndexer(inputCol = 'Housing', outputCol = 'Housing_IX') si_Job = StringIndexer(inputCol = 'Job', outputCol = 'Job_IX') si_Telephone = StringIndexer(inputCol = 'Telephone', outputCol = 'Telephone_IX') si_ForeignWorker = StringIndexer(inputCol = 'ForeignWorker', outputCol = 'ForeignWorker_IX') si_Label = StringIndexer(inputCol="Risk", outputCol="label").fit(spark_df) label_converter = IndexToString(inputCol="prediction", outputCol="predictedLabel", labels=si_Label.labels) va_features = VectorAssembler(inputCols=["CheckingStatus_IX", "CreditHistory_IX", "LoanPurpose_IX", "ExistingSavings_IX", "EmploymentDuration_IX", "Sex_IX", \ "OthersOnLoan_IX", "OwnsProperty_IX", "InstallmentPlans_IX", "Housing_IX", "Job_IX", "Telephone_IX", "ForeignWorker_IX", \ "LoanDuration", "LoanAmount", "InstallmentPercent", "CurrentResidenceDuration", "LoanDuration", "Age", "ExistingCreditsCount", \ "Dependents"], outputCol="features") from pyspark.ml.classification import RandomForestClassifier classifier = RandomForestClassifier(featuresCol="features") pipeline = Pipeline(stages=[si_CheckingStatus, si_CreditHistory, si_EmploymentDuration, si_ExistingSavings, si_ForeignWorker, si_Housing, si_InstallmentPlans, si_Job, si_LoanPurpose, si_OthersOnLoan,\ si_OwnsProperty, si_Sex, si_Telephone, si_Label, va_features, classifier, label_converter]) model = pipeline.fit(train_data) predictions = model.transform(test_data) evaluatorDT = BinaryClassificationEvaluator(rawPredictionCol="prediction") area_under_curve = evaluatorDT.evaluate(predictions) #default evaluation is areaUnderROC print("areaUnderROC = %g" % area_under_curve) ``` # Save and deploy the model ``` from watson_machine_learning_client import WatsonMachineLearningAPIClient import json wml_client = WatsonMachineLearningAPIClient(WML_CREDENTIALS) ``` ### Remove existing model and deployment ``` model_deployment_ids = 
wml_client.deployments.get_uids() for deployment_id in model_deployment_ids: deployment = wml_client.deployments.get_details(deployment_id) model_id = deployment['entity']['deployable_asset']['guid'] if deployment['entity']['name'] == DEPLOYMENT_NAME: print('Deleting deployment id', deployment_id) wml_client.deployments.delete(deployment_id) print('Deleting model id', model_id) wml_client.repository.delete(model_id) wml_client.repository.list_models() model_props = { wml_client.repository.ModelMetaNames.NAME: "{}".format(MODEL_NAME), wml_client.repository.ModelMetaNames.EVALUATION_METHOD: "binary", wml_client.repository.ModelMetaNames.EVALUATION_METRICS: [ { "name": "areaUnderROC", "value": area_under_curve, "threshold": 0.7 } ] } wml_models = wml_client.repository.get_details() model_uid = None for model_in in wml_models['models']['resources']: if MODEL_NAME == model_in['entity']['name']: model_uid = model_in['metadata']['guid'] break if model_uid is None: print("Storing model ...") published_model_details = wml_client.repository.store_model(model=model, meta_props=model_props, training_data=train_data, pipeline=pipeline) model_uid = wml_client.repository.get_model_uid(published_model_details) print("Done") model_uid wml_deployments = wml_client.deployments.get_details() deployment_uid = None for deployment in wml_deployments['resources']: if DEPLOYMENT_NAME == deployment['entity']['name']: deployment_uid = deployment['metadata']['guid'] break if deployment_uid is None: print("Deploying model...") deployment = wml_client.deployments.create(artifact_uid=model_uid, name=DEPLOYMENT_NAME, asynchronous=False) deployment_uid = wml_client.deployments.get_uid(deployment) print("Model id: {}".format(model_uid)) print("Deployment id: {}".format(deployment_uid)) ``` # Configure OpenScale ``` from ibm_ai_openscale import APIClient from ibm_ai_openscale.engines import * from ibm_ai_openscale.utils import * from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature 
from ibm_ai_openscale.supporting_classes.enums import * ``` ## Create schema and datamart ``` ai_client = APIClient(aios_credentials=AIOS_CREDENTIALS) ai_client.version ``` #### Note: to delete an existing data_mart, uncomment the next cell and run it. ``` #ai_client.data_mart.delete() ``` ### Set up datamart ``` try: data_mart_details = ai_client.data_mart.get_details() if 'internal_database' in data_mart_details and data_mart_details['internal_database']: if KEEP_MY_INTERNAL_POSTGRES: print('Using existing internal datamart.') else: if DB_CREDENTIALS is None: print('No postgres credentials supplied. Using existing internal datamart') else: print('Switching to external datamart') ai_client.data_mart.delete(force=True) ai_client.data_mart.setup(db_credentials=DB_CREDENTIALS) else: print('Using existing external datamart') except: if DB_CREDENTIALS is None: print('Setting up internal datamart') ai_client.data_mart.setup(internal_db=True) else: print('Setting up external datamart') try: ai_client.data_mart.setup(db_credentials=DB_CREDENTIALS) except: print('Setup failed, trying Db2 setup') ai_client.data_mart.setup(db_credentials=DB_CREDENTIALS, schema=DB_CREDENTIALS['username']) data_mart_details = ai_client.data_mart.get_details() data_mart_details ``` ## Bind machine learning engines ``` binding_uid = ai_client.data_mart.bindings.add('WML instance', WatsonMachineLearningInstance(WML_CREDENTIALS)) if binding_uid is None: binding_uid = ai_client.data_mart.bindings.get_details()['service_bindings'][0]['metadata']['guid'] bindings_details = ai_client.data_mart.bindings.get_details() ai_client.data_mart.bindings.list() print(binding_uid) ai_client.data_mart.bindings.list_assets() ``` ## Subscriptions ### Remove existing credit risk subscriptions ``` subscriptions_uids = ai_client.data_mart.subscriptions.get_uids() for subscription in subscriptions_uids: sub_name = ai_client.data_mart.subscriptions.get_details(subscription)['entity']['asset']['name'] if sub_name == 
MODEL_NAME: ai_client.data_mart.subscriptions.delete(subscription) print('Deleted existing subscription for', MODEL_NAME) subscription = ai_client.data_mart.subscriptions.add(WatsonMachineLearningAsset( model_uid, problem_type=ProblemType.BINARY_CLASSIFICATION, input_data_type=InputDataType.STRUCTURED, label_column='Risk', prediction_column='predictedLabel', probability_column='probability', feature_columns = ["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"], categorical_columns = ["CheckingStatus","CreditHistory","LoanPurpose","ExistingSavings","EmploymentDuration","Sex","OthersOnLoan","OwnsProperty","InstallmentPlans","Housing","Job","Telephone","ForeignWorker"] )) if subscription is None: print('Subscription already exists; get the existing one') subscriptions_uids = ai_client.data_mart.subscriptions.get_uids() for sub in subscriptions_uids: if ai_client.data_mart.subscriptions.get_details(sub)['entity']['asset']['name'] == MODEL_NAME: subscription = ai_client.data_mart.subscriptions.get(sub) ``` Get subscription list ``` subscriptions_uids = ai_client.data_mart.subscriptions.get_uids() ai_client.data_mart.subscriptions.list() subscription.get_details() ``` ### Score the model so we can configure monitors ``` credit_risk_scoring_endpoint = None print(deployment_uid) for deployment in wml_client.deployments.get_details()['resources']: if deployment_uid in deployment['metadata']['guid']: credit_risk_scoring_endpoint = deployment['entity']['scoring_url'] print(credit_risk_scoring_endpoint) fields = 
["CheckingStatus","LoanDuration","CreditHistory","LoanPurpose","LoanAmount","ExistingSavings","EmploymentDuration","InstallmentPercent","Sex","OthersOnLoan","CurrentResidenceDuration","OwnsProperty","Age","InstallmentPlans","Housing","ExistingCreditsCount","Job","Dependents","Telephone","ForeignWorker"] values = [ ["no_checking",13,"credits_paid_to_date","car_new",1343,"100_to_500","1_to_4",2,"female","none",3,"savings_insurance",46,"none","own",2,"skilled",1,"none","yes"], ["no_checking",24,"prior_payments_delayed","furniture",4567,"500_to_1000","1_to_4",4,"male","none",4,"savings_insurance",36,"none","free",2,"management_self-employed",1,"none","yes"], ["0_to_200",26,"all_credits_paid_back","car_new",863,"less_100","less_1",2,"female","co-applicant",2,"real_estate",38,"none","own",1,"skilled",1,"none","yes"], ["0_to_200",14,"no_credits","car_new",2368,"less_100","1_to_4",3,"female","none",3,"real_estate",29,"none","own",1,"skilled",1,"none","yes"], ["0_to_200",4,"no_credits","car_new",250,"less_100","unemployed",2,"female","none",3,"real_estate",23,"none","rent",1,"management_self-employed",1,"none","yes"], ["no_checking",17,"credits_paid_to_date","car_new",832,"100_to_500","1_to_4",2,"male","none",2,"real_estate",42,"none","own",1,"skilled",1,"none","yes"], ["no_checking",33,"outstanding_credit","appliances",5696,"unknown","greater_7",4,"male","co-applicant",4,"unknown",54,"none","free",2,"skilled",1,"yes","yes"], ["0_to_200",13,"prior_payments_delayed","retraining",1375,"100_to_500","4_to_7",3,"male","none",3,"real_estate",37,"none","own",2,"management_self-employed",1,"none","yes"] ] payload_scoring = {"fields": fields,"values": values} scoring_response = wml_client.deployments.score(credit_risk_scoring_endpoint, payload_scoring) print(scoring_response) ``` ## Quality and feedback monitoring ### Enable quality monitoring Wait 3 minutes to allow the payload logging table to be set up before we begin enabling monitors. 
``` time.sleep(180) subscription.quality_monitoring.enable(threshold=0.7, min_records=50) ``` ### Feedback logging ``` !rm additional_feedback_data.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/additional_feedback_data.json with open('additional_feedback_data.json') as feedback_file: additional_feedback_data = json.load(feedback_file) subscription.feedback_logging.store(additional_feedback_data['data']) subscription.feedback_logging.show_table() run_details = subscription.quality_monitoring.run() status = run_details['status'] id = run_details['id'] print(id) print("Run status: {}".format(status)) start_time = time.time() elapsed_time = 0 while status != 'completed' and elapsed_time < 60: time.sleep(10) run_details = subscription.quality_monitoring.get_run_details(run_uid=id) status = run_details['status'] elapsed_time = time.time() - start_time print("Run status: {}".format(status)) subscription.quality_monitoring.get_run_details() subscription.quality_monitoring.show_table() ai_client.data_mart.get_deployment_metrics() ``` ## Fairness monitoring ``` subscription.fairness_monitoring.enable( features=[ Feature("Sex", majority=['male'], minority=['female'], threshold=0.95), Feature("Age", majority=[[26,75]], minority=[[18,25]], threshold=0.95) ], favourable_classes=['No Risk'], unfavourable_classes=['Risk'], min_records=1000, training_data=pd_data ) ``` ## Score the model again now that monitoring is configured ``` !rm german_credit_feed.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/german_credit_feed.json ``` Score 1000 randomly chosen records ``` import random with open('german_credit_feed.json', 'r') as scoring_file: scoring_data = json.load(scoring_file) fields = scoring_data['fields'] values = [] for _ in range(1000): values.append(random.choice(scoring_data['values'])) payload_scoring = {"fields": fields, "values": values} scoring_response = 
wml_client.deployments.score(credit_risk_scoring_endpoint, payload_scoring) print(scoring_response) subscription.get_details() ``` # Insert historical payloads ``` !rm payload_history*.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/payload_history_1.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/payload_history_2.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/payload_history_3.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/payload_history_4.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/payload_history_5.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/payload_history_6.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/payload_history_7.json historyDays = 7 from ibm_ai_openscale.supporting_classes import PayloadRecord, Feature import datetime import time for day in range(historyDays): print('Loading day {}'.format(day + 1)) history_file = 'payload_history_' + str(day + 1) + '.json' with open(history_file) as f: payloads = json.load(f) hourly_records = int(len(payloads) / 24) index = 0 for hour in range(24): recordsList = [] for i in range(hourly_records): score_time = str(datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))) recordsList.append(PayloadRecord(request=payloads[index]['request'], response=payloads[index]['response'], scoring_timestamp=score_time)) index += 1 subscription.payload_logging.store(records=recordsList) print('Finished') data_mart_id = subscription.get_details()['metadata']['url'].split('/service_bindings')[0].split('marts/')[1] print(data_mart_id) performance_metrics_url = 'https://api.aiopenscale.cloud.ibm.com' + 
subscription.get_details()['metadata']['url'].split('/service_bindings')[0] + '/metrics' print(performance_metrics_url) ``` ## Insert historical fairness metrics ``` !rm fairness_history.json !wget https://raw.githubusercontent.com/IBM/monitor-wml-model-with-watson-openscale/master/data/fairness_history.json import random token_data = { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'response_type': 'cloud_iam', 'apikey': AIOS_CREDENTIALS['apikey'] } response = requests.post('https://iam.bluemix.net/identity/token', data=token_data) iam_token = response.json()['access_token'] iam_headers = { 'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % iam_token } with open('fairness_history.json', 'r') as history_file: payloads = json.load(history_file) for day in range(historyDays): print('Day', day + 1) for hour in range(24): score_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))).strftime('%Y-%m-%dT%H:%M:%SZ') qualityMetric = { 'metric_type': 'fairness', 'binding_id': binding_uid, 'timestamp': score_time, 'subscription_id': model_uid, 'asset_revision': model_uid, 'deployment_id': deployment_uid, 'value': random.choice(payloads) } response = requests.post(performance_metrics_url, json=[qualityMetric], headers=iam_headers) print('Finished') ``` ## Insert historical quality metrics ``` token_data = { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'response_type': 'cloud_iam', 'apikey': AIOS_CREDENTIALS['apikey'] } response = requests.post('https://iam.bluemix.net/identity/token', data=token_data) iam_token = response.json()['access_token'] iam_headers = { 'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % iam_token } measurements = [0.76, 0.78, 0.68, 0.72, 0.73, 0.77, 0.80] for day in range(historyDays): print('Day', day + 1) for hour in range(24): score_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))).strftime('%Y-%m-%dT%H:%M:%SZ') qualityMetric = { 
'metric_type': 'quality', 'binding_id': binding_uid, 'timestamp': score_time, 'subscription_id': model_uid, 'asset_revision': model_uid, 'deployment_id': deployment_uid, 'value': { 'quality': measurements[day], 'threshold': 0.7, 'metrics': [ { 'name': 'auroc', 'value': measurements[day], 'threshold': 0.7 } ] } } response = requests.post(performance_metrics_url, json=[qualityMetric], headers=iam_headers) print('Finished') ``` ## Insert historical performance metrics ``` token_data = { 'grant_type': 'urn:ibm:params:oauth:grant-type:apikey', 'response_type': 'cloud_iam', 'apikey': AIOS_CREDENTIALS['apikey'] } response = requests.post('https://iam.bluemix.net/identity/token', data=token_data) iam_token = response.json()['access_token'] iam_headers = { 'Content-Type': 'application/json', 'Authorization': 'Bearer %s' % iam_token } for day in range(historyDays): print('Day', day + 1) for hour in range(24): score_time = (datetime.datetime.utcnow() + datetime.timedelta(hours=(-(24*day + hour + 1)))).strftime('%Y-%m-%dT%H:%M:%SZ') score_count = random.randint(60, 600) score_resp = random.uniform(60, 300) performanceMetric = { 'metric_type': 'performance', 'binding_id': binding_uid, 'timestamp': score_time, 'subscription_id': model_uid, 'asset_revision': model_uid, 'deployment_id': deployment_uid, 'value': { 'response_time': score_resp, 'records': score_count } } response = requests.post(performance_metrics_url, json=[performanceMetric], headers=iam_headers) print('Finished') ``` ## Configure Explainability ``` from ibm_ai_openscale.supporting_classes import * subscription.explainability.enable(training_data=pd_data) subscription.explainability.get_details() ``` ## Run fairness monitor Kick off a fairness monitor run on current data. Depending on how fast the monitor runs, the table may not contain the most recent results. 
Please note that the payload and measurement logs for the previous deployment will continue to be stored in your datamart, and can be deleted if necessary.
github_jupyter
# Time Series Prediction **Objectives** 1. Build a linear, DNN and CNN model in Keras. 2. Build a simple RNN model and a multi-layer RNN model in Keras. In this lab we will start with a linear, DNN and CNN model Since the features of our model are sequential in nature, we'll next look at how to build various RNN models in Keras. We'll start with a simple RNN model and then see how to create a multi-layer RNN in Keras. We will be exploring a lot of different model types in this notebook. ``` !sudo chown -R jupyter:jupyter /home/jupyter/training-data-analyst !pip install --user google-cloud-bigquery==1.25.0 ``` **Note**: Restart your kernel to use updated packages. Kindly ignore the deprecation warnings and incompatibility errors related to google-cloud-storage. ## Load necessary libraries and set up environment variables ``` PROJECT = "your-gcp-project-here" # REPLACE WITH YOUR PROJECT NAME BUCKET = "your-gcp-bucket-here" # REPLACE WITH YOUR BUCKET REGION = "us-central1" # REPLACE WITH YOUR BUCKET REGION e.g. us-central1 %env PROJECT = PROJECT BUCKET = BUCKET REGION = REGION import os import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np import pandas as pd import tensorflow as tf from google.cloud import bigquery from tensorflow.keras.utils import to_categorical from tensorflow.keras.models import Sequential from tensorflow.keras.layers import (Dense, DenseFeatures, Conv1D, MaxPool1D, Reshape, RNN, LSTM, GRU, Bidirectional) from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint from tensorflow.keras.optimizers import Adam # To plot pretty figures %matplotlib inline mpl.rc('axes', labelsize=14) mpl.rc('xtick', labelsize=12) mpl.rc('ytick', labelsize=12) # For reproducible results. from numpy.random import seed seed(1) tf.random.set_seed(2) ``` ## Explore time series data We'll start by pulling a small sample of the time series data from Big Query and write some helper functions to clean up the data for modeling. 
def clean_data(input_df):
    """Clean raw stock data to prepare it for training.

    Three steps:
      1. Drop rows containing +/-inf or NA values.
      2. Re-encode ``Date`` as a plain ``YYYY-MM-DD`` string (TF does not
         accept datetime columns in a DataFrame).
      3. Map the categorical ``direction`` label to a numeric column
         ``direction_numeric`` (DOWN=0, STAY=1, UP=2).

    Args:
        input_df: Pandas dataframe with at least 'Date' and 'direction'
            columns.

    Returns:
        Cleaned copy of the dataframe; the input is not mutated.
    """
    df = input_df.copy()

    # Remove inf/na values. BUG FIX: the original test `df == np.inf`
    # only caught positive infinity; rows containing -inf slipped
    # through and later broke training. Check both signs.
    real_valued_rows = ~((df == np.inf) | (df == -np.inf)).max(axis=1)
    df = df[real_valued_rows].dropna()

    # TF doesn't accept datetimes in DataFrame; round-trip through
    # to_datetime so malformed dates become NaT (then a string NaN).
    df['Date'] = pd.to_datetime(df['Date'], errors='coerce')
    df['Date'] = df['Date'].dt.strftime('%Y-%m-%d')

    # TF requires a numeric label. An unknown direction value raises
    # KeyError, surfacing bad data instead of silently passing it on.
    df['direction_numeric'] = df['direction'].apply(
        lambda x: {'DOWN': 0, 'STAY': 1, 'UP': 2}[x])
    return df
label_name: str, the column name of the label. Returns: Pandas dataframe """ # Expand 1 column containing a list of close prices to 260 columns. time_series_features = df[STOCK_HISTORY_COLUMN].apply(pd.Series) # Rename columns. time_series_features.columns = COL_NAMES time_series_features = _scale_features(time_series_features) # Concat time series features with static features and label. label_column = df[LABEL] return pd.concat([time_series_features, label_column], axis=1) ``` ### Make train-eval-test split Next, we'll make repeatable splits for our train/validation/test datasets and save these datasets to local csv files. The query below will take a subsample of the entire dataset and then create a 70-15-15 split for the train/validation/test sets. ``` def _create_split(phase): """Create string to produce train/valid/test splits for a SQL query. Args: phase: str, either TRAIN, VALID, or TEST. Returns: String. """ floor, ceiling = '2002-11-01', '2010-07-01' if phase == 'VALID': floor, ceiling = '2010-07-01', '2011-09-01' elif phase == 'TEST': floor, ceiling = '2011-09-01', '2012-11-30' return ''' WHERE Date >= '{0}' AND Date < '{1}' '''.format(floor, ceiling) def create_query(phase): """Create SQL query to create train/valid/test splits on subsample. Args: phase: str, either TRAIN, VALID, or TEST. sample_size: str, amount of data to take for subsample. Returns: String. """ basequery = """ #standardSQL SELECT symbol, Date, direction, close_values_prior_260 FROM `stock_market.eps_percent_change_sp500` """ return basequery + _create_split(phase) ``` ## Modeling For experimentation purposes, we'll train various models using data we can fit in memory using the `.csv` files we created above. 
def plot_curves(train_data, val_data, label='Accuracy'):
    """Plot training and validation metrics on a single axis.

    Args:
        train_data: list, metrics obtained from the training data.
        val_data: list, metrics obtained from the validation data.
        label: str, y-axis label and legend suffix for the plot.

    Returns:
        None; draws onto the current Matplotlib figure.
    """
    # Training points are offset by 0.5 epochs and validation by 1 so
    # the two curves line up the way Keras reports them.
    train_x = np.arange(len(train_data)) + 0.5
    val_x = np.arange(len(val_data)) + 1
    plt.plot(train_x, train_data, "b.-", label="Training " + label)
    plt.plot(val_x, val_data, "r.-", label="Validation " + label)
    # Epochs are whole numbers, so force integer ticks on the x axis.
    plt.gca().xaxis.set_major_locator(mpl.ticker.MaxNLocator(integer=True))
    plt.legend(fontsize=14)
    plt.xlabel("Epochs")
    plt.ylabel(label)
    plt.grid(True)
We'll experiment with a two layer DNN here but feel free to try a more complex model or add any other additional techniques to try to improve your performance.
A 1-dimensional convolution can be useful for extracting features from sequential data or deriving features from shorter, fixed-length segments of the data set.
``` model = Sequential() # Reshape inputs to pass through RNN layer. model.add(Reshape(target_shape=[N_TIME_STEPS, 1])) model.add(LSTM(N_TIME_STEPS // 8, activation='relu', return_sequences=False)) model.add(Dense(units=N_LABELS, activation='softmax', kernel_regularizer=tf.keras.regularizers.l1(l=0.1))) # Create the model. model.compile(optimizer=Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy']) history = model.fit(x=Xtrain.values, y=ytrain_categorical, batch_size=Xtrain.shape[0], validation_data=(Xvalid.values, yvalid_categorical), epochs=40, verbose=0) plot_curves(history.history['loss'], history.history['val_loss'], label='Loss') plot_curves(history.history['accuracy'], history.history['val_accuracy'], label='Accuracy') np.mean(history.history['val_accuracy'][-5:]) ``` ### Multi-layer RNN Next, we'll build multi-layer RNN. Just as multiple layers of a deep neural network allow for more complicated features to be learned during training, additional RNN layers can potentially learn complex features in sequential data. For a multi-layer RNN the output of the first RNN layer is fed as the input into the next RNN layer. ``` rnn_hidden_units = [N_TIME_STEPS // 16, N_TIME_STEPS // 32] model = Sequential() # Reshape inputs to pass through RNN layer. 
model.add(Reshape(target_shape=[N_TIME_STEPS, 1])) for layer in rnn_hidden_units[:-1]: model.add(GRU(units=layer, activation='relu', return_sequences=True)) model.add(GRU(units=rnn_hidden_units[-1], return_sequences=False)) model.add(Dense(units=N_LABELS, activation="softmax", kernel_regularizer=tf.keras.regularizers.l1(l=0.1))) model.compile(optimizer=Adam(lr=0.001), loss='categorical_crossentropy', metrics=['accuracy']) history = model.fit(x=Xtrain.values, y=ytrain_categorical, batch_size=Xtrain.shape[0], validation_data=(Xvalid.values, yvalid_categorical), epochs=50, verbose=0) plot_curves(history.history['loss'], history.history['val_loss'], label='Loss') plot_curves(history.history['accuracy'], history.history['val_accuracy'], label='Accuracy') np.mean(history.history['val_accuracy'][-5:]) ``` Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
github_jupyter
#### 조금 더 상세한 Custom 평가 지표를 얻기 위해서 이전에 분리해둔 테스트 데이터를 가지고 캠페인 생성 후 별도 테스트를 진행하도록 합니다.
print("Exisiting User with cold item interaction in test dataset:", len(old_user_from_test_with_cold)) ``` #### 조금더 상세하고 Custum 평가 지표를 얻기 위해서 이전에 분리해둔 테스트 데이터를 가지고 캠페인 생성 후 별도 테스트를 진행하도록 합니다. ``` # 테스트 인터렉션 셋에 있는 모든 사용자를 대상으로 선능을 확인해 봅니다. relevance = [] counts=[] for user_id in tqdm_notebook(unique_user_from_test): true_items = set(df_test[df_test['USER_ID']==user_id]['ITEM_ID'].values) #print(true_items) rec_response = personalize_runtime.get_recommendations( campaignArn =user_personalization_campaign_arn , userId = str(user_id) ) rec_items = [int(x['itemId']) for x in rec_response['itemList']] relevance.append([int(x in true_items) for x in rec_items]) print('mean_reciprocal_rank', np.mean([mean_reciprocal_rank(r) for r in relevance])) print('precision_at_5', np.mean([precision_at_k(r, 5) for r in relevance])) print('precision_at_10', np.mean([precision_at_k(r, 10) for r in relevance])) print('precision_at_25', np.mean([precision_at_k(r, 25) for r in relevance])) print('normalized_discounted_cumulative_gain_at_5', np.mean([ndcg_at_k(r, 5) for r in relevance])) print('normalized_discounted_cumulative_gain_at_10', np.mean([ndcg_at_k(r, 10) for r in relevance])) print('normalized_discounted_cumulative_gain_at_25', np.mean([ndcg_at_k(r, 25) for r in relevance])) # 퍼스널라이즈에서 제공하는 메트릭과 비슷한지 비교해 봅니다. 
get_solution_metrics_response = personalize.get_solution_metrics( solutionVersionArn = user_personalization_solution_version_arn ) #print(json.dumps(get_solution_metrics_response, indent=2)) print('mean_reciprocal_rank',get_solution_metrics_response["metrics"]["mean_reciprocal_rank_at_25"], ) print('precision_at_5', get_solution_metrics_response["metrics"]["precision_at_5"]) print('precision_at_10', get_solution_metrics_response["metrics"]["precision_at_10"]) print('precision_at_25', get_solution_metrics_response["metrics"]["precision_at_25"]) print('normalized_discounted_cumulative_gain_at_5', get_solution_metrics_response["metrics"]["normalized_discounted_cumulative_gain_at_5"]) print('normalized_discounted_cumulative_gain_at_10', get_solution_metrics_response["metrics"]["normalized_discounted_cumulative_gain_at_10"]) print('normalized_discounted_cumulative_gain_at_25', get_solution_metrics_response["metrics"]["normalized_discounted_cumulative_gain_at_25"]) ``` ## 이벤트 트랙커 생성 아래 코드 셀은 특정 item과 상호 작용하는 사용자를 시뮬레이트하는 코드 입니다. 이벤트 트레커를 통해 실시간 스트림을 보내도록 하여 고객의 클릭정보에 따라 추천 항목이 변하는 것을 확인할 수 있습니다. ``` event_tracker_name="user-personalization-event-tracker-"+WORK_DATE print(dataset_group_arn) print(event_tracker_name) event_tracker_response = personalize.create_event_tracker( name=event_tracker_name, datasetGroupArn=dataset_group_arn ) event_tracker_arn = event_tracker_response['eventTrackerArn'] event_tracking_id = event_tracker_response['trackingId'] print('eventTrackerArn:{},\n eventTrackingId:{}'.format(event_tracker_arn, event_tracking_id)) ``` ## 사용자 행동 시뮬레이션 아래 코드 셀은 특정 item과 상호 작용하는 사용자를 시뮬레이트하는 코드 샘플을 제공하며, 시작할 때와 다른 추천 목록을 얻습니다. 
``` def get_movie_title(movie_id): """ Takes in an ID, returns a title """ movie_id = int(movie_id) movie_title=item_all[item_all['ITEM_ID']==movie_id]['TITLE'] return (movie_title.tolist()) session_dict = {} def send_movie_click(USER_ID, ITEM_ID): """ Simulates a click as an envent to send an event to Amazon Personalize's Event Tracker """ # Configure Session try: session_ID = session_dict[USER_ID] except: session_dict[USER_ID] = str(uuid.uuid1()) session_ID = session_dict[USER_ID] value=randint(0,5) # Configure Properties: event = { "itemId": str(ITEM_ID), "eventValue": value } event_json = json.dumps(event) # Make Call personalize_events.put_events( trackingId = event_tracking_id, # 이벤트트래커에서 생성한 아이디 userId= USER_ID, sessionId = session_ID, eventList = [{ 'sentAt': int(time.time()), 'eventType': 'RATING', 'properties': event_json }] ) user_id=old_user_from_test[0] get_recommendations_response = personalize_runtime.get_recommendations( campaignArn = user_personalization_campaign_arn, userId = str(user_id), ) item_list = get_recommendations_response['itemList'] recommendation_title_list = [] recommendation_id_list=[] for item in item_list: title = get_movie_title(item['itemId']) recommendation_title_list.append(title) recommendation_id_list.append(item['itemId']) recommendations_df = pd.DataFrame(recommendation_title_list ,columns = ['OriginalRecs']) recommendations_df # Pick a movie, we will use ID 270 or Miracle on 34th Street movie_to_click = item_warm['ITEM_ID'][0] movie_title_clicked = get_movie_title(movie_to_click) send_movie_click(USER_ID=str(user_id), ITEM_ID=movie_to_click) get_recommendations_response = personalize_runtime.get_recommendations( campaignArn = user_personalization_campaign_arn, userId = str(user_id) ) print("Recommendations for user: ", user_id) item_list = get_recommendations_response['itemList'] recommendation_list = [] for item in item_list: title = get_movie_title(item['itemId']) recommendation_list.append(title) new_rec_DF = 
pd.DataFrame(recommendation_list, columns = [movie_title_clicked]) recommendations_df = recommendations_df.join(new_rec_DF) recommendations_df ``` ## Cold Item interaction 내보내기 이번 실험에서는 콜드 스타트 아이템의 인터렉션을 발생하도록 합니다. User-Personalization 레시피를 통해 솔루션을 생성했을경우 새로운 인터렉션이 있을때마다 아마존 퍼스널라이즈는 매 2시간 마다 솔루션 버전을 자동으로 업데이트 합니다. 이때 새로운 아이템이 포함 되어 있다면 설정해 놓은 Exploration Weight 및 Exploration item age cut off 값에 따라 콜드 아이템도 추천하게 됩니다. 자세한 내용은 [여기](https://docs.aws.amazon.com/personalize/latest/dg/recording-events.html)를 참고하세요. 이 번 실습에서는 시간상 강제로 신규 아이템 인터렉션을 발생한 뒤 매뉴얼로 솔루션 버전 및 캠페인을 업데이트 하도록 합니다. ``` new_item_list=item_cold new_item_list['ITEM_ID']=new_item_list['ITEM_ID'].astype(str) new_item_list user_id=old_user_from_test_with_cold[2] for i in range(len(new_item_list)): value=randint(0,5) # Configure Properties: event = { "itemId": str(new_item_list['ITEM_ID'][i]), "eventValue": value } event_json = json.dumps(event) personalize_events.put_events( trackingId = event_tracking_id, userId= str(user_id), sessionId = str(i), eventList = [{ 'sentAt': datetime.now().timestamp(), 'eventType' : 'RATINGS', 'properties' : event_json }]) ``` ### 솔루션 버전 업데이트 및 캠페인 업데이트 #### 이벤트 트레커 정보가 데이터 쌓일때가지 충분한 시간을 주기 위해 약 5분정도 후에 아래 셀을 시작합니다. 
``` ## 새로운 인터렉션을 포함한 새로운 솔루션 버전 생성 create_solution_version_response = personalize.create_solution_version( solutionArn = user_personalization_solution_arn, trainingMode='UPDATE' ) user_personalization_solution_version_arn_new = create_solution_version_response['solutionVersionArn'] print(json.dumps(create_solution_version_response, indent=2)) %%time max_time = time.time() + 8*60*60 # 8 hours while time.time() < max_time: #hrnn status describe_solution_version_response = personalize.describe_solution_version( solutionVersionArn = user_personalization_solution_version_arn_new ) status= describe_solution_version_response["solutionVersion"]["status"] print("User-Personalization SolutionVersion: {}".format(status)) if (status== "ACTIVE" or status == "CREATE FAILED"): break time.sleep(300) print("All solution creation completed") ``` 방금 업데이트된 새로운 솔루션 버전으로 캠페인을 업데이트 합니다. 이 때 explorationWeight를 1로 변경하여 최대한 콜드 아이템에서만 추천 될수 있도록 합니다. ``` update_campaing_response = personalize.update_campaign( campaignArn=user_personalization_campaign_arn, solutionVersionArn=user_personalization_solution_version_arn_new, minProvisionedTPS=1, campaignConfig = {"itemExplorationConfig": {"explorationWeight": "1", "explorationItemAgeCutOff": "7"}} ) user_personalization_campaign_arn = update_campaing_response['campaignArn'] print(json.dumps(update_campaing_response, indent=2)) # Wait for campaign update to reflect the new explorationWeight explorationWeight = None max_time = time.time() + 3*60*60 # 3 hours while time.time() < max_time: describe_campaign_response = personalize.describe_campaign( campaignArn = user_personalization_campaign_arn ) solution_version = describe_campaign_response["campaign"]["solutionVersionArn"] if (solution_version == user_personalization_solution_version_arn_new): print("Current Campaign solution version {}".format(solution_version)) break time.sleep(60) ``` ## Cold Start Exploration Weight에 따른 테스트 이부분에서는 새롭게 더해진 새로운 아이템(ColdStart)에 대한 추천 성능을 테스트 해보도록 합니다. 
또한 Exploration Weight를 변경하면서 추천 아이템 중에 얼마 만큼의 콜드 아이템이 포함 되었는지 확인해 보도록 합니다. ``` metrics=[] def build_metric_matrix(solution,relevance): metrics.append([solution, np.mean([mean_reciprocal_rank(r) for r in relevance]), np.mean([precision_at_k(r, 5) for r in relevance]), np.mean([precision_at_k(r, 10) for r in relevance]), np.mean([precision_at_k(r, 10) for r in relevance]), np.mean([ndcg_at_k(r, 5) for r in relevance]), np.mean([ndcg_at_k(r, 10) for r in relevance]), np.mean([ndcg_at_k(r, 25) for r in relevance]), np.mean(counts)]) def is_cold_item(rec_items): rec_is_cold=[int(item) for item in rec_items if int(item) in item_cold['ITEM_ID'].astype('int64').unique()] #item_cold['ITEM_ID'].astype('int64')] return(rec_is_cold) relevance = [] is_cold_item_list=[] counts=[] for user_id in tqdm_notebook(old_user_from_test): true_items = set(df_test[df_test['USER_ID']==user_id]['ITEM_ID'].values) #print(true_items) rec_response = personalize_runtime.get_recommendations( campaignArn =user_personalization_campaign_arn , userId = str(user_id) ) rec_items = [int(x['itemId']) for x in rec_response['itemList']] arr=is_cold_item(rec_items) counts.append(len(arr)) is_cold_item_list.append(arr) relevance.append([int(x in true_items) for x in rec_items]) print('mean_reciprocal_rank', np.mean([mean_reciprocal_rank(r) for r in relevance])) print('precision_at_5', np.mean([precision_at_k(r, 5) for r in relevance])) print('precision_at_10', np.mean([precision_at_k(r, 10) for r in relevance])) print('precision_at_25', np.mean([precision_at_k(r, 25) for r in relevance])) print('normalized_discounted_cumulative_gain_at_5', np.mean([ndcg_at_k(r, 5) for r in relevance])) print('normalized_discounted_cumulative_gain_at_10', np.mean([ndcg_at_k(r, 10) for r in relevance])) print('normalized_discounted_cumulative_gain_at_25', np.mean([ndcg_at_k(r, 25) for r in relevance])) print('average number of cold items', np.mean(counts)) 
build_metric_matrix('user-personalization-coldstart-meta-update-100%',relevance) ``` 랜덤으로 추천하였을 경우 대비 Coldstart 성능이 얼마나 좋은 것인지 비교하여 보도록 합니다. ``` relevance = [] for user_id in tqdm_notebook(old_user_from_test): true_items = set(df_test[df_test['USER_ID']==user_id]['ITEM_ID'].values) rec_items = np.random.permutation(item_cold['ITEM_ID'].astype('int64').unique())[:25] relevance.append([int(x in true_items) for x in rec_items]) counts=[25] print('mean_reciprocal_rank', np.mean([mean_reciprocal_rank(r) for r in relevance])) print('precision_at_5', np.mean([precision_at_k(r, 5) for r in relevance])) print('precision_at_10', np.mean([precision_at_k(r, 10) for r in relevance])) print('precision_at_25', np.mean([precision_at_k(r, 25) for r in relevance])) print('normalized_discounted_cumulative_gain_at_5', np.mean([ndcg_at_k(r, 5) for r in relevance])) print('normalized_discounted_cumulative_gain_at_10', np.mean([ndcg_at_k(r, 10) for r in relevance])) print('normalized_discounted_cumulative_gain_at_25', np.mean([ndcg_at_k(r, 25) for r in relevance])) print('average number of cold items', np.mean(counts)) build_metric_matrix('random',relevance) ``` ### Update Campaign with 30% Exploration ``` update_campaing_response = personalize.update_campaign( campaignArn=user_personalization_campaign_arn, solutionVersionArn=user_personalization_solution_version_arn_new, minProvisionedTPS=1, campaignConfig = {"itemExplorationConfig": {"explorationWeight": "0.3", "explorationItemAgeCutOff": "7"}} ) user_personalization_campaign_arn = update_campaing_response['campaignArn'] print(json.dumps(update_campaing_response, indent=2)) # Wait for campaign update to reflect the new explorationWeight explorationWeight = None max_time = time.time() + 3*60*60 # 3 hours while time.time() < max_time: describe_campaign_response = personalize.describe_campaign( campaignArn = user_personalization_campaign_arn ) explorationWeight = 
describe_campaign_response["campaign"]["campaignConfig"]['itemExplorationConfig']['explorationWeight'] print("Current Campaign explorationWeight: {}".format(explorationWeight)) if explorationWeight == "0.3": break time.sleep(60) # wait 1 minutes time.sleep(60) relevance = [] is_cold_item_list=[] counts=[] for user_id in tqdm_notebook(old_user_from_test): true_items = set(df_test[df_test['USER_ID']==user_id]['ITEM_ID'].values) #print(true_items) rec_response = personalize_runtime.get_recommendations( campaignArn =user_personalization_campaign_arn , userId = str(user_id) ) rec_items = [int(x['itemId']) for x in rec_response['itemList']] arr=is_cold_item(rec_items) counts.append(len(arr)) is_cold_item_list.append(arr) relevance.append([int(x in true_items) for x in rec_items]) print('mean_reciprocal_rank', np.mean([mean_reciprocal_rank(r) for r in relevance])) print('precision_at_5', np.mean([precision_at_k(r, 5) for r in relevance])) print('precision_at_10', np.mean([precision_at_k(r, 10) for r in relevance])) print('precision_at_25', np.mean([precision_at_k(r, 25) for r in relevance])) print('normalized_discounted_cumulative_gain_at_5', np.mean([ndcg_at_k(r, 5) for r in relevance])) print('normalized_discounted_cumulative_gain_at_10', np.mean([ndcg_at_k(r, 10) for r in relevance])) print('normalized_discounted_cumulative_gain_at_25', np.mean([ndcg_at_k(r, 25) for r in relevance])) print('average number of cold items', np.mean(counts)) build_metric_matrix('user-personalization-coldstart-30%',relevance) ``` ### Update Campaign with 0% Exploration ``` update_campaing_response = personalize.update_campaign( campaignArn=user_personalization_campaign_arn, solutionVersionArn=user_personalization_solution_version_arn_new, minProvisionedTPS=1, campaignConfig = {"itemExplorationConfig": {"explorationWeight": "0", "explorationItemAgeCutOff": "7"}} ) user_personalization_campaign_arn = update_campaing_response['campaignArn'] print(json.dumps(update_campaing_response, indent=2)) 
# Wait for campaign update to reflect the new explorationWeight explorationWeight = None max_time = time.time() + 3*60*60 # 3 hours while time.time() < max_time: describe_campaign_response = personalize.describe_campaign( campaignArn = user_personalization_campaign_arn ) explorationWeight = describe_campaign_response["campaign"]["campaignConfig"]['itemExplorationConfig']['explorationWeight'] print("Current Campaign explorationWeight: {}".format(explorationWeight)) if explorationWeight == "0": break time.sleep(60) # wait 1 minutes time.sleep(60) relevance = [] is_cold_item_list=[] counts=[] for user_id in tqdm_notebook(old_user_from_test): true_items = set(df_test[df_test['USER_ID']==user_id]['ITEM_ID'].values) #print(true_items) rec_response = personalize_runtime.get_recommendations( campaignArn =user_personalization_campaign_arn , userId = str(user_id) ) rec_items = [int(x['itemId']) for x in rec_response['itemList']] arr=is_cold_item(rec_items) counts.append(len(arr)) is_cold_item_list.append(arr) relevance.append([int(x in true_items) for x in rec_items]) print('mean_reciprocal_rank', np.mean([mean_reciprocal_rank(r) for r in relevance])) print('precision_at_5', np.mean([precision_at_k(r, 5) for r in relevance])) print('precision_at_10', np.mean([precision_at_k(r, 10) for r in relevance])) print('precision_at_25', np.mean([precision_at_k(r, 25) for r in relevance])) print('normalized_discounted_cumulative_gain_at_5', np.mean([ndcg_at_k(r, 5) for r in relevance])) print('normalized_discounted_cumulative_gain_at_10', np.mean([ndcg_at_k(r, 10) for r in relevance])) print('normalized_discounted_cumulative_gain_at_25', np.mean([ndcg_at_k(r, 25) for r in relevance])) print('average number of cold items', np.mean(counts)) build_metric_matrix('user-personalization-coldstart-0%',relevance) df_metrics=pd.DataFrame(metrics,columns=['recipe','mrr','p@5','p@10','p@25','ndcg@5','ndcg@10','ndcg@25','cold_item_count']) df_metrics ```
github_jupyter
``` #pip install --upgrade tensorflow import tensorflow as tf print(tf.__version__) # Small LSTM Network to Generate Text for import numpy from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import LSTM from keras.callbacks import ModelCheckpoint from keras.utils import np_utils # load ascii text and covert to lowercase filename = r"C:\Users\dell\Downloads\sonnets.txt" raw_text = open(filename, 'r', encoding='utf-8').read() raw_text = raw_text.lower() # create mapping of unique chars to integers chars = sorted(list(set(raw_text))) char_to_int = dict((c, i) for i, c in enumerate(chars)) # summarize the loaded data n_chars = len(raw_text) n_vocab = len(chars) print("Total Characters: ", n_chars) print("Total Vocab: ", n_vocab) # prepare the dataset of input to output pairs encoded as integers seq_length = 100 dataX = [] dataY = [] for i in range(0, n_chars - seq_length, 1): seq_in = raw_text[i:i + seq_length] seq_out = raw_text[i + seq_length] dataX.append([char_to_int[char] for char in seq_in]) dataY.append(char_to_int[seq_out]) n_patterns = len(dataX) print("Total Patterns: ", n_patterns) # reshape X to be [samples, time steps, features] X = numpy.reshape(dataX, (n_patterns, seq_length, 1)) # normalize X = X / float(n_vocab) # one hot encode the output variable y = np_utils.to_categorical(dataY) # define the LSTM model model = Sequential() model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]))) model.add(Dropout(0.2)) model.add(Dense(y.shape[1], activation='softmax')) model.compile(loss='categorical_crossentropy', optimizer='adam') # define the checkpoint filepath="weights-improvement-{epoch:02d}-{loss:.4f}.hdf5" checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min') callbacks_list = [checkpoint] # fit the model model.fit(X, y, epochs=20, batch_size=128, callbacks=callbacks_list) # Load LSTM network and generate text import sys import numpy from 
keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import LSTM from keras.callbacks import ModelCheckpoint from keras.utils import np_utils # load ascii text and covert to lowercase filename = "sonnets.txt" raw_text = open(filename, 'r', encoding='utf-8').read() raw_text = raw_text.lower() # create mapping of unique chars to integers, and a reverse mapping chars = sorted(list(set(raw_text))) char_to_int = dict((c, i) for i, c in enumerate(chars)) int_to_char = dict((i, c) for i, c in enumerate(chars)) # summarize the loaded data n_chars = len(raw_text) n_vocab = len(chars) print("Total Characters: ", n_chars) print("Total Vocab: ", n_vocab) # prepare the dataset of input to output pairs encoded as integers seq_length = 100 dataX = [] dataY = [] for i in range(0, n_chars - seq_length, 1): seq_in = raw_text[i:i + seq_length] seq_out = raw_text[i + seq_length] dataX.append([char_to_int[char] for char in seq_in]) dataY.append(char_to_int[seq_out]) n_patterns = len(dataX) print("Total Patterns: ", n_patterns) # reshape X to be [samples, time steps, features] X = numpy.reshape(dataX, (n_patterns, seq_length, 1)) # normalize X = X / float(n_vocab) # one hot encode the output variable y = np_utils.to_categorical(dataY) # define the LSTM model model = Sequential() model.add(LSTM(256, input_shape=(X.shape[1], X.shape[2]))) model.add(Dropout(0.2)) model.add(Dense(y.shape[1], activation='softmax')) # load the network weights filename = "weights-improvement-19-2.2731.hdf5" model.load_weights(filename) model.compile(loss='categorical_crossentropy', optimizer='adam') # pick a random seed start = numpy.random.randint(0, len(dataX)-1) pattern = dataX[start] print("Seed:") print("\"", ''.join([int_to_char[value] for value in pattern]), "\"") # generate characters for i in range(1000): x = numpy.reshape(pattern, (1, len(pattern), 1)) x = x / float(n_vocab) prediction = model.predict(x, verbose=0) index = 
numpy.argmax(prediction) result = int_to_char[index] seq_in = [int_to_char[value] for value in pattern] sys.stdout.write(result) pattern.append(index) pattern = pattern[1:len(pattern)] print("\nDone.") ```
github_jupyter
# Introduction to Spark Using Spark we are going to read in this data and calculate the average age. First, we need to initialize a SparkSession: ``` from pyspark.sql import SparkSession spark = SparkSession \ .builder \ .appName("Spark Example") \ .getOrCreate() ``` Let’s go ahead and create a Spark Dataset from our Lord of the Rings age data. Included in the Spark directory for this chapter is a file called ages.json which includes the age data in JSON lines format. It looks like: ``` {"Name": "Bilbo", "Age": 28} {"Name": "Frodo", "Age": 26} {"Name": "Gandalf", "Age": 62} {"Name": "Samwise", "Age": 30} {"Name": "Sauron", "Age": 72} {"Name": "Aragorn", "Age": 31} ``` Now, we can read in `ages.json` as a Spark Dataset: ``` df = spark.read.json('ages.json').repartition(10).cache() ``` Now we have a Dataset (also called DataFrame in accordance with Pandas) representing our data. We can leverage the Spark SQL API to calculate an aggregation over the dataset, which in our case is an average: ``` df.agg({"Age": "avg"}).collect() ``` We can also execute calculations at the row level. For example, let’s calculate each of the character’s age in dog years (age times 7): ``` df.withColumn('dog_years', df.Age*7).collect() ``` Best of all, this calculation would have scaled automatically across our computing cluster if we had more than one node. Notice something at the end of each of the commands above? If you are thinking, what does `.collect()` do then you’re onto something. Spark executes code lazily. This means that *transformations* such as calculating the characters’ age in dog years is only executed once an *action* is called. The `.withColumn()` command is a *transformation* while `.collect()` is the *action* which causes the *transformation* to be executed. Often, the *action* which causes execution of our *transformations* is writing the job’s output to disk, HDFS, or S3. 
Let’s try to create a new Dataset which includes the characters’ ages in dog years, then let’s write this out to disk: ``` df_new = df.withColumn('dog_years', df.Age*7) ``` Now we have a new Dataset called `df_new`. Note that nothing has been calculated yet; we have simply mapped the function we want across the cluster so that when we call an action on `df_new` such as `.collect()` or try to write the output to disk the transformation will be executed. We can write `df_new` to disk with the following: ``` df_new.write.mode('append').json("dog_years.json") ``` We can even execute a filter ``` filtered = df.filter("name = 'Bilbo'") filtered.collect() ``` ## Below are deprecated examples if we want to go back to RDD based examples we can use this code: ``` import pyspark sc = pyspark.SparkContext('local[*]') objects = [] for i in range(1000): msg = {'id': i, 'payload': 'Here is an example payload for id {}'.format(i)} objects.append(msg) ``` Now, let's parallelize the array of messages and convert it into an RDD: ``` rdd = sc.parallelize(objects) ``` Now we can take a random sample of the `rdd` we have just created. ``` # we can take a sample of 5 messages without replacement # this just means that once selected, a message is not available to be selected again rdd.takeSample(False, 5) ``` When we operate on an RDD we need to use map functions which takes a function and applies it across the `rdd`. For example, we can map a function which grabs the message `id`. This new rdd, `rdd_ids` will only contain the `id` field. ``` rdd_ids = rdd.map(lambda x: x['id']) ``` We can take the first 5 items in `rdd_ids` to see what just happened. ``` rdd_ids.take(5) ``` Now we can apply a `reduce` style function, such as a sum, on our new `rdd`. ``` rdd_ids.sum() ``` We can even define more complicated functions which insert or manipulate data and `map` them to the `rdd`. 
``` import random def randomize_id(msg): msg['rand_id'] = msg['id'] * random.randint(1,100) return msg rdd_rand_ids = rdd.map(lambda x: randomize_id(x)) ``` Just to check what happened, let's take a look at the first 5 items in `rdd_rand_ids`. Notice that a new field, called `rand_id` has been added to each object. This is a new random id. ``` rdd_rand_ids.take(5) ```
github_jupyter
# Bayesian Data Analysis Course - Chapter 3 Exercises
``` import pandas as pd import numpy as np import matplotlib.pyplot as plt from scipy.stats import t, chi2, norm ws1 = pd.read_csv('Data//windshieldy1.txt', sep=" ", header=None) ws1 = ws1.to_numpy() ws_test = [13.357, 14.928, 14.896, 14.820] def mu_point_est(ws_data): return(np.mean(ws_data)) def mu_interval(ws_data, prob): y_mean = np.mean(ws_data) n = len(ws_data) s_sqr = 1/(n - 1)*sum((ws_data-y_mean)**2) return t.interval(prob, df = n-1, loc = y_mean, scale = np.sqrt(s_sqr/n)) def mu_plot(ws_data): y_mean = np.mean(ws_data) n = len(ws_data) s_sqr = 1/(n - 1)*sum((ws_data-y_mean)**2) mu_space = np.linspace(y_mean - 20*s_sqr/n, y_mean + 20*s_sqr/n, 200) probs = t.pdf(mu_space, df = n-1, loc=y_mean, scale=np.sqrt(s_sqr/n)) fig, ax = plt.subplots() ax.plot(mu_space, probs) ax.vlines(y_mean, colors = 'blue', ymin = 0, ymax = max(probs)) ax.vlines(mu_interval(ws_data, 0.95), colors = 'red', linestyles="dashed", ymin = 0, ymax = max(probs)) plt.title("Marginal posterior density for μ") plt.ylabel("p(μ|y)") plt.xlabel("μ") plt.show() def show_answer_1a(ws_data, prob=0.95): print("E(μ|y) =", mu_point_est(ws_data)) print("95% C.I:", mu_interval(ws_data, 0.95)) mu_plot(ws_data) show_answer_1a(ws1) ``` #### b) What can you say about the hardness of the next windshield coming from the production line before actually measuring the hardness? Summarize your results using Bayesian point estimate, apredictive interval (95\%), and plot the density. 
``` def show_answer_1b(ws_data): y_mean = np.mean(ws_data) n = len(ws_data) s_sqr = 1/(n - 1)*sum((ws_data-y_mean)**2) y_space = np.linspace(y_mean - 5*np.sqrt((1+1/n)*s_sqr), y_mean + 5*np.sqrt((1+1/n)*s_sqr), 200) probs = t.pdf(y_space, df = n-1, loc=y_mean, scale=np.sqrt((1+1/n)*s_sqr)) q95 = t.interval(0.95, df = n-1, loc=y_mean, scale=np.sqrt((1+1/n)*s_sqr)) print("E(y_bar|y) =", y_mean) print("95% C.I:", q95) fig, ax = plt.subplots() ax.plot(y_space, probs) ax.vlines(y_mean, colors = 'blue', ymin = 0, ymax = max(probs)) ax.vlines(q95, colors = 'red', linestyles="dashed", ymin = 0, ymax = max(probs)) plt.title("Posterior predictive distribution") plt.ylabel("p(y_bar|y)") plt.xlabel("y_bar") plt.show() show_answer_1b(ws1) ``` ### Exercise 2 - Inference for the difference between proportions An experiment was performed to estimate the effect of beta-blockers on mortality of cardiac patients. A group of patients was randomly assigned to treatment and control groups: out of 674 patients receiving the control, 39 died, and out of 680 receiving the treatment, 22 died. Assume that the outcomes are independent and binomially distributed, with probabilities of death of $p_0$ and $p_1$ under the control and treatment, respectively. Set up a noninformative or weakly informative prior distribution on $(p_0,p_1)$. #### In the report, formulate (1) model likelihood, (2) the prior, and (3) the resulting posterior. **Answer:** Let $k = 0, 1$, where $0$ indicates control group, and $1$ indicates treatment group. The model likelihood is $$p(y_k|p_k) = Binomial(p_k, n_k)$$. A non-informative prior for $p_k$ is is $Beta(1, 1)$, which is equivalent to a uniform(0, 1). This yields the following posterior: $$p(p_k|y) = Beta(1+y_k, 1+n_k-y_k)$$ so $p(p_0|y_0) = Beta(40, 636)$ and $p(p_1|y_1) = Beta(23, 659)$. #### a) Summarize the posterior distribution for the odds ratio, $(p_1/(1-p_1))/(p_0/(1-p_0))$. Compute the point estimate, a posterior interval (95\%), and plot the histogram. 
Use [Frank Harrell's recommendations](http://www.fharrell.com/2017/10/bayesian-vs-frequentist-statements.html) how to state results in Bayesian two group comparison. ``` from scipy.stats import beta α0, β0, α1, β1 = 40, 636, 23, 659 p0 = beta.rvs(a = α0, b = β0, size = 100000) p1 = beta.rvs(a = α1, b = β1, size = 100000) odds = (p1/(1-p1)) / (p0/(1-p0)) point_estimate = np.mean(odds) q95 = np.sort(odds)[2500:97500] q95 = [min(q95), max(q95)] # also calculate probability that odds ratio < 1: prob_odds_leq_1 = 1 -len(odds[odds > 1])/len(odds) print("point estimate:", np.round(point_estimate, 3)) print("95% CI:", np.round(q95, 3)) print("probability odds ratio < 1:", prob_odds_leq_1) fig, ax = plt.subplots() ax.hist(odds, bins = 50, color='c', edgecolor='k', alpha=0.65) ax.axvline(x=point_estimate, ymin = 0, ymax = max(odds), color = 'red', linewidth=1) ax.axvline(x=q95[0], ymin = 0, ymax = max(odds), color = 'blue', linestyle = "dashed", linewidth=1) ax.axvline(x=q95[1], ymin = 0, ymax = max(odds), color = 'blue', linestyle = "dashed", linewidth=1) ax.set_title('Odds ratio - effect of beta blockers') ax.set_ylabel('count') ax.set_xlabel('Odds ratio') ax.tick_params(axis='x', reset=True, top=False) plt.show() ``` Assuming uniform prior for both $p_0$ and $p_1$, the odds are probabily (0.988) lower under beta blockers. The posterior mean of the odds ratio is 0.57, and the probability that it is between 0.321 and 0.925 is 95\%. #### b) Discuss the sensitivity of your inference to your choice of prior density with a couple of sentences. **Answer:** The non-informative Beta(1, 1) prior was assigned, but it is not necessarily the best choise. Prior knowledge about the actual probability of mortality under no treatment could be useful in assign a more reasonable prior. 
But since the sample sizes are large (674 and 680), the weight of the prior in the posterior is actually low, and a prior more centered in low values (instead of uniform in \[0, 1\]) is better but would result in almost equal conclusions. What our prior do, that isn't reasonable, is put a heavier tail in the right side of the posterior for $p_0$ and $p_1$. But this is "outweighted", given the sample size. ### Exercise 3 - Inference for the difference between normal means Consider a case where the same factory has two production lines for manufacturing car windshields. Independent samples from the two production lines were tested for hardness. The hardness measurements for the two samples $\mathbf{y}_1$ and $\mathbf{y}_2$ are given in the files *windshieldy1.txt* and *windshieldy2.txt*. We assume that the samples have unknown standard deviations $\sigma_1$ and $\sigma_2$. #### In the report, formulate (1) model likelihood, (2) the prior, and (3) the resulting posterior. **Answer:** Same as in exercise 1 (non-informative prior). #### a) What can you say about $\mu_d = \mu_1 - \mu_2$? Summarize your results using a Bayesian point estimate, a posterior interval (95\%), and plot the histogram. Use [Frank Harrell's recommendations](http://www.fharrell.com/2017/10/bayesian-vs-frequentist-statements.html) how to state results in Bayesian two group comparison. 
``` ws2 = pd.read_csv('Data//windshieldy2.txt', sep=" ", header=None) ws2 = ws2.to_numpy() def sim_mu(ws_data, size): y_mean = np.mean(ws_data) n = len(ws_data) s_sqr = 1/(n - 1)*sum((ws_data-y_mean)**2) return t.rvs(df = n-1, loc = y_mean, scale = np.sqrt(s_sqr/n), size = size) size = 500000 mu_ws1 = sim_mu(ws1, size) mu_ws2 = sim_mu(ws2, size) μd = mu_ws1 - mu_ws2 point_estimate = np.mean(μd) q95 = np.sort(μd)[int(size*0.025):int(size*0.975)] q95 = [min(q95), max(q95)] μd_geq0 = len(μd[μd > 0])/len(μd) print("point estimate:", np.round(point_estimate, 3)) print("95% CI:", np.round(q95, 3)) print("p(μd > 0) =", μd_geq0) fig, ax = plt.subplots() ax.hist(μd, bins = 50, color='c', edgecolor='k', alpha=0.65) ax.axvline(x=point_estimate, ymin = 0, ymax = max(odds), color = 'red', linewidth=1) ax.axvline(x=q95[0], ymin = 0, ymax = max(odds), color = 'blue', linestyle = "dashed", linewidth=1) ax.axvline(x=q95[1], ymin = 0, ymax = max(odds), color = 'blue', linestyle = "dashed", linewidth=1) ax.set_title('Difference in true mean distribution') ax.set_ylabel('count') ax.set_xlabel('Difference in mean') ax.tick_params(axis='x', reset=True, top=False) plt.show() ``` Considering non-informative priors, windshields hardness of production line 1, $\mathbf(y_1)$ are probably (0.972) higher than windshields hardness of production line 2. The probability that production line 1 hardness is inferior is 0.028. The posterior mean difference was $1.21$, and the $0.95$ C.I. $[-0.035, 2.459]$. The probability is 0.95 that the true mean difference is in the interval $[-0.035, 2.459]$. #### b) What is the probability that the means are the same? Explain your reasoning with a couple of sentences. The probability that the means are the same is zero, since $\mu_d$ has a continous distribution. But we can assume a minimally important difference. 
For example, if the minimally important difference is $m$, the probability that $\mu_1$ is within $m$ of $\mu_2$ is the probability that $\mu_d \in [-m, m]$. If $m = 1$, this probability is:
```
minimal_importance = 1
# Fraction of posterior draws with |μd| <= m, i.e. p(-m <= μd <= m | y).
within_m = μd[(μd >= -minimal_importance) & (μd <= minimal_importance)]
print("p(-1<μd<1|y) =", len(within_m)/len(μd))
```
github_jupyter
``` # # google colab tesla P100 # ! pip install numpy==1.17.4 scipy==1.3.1 pandas==0.25.3 tensorflow-gpu==2.0.0 torch==1.3.1 torchvision==0.4.2 scikit-learn==0.21.3 # ! pip install transformers==2.2.1 # ! pip install git+https://github.com/huggingface/transformers.git # # linux系統指令 可省略 win可能跑不了 # ! nvidia-smi # ! lscpu # ! free -h try: import os f = os.popen('nvidia-smi') f = f.read() print(f) f = os.popen('lscpu') f = f.read() print(f) f = os.popen('free -h') f = f.read() print(f) except: pass import pandas as pd import numpy as np import tensorflow as tf from transformers import BertTokenizer, TFBertModel, TFBertForSequenceClassification, TFBertMainLayer, AdamW, BertConfig # 讀取tsv # \t tab做區隔 df_news = pd.read_csv("https://github.com/roccqqck/news_bert/raw/master/data/2015_Company.tsv", sep="\t", encoding="utf-8") df_news['text'] = df_news['text'].astype(str) df_news df_news['label'].value_counts() # 文章字數 > 510了話 去尾 # 字數小於512-2 因為還有CLS SEP def remove_510(text): if len(text) > 510: text = text[:510] # 只取前510個字 return text df_news["text"] = df_news["text"].apply(remove_510) df_news.head(2) # Load pre-trained model tokenizer, to convert our text into tokens that correspond to BERT’s vocabulary. 
tokenizer = BertTokenizer.from_pretrained('bert-base-chinese') ``` ![avatar](https://github.com/roccqqck/news_bert/raw/master/bert_input_encoding.jpg) https://github.com/roccqqck/news_bert/raw/master/bert_input_encoding.jpg bert input features 有3個 input_ids: 代表識別每個 token 的索引值,用 tokenizer 轉換即可 token_type_ids: 用來識別句子界限。第一句為 0,第二句則為 1。另外注意句子間的 [SEP] 為 0 (optional) 輸入有1句非必要 輸入有2句則必要 attention_mask: 用來界定自注意力機制範圍。1 讓 BERT 關注該位置,0 則代表是 padding 不需關注 (optional) https://huggingface.co/transformers/model_doc/bert.html#tfbertforsequenceclassification ``` # Tokenize input text = "很好看的動作片,不會浪費錢跟時間。很久沒有這樣的探險片。可說是女版的印第安那瓊。" tokens = tokenizer.tokenize(text) # 每個字切詞成一個list print(type(tokens)) # list np.array(tokens) # 轉成numpy input_ids = tokenizer.convert_tokens_to_ids(tokens) # 每個字轉成id print(type(input_ids)) # list print(len(input_ids)) np.array(input_ids) token_type_ids = tokenizer.create_token_type_ids_from_sequences(input_ids) # token_type_ids 必須input還沒加CLS SEP print(type(token_type_ids)) # list print(len(token_type_ids)) np.array(token_type_ids) input_ids = tokenizer.build_inputs_with_special_tokens(input_ids) # 句子前後加上 CLS SEP 的 id print(type(input_ids)) print(len(input_ids)) np.array(input_ids) n = 512 - len(input_ids) input_ids2 = np.pad(input_ids, (0, n), mode ='constant', constant_values=(0)) # array右邊append n 個 0 補長度到512 print(len(input_ids2)) input_ids2 # input如果是兩個句子 text = "很好看的動作片" tokens = tokenizer.tokenize(text) # 每個字切詞成一個list print(type(tokens)) # list np.array(tokens) # 轉成numpy text2 = "不會浪費錢跟時間" tokens2 = tokenizer.tokenize(text2) # 每個字切詞成一個list print(type(tokens2)) # list np.array(tokens2) # 轉成numpy input_ids = tokenizer.convert_tokens_to_ids(tokens) # 每個字轉成id print(type(input_ids)) # list print(len(input_ids)) np.array(input_ids) input_ids2 = tokenizer.convert_tokens_to_ids(tokens2) # 每個字轉成id print(type(input_ids2)) # list print(len(input_ids2)) np.array(input_ids2) token_type_ids = tokenizer.create_token_type_ids_from_sequences(input_ids, input_ids2) # 
token_type_ids 必須input還沒加CLS SEP print(type(token_type_ids)) # list print(len(token_type_ids)) np.array(token_type_ids) input_ids3 = tokenizer.build_inputs_with_special_tokens(input_ids, input_ids2) # 句子前後加上 CLS SEP 的 id print(type(input_ids3)) print(len(input_ids3)) np.array(input_ids3) ``` numpy.pad 補0到某長度 https://docs.scipy.org/doc/numpy/reference/generated/numpy.pad.html 也可以使用 ```from keras.preprocessing.sequence import pad_sequences``` ``` def input_ids_all(text): # tokenizer = BertTokenizer.from_pretrained('bert-base-chinese') tokens = tokenizer.tokenize(text) # 每個字切詞成一個list input_ids = tokenizer.convert_tokens_to_ids(tokens) # 每個字轉成id input_ids = tokenizer.build_inputs_with_special_tokens(input_ids) # 句子前後加上 CLS SEP 的 id input_ids = np.array(input_ids) # list 轉 numpy if len(input_ids) < 512: n = 512 - len(input_ids) input_ids = np.pad(input_ids, (0, n), mode ='constant', constant_values=(0)) # array右邊append n 個 0 補長度到512 return input_ids text = "很好看的動作片,不會浪費錢跟時間。很久沒有這樣的探險片。可說是女版的印第安那瓊。" input_ids_all(text) def attention_mask_all(text): tokens = tokenizer.tokenize(text) # 每個字切詞成一個list input_ids = tokenizer.convert_tokens_to_ids(tokens) # 每個字轉成id input_ids = tokenizer.build_inputs_with_special_tokens(input_ids) # 句子前後加上 CLS SEP 的 id input_ids = np.array(input_ids) # list 轉 numpy attention_mask = np.array([1,1]) attention_mask = np.pad(attention_mask, (0, len(input_ids)-2 ), mode ='constant', constant_values=(1)) # array右邊append 1 到跟segment一樣長 if len(attention_mask) < 512: n = 512 - len(attention_mask) attention_mask = np.pad(attention_mask, (0, n), mode ='constant', constant_values=(0)) # array右邊append n 個 0 補長度到512 return attention_mask attention_mask_all(text) # 事實上單一句子 出來都是0 不做也沒差 def token_type_ids_all(text): tokens = tokenizer.tokenize(text) # 每個字切詞成一個list input_ids = tokenizer.convert_tokens_to_ids(tokens) # 每個字轉成id input_ids = np.array(input_ids) # list 轉 numpy token_type_ids = tokenizer.create_token_type_ids_from_sequences(input_ids) # token_type_ids 
必須input還沒加CLS SEP token_type_ids = np.array(token_type_ids) # list 轉numpy if len(token_type_ids) < 512: n = 512 - len(token_type_ids) token_type_ids = np.pad(token_type_ids, (0, n), mode ='constant', constant_values=(0)) # array右邊append n 個 0 補長度到512 return token_type_ids token_type_ids_all(text) ``` 最後我決定用pandas的apply 比較好視覺化理解 ``` # df['text2'] = df['text']].apply(lambda x: " ".join(jieba.cut(x))) # df_news['tokens'] = df_news['text'].apply(lambda x: tokenizer.tokenize(x) ) df_news['input_ids'] = df_news['text'].apply(input_ids_all) df_news['attention_mask'] = df_news['text'].apply(attention_mask_all) df_news['token_type_ids'] = df_news['text'].apply(token_type_ids_all) df_news.head(2) # df_news['input_ids'].to_numpy() # 提出來 竟然不是2d numpy 不能這樣做 print(len(df_news['input_ids'][0])) df_news['input_ids'][0] print(len(df_news['token_type_ids'][0])) df_news['token_type_ids'][0] print(len(df_news['attention_mask'][0])) df_news['attention_mask'][0] # # df_news['input_ids'].to_numpy() 出來不是一個2d numpy # # 只好用for loop一個一個拿出來合併 # # 用np vstack超級慢 不知道為何 改用最外層是list append # input_ids = np.zeros((1, 512)).astype(int) #宣吿一個都是0的1*512 numpy # np.zeros預設是float 改成int 不然bert餵不進去 # for index, row in df_news.iterrows(): # element = df_news.loc[index,'input_ids'] # input_ids = np.vstack((input_ids, np.array([element]))) # 2維 合併 # input_ids = np.delete(input_ids, 0, 0) # 刪掉一開始都是0的那一個宣告 # input_ids ``` https://www.quora.com/Is-it-better-to-use-np-append-or-list-append ``` # df_news['input_ids'].to_numpy() 出來不是一個2d numpy # 只好用for loop一個一個拿出來合併 # 用np vstack超級慢 不知道為何 改用最外層是list append input_ids = [] # list for index, row in df_news.iterrows(): np_1d = df_news.loc[index,'input_ids'] # 1d np arrary input_ids.append(np_1d) # 1d np的 list # list[np_1, np_2, np_3, ....] 
input_ids = np.array(input_ids) # 轉成2d np input_ids input_ids.shape # torch.tensor(input_ids) # numpy 轉 torch tensor tf.convert_to_tensor(input_ids) # numpy 轉 tf tensor attention_mask = [] # list for index, row in df_news.iterrows(): np_1d = df_news.loc[index,'attention_mask'] # 1d np arrary attention_mask.append(np_1d) # 1d np的 list # list[np_1, np_2, np_3, ....] attention_mask = np.array(attention_mask) # 轉成2d np attention_mask attention_mask.shape token_type_ids = [] # list for index, row in df_news.iterrows(): np_1d = df_news.loc[index,'token_type_ids'] # 1d np arrary token_type_ids.append(np_1d) # 1d np的 list # list[np_1, np_2, np_3, ....] token_type_ids = np.array(token_type_ids) # 轉成2d np token_type_ids token_type_ids.shape label = df_news['label'].to_numpy() label # 把2個input_ids, attention_mask , token_type_ids 還有label 切成training data, validation data from sklearn.model_selection import train_test_split # Use train_test_split to split our data into train and validation sets for training # # 設定 stratify = label 把每個類別平均 train_input_ids, validation_input_ids, train_label, validation_label = train_test_split(input_ids, label, random_state=2018, test_size=0.5, stratify=label ) train_attention_mask, validation_attention_mask, _, _ = train_test_split(attention_mask, label, random_state=2018, test_size=0.5, stratify=label ) train_token_type_ids, validation_token_type_ids, _, _ = train_test_split(token_type_ids, label, random_state=2018, test_size=0.5, stratify=label ) # # input 可用 numpy 或 tf tensor 下面是numpy轉tf tensor # train_input_ids = tf.convert_to_tensor(train_input_ids) # validation_input_ids = tf.convert_to_tensor(validation_input_ids) # train_label = tf.convert_to_tensor(train_label) # validation_label = tf.convert_to_tensor(validation_label) # train_attention_mask = tf.convert_to_tensor(train_attention_mask) # validation_attention_mask = tf.convert_to_tensor(validation_attention_mask) # train_token_type_ids = tf.convert_to_tensor(train_token_type_ids) # 
validation_token_type_ids = tf.convert_to_tensor(validation_token_type_ids) from tensorflow.keras import Sequential, Model, Input from tensorflow.keras.layers import Dense, Embedding, SpatialDropout1D, Dropout, Activation, Flatten, InputLayer # Prepare training: Compile tf.keras model with optimizer, loss and learning rate schedule # model = TFBertForSequenceClassification.from_pretrained('bert-base-chinese', num_labels=5) # 下面model跟 TFBertForSequenceClassification一樣 input_layer = Input(shape = (512,), dtype='int64') # 預設是float 要改成input_id的int64 bert = TFBertModel.from_pretrained('bert-base-chinese')(input_layer) bert = bert[0] # 有bug 修正後可能不需要這行 dropout = Dropout(0.1)(bert) flat = Flatten()(dropout) classifier = Dense(units=5, activation="softmax")(flat) # 分5類 # sigmoid改softmax model = Model(inputs=input_layer, outputs=classifier) model.summary() # softmax只好用5e-6 # sigmoid可以用1e-5 # 預設無activation了話可以3e-5 optimizer = tf.keras.optimizers.Adam(learning_rate=7e-6, epsilon=1e-08, clipnorm=1.0) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metric = tf.keras.metrics.SparseCategoricalAccuracy('accuracy') model.compile(optimizer=optimizer, loss=loss, metrics=[metric]) %%time # Train and evaluate using tf.keras.Model.fit() # batch size 8就會error 是GPU記憶體爆掉 model_fit = model.fit(train_input_ids, train_label, batch_size=4, epochs=4, validation_data=(validation_input_ids, validation_label) # steps_per_epoch=115, # validation_steps=7) ) ``` https://huggingface.co/transformers/model_doc/bert.html#tfbertforsequenceclassification attention_mask 或 token_type_ids 不一定要放輸入 可選擇 要放了話要加```[ ]``` ```model.fit([train_input_ids, train_attention_mask, train_token_type_ids], train_label)``` 就是```model.fit(X_train, Y_train)``` ```[train_input_ids, train_attention_mask, train_token_type_ids]``` 就是 ```X_train``` ```train_label``` 就是 ```Y_train``` ``` # model_fit = model.fit([train_input_ids, train_attention_mask, train_token_type_ids], train_label, # batch_size=4, epochs=1, 
# validation_data=([validation_input_ids, validation_attention_mask, validation_token_type_ids], validation_label) # ) # # 如果train到一半 想要重新train 在jupyter裡面interrupt kernel # # 這時候 model還是存在在記憶體裡面 只是train到一半 要重新train要釋放model的記憶體 # del model # model.evaluate(validation_input_ids, validation_label, verbose=1) ``` https://blog.csdn.net/zds13257177985/article/details/80638384 ```predictions = model.predict(test)```預測的是數值,而且輸出的是n*5的編碼值array 要經過```predictions = np.argmax(predictions, axis=1)```才是類別 ``` %%time predictions = model.predict(validation_input_ids) # 輸出的是n*5的編碼值array print(predictions.shape) predictions predictions = np.argmax(predictions, axis=1) # axis = 1是取行的最大值的索引,0是列的最大值的索引 predictions from sklearn.metrics import accuracy_score # from sklearn.metrics import precision_score # from sklearn.metrics import recall_score # from sklearn.metrics import f1_score # from sklearn.metrics import cohen_kappa_score # from sklearn.metrics import roc_auc_score from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report print(accuracy_score(validation_label, predictions)) # print(precision_score(validation_label, predictions)) # print(recall_score(validation_label, predictions)) # print(f1_score(validation_label, predictions)) print(confusion_matrix(validation_label, predictions)) print(classification_report(validation_label, predictions)) # model 存起來 tf.keras.models.save_model( model, "model/model_bert_eland_softmax_1", overwrite=True, include_optimizer=True, ) # # Save the entire model to a HDF5 file. # # The '.h5' extension indicates that the model shuold be saved to HDF5. 
# # model 存起來 # model.save('model/my_model.h5') # 失敗 tensorflow2.0 bug 修正後可能可以存成h5 # Recreate the exact same model, including its weights and the optimizer # 讀取存的model input_layer = Input(shape = (512,), dtype='int64') # 預設是float 要改成input_id的int64 # tensorflow2.0 bug 修正後可能不需要這行 load_model = tf.keras.models.load_model('model/model_bert_eland_softmax_1')(input_layer) new_model = Model(inputs=input_layer, outputs=load_model) # tensorflow2.0 bug 修正後可能不需要這行 # Show the model architecture new_model.summary() %%time predictions = new_model.predict(validation_input_ids) # 輸出的是n*5的編碼值array print(predictions.shape) predictions predictions = np.argmax(predictions, axis=1) # axis = 1是取行的最大值的索引,0是列的最大值的索引 predictions from sklearn.metrics import accuracy_score # from sklearn.metrics import precision_score # from sklearn.metrics import recall_score # from sklearn.metrics import f1_score # from sklearn.metrics import cohen_kappa_score # from sklearn.metrics import roc_auc_score from sklearn.metrics import confusion_matrix from sklearn.metrics import classification_report print(accuracy_score(validation_label, predictions)) # print(precision_score(validation_label, predictions)) # print(recall_score(validation_label, predictions)) # print(f1_score(validation_label, predictions)) print(confusion_matrix(validation_label, predictions)) print(classification_report(validation_label, predictions)) ```
github_jupyter
##### Copyright 2020 Google LLC. Licensed under the Apache License, Version 2.0 (the "License"); ``` #@title License header # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # MNIST Model TensorFlow Training, IREE Execution ## Overview This notebook creates and trains a TensorFlow 2.0 model for recognizing handwritten digits using the [MNIST dataset](https://en.wikipedia.org/wiki/MNIST_database), then compiles and executes that trained model using IREE. ## Running Locally * Refer to [iree/docs/using_colab.md](https://github.com/google/iree/blob/master/docs/using_colab.md) for general information * Ensure that you have a recent version of TensorFlow 2.0 [installed on your system](https://www.tensorflow.org/install) * Enable IREE/TF integration by adding to your user.bazelrc: `build --define=iree_tensorflow=true` * Start colab by running `python colab/start_colab_kernel.py` (see that file for additional instructions) * Note: you may need to restart your runtime in order to re-run certain cells. Some of the APIs are not yet stable enough for repeated invocations # Setup Steps ``` import os import numpy as np import tensorflow as tf from matplotlib import pyplot as plt from pyiree.tf import compiler as ireec from pyiree import rt as ireert tf.compat.v1.enable_eager_execution() SAVE_PATH = os.path.join(os.environ["HOME"], "saved_models") os.makedirs(SAVE_PATH, exist_ok=True) # Print version information for future notebook users to reference. 
print("TensorFlow version: ", tf.__version__) print("Numpy version: ", np.__version__) #@title Notebook settings { run: "auto" } #@markdown ----- #@markdown ### Configuration backend_choice = "GPU (vulkan-spirv)" #@param [ "GPU (vulkan-spirv)", "CPU (VMLA)" ] if backend_choice == "GPU (vulkan-spirv)": backend_name = "vulkan-spirv" driver_name = "vulkan" else: backend_name = "vmla" driver_name = "vmla" tf.print("Using IREE compiler backend '%s' and runtime driver '%s'" % (backend_name, driver_name)) #@markdown ----- #@markdown ### Training Parameters #@markdown <sup>Batch size used to subdivide the training and evaluation samples</sup> batch_size = 200 #@param { type: "slider", min: 10, max: 400 } #@markdown <sup>Epochs for training/eval. Higher values take longer to run but generally produce more accurate models</sup> num_epochs = 5 #@param { type: "slider", min: 1, max: 20 } #@markdown ----- ``` # Create and Train MNIST Model in TensorFlow The specific details of the training process here aren't critical to the model compilation and execution through IREE. ``` #@title Load MNIST dataset, setup training and evaluation NUM_CLASSES = 10 # One per digit [0, 1, 2, ..., 9] IMG_ROWS, IMG_COLS = 28, 28 (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data() tf.print("Loaded MNIST dataset!") x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1) x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1) input_shape = (IMG_ROWS, IMG_COLS, 1) # Scale pixel values from [0, 255] integers to [0.0, 1.0] floats. x_train = x_train.astype("float32") / 255 x_test = x_test.astype("float32") / 255 steps_per_epoch = int(x_train.shape[0] / batch_size) steps_per_eval = int(x_test.shape[0] / batch_size) # Convert class vectors to binary class matrices. y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES) y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES) # Construct batched datasets for training/evaluation. 
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)) train_dataset = train_dataset.batch(batch_size, drop_remainder=True) test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)) test_dataset = test_dataset.batch(batch_size, drop_remainder=True) # Create a distribution strategy for the dataset (single machine). strategy = tf.distribute.experimental.CentralStorageStrategy() train_dist_ds = strategy.experimental_distribute_dataset(train_dataset) test_dist_ds = strategy.experimental_distribute_dataset(test_dataset) tf.print("Configured data for training and evaluation!") tf.print(" sample shape: %s" % str(x_train[0].shape)) tf.print(" training samples: %s" % x_train.shape[0]) tf.print(" test samples: %s" % x_test.shape[0]) tf.print(" epochs: %s" % num_epochs) tf.print(" steps/epoch: %s" % steps_per_epoch) tf.print(" steps/eval : %s" % steps_per_eval) tf.print("") tf.print("Sample image from the dataset:") SAMPLE_EXAMPLE_INDEX = 1 sample_image = x_test[SAMPLE_EXAMPLE_INDEX] sample_image_batch = np.expand_dims(sample_image, axis=0) sample_label = y_test[SAMPLE_EXAMPLE_INDEX] plt.imshow(sample_image.reshape(IMG_ROWS, IMG_COLS)) plt.show() tf.print("\nGround truth labels: %s" % str(sample_label)) #@title Define MNIST model architecture using tf.keras API def simple_mnist_model(input_shape): """Creates a simple (multi-layer perceptron) MNIST model.""" model = tf.keras.models.Sequential() # Flatten to a 1d array (e.g. 
28x28 -> 784) model.add(tf.keras.layers.Flatten(input_shape=input_shape)) # Fully-connected neural layer with 128 neurons, RELU activation model.add(tf.keras.layers.Dense(128, activation='relu')) # Fully-connected neural layer returning probability scores for each class model.add(tf.keras.layers.Dense(10, activation='softmax')) return model #@title Train the Keras model with strategy.scope(): model = simple_mnist_model(input_shape) tf.print("Constructed Keras MNIST model, training...") optimizer = tf.keras.optimizers.SGD(learning_rate=0.05) training_loss = tf.keras.metrics.Mean("training_loss", dtype=tf.float32) training_accuracy = tf.keras.metrics.CategoricalAccuracy( "training_accuracy", dtype=tf.float32) test_loss = tf.keras.metrics.Mean("test_loss", dtype=tf.float32) test_accuracy = tf.keras.metrics.CategoricalAccuracy( "test_accuracy", dtype=tf.float32) @tf.function def train_step(iterator): """Training StepFn.""" def step_fn(inputs): """Per-Replica StepFn.""" images, labels = inputs with tf.GradientTape() as tape: logits = model(images, training=True) loss = tf.keras.losses.categorical_crossentropy(labels, logits) loss = tf.reduce_mean(loss) / strategy.num_replicas_in_sync grads = tape.gradient(loss, model.trainable_variables) optimizer.apply_gradients(zip(grads, model.trainable_variables)) training_loss.update_state(loss) training_accuracy.update_state(labels, logits) strategy.experimental_run_v2(step_fn, args=(next(iterator),)) @tf.function def test_step(iterator): """Evaluation StepFn.""" def step_fn(inputs): images, labels = inputs logits = model(images, training=False) loss = tf.keras.losses.categorical_crossentropy(labels, logits) loss = tf.reduce_mean(loss) / strategy.num_replicas_in_sync test_loss.update_state(loss) test_accuracy.update_state(labels, logits) strategy.experimental_run_v2(step_fn, args=(next(iterator),)) for epoch in range(0, num_epochs): tf.print("Running epoch #%s" % (epoch + 1)) train_iterator = iter(train_dist_ds) for step in 
range(steps_per_epoch): train_step(train_iterator) tf.print(" Training loss: %f, accuracy: %f" % (training_loss.result(), training_accuracy.result() * 100)) training_loss.reset_states() training_accuracy.reset_states() test_iterator = iter(test_dist_ds) for step in range(steps_per_eval): test_step(test_iterator) tf.print(" Test loss : %f, accuracy: %f" % (test_loss.result(), test_accuracy.result() * 100)) test_loss.reset_states() test_accuracy.reset_states() tf.print("Completed training!") tf.print("") # Run a single prediction on the trained model tf_prediction = model(sample_image_batch, training=False) tf.print("Sample prediction:") tf.print(tf_prediction[0] * 100.0, summarize=100) tf.print("") #@title Export the trained model as a SavedModel, with IREE-compatible settings # Since the model was written in sequential style, explicitly wrap in a module. saved_model_dir = "/tmp/mnist.sm" inference_module = tf.Module() inference_module.model = model # Hack: Convert to static shape. Won't be necessary once dynamic shapes are in. dynamic_input_shape = list(model.inputs[0].shape) dynamic_input_shape[0] = 1 # Make fixed (batch=1) # Produce a concrete function. inference_module.predict = tf.function( input_signature=[ tf.TensorSpec(dynamic_input_shape, model.inputs[0].dtype)])( lambda x: model.call(x, training=False)) save_options = tf.saved_model.SaveOptions(save_debug_info=True) tf.print("Exporting SavedModel to %s" % saved_model_dir) tf.saved_model.save(inference_module, saved_model_dir, options=save_options) ``` # Compile and Execute MNIST Model using IREE ``` #@title Load the SavedModel into IREE's compiler as MLIR xla_hlo compiler_module = ireec.tf_load_saved_model( saved_model_dir, exported_names=["predict"]) tf.print("Imported MLIR:\n", compiler_module.to_asm(large_element_limit=100)) # Write to a file for use outside of this notebook. 
mnist_mlir_path = os.path.join(SAVE_PATH, "mnist.mlir") with open(mnist_mlir_path, "wt") as output_file: output_file.write(compiler_module.to_asm()) print("Wrote MLIR to path '%s'" % mnist_mlir_path) #@title Compile the xla_hlo MLIR and prepare a context to execute it # Compile the MLIR module into a VM module for execution flatbuffer_blob = compiler_module.compile(target_backends=[backend_name]) vm_module = ireert.VmModule.from_flatbuffer(flatbuffer_blob) # Register the module with a runtime context config = ireert.Config(driver_name) ctx = ireert.SystemContext(config=config) ctx.add_module(vm_module) #@title Execute the compiled module and compare the results with TensorFlow # Invoke the 'predict' function with a single image as an argument iree_prediction = ctx.modules.module.predict(sample_image_batch) tf.print("IREE prediction ('%s' backend, '%s' driver):" % (backend_name, driver_name)) tf.print(tf.convert_to_tensor(iree_prediction[0]) * 100.0, summarize=100) tf.print("") tf.print("TensorFlow prediction:") tf.print(tf_prediction[0] * 100.0, summarize=100) ```
github_jupyter
# NXP imx8qm x AWS NEO Object Detection Example

1. [Introduction](#Introduction)
2. [Compile model using NEO](#Compile-model-using-NEO)
3. [Inference on device](#Inference-on-device)

## Introduction

This notebook will demo how to compile a pretrained GluonCV SSD MobileNet model using AWS Neo for the NXP imx8qm. First, we need to download the pretrained model — in this example we use `ssd_512_mobilenet1.0_voc` — and then use Neo to compile it. Finally, we will deploy the compiled model to the device and run inference using the Neo Deep Learning Runtime.

To get started, we need to set up the environment for AWS S3 permissions, configurations, and so on. Please refer to [Configuration](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html#configuration) for more information.

To use Boto 3, first import it and tell it what service we are going to use.
```
import boto3

sm = boto3.client('sagemaker', region_name='us-west-2')
s3 = boto3.client('s3', region_name='us-west-2')

account = boto3.client('sts').get_caller_identity().get('Account')
print(account)
```
### Get the pretrained model

To avoid installing frameworks like MXNet and GluonCV on the device just to download the latest pretrained models, we store some pretrained models in an S3 bucket with public access. Download the pretrained GluonCV SSD model from the S3 bucket to the device.
```
model_name = 'ssd_512_mobilenet1.0_voc'
model = model_name + '.tar.gz'
model_zoo = 'gluon_cv_object_detection'
s3.download_file('neo-ai-dlr-test-artifacts', 'neo-ai-notebook/{}/{}'.format(model_zoo, model), model)
```
### Upload model to S3 bucket

Create an S3 bucket `imx8qm-demo` to store the pretrained model.
```
bucket = 'imx8qm-demo'

if boto3.resource('s3').Bucket(bucket) not in boto3.resource('s3').buckets.all():
    s3.create_bucket(
        Bucket=bucket,
        CreateBucketConfiguration={
            'LocationConstraint': 'us-west-2'
        }
    )
else:
    print('Bucket %s already exists' %bucket)
```
Upload the pretrained model to the S3 bucket just created.
``` s3.upload_file(model, bucket, model) ``` ### Create IAM role In order to use the sagemaker service and have access to S3 bucket, we need to create a IAM role. ``` iam = boto3.client('iam') role_name = 'imx8qm-demo-test-role' policy = { 'Statement': [{ 'Action': 'sts:AssumeRole', 'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}}, ], 'Version': '2012-10-17'} import json roles = iam.list_roles() role_arn = None for role in roles['Roles']: if role['RoleName'] == role_name: role_arn = role['Arn'] if role_arn == None: new_role = iam.create_role( AssumeRolePolicyDocument=json.dumps(policy), Path='/', RoleName=role_name, ) role_arn = new_role['Role']['Arn'] iam.attach_role_policy( RoleName=role_name, PolicyArn='arn:aws:iam::aws:policy/AmazonSageMakerFullAccess' ) iam.attach_role_policy( RoleName=role_name, PolicyArn='arn:aws:iam::aws:policy/AmazonS3FullAccess' ) ``` ## Compile model using NEO ``` s3_output_location = 's3://{}/output'.format(bucket) data_shape = '{"data":[1,3,512,512]}' framework = 'mxnet' target_device = 'imx8qm' import time compilation_job_name = 'imx8qm-demo'+ str(time.time()).split('.')[0] print('Compilation job for %s started' % compilation_job_name) response = sm.create_compilation_job( CompilationJobName=compilation_job_name, RoleArn=role_arn, InputConfig={ 'S3Uri': 's3://{}/{}'.format(bucket, model), 'DataInputConfig': data_shape, 'Framework': framework.upper() }, OutputConfig={ 'S3OutputLocation': s3_output_location, 'TargetDevice': target_device }, StoppingCondition={ 'MaxRuntimeInSeconds': 900 } ) print(response) # Poll every 30 sec while True: response = sm.describe_compilation_job(CompilationJobName=compilation_job_name) if response['CompilationJobStatus'] == 'COMPLETED': break elif response['CompilationJobStatus'] == 'FAILED': raise RuntimeError('Compilation failed') print('Compiling ...') time.sleep(30) print('Done!') ``` ## Inference on device ### Download compiled model from S3 to device ``` object_path = 
'output/{}-{}.tar.gz'.format(model_name, target_device) neo_compiled_model = 'compiled-'+ model s3.download_file(bucket, object_path, neo_compiled_model) %%bash mkdir compiled_model tar -xf $neo_compiled_model -C ./compiled_model ``` ### Use DLR to read compiled model ``` from dlr import DLRModel import numpy as np import time # Load the model model_path = "./compiled_model" device = 'cpu' model = DLRModel(model_path, device) ``` ### Download an image to prepare for predictions ``` %%bash wget -O test.jpg https://upload.wikimedia.org/wikipedia/commons/c/c6/Newark-broad-street.jpg file_name = "test.jpg" # test image from IPython.display import Image Image(file_name) ``` ### Image pre-process ``` import PIL.Image image = PIL.Image.open(file_name) image = np.asarray(image.resize((512, 512))) # Normalize mean_vec = np.array([0.485, 0.456, 0.406]) stddev_vec = np.array([0.229, 0.224, 0.225]) image = (image/255- mean_vec)/stddev_vec # Transpose if len(image.shape) == 2: # for greyscale image image = np.expand_dims(image, axis=2) image = np.rollaxis(image, axis=2, start=0)[np.newaxis, :] print(image.shape) ``` ### Inference and prediction ``` #flatten within a input array input_data = {'data': image} # dry run for _ in range(5): model.run(input_data) print('Testing inference...') start_time = time.time() detection = model.run(input_data) #need to be a list of input arrays matching input names print('inference time is ' + str((time.time()-start_time)) + ' seconds') object_categories = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] obj = detection[0][0] score = detection[1][0] bbox = detection[2][0] # print the first 10 detetction for i in range(10): print(object_categories[int(obj[i][0])] + ' with score ' + str(score[i][0])) ```
github_jupyter
# Version information ``` from datetime import date print("Running date:", date.today().strftime("%B %d, %Y")) import pyleecan print("Pyleecan version:" + pyleecan.__version__) import SciDataTool print("SciDataTool version:" + SciDataTool.__version__) ``` # How to define a simulation to call FEMM This tutorial shows the different steps to **compute magnetic flux and electromagnetic torque** with Pyleecan **automated coupling with FEMM**. This tutorial was tested with the release [21Apr2019 of FEMM](http://www.femm.info/wiki/Download). Please note that the coupling with FEMM is only available on Windows. The notebook related to this tutorial is available on [GitHub](https://github.com/Eomys/pyleecan/tree/master/Tutorials/tuto_Simulation_FEMM.ipynb). Every electrical machine defined in Pyleecan can be automatically drawn in FEMM to compute torque, airgap flux and electromotive force. ## Defining or loading the machine The first step is to define the machine to simulate. For this tutorial we use the Toyota Prius 2004 machine defined in [this tutorial](https://www.pyleecan.org/tuto_Machine.html). ``` %matplotlib notebook # Load the machine from os.path import join from pyleecan.Functions.load import load from pyleecan.definitions import DATA_DIR IPMSM_A = load(join(DATA_DIR, "Machine", "Toyota_Prius.json")) IPMSM_A.plot() ``` ## Simulation definition ### Inputs The simulation is defined with a [**Simu1**](http://www.pyleecan.org/pyleecan.Classes.Simu1.html) object. This object corresponds to a simulation with 5 sequential physics (or modules): - electrical - magnetic - force - structural - acoustic Each physics/modules can have several models to solve them. 
For now pyleecan includes: - an Electrical model for PMSM machine with FEMM - a Magnetic model with FEMM for all machines - a Force model (Maxwell Tensor) - Magnetic and Structural models with GMSH/Elmer [**Simu1**](http://www.pyleecan.org/pyleecan.Classes.Simu1.html) object enforces a weak coupling between each physics: the input of each physic is the output of the previous one. The Magnetic physics is defined with the object [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) and the other physics are deactivated (set to None). We define the starting point of the simulation with an [**InputCurrent**](http://www.pyleecan.org/pyleecan.Classes.InputCurrent.html) object to enforce the electrical module output with: - angular and the time discretization - rotor speed - stator currents ``` from os.path import join from numpy import ones, pi, array, linspace, cos, sqrt from pyleecan.Classes.Simu1 import Simu1 from pyleecan.Classes.InputCurrent import InputCurrent from pyleecan.Classes.MagFEMM import MagFEMM # Create the Simulation simu_femm = Simu1(name="FEMM_simulation", machine=IPMSM_A) p = simu_femm.machine.stator.winding.p qs = simu_femm.machine.stator.winding.qs # Defining Simulation Input simu_femm.input = InputCurrent() # Rotor speed [rpm] simu_femm.input.N0 = 2000 # time discretization [s] time = linspace(start=0, stop=60/simu_femm.input.N0, num=32*p, endpoint=False) # 32*p timesteps simu_femm.input.time = time # Angular discretization along the airgap circonference for flux density calculation simu_femm.input.angle = linspace(start = 0, stop = 2*pi, num=2048, endpoint=False) # 2048 steps # Stator currents as a function of time, each column correspond to one phase [A] I0_rms = 250/sqrt(2) felec = p * simu_femm.input.N0 /60 # [Hz] rot_dir = simu_femm.machine.stator.comp_rot_dir() Phi0 = 140*pi/180 # Maximum Torque Per Amp Ia = ( I0_rms * sqrt(2) * cos(2 * pi * felec * time + 0 * rot_dir * 2 * pi / qs + Phi0) ) Ib = ( I0_rms * sqrt(2) * cos(2 * 
pi * felec * time + 1 * rot_dir * 2 * pi / qs + Phi0) ) Ic = ( I0_rms * sqrt(2) * cos(2 * pi * felec * time + 2 * rot_dir * 2 * pi / qs + Phi0) ) simu_femm.input.Is = array([Ia, Ib, Ic]).transpose() ``` In this example stator currents are enforced as a function of time for each phase. Sinusoidal current can also be defined with Id/Iq as explained in [this tutorial](https://www.pyleecan.org/tuto_Operating_point.html). ### MagFEMM configuration For the configuration of the Magnetic module, we use the object [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) that computes the airgap flux density by calling FEMM. The model parameters are set though the properties of the [**MagFEMM**](https://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) object. In this tutorial we will present the main ones, the complete list is available by looking at [**Magnetics**](http://www.pyleecan.org/pyleecan.Classes.Magnetics.html) and [**MagFEMM**](http://www.pyleecan.org/pyleecan.Classes.MagFEMM.html) classes documentation. *type_BH_stator* and *type_BH_rotor* enable to select how to model the B(H) curve of the laminations in FEMM. The material parameters and in particular the B(H) curve are setup directly [in the machine lamination material](https://www.pyleecan.org/tuto_Machine.html). ``` from pyleecan.Classes.MagFEMM import MagFEMM simu_femm.mag = MagFEMM( type_BH_stator=0, # 0 to use the material B(H) curve, # 1 to use linear B(H) curve according to mur_lin, # 2 to enforce infinite permeability (mur_lin =100000) type_BH_rotor=0, # 0 to use the material B(H) curve, # 1 to use linear B(H) curve according to mur_lin, # 2 to enforce infinite permeability (mur_lin =100000) file_name = "", # Name of the file to save the FEMM model ) # Only the magnetic module is defined simu_femm.elec = None simu_femm.force = None simu_femm.struct = None ``` Pyleecan coupling with FEMM enables to define the machine with symmetry and with sliding band to optimize the computation time. 
The angular periodicity of the machine will be computed and (in the particular case) only 1/8 of the machine will be drawn (4 symmetries + antiperiodicity): ``` simu_femm.mag.is_periodicity_a=True ``` The same is done for time periodicity only half of one electrical period is calculated (i.e: 1/8 of mechanical period): ``` simu_femm.mag.is_periodicity_t=True ``` Pyleecan enable to parallelize the call to FEMM by simply setting: ``` simu_femm.mag.nb_worker = 4 # Number of FEMM instances to run at the same time (1 by default) ``` At the end of the simulation, the mesh and the solution can be saved in the **Output** object with: ``` simu_femm.mag.is_get_meshsolution = True # To get FEA mesh for latter post-procesing simu_femm.mag.is_save_meshsolution_as_file = False # To save FEA results in a dat file ``` ## Run simulation ``` out_femm = simu_femm.run() ``` When running the simulation, an FEMM window runs in background. You can open it to see pyleecan drawing the machine and defining the surfaces. ![](https://www.pyleecan.org/_static/IPMSM_FEMM.png) The simulation will compute 32*p/8 different timesteps by updating the current and the sliding band boundary condition. If the parallelization is activated (simu_femm.mag.nb_worker >1) then the time steps are computed out of order. Once the simulation is finished, an Output object is return. The results are stored in the magnetic part of the output (i.e. _out_femm.mag_ ) and different plots can be called. 
This _out_femm.mag_ contains: - *Time*: magnetic time axis - *Angle*: magnetic position - *B*: airgap flux density (contains radial and tangential components) - *Tem*: electromagnetic torque - *Tem_av*: average electromagnetic torque - *Tem_rip_pp* : Peak to Peak Torque ripple - *Tem_rip_norm*: Peak to Peak Torque ripple normalized according to average torque - *Phi_wind_stator*: stator winding flux - *emf*: electromotive force Some of these properties are "Data objects" from the [SciDataTool](https://github.com/Eomys/SciDataTool) project. These object enables to handle unit conversion, interpolation, fft, periodicity... ## Plot results **Output** object embbed different plots to visualize results easily. A dedicated tutorial is available [here](https://www.pyleecan.org/tuto_Plots.html). For instance, the radial and tangential magnetic flux in the airgap at a specific timestep can be plotted with: ``` # Radial magnetic flux out_femm.mag.B.plot_2D_Data("angle","time[1]",component_list=["radial"]) out_femm.mag.B.plot_2D_Data("wavenumber=[0,76]","time[1]",component_list=["radial"]) # Tangential magnetic flux out_femm.mag.B.plot_2D_Data("angle","time[1]",component_list=["tangential"]) out_femm.mag.B.plot_2D_Data("wavenumber=[0,76]","time[1]",component_list=["tangential"]) ``` The torque can be plotted with: ``` out_femm.mag.Tem.plot_2D_Data("time") ``` One can notice that the torque matrix includes the periodicity (only the meaningful part is stored) ``` print(out_femm.mag.Tem.values.shape) print(simu_femm.input.Nt_tot) ``` If the mesh was saved in the output object (mySimu.mag.is_get_meshsolution = True), it can be plotted with: ``` out_femm.mag.meshsolution.plot_contour(label="B", group_names="stator core") ``` <div> <img src="https://www.pyleecan.org/_static/tuto_Simulation_FEMM_Bmesh.png" width="800"/> </div> Finally, it is possible to extend pyleecan by implementing new plot by using the results from output. 
For instance, the following plot requires plotly to display the radial flux density in the airgap over time and angle. ``` #%run -m pip install plotly # Uncomment this line to install plotly import plotly.graph_objects as go from plotly.offline import init_notebook_mode init_notebook_mode() result = out_femm.mag.B.components["radial"].get_along("angle{°}", "time") x = result["angle"] y = result["time"] z = result["B_r"] fig = go.Figure(data=[go.Surface(z=z, x=x, y=y)]) fig.update_layout( ) fig.update_layout(title='Radial flux density in the airgap over time and angle', autosize=True, scene = dict( xaxis_title='Angle [°]', yaxis_title='Time [s]', zaxis_title='Flux [T]' ), width=700, margin=dict(r=20, b=100, l=10, t=100), ) fig.show(config = {"displaylogo":False}) ```
github_jupyter
# Deep CNN Models Constructing and training your own ConvNet from scratch can be Hard and a long task. A common trick used in Deep Learning is to use a **pre-trained** model and finetune it to the specific data it will be used for. ## Famous Models with Keras This notebook contains code and reference for the following Keras models (gathered from [https://github.com/fchollet/keras/tree/master/keras/applications]()) - VGG16 - VGG19 - ResNet50 - Inception v3 - Xception - ... more to come ## References - [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556) - please cite this paper if you use the VGG models in your work. - [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) - please cite this paper if you use the ResNet model in your work. - [Rethinking the Inception Architecture for Computer Vision](http://arxiv.org/abs/1512.00567) - please cite this paper if you use the Inception v3 model in your work. All architectures are compatible with both TensorFlow and Theano, and upon instantiation the models will be built according to the image dimension ordering set in your Keras configuration file at `~/.keras/keras.json`. For instance, if you have set `image_data_format="channels_last"`, then any model loaded from this repository will get built according to the TensorFlow dimension ordering convention, "Width-Height-Depth". # VGG16 <img src="imgs/vgg16.png" > # VGG19 <img src="imgs/vgg19.png" > # `keras.applications` ``` from keras.applications import VGG16 from keras.applications.imagenet_utils import preprocess_input, decode_predictions import os # -- Jupyter/IPython way to see documentation # please focus on parameters (e.g. include top) VGG16?? 
vgg16 = VGG16(include_top=True, weights='imagenet') ``` If you're wondering **where** this `HDF5` files with weights is stored, please take a look at `~/.keras/models/` #### HandsOn VGG16 - Pre-trained Weights ``` IMAGENET_FOLDER = 'imgs/imagenet' #in the repo !ls imgs/imagenet ``` <img src="imgs/imagenet/strawberry_1157.jpeg" > ``` from keras.preprocessing import image import numpy as np img_path = os.path.join(IMAGENET_FOLDER, 'strawberry_1157.jpeg') img = image.load_img(img_path, target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) print('Input image shape:', x.shape) preds = vgg16.predict(x) print('Predicted:', decode_predictions(preds)) ``` <img src="imgs/imagenet/apricot_696.jpeg" > ``` img_path = os.path.join(IMAGENET_FOLDER, 'apricot_696.jpeg') img = image.load_img(img_path, target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) print('Input image shape:', x.shape) preds = vgg16.predict(x) print('Predicted:', decode_predictions(preds)) ``` <img src="imgs/imagenet/apricot_565.jpeg" > ``` img_path = os.path.join(IMAGENET_FOLDER, 'apricot_565.jpeg') img = image.load_img(img_path, target_size=(224, 224)) x = image.img_to_array(img) x = np.expand_dims(x, axis=0) x = preprocess_input(x) print('Input image shape:', x.shape) preds = vgg16.predict(x) print('Predicted:', decode_predictions(preds)) ``` # Hands On: ### Try to do the same with VGG19 Model ``` # from keras.applications import VGG19 ``` # Residual Networks <img src="imgs/resnet_bb.png" > ## ResNet 50 <img src="imgs/resnet34.png" > ``` ## from keras.applications import ... ```
github_jupyter
# Probabilistic Grammar Fuzzing Let us give grammars even more power by assigning _probabilities_ to individual expansions. This allows us to control how many of each element should be produced, and thus allows us to _target_ our generated tests towards specific functionality. We also show how to learn such probabilities from given sample inputs, and specifically direct our tests towards input features that are uncommon in these samples. **Prerequisites** * You should have read the [chapter on grammars](Grammars.ipynb). * Our implementation hooks into the grammar-based fuzzer introduced in ["Efficient Grammar Fuzzing"](GrammarFuzzer.ipynb) * For learning probabilities from samples, we make use of [parsers](Parser.ipynb) ## The Law of Leading Digits In all our examples so far, you may have noted that inputs generated by a program differ quite a bit from "natural" inputs as they occur in real life. This is true even for innocuous elements such as numbers – yes, the numbers we have generated so far actually _differ_ from numbers in the real world. This is because in real-life sets of numerical data, the _leading significant digit_ is likely to be small: Actually, on average, the leading digit `1` occurs more than _six times_ as often as the leading digit `8` or `9`. It has been shown that this result applies to a wide variety of data sets, including electricity bills, street addresses, stock prices, house prices, population numbers, death rates, lengths of rivers, physical and mathematical constants (Wikipedia). This law, first observed by Newcomb \cite{Newcomb1881), was formalized by Benford in \cite{Benford1938). Let us take a look at the conditions that determine the first digit of a number. 
def first_digit_via_string(x):
    """Return the leading (most significant) digit of `x` via its string form."""
    # repr() renders the number; the leading character's offset from '0'
    # is the digit value.
    rendered = repr(x)
    return ord(rendered[0]) - ord('0')
Most notably, it can be used to detect "non-natural" numbers, i.e. numbers that apparently were created randomly rather than coming from a "natural" source. if you write a scientific paper and fake data by putting in random numbers (for instance, [using our grammar fuzzer](GrammarFuzzer.ipynb) on integers), you will likely violate Benford's law, and this can indeed be spotted. On the other hand, how would we proceed if we _wanted_ to create numbers that adhere to Benson's law? To this end, we need to be able to _encode_ probabilities such as the above in our grammar, such that we can ensure that a leading digit is indeed a `1` in 30% of all cases. ## Specifying Probabilities The goal of this chapter is to assign _probabilities_ to individual expansions in the grammar, such that we can express that some expansion alternatives should be favored over others. This is not only useful to generate "natural"-looking numbers, but even more so to _direct_ test generation towards a specific goal. If you recently have changed some code in your program, you would probably like to generate inputs that exercise precisely this code. By raising the probabilities on the input elements associated with the changed code, you will get more tests that exercise the changed code. Our concept for expressing probabilities is to _annotate_ individual expansions with attributes such as probabilities. To this end, we allow that an expansion cannot only be a string, but also a _pair_ of a string and a set of attributes, as in ```python "<expr>": [("<term> + <expr>", opts(prob=0.1)), ("<term> - <expr>", opts(prob=0.2)), "<term>"] ``` Here, the `opts()` function would allow us to express probabilities for choosing the individual expansions. The addition would have a probability of 10%, the subtraction of 20%. The remaining probability (in this case 70%) is equally distributed over the non-attributed expansions (in this case the single last one). 
def exp_string(expansion):
    """Return the string to be expanded"""
    # An annotated expansion is a (string, opts) pair; a plain expansion
    # is just the string itself.
    if not isinstance(expansion, str):
        expansion = expansion[0]
    return expansion
"""Return the specified probability, or None if unspecified""" if isinstance(expansion, str): return None return exp_opts(expansion)['prob'] exp_prob(leaddigit_expansion) ``` Fortunately, our existing fuzzers have been set up to work well with grammars annotated this way. They simply ignore all annotations: ``` f = GrammarFuzzer(PROBABILISTIC_EXPR_GRAMMAR) f.fuzz() from GrammarCoverageFuzzer import GrammarCoverageFuzzer f = GrammarCoverageFuzzer(PROBABILISTIC_EXPR_GRAMMAR) f.fuzz() ``` ## Computing Probabilities Let us define functions that access probabilities for given expansions. While doing so, they also check for inconsistencies. ### Distributing Probabilities Here is how we distribute probabilities for expansions without specified probabilities. Given an expansion rule $$S ::= a_1\:|\: a_2 \:|\: \dots \:|\: a_n \:|\: u_1 \:|\: u_2 \:|\: \dots u_m$$ with $n \ge 0$ alternatives $a_i$ for which the probability $p(a_i)$ is _specified_ and $m \ge 0$ alternatives $u_j$ for which the probability $p(u_j)$ is _unspecified_, the "remaining" probability is distributed equally over all $u_j$; in other words, $$p(u_j) = \frac{1 - \sum_{i = 0}^{n}p(a_i)}{m}$$ If no probabilities are specified ($n = 0$), then all expansions have the same probability. The overall sum of probabilities must be 1: $$\sum_{i = 0}^{n} p(a_i) + \sum_{j = 0}^{m} p(u_i) = 1$$ We check these properties while distributing probabilities. The function `exp_probabilities()` returns a mapping of all expansions in a rule to their respective probabilities. ``` def exp_probabilities(expansions, nonterminal="<symbol>"): probabilities = [exp_prob(expansion) for expansion in expansions] prob_dist = prob_distribution(probabilities, nonterminal) prob_mapping = {} for i in range(len(expansions)): expansion = exp_string(expansions[i]) prob_mapping[expansion] = prob_dist[i] return prob_mapping ``` The gist of `exp_probabilities()` is handled in `prob_distribution()`, which does the actual checking and computation. 
def prob_distribution(probabilities, nonterminal="<symbol>"):
    """Fill in unspecified (None) probabilities and validate the distribution.

    The remaining probability mass (1 - sum of specified values) is shared
    equally among the unspecified entries.  Raises AssertionError if the
    specified values are inconsistent.  `nonterminal` is used only in error
    messages.
    """
    epsilon = 0.00001
    unspecified = probabilities.count(None)

    # Everything specified: just check that the values sum up to 1.
    if unspecified == 0:
        assert abs(sum(probabilities) - 1.0) < epsilon, \
            nonterminal + ": sum of probabilities must be 1.0"
        return probabilities

    specified_total = sum(p for p in probabilities if p is not None)
    assert 0 <= specified_total <= 1.0, \
        nonterminal + ": sum of specified probabilities must be between 0.0 and 1.0"

    # Share the remaining mass equally among the unspecified entries.
    fill = (1.0 - specified_total) / unspecified
    completed = [fill if p is None else p for p in probabilities]

    assert abs(sum(completed) - 1.0) < epsilon
    return completed
def is_valid_probabilistic_grammar(grammar, start_symbol=START_SYMBOL):
    """Return True iff `grammar` is valid and its probability annotations are consistent.

    Consistency is checked by exp_probabilities(), which raises an
    AssertionError for inconsistent annotations (sums outside [0, 1], etc.).
    """
    if not is_valid_grammar(grammar, start_symbol):
        return False
    # exp_probabilities() validates via assertions; its return value is not
    # needed here (the original bound it to an unused local).
    for nonterminal, expansions in grammar.items():
        exp_probabilities(expansions, nonterminal)
    return True
class ProbabilisticGrammarFuzzer(GrammarFuzzer):
    """A grammar fuzzer that honors `prob` annotations on expansions.

    Only `choose_node_expansion()` is overridden: instead of choosing
    uniformly among the possible children, it makes a weighted random
    choice using the probabilities from exp_probabilities().
    """

    def choose_node_expansion(self, node, possible_children):
        # Return the index (into possible_children) of the expansion to apply.
        (symbol, tree) = node
        expansions = self.grammar[symbol]
        # Map each expansion string to its explicit or derived probability.
        probabilities = exp_probabilities(expansions)
        weights = []
        for child in possible_children:
            # The expanded string of this child serves as the lookup key.
            expansion = all_terminals((node, child))
            child_weight = probabilities[expansion]
            if self.log:
                print(repr(expansion), "p =", child_weight)
            weights.append(child_weight)
        if sum(weights) == 0:
            # No alternative (probably expanding at minimum cost)
            # weights=None makes random.choices() pick uniformly.
            weights = None
        return random.choices(range(len(possible_children)), weights=weights)[0]
## Directed Fuzzing Assigning probabilities to individual expansions gives us great control over which inputs should be generated. By choosing probabilities wisely, we can _direct_ fuzzing towards specific functions and features – for instance, towards functions that are particularly critical, prone to failures, or that have been recently changed. As an example, consider the URL grammar from the [chapter on grammars](Grammars.ipynb). Let us assume we have just made a change to our implementation of the secure FTP protocol. By assigning a higher probability to the `ftps` scheme, we can generate more URLs that will specifically test this functionality. First, let us define a helper function that sets a particular option: ``` from Grammars import URL_GRAMMAR def set_opts(grammar, symbol, expansion, opts=None): """Set the options of the given expansion of grammar[symbol] to opts""" expansions = grammar[symbol] for i in range(len(expansions)): exp = expansions[i] if exp_string(exp) == expansion: new_opts = exp_opts(exp) if opts is None or new_opts is None: new_opts = opts else: for key in opts: new_opts[key] = opts[key] if new_opts is None: grammar[symbol][i] = exp_string(exp) else: grammar[symbol][i] = (exp_string(exp), new_opts) return ``` Here's a specialization just for probabilities: ``` def set_prob(grammar, symbol, expansion, prob): """Set the probability of the given expansion of grammar[symbol]""" set_opts(grammar, symbol, expansion, opts(prob=prob)) ``` Let us use `set_prob()` to give the `ftps` expansion a probability of 80%: ``` import copy probabilistic_url_grammar = copy.deepcopy(URL_GRAMMAR) set_prob(probabilistic_url_grammar, "<scheme>", "ftps", 0.8) assert is_valid_probabilistic_grammar(probabilistic_url_grammar) probabilistic_url_grammar["<scheme>"] ``` If we use this grammar for fuzzing, we will get plenty of `ftps:` prefixes: ``` prob_url_fuzzer = ProbabilisticGrammarFuzzer(probabilistic_url_grammar) for i in range(10): print(prob_url_fuzzer.fuzz()) 
``` In a similar vein, we can direct URL generation towards specific hosts or ports; we can favor URLs with queries, fragments, or logins – or URLs without these. All it takes is to set appropriate probabilities. By setting the probability of an expansion to zero, we can effectively disable specific expansions: ``` set_prob(probabilistic_url_grammar, "<scheme>", "ftps", 0.0) assert is_valid_probabilistic_grammar(probabilistic_url_grammar) prob_url_fuzzer = ProbabilisticGrammarFuzzer(probabilistic_url_grammar) for i in range(10): print(prob_url_fuzzer.fuzz()) ``` Note that even if we set the probability of an expansion to zero, we may still see the expansion taken. This can happen during the "closing" phase of [our grammar fuzzer](GrammarFuzzer.ipynb), when the expansion is closed at minimum cost. At this stage, even expansions with "zero" probability will be taken if this is necessary for closing the expansion. Let us illustrate this feature using the `<expr>` rule from our expression grammar: ``` from Grammars import EXPR_GRAMMAR probabilistic_expr_grammar = copy.deepcopy(EXPR_GRAMMAR) probabilistic_expr_grammar["<expr>"] ``` If we set the probability of the `<term>` expansion to zero, the string should expand again and again. ``` set_prob(probabilistic_expr_grammar, "<expr>", "<term>", 0.0) assert is_valid_probabilistic_grammar(probabilistic_expr_grammar) ``` Still, in the "closing" phase, subexpressions will eventually expand into `<term>`, as it is the only way to close the expansion. Tracking `choose_node_expansion()` shows that is is invoked with only one possible expansion `<term>`, which has to be taken even though its specified probability is zero. ``` prob_expr_fuzzer = ProbabilisticGrammarFuzzer(probabilistic_expr_grammar) prob_expr_fuzzer.fuzz() ``` ## Probabilities in Context While specified probabilities give us a means to control which expansions are taken how often, this control by itself may not be enough. 
def decrange(start, end):
    """Return a list with string representations of numbers in the range [start, end)"""
    # Apply repr to every integer in the half-open range.
    return list(map(repr, range(start, end)))
We can do this programmatically, using the `duplicate_context()` method: ``` from GrammarCoverageFuzzer import duplicate_context probabilistic_ip_address_grammar = copy.deepcopy(IP_ADDRESS_GRAMMAR) duplicate_context(probabilistic_ip_address_grammar, "<address>") probabilistic_ip_address_grammar["<address>"] ``` Our original `<octet>` definition is now no longer required: ``` del probabilistic_ip_address_grammar["<octet>"] ``` We can now assign different probabilities to each of the `<octet>` symbols. For instance, we can force specific expansions by setting their probability to 100%: ``` set_prob(probabilistic_ip_address_grammar, "<octet-1>", "127", 1.0) set_prob(probabilistic_ip_address_grammar, "<octet-2>", "0", 1.0) assert is_valid_probabilistic_grammar(probabilistic_ip_address_grammar) ``` The remaining two octets `<octet-3>` and `<octet-4>` have no specific probabilities set. During fuzzing, all their expansions (all octets) are thus still available: ``` probabilistic_ip_fuzzer = ProbabilisticGrammarFuzzer( probabilistic_ip_address_grammar) [probabilistic_ip_fuzzer.fuzz() for i in range(5)] ``` Just as with coverage, we can duplicate grammar rules arbitrarily often to get more and more finer-grained control over probabilities. However, this finer-grained control also comes at the cost of having to maintain these probabilities. In the next section, we will therefore discuss means to assign and tune such probabilities automatically. ## Learning Probabilities from Samples Probabilities need not be set manually all the time. They can also be _learned_ from other sources. ``` from GrammarFuzzer import display_tree from Parser import PEGParser parser = PEGParser(IP_ADDRESS_GRAMMAR) tree = parser.parse("127.0.0.1")[0] display_tree(tree) from Parser import EarleyParser parser = EarleyParser(IP_ADDRESS_GRAMMAR) tree = parser.parse("127.0.0.1")[0] display_tree(tree) ``` \todo{FIXME: "127" should be one string} 1. Count individual expansions over a sample as weights. 2. 
Assign these as probabilities to the grammar. ## Auto-Tuning Probabilities 1. First, generate a set of inputs. 2. Then, measure coverage. 3. Pick the slice of inputs that satisfies a particular goal (say, coverage). 4. Learn probabilities from these. 5. Repeat :-) ``` from Coverage import Coverage, cgi_decode from Grammars import CGI_GRAMMAR cgi_fuzzer = GrammarFuzzer(CGI_GRAMMAR) trials = 100 coverage = {} for i in range(trials): cgi_input = cgi_fuzzer.fuzz() with Coverage() as cov: cgi_decode(cgi_input) coverage[cgi_input] = cov.coverage() coverage_slice = [cgi_input for cgi_input in coverage if ( 'cgi_decode', 25) in coverage[cgi_input]] print(coverage_slice) len(coverage_slice) / trials ``` Now use this sample for setting probabilities. ## Lessons Learned * _Lesson one_ * _Lesson two_ * _Lesson three_ ## Next Steps _Link to subsequent chapters (notebooks) here, as in:_ * [use _mutations_ on existing inputs to get more valid inputs](MutationFuzzer.ipynb) * [use _grammars_ (i.e., a specification of the input format) to get even more valid inputs](Grammars.ipynb) * [reduce _failing inputs_ for efficient debugging](Reducer.ipynb) ## Background Our exposition of Benford's law follows [this article](https://brilliant.org/wiki/benfords-law/). ## Exercises Close the chapter with a few exercises such that people have things to do. In Jupyter Notebook, use the `exercise2` nbextension to add solutions that can be interactively viewed or hidden: * Mark the _last_ cell of the exercise (this should be a _text_ cell) as well as _all_ cells of the solution. (Use the `rubberband` nbextension and use Shift+Drag to mark multiple cells.) * Click on the `solution` button at the top. (Alternatively, just copy the exercise and solution cells below with their metadata.) ### Exercise 1 Create a class `ProbabilisticGrammarCoverageFuzzer` that extends `GrammarCoverageFuzzer` with probabilistic capabilities. 
The idea is to first cover all uncovered expansions (like `GrammarCoverageFuzzer`) and once all expansions are covered, to proceed by probabilities (like `ProbabilisticGrammarFuzzer`). To this end, define new instances of the `choose_covered_node_expansion()` and `choose_uncovered_node_expansion()` methods that choose an expansion based on the given weights. If you are an advanced programmer, realize the class via _multiple inheritance_ from `GrammarCoverageFuzzer` and `ProbabilisticGrammarFuzzer` to achieve this. **Solution**. With multiple inheritance, this is fairly easy; we just need to point the three methods to the right places: ```
class ProbabilisticGrammarCoverageFuzzer(GrammarCoverageFuzzer, ProbabilisticGrammarFuzzer):
    """Cover all uncovered expansions first (behavior inherited from
    `GrammarCoverageFuzzer`); once everything is covered, choose among
    expansions by their relative probabilities (behavior inherited from
    `ProbabilisticGrammarFuzzer`).

    Each method below only dispatches to the appropriate parent class;
    all actual selection logic lives in the parents.
    """

    # Choose uncovered expansions first
    def choose_node_expansion(self, node, possible_children):
        return GrammarCoverageFuzzer.choose_node_expansion(self, node, possible_children)

    # Among uncovered expansions, pick by (relative) probability
    def choose_uncovered_node_expansion(self, node, possible_children):
        return ProbabilisticGrammarFuzzer.choose_node_expansion(self, node, possible_children)

    # For covered nodes, pick by probability, too
    def choose_covered_node_expansion(self, node, possible_children):
        return ProbabilisticGrammarFuzzer.choose_node_expansion(self, node, possible_children)
``` In the first nine invocations, our fuzzer covers one digit after another: ```
cov_leaddigit_fuzzer = ProbabilisticGrammarCoverageFuzzer(
    PROBABILISTIC_EXPR_GRAMMAR, start_symbol="<leaddigit>")
print([cov_leaddigit_fuzzer.fuzz() for i in range(9)])
``` After these, we again proceed by probabilities: ```
trials = 10000

count = {}
for c in crange('0', '9'):
    count[c] = 0

for i in range(trials):
    count[cov_leaddigit_fuzzer.fuzz()] += 1

print([(digit, count[digit] / trials) for digit in count])
``` ### Exercise 2 Test: \cite{Holler2012} _Solution for the exercise_
github_jupyter
# AWS Elastic Kubernetes Service (EKS) Deep MNIST In this example we will deploy a tensorflow MNIST model in Amazon Web Services' Elastic Kubernetes Service (EKS). This tutorial will break down in the following sections: 1) Train a tensorflow model to predict mnist locally 2) Containerise the tensorflow model with our docker utility 3) Send some data to the docker model to test it 4) Install and configure AWS tools to interact with AWS 5) Use the AWS tools to create and setup EKS cluster with Seldon 6) Push and run docker image through the AWS Container Registry 7) Test our Elastic Kubernetes deployment by sending some data #### Let's get started! 🚀🔥 ## Dependencies: * Helm v3.0.0+ * A Kubernetes cluster running v1.13 or above (minkube / docker-for-windows work well if enough RAM) * kubectl v1.14+ * EKS CLI v0.1.32 * AWS Cli v1.16.163 * Python 3.6+ * Python DEV requirements ## 1) Train a tensorflow model to predict mnist locally We will load the mnist images, together with their labels, and then train a tensorflow model to predict the right labels ``` from tensorflow.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets("MNIST_data/", one_hot = True) import tensorflow as tf if __name__ == '__main__': x = tf.placeholder(tf.float32, [None,784], name="x") W = tf.Variable(tf.zeros([784,10])) b = tf.Variable(tf.zeros([10])) y = tf.nn.softmax(tf.matmul(x,W) + b, name="y") y_ = tf.placeholder(tf.float32, [None, 10]) cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy) init = tf.initialize_all_variables() sess = tf.Session() sess.run(init) for i in range(1000): batch_xs, batch_ys = mnist.train.next_batch(100) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys}) correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) print(sess.run(accuracy, feed_dict = {x: 
mnist.test.images, y_:mnist.test.labels})) saver = tf.train.Saver() saver.save(sess, "model/deep_mnist_model") ``` ## 2) Containerise the tensorflow model with our docker utility First you need to make sure that you have added the .s2i/environment configuration file in this folder with the following content: ``` !cat .s2i/environment ``` Now we can build a docker image named "deep-mnist" with the tag 0.1 ``` !s2i build . seldonio/seldon-core-s2i-python36:1.2.1-dev deep-mnist:0.1 ``` ## 3) Send some data to the docker model to test it We first run the docker image we just created as a container called "mnist_predictor" ``` !docker run --name "mnist_predictor" -d --rm -p 5000:5000 deep-mnist:0.1 ``` Send some random features that conform to the contract ``` import matplotlib.pyplot as plt # This is the variable that was initialised at the beginning of the file i = [0] x = mnist.test.images[i] y = mnist.test.labels[i] plt.imshow(x.reshape((28, 28)), cmap='gray') plt.show() print("Expected label: ", np.sum(range(0,10) * y), ". 
One hot encoding: ", y) from seldon_core.seldon_client import SeldonClient import math import numpy as np # We now test the REST endpoint expecting the same result endpoint = "0.0.0.0:5000" batch = x payload_type = "ndarray" sc = SeldonClient(microservice_endpoint=endpoint) # We use the microservice, instead of the "predict" function client_prediction = sc.microservice( data=batch, method="predict", payload_type=payload_type, names=["tfidf"]) for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)): print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %") !docker rm mnist_predictor --force ``` ## 4) Install and configure AWS tools to interact with AWS First we install the awscli ``` !pip install awscli --upgrade --user ``` #### Configure aws so it can talk to your server (if you are getting issues, make sure you have the permmissions to create clusters) ``` %%bash # You must make sure that the access key and secret are changed aws configure << END_OF_INPUTS YOUR_ACCESS_KEY YOUR_ACCESS_SECRET us-west-2 json END_OF_INPUTS ``` #### Install EKCTL *IMPORTANT*: These instructions are for linux Please follow the official installation of ekctl at: https://docs.aws.amazon.com/eks/latest/userguide/getting-started-eksctl.html ``` !curl --silent --location "https://github.com/weaveworks/eksctl/releases/download/latest_release/eksctl_$(uname -s)_amd64.tar.gz" | tar xz !chmod 755 ./eksctl !./eksctl version ``` ## 5) Use the AWS tools to create and setup EKS cluster with Seldon In this example we will create a cluster with 2 nodes, with a minimum of 1 and a max of 3. You can tweak this accordingly. If you want to check the status of the deployment you can go to AWS CloudFormation or to the EKS dashboard. It will take 10-15 minutes (so feel free to go grab a ☕). ### IMPORTANT: If you get errors in this step... It is most probably IAM role access requirements, which requires you to discuss with your administrator. 
``` %%bash ./eksctl create cluster \ --name demo-eks-cluster \ --region us-west-2 \ --nodes 2 ``` ### Configure local kubectl We want to now configure our local Kubectl so we can actually reach the cluster we've just created ``` !aws eks --region us-west-2 update-kubeconfig --name demo-eks-cluster ``` And we can check if the context has been added to kubectl config (contexts are basically the different k8s cluster connections) You should be able to see the context as "...aws:eks:eu-west-1:27...". If it's not activated you can activate that context with kubectlt config set-context <CONTEXT_NAME> ``` !kubectl config get-contexts ``` ## Setup Seldon Core Use the setup notebook to [Setup Cluster](../../seldon_core_setup.ipynb#Setup-Cluster) with [Ambassador Ingress](../../seldon_core_setup.ipynb#Ambassador) and [Install Seldon Core](../../seldon_core_setup.ipynb#Install-Seldon-Core). Instructions [also online](./seldon_core_setup.html). ## Push docker image In order for the EKS seldon deployment to access the image we just built, we need to push it to the Elastic Container Registry (ECR). 
If you have any issues please follow the official AWS documentation: https://docs.aws.amazon.com/AmazonECR/latest/userguide/docker-basics.html ### First we create a registry You can run the following command, and then see the result at https://us-west-2.console.aws.amazon.com/ecr/repositories?# ``` !aws ecr create-repository --repository-name seldon-repository --region us-west-2 ``` ### Now prepare docker image We need to first tag the docker image before we can push it ``` %%bash export AWS_ACCOUNT_ID="" export AWS_REGION="us-west-2" if [ -z "$AWS_ACCOUNT_ID" ]; then echo "ERROR: Please provide a value for the AWS variables" exit 1 fi docker tag deep-mnist:0.1 "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/seldon-repository" ``` ### We now login to aws through docker so we can access the repository ``` !`aws ecr get-login --no-include-email --region us-west-2` ``` ### And push the image Make sure you add your AWS Account ID ``` %%bash export AWS_ACCOUNT_ID="" export AWS_REGION="us-west-2" if [ -z "$AWS_ACCOUNT_ID" ]; then echo "ERROR: Please provide a value for the AWS variables" exit 1 fi docker push "$AWS_ACCOUNT_ID.dkr.ecr.$AWS_REGION.amazonaws.com/seldon-repository" ``` ## Running the Model We will now run the model. Let's first have a look at the file we'll be using to trigger the model: ``` !cat deep_mnist.json ``` Now let's trigger seldon to run the model. We basically have a yaml file, where we want to replace the value "REPLACE_FOR_IMAGE_AND_TAG" for the image you pushed ``` %%bash export AWS_ACCOUNT_ID="" export AWS_REGION="us-west-2" if [ -z "$AWS_ACCOUNT_ID" ]; then echo "ERROR: Please provide a value for the AWS variables" exit 1 fi sed 's|REPLACE_FOR_IMAGE_AND_TAG|'"$AWS_ACCOUNT_ID"'.dkr.ecr.'"$AWS_REGION"'.amazonaws.com/seldon-repository|g' deep_mnist.json | kubectl apply -f - ``` And let's check that it's been created. You should see an image called "deep-mnist-single-model...". 
We'll wait until STATUS changes from "ContainerCreating" to "Running" ``` !kubectl get pods ``` ## Test the model Now we can test the model, let's first find out what is the URL that we'll have to use: ``` !kubectl get svc ambassador -o jsonpath='{.status.loadBalancer.ingress[0].hostname}' ``` We'll use a random example from our dataset ``` import matplotlib.pyplot as plt # This is the variable that was initialised at the beginning of the file i = [0] x = mnist.test.images[i] y = mnist.test.labels[i] plt.imshow(x.reshape((28, 28)), cmap='gray') plt.show() print("Expected label: ", np.sum(range(0,10) * y), ". One hot encoding: ", y) ``` We can now add the URL above to send our request: ``` from seldon_core.seldon_client import SeldonClient import math import numpy as np host = "a68bbac487ca611e988060247f81f4c1-707754258.us-west-2.elb.amazonaws.com" port = "80" # Make sure you use the port above batch = x payload_type = "ndarray" sc = SeldonClient( gateway="ambassador", ambassador_endpoint=host + ":" + port, namespace="default", oauth_key="oauth-key", oauth_secret="oauth-secret") client_prediction = sc.predict( data=batch, deployment_name="deep-mnist", names=["text"], payload_type=payload_type) print(client_prediction) ``` ### Let's visualise the probability for each label It seems that it correctly predicted the number 7 ``` for proba, label in zip(client_prediction.response.data.ndarray.values[0].list_value.ListFields()[0][1], range(0,10)): print(f"LABEL {label}:\t {proba.number_value*100:6.4f} %") ```
github_jupyter
<a href="https://colab.research.google.com/github/pranjaldatta/PyVision/blob/master/demo/segmentation/pspnet/pspnet_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ## Pyramid Scene Parsing Net (PSPNet) demonstration notebook This is a stand alone notebook. Open it in colaboratory, use hardware acceleration if desired and just run the cells ! While the notebook utilizes images that come with PyVision repo, feel free to use your own ``` # Clone the repo! !git clone https://github.com/pranjaldatta/PyVision.git # Lets confirm whether we are using a GPU !nvidia-smi # PyVision's repo comes with some example images so we are going to use them # but feel free to use your own ! examples_path = "PyVision/pyvision/segmentation/pspnet/examples/" # import the pspnet model from PyVision.pyvision.segmentation import pspnet import matplotlib.pyplot as plt # init the pspnet model with the default pspnet-resnet50-voc2012 model # check docs for more info! model1 = pspnet.PSPNet(device="gpu", save=False, show=False) #automatically takes care of the required weights preds, seg_map, blend_img = model1.inference(examples_path+"16.jpg") # preds contain class indices for each pixel print("preds type: {}, shape: {}".format(type(preds), preds.shape)) # displaying the original image for reference plt.imshow(plt.imread(examples_path+"16.jpg")) plt.show() # displaying the segmentation map plt.imshow(seg_map) plt.show() # displaying the blended Image plt.imshow(blend_img) plt.show() # init the pspnet model with pspnet-resnet101-voc2012 model # check docs for more info! model2 = pspnet.PSPNet(model="pspnet-resnet101-voc2012", device="cpu", save=False, show=False) #automatically takes care of the required weights preds, seg_map, blend_img = model2.inference(examples_path+"16.jpg", save=False, show=False) # note the performance diff ! 
print("preds type: {}, shape: {}".format(type(preds), preds.shape)) plt.imshow(plt.imread(examples_path+"16.jpg")) plt.show() plt.imshow(seg_map) plt.show() plt.imshow(blend_img) plt.show() # init the pspnet model with pspnet-resnet50-ade20k model # check docs for more info! model3 = pspnet.PSPNet(model="pspnet-resnet50-cityscapes", device="gpu", save=False, show=False) #automatically takes care of the required weights preds , seg_map, blend_img = model3.inference(examples_path+"cityscape.png") print("preds type: {}, shape: {}".format(type(preds), preds.shape)) plt.imshow(plt.imread(examples_path+"cityscape.png")) plt.show() plt.imshow(seg_map) plt.show() plt.imshow(blend_img) plt.show() ```
github_jupyter
``` from IPython.display import display from IPython.display import HTML import IPython.core.display as di # Example: di.display_html('<h3>%s:</h3>' % str, raw=True) # This line will hide code by default when the notebook is exported as HTML di.display_html('<script>jQuery(function() {if (jQuery("body.notebook_app").length == 0) { jQuery(".input_area").toggle(); jQuery(".prompt").toggle();}});</script>', raw=True) # This line will add a button to toggle visibility of code blocks, for use with the HTML export version di.display_html('''<button onclick="jQuery('.input_area').toggle(); jQuery('.prompt').toggle();">Toggle code</button>''', raw=True) import pandas as pd import numpy as np import boto3 import datetime import matplotlib.pyplot as plt from matplotlib.dates import DateFormatter, DayLocator import seaborn as sns import fun_eda, fun_s3 #################################### CARGAMOS BASE ORIGINAL bucket_name = 'movicovid' fname = '{}/{}/{}'.format('data', 'dataforgood', 'movimientos_entre_administraciones_08062020.csv') original_data = fun_s3.abre_file_como_df(bucket_name, fname) #################################### CAMBIO TIPO DE VARIABLES original_data['starting_region_name'] = original_data['starting_region_name'].astype('category') original_data['ending_region_name'] = original_data['ending_region_name'].astype('category') original_data['fecha'] = pd.to_datetime(original_data['fecha']) original_data['hr'] = original_data['hora'].str.slice(start=0, stop=2).astype(int) #################################### SELECCIONAMOS VARIABLES DE INTERES data = original_data[['difference', 'baseline_people_moving', 'crisis_people_moving', 'starting_region_name', 'percent_change', 'ending_region_name', 'length_km', 'hr','fecha']].copy() #################################### DROP REGISTROS DUPLICADOS # elimina todos, menos el primero data.drop_duplicates(subset = ['difference', 'baseline_people_moving', 'crisis_people_moving', 'starting_region_name', 'percent_change', 
'ending_region_name', 'length_km', 'hr','fecha'], keep = 'first', inplace = True) delegaciones = ["Álvaro Obregón", "Azcapotzalco", "Benito Juárez", "Coyoacán", "Cuajimalpa de Morelos", "Cuauhtémoc", "Gustavo A. Madero", "Iztacalco", "Iztapalapa", "La Magdalena Contreras", "Miguel Hidalgo", "Milpa Alta", "Tláhuac", "Tlalpan", "Venustiano Carranza", "Xochimilco"] #################################### TRANSFORMACION DE VARIABLES #################################### #### Añado columnas que indican si el Origen/Destino es en CDMX data = data.assign(ori_cdmx=np.where(data["starting_region_name"].isin(delegaciones), 'CDMX', 'Otro'), des_cdmx=np.where(data["ending_region_name"].isin(delegaciones), 'CDMX', 'Otro')) ### Añado una columna que contabilice el numero de semanas data['num_semana'] = data['fecha'].dt.week ### Añado una columna para el dia de la semana dict_days = {0:'Lunes',1:'Martes',2:'Miércoles',3:'Jueves',4:'Viernes',5:'Sábado',6:'Domingo'} data['dia_semana'] = data['fecha'].dt.dayofweek.map(dict_days) ### Anado una columna para identificar la ruta data['ruta'] = data['starting_region_name'].astype(str) + " - " + data['ending_region_name'].astype(str) ``` # Movicovid ### A. Contexto El 23 de Marzo, se inicio oficialmente, la Jornada Nacional de Sana distancia, que se planteó inicialmente para el período del 23 de Marzo al 19 de Abril. Las medidas de esta campaña incluyeron la suspensión temporal de actividades escolares, la suspensión temporal de actividades no esenciales, la reprogramación de los eventos de concentración masiva, y el **repliegue familiar en casa**, es decir, el mantenerse en casa tratando de respetar la sana distancia de 1.5 metros entre los miembros de la familia. - **23 de Marzo 2020**. 
Inicia Fase 2 y la Jornada Nacional de Sana Distancia [1](http://www.imss.gob.mx/prensa/archivo/202003/142), [2](https://www.dof.gob.mx/nota_detalle.php?codigo=5590339&fecha=24/03/2020), [3](https://www.gob.mx/salud/prensa/095-inicia-fase-2-por-coronavirus-covid-19). - **30 de Marzo 2020**. Declaración de Emergencia Sanitaria [4](https://dof.gob.mx/nota_detalle.php?codigo=5590745&fecha=30/03/2020). - **21 de Abril 2020**. Inicia Fase 3 por COVID-19 [5](https://www.gob.mx/salud/prensa/110-inicia-la-fase-3-por-covid-19). Se extiende la Jornada Nacional de Sana Distancia hasta el 30 de mayo de 2020, [6](https://www.dof.gob.mx/nota_detalle.php?codigo=5592067&fecha=21/04/2020). - **1 de Junio 2020**. Concluye la Jornada Nacional de Sana Distancia. Inician actividades los sectores neo-esenciales como minería, construcción e industria automotriz, bajo protocolos de seguridad enfocados a evitar contagios en los centros de trabajo [7](http://www.imss.gob.mx/prensa/archivo/202005/356). Por su parte, y acorde con la política nacional de *#QuédateEnCasa* y *#SanaDistancia*, el Gobierno de la Ciudad de México también suspendió actividades y establecimientos del 23 de Marzo al 19 de Abril, [8](https://www.cdmx.gob.mx/portal/articulo/cierre-temporal-de-actividades-y-establecimientos-por-covid-19). El 31 de Marzo, se dio a conocer que únicamente se mantendrían activos los establecimientos relacionados con servicios de salud, alimentación e indispensables, como gasolineras y bancos, cerrando centros comerciales y otros estableciomientos considerados no escenciales, [9](https://www.cdmx.gob.mx/portal/articulo/cierre-de-centros-comerciales-por-emergencia-sanitaria). El 24 de abril, se cerraron el 20% de las estaciones del Metro, Metrobús y Tren Ligero, y el Hoy No Circula se aplicó para todos los vehículos particulares, con algunas excepciones, [10](https://www.cdmx.gob.mx/portal/articulo/nuevas-medidas-para-la-fase-3-por-covid-9). 
El 22 de Mayo, se dio a conocer el plan gradual hacia la nueva normalidad, [11](https://www.cdmx.gob.mx/portal/articulo/plan-gradual-hacia-la-nueva-normalidad), [12](https://covid19.cdmx.gob.mx/storage/app/media/plan%20gradual%20hacia%20la%20nueva%20normalidad%20en%20la%20ciudad%20de%20mexico/plangradualhacialanuevanormalidadenlaciudaddemexico20.pdf). El 1 de Julio, se retomaron actividades económicas esenciales, [13](https://www.cdmx.gob.mx/portal/articulo/lineamientos-para-industrias-que-retoman-actividades). Sin embargo, algunas fuentes periodísticas han afirmado que parte de la población no pudo quedarse en casa durante el período, [14](https://www.elfinanciero.com.mx/nacional/en-mexico-apenas-35-ha-respetado-el-quedate-en-casa-revela-google), [15](https://www.animalpolitico.com/2020/04/municipios-cumplen-quedate-en-casa-covid-19/). ### B. Objetivo Caracterizar a la población que lamentablemente, no pudo cumplir los lineamientos de #SusanaDistancia y #QuedateEnCasa sugeridos por la Secretaría de Salud. *Otros datos de movilidad semanales: [Apple](https://www.apple.com/covid19/mobility), [Google](https://www.gstatic.com/covid19/mobility/2020-06-07_MX_Mobility_Report_en.pdf). ### C. Preguntas a investigar ### 1. De las rutas #### 1.1. Definición de rutas - ¿Consideramos todas las trayectorias origen-destino como ruta? ¿las más transitadas? - Rutas de corto alcance vs. largo alcance (km a la redonda) - ¿Qué se puede inferir de los movimientos que ocurren con mayor/menor frecuencia? - 80-20 de las rutas - 90-10 de las rutas #### 1.2. Características de las rutas en el tiempo - ¿Se debe hacer un baseline para comparar? - Rutas sin cambios, rutas con cambios #### 1.3. 
Características de las rutas en cuanto a su destino y origen - Rutas de trabajo (zona habitacional -> trabajo), de recreación (zona habitacional -> espacios públicos), de emergencias (zona habitacional -> hospitales) - Rutas por actividad económica - Rutas en transporte público, Uber/Lyft, taxis, particular Existen 284,495 registros de rutas los cuales se distribuyen de la siguiente manera: ``` total_rutas = len(data) rutas = data.groupby(['ori_cdmx', 'des_cdmx']).size() rutas = rutas.reset_index() rutas.columns = ['Origen', 'Destino', 'Rutas'] rutas = rutas.pivot_table(index='Origen', columns='Destino', values='Rutas') rutas_prop = rutas.divide(total_rutas) * 100 fig, ax = plt.subplots(1, 2, figsize=(15, 4)) sns.heatmap( rutas, annot=True, fmt='0,.0f', cmap='RdYlGn', ax=ax[0]) ax[0].set_title('Número de rutas\n(Total {:0,.0f})'.format(total_rutas), fontsize=14) sns.heatmap( rutas_prop, annot=True, fmt='0,.1f', cmap='RdYlGn', ax=ax[1]) ax[1].set_title('Proporción de rutas\n(Total {:0,.1f})'.format(total_rutas), fontsize=14) for t in ax[1].texts: t.set_text(t.get_text() + " %") plt.show() ``` Separados por hora, la distribución es así: ``` ruta_hora = data.groupby(['ori_cdmx', 'des_cdmx', 'hr']).size() ruta_hora = ruta_hora.reset_index() ruta_hora.columns = ['Origen', 'Destino', 'hr', 'Rutas'] ruta0 = ruta_hora[ruta_hora['hr']==0].pivot_table(index='Origen', columns='Destino', values='Rutas') ruta8 = ruta_hora[ruta_hora['hr']==8].pivot_table(index='Origen', columns='Destino', values='Rutas') ruta16 = ruta_hora[ruta_hora['hr']==16].pivot_table(index='Origen', columns='Destino', values='Rutas') ruta0_prop = ruta0.divide(ruta0.values.sum())*100 ruta8_prop = ruta8.divide(ruta8.values.sum())*100 ruta16_prop = ruta16.divide(ruta16.values.sum())*100 fig, ax = plt.subplots(1, 3, figsize=(15, 4)) sns.heatmap( ruta0_prop, annot=True, fmt='0,.1f', cmap='RdYlGn', ax=ax[0]) ax[0].set_title('00 horas') for t in ax[0].texts: t.set_text(t.get_text() + " %") sns.heatmap( ruta8_prop, 
annot=True, fmt='0,.1f', cmap='RdYlGn', ax=ax[1]) ax[1].set_title('08 horas') for t in ax[1].texts: t.set_text(t.get_text() + " %") sns.heatmap( ruta16_prop, annot=True, fmt='0,.1f', cmap='RdYlGn', ax=ax[2]) ax[2].set_title('16 horas') for t in ax[2].texts: t.set_text(t.get_text() + " %") fig.suptitle('Proporción de rutas por hora', fontsize=14) plt.show() #plt.tight_layout() ``` Del número de personas que transitan las rutas ``` ruta_hora = data.groupby(['ori_cdmx', 'des_cdmx', 'hr']).sum()['crisis_people_moving'] ruta_hora = ruta_hora.reset_index() ruta_hora.columns = ['Origen', 'Destino', 'hr', 'Rutas'] ruta0 = ruta_hora[ruta_hora['hr']==0].pivot_table(index='Origen', columns='Destino', values='Rutas') ruta8 = ruta_hora[ruta_hora['hr']==8].pivot_table(index='Origen', columns='Destino', values='Rutas') ruta16 = ruta_hora[ruta_hora['hr']==16].pivot_table(index='Origen', columns='Destino', values='Rutas') ruta0_prop = ruta0.divide(ruta0.values.sum())*100 ruta8_prop = ruta8.divide(ruta8.values.sum())*100 ruta16_prop = ruta16.divide(ruta16.values.sum())*100 fig, ax = plt.subplots(1, 3, figsize=(15, 4)) sns.heatmap( ruta0_prop, annot=True, fmt='0,.1f', cmap='RdYlGn', ax=ax[0]) ax[0].set_title('00 horas') for t in ax[0].texts: t.set_text(t.get_text() + " %") sns.heatmap( ruta8_prop, annot=True, fmt='0,.1f', cmap='RdYlGn', ax=ax[1]) ax[1].set_title('08 horas') for t in ax[1].texts: t.set_text(t.get_text() + " %") sns.heatmap( ruta16_prop, annot=True, fmt='0,.1f', cmap='RdYlGn', ax=ax[2]) ax[2].set_title('16 horas') for t in ax[2].texts: t.set_text(t.get_text() + " %") fig.suptitle('Proporción de personas transitando por hora', fontsize=14) plt.show() ``` Rutas únicas según Origen-Destino ``` print("Existen {:,.0f} rutas únicas". 
format(len(data['ruta'].unique()))) ``` Rutas únicas según Origen-Destino y Hora ``` print("{:,.0f} ruta unicas a las 00 horas".format(len(data[data['hr']==0]['ruta'].unique()))) print("{:,.0f} ruta unicas a las 08 horas".format(len(data[data['hr']==8]['ruta'].unique()))) print("{:,.0f} ruta unicas a las 16 horas".format(len(data[data['hr']==16]['ruta'].unique()))) ``` ### 2. De la población #### 2.1. Características de la población que mantuvo rutas constantes durante la jornada de #SusanaDistancia - Población por tipos de rutas: características socio-demográficas, niveles de pobreza #### 2..2 Características de la población que tuvo rutas durante fechas que presentaron picos de conteo de casos, o 15 días antes a esas fechas - #### 2.3 Identificar correlaciones entre rutas y casos de COVID-19 - Rutas vs nuevos casos reportados - Rutas vs defunciones ### D. Recomendaciones -¿Qué recomendaciones de política de movilidad se pueden sugerir dadas las respuestas a las preguntas anteriores?
github_jupyter
# "Qakbot / Qbot" > Qakbot config extraction - toc: true - badges: true - categories: [qakbot,qbot,malware,config] ## Overview Sample (unpacked): `670e990631c0b98ccdd7701c2136f0cb8863a308b07abd0d64480c8a2412bde4` References: - [Unpacked Sample - Malshare](https://malshare.com/sample.php?action=detail&hash=670e990631c0b98ccdd7701c2136f0cb8863a308b07abd0d64480c8a2412bde4) - [Malware Bazaare](https://bazaar.abuse.ch/sample/572c9105266e1390706b72023ad785e461fd8d908e4ca04e7e7599bd3fab12fc/) - [AGDC Config Extractor Tutorial](https://www.youtube.com/watch?v=M22c1JgpG-U) - [Malpedia Articles](https://malpedia.caad.fkie.fraunhofer.de/details/win.qakbot) - [UnpacMe](https://www.unpac.me/results/29326131-6468-4d70-b892-66a818c994ae/#/) ## Expected C2 Config The following are c2 IPs that we expect to be in the config based on our sandbox output. ``` 32.221.229.7:443 140.82.49.12:443 24.152.219.253:995 182.56.99.126:443 76.169.147.192:32103 218.101.110.3:995 89.101.97.139:443 82.152.39.39:443 176.24.150.197:443 96.37.113.36:993 68.186.192.69:443 59.88.168.108:443 75.110.250.187:443 182.191.92.203:995 89.165.88.95:443 103.142.10.177:443 45.9.20.200:2211 24.95.61.62:443 194.36.28.26:443 78.101.82.198:2222 37.211.157.100:61202 70.163.1.219:443 31.215.99.73:443 103.143.8.71:6881 59.6.7.83:61200 63.153.187.104:443 14.96.79.22:61202 93.48.80.198:995 24.53.49.240:443 94.200.181.154:995 149.135.101.20:443 24.178.196.158:2222 209.210.95.228:32100 78.101.82.198:443 67.209.195.198:443 96.80.109.57:995 80.14.196.176:2222 38.70.253.226:2222 24.222.20.254:443 217.165.123.47:61200 74.15.2.252:2222 217.128.93.27:2222 102.65.38.67:443 190.73.3.148:2222 79.167.192.206:995 95.5.133.68:995 114.79.148.170:443 120.150.218.241:995 186.64.87.213:443 65.100.174.110:443 96.21.251.127:2222 136.232.34.70:443 63.143.92.99:995 136.143.11.232:443 39.49.27.10:995 111.125.245.116:995 41.228.22.180:443 217.164.247.241:2222 83.110.107.123:443 76.25.142.196:443 74.5.148.57:443 65.128.74.102:443 
67.165.206.193:993 173.21.10.71:2222 71.74.12.34:443 94.60.254.81:443 23.233.146.92:443 73.151.236.31:443 79.160.207.214:443 213.120.26.24:443 89.137.52.44:443 75.188.35.168:443 109.12.111.14:443 106.51.48.170:50001 68.204.7.158:443 78.101.82.198:995 80.6.192.58:443 41.96.250.164:995 114.79.145.28:443 188.54.96.91:443 105.198.236.99:995 50.238.6.36:443 65.100.174.110:8443 70.51.134.181:2222 117.248.109.38:21 86.98.53.83:443 182.176.180.73:443 217.165.11.65:61200 103.143.8.71:995 50.237.134.22:995 187.189.86.168:443 100.1.119.41:443 2.178.67.97:61202 86.198.237.51:2222 88.253.171.236:995 73.171.4.177:443 40.134.247.125:995 72.252.201.34:995 190.39.205.165:443 187.172.146.123:443 92.167.4.71:2222 189.30.244.252:995 105.111.124.76:443 84.199.230.66:443 14.96.67.177:443 182.56.57.23:995 87.70.93.215:443 93.48.58.123:2222 73.5.119.219:443 75.169.58.229:32100 173.71.147.134:995 69.46.15.180:443 23.82.128.108:443 5.36.7.212:443 200.75.131.234:443 82.77.137.101:995 187.201.90.81:443 24.55.112.61:443 201.172.31.95:443 216.238.72.121:443 216.238.71.31:995 207.246.112.221:443 207.246.112.221:995 216.238.72.121:995 216.238.71.31:443 27.223.92.142:995 24.229.150.54:995 117.198.149.221:443 ``` ## Helper Functions ``` def unhex(hex_string): import binascii if type(hex_string) == str: return binascii.unhexlify(hex_string.encode('utf-8')) else: return binascii.unhexlify(hex_string) def tohex(data): import binascii if type(data) == str: return binascii.hexlify(data.encode('utf-8')) else: return binascii.hexlify(data) ``` ## String Tables ``` key = unhex('efb05bc91c9c83400d08dd5087959b8b72a7a75b36b8d290c42ddbc22d292414ed86a03f3672a15b8da8bae6c737a55506eb623c9735a5fb41f213f30e4816fc32d9c4ecf873b394b869545cc2f382562e46cad089290ba543bd0000a990b9399a24d735') data = 
unhex('8cdf29ac4ff9f136646bb803eff0f7e75cc2df3e0de8b1f38a7996ad4307416c88bdee6b6426f238ecc69483bf52a51844aa2f6ff247d39222973d96762d2d9150b8a98b8d1a9df1c00c5479918af1224b2b98bfe65d2ef910c49ce7149e2aa8df257578b13ff5f0e9a517dfc25b7bcb9fe08143bcec48514114bec9e66b6133f31ed1e5d385b558d63a609f3e71fe56d794329d75872e0978885bb4a5808f12c1f1e42c2c3fae86f13f4128b98cd9487fcd30bdb8f2119655dbcd0f5f4ddd3df7e7b5ef1ecba71759dbb3fc8842acc24f4d457388e8d411530ac460fbdbc983b5418b307e8e594ae446c0893782639f202d6e9932b8b29cd616cbf18302352ab681e32f0023b2b5896a31f90af3bbf509875dd0df1f526db020f3ec9be81fc3893e4eddd2c38b6b8f956c7b6148bae9d70902419315e2ccdfba8a5ec62769980d5ae369f2922f967c847d68529954bcaa889d01efc7c8103a39b6f3de0a001abab9f94c57a53898df8203ec2caedb653d3a8575b7a7c3a6579795031388e0c8e908ebf0750c1426b5ab850f042a846bbff097c3f705fd7036d93a19a707fdde71c04bd63e7a4ed902eb9c91f804d1fe960e3b5cec90e4312e16abb3e2486cc030bd86c033a56cfdf329236cb13c87fcf6ea15c2882b5cc8b7f7c40888bb5e5d4179bfe9cf4b132ef222fefff5b1f103f91a688e264efe43c0a824866683202d6e99328ead82cb41ecc4d6391132b69af62f2e33b9b5fb1a398b27d183b07e9a65eff725605ab23ff3b0c7d80bd4d33e5b8be0cca95ea9a3034c5c71edd1f26c775cc423e8a8cc84b437c62665990b4ce31bc08324f276947b2138994abcff899301ddbadd11315c84b2ce056b46e4b4e5450bd120cd8bc536b932f9fb25367fb43ee3e0f6fb5cc2df3e0ddda6f8a15fbea34107416c88bdd7564417d233ecdad1c8a24fc06e639f1659e556c48b6f976b96353a628f5cb0a28ad616cbf18319353fa996f6354f36bea5fb4c25c03bd8d4d33ab968e9f125636da97ee2edfe8b11d5de2b428be0bea041b7c26c656841bec3f26c6620ee1dc4e4ffe6b45fc922679b0b12f359c9fb329767867e2966951cbda880f805d8fdcb0c7a39ba96b93f5d23b9a2ff076edd26868cdd3fa87bf9ed34236da53587a4a9b84692916c0e81e290a55bbca15e5b526cc3e3d85a0d13d73cfeded99ee952dd303d8a145bf446d78d20dc768b6b48798c57b7c498c556c0b4cc003939ffa8a7661c22f0f5b91b6f9f668dddd476ec2caee76f2838ef34a8b0ffd672f4d2395bd1a6c3a540abae485a677b83f5c5514272f632e3dbce87f737e627638a16599710f6823286769e5c277988178597958b24fcc38e5d0839ba83ee395c23b8feec516ea566eec1956bff789ca
96f2708b033f4fdf2ee1ec3893e4eddd2f1b45db7ab4e48507d82e88f471b01c934eec3cd87b15288336a8a11549766eabd15a552a14b14419345eff0dfca3ddcf0dd351935a181ed254120bef0c8477fcc0edc83c73abb79c0d0307446b82487e2e8b92d94957552d4be90e17ea2b1594c494682e9d41a6a21d828dae7edd0f36bdd226f91034ef31bc08324f261966966738457f985a8bc5391dcf3251900e780a0760120eafffd092ed66392999079ec6fbea36f6928ff75f4b79bf81cdfcf3069dabde2a048a99d4050537a8986f35a4252ce39e7fff7af9452d7236f88071caa15e29e35bd71996b2b62d410aead829514dee0cb53767ce4d3a02d472bbab5fb5a64cb22c986df358579eae62c3061b020e2e7e8e41cc6d33e4b998eccea71fea1424650488eefcd4904508851decdcec6a855cf0574840159e44685c661b57687412a7c9951adecce8f1addf9df04202ff881ed395a1aa9b9e45f399f14d4818369964ceeec23687bae72ae9ffef900f5c22f43cabcb0f90db4a04779567b8ee3d34c1831d33eecdcdfcee512d6772acb0c49fb5989db2f877fdf2e2663901bd9b39c9b12c3badc05385cb196ee334d32ead0de40659671e2acdf36b969e8e6325e71ae24e2f89bd80bd4f014618ee690a240b4ac034c5c71edc7d65e4506f22dee86df9ea237ce3074850750a4078b9f2d9e13d65d31658857b49683970796c8eb102728a79eb164722ba5b2fa5065c66dd897d55ba779e8e230643bef7ee3f9f78b31c8c92f53d6a6bd9054aba7170945649deac95c5706c834e387c2cbb040d27860841051ba40d797249c709c6a2d72fc1789b6839f01d2f9fe003839b1dbfa6e186fef8cc0477fc031d38ac47b8c64ecef2f7f6daf0ceef0e3fb1ec8d53e18ddaaf5c44cbfb44c594d27dfa8c4535a72f214cbfceda79572f9186f881053e45ac38f1da57a9d6a27618f129da18a9d1dd7f1ca351124a19ff7254729a4a3d5796ad12bceefd36195409ca613747ba935eac7f4e40682fb3e4ec8beffb648a9ec485141148cf5d757591dca23a3ccd68ac712f62c759f0751c55aca8f64ae768b7e24798e57abea898016b3fccc1d242fc2a0ed305a31aba2ec7546cc20cf80c334af689ca6107f67ba22e6f8dde21ec2d47e6af1bce4a15fb5a75909616c9deacf4d5300fd32e8d0ca8aa845c07b6393073cc44cd68f249f20c10e2c61995cbead829d5dd6ecdd52302ba381e9324f23a7bfe7076edd26868bc72ca868ffeb257f26b828e295cce21c94950466d0abe3ad4ebaae604c497b9fffa04b4f02c466bdd09fd6f36fa57055921148f258f7942e8636af5d31658857b4f7dea40bc4fdc2082638ec96fa332e31bea3e859629671938bdc37c932f8e2340d69ae27effaf4e01
389c3375ab89ef1b75999ad425d7164b9efcd5a3621e417c8ebeec6ed17e30749a6426bfe5b96c91ea2619c6d2d658f5dabc4a19709daf8d4087b69ecc3a27e792fa4b4e65e78850de9cf8675f827bcf136373fea7eb7bcbbcc17c4cc34198ae2a1f41deaf21c09627d9fe3c6504e5d966ca398bab1ae59966759bb1053f450d68841a57a9d3d7a49b85baaafa88a1ac5f1b8273531a7f3e4254629b9a4ec5b38976dd897d55b9a6ce5cd25795ab820e8e7efe21cc0a7080c909fdcff16959516121f58baafa068673ea138b7f4d28fa552d7336f874c4fee46d68841a15cb55a1f57ae778589859b01dce7d70f20008f9ae1244135a5b6fd094acb37d4a2d137be7deee61c5e78a41ee2e19bd817d3873454d285dd8d7ebeb05b404771cdbb80785306ee39e7cdd992ef15d23c68860551e3469fd961d433d175217b8c57abb7839612c7fdd7071839b496ee6b472bbab5fb5a64cb22c98acd7a9540b2df656e67b224dbf6f2e6049585723cebb7e4e44eb4ae6b4048719ea69d1f5910cb0cc0e1e983b541cc3663c52744f256f48e24806adb2c1b739057bab0ccd253f5e6d704741f8bbedd124f32ab96e0456e8514d58ac23ee952fdee252d35fd77a2e6bca95bade134449897f1a745fbad4f43627d81e380565852c234e1eed38aa244af3a64812455fb508bb82e826adb2c6d65de1bd38a898007b3f5d90c363fa696e73f4821a2b9e04661ce2fd081df34bc6cedf133797da829f1e2e3f20bc6dd5b75d9a2e4ad42b5ee694c57779fefd04b5f1dcf77dbcdd482a845890363991155f85b89b22f8167926224529d46bce8a59600c7f5d4050733b781e1330216abb3e2486cc00ddc82d55b9a65eff725605ab23ff395c8ce3ee2e40f1692f2d6966296e26c47507dbbefd24a4522d334e9ddd992c712f62c759f0751c55aca8f64ae408a7d1f59ab04ed98819711c0edd60a7a39ba9682214728a3beec5d25c12fd1eff334a471fded244161b33587c6dec737e4f37b1c9894c28b60fb9544471726b2c9d05a4413d532e3cfe99fb443c038069c0b52e441c4cb1d9676956f3d7a88328b8ba3ac2ff0ddf53f665c8192f2224729a4d0da664df114fcbdf5078475fff12f7e67bb24dbc2f2e516c8d02816fcb7f6a143bfa75f75776494c8c54b363cf50edeede8c88376f155458a1248fe5acbd7059760907c2166885bb6aac0bc16c5fddb0c1d18eebee3385b20abb3fd5c79c03191a1d136ac30cccd10496dab39e4f0d2cf5ef4c22940d1b1f5e87eafa3595c571483f2c4535a5cc537e1a8eeb49272a50647bd2358fa5ccba82480659a6d2d38994abcffbf9905e0f1ca1f3d3fa7dde72e4b46e4b5f14c0bcc2edc88d574a36cf9e4407a65b433a7e5e9e411c2d42816dbb3fca80db
8b048485071cda1c5474613cf3fad8a9fb5e517877055c945369751cc88319e728a40297b9932f9a29e971e9394cd1a312ea79df4784a2aa6d0fc5b67c82cd3c1d437a51cd5ed297961bc3ceeeff2e51587c33a42d9b0f1b748f5ec03294777bef0c37745068f3ef5cdbac3944ed62163863053f84180a7128b60876b2525ce6e96aa89bc01dae2dd3a3128b783ac335623ca92f05d6ee326d38cd575ac64f983176466ee62d8c5e9e416d2c42f36ef81f3b644abb6037a487188f6801a4378f23ef988d584ad60e81c558e104afe56c0db7cd254967a07749657bab0c4da04dafad50e3928b1c9a0760866e8abe0447bc031ce80de3abd75f3ed0c687eb83cbafcf6fb17d5d43458d9a6f5b90c879e0375017782e9d463551bcc2dbf8a93ec9452d1756989086ce55ac69e328133ce2e0f73887dbbae899b079bb6cf003a31a59ef6251434a5bffd7568cc2ecbdd8a0ca072afb11f5d7ab233e2e6e8a95badc22944eab7e4b15fb5e210094b7687d6d2505517d228a3ebc883a643c07d24ce111ebb15cb8e2d9e3fd3603d7ad012b7b180d179e4c7fb1b3d2cb6ddd13a4b23baf0bb193b9549ee8ac47baf6ff3a37d2d4baf35e6e1fec410cdc2384290f0c3a75fb2b259404a73c3c0c9535321d828f9cdd7a9a55dc03672c94b36f146cad505977f967a2d50955ebceccedd0091bdb800393da596ad314720cab1eb4a6fc025da87d931a270f1ed2f7d79af23f3e0edfc0adedd5b44d7bde4987ebea1585b4d6094c5c5514217d3698d8de99fb443c03854840d48b269f68232a55ca4387c4a9141aba5c29d0bd694e41a326eec97ee3a2e27abb2ea4d6ec025da87d932a377f0ee2e6267ad21f5e6effe07d1d0234fc1a8909344b5f11f76667d82f5a01a452ed222fedcdf8bf405f95575830750fb0697d5259e7ff3') # Create string table function def print_strings_table(strings_key, strings_data): strings_out = [] for i in range(len(data)): strings_out.append(strings_key[i%90] ^ strings_data[i]) strings_table = bytes(strings_out).decode('latin1') # Print strings table for s in strings_table.split('\x00'): print(s) print_strings_table(key, data) # Create string lookup function def string_lookup(strings_key, strings_data, offset): strings_out = [] for i in range(len(data)): strings_out.append(strings_key[i%len(key)] ^ strings_data[i]) strings_table = bytes(strings_out)[offset:].decode('latin1') return strings_table.split("\x00")[0] print("\n\nString lookup(%d): %s" % (0, 
string_lookup(key, data, 0))) key = unhex('09bcba05a9423e4ad9256dea6dd6916dbfd2d242e45801f25905afde20b6f88e34d60f78a26a952d8f646efba3467cc65e11720d1559bfc27de9a9b12b5644a2444f8cbb7423ee77544330eb389f0c8214f4b35399147d1dcda0') data = unhex('2ccf9a278d674d6ae40531c848a5cd4f84f2f462c07d72d05975c0a945c48be651ba6356c712f02dfc07068fc23517b570740a683576fcb01888ddd40b7916f7646dc2ef5462bb231c0c62a26cc650d14da7e716d4365d329ee329f3f456fd036c1ef90a39a44df3e44d90868062c67d72d0792ae18e0099be8e67b3631e821ef05efb4428baea0a39827f30530d452bd0a50f88c4f54a2225a20926efc91b5081112043478357fe61eb34dbd23ff5142f78a98041ddce25ff2b4c3e906a56bb289bc46dd1b7a662883762933562ddb155c6f8dd7b905b2fe338d071c20d0d89cc3513a02a4d25647b3dd0b50ec9e7e5771531d0362ae2cf22469c043d2c5eb768ed63e47d98d61ff067091da8d27bd3c825db274d77fe001ecd4db3e31f82f7b662883d6fcf7c70af8d45da9eae40b37c0c8225de038f051c8b836b1dc67c3401516620ccb618849a83772527ca302effd0070d8b0f316110c47bed69e36091937ccb415d3f83f429fdef51e10d6c038d7c31b93485c528f2f0f26d903621d72a2580aa5296daab47f42f57f129b562c1272bdb8c1c5ce90d455228256bcaf858d99bc40b7901f6646abc890119cb47663630985bf778e3679fc07dfc6c183de2e46cd0df71cc62110cf90a39a44df3e46dcca0a4328b2b75dc3c7dcae546c491ea55fb7811cc00f04efb0b1cd6cb2310b63b635f3e2777daba18d2cfc34232258f3326e2d111409a18266e588e54ef69f039c2877dfc6c181dbfcf7cc8df25d9305724ad25039901b9fe06caa2f26f952d64802071d6ae458bb9c278f6220ccb07f042fa1053ca936623aa3a7002234a2ddcb2538dca9f743b37c6273ca29e0723801b2026439f18b068ed7995da3dc6600f68bed47a9c9564c52e613eab501e9e1ed6a4558ee0d271d36938f27769c1b520d89dfa47a26e0c8247fb4ce0640dc1ff160ea9396313605138cba37d88dd9f4e2e2182613ab69e0103cc52276110c4719f50d16d87c736f4274f419ac967d8d572da12513dbc573e8208bafd31c9e3fc72b8286e853c77dcb645da94a051ae6a78c307e659fd010f968d2210aa5e7f1779352fd6a70ac986d0473a44d33326e2c80042ee52276315db0ce722a761d49663ad6c5338b8807bd9c93f89674d6aaa4005b519b3e21985f2f737c43b6e9c2a71dc8154d38bfa0ef62a1c821cf849ea100b98d72318fc7e34162d762bdaa3098cd9c3443521d13775ac9e1023
8d1a30631f8818ec69f614d49c30b9641473aa8e6cc4df25842c1e7cf9145fdd43e6bf5d91e3f264c478758b29608ffc05c5a4dd4da57b1dcf59a771ec0502988d2304a37c314c2d377ccce07db5fac8582221cf777dd0ec1d4d8a18233060844ffa7ed17c91df3fc5624c33fdfc79d3cd60db31562fb549438f15b3913ef0948615a50a44ae146cccac4fc597e8408a5811cc0efa5afc382d8ed13419a82a47177f6630d0ac21bbdcdf2b3c0cda253cf8ff17479d5e3b0e53d652e964b56390e63be1770e79b99209ddc871c0245f29ad0b089208edfc01c8a08d3189286dc92a64c2ae4cd3c3fd55b86b1acd12ae4efa070594cc6b47b03763077e150fd6b0099cc8dd2b0636cd2226e0de3d4e8f103113519f509f5acb46a0e612d5392d5ecdc979dfd56bcf2b596af64401866db8f4199fa1ba23963d01b36a4099ea6583cdd144a4342ee005ed7be6000b94a3630fe602335628667982e221b5f5930e2518fe1813b79b5203ca52271f12eb17eb38826691d420ef664e2fe3c571d99a28da623e') print_strings_table(key, data) #print("\n\nString lookup(%d): %s" % (1246, string_lookup(key, data, 1246))) string_lookup(key, data, 708) ``` ## Decrypt RSA Public Key ``` key = unhex('60441b6a08752e227c50334a08122d395a') data = unhex('50c61a483878282b56d67bccff1f2c3861411b698a7421224cd232400a902c386087aa879d75c99ad8e04da00fecffa6838a0df6c32da3251b749690f07c6d027e3a0b5f1e1ac2522a396702b35bc678b0defb6a2b21e8a69b668a06c48612b593c4495f476d5697d5719c35b7125a4a25b62a44244188381473f8acca2a414c20b49e16dc57378d2f742b77aa0b059c2582068062d5bdb42bf959b0fc89fe6cdddb9965248ecda9ea64103bbfc6b53d14bbee029a68e7f382bcc1ab20a4d6d776359d0b6a1b4fdb71ec45093199a066c70119e1cac7164bbb41af1a35b076b667022298addc67a9e71ca25e9e7eca2c592d1ec28a501394b7a36fde1ed22e5746fe08172aa6ef0685c5d59c98b0658f1f4cb9a5506f0d5c31177379f91569473f46186b08740000') out = [] for i in range(len(data)): out.append(key[i % len(key)] ^ data[i]) tohex(bytes(out)) ``` ## RC4 Decryption ``` def rc4crypt(data, key): #If the input is a string convert to byte arrays if type(data) == str: data = data.encode('utf-8') if type(key) == str: key = key.encode('utf-8') x = 0 box = list(range(256)) for i in range(256): x = (x + box[i] + key[i % len(key)]) % 256 box[i], 
box[x] = box[x], box[i] x = 0 y = 0 out = [] for c in data: x = (x + 1) % 256 y = (y + box[x]) % 256 box[x], box[y] = box[y], box[x] out.append(c ^ box[(box[x] + box[y]) % 256]) return bytes(out) ``` ## Config Extraction ``` import pefile import hashlib import struct SAMPLE_FILE_PATH = '/tmp/qakbot.bin' data = open(SAMPLE_FILE_PATH, 'rb').read() pe = pefile.PE(data=data) # Get the encrypted config from resource rt_string_idx = [ entry.id for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries ].index(pefile.RESOURCE_TYPE['RT_RCDATA']) rt_string_directory = pe.DIRECTORY_ENTRY_RESOURCE.entries[rt_string_idx] resource_data = None # The resource name is hardcoded -- needs to be dynamic for entry in rt_string_directory.directory.entries: if str(entry.name) == '3719': data_rva = entry.directory.entries[0].data.struct.OffsetToData size = entry.directory.entries[0].data.struct.Size resource_data = pe.get_memory_mapped_image()[data_rva:data_rva+size] # Build decryption key -- change to dynamic extraction key_string = rb'\System32\WindowsPowerShell\v1.0\powershell.exe' m = hashlib.sha1() m.update(key_string) key_bytes = m.digest() out = rc4crypt(resource_data, key_bytes) # Config has a SHA1 intergrity check followed by binary ip addresses print("Config SHA1: %s" % tohex(out[:20])) ip_table = out[21:] for ptr in range(0,len(ip_table),7): ip_string = "%d.%d.%d.%d" % (ord(ip_table[ptr:ptr+1]), ord(ip_table[ptr+1:ptr+2]), ord(ip_table[ptr+2:ptr+3]), ord(ip_table[ptr+3:ptr+4])) port_string = struct.unpack('>H', ip_table[ptr+4:ptr+6])[0] print("%s:%s" % (ip_string,port_string)) ``` ## TODO - add dynamic extraction for the decryption key - add dynamic extraction for the resource names - decrypt the other resource - fix ip extraction to use struct - validate the extract configs using the sha1 hash
github_jupyter
# Autoregressions This notebook introduces autoregression modeling using the `AutoReg` model. It also covers aspects of `ar_select_order` assists in selecting models that minimize an information criteria such as the AIC. An autoregressive model has dynamics given by $$ y_t = \delta + \phi_1 y_{t-1} + \ldots + \phi_p y_{t-p} + \epsilon_t. $$ `AutoReg` also permits models with: * Deterministic terms (`trend`) * `n`: No deterministic term * `c`: Constant (default) * `ct`: Constant and time trend * `t`: Time trend only * Seasonal dummies (`seasonal`) * `True` includes $s-1$ dummies where $s$ is the period of the time series (e.g., 12 for monthly) * Custom deterministic terms (`deterministic`) * Accepts a `DeterministicProcess` * Exogenous variables (`exog`) * A `DataFrame` or `array` of exogenous variables to include in the model * Omission of selected lags (`lags`) * If `lags` is an iterable of integers, then only these are included in the model. The complete specification is $$ y_t = \delta_0 + \delta_1 t + \phi_1 y_{t-1} + \ldots + \phi_p y_{t-p} + \sum_{i=1}^{s-1} \gamma_i d_i + \sum_{j=1}^{m} \kappa_j x_{t,j} + \epsilon_t. $$ where: * $d_i$ is a seasonal dummy that is 1 if $mod(t, period) = i$. Period 0 is excluded if the model contains a constant (`c` is in `trend`). * $t$ is a time trend ($1,2,\ldots$) that starts with 1 in the first observation. * $x_{t,j}$ are exogenous regressors. **Note** these are time-aligned to the left-hand-side variable when defining a model. * $\epsilon_t$ is assumed to be a white noise process. This first cell imports standard packages and sets plats to appear inline. ``` %matplotlib inline import matplotlib.pyplot as plt import pandas as pd import pandas_datareader as pdr import seaborn as sns from statsmodels.tsa.ar_model import AutoReg, ar_select_order from statsmodels.tsa.api import acf, pacf, graphics ``` This cell sets the plotting style, registers pandas date converters for matplotlib, and sets the default figure size. 
``` sns.set_style('darkgrid') pd.plotting.register_matplotlib_converters() # Default figure size sns.mpl.rc('figure',figsize=(16, 6)) ``` The first set of examples uses the month-over-month growth rate in U.S. Housing starts that has not been seasonally adjusted. The seasonality is evident by the regular pattern of peaks and troughs. We set the frequency for the time series to "MS" (month-start) to avoid warnings when using `AutoReg`. ``` data = pdr.get_data_fred('HOUSTNSA', '1959-01-01', '2019-06-01') housing = data.HOUSTNSA.pct_change().dropna() # Scale by 100 to get percentages housing = 100 * housing.asfreq('MS') fig, ax = plt.subplots() ax = housing.plot(ax=ax) ``` We can start with an AR(3). While this is not a good model for this data, it demonstrates the basic use of the API. ``` mod = AutoReg(housing, 3, old_names=False) res = mod.fit() print(res.summary()) ``` `AutoReg` supports the same covariance estimators as `OLS`. Below, we use `cov_type="HC0"`, which is White's covariance estimator. While the parameter estimates are the same, all of the quantities that depend on the standard error change. ``` res = mod.fit(cov_type="HC0") print(res.summary()) sel = ar_select_order(housing, 13, old_names=False) sel.ar_lags res = sel.model.fit() print(res.summary()) ``` `plot_predict` visualizes forecasts. Here we produce a large number of forecasts which show the string seasonality captured by the model. ``` fig = res.plot_predict(720, 840) ``` `plot_diagnositcs` indicates that the model captures the key features in the data. ``` fig = plt.figure(figsize=(16,9)) fig = res.plot_diagnostics(fig=fig, lags=30) ``` ## Seasonal Dummies `AutoReg` supports seasonal dummies which are an alternative way to model seasonality. Including the dummies shortens the dynamics to only an AR(2). 
``` sel = ar_select_order(housing, 13, seasonal=True, old_names=False) sel.ar_lags res = sel.model.fit() print(res.summary()) ``` The seasonal dummies are obvious in the forecasts which has a non-trivial seasonal component in all periods 10 years in to the future. ``` fig = res.plot_predict(720, 840) fig = plt.figure(figsize=(16,9)) fig = res.plot_diagnostics(lags=30, fig=fig) ``` ## Seasonal Dynamics While `AutoReg` does not directly support Seasonal components since it uses OLS to estimate parameters, it is possible to capture seasonal dynamics using an over-parametrized Seasonal AR that does not impose the restrictions in the Seasonal AR. ``` yoy_housing = data.HOUSTNSA.pct_change(12).resample("MS").last().dropna() _, ax = plt.subplots() ax = yoy_housing.plot(ax=ax) ``` We start by selecting a model using the simple method that only chooses the maximum lag. All lower lags are automatically included. The maximum lag to check is set to 13 since this allows the model to next a Seasonal AR that has both a short-run AR(1) component and a Seasonal AR(1) component, so that $$ (1-\phi_s L^{12})(1-\phi_1 L)y_t = \epsilon_t $$ which becomes $$ y_t = \phi_1 y_{t-1} +\phi_s Y_{t-12} - \phi_1\phi_s Y_{t-13} + \epsilon_t $$ when expanded. `AutoReg` does not enforce the structure, but can estimate the nesting model $$ y_t = \phi_1 y_{t-1} +\phi_{12} Y_{t-12} - \phi_{13} Y_{t-13} + \epsilon_t. $$ We see that all 13 lags are selected. ``` sel = ar_select_order(yoy_housing, 13, old_names=False) sel.ar_lags ``` It seems unlikely that all 13 lags are required. We can set `glob=True` to search all $2^{13}$ models that include up to 13 lags. Here we see that the first three are selected, as is the 7th, and finally the 12th and 13th are selected. This is superficially similar to the structure described above. After fitting the model, we take a look at the diagnostic plots that indicate that this specification appears to be adequate to capture the dynamics in the data. 
``` sel = ar_select_order(yoy_housing, 13, glob=True, old_names=False) sel.ar_lags res = sel.model.fit() print(res.summary()) fig = plt.figure(figsize=(16,9)) fig = res.plot_diagnostics(fig=fig, lags=30) ``` We can also include seasonal dummies. These are all insignificant since the model is using year-over-year changes. ``` sel = ar_select_order(yoy_housing, 13, glob=True, seasonal=True, old_names=False) sel.ar_lags res = sel.model.fit() print(res.summary()) ``` ## Industrial Production We will use the industrial production index data to examine forecasting. ``` data = pdr.get_data_fred('INDPRO', '1959-01-01', '2019-06-01') ind_prod = data.INDPRO.pct_change(12).dropna().asfreq('MS') _, ax = plt.subplots(figsize=(16,9)) ind_prod.plot(ax=ax) ``` We will start by selecting a model using up to 12 lags. An AR(13) minimizes the BIC criteria even though many coefficients are insignificant. ``` sel = ar_select_order(ind_prod, 13, 'bic', old_names=False) res = sel.model.fit() print(res.summary()) ``` We can also use a global search which allows longer lags to enter if needed without requiring the shorter lags. Here we see many lags dropped. The model indicates there may be some seasonality in the data. ``` sel = ar_select_order(ind_prod, 13, 'bic', glob=True, old_names=False) sel.ar_lags res_glob = sel.model.fit() print(res.summary()) ``` `plot_predict` can be used to produce forecast plots along with confidence intervals. Here we produce forecasts starting at the last observation and continuing for 18 months. ``` ind_prod.shape fig = res_glob.plot_predict(start=714, end=732) ``` The forecasts from the full model and the restricted model are very similar. I also include an AR(5) which has very different dynamics ``` res_ar5 = AutoReg(ind_prod, 5, old_names=False).fit() predictions = pd.DataFrame({"AR(5)": res_ar5.predict(start=714, end=726), "AR(13)": res.predict(start=714, end=726), "Restr. 
AR(13)": res_glob.predict(start=714, end=726)}) _, ax = plt.subplots() ax = predictions.plot(ax=ax) ``` The diagnostics indicate the model captures most of the the dynamics in the data. The ACF shows a patters at the seasonal frequency and so a more complete seasonal model (`SARIMAX`) may be needed. ``` fig = plt.figure(figsize=(16,9)) fig = res_glob.plot_diagnostics(fig=fig, lags=30) ``` # Forecasting Forecasts are produced using the `predict` method from a results instance. The default produces static forecasts which are one-step forecasts. Producing multi-step forecasts requires using `dynamic=True`. In this next cell, we produce 12-step-heard forecasts for the final 24 periods in the sample. This requires a loop. **Note**: These are technically in-sample since the data we are forecasting was used to estimate parameters. Producing OOS forecasts requires two models. The first must exclude the OOS period. The second uses the `predict` method from the full-sample model with the parameters from the shorter sample model that excluded the OOS period. ``` import numpy as np start = ind_prod.index[-24] forecast_index = pd.date_range(start, freq=ind_prod.index.freq, periods=36) cols = ['-'.join(str(val) for val in (idx.year, idx.month)) for idx in forecast_index] forecasts = pd.DataFrame(index=forecast_index,columns=cols) for i in range(1, 24): fcast = res_glob.predict(start=forecast_index[i], end=forecast_index[i+12], dynamic=True) forecasts.loc[fcast.index, cols[i]] = fcast _, ax = plt.subplots(figsize=(16, 10)) ind_prod.iloc[-24:].plot(ax=ax, color="black", linestyle="--") ax = forecasts.plot(ax=ax) ``` ## Comparing to SARIMAX `SARIMAX` is an implementation of a Seasonal Autoregressive Integrated Moving Average with eXogenous regressors model. It supports: * Specification of seasonal and nonseasonal AR and MA components * Inclusion of Exogenous variables * Full maximum-likelihood estimation using the Kalman Filter This model is more feature rich than `AutoReg`. 
Unlike `SARIMAX`, `AutoReg` estimates parameters using OLS. This is faster and the problem is globally convex, and so there are no issues with local minima. The closed-form estimator and its performance are the key advantages of `AutoReg` over `SARIMAX` when comparing AR(P) models. `AutoReg` also support seasonal dummies, which can be used with `SARIMAX` if the user includes them as exogenous regressors. ``` from statsmodels.tsa.api import SARIMAX sarimax_mod = SARIMAX(ind_prod, order=((1,5,12,13),0, 0), trend='c') sarimax_res = sarimax_mod.fit() print(sarimax_res.summary()) sarimax_params = sarimax_res.params.iloc[:-1].copy() sarimax_params.index = res_glob.params.index params = pd.concat([res_glob.params, sarimax_params], axis=1, sort=False) params.columns = ["AutoReg", "SARIMAX"] params ``` ## Custom Deterministic Processes The `deterministic` parameter allows a custom `DeterministicProcess` to be used. This allows for more complex deterministic terms to be constructed, for example one that includes seasonal components with two periods, or, as the next example shows, one that uses a Fourier series rather than seasonal dummies. ``` from statsmodels.tsa.deterministic import DeterministicProcess dp = DeterministicProcess(housing.index, constant=True, period=12, fourier=2) mod = AutoReg(housing,2, trend="n",seasonal=False, deterministic=dp) res = mod.fit() print(res.summary()) fig = res.plot_predict(720, 840) ```
github_jupyter
# Variational inference for Bayesian neural networks This article demonstrates how to implement and train a Bayesian neural network with Keras following the approach described in [Weight Uncertainty in Neural Networks](https://arxiv.org/abs/1505.05424) (*Bayes by Backprop*). The implementation is kept simple for illustration purposes and uses Keras 2.2.4 and Tensorflow 1.12.0. For more advanced implementations of Bayesian methods for neural networks consider using [Tensorflow Probability](https://www.tensorflow.org/probability), for example. Bayesian neural networks differ from plain neural networks in that their weights are assigned a probability distribution instead of a single value or point estimate. These probability distributions describe the uncertainty in weights and can be used to estimate uncertainty in predictions. Training a Bayesian neural network via variational inference learns the parameters of these distributions instead of the weights directly. ## Probabilistic model A neural network can be viewed as probabilistic model $p(y \lvert \mathbf{x},\mathbf{w})$. For classification, $y$ is a set of classes and $p(y \lvert \mathbf{x},\mathbf{w})$ is a categorical distribution. For regression, $y$ is a continuous variable and $p(y \lvert \mathbf{x},\mathbf{w})$ is a Gaussian distribution. Given a training dataset $\mathcal{D} = \left\{\mathbf{x}^{(i)}, y^{(i)}\right\}$ we can construct the likelihood function $p(\mathcal{D} \lvert \mathbf{w}) = \prod_i p(y^{(i)} \lvert \mathbf{x}^{(i)}, \mathbf{w})$ which is a function of parameters $\mathbf{w}$. Maximizing the likelihood function gives the maximimum likelihood estimate (MLE) of $\mathbf{w}$. The usual optimization objective during training is the negative log likelihood. For a categorical distribution this is the *cross entropy* error function, for a Gaussian distribution this is proportional to the *sum of squares* error function. MLE can lead to severe overfitting though. 
Multiplying the likelihood with a prior distribution $p(\mathbf{w})$ is, by Bayes theorem, proportional to the posterior distribution $p(\mathbf{w} \lvert \mathcal{D}) \propto p(\mathcal{D} \lvert \mathbf{w}) p(\mathbf{w})$. Maximizing $p(\mathcal{D} \lvert \mathbf{w}) p(\mathbf{w})$ gives the maximum a posteriori (MAP) estimate of $\mathbf{w}$. Computing the MAP estimate has a regularizing effect and can prevent overfitting. The optimization objectives here are the same as for MLE plus a regularization term coming from the log prior. Both MLE and MAP give point estimates of parameters. If we instead had a full posterior distribution over parameters we could make predictions that take weight uncertainty into account. This is covered by the posterior predictive distribution $p(y \lvert \mathbf{x},\mathcal{D}) = \int p(y \lvert \mathbf{x}, \mathbf{w}) p(\mathbf{w} \lvert \mathcal{D}) d\mathbf{w}$ in which the parameters have been marginalized out. This is equivalent to averaging predictions from an ensemble of neural networks weighted by the posterior probabilities of their parameters $\mathbf{w}$. ## Variational inference Unfortunately, an analytical solution for the posterior $p(\mathbf{w} \lvert \mathcal{D})$ in neural networks is untractable. We therefore have to approximate the true posterior with a variational distribution $q(\mathbf{w} \lvert \boldsymbol{\theta})$ of known functional form whose parameters we want to estimate. This can be done by minimizing the [Kullback-Leibler divergence](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) between $q(\mathbf{w} \lvert \boldsymbol{\theta})$ and the true posterior $p(\mathbf{w} \lvert \mathcal{D})$ w.r.t. to $\boldsymbol{\theta}$. 
It can be shown that the corresponding optimization objective or cost function can be written as $$ \mathcal{F}(\mathcal{D},\boldsymbol{\theta}) = \mathrm{KL}(q(\mathbf{w} \lvert \boldsymbol{\theta}) \mid\mid p(\mathbf{w})) - \mathbb{E}_{q(\mathbf{w} \lvert \boldsymbol{\theta})} \log p(\mathcal{D} \lvert \mathbf{w}) \tag{1} $$ This is known as the *variational free energy*. The first term is the Kullback-Leibler divergence between the variational distribution $q(\mathbf{w} \lvert \boldsymbol{\theta})$ and the prior $p(\mathbf{w})$ and is called the *complexity cost*. The second term is the expected value of the likelihood w.r.t. the variational distribution and is called the *likelihood cost*. By re-arranging the KL term, the cost function can also be written as $$ \mathcal{F}(\mathcal{D},\boldsymbol{\theta}) = \mathbb{E}_{q(\mathbf{w} \lvert \boldsymbol{\theta})} \log q(\mathbf{w} \lvert \boldsymbol{\theta}) - \mathbb{E}_{q(\mathbf{w} \lvert \boldsymbol{\theta})} \log p(\mathbf{w}) - \mathbb{E}_{q(\mathbf{w} \lvert \boldsymbol{\theta})} \log p(\mathcal{D} \lvert \mathbf{w}) \tag{2} $$ We see that all three terms in equation $2$ are expectations w.r.t. the variational distribution $q(\mathbf{w} \lvert \boldsymbol{\theta})$. The cost function can therefore be approximated by drawing [Monte Carlo](https://en.wikipedia.org/wiki/Monte_Carlo_method) samples $\mathbf{w}^{(i)}$ from $q(\mathbf{w} \lvert \boldsymbol{\theta})$. $$ \mathcal{F}(\mathcal{D},\boldsymbol{\theta}) \approx {1 \over N} \sum_{i=1}^N \left[ \log q(\mathbf{w}^{(i)} \lvert \boldsymbol{\theta}) - \log p(\mathbf{w}^{(i)}) - \log p(\mathcal{D} \lvert \mathbf{w}^{(i)})\right] \tag{3} $$ In the following example, we'll use a Gaussian distribution for the variational posterior, parameterized by $\boldsymbol{\theta} = (\boldsymbol{\mu}, \boldsymbol{\sigma})$ where $\boldsymbol{\mu}$ is the mean vector of the distribution and $\boldsymbol{\sigma}$ the standard deviation vector. 
The elements of $\boldsymbol{\sigma}$ are the elements of a diagonal covariance matrix which means that weights are assumed to be uncorrelated. Instead of parameterizing the neural network with weights $\mathbf{w}$ directly we parameterize it with $\boldsymbol{\mu}$ and $\boldsymbol{\sigma}$ and therefore double the number of parameters compared to a plain neural network. ## Network training A training iteration consists of a forward-pass and and backward-pass. During a forward pass a single sample is drawn from the variational posterior distribution. It is used to evaluate the approximate cost function defined by equation $3$. The first two terms of the cost function are data-independent and can be evaluated layer-wise, the last term is data-dependent and is evaluated at the end of the forward-pass. During a backward-pass, gradients of $\boldsymbol{\mu}$ and $\boldsymbol{\sigma}$ are calculated via backpropagation so that their values can be updated by an optimizer. Since a forward pass involves a stochastic sampling step we have to apply the so-called *re-parameterization trick* for backpropagation to work. The trick is to sample from a parameter-free distribution and then transform the sampled $\boldsymbol{\epsilon}$ with a deterministic function $t(\boldsymbol{\mu}, \boldsymbol{\sigma}, \boldsymbol{\epsilon})$ for which a gradient can be defined. Here, $\boldsymbol{\epsilon}$ is drawn from a standard normal distribution i.e. $\boldsymbol{\epsilon} \sim \mathcal{N}(\mathbf{0}, \mathbf{I})$ and function $t(\boldsymbol{\mu}, \boldsymbol{\sigma}, \boldsymbol{\epsilon}) = \boldsymbol{\mu} + \boldsymbol{\sigma} \odot \boldsymbol{\epsilon}$ shifts the sample by mean $\boldsymbol{\mu}$ and scales it with $\boldsymbol{\sigma}$ where $\odot$ is element-wise multiplication. 
For numeric stability we will parameterize the network with $\boldsymbol{\rho}$ instead of $\boldsymbol{\sigma}$ directly and transform $\boldsymbol{\rho}$ with the softplus function to obtain $\boldsymbol{\sigma} = \log(1 + \exp(\boldsymbol{\rho}))$. This ensures that $\boldsymbol{\sigma}$ is always positive. As prior, a scale mixture of two Gaussians is used $p(\mathbf{w}) = \pi \mathcal{N}(\mathbf{w} \lvert 0,\sigma_1^2) + (1 - \pi) \mathcal{N}(\mathbf{w} \lvert 0,\sigma_2^2)$ where $\sigma_1$, $\sigma_2$ and $\pi$ are shared parameters. Their values are learned during training (which is in contrast to the paper where a fixed prior is used). ## Uncertainty characterization Uncertainty in predictions that arise from the uncertainty in weights is called [epistemic uncertainty](https://en.wikipedia.org/wiki/Uncertainty_quantification). This kind of uncertainty can be reduced if we get more data. Consequently, epistemic uncertainty is higher in regions of no or little training data and lower in regions of more training data. Epistemic uncertainty is covered by the variational posterior distribution. Uncertainty coming from the inherent noise in training data is an example of [aleatoric uncertainty](https://en.wikipedia.org/wiki/Uncertainty_quantification). It cannot be reduced if we get more data. Aleatoric uncertainty is covered by the probability distribution used to define the likelihood function. ## Implementation example Variational inference of neural network parameters is now demonstrated on a simple regression problem. We therefore use a Gaussian distribution for $p(y \lvert \mathbf{x},\mathbf{w})$. The training dataset consists of 32 noisy samples `X`, `y` drawn from a sinusoidal function. 
``` import numpy as np import matplotlib.pyplot as plt %matplotlib inline def f(x, sigma): epsilon = np.random.randn(*x.shape) * sigma return 10 * np.sin(2 * np.pi * (x)) + epsilon train_size = 32 noise = 1.0 X = np.linspace(-0.5, 0.5, train_size).reshape(-1, 1) y = f(X, sigma=noise) y_true = f(X, sigma=0.0) plt.scatter(X, y, marker='+', label='Training data') plt.plot(X, y_true, label='Truth') plt.title('Noisy training data and ground truth') plt.legend(); ``` The noise in training data gives rise to aleatoric uncertainty. To cover epistemic uncertainty we implement the variational inference logic in a custom `DenseVariational` Keras layer. The learnable parameters of the mixture prior, $\sigma_1$ $\sigma_2$ and $\pi$, are shared across layers. The complexity cost (`kl_loss`) is computed layer-wise and added to the total loss with the `add_loss` method. Implementations of `build` and `call` directly follow the equations defined above. ``` from keras import backend as K from keras import activations, initializers from keras.layers import Layer import tensorflow as tf import tensorflow_probability as tfp def mixture_prior_params(sigma_1, sigma_2, pi, return_sigma=False): params = K.variable([sigma_1, sigma_2, pi], name='mixture_prior_params') sigma = np.sqrt(pi * sigma_1 ** 2 + (1 - pi) * sigma_2 ** 2) return params, sigma def log_mixture_prior_prob(w): comp_1_dist = tfp.distributions.Normal(0.0, prior_params[0]) comp_2_dist = tfp.distributions.Normal(0.0, prior_params[1]) comp_1_weight = prior_params[2] return K.log(comp_1_weight * comp_1_dist.prob(w) + (1 - comp_1_weight) * comp_2_dist.prob(w)) # Mixture prior parameters shared across DenseVariational layer instances prior_params, prior_sigma = mixture_prior_params(sigma_1=1.0, sigma_2=0.1, pi=0.2) class DenseVariational(Layer): def __init__(self, output_dim, kl_loss_weight, activation=None, **kwargs): self.output_dim = output_dim self.kl_loss_weight = kl_loss_weight self.activation = activations.get(activation) 
super().__init__(**kwargs) def build(self, input_shape): self._trainable_weights.append(prior_params) self.kernel_mu = self.add_weight(name='kernel_mu', shape=(input_shape[1], self.output_dim), initializer=initializers.normal(stddev=prior_sigma), trainable=True) self.bias_mu = self.add_weight(name='bias_mu', shape=(self.output_dim,), initializer=initializers.normal(stddev=prior_sigma), trainable=True) self.kernel_rho = self.add_weight(name='kernel_rho', shape=(input_shape[1], self.output_dim), initializer=initializers.constant(0.0), trainable=True) self.bias_rho = self.add_weight(name='bias_rho', shape=(self.output_dim,), initializer=initializers.constant(0.0), trainable=True) super().build(input_shape) def call(self, x): kernel_sigma = tf.math.softplus(self.kernel_rho) kernel = self.kernel_mu + kernel_sigma * tf.random.normal(self.kernel_mu.shape) bias_sigma = tf.math.softplus(self.bias_rho) bias = self.bias_mu + bias_sigma * tf.random.normal(self.bias_mu.shape) self.add_loss(self.kl_loss(kernel, self.kernel_mu, kernel_sigma) + self.kl_loss(bias, self.bias_mu, bias_sigma)) return self.activation(K.dot(x, kernel) + bias) def compute_output_shape(self, input_shape): return (input_shape[0], self.output_dim) def kl_loss(self, w, mu, sigma): variational_dist = tfp.distributions.Normal(mu, sigma) return kl_loss_weight * K.sum(variational_dist.log_prob(w) - log_mixture_prior_prob(w)) ``` Our model is a neural network with two `DenseVariational` hidden layers, each having 20 units, and one `DenseVariational` output layer with one unit. Instead of modeling a full probability distribution $p(y \lvert \mathbf{x},\mathbf{w})$ as output the network simply outputs the mean of the corresponding Gaussian distribution. In other words, we do not model aleatoric uncertainty here and assume it is known. We only model epistemic uncertainty via the `DenseVariational` layers. 
Since the training dataset has only 32 examples we train the network with all 32 examples per epoch so that the number of batches per epoch is 1. For other configurations, the complexity cost (`kl_loss`) must be weighted by $1/M$ as described in section 3.4 of the paper where $M$ is the number of mini-batches per epoch.

```
from keras.layers import Input
from keras.models import Model

# full-batch training: one batch per epoch, so the KL weight is 1/1 = 1
batch_size = train_size
num_batches = train_size / batch_size
kl_loss_weight = 1.0 / num_batches

# two variational hidden layers (20 units each) and one variational output unit
x_in = Input(shape=(1,))
x = DenseVariational(20, kl_loss_weight=kl_loss_weight, activation='relu')(x_in)
x = DenseVariational(20, kl_loss_weight=kl_loss_weight, activation='relu')(x)
x = DenseVariational(1, kl_loss_weight=kl_loss_weight)(x)

model = Model(x_in, x)
```

The network can now be trained with a Gaussian negative log likelihood function (`neg_log_likelihood`) as loss function assuming a fixed standard deviation (`noise`). This corresponds to the *likelihood cost*, the last term in equation $3$.

```
from keras import callbacks, optimizers

def neg_log_likelihood(y_obs, y_pred, sigma=noise):
    """Gaussian negative log likelihood of observations under N(y_pred, sigma).

    `sigma` defaults to the module-level `noise` (aleatoric noise assumed known).
    """
    dist = tfp.distributions.Normal(loc=y_pred, scale=sigma)
    return K.sum(-dist.log_prob(y_obs))

model.compile(loss=neg_log_likelihood, optimizer=optimizers.Adam(lr=0.03), metrics=['mse'])
model.fit(X, y, batch_size=batch_size, epochs=1500, verbose=0);
```

When calling `model.predict` we draw a random sample from the variational posterior distribution and use it to compute the output value of the network. This is equivalent to obtaining the output from a single member of a hypothetical ensemble of neural networks. Drawing 500 samples means that we get predictions from 500 ensemble members. From these predictions we can compute statistics such as the mean and standard deviation. In our example, the standard deviation is a measure of epistemic uncertainty.
```
import tqdm

# test inputs deliberately extend beyond the training range [-0.5, 0.5]
# so that extrapolation uncertainty becomes visible
X_test = np.linspace(-1.5, 1.5, 1000).reshape(-1, 1)

# each predict() call samples fresh weights from the variational posterior,
# so 500 calls give predictions from 500 hypothetical ensemble members
y_pred_list = []
for i in tqdm.tqdm(range(500)):
    y_pred = model.predict(X_test)
    y_pred_list.append(y_pred)

# stack samples along axis 1 -> shape (1000, 500); reduce across samples
y_preds = np.concatenate(y_pred_list, axis=1)

y_mean = np.mean(y_preds, axis=1)
y_sigma = np.std(y_preds, axis=1)

# +/- 2 sigma band visualizes epistemic uncertainty
plt.plot(X_test, y_mean, 'r-', label='Predictive mean');
plt.scatter(X, y, marker='+', label='Training data')
plt.fill_between(X_test.ravel(),
                 y_mean + 2 * y_sigma,
                 y_mean - 2 * y_sigma,
                 alpha=0.5, label='Epistemic uncertainty')
plt.title('Prediction')
plt.legend();
```

We can clearly see that epistemic uncertainty is much higher in regions of no training data than it is in regions of existing training data. The predictive mean could have also been obtained with a single forward pass i.e. a single `model.predict` call by using only the mean of the variational posterior distribution which is equivalent to sampling from the variational posterior with $\boldsymbol{\sigma}$ set to $\mathbf{0}$. The corresponding implementation is omitted here but is trivial to add. For an example how to model both epistemic and aleatoric uncertainty I recommend reading [Regression with Probabilistic Layers in TensorFlow Probability](https://medium.com/tensorflow/regression-with-probabilistic-layers-in-tensorflow-probability-e46ff5d37baf) which uses probabilistic Keras layers from the upcoming Tensorflow Probability 0.7.0 release. Their approach to variational inference is similar to the approach described here but differs in some details. For example, they compute the complexity cost analytically instead of estimating it from Monte Carlo samples, among other differences.
github_jupyter
# Optimization and Deep Learning

In this section, we will discuss the relationship between optimization and deep learning as well as the challenges of using optimization in deep learning. For a deep learning problem, we will usually define a *loss function* first. Once we have the loss function, we can use an optimization algorithm in an attempt to minimize the loss. In optimization, a loss function is often referred to as the *objective function* of the optimization problem. By tradition and convention most optimization algorithms are concerned with *minimization*. If we ever need to maximize an objective there is a simple solution: just flip the sign on the objective.

## Goal of Optimization

Although optimization provides a way to minimize the loss function for deep learning, in essence, the goals of optimization and deep learning are fundamentally different. The former is primarily concerned with minimizing an objective whereas the latter is concerned with finding a suitable model, given a finite amount of data. In :numref:`sec_model_selection`, we discussed the difference between these two goals in detail. For instance, training error and generalization error generally differ: since the objective function of the optimization algorithm is usually a loss function based on the training dataset, the goal of optimization is to reduce the training error. However, the goal of deep learning (or more broadly, statistical inference) is to reduce the generalization error. To accomplish the latter we need to pay attention to overfitting in addition to using the optimization algorithm to reduce the training error.

```
%matplotlib inline
import numpy as np
import tensorflow as tf
from mpl_toolkits import mplot3d
from d2l import tensorflow as d2l
```

To illustrate the aforementioned different goals, let us consider the empirical risk and the risk.
As described in :numref:`subsec_empirical-risk-and-risk`, the empirical risk is an average loss on the training dataset while the risk is the expected loss on the entire population of data. Below we define two functions: the risk function `f` and the empirical risk function `g`. Suppose that we have only a finite amount of training data. As a result, here `g` is less smooth than `f`. ``` def f(x): return x * tf.cos(np.pi * x) def g(x): return f(x) + 0.2 * tf.cos(5 * np.pi * x) ``` The graph below illustrates that the minimum of the empirical risk on a training dataset may be at a different location from the minimum of the risk (generalization error). ``` def annotate(text, xy, xytext): #@save d2l.plt.gca().annotate(text, xy=xy, xytext=xytext, arrowprops=dict(arrowstyle='->')) x = tf.range(0.5, 1.5, 0.01) d2l.set_figsize((4.5, 2.5)) d2l.plot(x, [f(x), g(x)], 'x', 'risk') annotate('min of\nempirical risk', (1.0, -1.2), (0.5, -1.1)) annotate('min of risk', (1.1, -1.05), (0.95, -0.5)) ``` ## Optimization Challenges in Deep Learning In this chapter, we are going to focus specifically on the performance of optimization algorithms in minimizing the objective function, rather than a model's generalization error. In :numref:`sec_linear_regression` we distinguished between analytical solutions and numerical solutions in optimization problems. In deep learning, most objective functions are complicated and do not have analytical solutions. Instead, we must use numerical optimization algorithms. The optimization algorithms in this chapter all fall into this category. There are many challenges in deep learning optimization. Some of the most vexing ones are local minima, saddle points, and vanishing gradients. Let us have a look at them. ### Local Minima For any objective function $f(x)$, if the value of $f(x)$ at $x$ is smaller than the values of $f(x)$ at any other points in the vicinity of $x$, then $f(x)$ could be a local minimum. 
If the value of $f(x)$ at $x$ is the minimum of the objective function over the entire domain, then $f(x)$ is the global minimum. For example, given the function $$f(x) = x \cdot \text{cos}(\pi x) \text{ for } -1.0 \leq x \leq 2.0,$$ we can approximate the local minimum and global minimum of this function. ``` x = tf.range(-1.0, 2.0, 0.01) d2l.plot(x, [f(x), ], 'x', 'f(x)') annotate('local minimum', (-0.3, -0.25), (-0.77, -1.0)) annotate('global minimum', (1.1, -0.95), (0.6, 0.8)) ``` The objective function of deep learning models usually has many local optima. When the numerical solution of an optimization problem is near the local optimum, the numerical solution obtained by the final iteration may only minimize the objective function *locally*, rather than *globally*, as the gradient of the objective function's solutions approaches or becomes zero. Only some degree of noise might knock the parameter out of the local minimum. In fact, this is one of the beneficial properties of minibatch stochastic gradient descent where the natural variation of gradients over minibatches is able to dislodge the parameters from local minima. ### Saddle Points Besides local minima, saddle points are another reason for gradients to vanish. A *saddle point* is any location where all gradients of a function vanish but which is neither a global nor a local minimum. Consider the function $f(x) = x^3$. Its first and second derivative vanish for $x=0$. Optimization might stall at this point, even though it is not a minimum. ``` x = tf.range(-2.0, 2.0, 0.01) d2l.plot(x, [x**3], 'x', 'f(x)') annotate('saddle point', (0, -0.2), (-0.52, -5.0)) ``` Saddle points in higher dimensions are even more insidious, as the example below shows. Consider the function $f(x, y) = x^2 - y^2$. It has its saddle point at $(0, 0)$. This is a maximum with respect to $y$ and a minimum with respect to $x$. Moreover, it *looks* like a saddle, which is where this mathematical property got its name. 
``` x, y = tf.meshgrid( tf.linspace(-1.0, 1.0, 101), tf.linspace(-1.0, 1.0, 101)) z = x**2 - y**2 ax = d2l.plt.figure().add_subplot(111, projection='3d') ax.plot_wireframe(x, y, z, **{'rstride': 10, 'cstride': 10}) ax.plot([0], [0], [0], 'rx') ticks = [-1, 0, 1] d2l.plt.xticks(ticks) d2l.plt.yticks(ticks) ax.set_zticks(ticks) d2l.plt.xlabel('x') d2l.plt.ylabel('y'); ``` We assume that the input of a function is a $k$-dimensional vector and its output is a scalar, so its Hessian matrix will have $k$ eigenvalues (refer to the [online appendix on eigendecompositions](https://d2l.ai/chapter_appendix-mathematics-for-deep-learning/eigendecomposition.html)). The solution of the function could be a local minimum, a local maximum, or a saddle point at a position where the function gradient is zero: * When the eigenvalues of the function's Hessian matrix at the zero-gradient position are all positive, we have a local minimum for the function. * When the eigenvalues of the function's Hessian matrix at the zero-gradient position are all negative, we have a local maximum for the function. * When the eigenvalues of the function's Hessian matrix at the zero-gradient position are negative and positive, we have a saddle point for the function. For high-dimensional problems the likelihood that at least *some* of the eigenvalues are negative is quite high. This makes saddle points more likely than local minima. We will discuss some exceptions to this situation in the next section when introducing convexity. In short, convex functions are those where the eigenvalues of the Hessian are never negative. Sadly, though, most deep learning problems do not fall into this category. Nonetheless it is a great tool to study optimization algorithms. ### Vanishing Gradients Probably the most insidious problem to encounter is the vanishing gradient. Recall our commonly-used activation functions and their derivatives in :numref:`subsec_activation-functions`. 
For instance, assume that we want to minimize the function $f(x) = \tanh(x)$ and we happen to get started at $x = 4$. As we can see, the gradient of $f$ is close to nil. More specifically, $f'(x) = 1 - \tanh^2(x)$ and thus $f'(4) = 0.0013$. Consequently, optimization will get stuck for a long time before we make progress. This turns out to be one of the reasons that training deep learning models was quite tricky prior to the introduction of the ReLU activation function. ``` x = tf.range(-2.0, 5.0, 0.01) d2l.plot(x, [tf.tanh(x)], 'x', 'f(x)') annotate('vanishing gradient', (4, 1), (2, 0.0)) ``` As we saw, optimization for deep learning is full of challenges. Fortunately there exists a robust range of algorithms that perform well and that are easy to use even for beginners. Furthermore, it is not really necessary to find *the* best solution. Local optima or even approximate solutions thereof are still very useful. ## Summary * Minimizing the training error does *not* guarantee that we find the best set of parameters to minimize the generalization error. * The optimization problems may have many local minima. * The problem may have even more saddle points, as generally the problems are not convex. * Vanishing gradients can cause optimization to stall. Often a reparameterization of the problem helps. Good initialization of the parameters can be beneficial, too. ## Exercises 1. Consider a simple MLP with a single hidden layer of, say, $d$ dimensions in the hidden layer and a single output. Show that for any local minimum there are at least $d!$ equivalent solutions that behave identically. 1. Assume that we have a symmetric random matrix $\mathbf{M}$ where the entries $M_{ij} = M_{ji}$ are each drawn from some probability distribution $p_{ij}$. Furthermore assume that $p_{ij}(x) = p_{ij}(-x)$, i.e., that the distribution is symmetric (see e.g., :cite:`Wigner.1958` for details). 1. Prove that the distribution over eigenvalues is also symmetric. 
That is, for any eigenvector $\mathbf{v}$ the probability that the associated eigenvalue $\lambda$ satisfies $P(\lambda > 0) = P(\lambda < 0)$. 1. Why does the above *not* imply $P(\lambda > 0) = 0.5$? 1. What other challenges involved in deep learning optimization can you think of? 1. Assume that you want to balance a (real) ball on a (real) saddle. 1. Why is this hard? 1. Can you exploit this effect also for optimization algorithms? [Discussions](https://discuss.d2l.ai/t/489)
github_jupyter
# Calculate and save extremes (both atm and lnd) ## 1. Settings ### 1.1 Import the necessary python libraries ``` from __future__ import print_function import sys import os from getpass import getuser import string import subprocess import numpy as np import matplotlib import matplotlib.pyplot as plt import netCDF4 as netcdf4 import xarray as xr import pandas import regionmask import cartopy.crs as ccrs from IPython.display import display, Math, Latex import warnings from datetime import datetime ``` ### 1.2 General Settings ``` # set directories outdir = '/glade/scratch/ivanderk/' # Define directory where processing is done -- subject to change procdir = '/glade/work/ivanderk/postprocessing/' # go to processing directory os.chdir(procdir) # ignore all runtime warnings warnings.filterwarnings('ignore') ``` ### 1.3 User settings ``` # set case name case_res = 'f.FHIST.f09_f09_mg17.CTL' case_nores = 'f.FHIST.f09_f09_mg17.NORES' # set number of ensemble members n_ens = 5 # set individual case names for reference case_res_ind = 'f.FHIST.f09_f09_mg17.CTL.001' case_nores_ind = 'f.FHIST.f09_f09_mg17.NORES.001' case = 'f.FHIST.f09_f09_mg17.CTL.001' # run settings -- change this to terms directly? block = 'atm' # lnd data # atm data # rof data stream = 'h0' # h0 output block # h1 output block # h2 output block # xtrm calculated (annual) # define start and end year spstartyear = '1979' # spin up start year startyear = '1979' # start year, spin up excluded endyear = '2014' # last year of the simulation # list of hydrological variables which need to be converted from m/s to mm/day hydrol_vars = ['PRECT','PRECMC', 'PRECC','PRECL','Rx1day'] ``` ## 2. 
Functions ### 2.1 Functions to open datasets ``` # open nc variable as dataset and interpolate lnd variables to atm grid def open_ds(var,case=case,stream=stream, block=block): tfreqs = {'h0' : 'month_1' , 'h1' : 'day_1' , 'h2' : 'month_1'} tspans = {'h0' : spstartyear+'01-'+endyear+'12', 'h1' : spstartyear+'0101-'+ endyear+'1231', 'h2' : spstartyear+'01-'+endyear+'12'} model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} # Define directory where timeseries data is stored tseriesdir = outdir + 'archive/' + case + '/' + block + '/proc/tseries/' + tfreqs[stream] + '/' # define filename fn = case + '.'+ model[block] + '.' + stream + '.' + var + '.' + tspans[stream] +'.nc' # check if variable timeseries exists and open variable as data array if not os.path.isfile(tseriesdir + fn): print(fn + ' does not exists in ') print(tseriesdir) return else: # open the dataset ds = xr.open_dataset(tseriesdir+fn) # the lats of the atm and lnd grid differ with about E-7. # therefore, interpolate land to atm grid to be able to work with exactly the same grids. 
if block == 'lnd': ds_atm = open_ds('TREFHT',block='atm', stream = stream ) ds = ds.interp_like(ds_atm) return ds # function to cut out analysis period out of data-array (1900-2015) def extract_anaperiod(da, stream): # number of spin up years nspinupyears = int(startyear) - int(spstartyear) if nspinupyears == 0 : # no spin up da = da[:-1,:,:] elif stream == 'h1' : # this option still to test # daily timesteps # last day of previous year is also saved in variable therefore add one nspinupdays = (nspinupyears * 365) + 1 # exclude spin up year and last timestep () da = da[nspinupdays:-1,:,:] else: # spin up with monthly timestep # first month of first year is not saved in variable therefore substract one nspinupmonths = (nspinupyears * 12) - 1 # exclude spin up year and last timestep () da = da[nspinupmonths:-1,:,:] return da # open variable as data-array def open_da(var, case=case, stream=stream, block=block): ds = open_ds(var, case, stream, block=block) da = ds[var] # extract analysis period - not necessary da = extract_anaperiod(da, stream) return da def open_da_delta(var, case, case_ref, stream=stream, block=block): # Load the two datasets da_res = open_da(var,case=case, stream=stream, block=block) da_ctl = open_da(var,case=case_ref, stream=stream, block=block) # calculate difference and update attributes da_delta = da_res - da_ctl da_delta.attrs['long_name'] = '$\Delta$ '+ da_ctl.long_name da_delta.attrs['units'] = da_ctl.units da_delta.name = '$\Delta$ '+ da_ctl.name return da_delta # save dataset as nc in postprocessing dir for extremes def save_da_xtrm(da,var,case=case, block=block): tspan = spstartyear+'01-'+endyear+'12' model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} savedir = procdir + 'postprocessing/extremes/f09_f09/' # define filename fn = case + '.'+ model[block] + '.' + var + '.' 
+ tspan +'.nc' # check if variable timeseries exists and open variable as data array if os.path.isfile(savedir + fn): print(fn + ' already exists') else: da.to_dataset().to_netcdf(savedir+fn) return # open dataset of extremes def open_da_xtrm(var,case=case, block=block): tspan = spstartyear+'01-'+endyear+'12' model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} savedir = outdir + 'postprocessing/extremes/f09_f09/' # define filename fn = case + '.'+ model[block] + '.' + var + '.' + tspan +'.nc' # check if variable timeseries exists and open variable as data array if not os.path.isfile(savedir + fn): print(fn + ' does not exist') return else: ds = xr.open_dataset(savedir+fn) da = ds[var] return da # check if dataset of extremes exists def exist_da_xtrm(var,case=case, block=block): tspan = spstartyear+'01-'+endyear+'12' model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} savedir = outdir + 'postprocessing/extremes/f09_f09/' # define filename fn = case + '.'+ model[block] + '.' + var + '.' + tspan +'.nc' # check if variable timeseries exists and open variable as data array if not os.path.isfile(savedir + fn): exists = False else: exists = True return exists # remove nc file of extreme (for development purposes) def remove_da_xtrm(var,case=case, block=block): tspan = spstartyear+'01-'+endyear+'12' model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} savedir = outdir + 'postprocessing/extremes/f09_f09/' # define filename fn = case + '.'+ model[block] + '.' + var + '.' 
+ tspan +'.nc' # check if variable timeseries exists and open variable as data array if os.path.isfile(savedir + fn): print('removed file '+fn) os.system('rm '+savedir + fn) return # fucntion to open (and calculate) delta of extreme def open_da_delta_xtrm(var, case, case_ref, block=block): # Load the two datasets da_res = open_da_xtrm(var,case=case, block=block) da_ctl = open_da_xtrm(var,case=case_ref, block=block) # calculate difference and update attributes da_delta = da_res - da_ctl da_delta.attrs['long_name'] = '$\Delta$ '+ da_ctl.long_name da_delta.attrs['units'] = da_ctl.units da_delta.name = '$\Delta$ '+ da_ctl.name return da_delta ``` ### 2.3 Functions to do conversions ``` def conv_m_s_to_mm_day(da_in): if not da_in.attrs['units'] == 'mm/day': da_out = da_in * 86400000 # update attributes and change units da_out.attrs= da_in.attrs da_out.attrs['units'] = 'mm/day' else: da_out = da_in return da_out ``` ### 2.4 Functions to calculate extremes ``` # process TXx: calculate and save annual maximum of maxdaytime temperature def proc_TXx(var_or, case, block=block): # define new variable name var = 'TXx' # check if var is already existing if exist_da_xtrm(var,case=case, block=block): print(var +' already exists') else: # do calculations # open da with daily data da = open_da(var_or,case=case, stream='h1', block=block) # calculate maximum per year da_xtrm= da.groupby('time.year').max(keep_attrs=True) da_xtrm.name = var da_xtrm.attrs['long_name'] = 'Annual maximum of '+da.long_name # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) # process TNn: calculate and save annual maximum of maxdaytime temperature def proc_TNn(var_or, case, block=block): # define new variable name var = 'TNn' # check if var is already existing if exist_da_xtrm(var,case=case, block=block): print(var +' already exists') else: # do calculations # open da with daily data da = open_da(var_or,case=case, stream='h1', block=block) # calculate minimum per year da_xtrm = 
da.groupby('time.year').min(keep_attrs=True) da_xtrm.name = var da_xtrm.attrs['long_name'] = 'Annual minimum of '+da.long_name # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) # calculate 99th percentile of max daytime temperatures def proc_TX99(var_or, case, block=block): # define new variable name var = 'TX99' # check if var is already existing if exist_da_xtrm(var,case=case, block=block): print(var +' already exists') else: # do calculations # open da with daily data da = open_da_ens(var_or,case=case, stream='h0', block=block, mode='all') da_lumped = da.stack(dim=("ens_member", "time")) # calculate maximum per year da_xtrm = da_lumped.quantile(0.99, dim=('dim')) da_xtrm.name = var da_xtrm.attrs['long_name'] = '99th percentile of daily '+da.long_name da_xtrm.attrs['units'] = 'K' # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) # calculate 1st percentile of min nighttime temperatures def proc_TN01(var_or, case, block=block): # define new variable name var = 'TN01' # check if var is already existing if exist_da_xtrm(var,case=case, block=block): print(var +' already exists') else: # do calculations # open da with daily data da = open_da_ens(var_or,case=case, stream='h0', block=block, mode='all') da_lumped = da.stack(dim=("ens_member", "time")) # calculate maximum per year da_xtrm = da_lumped.quantile(0.01, dim=('dim')) da_xtrm.name = var da_xtrm.attrs['long_name'] = '1st percentile of daily '+da.long_name da_xtrm.attrs['units'] = 'K' # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) def proc_TN10(var_or, case, block=block): """calculcate and save cold days 10pctl of days within period """ # define new variable name var = 'TN10' # check if var is already existing if exist_da_xtrm(var,case=case, block=block): print(var +' already exists') else: # do calculations # open da with daily data da = open_da_ens(var_or,case=case, stream='h0', block=block, mode='all') da_lumped = 
da.stack(dim=("ens_member", "time")) # calculate maximum per year da_xtrm = da_lumped.quantile(0.1, dim=('dim')) da_xtrm.name = var da_xtrm.attrs['long_name'] = '10th percentile of daily'+da.long_name da_xtrm.attrs['units'] = 'K' # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) def proc_TX90(var_or, case, block=block): """calculcate and save warm days 90th pctl of days within period """ # define new variable name var = 'TX90' # check if var is already existing if exist_da_xtrm(var,case=case, block=block): print(var +' already exists') else: # do calculations # open da with daily data da = open_da_ens(var_or,case=case, stream='h0', block=block, mode='all') da_lumped = da.stack(dim=("ens_member", "time")) # calculate maximum per year da_xtrm = da_lumped.quantile(0.90, dim=('dim')) da_xtrm.name = var da_xtrm.attrs['long_name'] = '90th percentile of daily'+da.long_name da_xtrm.attrs['units'] = 'K' # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) def proc_Rx1day(var_or, case, block=block): """process Rx1day: calculate annual maximum 1 day precipitation""" # define new variable name var = 'Rx1day' # check if var is already existing if exist_da_xtrm(var,case=case, block=block): print(var +' already exists') else: # do calculations # open da with daily data da = open_da(var_or,case=case, stream='h1', block=block) # calculate maximum per year da_xtrm= da.groupby('time.year').max(keep_attrs=True) da_xtrm.name = var da_xtrm.attrs['long_name'] = 'Annual maximum of '+da.long_name # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) def proc_R05(var_or, case, block=block): """calculcate and save 5th pctl of monthly precip: drought months """ # define new variable name var = 'R05' # check if var is already existing if exist_da_xtrm(var,case=case, block=block): print(var +' already exists') else: # do calculations # open da with daily data da = open_da_ens(var_or,case=case, stream='h0', block=block, 
mode='all') da_lumped = da.stack(dim=("ens_member", "time")) # calculate quantile over months da_xtrm = da_lumped.quantile(0.05, dim=('dim')) da_xtrm.name = var da_xtrm.attrs['long_name'] = '5th percentile of monthly'+da.long_name da_xtrm.attrs['units'] = 'mm/year' # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) def proc_R95(var_or, case, block=block): """calculcate and save 95th pctl of monthly precip: wet months """ # define new variable name var = 'R95' # check if var is already existing if exist_da_xtrm(var,case=case, block=block): print(var +' already exists') else: # do calculations # open da with daily data da = open_da_ens(var_or,case=case, stream='h0', block=block, mode='all') da_lumped = da.stack(dim=("ens_member", "time")) # calculate quantile over months da_xtrm = da_lumped.quantile(0.95, dim=('dim')) da_xtrm.name = var da_xtrm.attrs['long_name'] = '95th percentile of monthly'+da.long_name da_xtrm.attrs['units'] = 'mm/year' # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) def proc_colddays(case): """calculcate and save annual percent of cold days (T< 10pctl) of days within period for individual ensemble members""" # define new variable name var = 'ColdDays_pct' # check if var is already existing if exist_da_xtrm(var,case=case): print(var +' already exists') else: # do calculations # open da with daily data da = open_da('TREFHTMN', case=case, stream='h1', block='atm') # calculate percentile over whole period da_pctl = da.quantile(0.1, dim=('time')) ncolddays_annual = (da<da_pctl).groupby('time.year').sum() ndays_annual = (da<da_pctl).groupby('time.year').count() # calculate percent da_xtrm = ncolddays_annual/ndays_annual *100 da_xtrm.name = var da_xtrm.attrs['long_name'] = 'Percent of cold days per year (days with TN < TN_10pctl )' da_xtrm.attrs['units'] = 'K' # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) def proc_warmdays(case): """calculcate and save annual percent 
of warm days (T> 90pctl) of days within period for individual ensemble members""" # define new variable name var = 'WarmDays_pct' # check if var is already existing if exist_da_xtrm(var,case=case): print(var +' already exists') else: # do calculations # open da with daily data da = open_da('TREFHTMX', case=case, stream='h1', block='atm') # calculate percentile over whole period da_pctl = da.quantile(0.9, dim=('time')) nwarmdays_annual = (da>da_pctl).groupby('time.year').sum() ndays_annual = (da>da_pctl).groupby('time.year').count() # calculate percent da_xtrm = nwarmdays_annual/ndays_annual *100 da_xtrm.name = var da_xtrm.attrs['long_name'] = 'Percent of warm days per year (days with TX > TX_90pctl )' da_xtrm.attrs['units'] = 'K' # save variable into netcdf save_da_xtrm(da_xtrm,var,case=case, block=block) def proc_TXx_monthly(var_or, case, block=block): """ process TXx: calculate and save monthly maximum of daytime temperature and save in monthly folder""" # define new variable name var = 'TXx_m' # open da with daily data da = open_da(var_or,case=case, stream='h1', block=block) # calculate maximum per year da_xtrm = da.resample(time='M').max(keep_attrs=True) da_xtrm.name = var da_xtrm.attrs['long_name'] = 'Monthly maximum of '+da.long_name # save da into netcdf stream = 'h0' # save into new file tfreqs = {'h0' : 'month_1' , 'h1' : 'day_1' , 'h2' : 'month_1'} tspans = {'h0' : spstartyear+'01-'+endyear+'12', 'h1' : spstartyear+'0101-'+ endyear+'1231', 'h2' : spstartyear+'01-'+endyear+'12'} model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} # Define directory where timeseries data is stored savedir = outdir + 'archive/' + case + '/' + block + '/proc/tseries/' + tfreqs[stream] + '/' # define filename fn = case + '.'+ model[block] + '.' + stream + '.' + var + '.' 
+ tspans[stream] +'.nc' # check if variable timeseries exists and open variable as data array #if os.path.isfile(savedir + fn): # print(fn + ' already exists') #else: da_xtrm.to_dataset().to_netcdf(savedir+fn) # process TXx: calculate and save annual maximum of maxdaytime temperature def proc_TNn_monthly(var_or, case, block=block): """ process TNn: calculate and save monthly minimum of nighttime temperature and save in monthly folder""" # define new variable name var = 'TNn_m' # open da with daily data da = open_da(var_or,case=case, stream='h1', block=block) # calculate maximum per year da_xtrm = da.resample(time='M').min(keep_attrs=True) da_xtrm.name = var da_xtrm.attrs['long_name'] = 'Monthly minimum of '+da.long_name # save da into netcdf stream = 'h0' # save into new file tfreqs = {'h0' : 'month_1' , 'h1' : 'day_1' , 'h2' : 'month_1'} tspans = {'h0' : spstartyear+'01-'+endyear+'12', 'h1' : spstartyear+'0101-'+ endyear+'1231', 'h2' : spstartyear+'01-'+endyear+'12'} model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} # Define directory where timeseries data is stored savedir = outdir + 'archive/' + case + '/' + block + '/proc/tseries/' + tfreqs[stream] + '/' # define filename fn = case + '.'+ model[block] + '.' + stream + '.' + var + '.' + tspans[stream] +'.nc' # check if variable timeseries exists and open variable as data array #if os.path.isfile(savedir + fn): # print(fn + ' already exists') #else: da_xtrm.to_dataset().to_netcdf(savedir+fn) ``` ## 3. 
Calculate and save extremes ### 3.1 Land variables ### 3.2 Atmosphere variables ``` # get string of individual case names case_res_names = [case_res+'.00'+str(i) for i in range(1,n_ens+1)] case_nores_names = [case_nores+'.00'+str(i) for i in range(1,n_ens+1)] # loop over all cases and calculate extremes for all individual cases for case_res_mem,case_nores_mem in zip(case_res_names, case_nores_names): # calculate annual maximum daytime temperature (based on cam variable) proc_TXx('TREFHTMX', case_res_mem, block='atm') proc_TXx('TREFHTMX', case_nores_mem, block='atm') # calculate annual minimum nighttime temperature (based on cam variable) proc_TNn('TREFHTMN', case_res_mem, block='atm') proc_TNn('TREFHTMN', case_nores_mem, block='atm') # Rx1day annual maximum 1 day precipitation proc_Rx1day('PRECT', case_res_mem, block='atm') proc_Rx1day('PRECT', case_nores_mem, block='atm') # calculate annual % of cold and warm days proc_warmdays(case_res_mem) proc_warmdays(case_nores_mem) proc_colddays(case_res_mem) proc_colddays(case_nores_mem) # calculate monthly maximum daytime temperature (based on cam variable) proc_TXx_monthly('TREFHTMX', case_res_mem, block='atm') proc_TXx_monthly('TREFHTMX', case_nores_mem, block='atm') # calculate monthly minimum nighttime temperature (based on cam variable) proc_TNn_monthly('TREFHTMN', case_res_mem, block='atm') proc_TNn_monthly('TREFHTMN', case_nores_mem, block='atm') from iv_utils import * # calculate 99 pctl of daytime temperature (based on cam variable) proc_TX99('TREFHTMX', case_res, block='atm') proc_TX99('TREFHTMX', case_nores, block='atm') # calculate 1st pctl of nighttime temperature (based on cam variable) proc_TN01('TREFHTMN', case_res, block='atm') proc_TN01('TREFHTMN', case_nores, block='atm') proc_TX90('TREFHTMX', case_res, block='atm') proc_TX90('TREFHTMX', case_nores, block='atm') proc_TN10('TREFHTMN', case_res, block='atm') proc_TN10('TREFHTMN', case_nores, block='atm') proc_R05('PRECT', case_res, block='atm') 
proc_R05('PRECT', case_nores, block='atm') proc_R95('PRECT', case_res, block='atm') proc_R95('PRECT', case_nores, block='atm') ``` ## 4. Save h1 as h0 time frequency files ### Function to do conversion ``` def save_h1_as_h0(var,case_name, calcsum=False, block=block): """Convert h1 to h0 and save file """ # open da with daily data da = open_da(var,case=case_name, stream='h1', block=block) if calcsum: # calculate monthly mean da_monthly = da.resample(time='1M').sum(keep_attrs=True) else: # calculate monthly mean da_monthly = da.resample(time='1M').mean(keep_attrs=True) stream_new = 'h0' # save into new file tfreqs = {'h0' : 'month_1' , 'h1' : 'day_1' , 'h2' : 'month_1'} tspans = {'h0' : spstartyear+'01-'+endyear+'12', 'h1' : spstartyear+'0101-'+ endyear+'1231', 'h2' : spstartyear+'01-'+endyear+'12'} model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} # Define directory where timeseries data is stored savedir = outdir + 'archive/' + case_name + '/' + block + '/proc/tseries/' + tfreqs[stream_new] + '/' # define filename fn = case_name + '.'+ model[block] + '.' + stream_new + '.' + var + '.' 
+ tspans[stream_new] +'.nc' # check if variable timeseries exists and open variable as data array if os.path.isfile(savedir + fn): print(fn + ' already exists') else: da_monthly.to_dataset().to_netcdf(savedir+fn) # loop over all cases del case_res_names,case_nores_names case_res_names = [case_res+'.00'+str(i) for i in range(1,n_ens+1)] case_nores_names = [case_nores+'.00'+str(i) for i in range(1,n_ens+1)] # loop over all cases and calculate extremes for all individual cases for case_res_mem,case_nores_mem in zip(case_res_names, case_nores_names): save_h1_as_h0('TREFHTMX',case_res_mem) save_h1_as_h0('TREFHTMX',case_nores_mem) save_h1_as_h0('TREFHTMN',case_res_mem) save_h1_as_h0('TREFHTMN',case_nores_mem) save_h1_as_h0('PRECT',case_res_mem, calcsum=True) save_h1_as_h0('PRECT',case_nores_mem, calcsum=True) ``` ### Function to calculate DTR (and calculate monthly mean too) ``` def proc_DTR(case_name): TREFHTMN = open_da('TREFHTMN',case=case_name, stream='h1', block=block) TREFHTMX = open_da('TREFHTMX',case=case_name, stream='h1', block=block) DTR = TREFHTMX-TREFHTMN DTR.attrs['long_name']= 'Diurnal temperature range' DTR.attrs['units']= 'K' DTR.name = 'DTR' DTR = DTR[1:,:,:] var = 'DTR' stream = 'h1' # save into new file tfreqs = {'h0' : 'month_1' , 'h1' : 'day_1' , 'h2' : 'month_1'} tspans = {'h0' : spstartyear+'01-'+endyear+'12', 'h1' : spstartyear+'0101-'+ endyear+'1231', 'h2' : spstartyear+'01-'+endyear+'12'} model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} # Define directory where timeseries data is stored savedir = outdir + 'archive/' + case_name + '/' + block + '/proc/tseries/' + tfreqs[stream] + '/' # define filename fn = case_name + '.'+ model[block] + '.' + stream + '.' + var + '.' 
+ tspans[stream] +'.nc' # check if variable timeseries exists and open variable as data array if os.path.isfile(savedir + fn): print(fn + ' already exists') else: DTR.to_dataset().to_netcdf(savedir+fn) # loop over all cases #case_res_names = [case_res+'.00'+str(i) for i in range(1,n_ens+1)] #case_nores_names = [case_nores+'.00'+str(i) for i in range(1,n_ens+1)] # loop over all cases and calculate extremes for all individual cases for case_res_mem,case_nores_mem in zip(case_res_names, case_nores_names): proc_DTR(case_res_mem) proc_DTR(case_nores_mem) save_h1_as_h0('DTR',case_res_mem) save_h1_as_h0('DTR',case_nores_mem) # process albedo def proc_albedo(case_name): SWin = open_da('FSDS', block = 'lnd', case = case_name) SWout= open_da('FSR' , block = 'lnd', case = case_name) albedo = SWout/SWin albedo.attrs['long_name']= 'Albedo' albedo.attrs['units']= '-' albedo.name = 'albedo' var = 'albedo' stream = 'h0' block = 'lnd' # save into new file tfreqs = {'h0' : 'month_1' , 'h1' : 'day_1' , 'h2' : 'month_1'} tspans = {'h0' : spstartyear+'01-'+endyear+'12', 'h1' : spstartyear+'0101-'+ endyear+'1231', 'h2' : spstartyear+'01-'+endyear+'12'} model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} # Define directory where timeseries data is stored savedir = outdir + 'archive/' + case_name + '/' + block + '/proc/tseries/' + tfreqs[stream] + '/' # define filename fn = case_name + '.'+ model[block] + '.' + stream + '.' + var + '.' 
+ tspans[stream] +'.nc' # check if variable timeseries exists and open variable as data array if os.path.isfile(savedir + fn): print(fn + ' already exists') else: albedo.to_dataset().to_netcdf(savedir+fn) from iv_utils import * case_res_names = [case_res+'.00'+str(i) for i in range(1,n_ens+1)] case_nores_names = [case_nores+'.00'+str(i) for i in range(1,n_ens+1)] # loop over all cases and calculate extremes for all individual cases for case_res_mem,case_nores_mem in zip(case_res_names, case_nores_names): proc_albedo(case_res_mem) proc_albedo(case_nores_mem) ``` ### Function to calculate Apparent temperature and wet bulb temperature ``` def proc_AT(case_name): """ compute Apparent temperature - Version including the effects of temperature, humidity, and wind # http://www.bom.gov.au/info/thermal_stress/?cid=003bl08""" TREFHT = open_da('TREFHT',case=case_name, stream='h0', block=block) RHREFHT = open_da('RHREFHT',case=case_name, stream='h0', block=block) U10 = open_da('U10',case=case_name, stream='h0', block=block) p AT = (TREFHT-273.15) + 0.33 * (RHREFHT / 100 * 6.105 * np.exp( 17.27 * (TREFHT-273.15) / ( 237.7 + (TREFHT-273.15) ) ) ) - 0.70 * U10 - 4.00 AT.attrs['long_name']= 'Apparent Temperature' AT.attrs['units']= '°C' AT.name = 'AT' var = 'AT' stream = 'h0' # save into new file tfreqs = {'h0' : 'month_1' , 'h1' : 'day_1' , 'h2' : 'month_1'} tspans = {'h0' : spstartyear+'01-'+endyear+'12', 'h1' : spstartyear+'0101-'+ endyear+'1231', 'h2' : spstartyear+'01-'+endyear+'12'} model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} # Define directory where timeseries data is stored savedir = outdir + 'archive/' + case_name + '/' + block + '/proc/tseries/' + tfreqs[stream] + '/' # define filename fn = case_name + '.'+ model[block] + '.' + stream + '.' + var + '.' 
+ tspans[stream] +'.nc' # check if variable timeseries exists and open variable as data array if os.path.isfile(savedir + fn): print(fn + ' already exists') else: AT.to_dataset().to_netcdf(savedir+fn) ``` ### Function to calculate Wet bulb temperature ``` def proc_WBGT(case_name): """ Wet Bulb Global Temperature - approximation to the WBGT used by the Bureau of Meteorology # http://www.bom.gov.au/info/thermal_stress/?cid=003bl08 # !!! uses 24-h average T and RH !!!""" TREFHT = open_da('TREFHT',case=case_name, stream='h1', block=block) RHREFHT = open_da('RHREFHT',case=case_name, stream='h1', block=block) TREFHTMX = open_da('TREFHTMX',case=case_name, stream='h1', block=block) WBGT = 0.567 * (TREFHT-273.15) + 0.393 * (RHREFHT / 100 * 6.105 * np.exp( 17.27 * (TREFHTMX-273.15) / ( 237.7 + (TREFHTMX-273.15) ) )) + 3.94 WBGT.attrs['long_name']= 'Wet Bulb Global Temperature' WBGT.attrs['units']= '°C' WBGT.name = 'WBGT' var = 'WBGT' stream = 'h1' # save into new file tfreqs = {'h0' : 'month_1' , 'h1' : 'day_1' , 'h2' : 'month_1'} tspans = {'h0' : spstartyear+'01-'+endyear+'12', 'h1' : spstartyear+'0101-'+ endyear+'1231', 'h2' : spstartyear+'01-'+endyear+'12'} model = {'lnd' : 'clm2', 'atm' : 'cam', 'rof' : 'mosart'} # Define directory where timeseries data is stored savedir = outdir + 'archive/' + case_name + '/' + block + '/proc/tseries/' + tfreqs[stream] + '/' # define filename fn = case_name + '.'+ model[block] + '.' + stream + '.' + var + '.' 
+ tspans[stream] +'.nc' # check if variable timeseries exists and open variable as data array if os.path.isfile(savedir + fn): print(fn + ' already exists') else: WBGT.to_dataset().to_netcdf(savedir+fn) case_res_names = [case_res+'.00'+str(i) for i in range(1,n_ens+1)] case_nores_names = [case_nores+'.00'+str(i) for i in range(1,n_ens+1)] # loop over all cases and calculate extremes for all individual cases for case_res_mem,case_nores_mem in zip(case_res_names, case_nores_names): proc_AT(case_res_mem) proc_AT(case_nores_mem) proc_WBGT(case_res_mem) proc_WBGT(case_nores_mem) case_name = 'f.FHIST.f09_f09_mg17.CTL.001' TREFHT = open_da('TREFHT',case=case_name, stream='h0', block=block) RHREFHT = open_da('RHREFHT',case=case_name, stream='h0', block=block) U10 = open_da('U10',case=case_name, stream='h0', block=block) AT = (TREFHT-273.15) + 0.33 * (RHREFHT / 100 * 6.105 * np.exp( 17.27 * (TREFHT-273.15) / ( 237.7 + (TREFHT-273.15) ) ) ) - 0.70 * U10 - 4.00 AT.attrs['long_name']= 'Apparent Temperature' AT.attrs['units']= '°C' AT.name = 'AT' ```
github_jupyter
# Heartattack Data Data taken from: https://www.kaggle.com/carlosdg/a-detail-description-of-the-heart-disease-dataset ``` # Load packages import numpy as np import pandas as pd import matplotlib.pyplot as plt import seaborn as sns # Enable multiple outputs per cell from IPython.core.interactiveshell import InteractiveShell InteractiveShell.ast_node_interactivity = "all" # Read in data heartattack = pd.read_csv("../data/heartattack_data/heart.csv") # Check if data was loaded properly heartattack.head() ``` Q: What attributes does the dataset have, what do they mean and what is their level of measurement? <br> A: age: age -- ratio sex: sex -- nominal cp: Chest Pain type (4 values) -- nominal trtbps: resting blood pressure (in mm Hg) -- ratio chol: cholestoral in mg/dl fetched via BMI sensor -- ratio fbs: (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false) -- originally ratio, now nominal restecg: resting electrocardiographic results -- nominal thalachh: maximum heart rate achieved -- ratio exng: exercise induced angina (1 = yes; 0 = no) -- nominal oldpeak: ST depression induced by exercise relative to rest -- ratio slp: the slope of the peak exercise ST segment -- nominal caa: number of major vessels (0-3) -- ratio thall: Thal rate (0-3) -- nominal output: heartattack yes/no (target variable) -- nominal For details see: https://www.kaggle.com/carlosdg/a-detail-description-of-the-heart-disease-dataset ``` # View attribute names heartattack.columns # Rename attributes for easier handling heartattack.rename( columns = { 'cp' : 'chest_pain_type', 'trtbps' : 'bloodpressure', 'chol' : 'cholesterine', 'fbs' : 'blood_sugar_high', 'restecg' : 'electrocardiogram_on_rest', 'thalachh' : 'max_heartrate_test_achieved', 'exng' : 'angina_during_exercise', 'oldpeak' : 'ecg_displacement', 'slp' : 'slope_ST_during_exercise', 'caa' : 'count_blood_vessels', 'thall' : 'results_bloodflow' }, inplace = True ) # Check if the data types of the attributes were set correctly 
heartattack.info() # Rename the categories of the categorical variables for easier interpretability sex_mapper = { 0 : 'female', 1 : 'male' } chest_pain_type_mapper = { 0: 'asymptomatic', 1: 'atypical_angina', 2: 'pain_without_angina', 3: 'typical_angina' } blood_sugar_high_mapper = { 0 : 'no', 1 : 'yes' } electrocardiogram_on_rest_mapper = { 0 : 'probable_left_ventricular_hypertrophy', 1 : 'normal', 2 : 'abnormalities' } angina_during_exercise_mapper = { 0 : 'no', 1 : 'yes' } slope_ST_during_exercise_mapper = { 0 : 'descending', 1 : 'flat', 2 : 'ascending' } results_bloodflow_mapper = { 0 : 'NULL', 1 : 'fixed defect', 2 : 'normal', 3 : 'reversible defect' } # output_mapper = { # 0 : 'heartattack', # 1 : 'no heartattack' # } # Convert variables to categorical, where applicable heartattack['sex'] = heartattack['sex'].replace(sex_mapper) heartattack['sex'] = pd.Categorical(heartattack['sex']) heartattack['chest_pain_type'] = heartattack['chest_pain_type'].replace(chest_pain_type_mapper) heartattack['chest_pain_type'] = pd.Categorical(heartattack['chest_pain_type']) heartattack['blood_sugar_high'] = heartattack['blood_sugar_high'].replace(blood_sugar_high_mapper) heartattack['blood_sugar_high'] = pd.Categorical(heartattack['blood_sugar_high']) heartattack['electrocardiogram_on_rest'] = heartattack['electrocardiogram_on_rest'].replace(electrocardiogram_on_rest_mapper) heartattack['electrocardiogram_on_rest'] = pd.Categorical(heartattack['electrocardiogram_on_rest']) heartattack['angina_during_exercise'] = heartattack['angina_during_exercise'].replace(angina_during_exercise_mapper) heartattack['angina_during_exercise'] = pd.Categorical(heartattack['angina_during_exercise']) heartattack['slope_ST_during_exercise'] = heartattack['slope_ST_during_exercise'].replace(slope_ST_during_exercise_mapper) heartattack['slope_ST_during_exercise'] = pd.Categorical(heartattack['slope_ST_during_exercise']) heartattack['results_bloodflow'] = 
heartattack['results_bloodflow'].replace(results_bloodflow_mapper) heartattack['results_bloodflow'] = pd.Categorical(heartattack['results_bloodflow']) # heartattack['output'] = heartattack['output'].replace(output_mapper) # heartattack['output'] = pd.Categorical(heartattack['output']) # Examine if there are still NULL values in results_bloodflow heartattack['results_bloodflow'].value_counts() # Drop NULL values heartattack.drop(heartattack[heartattack['results_bloodflow'] == 'NULL'].index, inplace=True) # count_blood_vessels == 4 also needs to be dropped, see kaggle link above at original column 'caa' heartattack.drop(heartattack[heartattack['count_blood_vessels'] == 4].index, inplace=True) # Check if removing the instances was successful heartattack.info() # Check for further values == 0 # None found heartattack.describe() # Inspect attribute 'count_blood_vessels' closer # No count_blood_vessels == 4 should be in the data any longer # The higher the value, the more likely it is to have a heartattack # Note: output == 0: heartattack, output == 1: no heartattack sns.countplot(data = heartattack, x = 'count_blood_vessels', hue='output') # Inspect heartattack['oldpeak'] closer # oldpeak is actually a float variable sns.distplot(heartattack['ecg_displacement']) heartattack['ecg_displacement'].describe() # Now the remaining numerical variables can be correlated # Interpretation: see below heartattack_corr = heartattack.corr() heartattack_corr # Create a heatmap for a better first inspection of the correlations # Detailled inspection follows below plt.figure(figsize=(8,6)) sns.set_context('paper', font_scale=1.4) sns.heatmap(heartattack_corr, annot=True, cmap='Blues') # Select only numerical data for the plot heartattack_plot_df = heartattack.loc[:,['age','sex','bloodpressure','cholesterine','max_heartrate_test_achieved','ecg_displacement','count_blood_vessels']] # Differentiate between the sexes pair_plot = sns.PairGrid(data = heartattack_plot_df, hue='sex') # Examine 
reg plots for connections between variables pair_plot.map_upper(sns.regplot) # Examine hists for (relative) frequencies pair_plot.map_diag(sns.histplot) # Examine KDE plots to get an approximate feeling of each variable's distribution pair_plot.map_lower(sns.kdeplot) # Put legend at the upper right handles = pair_plot._legend_data.values() labels = pair_plot._legend_data.keys() pair_plot.fig.legend(handles = handles, labels = labels, loc='upper right') ``` Interpreting the plot + correlations, i.e. the relationships of the numerical variables With a higher <b>age</b>: <br> bloodpressure seems to rise (both sexes)<br> cholesterine seems to rise (both sexes)<br> max_heartrate_test_achieved seems to shrink (both sexes)<br> ecg_displacement seems to rise (both sexes)<br> count_blood_vessels seems to rise (both sexes) With a higher <b>bloodpressure</b>:<br> cholesterine seems to increase (both sexes)<br> max_heartrate_test_achieved seems to be unrelated (both sexes)<br> ecg_displacement rises for men, while for women it is only rising a little bit<br> count_blood_vessels seems to rise for women With a higher <b>cholesterine</b>:<br> max_heartrate_test_achieved seems to be unaffected (both sexes)<br> ecg_displacement seems to be unaffected (both sexes)<br> count_blood_vessels seems to rise only slightly (both sexes) With a higher <b>max_heartrate_achieved</b>:<br> ecg_displacement seems to sink. This is maybe confounded by age, since with a lower age, a higher max_heartrate_test_achieved can be achieved (both sexes)<br> count_blood_vessels seem to sink slightly (both sexes)<br> With a higher <b>ecg_displacement</b>:<br> count_blood_vessels seem to rise slightly (both sexes), maybe a little bit stronger for women than for men ``` # Q: Are the classes balanced? # A: There are approximately twice as many men in the dataset than women heartattack['sex'].value_counts() # Q: How many males and females have a heartattack? # A: see below # Interpretation: Care must taken. 
There are only 24 women with a heartattack in the data # Note: output == 0: heartattack, output == 1: no heartattack heartattack.loc[:,['sex','output']].value_counts() # Q: How many people have a heartattack (independet of the sex)? # A: 136 heartattack.output.value_counts() ``` ### Relationships with the output variable ``` # Note: output == 0: heartattack, output == 1: no heartattack # Q: How do men and women compare in their tendency to have a heart disease, depending on their age? # A: Those having heart problems in the dataset seem to be older on average sns.boxplot(data = heartattack, x = 'sex', y = 'age', hue = 'output') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) # Note: output == 0: heartattack, output == 1: no heartattack # Q: What sex has more young people with heart problems? # A1: There seem to be more young men than women having a heart attack. # A2: Interpret with care! There are twice as many men than women in the dataset and only a 24 women with a heartattack compared to 112 men sns.violinplot(data = heartattack, x = 'sex', y = 'age', hue = 'output') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) # Q: Do women tend to have a lower blood pressure than men? # A: No sns.boxplot(data = heartattack, x = 'sex', y = 'bloodpressure') # Note: 0 == heartattack, 1 == no heartattack # Q: Are there differences in the data with regards to sex, bloodpressure and a heartattack? # A1: Yes, women who have a heartattack seem to have a higher bloodpressure # A2: Men on the other hand seem to have heartattacks regardless of their bloodpressure sns.boxplot(data = heartattack, x = 'sex', y = 'bloodpressure', hue = 'output') plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.) ``` ## Classification Six different classifiers were used to classifiy whether a hearattack is present (output == 0). They are further evaluated below. 
### Prepare Data #### Set seed value for reproducible results ``` ## Ensure reproducible results # Set a seed value seed_value= 2021 # 1. Set `PYTHONHASHSEED` environment variable at a fixed value import os os.environ['PYTHONHASHSEED']=str(seed_value) # 2. Set `python` built-in pseudo-random generator at a fixed value import random random.seed(seed_value) # 3. Set `numpy` pseudo-random generator at a fixed value import numpy as np np.random.seed(seed_value) ``` #### Scale the numeric data ``` # Required for distance-based algorithms, i.e. kNN from sklearn.preprocessing import StandardScaler scaler = StandardScaler() data_to_scale = heartattack.loc[:, heartattack.columns != 'output'].select_dtypes(include=['float64','int64']) heartattack[data_to_scale.columns] = scaler.fit_transform(data_to_scale) ``` #### Split data into training and test set ``` from sklearn.model_selection import train_test_split # Get dummies without the dependent variable and for each var, drop the first level output = heartattack['output'] heartattack = pd.get_dummies(heartattack.loc[:, heartattack.columns != 'output'], drop_first = True) heartattack['output'] = output # Include the seed value training, test = train_test_split(heartattack, test_size = 0.2, random_state = seed_value) len(training) len(test) # Split up the training and test data into data and output train_x = training.loc[:, training.columns != 'output'] train_y = training.loc[:,['output']] train_y = np.ravel(train_y) test_x = test.loc[:, test.columns != 'output'] test_y = test.loc[:,['output']] ``` #### Linear SVM ``` from sklearn import svm # create clf clf_svm = svm.SVC(kernel = 'linear', probability = True, random_state = seed_value) # train clf clf_svm.fit(train_x, train_y) clf_svm.predict(test_x) ``` #### Decision Tree ``` from sklearn import tree # create clf clf_dec = tree.DecisionTreeClassifier(criterion="entropy", random_state = seed_value) # train clf clf_dec.fit(train_x, train_y) clf_dec.predict(test_x) ``` #### 
Naive Bayes ``` from sklearn.naive_bayes import GaussianNB import numpy as np # create clf clf_gnb = GaussianNB() # train clf clf_gnb.fit(train_x, train_y) clf_gnb.predict(test_x) ``` #### Logistic Regression ``` from sklearn.linear_model import LogisticRegression clf_log = LogisticRegression(max_iter = 4000, random_state = seed_value) clf_log.fit(train_x, train_y) clf_log.predict(test_x) ``` #### Random Forest ``` from sklearn.ensemble import RandomForestClassifier clf_rfc = RandomForestClassifier(random_state = seed_value, max_depth=5, n_estimators=20) clf_rfc.fit(train_x, train_y) print(clf_rfc.predict(test_x)) ``` #### kNN ``` from sklearn.neighbors import KNeighborsClassifier clf_knn = KNeighborsClassifier(n_neighbors=3) clf_knn.fit(train_x,train_y) print(clf_knn.predict(test_x)) ``` ## Classifier Evaluation ### Cross-Validation ``` def cv_evaluate_clf(dataset: pd.DataFrame, dependent_var: pd.DataFrame, clf, pos_label: str, k: int): # Imports from sklearn.model_selection import cross_validate from sklearn.metrics import make_scorer from sklearn.metrics import accuracy_score from sklearn.metrics import f1_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import roc_auc_score ## Params for cross_validate function # Metrics scoring = { 'accuracy': make_scorer(accuracy_score), 'f1': make_scorer(f1_score, pos_label = 0), 'precision': make_scorer(precision_score, pos_label = 0), 'recall': make_scorer(recall_score, pos_label = 0), 'roc_auc': make_scorer(roc_auc_score) #, #'balanced_accuracy_score': make_scorer(balanced_accuracy_score) } # X, y independent_vars = dataset.loc[:, dataset.columns != f'{dependent_var}'] dependent_var = dataset[f'{dependent_var}'] # CV scores = cross_validate(estimator=clf, X=independent_vars, y=dependent_var, scoring=scoring, cv=k, return_estimator = True) return scores def classifier_results_df(clf_list: list, dataset: pd.DataFrame, pos_label: str, k: int, grouped_output: bool 
= True, sort_by: str = 'test_accuracy') -> pd.DataFrame: # Cross-validate each classifier and put in list metrics_per_clf = [cv_evaluate_clf(dataset=dataset, dependent_var='output',clf=x, pos_label=pos_label, k=k) for x in clfList] # Put results into DataFrame result_df = pd.DataFrame(metrics_per_clf) # Explode rows result_df = result_df.apply(lambda x: x.explode()) # Set index result_df.index = range(len(clfList) * k) # Automatically infer types result_df = result_df.convert_dtypes() # Set estimator as string "manually" result_df['estimator'] = result_df.estimator.astype("string") if grouped_output: # Group by the used estimator and sort by accuracy by default result_df_grouped = result_df.groupby('estimator').mean().sort_values(by=sort_by, ascending=False) # Remove cols fit_time and score_time columns = ['fit_time', 'score_time'] result_df_grouped = result_df_grouped.drop(columns, axis=1) return result_df_grouped return result_df clfList = [clf_svm,clf_dec,clf_gnb,clf_log,clf_rfc,clf_knn] result_df = classifier_results_df(clf_list=clfList, dataset=heartattack, pos_label=0, k=10) result_df ``` Interpretation: Since the logistic regression scores highest with regards to the metrics, it seems to be the classifier of choice for this dataset. Of course, every classifier could be further optimized with e.g. grid search and other hyperparameter tuning measures. 
But since the logistic regression is already good as of now and the sample size is very small (n=296), no further measures are taken ### Explaining the model Inspired by: https://www.kaggle.com/tentotheminus9/what-causes-heart-disease-explaining-the-model #### Permutation Importance From a lay person's perspective, I expected high blood pressure, high blood sugar and high cholesterine values to be pivotal for the model I used the permutation importance measure from eli5 Link: https://eli5.readthedocs.io/en/latest/blackbox/permutation_importance.html#model-inspection Permutation importance in a nutshell: <br> Shuffle the values in a single column, make predictions using the resulting dataset. <br> Use these predictions and the true target values to calculate how much the loss function suffered from shuffling.<br> That performance deterioration measures the importance of the variable you just shuffled. Source: https://www.kaggle.com/dansbecker/permutation-importance ``` import eli5 from eli5.sklearn import PermutationImportance feature_names = train_x.columns.tolist() perm = PermutationImportance(clf_log, random_state=seed_value).fit(train_x, train_y) eli5.show_weights(perm, feature_names = feature_names) ``` Apparently, that is not the case. Instead, the count_blood_vessels has the highest impact. Recall: This feature refers to the number of narrow blood vessels seen. This is why the higher the value of this feature, the more likely it is to have a heart disease Source: https://www.kaggle.com/carlosdg/a-detail-description-of-the-heart-disease-dataset Conclusion: the models seems to make sense regarding count_blood_vessels! 
#### Partial Dependence Plots ``` # It is known that there is relationship between the variables slope_ST_during_exercise_descending and ecg_displacement (see boxplot) # Source: https://www.kaggle.com/carlosdg/a-detail-description-of-the-heart-disease-dataset # As this relationship is not part of the logistic classifier as an interation, these vars are not recognized by the permutation importance above as being important sns.boxplot(data=heartattack, x ='slope_ST_during_exercise_descending', y ='ecg_displacement', hue='output') ``` The partial dependence plot shows the marginal effect one or two features have on the predicted outcome of a machine learning model Source: https://christophm.github.io/interpretable-ml-book/pdp.html ``` from matplotlib import pyplot as plt from pdpbox import pdp, get_dataset, info_plots features_to_plot = ['ecg_displacement', 'slope_ST_during_exercise_descending'] inter1 = pdp.pdp_interact(model=clf_log, dataset=test_x, model_features=feature_names, features=features_to_plot) pdp.pdp_interact_plot(pdp_interact_out=inter1, feature_names=features_to_plot, plot_type='contour') plt.show() ``` The partial dependence plot clearly shows that: for high ecg_displacement values and slope_ST_during_exercise_descending == 1, a heartattack (output == 0) is rather present. This interaction could probably be put into the classifiers to further improve the classification results. For a deeper understanding of the classification results (e.g. more interaction effects), more domain knowledge is required. ## Conclusion The dataset is best classified by a logistic regression with an average accuracy of 0.854138 (test_size = 0.2, cv = 10, no grid search). Counterintuively, not high blood pressure, high blood sugar or high cholesterine values, but the attribute count_blood_vessels hast the highest impact onto the classification.
github_jupyter
# Softmax exercise *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.* This exercise is analogous to the SVM exercise. You will: - implement a fully-vectorized **loss function** for the Softmax classifier - implement the fully-vectorized expression for its **analytic gradient** - **check your implementation** with numerical gradient - use a validation set to **tune the learning rate and regularization** strength - **optimize** the loss function with **SGD** - **visualize** the final learned weights ``` import random import numpy as np from cs231n.data_utils import load_CIFAR10 import matplotlib.pyplot as plt %matplotlib inline plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots plt.rcParams['image.interpolation'] = 'nearest' plt.rcParams['image.cmap'] = 'gray' # for auto-reloading extenrnal modules # see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython %load_ext autoreload %autoreload 2 def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500): """ Load the CIFAR-10 dataset from disk and perform preprocessing to prepare it for the linear classifier. These are the same steps as we used for the SVM, but condensed to a single function. 
""" # Load the raw CIFAR-10 data cifar10_dir = 'cs231n/datasets/cifar-10-batches-py' X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir) # subsample the data mask = range(num_training, num_training + num_validation) X_val = X_train[mask] y_val = y_train[mask] mask = range(num_training) X_train = X_train[mask] y_train = y_train[mask] mask = range(num_test) X_test = X_test[mask] y_test = y_test[mask] mask = np.random.choice(num_training, num_dev, replace=False) X_dev = X_train[mask] y_dev = y_train[mask] # Preprocessing: reshape the image data into rows X_train = np.reshape(X_train, (X_train.shape[0], -1)) X_val = np.reshape(X_val, (X_val.shape[0], -1)) X_test = np.reshape(X_test, (X_test.shape[0], -1)) X_dev = np.reshape(X_dev, (X_dev.shape[0], -1)) # Normalize the data: subtract the mean image mean_image = np.mean(X_train, axis = 0) X_train -= mean_image X_val -= mean_image X_test -= mean_image X_dev -= mean_image # add bias dimension and transform into columns X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))]) X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))]) X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))]) X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))]) return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev # Invoke the above function to get our data. X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data() print 'Train data shape: ', X_train.shape print 'Train labels shape: ', y_train.shape print 'Validation data shape: ', X_val.shape print 'Validation labels shape: ', y_val.shape print 'Test data shape: ', X_test.shape print 'Test labels shape: ', y_test.shape print 'dev data shape: ', X_dev.shape print 'dev labels shape: ', y_dev.shape ``` ## Softmax Classifier Your code for this section will all be written inside **cs231n/classifiers/softmax.py**. ``` # First implement the naive softmax loss function with nested loops. 
# Open the file cs231n/classifiers/softmax.py and implement the # softmax_loss_naive function. from cs231n.classifiers.softmax import softmax_loss_naive import time # Generate a random softmax weight matrix and use it to compute the loss. W = np.random.randn(3073, 10) * 0.0001 loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As a rough sanity check, our loss should be something close to -log(0.1). print 'loss: %f' % loss print 'sanity check: %f' % (-np.log(0.1)) ``` ## Inline Question 1: Why do we expect our loss to be close to -log(0.1)? Explain briefly.** **Your answer:** *Fill this in* ``` # Complete the implementation of softmax_loss_naive and implement a (naive) # version of the gradient that uses nested loops. loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0) # As we did for the SVM, use numeric gradient checking as a debugging tool. # The numeric gradient should be close to the analytic gradient. from cs231n.gradient_check import grad_check_sparse f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # similar to SVM case, do another gradient check with regularization loss, grad = softmax_loss_naive(W, X_dev, y_dev, 1e2) f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 1e2)[0] grad_numerical = grad_check_sparse(f, W, grad, 10) # Now that we have a naive implementation of the softmax loss function and its gradient, # implement a vectorized version in softmax_loss_vectorized. # The two versions should compute the same results, but the vectorized version should be # much faster. 
tic = time.time() loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.00001) toc = time.time() print 'naive loss: %e computed in %fs' % (loss_naive, toc - tic) from cs231n.classifiers.softmax import softmax_loss_vectorized tic = time.time() loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.00001) toc = time.time() print 'vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic) # As we did for the SVM, we use the Frobenius norm to compare the two versions # of the gradient. grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro') print 'Loss difference: %f' % np.abs(loss_naive - loss_vectorized) print 'Gradient difference: %f' % grad_difference # Use the validation set to tune hyperparameters (regularization strength and # learning rate). You should experiment with different ranges for the learning # rates and regularization strengths; if you are careful you should be able to # get a classification accuracy of over 0.35 on the validation set. from cs231n.classifiers import Softmax results = {} best_val = -1 best_softmax = None learning_rates = [5e-7] regularization_strengths = [3e4] ################################################################################ # TODO: # # Use the validation set to set the learning rate and regularization strength. # # This should be identical to the validation that you did for the SVM; save # # the best trained softmax classifer in best_softmax. 
# ################################################################################ for l in learning_rates: for r in regularization_strengths: softmax = Softmax() softmax.train(X_train, y_train, learning_rate=l, reg=r, num_iters=1500, batch_size=200) y_train_pred = softmax.predict(X_train) y_val_pred = softmax.predict(X_val) training_accuracy = np.mean(y_train == y_train_pred) validation_accuracy = np.mean(y_val == y_val_pred) results[(l, r)] = (training_accuracy, validation_accuracy) if validation_accuracy > best_val: best_val = validation_accuracy best_softmax = softmax ################################################################################ # END OF YOUR CODE # ################################################################################ # Print out results. for lr, reg in sorted(results): train_accuracy, val_accuracy = results[(lr, reg)] print 'lr %e reg %e train accuracy: %f val accuracy: %f' % ( lr, reg, train_accuracy, val_accuracy) print 'best validation accuracy achieved during cross-validation: %f' % best_val # evaluate on test set # Evaluate the best softmax on test set y_test_pred = best_softmax.predict(X_test) test_accuracy = np.mean(y_test == y_test_pred) print 'softmax on raw pixels final test set accuracy: %f' % (test_accuracy, ) # Visualize the learned weights for each class w = best_softmax.W[:-1,:] # strip out the bias w = w.reshape(32, 32, 3, 10) w_min, w_max = np.min(w), np.max(w) classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] for i in xrange(10): plt.subplot(2, 5, i + 1) # Rescale the weights to be between 0 and 255 wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min) plt.imshow(wimg.astype('uint8')) plt.axis('off') plt.title(classes[i]) ```
github_jupyter
# Proximal Policy Optimization (PPO) ## 背景 Proximal Policy Optimization,简称PPO,即近端策略优化,是对Policy Gradient,即策略梯度的一种改进算法。PPO的核心精神在于,通过一种被称之为Importance Sampling的方法,将Policy Gradient中On-policy的训练过程转化为Off-policy,即从在线学习转化为离线学习,某种意义上与基于值迭代算法中的Experience Replay有异曲同工之处。通过这个改进,训练速度与效果在实验上相较于Policy Gradient具有明显提升。 ## Policy Gradient Policy Gradient是一种基于策略迭代的强化学习算法,不同于基于值迭代的DQN、Double-DQN、Dueling-DQN通过间接地估计动作-状态值函数来学习的过程,Policy Gradient直接地通过采样状态、动作、奖励,然后期望直接最大化奖励的期望。PPO与PG都希望最大化奖励的期望,当采样足够充分时,奖励的期望可以近似为N回合的奖励的平均值: $$ \bar{R}_{\theta} = \sum_{\tau} R(\tau) P(\tau \lvert \theta) \approx \frac{1}{N} \sum^{N}_{n=1} R(\tau^{n}) $$ 上式中的第n回合的奖励值之和$R(\tau^n)$被定义为如下形式: $$ R(\tau) = \sum^{T}_{t=1} r_t $$ 在前篇专门介绍Policy Gradient文章中,已经详细地推导了关于$\nabla \bar{R}_{\theta}$的计算方法,所以在这里的具体推导过程将略过,最后关于$\nabla \bar{R}_{\theta}$的计算公式将有如下形式: $$ \nabla \bar{R}_{\theta} = \frac{1}{N} \sum^{N}_{n=1} \sum^{T_n}_{t=1} R(\tau^n) \nabla \log p(a_t \lvert s_t, \theta) $$ 本质上是最小化N回合采样出的动作与网络输出的动作的交叉熵的基础上乘以$R(\tau^n)$,奖励值给了梯度下降的方向,推导出了$\nabla \bar{R}_{\theta}$,其实就已经可以根据梯度下降法反向传播改进网络进行训练了,但是通常情况下我们会根据具体的问题对$R(\tau^n)$做一些修正。 ## Actor-Critic Model 对$R(\tau^n)$的修正通常情况下是必须的,也是有意义的,符合直觉的。以CartPole-v0与MountainCar-v0,即小车倒立杆和过山车游戏为例,每一个状态采取的动作对整个回合的奖励和是不同的,对于小车倒立杆问题而言,初始的几个状态采取的动作直接决定了杆是否会很快地倒,所以直觉地他们更加重要,而对于过山车问题而言,在小车即将爬上山时的这些状态采取的动作直接决定了小车能不能爬上山,所以直觉地他们更加重要。 这将引入我们的第一个改进,对于小车倒立杆问题而言,我们需要针对每一个状态、动作元组对$R(\tau^n)$进行如下替换: $$ R(\tau^n) \rightarrow \sum^{T_n}_{t=t^{\prime}} \gamma^{t} r^{n}_{t} $$ 这样原来的梯度公式将会被改写为以下形式: $$ \nabla \bar{R}_{\theta} = \frac{1}{N} \sum^{N}_{n=1} \sum^{T_n}_{t=1} \sum^{T_n}_{t=t^{\prime}} \gamma^{t} r^{n}_{t} \nabla \log p(a_t \lvert s_t, \theta) $$ 但是这样还存在一个称之为Overestimate,即过估计的问题。因为在实际情况中,我们的状态-动作采样通常是不充分的,这会导致一些动作或者状态几乎不会被采样,这样在进行梯度下降训练网络时,在这些状态对应的动作将可能被极大的放大或者缩小。由于输出层是soft-max,这些概率会此消彼长,这显然不是我们想看到的。所以我们需要做第二个改进:引入Baseline,通常可能是一个待调整的常超参数,或者Critic,通常是一个待训练的网络。 如果引入的是一个Critic,这样的模型将会被称之为Actor-Critic Model,即演员-评论家模型,而N回合平均奖励值的梯度将会被改写为以下形式: $$ \nabla \bar{R}_{\theta} = \frac{1}{N} \sum^{N}_{n=1} 
\sum^{T_n}_{t=1} A^{\theta}(a_t \lvert s_t) \nabla \log p(a_t \lvert s_t, \theta) $$ 在一次训练过程中,我们会按顺序同时更新这两个网络,目前这样的模型已经被广泛使用,并在实验上证明了较好的效果。 ## Importance Sampling 在前面提到,PPO的一个核心改进是将Policy Gradient中On-policy的训练过程转化为Off-policy,即从在线学习转化为离线学习,这个转化过程被称之为Importance Sampling,是一种数学手段。如果我们有连续随机变量X,它的概率密度函数记作$p(x)$,则$f(x)$的期望通过如下公式计算: $$ E_{x \sim p} \left[ f(x) \right] = \int^{}_{} f(x)p(x)dx $$ 若我们对于连续随机变量X,有另一个概率密度函数记作$q(x)$,那么他们将有以下关系: $$ E_{x \sim p} \left[ f(x) \right] = \int f(x) \cdot p(x)dx = \int f(x) \frac{p(x)}{q(x)} \cdot q(x) dx = E_{x \sim q} \left[ f(x) \frac{p(x)}{q(x)} \right] $$ 在上式中最右边的项中,$\frac{p(x)}{q(x)}$被称之为Importance Weight,类比到我们的问题,$f(x)$是$A^{\theta}(a_t \lvert s_t)$,而$\frac{p(x)}{q(x)}$,则是新老策略对于当前状态采取当前动作对应的概率之比,这句话比较费解,更加具体一些,对于小车倒立杆为例,动作是离散的,在网络的输出是一组离散的概率分布,以这个概率分布选择动作,这个动作在新老策略中,在当前状态中都对应了一个概率值,$\frac{p(x)}{q(x)}$即是他们的比值。 通过这一操作,在采样充分的情况下,我们可以认为: $$ E_{x \sim p} \left[ f(x) \right] = E_{x \sim q} \left[ f(x) \frac{p(x)}{q(x)} \right] $$ ## Proximal Policy Optimization 最终我们将推导出PPO,Importance Sampling将给我们将On-policy的训练过程转化为Off-policy以基础,即我们可以通过老策略,即$q(x)$进行充分采样,然后改进新策略$p(x)$,这个过程可以在一回合重复N次,而不再是1次,这样大幅度减少了原始PG算法在线学习进行采样状态-动作-奖励元组对时间,同时保证了训练效果,而N回合平均奖励值的梯度也将被改写为以下形式: $$ \nabla \bar{R}_{\theta} = \frac{1}{N} \sum^{N}_{n=1} \sum^{T_n}_{t=1} \frac{p_{\theta}(a_t \lvert s_t)}{p_{\theta^{\prime}}(a_t \lvert s_t)} A^{\theta}(a_t \lvert s_t) \nabla \log p(a_t \lvert s_t, \theta) $$ 在实际训练过程中,会有一个对$\frac{p_{\theta}(a_t \lvert s_t)}{p_{\theta^{\prime}}(a_t \lvert s_t)}$的clip的操作: $$ clip(\frac{p_{\theta}(a_t \lvert s_t)}{p_{\theta^{\prime}}(a_t \lvert s_t)}, 1 - \epsilon, 1 + \epsilon) $$ 相当于一个正则化的操作,其中$\epsilon$是一个可调整的超参数,至此,PPO也就介绍完了。 ## Experiment ``` # coding=utf-8 import tensorflow as tf import numpy as np import gym import sys sys.path.append('..') from base.model import BaseRLModel class Agent(BaseRLModel): def __init__(self, session, env, a_space, s_space, **options): super(Agent, self).__init__(session, env, a_space, s_space, **options) 
self._init_input() self._init_nn() self._init_op() self._init_saver() self.a_buffer = [] self.s_buffer = [] self.r_buffer = [] self.a_p_r_buffer = [] self.session.run(tf.global_variables_initializer()) def _init_input(self, *args): with tf.variable_scope('input'): self.s = tf.placeholder(tf.float32, [None, self.s_space], name='s') self.a = tf.placeholder(tf.int32, [None, ], name='a') self.r = tf.placeholder(tf.float32, [None, ], name='r') self.adv = tf.placeholder(tf.float32, [None, ], name='adv') self.a_p_r = tf.placeholder(tf.float32, [None, ], name='a_p_r') def _init_nn(self, *args): self.advantage, self.value = self._init_critic_net('critic_net') self.a_prob_eval, self.a_logits_eval = self._init_actor_net('eval_actor_net') self.a_prob_target, self.a_logits_target = self._init_actor_net('target_actor_net', trainable=False) def _init_op(self): with tf.variable_scope('critic_loss_func'): # loss func. self.c_loss_func = tf.losses.mean_squared_error(labels=self.r, predictions=self.value) with tf.variable_scope('critic_optimizer'): # critic optimizer. self.c_optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.c_loss_func) with tf.variable_scope('update_target_actor_net'): # Get eval w, b. params_e = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='eval_actor_net') params_t = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_actor_net') self.update_target_a_op = [tf.assign(t, e) for t, e in zip(params_t, params_e)] with tf.variable_scope('actor_loss_func'): # one hot a. a_one_hot = tf.one_hot(self.a, self.a_space) # cross entropy. cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(labels=a_one_hot, logits=self.a_logits_eval) # loss func. 
self.a_loss_func = tf.reduce_mean(cross_entropy * self.adv * self.a_p_r) with tf.variable_scope('actor_optimizer'): self.a_optimizer = tf.train.AdamOptimizer(self.learning_rate).minimize(self.a_loss_func) def _init_actor_net(self, scope, trainable=True): with tf.variable_scope(scope): # Kernel initializer. w_initializer = tf.random_normal_initializer(0.0, 0.01) # First dense. f_dense = tf.layers.dense(self.s, 32, tf.nn.relu, trainable=trainable, kernel_initializer=w_initializer) # Second dense. s_dense = tf.layers.dense(f_dense, 32, tf.nn.relu, trainable=trainable, kernel_initializer=w_initializer) # Action logits. a_logits = tf.layers.dense(s_dense, self.a_space, trainable=trainable, kernel_initializer=w_initializer) # Action prob. a_prob = tf.nn.softmax(a_logits) return a_prob, a_logits def _init_critic_net(self, scope): with tf.variable_scope(scope): # Kernel initializer. w_initializer = tf.random_normal_initializer(0.0, 0.01) # First dense. f_dense = tf.layers.dense(self.s, 64, tf.nn.relu, kernel_initializer=w_initializer) # Value. value = tf.layers.dense(f_dense, 1) value = tf.reshape(value, [-1, ]) # Advantage. advantage = self.r - value return advantage, value def predict(self, s): # Calculate a eval prob. a_prob_eval, a_prob_target = self.session.run([self.a_prob_eval, self.a_prob_target], {self.s: [s]}) # Calculate action prob ratio between eval and target. a_p_r = np.max(a_prob_eval) / np.max(a_prob_target) self.a_p_r_buffer.append(a_p_r) return np.random.choice(range(a_prob_eval.shape[1]), p=a_prob_eval.ravel()) def snapshot(self, s, a, r, _): self.a_buffer.append(a) self.s_buffer.append(s) self.r_buffer.append(r) def train(self): # Copy r_buffer r_buffer = self.r_buffer # Init r_tau r_tau = 0 # Calculate r_tau for index in reversed(range(0, len(r_buffer))): r_tau = r_tau * self.gamma + r_buffer[index] self.r_buffer[index] = r_tau # Calculate adv. 
adv_buffer = self.session.run(self.advantage, {self.s: self.s_buffer, self.r: self.r_buffer}) # Minimize loss. self.session.run([self.a_optimizer, self.c_optimizer], { self.adv: adv_buffer, self.s: self.s_buffer, self.a: self.a_buffer, self.r: self.r_buffer, self.a_p_r: self.a_p_r_buffer, }) self.s_buffer = [] self.a_buffer = [] self.r_buffer = [] self.a_p_r_buffer = [] def run(self): if self.mode == 'train': for episode in range(self.train_episodes): s, r_episode = self.env.reset(), 0 while True: if episode > 200: self.env.render() a = self.predict(s) s_n, r, done, _ = self.env.step(a) if done: r = -5 r_episode += r self.snapshot(s, a, r, s_n) s = s_n if done: break self.train() if episode % 25 == 0: self.logger.warning('Episode: {} | Rewards: {}'.format(episode, r_episode)) self.save() else: for episode in range(self.eval_episodes): s, r_episode = self.env.reset() while True: a = self.predict(s) s_n, r, done, _ = self.env.step(a) r_episode += r s = s_n if done: break ``` ## Running ``` # Make env. env = gym.make('CartPole-v0') env.seed(1) env = env.unwrapped # Init session. session = tf.Session() # Init agent. agent = Agent(session, env, env.action_space.n, env.observation_space.shape[0], **{ 'model_name': 'PolicyGradient', }) agent.run() ``` ## 结尾 就目前观察,PPO在小车倒立杆问题上的收敛速度几倍于PG与一票基于值迭代的方法,让我非常惊讶。
github_jupyter
``` import os # os.environ["CUDA_VISIBLE_DEVICES"]="0" import re import json import string import numpy as np import tensorflow as tf from pprint import pprint from tensorflow import keras from tensorflow.keras import layers from tokenizers import BertWordPieceTokenizer from transformers import RobertaTokenizer, RobertaTokenizerFast, TFRobertaModel from tensorflow import keras tf.random.set_seed(1234) np.random.seed(1234) seq_len = 450 max_len = 512 ################### ## TRAINING DATA ## ################### ccc_train = [] with open('/home/ben/GDrive/Projects/distant_crowds/data/ccc_train.jsonl', 'r') as json_file: json_list = list(json_file) for json_str in json_list: ccc_train.append(json.loads(json_str)) for ex in ccc_train: ex["text"] = " ".join(ex["text"].split()) ex["size_text"] = " ".join(ex["size_text"].split()) print(f"Training examples from raw data: {len(ccc_train)}") # ccc_train = [a for a in ccc_train if a["size_text"].lower() in a["text"].lower()] print(f"Training examples after removing unmatched size_text strings: {len(ccc_train)}") ###################### ## SPAN TUNING DATA ## ###################### ccc_tune = [] with open('../data/ccc_span_tune.jsonl', 'r') as json_file: json_list = list(json_file) for json_str in json_list: ccc_tune.append(json.loads(json_str)) for ex in ccc_tune: ex["para"] = " ".join(ex["para"].split()) ex["size_text"] = " ".join(ex["size_text"].split()) print(f"Span tune examples in raw data: {len(ccc_tune)}") ccc_tune = [a for a in ccc_tune if a["size_text"].lower() in a["para"].lower()] print(f"Span tune examples after removing unmatched size_text strings: {len(ccc_tune)}") ccc_tune = {"data":[{"paragraphs":[{"context":qq["para"], "qas":[ {"answers":[{"answer_start":qq["para"].lower().index(qq["size_text"].lower()), "text":qq["size_text"]}], "question":"How many people protested?"} ]}]} for qq in ccc_tune]} ################## ## TESTING DATA ## ################## ccc_test = [] with open('../data/ccc_test_set.jsonl', 'r') as 
json_file: json_list = list(json_file) for json_str in json_list: ccc_test.append(json.loads(json_str)) for ex in ccc_test: ex["text"] = " ".join(ex["text"].split()) ex["size_text"] = " ".join(ex["size_text"].split()) print(f"Testing examples from raw data: {len(ccc_test)}") ccc_test = [a for a in ccc_test if a["size_text"].lower() in a["text"].lower()] print(f"Testing examples after removing unmatched size_text strings: {len(ccc_test)}") ccc_test = {"data":[{"paragraphs":[{"context":" ".join(qq["text"].split()), "qas":[ {"answers":[{"answer_start":qq["text"].lower().index(qq["size_text"].lower()), "text":qq["size_text"]}], "question":"How many people protested?"} ]}]} for qq in ccc_test]} ##################### ## VALIDATION DATA ## ##################### ccc_valid = [] with open('../data/ccc_validation_set.jsonl', 'r') as json_file: json_list = list(json_file) for json_str in json_list: ccc_valid.append(json.loads(json_str)) for ex in ccc_valid: ex["text"] = " ".join(ex["text"].split()) ex["size_text"] = " ".join(ex["size_text"].split()) print(f"Valid examples from raw data: {len(ccc_valid)}") ccc_valid = [a for a in ccc_valid if a["size_text"].lower() in a["text"].lower()] print(f"Valid examples after removing unmatched size_text strings: {len(ccc_valid)}") ccc_valid = {"data":[{"paragraphs":[{"context":" ".join(qq["text"].split()), "qas":[ {"answers":[{"answer_start":qq["text"].lower().index(qq["size_text"].lower()), "text":qq["size_text"]}], "question":"How many people protested?"} ]}]} for qq in ccc_valid]} ######################## ## SOFT-LABELED TRAIN ## ######################## soft_train = [] with open('../data/soft_labeled_train.jsonl', 'r') as json_file: json_list = list(json_file) for json_str in json_list: soft_train.append(json.loads(json_str)) for ex in soft_train: ex["context"] = " ".join(ex["context"].split()) ex["labeled_text"] = " ".join(ex["labeled_text"].split()) # print(f"Soft examples from raw data: {len(soft_train)}") # soft_train = [a for a 
in soft_train if a["labeled_text"].lower() in a["context"].lower()] # print(f"Soft examples after removing unmatched size_text strings: {len(soft_train)}") # soft_train = {"data":[{"paragraphs":[{"context":" ".join(qq["context"].split()), # "qas":[ # {"answers":[{"answer_start":qq["context"].lower().index(qq["labeled_text"].lower()), # "text":qq["labeled_text"]}], # "question":"How many people protested?"} # ]}]} for qq in soft_train]} ######################### ## TOKENIZER AND MODEL ## ######################### tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base") x_cat_inputs = [] x_cat_attentions = [] x_cat_types = [] y_cat = [] y_start = [] y_end = [] for ii, ex in enumerate(ccc_train): text = ex["text"] answer = soft_train[ii]["labeled_text"] tokenized_context = tokenizer(text, return_offsets_mapping=True) tokenized_question = tokenizer("</s> How many people protested?", return_offsets_mapping=True) if len(tokenized_context.input_ids) > seq_len: continue ######################## ## MERGE IN SOFT SPAN ## ######################## try: start_char_idx = text.lower().index(answer.lower()) except: continue # Find end character index of answer in context end_char_idx = start_char_idx + len(answer) # Mark the character indexes in context that are in answer is_char_in_ans = [0] * len(text) for idx in range(start_char_idx, end_char_idx): is_char_in_ans[idx] = 1 # Find tokens that were created from answer characters ans_token_idx = [] for idx, (start, end) in enumerate(tokenized_context.offset_mapping): if sum(is_char_in_ans[start:end]) > 0: ans_token_idx.append(idx) # Find start and end token index for tokens from answer start_token_idx = ans_token_idx[0] end_token_idx = ans_token_idx[-1] y_start.append(start_token_idx) y_end.append(end_token_idx) ####################### ## ## ####################### context_ids = tokenized_context.input_ids[0:-1] question_ids = tokenized_question.input_ids[1:] input_ids = context_ids + question_ids padding = [0]*(max_len - 
len(input_ids)) input_ids = input_ids + (np.array(padding)+1).tolist() attention_mask = [1]*len(context_ids) + [1]*len(question_ids) + [0]*len(padding) token_type_ids = [0]*len(context_ids) + [1]*len(question_ids) + [1]*len(padding) x_cat_inputs.append(input_ids) x_cat_attentions.append(attention_mask) x_cat_types.append(token_type_ids) y_cat.append(ex["size_cat"]) x_cat = (np.array(x_cat_inputs), np.array(x_cat_types), np.array(x_cat_attentions)) y_cat = (np.array(y_cat), np.array(y_start), np.array(y_end)) print(len(x_cat_inputs)) class SquadExample: def __init__(self, question, context, start_char_idx, answer_text, all_answers, seq_len, max_len): # Clean context, answer and question context = " ".join(str(context).split()) question = " ".join(str(question).split()) answer = " ".join(str(answer_text).split()) self.question = str(question) self.context = str(context) self.start_char_idx = start_char_idx self.answer_text = str(answer) self.all_answers = all_answers self.max_len = max_len self.seq_len = seq_len self.skip_doc = False self.input_ids = None self.attention_mask = None self.token_type_ids = None self.start_token_idx = None self.end_token_idx = None self.skip = None def __str__(self): print(np.stack(self.input_ids).shape) print(np.stack(self.token_type_ids).shape) print(np.stack(self.attention_mask).shape) return "<SquadExample>" def __repr__(self): return repr({"input_ids":self.input_ids, "token_type_ids":self.token_type_ids, "attention_mask":self.attention_mask, "start_token_idx":self.start_token_idx, "end_token_idx":self.end_token_idx, "skip":self.skip}) def preprocess(self): context = self.context question = self.question answer = self.answer_text start_char_idx = self.start_char_idx # Find end character index of answer in context end_char_idx = start_char_idx + len(answer) if (end_char_idx >= len(context)) or (start_char_idx < 0): self.skip_doc = True return # Mark the character indexes in context that are in answer is_char_in_ans = [0] * 
len(context) for idx in range(start_char_idx, end_char_idx): is_char_in_ans[idx] = 1 # Tokenize context tokenized_context = tokenizer(context, return_offsets_mapping=True) context_input_ids = tokenized_context.input_ids context_offset_mapping = tokenized_context.offset_mapping context_attention_mask = tokenized_context.attention_mask self.context_input_ids = context_input_ids self.context_offset_mapping = context_offset_mapping # Find tokens that were created from answer characters ans_token_idx = [] for idx, (start, end) in enumerate(context_offset_mapping): if sum(is_char_in_ans[start:end]) > 0: ans_token_idx.append(idx) # if len(ans_token_idx) == 0: # self.skip_doc = True # return if (len(ans_token_idx) == 0): ans_token_idx = [-1] # Find start and end token index for tokens from answer start_token_idx = ans_token_idx[0] end_token_idx = ans_token_idx[-1] self.start_token_idx_master = start_token_idx self.end_token_idx_master = end_token_idx # Tokenize question tokenized_question = tokenizer("</s> "+question, return_offsets_mapping=True) ## Crop start and end tokens question_input_ids = tokenized_question.input_ids[1:] context_input_ids = context_input_ids[1:-1] ## ## SPLIT UP CONTEXT INTO MULTIPLE QUESTIONS OF max_len ## if seq_len >= len(context_input_ids): offsets = [0] else: ii = 0 offsets = [] while (ii+seq_len) <= len(context_input_ids): offsets.append(ii) ii = ii + round(seq_len/2) offsets = offsets + [len(context_input_ids)-seq_len] list_input_ids = [] list_start_token_idx = [] list_end_token_idx = [] list_attention_mask = [] list_token_type_ids = [] list_skip = [] for ii in offsets: subcontext_input_ids = [0]+context_input_ids[ii:(ii+seq_len)]+question_input_ids subcontext_start_token_idx = start_token_idx - ii subcontext_end_token_idx = end_token_idx - ii subcontext_padding = [0] * (self.max_len - len(subcontext_input_ids)) subcontext_attention_mask = [1] * len(subcontext_input_ids) + subcontext_padding subcontext_token_type_ids = [0] + 
[0]*len(context_input_ids[ii:(ii+seq_len)]) + [1]*len(question_input_ids) + (np.array(subcontext_padding)+1).tolist() subcontext_input_ids = subcontext_input_ids + (np.array(subcontext_padding)+1).tolist() if (subcontext_start_token_idx >= 0) and (subcontext_end_token_idx < seq_len): skip = False else: subcontext_start_token_idx = 0 subcontext_end_token_idx = 0 skip = False list_input_ids.append(subcontext_input_ids) list_attention_mask.append(subcontext_attention_mask) list_token_type_ids.append(subcontext_token_type_ids) list_start_token_idx.append(subcontext_start_token_idx) list_end_token_idx.append(subcontext_end_token_idx) list_skip.append(skip) self.input_ids = (list_input_ids) self.attention_mask = (list_attention_mask) self.token_type_ids = (list_token_type_ids) self.start_token_idx = (list_start_token_idx) self.end_token_idx = (list_end_token_idx) self.skip = list_skip self.example_offset = offsets def train_examples(self, include_impossible=False): for idx, skip_ex in enumerate(self.skip): if include_impossible is False: if skip_ex is False: yield {"input_ids":self.input_ids[idx], "token_type_ids":self.token_type_ids[idx], "attention_mask":self.attention_mask[idx], "start_token_idx":self.start_token_idx[idx], "end_token_idx":self.end_token_idx[idx]} else: yield {"input_ids":self.input_ids[idx], "token_type_ids":self.token_type_ids[idx], "attention_mask":self.attention_mask[idx], "start_token_idx":self.start_token_idx[idx], "end_token_idx":self.end_token_idx[idx]} def inference_from_onehot(self, pred_start, pred_end): # if force_answer == False: if (np.max(np.argmax(pred_start, axis=1)) == 0) and (np.max(np.argmax(pred_end, axis=1)) == 0): return("", {"all_answers":self.all_answers, "start_token_pred":-1, "end_token_pred":-1, "start_char_pred":-1, "end_char_pred":-1, "start_token":self.start_token_idx_master, "end_token":self.end_token_idx_master, "tokens_pred":set(list([])), 
"tokens":set(list(range(self.start_token_idx_master,(self.end_token_idx_master+1))))}) seq_len = min(self.seq_len, len(self.context_input_ids)) pred_start_matrix = np.zeros((len(self.input_ids), len(self.context_input_ids))) pred_end_matrix = np.zeros((len(self.input_ids), len(self.context_input_ids))) for idx, value in enumerate(pred_start): offset = self.example_offset[idx]+1 pred_start_sub = pred_start[idx][1:seq_len] pred_end_sub = pred_end[idx][1:seq_len] pred_start_matrix[idx,(offset):(offset+seq_len-1)] = pred_start_sub pred_end_matrix[idx,(offset):(offset+seq_len-1)] = pred_end_sub highest_prob = np.argmax(np.max(pred_start_matrix, axis=1) + np.max(pred_end_matrix, axis=1)) top_start = np.argmax(pred_start_matrix[highest_prob,:]) top_end = np.argmax(pred_end_matrix[highest_prob,:]) # pred_start = np.max(pred_start_matrix, axis=0) # pred_end = np.max(pred_end_matrix, axis=0) # top_start = np.argmax(pred_start) # top_end = np.argmax(pred_end) start_char = self.context_offset_mapping[top_start][0] end_char = self.context_offset_mapping[top_end][1] return (self.context[start_char:end_char], {"all_answers":self.all_answers, "start_token_pred":top_start, "end_token_pred":top_end, "start_char_pred":start_char, "end_char_pred":end_char, "start_token":self.start_token_idx_master, "end_token":self.end_token_idx_master, "tokens_pred":set(list(range(top_start,(top_end+1)))), "tokens":set(list(range(self.start_token_idx_master,(self.end_token_idx_master+1))))}) def model_inference(self, model): pred = model.predict([np.stack(self.input_ids), np.stack(self.attention_mask), np.stack(self.token_type_ids)], batch_size=8) pred_start = pred[0] pred_end = pred[1] return self.inference_from_onehot(pred_start, pred_end) def fake_inference(self): pred_start_mat = [] pred_end_mat = [] for idx, val in enumerate(self.start_token_idx): pred_start = np.zeros_like(np.array(self.input_ids[idx])) pred_end = np.zeros_like(np.array(self.input_ids[idx])) if self.skip[idx] == False: 
pred_start[val] = 1.0 pred_end[self.end_token_idx[idx]] = 1.0 pred_start_mat.append(pred_start) pred_end_mat.append(pred_end) pred_start_mat = np.array(pred_start_mat) pred_end_mat = np.array(pred_end_mat) return self.inference_from_onehot(pred_start_mat,pred_end_mat) def create_squad_examples(raw_data, seq_len, max_len): squad_examples = [] for item in raw_data["data"]: for para in item["paragraphs"]: context = para["context"] for qa in para["qas"]: if len(qa["answers"]) > 0: question = qa["question"] answer_text = qa["answers"][0]["text"] all_answers = [_["text"] for _ in qa["answers"]] start_char_idx = qa["answers"][0]["answer_start"] squad_eg = SquadExample( question, context, start_char_idx, answer_text, all_answers, seq_len, max_len ) squad_eg.preprocess() squad_examples.append(squad_eg) else: question = qa["question"] answer_text = "" all_answers = [""] start_char_idx = 0 squad_eg = SquadExample( question, context, start_char_idx, answer_text, all_answers, seq_len, max_len ) squad_eg.preprocess() squad_examples.append(squad_eg) return squad_examples def create_inputs_targets(squad_examples, include_impossible=False): dataset_dict = { "input_ids": [], "token_type_ids": [], "attention_mask": [], "start_token_idx": [], "end_token_idx": [], } for item in squad_examples: if item.skip_doc is False: for example in item.train_examples(include_impossible): for key in dataset_dict: dataset_dict[key].append(np.array(example[key])) for key in dataset_dict: dataset_dict[key] = np.array(dataset_dict[key]) x = ( dataset_dict["input_ids"], dataset_dict["token_type_ids"], dataset_dict["attention_mask"]) y = (dataset_dict["start_token_idx"], dataset_dict["end_token_idx"]) return x, y def merge_squad_results(squad_examples, start_preds, end_preds): ii = 0 tally = [] answers = [] f1_list = [] for ex in squad_examples: if ex.skip_doc is False: n_sub = len(ex.skip) pred_out = ex.inference_from_onehot(start_preds[ii:(ii+n_sub),:], end_preds[ii:(ii+n_sub),:]) 
tally.append(pred_out[0].lower() in [a.lower() for a in ex.all_answers]) answers.append(pred_out) tp = len(pred_out[1]["tokens_pred"].intersection(pred_out[1]["tokens"])) fp = len(pred_out[1]["tokens_pred"].difference(pred_out[1]["tokens"])) fn = len(pred_out[1]["tokens"].difference(pred_out[1]["tokens_pred"])) ii = ii + n_sub if tp>0: precision = tp/(tp+fp) recall=tp/(tp+fn) f1_list.append((2*precision*recall)/(precision+recall)) else: f1_list.append(0) return (np.mean(tally), np.mean(f1_list), answers) # train_squad_examples = create_squad_examples(raw_train_data, seq_len, max_len) # x_train, y_train = create_inputs_targets(train_squad_examples) # print(f"{len(train_squad_examples)} docs, {x_train.shape[0]} training points created.") span_examples = create_squad_examples(ccc_tune, seq_len, max_len) x_span, y_span = create_inputs_targets(span_examples) print(f"{len(span_examples)} docs, {x_span[0].shape} span points created.") test_examples = create_squad_examples(ccc_test, seq_len, max_len) x_test, y_test = create_inputs_targets(test_examples) print(f"{len(test_examples)} docs, {x_test[0].shape} test points created.") valid_examples = create_squad_examples(ccc_valid, seq_len, max_len) x_valid, y_valid = create_inputs_targets(valid_examples) print(f"{len(valid_examples)} docs, {x_valid[0].shape} valid points created.") ``` # Train and Validate ``` strategy = tf.distribute.MirroredStrategy() print("Number of devices: {}".format(strategy.num_replicas_in_sync)) with strategy.scope(): model = keras.models.load_model('./roberta_base_squad2_512in') model.summary() def makeMask(x): x_cumsum = tf.math.cumsum(x[0], axis=1) y_cumsum = tf.math.cumsum(x[1], axis=1, reverse=True) prod = tf.math.multiply(x_cumsum, y_cumsum) return prod # def makeMask(x): # x_argmax = tf.math.argmax(x[0], axis=1) # y_argmax = tf.math.argmax(x[1], axis=1) # x_onehot = tf.one_hot(x_argmax, 512, axis=1) # y_onehot = tf.one_hot(y_argmax, 512, axis=1) # x_cumsum = tf.cumsum(x_onehot, axis=1) # 
y_cumsum = tf.cumsum(y_onehot, axis=1, reverse=True) # prod = tf.math.multiply(x_cumsum,y_cumsum) # return prod def expandDims(x): return tf.expand_dims(x,2) with strategy.scope(): out_start = model.get_layer("activation_4").output out_end = model.get_layer("activation_5").output span_mask = layers.ActivityRegularization(l1=0.01)(layers.Lambda(makeMask)([out_start,out_end])) span_mask_2d = layers.Lambda(tf.tile, arguments={'multiples':(1,1,768)})(layers.Lambda(expandDims)(span_mask)) roberta_embedding = model.get_layer("roberta").output["last_hidden_state"] masked_embedding = layers.Multiply()([span_mask_2d,roberta_embedding]) avg_layer = layers.GlobalMaxPooling1D(data_format="channels_last")(masked_embedding) out_count = layers.Dense(1, activation="linear")(avg_layer) new_optimizer = keras.optimizers.Adam(lr=(5e-5)) model_cat = keras.models.Model(inputs=[model.input],outputs=[out_count, out_start, out_end]) model_cat.compile(optimizer=new_optimizer, loss=["mean_squared_error", "sparse_categorical_crossentropy", "sparse_categorical_crossentropy"]) model_mask = keras.models.Model(inputs=[model.input],outputs=[span_mask]) model_cat.summary() keras.backend.set_value(model.optimizer.learning_rate, (5e-6)) keras.backend.set_value(model_cat.optimizer.learning_rate, (5e-6)) pred_start_v, pred_end_v = model.predict((x_valid[0], x_valid[1], x_valid[2]), batch_size=36, verbose=1) valid_acc = merge_squad_results(valid_examples, pred_start_v, pred_end_v) pred_start_t, pred_end_t = model.predict((x_test[0], x_test[1], x_test[2]), batch_size=36, verbose=1) test_acc = merge_squad_results(test_examples, pred_start_t, pred_end_t) print(f"Validation set accuracy (EM): {valid_acc[0]}") print(f"Test set accuracy (EM): {test_acc[0]}") print(f"Validation set F1: {valid_acc[1]}") print(f"Test set F1: {test_acc[1]}") log_list = [] batch_size = 12 for ii in range(75): print(f"ITERATION: {ii} of 75.") # valid_loss = model.evaluate((x_valid[0], # x_valid[1], # x_valid[2]), # y_valid, # 
verbose=1, # batch_size=36, # return_dict=True) # pred_start, pred_end = model.predict((x_valid[0], x_valid[1], x_valid[2]), batch_size=36, verbose=1) # valid_acc = merge_squad_results(valid_examples, pred_start, pred_end)[0] # print(f"ACCURACY: {valid_acc}") # validation_loss.append(valid_loss) # validation_accuracy.append(valid_acc) # batch.append(ii*2) span_ii = np.random.choice(range(x_span[0].shape[0]), batch_size, replace=True) cat_ii = np.random.choice(range(x_cat[0].shape[0]), batch_size, replace=True) model_cat.fit( (x_cat[0][cat_ii,:], x_cat[1][cat_ii,:], x_cat[2][cat_ii,:]), (y_cat[0][cat_ii], y_cat[1][cat_ii], y_cat[2][cat_ii]), epochs=1, # For demonstration, 3 epochs are recommended verbose=1, batch_size=batch_size, # Made this smaller. Bump up. ) log = model.fit( (x_span[0][span_ii,:], x_span[1][span_ii,:], x_span[2][span_ii,:]), (y_span[0][span_ii], y_span[1][span_ii]), epochs=1, # For demonstration, 3 epochs are recommended verbose=1, batch_size=batch_size, # Made this smaller. Bump up. 
validation_data=((x_valid[0], x_valid[1], x_valid[2]), y_valid[0]), validation_batch_size=36 ) log_list.append(log.history) # valid_loss = model.evaluate((x_valid[0], # x_valid[1], # x_valid[2]), # y_valid[0], # verbose=1, # batch_size=36, # return_dict=True) # pred_start, pred_end = model.predict((x_valid[0], x_valid[1], x_valid[2]), batch_size=36, verbose=1) # valid_acc = merge_squad_results(valid_examples, pred_start, pred_end)[0] # validation_loss.append(valid_loss) # validation_accuracy.append(valid_acc) # batch.append(ii*2) import pandas as pd stats = pd.DataFrame(log_list) stats.to_csv("results/roberta_base_squad2_512in_cccspans_sizecats_softspans_75ii.csv") model.save("roberta_base_squad2_512in_cccspans_sizecats_softspans_75ii") model_cat.save("roberta_base_squad2_512in_cccspans_sizecats_softspans_75ii_auxiliary") pred_start_v, pred_end_v = model.predict((x_valid[0], x_valid[1], x_valid[2]), batch_size=36, verbose=1) valid_acc = merge_squad_results(valid_examples, pred_start_v, pred_end_v) pred_start_t, pred_end_t = model.predict((x_test[0], x_test[1], x_test[2]), batch_size=36, verbose=1) test_acc = merge_squad_results(test_examples, pred_start_t, pred_end_t) print(f"Validation set accuracy (EM): {valid_acc[0]}") print(f"Test set accuracy (EM): {test_acc[0]}") print(f"Validation set F1: {valid_acc[1]}") print(f"Test set F1: {test_acc[1]}") pd.DataFrame([{"model":"roberta_base_squad2_512in_cccspans_sizecats_softspans_75ii", "f1_valid":valid_acc[1],"f1_test":test_acc[1], "em_valid":valid_acc[0],"em_test":test_acc[0]}]).to_csv("results/roberta_f1v_f1t_emv_emt.csv",mode="a",header=False) ```
github_jupyter
# 12. Semantics 2 - Lab exercise ## Improving a baseline Sentiment Analysis algorithm Below is a small system for training and testing a Support Vector classifier on sentiment analysis data from the 2017 Semeval Task 4a, containing English tweets. Currently the system only contains a single feature type: each tweet is represented by the set of words it contains. More specifically, a binary feature is created for each word in the vocabulary of the full training set, and the value of each feature for any given tweet is 1 if the word is present and 0 otherwise. Your task will be to improve the performance of the system by implementing other binary features. (If you want to include non-binary features, you will also have to change the provided code) Before we start, let's download the dataset: ``` !wget http://sandbox.hlt.bme.hu/~recski/stuff/4a.tgz ``` And extract the files: ``` !tar xvvf 4a.tgz ``` __4a.train__ and __4a.dev__ are the full datasets for training and testing, __test.train__ and __test.dev__ are small samples from these that you may want to use while debugging your solution. Before you get started, let's walk through the main components of the system. The __Featurizer__ class implements features as static methods and also converts train and test data to data structures handled by __sklearn__, the library we use for training an SVC model. 
``` import numpy as np import scipy from nltk.tokenize import word_tokenize import nltk nltk.download('punkt') class Featurizer(): @staticmethod def bag_of_words(text): for word in word_tokenize(text): yield word feature_functions = [ 'bag_of_words'] def __init__(self): self.labels = {} self.labels_by_id = {} self.features = {} self.features_by_id = {} self.next_feature_id = 0 self.next_label_id = 0 def to_sparse(self, events): """convert sets of ints to a scipy.sparse.csr_matrix""" data, row_ind, col_ind = [], [], [] for event_index, event in enumerate(events): for feature in event: data.append(1) row_ind.append(event_index) col_ind.append(feature) n_features = self.next_feature_id n_events = len(events) matrix = scipy.sparse.csr_matrix( (data, (row_ind, col_ind)), shape=(n_events, n_features)) return matrix def featurize(self, dataset, allow_new_features=False): events, labels = [], [] n_events = len(dataset) for c, (text, label) in enumerate(dataset): if c % 2000 == 0: print("{0:.0%}...".format(c/n_events), end='') if label not in self.labels: self.labels[label] = self.next_label_id self.labels_by_id[self.next_label_id] = label self.next_label_id += 1 labels.append(self.labels[label]) events.append(set()) for function_name in Featurizer.feature_functions: function = getattr(Featurizer, function_name) for feature in function(text): if feature not in self.features: if not allow_new_features: continue self.features[feature] = self.next_feature_id self.features_by_id[self.next_feature_id] = feature self.next_feature_id += 1 feat_id = self.features[feature] events[-1].add(feat_id) print('done, sparsifying...', end='') events_sparse = self.to_sparse(events) labels_array = np.array(labels) print('done!') return events_sparse, labels_array ``` We'll need to evaluate our output against the gold data, using the metrics defined for the competition: ``` from collections import defaultdict def evaluate(predictions, dev_labels): stats_by_label = defaultdict(lambda: 
defaultdict(int)) for i, gold in enumerate(dev_labels): auto = predictions[i] # print(auto, gold) if auto == gold: stats_by_label[auto]['tp'] += 1 else: stats_by_label[auto]['fp'] += 1 stats_by_label[gold]['fn'] += 1 print("{:>8} {:>8} {:>8} {:>8} {:>8} {:>8}".format( 'label', 'n_true', 'n_tagged', 'precision', 'recall', 'F-score')) for label, stats in stats_by_label.items(): all_tagged = stats['tp'] + stats['fp'] stats['prec'] = stats['tp'] / all_tagged if all_tagged else 0 all_true = stats['tp'] + stats['fn'] stats['rec'] = stats['tp'] / all_true if all_true else 0 stats['f'] = (2 / ((1/stats['prec']) + (1/stats['rec'])) if stats['prec'] > 0 and stats['rec'] > 0 else 0) print("{:>8} {:>8} {:>8} {:>8.2f} {:>8.2f} {:>8.2f}".format( label, all_true, all_tagged, stats['prec'], stats['rec'], stats['f'])) accuracy = ( sum([stats_by_label[label]['tp'] for label in stats_by_label]) / len(predictions)) if predictions else 0 av_rec = sum([stats['rec'] for stats in stats_by_label.values()]) / 3 f_pn = (stats_by_label['positive']['f'] + stats_by_label['negative']['f']) / 2 print() print("{:>10} {:>.4f}".format('Acc:', accuracy)) print("{:>10} {:>.4f}".format('P/N av. 
F:', f_pn)) print("{:>10} {:>.4f}".format('Av.rec:', av_rec)) ``` We need a small function to read the data from file: ``` import sys def read_data(fn): data = [] with open(fn) as f: for line in f: if not line: continue fields = line.strip().split('\t') if line.strip() == '"': continue answer, text = fields[1:3] data.append((text, answer)) return data ``` And finally a main function to run an experiment: ``` from sklearn import svm def sa_exp(train_file, dev_file): print('reading data...') train_data = read_data(train_file) dev_data = read_data(dev_file) print('featurizing train...') featurizer = Featurizer() train_events, train_labels = featurizer.featurize( train_data, allow_new_features=True) print('featurizing dev...') dev_events, dev_labels = featurizer.featurize( dev_data, allow_new_features=False) print('training...') model = svm.LinearSVC() model.fit(train_events, train_labels) print('predicting...') predictions = model.predict(dev_events) predicted_labels = [ featurizer.labels_by_id[label] for label in predictions] dev_labels = [ featurizer.labels_by_id[label] for label in dev_labels] print('evaluating...') print() evaluate(predicted_labels, dev_labels) ``` Let's see how the system performs currently: ``` sa_exp('4a.train', '4a.dev') ``` Now it's time to get started! Try to improve the main performance figures by implementing new features in the __Featurizer__ class! Make sure that each feature function is a generator and that you add function names to the class variable __feature_functions__. Some ideas for features are listed below, but you should also come up with some ideas on your own: #### Ideas for simple features - What words are used? Should this be case sensitive? - What punctuation is used? Should all of them count? - Do word ngrams help? But for which values of n? - Emojis? #### Sentiment lexicons There are many on the internet (google is your friend). Just get a couple and use it! 
#### Some more ideas - part-of-speech (POS) tags - try the [POS-tagger in NLTK](http://www.nltk.org/book/ch05.html) - POS ngrams (but maybe not all of them?) - can WordNet be of any use? - recall from last week that there's an [NLTK WordNet interface](http://www.nltk.org/howto/wordnet.html) #### Advanced Try to get more info on __rare or unseen words__. You may even want to use the code from last week's exercise
github_jupyter
``` %matplotlib inline import matplotlib.pyplot as plt import numpy as np # reset default plotting values plt.rcParams['figure.figsize'] = (10, 7) plt.rc('font', family='sans-serif') plt.rc('axes', labelsize=14) plt.rc('axes', labelweight='bold') plt.rc('axes', titlesize=16) plt.rc('axes', titleweight='bold') plt.rc('axes', linewidth=2) ``` # SQL ## The language of relational databases ![](https://wiki.postgresql.org/images/3/30/PostgreSQL_logo.3colors.120x120.png) ### Prof. Robert Quimby &copy; 2019 Robert Quimby ## In this tutorial you will... - hear about the role of databases - learn how to retrieve records (data) from a database - run basic SQL queries on an example database - use ADQL to query the Gaia database ## Databases ![](http://www.sqlitetutorial.net/wp-content/uploads/2015/12/RDBMS-Client-Server-Architecture.jpg) ## Structured Query Language (SQL) ## Resources - [SQL tutorial](https://www.w3schools.com/sql/default.asp) from w3schools.com #### Basic grammar for SQL queries - **SELECT** [comma separated list of values to select] - **FROM** [names of the tables to select values from] - **WHERE** [conditions on what values to select] - **ORDER BY** [options to sort the selected values] - **LIMIT** [option to limit the number of records retrieved] ## SQLite https://www.sqlite.org/index.html ![](http://www.sqlitetutorial.net/wp-content/uploads/2015/12/What-is-SQLite.jpg) ## SQLite database queries in python ``` # connect to a database import sqlite3 conn = sqlite3.connect(????) # data science package to work with query results import pandas as pd ``` ## Schema of tables in the example database ('example.db') The **stars** table holds data for 9110 stars in the Yale Bright Star Catalog. 
The table has the following columns: - **id** - (integer) a unique number for each star in the table - **name** - (text) name of the star (if any) - **ra** - (real) right ascension in degrees - **dec** - (real) declination in degrees - **vmag** - (real) V-band magnitude of the star - **sp_type** - (text) spectral type of the star The **spec_class** table holds data for (some of the) different spectral types including: - **sp_class** - Morgan–Keenan (Yerkes) spectral class (OBAFGKM) - **lum_class** - luminosity class (V=Main Sequence; III=Giant; I=Super Giant) - **temperature** - typical photospheric temperature - **abs_mag** - typical absolute magnitude ## SELECTing data from a database ``` # load everything from the stars table pd.read_sql_query(????, conn) # load just the ra and dec of stars in the stars table pd.read_sql_query("SELECT ???? FROM stars", conn) # load everything from the spec_class table pd.read_sql_query("SELECT * FROM spec_class", conn) ``` ## Using the WHERE clause ``` pd.read_sql_query("SELECT * FROM stars WHERE ????", conn) ``` ## Combining conditions with AND ``` query = """ SELECT ra, dec FROM stars WHERE vmag < 2.5 AND ra > 65 AND ra < 95 AND dec > -10 AND dec < 10 """ stars = pd.read_sql_query(query, conn) # plot the R.A., Dec. 
of the selected stars plt.axes(aspect='equal') plt.plot(stars['ra'], stars['dec'], 'ro') plt.gca().invert_xaxis() # select by luminosity class query = "SELECT * FROM spec_class WHERE lum_class='{}'" ms = pd.read_sql_query(query.format('V'), conn) g = pd.read_sql_query(query.format('III'), conn) sg = pd.read_sql_query(query.format('I'), conn) # plot an HR diagram plt.plot(ms['temperature'], ms['abs_mag'], 'go') plt.plot(g['temperature'], g['abs_mag'], 'ro') plt.plot(sg['temperature'], sg['abs_mag'], 'bo') plt.gca().invert_xaxis() plt.gca().invert_yaxis() plt.xscale('log') plt.xlabel('Temperature (K)') plt.ylabel('Absolute V-band Magnitude'); ``` ## Computing values in queries ``` # use SQL as a calculator pd.read_sql_query("SELECT ????", conn) # retrieve computed values pd.read_sql_query("SELECT ???? FROM stars ", conn) ``` ## Joining tables ``` query = """ SELECT name, ra, dec, vmag, temperature, abs_mag FROM stars, spec_class WHERE ???? """ pd.read_sql_query(query, conn) ``` ## Relation between observed mag, absolute mag, and distance $$D = 10^{ (m - M + 5)/5 }$$ ## Adding functions ``` # raising a number to some power in numpy ???? # add the POWER function to queries conn.create_function("power", ????, ????) query = """ SELECT name, ra, dec, vmag, abs_mag, POWER(10, (vmag - abs_mag + 5) / 5) AS dist FROM stars, spec_class WHERE sp_type = sp_class || lum_class ORDER BY dist LIMIT 10 """ pd.read_sql_query(query, conn) ``` ## Astronomical Data Query Language (ADQL) Based on SQL but designed for astronomers [Gaia query web interface](http://gea.esac.esa.int/archive/) - [Basic ADQL syntax](https://gea.esac.esa.int/archive-help/adql/index.html) from Gaia web pages - [Example ADQL queries](https://gea.esac.esa.int/archive-help/adql/examples/index.html) from Gaia ``` from astroquery.gaia import Gaia # query Gaia database to select stars in the Gaia DR2 catalog # near (R.A, Dec.) 
= (0, 0) # near the North Celestial Pole query = """SELECT ra, dec FROM gaiadr2.gaia_source WHERE 1=CONTAINS( POINT('ICRS', ra, dec), CIRCLE('ICRS', 0, 0, 0.25)) AND phot_bp_mean_mag < 15 """ job = Gaia.launch_job_async(query) # get the search results gaia = job.get_results() gaia ```
github_jupyter
# NOTE Unfortunately the `desimodel.focalplane.on_tile_gfa()` code had a bug in the 18.6 and 18.7 software releases, where it would fail in the case of no input targets overlapping any GFAs. This will be fixed before the next release but for now this tutorial doesn't work. We are also updating how GFA targets are pre-selected and subsequently used, so this tutorial will receive a major update. # Finding targets on the GFAs In this notebook, one can find useful information on my work on utility functions for the guide, focus, and alignment (GFA) sensors on DESI. I've included relevant code that I've written over the past summer of 2017, as well as problems I've run into, and general usage of the code. If you have any questions about my work, please don't hesitate to email me at wwang153@stanford.edu or woodywang153@gmail.com. Special thanks to Stephen Bailey for supervising and allowing me to work at LBNL on such an exciting, revolutionary project. Note: some of the code in the notebook might have been modified slightly upon being pushed to the central desimodel repository in order to avoid redundancies of import statements or for the sake of minor adjustments. These were the general goals for the project, and the code in this notebook achieves a majority of them. Our project will focus on identifying where stars and galaxies will land on the DESI focal plane for arbitrary pointings of the telescope. Previous work has identified which positioners should point to which astronomical targets for each telescope pointing but more work remains, in particular: - What stars will land on the 10 "guide focus alignment" (GFA) sensors to be used for focusing the telescope and keeping it tracking (guiding) the sky as the earth rotates during the exposure. - Do any of our pre-defined pointings not have enough stars? Could small adjustments to the pointing gain more stars? 
- Do any of our predefined pointings have overly bright stars that would saturate the detectors and corrupt the data? - Where do the brightest stars on the sky land for each of our pre-defined pointings? - Develop visualization tools for understand the positions of stars and galaxies on the focal plane during commissioning and debugging. ## Find all DR4 targets that are on a DESI tile GFA This section demonstrates the core functionality for finding DR4 targets that overlap a GFA, using `desimodel.focalplane.get_gfa_targets()` ``` import os import numpy as np %pylab inline # Identify the location of sweep files; the default paths are for NERSC # so that that will work from jupyter-dev.nersc.gov import os sweep_dir = os.getenv('SWEEP_DIR', '/global/project/projectdirs/cosmo/data/legacysurvey/dr4/sweep/4.0/') fiberassign_dir = os.getenv('FIBERASSIGN_DIR', '/global/project/projectdirs/desi/datachallenge/dc17a-twopct/fiberassign/output/') assert os.path.isdir(sweep_dir) assert os.path.isdir(fiberassign_dir) def get_all_gfa_targets(sweep_dir): #- example code for the highest level wrapper for how this could be used #- to loop over sweeps and build tables of GFA targets for each tile from astropy.table import Table, vstack from astropy.io import fits import desitarget.io import desimodel.focalplane import numpy as np gfa_targets = list() for filename in sorted(desitarget.io.iter_sweepfiles(sweep_dir)): print(filename) targets = Table.read(filename) on_gfa_targets = desimodel.focalplane.get_gfa_targets(targets) on_gfa_targets.meta.clear() gfa_targets.append(on_gfa_targets) gfa_targets = vstack(gfa_targets) return gfa_targets #----- gfa_targets = get_all_gfa_targets(sweep_dir) # index = np.where(gfa_targets['FLUX_R'] == max(gfa_targets['FLUX_R'])) # print(gfa_targets[index]) from astropy.table import Table, vstack from astropy.io import fits import desitarget.io import desimodel.focalplane import numpy as np filename = 
'/global/project/projectdirs/cosmo/data/legacysurvey/dr4/sweep/4.0/sweep-060p065-070p070.fits' targets = Table.read(filename) print(len(targets)) on_gfa_targets = desimodel.focalplane.get_gfa_targets(targets) ``` ## Find the brightest 10 objects on any GFA ``` ii = np.argsort(-gfa_targets['FLUX_R']) gfa_targets[ii[0:10]]['TILEID', 'GFA_LOC', 'BRICKNAME', 'OBJID', 'TYPE', 'RA', 'DEC', 'FLUX_R'] print(22.5 - 2.5*np.log10(gfa_targets['FLUX_R'][ii[0:10]])) # Dump top 100 to fits file for loading into legacysurvey.org/viewer gfa_targets[ii[0:100]]['RA', 'DEC'].write('bright-gfa.fits') rmag = 22.5 - 2.5*np.log10(gfa_targets['FLUX_R']) ok = gfa_targets[(rmag > 0)] jj = np.argsort(-ok['FLUX_R']) gfa_targets[jj[0:100]]['RA', 'DEC'].write('bright-gfa.fits', overwrite=True) ok[jj[0:10]]['TILEID', 'GFA_LOC', 'BRICKNAME', 'OBJID', 'TYPE', 'RA', 'DEC', 'FLUX_R'] ``` ## Find GFAs with no targets above minimum rflux (default rmag=15) Currently this is a lot of targets because of tiles that only partially covered by DR4; would be better to identify GFAs with any targets with rmag<20 but no point sources targets with rmag<15. i.e. they are covered, but not with anything bright enough. ``` for tileid in sorted(set(gfa_targets['TILEID'])): gfatile = gfa_targets[gfa_targets['TILEID'] == tileid] for gfa_loc in range(10): n = np.count_nonzero(gfatile['GFA_LOC'] == gfa_loc) if n == 0: print(tileid, gfa_loc) ``` ## GFA basic geometry and visualizations The cell below shows my initial work on understanding the focal plane geometry simply by plotting it using matplotlib. %pylab populates the namespace from numpy and matplotlib, making it easier to plot graphs in jupyter notebook, but should not be used in code pushed to the desimodel repository. The function plot_focal_plane() plots the 5000 fiber positioners and the 10 GFA corners, a simple visualization tool to understand the geometry of the focal plane itself. 
``` # Uses a rotation matrix to plot the focal plane in mm def plot_focal_plane(): ''' Plots the focal plane with the 5000 fiber positioners and 10 GFAs using the initial four corners of the 0th indexed GFA and rotating the points by 36 degrees counter-clockwise. Uses the reference projection of the active area for each GFA. ''' import desimodel.io # Sets the title of the graph title('Focal Plane Overhead View') # Plots the location of each of the fiber positioners fp = desimodel.io.load_fiberpos() plot(fp['X'],fp['Y'], 'g.') #x = [318.529, 330.901, 348.947, 336.574] #y = [225.702, 234.691, 209.830, 200.841] # Experiments with "Reference projection of active area" coordinates #x = [318.703, 331.075, 349.121, 336.748] #y = [225.816, 234.805, 209.944, 200.955] """ Uses the x and y from the petal indexed at 9 so the first petal added to the table is indexed at 0 [[ 313.24842144 -233.32358331] [ 325.62062672 -242.31230077] [ 307.55293135 -267.15753816] [ 295.18041705 -258.16786964]]""" x = [313.24842144, 325.62062672, 307.55293135, 295.18041705] y = [-233.32358331, -242.31230077, -267.15753816, -258.16786964] # Creates a rotation matrix for 36 degrees counter-clockwise rotatemat = numpy.zeros(shape=(2,2)) rotatemat[0] = [cos(36*pi/180), -sin(36*pi/180)] rotatemat[1] = [sin(36*pi/180), cos(36*pi/180)] draw_gfas(x, y, rotatemat) # Function that draws the GFAs on the focal plane def draw_gfas(x, y, rotatemat): """ Draws the 10 GFAs given the initial x and y coordinates of four corners of a GFA Parameters ---------- x : Array of four x initial coordinates of the GFA y : Array of four y initial coordinates of the GFA """ coord = numpy.zeros(shape=(2,1)) gfacoord = numpy.zeros(shape=(4, 2)) oldxcoord = x oldycoord = y for j in range(10): for i in range(4): coord[0] = oldxcoord[i] coord[1] = oldycoord[i] newcoord = matmul(rotatemat, coord) oldxcoord[i] = newcoord[0] oldycoord[i] = newcoord[1] gfacoord[i] = [newcoord[0], newcoord[1]] plot(newcoord[0], newcoord[1], 'k.') 
draw_single_gfa(gfacoord) def draw_single_gfa(gfacoord): """ Draws a single GFA given a 4X2 array of coordinates for the four corners of a GFA Parameters ---------- gfacoord: 4X2 array of x and y coordinates with each row representing a corner of the GFA """ # Prints all of the GFA coordinates for debugging #print(gfaCoord) gfapolygon = Polygon(gfacoord) plt.gca().add_patch(gfapolygon) figure(figsize=(8,8)) plot_focal_plane() ``` After better understanding of the focal plane geometry, I wrote code to write a .ecsv file with data on each of the GFA's corners. Using the DESI-0530-v13 Excel spreadsheet, I took the 4 corners of the single GFA locations provided and rotated each corner 36 degrees counter-clockwise to define each of the other 10 GFA's corners. The build_gfa_table() function was eventually moved to desimodel.inputs.gfa, with respective unit tests. The table includes information on the GFA's petal number, corner number, x, y, z, q, radius in degrees, and radius in mm. ``` # Import statements to run code for debugging import desimodel.io import scipy.interpolate def build_gfa_table(outfile = 'gfa.ecsv'): ''' Builds the GFA table given the data from DESI-0530-v13 Excel spreadsheet and writes a .ecsv file using the astropy table library. The data is pulled from the "GFALocation" tab on the spreadsheet and from rows 16-23 and columns A-I. Parameters ---------- outfile: a default parameter that represents the desired filename which is returned by this function. The filename defaults to "gfa.ecsv" if no parameters are given. 
''' # Uses the reference projection of active area to create data table of GFAs from astropy.table import Table # Initial x and y coordinates for the GFAs """ Uses the x and y from the petal indexed at 9 so the first petal added to the table is indexed at 0 [[-125.10482863 -370.01790486] [-129.83038525 -384.56223777] [-159.04283509 -375.05643893] [-154.31646944 -360.51151824]] """ # Data obtained from DESI-0530-v13 Excel spreadsheet x = [-125.10482863, -129.83038525, -159.04283509, -154.31646944] y = [-370.01790486, -384.56223777, -375.05643893, -360.51151824] z = [-17.053, -18.487, -18.631, -17.198] rotatemat = numpy.zeros(shape=(2,2)) rotatemat[0] = [cos(36*pi/180), -sin(36*pi/180)] rotatemat[1] = [sin(36*pi/180), cos(36*pi/180)] # Note: the corners are 0 indexed gfatable = Table(names = ('PETAL', 'CORNER', 'X', 'Y', 'Z', 'Q', 'RADIUS_DEG', 'RADIUS_MM'), dtype = ('int', 'int', 'float', 'float', 'float', 'float', 'float', 'float')) # Sets the units for the GFA table gfatable['X'].unit = 'mm' gfatable['Y'].unit = 'mm' gfatable['Z'].unit = 'mm' gfatable['Q'].unit = 'degrees' gfatable['RADIUS_DEG'] = 'degrees' gfatable['RADIUS_MM'] = 'mm' find_gfa_coordinates(x, y, z, gfatable, rotatemat) # Saves the table of data as an ecsv file gfatable.write(outfile, format='ascii.ecsv') # Function that obtains the x and y coordinates for each corner of the GFAs def find_gfa_coordinates(x, y, z, gfatable, rotatemat): ''' Finds all the GFA coordinates by rotating the initial coordinates and adding the respective coordinates to the gfaTable Parameters ---------- x : Array of four x initial coordinates of the GFA y : Array of four y initial coordinates of the GFA z: Array of four z initial coordinates of the GFA gfaTable: Astropy Table object which stores the petal number, corner number, and x, y, and z coordinates in mm within each row rotateMat: Rotation matrix to rotate the coordinates 36 degrees counterclockwise ''' coord = numpy.zeros(shape=(2,1)) gfacoord = 
numpy.zeros(shape=(4, 2)) oldxcoord = x oldycoord = y for j in range(10): for i in range(4): coord[0] = oldxcoord[i] coord[1] = oldycoord[i] newcoord = matmul(rotatemat, coord) oldxcoord[i] = newcoord[0] oldycoord[i] = newcoord[1] gfacoord[i] = [newcoord[0], newcoord[1]] theta = cartesian_to_polar_angle(newcoord[0], newcoord[1]) # radius is the radius in mm radius = np.sqrt(newcoord[0]**2 + newcoord[1]**2) # degree is the radius in degrees degree = get_radius_deg(newcoord[0], newcoord[1]) # Could be building the table in O(N^2), which is notably inefficient gfatable.add_row([j, i, newcoord[0], newcoord[1], z[i], theta, degree, radius]) def get_radius_mm(theta): """ Returns an array of radii in mm given an array of radii in degrees using the platescale data relative to the center of the focal plane as (0,0). Supports scalar and vector inputs. Parameters ---------- theta: An array that represents the angle from the center of the focal plane """ import scipy.interpolate import desimodel.io platescale = desimodel.io.load_platescale() # Uses a quadratic one-dimensional interpolation to approximate the radius in degrees versus radius in mm fn = scipy.interpolate.interp1d(platescale['theta'], platescale['radius'], kind = 'quadratic') radius = fn(theta) if(np.isscalar(theta)): return float(radius) else: return radius def get_radius_deg(x, y): """ Returns the radius in degrees given x, y coordinates using the platescale data Parameters ---------- x: The x coordinate in mm of a location on the focal plane y: The y coordinate in mm of a location on the focal plane """ import scipy.interpolate import desimodel.io radius = np.sqrt(x**2 + y**2) platescale = desimodel.io.load_platescale() # Plots are used for debugging. 
#plot(platescale['radius'], platescale['theta'], 'k.') #plot(platescale['radius'], platescale['radial_platescale'], 'k.') fn = scipy.interpolate.interp1d(platescale['radius'], platescale['theta'], kind = 'quadratic') degree = float(fn(radius)) return degree def cartesian_to_polar_angle(x, y): """ Given cartesian coordinates, this function returns the polar angle in degrees for use in polar coordinates Parameters ---------- x: The x coordinate in mm of a location on the focal plane y: The y coordinate in mm of a location on the focal plane """ return np.degrees(np.arctan2(y, x)) build_gfa_table() ``` In the following cell, I've written functions to extract the GFA data with the likes of load_gfa(). load_gfa() has been moved to the desimodel.io Python file to allow users to read the GFA table easily. The following functions are utility plotting functions in order to confirm that the GFA table contains sensible data. The plotting function allows one to visualize the focal plane geometry along with the GFA locations. The plot_gfa(gfa, petal) function also allows one to specifcy which petal of the focal plane they would like to see plotted. These functions are mainly for developing and understanding the data in the GFA table from load_gfa(). It should be noted that most of the functions I've written work with the GFA locations either in RA and Dec or x and y, rather than using the z coordinate or q and s coordinates. Thus, the GFA and focal plane is essentially a 2D projection in my code, rather than the true 3D geometry. 
``` # Follows the style and format of desimodel.io.py _gfa = None def load_gfa(): """Returns GFA table from desimodel/data/focalplane/gfa.ecsv""" global _gfa from astropy.table import Table # os is imported already in the desimodel io.py import os if _gfa is None: gfaFile = os.path.join(os.environ['DESIMODEL'], 'data', 'focalplane', 'gfa.ecsv') _gfa = Table.read(gfaFile, format = 'ascii.ecsv') return _gfa def plot_gfa(gfa, petal = None): """ Plots the GFA at a certain petal or all the GFAs depending on the respective parameters. Uses the reference projection of the active area to plot the GFA. Parameters ---------- gfa: Astropy Table object with the petal number, corner number, and x, y, z, and q coordinates of the GFAs, as well as the radius in degrees and mm of each corner of the GFAs. petal: optional integer parameter which specifies which petal to plot """ title('GFA Overhead View') plt.xlabel('mm') plt.ylabel('mm') gfacoord = numpy.zeros(shape=(4, 2)) if(type(petal) != int and petal != None): raise TypeError("Please enter an integer value") elif(petal == None): counter = 0 for i in range(40): gfacoord[counter % 4] = [gfa['X'][i], gfa['Y'][i]] plot(gfa['X'][i], gfa['Y'][i], 'k.') counter += 1 if ((counter % 4) == 0): draw_single_gfa(gfacoord) elif(petal < 0 or petal > 9): raise ValueError("Please enter a petal value between 0 and 9") elif(petal <= 9 and petal >= 0): index = petal * 4 for j in range(4): gfacoord[j] = [gfa['X'][index], gfa['Y'][index]] plot(gfa['X'][index], gfa['Y'][index], 'k.') index += 1 draw_single_gfa(gfacoord) else: raise Exception("Please enter an integer between 0 and 9") plot() def draw_single_gfa(gfacoord): """ Draws a single GFA given a 4X2 array of coordinates for the four corners of a GFA Parameters ---------- gfaCoord: 4X2 array of x and y coordinates with each row representing a corner of the GFA """ # Prints all of the GFA coordinates for debugging #print(GFAcoord) gfapolygon = Polygon(gfacoord) plt.gca().add_patch(gfapolygon) def 
plot_focal_plane(): """ Plots a 2D representation of the focal plane with the 5000 fiberpositioners and the 10 GFAs """ #%pylab #import desimodel.io # Plots the location of each of the fiber positioners fp = desimodel.io.load_fiberpos() plot(fp['X'],fp['Y'], 'g.') gfa = load_gfa() plot_gfa(gfa) title('Focal Plane Overhead View') # Debugging lines to test if functions work #import desimodel.io #%pylab inline #fp = desimodel.io.load_fiberpos() #plot(fp['X'], fp['Y'], 'g.') #gfa = load_gfa() #plot_gfa(gfa, 2) figure(figsize=(8,8)) plot_focal_plane() ``` The following function, plot_focal_plane_deg() simply plots the focal plane in degrees, rather than in mm, by reading the GFA table in using load_gfa(). ``` def plot_focal_plane_deg(): """ Plots the focal plane in degrees using the gfa Table object read in with the astropy library """ import desimodel.io # ONLY USED HERE RIGHT NOW BECAUSE LOAD_GFA() HAS NOT BEEN PUSHED gfa = desimodel.io.load_gfa() pyplot.polar() title('GFA Overhead View in Degrees') # Theta is in degrees theta = gfa['Q'] degree = gfa['RADIUS_DEG'] print(gfa) for i in range(40): thetaradians = theta[i] * pi / 180 pyplot.polar(thetaradians, degree[i], 'k.') # Used for debugging to print the angle in radians and the radius in degrees #print(str(theta[i]) + ", " + str(degree[i])) figure(figsize=(8,8)) plot_focal_plane_deg() ``` After creating the .ecsv file to hold the GFA information, in order to determine if a target would be on the GFA, I needed to develop a function to transform x, y coordinates to RA, Dec coordinates, and vice versa. The following code shows xy2radec() and radec2xy(), which use rotation matrices to transform certain coordinates to the new respective coordinate system. 
``` def xy2radec(telra, teldec, x, y): """ Returns the new RA and Dec of an x, y position on the focal plane in the sky given an arbitrary telescope pointing in RA and Dec Parameters ---------- telra: a float signifying the telescope's RA pointing in degrees teldec: a float signifying the telescope's Dec pointing in degrees x: The x coordinate in mm of a location on the focal plane y: The y coordinate in mm of a location on the focal plane """ from math import atan2, acos # radial distance on the focal plane in degrees r_deg = get_radius_deg(x, y) # print(r_deg) # q signifies the angle the position makes with the +x-axis of focal plane q = cartesian_to_polar_angle(x, y) # print(q) coord = numpy.zeros(shape=(3,1)) coord[0] = 1 # Clockwise rotation around the z-axis by the radial distance to a point on the focal plane in radians zrotate = numpy.zeros(shape=(3,3)) r_rad = math.radians(r_deg) zrotate[0] = [cos(r_rad), sin(r_rad), 0] zrotate[1] = [-sin(r_rad), cos(r_rad), 0] zrotate[2] = [0, 0, 1] # Counter-clockwise rotation around the x-axis xrotate = numpy.zeros(shape=(3,3)) q_rad = math.radians(q) xrotate[0] = [1, 0, 0] xrotate[1] = [0, cos(q_rad), -sin(q_rad)] xrotate[2] = [0, sin(q_rad), cos(q_rad)] # Counter-clockwise rotation around y axis by declination of the tile center decrotate = numpy.zeros(shape=(3,3)) teldec_rad = math.radians(teldec) decrotate[0] = [cos(teldec_rad), 0, -sin(teldec_rad)] decrotate[1] = [0, 1, 0] decrotate[2] = [sin(teldec_rad), 0, cos(teldec_rad)] # Counter-clockwise rotation around the z-axis by the right ascension of the tile center rarotate = numpy.zeros(shape=(3,3)) telra_rad = math.radians(telra) rarotate[0] = [cos(telra_rad), -sin(telra_rad), 0] rarotate[1] = [sin(telra_rad), cos(telra_rad), 0] rarotate[2] = [0, 0, 1] coord1 = matmul(zrotate, coord) coord2 = matmul(xrotate, coord1) coord3 = matmul(decrotate, coord2) coord4 = matmul(rarotate, coord3) ra_rad = atan2(coord4[1], coord4[0]) dec_rad = (pi / 2) - acos(coord4[2] / 
sqrt((coord4[0]**2) + (coord4[1]**2) + (coord4[2]**2))) ra_deg = math.degrees(ra_rad) dec_deg = math.degrees(dec_rad) # Value can be 360, which should be 0 ra = ra_deg % 360 # MAKE THIS CHANGE AND COMMIT TO GITHUB IN NEW BRANCH BECAUSE RA SHOULD BE BETWEEN 0 AND 360 if(ra == 360): ra = 0 return ra, dec_deg newra, newdec = xy2radec(8.37, -10.65, -138.345, -333.179) #simple test case of only moving in ra #newra1, newdec1 = xy2radec(0, 0, 400, 0) #newra2, newdec2 = xy2radec(0, 0, 0, 400) print(newra) print(newdec) def radec2xy(telra, teldec, ra, dec): """ Returns arrays of the x, y positions of given celestial objects on the focal plane given an arbitrary telescope pointing in RA and Dec and arrays of the RA and Dec of celestial objects in the sky. Implements the Haversine formula. Parameters ---------- telra: a scalar float signifying the telescope's RA pointing in degrees teldec: a scalar float signifying the telescope's Dec pointing in degrees ra: An array of RA values for locations in the sky dec: An array of declination values for locations in the sky """ # Inclination is 90 degrees minus the declination in degrees inc = 90 - dec x0 = sin(math.radians(inc)) * cos(math.radians(ra)) y0 = sin(math.radians(inc)) * sin(math.radians(ra)) z0 = cos(math.radians(inc)) coord = [x0, y0, z0] # Clockwise rotation around y axis by declination of the tile center decrotate = numpy.zeros(shape=(3,3)) teldec_rad = math.radians(teldec) decrotate[0] = [cos(teldec_rad), 0, sin(teldec_rad)] decrotate[1] = [0, 1, 0] decrotate[2] = [-sin(teldec_rad), 0, cos(teldec_rad)] # Clockwise rotation around the z-axis by the right ascension of the tile center rarotate = numpy.zeros(shape=(3,3)) telra_rad = math.radians(telra) rarotate[0] = [cos(telra_rad), sin(telra_rad), 0] rarotate[1] = [-sin(telra_rad), cos(telra_rad), 0] rarotate[2] = [0, 0, 1] #coord1 = matmul(decrotate, coord) #coord2 = matmul(rarotate, coord1) coord1 = matmul(rarotate, coord) coord2 = matmul(decrotate, coord1) x = 
coord2[0] y = coord2[1] z = coord2[2] print(x, y, z) theta_deg = np.sqrt(y**2 + z**2) theta_rad = theta_deg * 180 / np.pi #radius = get_radius_mm(theta_rad) p = np.array([8.297E5, -1750.0, 1.394E4, 0.0]) radius = 0.0 for i in range(4): radius = theta_deg*radius + p[i] print(radius) testx = radius * -y / theta_deg testy = radius * -z / theta_deg return testx, testy """newra, newdec = xy2radec(8.37, -10.65, -138.345, -333.179) 8.927313423598427 -9.324956250231294""" x, y = radec2xy(8.37, -10.65, 8.927313423598427, -9.324956250231294) print(x) print(y) r = get_radius_deg(x, y) print(r) q = cartesian_to_polar_angle(x, y) print(q) ``` It turned out, however, that for some reason, the radec2xy() function would not return exactly the same values as the initial x,y coordinates if attempting a round-trip transformation. Thus, the following function, radecdifference() plots the difference between the intended results and the actual results of the transformation. Due to the inconsistency, I had to modify the radec2xy() formula to use the Haversine formula and slightly different rotation matrices, and the minor inconsistencies were resolved. 
``` def radecdifference(): """ Reads in a file with data from 5000 fiberpositioners at a given telescope pointing and graphs a quiver plot and histogram to show the difference in calculations """ from astropy.io import fits from astropy.table import Table #import numpy as np import desimodel.io #%pylab tilefile = os.path.join(fiberassign_dir, 'tile_00612.fits') tile = Table.read(tilefile, 'FIBER_ASSIGNMENTS') #- File bug: TILERA, TILEDEC are arrays of identical values instead of scalars; #- just grab element 0 telra = tile.meta['TILERA'][0] teldec = tile.meta['TILEDEC'][0] ra = numpy.zeros(5000) dec = numpy.zeros(5000) # u and v are for the quiver plots, signifying the x and y components of the vectors respectively """u = numpy.zeros(5000) v = numpy.zeros(5000)""" for i in range(5000): ra[i], dec[i] = xy2radec(telra, teldec, tile['XFOCAL_DESIGN'][i], tile['YFOCAL_DESIGN'][i]) """u[i] = ra[i] - tile['RA'][i] v[i] = dec[i] - tile['DEC'][i]""" u = ra - tile['RA'] v = dec - tile['DEC'] #q = quiver(tile['RA'], tile['DEC'], 10*u, 10*v) hist(sqrt((u*cos(tile['DEC'] * pi / 180))**2 + (v)**2)*3600) radecdifference() ``` Below is the new radec2xy() function that consistently converts RA, Dec coordinates back to x,y, completing a round-trip transformation. The function also supports vector inputs. Visualizing and understanding the mathematics behind transforming from x, y coordinates on the focal plane to RA, Dec spherical coordinates was quite confusing for me, and with the help of Stephen Bailey, I was able to understand the transformation through a series of matrix rotations, outlined in the code below. The xy2radec() and radec2xy() code has been moved to desimodel.focalplane. 
``` # Implements the haversine formula def radec2xy(telra, teldec, ra, dec): """ Returns arrays of the x, y positions of given celestial objects on the focal plane given an arbitrary telescope pointing in RA and Dec and arrays of the RA and Dec of celestial objects in the sky Parameters ---------- telra: a scalar float signifying the telescope's RA pointing in degrees teldec: a scalar float signifying the telescope's Dec pointing in degrees ra: An array of RA values for locations in the sky dec: An array of declination values for locations in the sky """ import numpy as np import math # Inclination is 90 degrees minus the declination in degrees dec = np.asarray(dec) inc = 90 - dec ra = np.asarray(ra) #inc = 90 - dec x0 = np.sin(np.radians(inc)) * np.cos(np.radians(ra)) y0 = np.sin(np.radians(inc)) * np.sin(np.radians(ra)) z0 = np.cos(np.radians(inc)) coord = [x0, y0, z0] # Clockwise rotation around y axis by declination of the tile center decrotate = np.zeros(shape=(3,3)) teldec_rad = np.radians(teldec) decrotate[0] = [np.cos(teldec_rad), 0, np.sin(teldec_rad)] decrotate[1] = [0, 1, 0] decrotate[2] = [-np.sin(teldec_rad), 0, np.cos(teldec_rad)] # Clockwise rotation around the z-axis by the right ascension of the tile center rarotate = np.zeros(shape=(3,3)) telra_rad = math.radians(telra) rarotate[0] = [np.cos(telra_rad), np.sin(telra_rad), 0] rarotate[1] = [-np.sin(telra_rad), np.cos(telra_rad), 0] rarotate[2] = [0, 0, 1] coord1 = np.matmul(rarotate, coord) coord2 = np.matmul(decrotate, coord1) x = coord2[0] y = coord2[1] z = coord2[2] newteldec = 0 newtelra = 0 ra_rad = np.arctan2(y, x) dec_rad = (np.pi / 2) - np.arccos(z / np.sqrt((x**2) + (y**2) + (z**2))) radius_rad = 2 * np.arcsin(np.sqrt((np.sin((dec_rad - newteldec) / 2)**2) + ((np.cos(newteldec)) * np.cos(dec_rad) * (np.sin((ra_rad - newtelra) / 2)**2)))) radius_deg = np.degrees(radius_rad) q_rad = np.arctan2(-z, -y) radius_mm = get_radius_mm(radius_deg) x_focalplane = radius_mm * np.cos(q_rad) y_focalplane 
= radius_mm * np.sin(q_rad) return x_focalplane, y_focalplane ra = [8.40634632111, 8.927313423598427] dec = [-9.93649291992, -9.324956250231294] #x, y = radec2xy(8.37, -10.65, 8.927313423598427, -9.324956250231294) x, y = radec2xy(8.37, -10.65, ra, dec) print(x) print(y) #8.40634632111 -9.93649291992 """newra, newdec = xy2radec(8.37, -10.65, -138.345, -333.179) 8.927313423598427 -9.324956250231294""" ``` After significant amounts of testing, the following functions, on_gfa(), on_tile_gfa(), and get_gfa_targets() are low, mid, and high level functions to retrieve targets on a particular GFA. The functions have all been moved to desimodel.focalplane. ``` #- example code for the highest level wrapper for how this could be used #- to loop over sweeps and build tables of GFA targets for each tile from astropy.table import Table, vstack import desitarget.io gfa_targets = list() for filename in desitarget.io.iter_sweepfiles(root_directory): targets = Table.read(filename) gfa_targets.append(get_gfa_targets(targets)) gfa_targets = vstack(gfa_targets) gfa_targets.write('blat.fits') #- or to write one file per tile: for tileid in np.unique(gfa_targets['TILEID']): ii = (gfa_targets['TILEID'] == tileid) outfile = 'gfa_targets-{}.fits'.format(tileid) gfa_targets[ii].write(outfile) ``` ``` def on_gfa(telra, teldec, ra, dec, buffer_arcsec = 100): """ Checks if a target is on any of the 10 GFAs given telra, teldec and an array of RA and Dec pointings, as well as a parameter for degrees of tolerance one would like to allow. When using desimodel.footprint.find_points_in_tiles(tiles, ra, dec, radius) with this function to check what points are on the GFAs, the default radius parameter should be set to 1.651 (degrees), so that boundary GFA area actually encompasses points normally outside of the tile. 
Parameters: telra: The telescope's arbitrary RA pointing teldec: The telescope's arbitrary Dec pointing ra: An array of RA values for locations in the sky dec: An array of declination values for locations in the sky buffer_arcsec: A value in arcseconds on the sky of how much tolerance one would allow for seeing if a target is on the gfa. Returns: targetindices: a list of targets with their respective indices in the RA and Dec list passed in that fall on certain GFAs denoted by the index in the gfaindices list. gfaindices: a list equal in length with the targetindices list with the gfa location 0-9 as each element """ import desimodel.footprint # If any calculated area is under the threshold area, it is mathematically impossible THRESHOLD_AREA = 469.7 MIN_TOLERANCE = 0.001 inrangeindices = desimodel.footprint.find_points_radec(telra, teldec, ra, dec, 1.651) if not inrangeindices: return np.array([]), np.array([]) inrangeindices = np.asarray(inrangeindices) targetx, targety = desimodel.focalplane.radec2xy(telra, teldec, ra[inrangeindices], dec[inrangeindices]) x_tolerance, y_tolerance = degrees2xytolerance(buffer_arcsec) targetindices = [] gfaindices = [] # x and y hold the 40 new GFA coordinates x, y = shift_gfa_points(x_tolerance, y_tolerance) # The area boundary's value is the area of the gfa plus some tolerance. 
AREA_BOUNDARY = retrieve_minimum_boundary(x_tolerance, y_tolerance) + MIN_TOLERANCE targetx = np.asarray(targetx) targety = np.asarray(targety) # Method to check if point is inside the rectangle for gfaid in range(0, 40, 4): # a1 through a4 are edge lengths of the rectangle formed by corners of the GFAs a1 = np.sqrt((x[gfaid] - x[gfaid + 1])**2 + (y[gfaid] - y[gfaid + 1])**2) a2 = np.sqrt((x[gfaid + 1] - x[gfaid + 2])**2 + (y[gfaid + 1] - y[gfaid + 2])**2) a3 = np.sqrt((x[gfaid + 2] - x[gfaid + 3])**2 + (y[gfaid + 2] - y[gfaid + 3])**2) a4 = np.sqrt((x[gfaid + 3] - x[gfaid])**2 + (y[gfaid + 3] - y[gfaid])**2) # b1 through b4 are the line segments from each corner to the target location b1 = np.sqrt((x[gfaid] - targetx)**2 + (y[gfaid] - targety)**2) b2 = np.sqrt((x[gfaid + 1] - targetx)**2 + (y[gfaid + 1] - targety)**2) b3 = np.sqrt((x[gfaid + 2] - targetx)**2 + (y[gfaid + 2] - targety)**2) b4 = np.sqrt((x[gfaid + 3] - targetx)**2 + (y[gfaid + 3] - targety)**2) # Calculating areas of triangles using Heron's Formula u1 = (a1 + b1 + b2) / 2.0 u2 = (a2 + b2 + b3) / 2.0 u3 = (a3 + b3 + b4) / 2.0 u4 = (a4 + b4 + b1) / 2.0 area1 = np.sqrt((u1 * (u1 - a1) * (u1 - b1) * (u1 - b2)).clip(0)) area2 = np.sqrt((u2 * (u2 - a2) * (u2 - b2) * (u2 - b3)).clip(0)) area3 = np.sqrt((u3 * (u3 - a3) * (u3 - b3) * (u3 - b4)).clip(0)) area4 = np.sqrt((u4 * (u4 - a4) * (u4 - b4) * (u4 - b1)).clip(0)) targetarea = area1 + area2 + area3 + area4 assert np.all(targetarea > THRESHOLD_AREA) if(any(targetarea < AREA_BOUNDARY) and all(targetarea > THRESHOLD_AREA)): newtargetindices = np.where(targetarea < AREA_BOUNDARY) targetindices.extend(newtargetindices[0]) gfaindices.extend([int(gfaid / 4)] * len(newtargetindices[0])) return inrangeindices[targetindices], gfaindices def retrieve_minimum_boundary(x_tolerance, y_tolerance): """ Used as a helper function to the on_gfa function to find the minimum boundary area for a point to lie inside a certain GFA given an tolerance in x and y in mm Parameters: 
x_tolerance: tolerance in x in mm y_tolerance: tolerance in y in mm Returns: targetarea: the minimum boundary area for the procedure to check if a point is inside the GFA """ import desimodel.footprint import desimodel.focalplane targetx = 116.279135121 targety = -372.885546514 #6.644525362152656, -9.055425745149217 GUARANTEED TO BE IN GFA (RA, DEC) #x, y = desimodel.focalplane.radec2xy(7.11, -10.53, targetx, targety) # If any calculated area is under the threshold area, it is mathematically impossible THRESHOLD_AREA = 469.7 MIN_TOLERANCE = 0.001 # The area boundary's value is the area of the gfa plus some tolerance. # x and y hold the 40 new GFA coordinates x, y = shift_gfa_points(x_tolerance, y_tolerance) targetx = np.asarray(targetx) targety = np.asarray(targety) # Method to check if point is inside the rectangle for gfaid in range(0, 4, 4): # a1 through a4 are edge lengths of the rectangle formed by corners of the GFAs a1 = np.sqrt((x[gfaid] - x[gfaid + 1])**2 + (y[gfaid] - y[gfaid + 1])**2) a2 = np.sqrt((x[gfaid + 1] - x[gfaid + 2])**2 + (y[gfaid + 1] - y[gfaid + 2])**2) a3 = np.sqrt((x[gfaid + 2] - x[gfaid + 3])**2 + (y[gfaid + 2] - y[gfaid + 3])**2) a4 = np.sqrt((x[gfaid + 3] - x[gfaid])**2 + (y[gfaid + 3] - y[gfaid])**2) # b1 through b4 are the line segments from each corner to the target location b1 = np.sqrt((x[gfaid] - targetx)**2 + (y[gfaid] - targety)**2) b2 = np.sqrt((x[gfaid + 1] - targetx)**2 + (y[gfaid + 1] - targety)**2) b3 = np.sqrt((x[gfaid + 2] - targetx)**2 + (y[gfaid + 2] - targety)**2) b4 = np.sqrt((x[gfaid + 3] - targetx)**2 + (y[gfaid + 3] - targety)**2) # Calculating areas of triangles using Heron's Formula u1 = (a1 + b1 + b2) / 2.0 u2 = (a2 + b2 + b3) / 2.0 u3 = (a3 + b3 + b4) / 2.0 u4 = (a4 + b4 + b1) / 2.0 area1 = np.sqrt(u1 * (u1 - a1) * (u1 - b1) * (u1 - b2)) area2 = np.sqrt(u2 * (u2 - a2) * (u2 - b2) * (u2 - b3)) area3 = np.sqrt(u3 * (u3 - a3) * (u3 - b3) * (u3 - b4)) area4 = np.sqrt(u4 * (u4 - a4) * (u4 - b4) * (u4 - b1)) 
targetarea = area1 + area2 + area3 + area4 assert np.all(targetarea > THRESHOLD_AREA) return targetarea def degrees2xytolerance(buffer_arcsec): """ Used as a helper function to the on_gfa function to find the tolerance in x and y given a tolerance in arcseconds Parameters: buffer_arcsec: a tolerance in arcseconds for checking if a point is on the GFA Returns: x_tolerance: tolerance in x in mm y_tolerance: tolerance in y in mm """ # Uses the center of a given GFA from DESI-0530-v13 Excel Spreadsheet to find the tolerance import desimodel.io import scipy.interpolate platescale = desimodel.io.load_platescale() fn = scipy.interpolate.interp1d(platescale['radius'], platescale['radial_platescale'], kind = 'quadratic') fn1 = scipy.interpolate.interp1d(platescale['radius'], platescale['az_platescale'], kind = 'quadratic') # Center of a given GFA from DESI-0530-v13 Excel Spreadsheet x = 333.738 y = 217.766 radius = np.sqrt(x**2 + y**2) # Platescales are in units of microns per arcsecond r_ps = fn(radius) az_ps = fn(radius) x_tolerance = buffer_arcsec / (10**3) * r_ps y_tolerance = buffer_arcsec / (10**3) * az_ps return x_tolerance, y_tolerance def shift_gfa_points(deltax, deltay): """ Used as a helper function to the on_gfa function to find the new GFA locations after incorporating a tolerance in x and y Parameters: deltax: tolerance in x in mm deltay: tolerance in y in mm Returns: Returns the 40 new GFA locations in x and y """ import numpy as np x = [-125.10482863, -129.83038525, -159.04283509, -154.31646944] y = [-370.01790486, -384.56223777, -375.05643893, -360.51151824] point1 = [x[2], y[2]] point2 = [x[1], y[1]] vector1 = [(point2[0] - point1[0]), (point2[1] - point1[1])] vector2 = [1, 0] # Angle between vector1 and vector 2 using dot product angle = np.arccos((np.dot(vector1, vector2))/(np.sqrt((vector1[0]**2) + (vector1[1]**2)))) shiftmat = np.zeros(shape=(2,2)) shiftmat[0] = [np.cos(angle), -np.sin(angle)] shiftmat[1] = [np.sin(angle), np.cos(angle)] reverseshift= 
np.zeros(shape=(2,2)) reverseshift[0] = [np.cos(angle), np.sin(angle)] reverseshift[1] = [-np.sin(angle), np.cos(angle)] # Shifts the initial coordinates to be parallel to the vector [1, 0] coord = np.zeros(shape=(2,1)) oldxcoord = x oldycoord = y for i in range(4): coord[0] = oldxcoord[i] coord[1] = oldycoord[i] newcoord = np.matmul(shiftmat, coord) oldxcoord[i] = newcoord[0] oldycoord[i] = newcoord[1] if(i == 0 or i == 1): x[i] = newcoord[0] + deltax else: x[i] = newcoord[0] - deltax if(i == 1 or i == 2): y[i] = newcoord[1] - deltay else: y[i] = newcoord[1] + deltay oldxcoord = x oldycoord = y for i in range(4): coord[0] = oldxcoord[i] coord[1] = oldycoord[i] newcoord = np.matmul(reverseshift, coord) oldxcoord[i] = newcoord[0] oldycoord[i] = newcoord[1] x[i] = newcoord[0] y[i] = newcoord[1] rotatemat = np.zeros(shape=(2,2)) rotatemat[0] = [np.cos(np.radians(36)), -np.sin(np.radians(36))] rotatemat[1] = [np.sin(np.radians(36)), np.cos(np.radians(36))] return find_new_gfa_coordinates(x, y, rotatemat) def find_new_gfa_coordinates(x, y, rotatemat): """ Used as a helper function to the on_gfa function to find the new GFA coordinates given a list of x coordinates, y coordinates, and a rotation matrix Parameters: x: a list of x coordinates for the GFAs y: a list of y coordinates for the GFAs rotatemat: a matrix for rotating the respective coordinates Returns: x_all: a complete list of the 40 GFA x coordinates y_all: a complete list of the 40 GFA y coordinates """ import numpy as np x_all = np.zeros(shape=(40,1)) y_all = np.zeros(shape=(40,1)) coord = np.zeros(shape=(2,1)) gfacoord = np.zeros(shape=(4, 2)) oldxcoord = x oldycoord = y counter = 0 for j in range(10): for i in range(4): coord[0] = oldxcoord[i] coord[1] = oldycoord[i] newcoord = np.matmul(rotatemat, coord) oldxcoord[i] = newcoord[0] oldycoord[i] = newcoord[1] gfacoord[i] = [newcoord[0], newcoord[1]] x_all[counter] = newcoord[0] y_all[counter] = newcoord[1] counter += 1 return x_all, y_all def 
on_tile_gfa(tileid, targets, buffer_arcsec = 100): """ This function takes a tileid, a table of targets, and an optional buffer_arcsec parameter to return the indices of targets lying on the GFA as well as the GFA locations from 0-9 Parameters: tileid: (int) DESI tile ID, used to lookup telescope (RA, dec) targets: table with columns RA, DEC Options: buffer_arcsec: (float) additional buffer region around GFA to include Returns: targetindices: list of indices for targets that are covered by GFA number in corresponding gfaindices gfaindices: list of indices corresponding to 0-9 GFA location """ import desimodel.footprint telra, teldec = desimodel.footprint.get_tile_radec(tileid) return on_gfa(telra, teldec, targets['RA'], targets['DEC'], buffer_arcsec) def get_gfa_targets(targets, rfluxlim = 1000, tiles = None, buffer_arcsec = 100): """ This function takes a table of targets, as well as optional parameters including a minimum flux in the r-band, a list of tiles, and a buffer in arcseconds and returns a table of targets on the GFA satisfying a minimum flux_r Parameters: targets: table with columns RA, DEC, FLUX_R Options: rfluxlim: (float) r-band flux limit; default 1000 = rmag 15 tiles: table of tiles, default to desimodel.io.load_tiles() buffer_arcsec: (float) additional buffer region around GFA to include Returns subset of input `targets` with additional columns: TILEID: (integer) DESI tile ID GFA_LOC: (integer) GFA location [0-9] Note that the same target could be repeated with different TILEID, GFA_LOC Note also that the function returns an empty list if no targets are on any GFAs or of sufficient brightness """ if(tiles is None): import desimodel.io tiles = desimodel.io.load_tiles() import desimodel.footprint points = desimodel.footprint.find_points_in_tiles(tiles, targets['RA'], targets['DEC']) alltargetindices = [] tileidlist = [] gfaidlist = [] # Checks if the flux_r meets a minimum threshold brightindices = np.where(targets['FLUX_R'] > rfluxlim) 
if(brightindices[0].size == 0): return [] counter = 0 for lists in points: if lists: tileid = tiles[counter]['TILEID'] targetindices, gfaindices = on_tile_gfa(tileid, targets[brightindices[0]], buffer_arcsec) tileidlist.extend([tileid] * len(targetindices)) alltargetindices.extend(targetindices) gfaidlist.extend(gfaindices) counter += 1 validtargets = targets[brightindices[0]][alltargetindices] tileidlist = np.asarray(tileidlist) gfaidlist = np.asarray(gfaidlist) validtargets['TILEID'] = tileidlist validtargets['GFA_LOC'] = gfaidlist return validtargets ``` The code below simply plots the an arbitray focal plane showing the tile in blue, the GFA boundary including the buffer in red, and the actual GFA boundary in green. ``` import matplotlib.pyplot as plt import numpy as np #telra and teldec = 7.11 -10.53 #tileid is 23658 mindec = -12.5 maxdec = -7 minra = 5.4 maxra = 8.9 ra = [] dec = [] import desimodel.focalplane import desimodel.footprint import desimodel.io tiles = desimodel.io.load_tiles() while mindec < maxdec: startra = minra while startra < maxra: ra.append(startra) dec.append(mindec) startra += .02 mindec += .02 ra = np.asarray(ra) dec = np.asarray(dec) points = desimodel.footprint.find_points_radec(7.1, -10.5, ra, dec, 1.651) #plt.plot(ra[points], dec[points], 'b.') targetindices, gfaindices = on_gfa(7.11, -10.53, ra, dec, 100) plt.plot(ra[targetindices], dec[targetindices], 'r.') targetindices, gfaindices = on_gfa(7.11, -10.53, ra, dec, 0) plt.plot(ra[targetindices], dec[targetindices], 'g.') plt.show() ``` The function plot_gfa_targets() is a visualization tool that plots all targets in a given table that are on a GFA satisfying a minimum flux in the r-band in blue. It plots anything that exceeds a maximum flux in the r-band in red. This visualization tool may be useful in determining if a given GFA has too many extremely bright targets that may over-saturate the images. 
```
def plot_gfa_targets(targets, rfluxlim = 1000, rfluxmax = 10000, tiles = None, buffer_arcsec = 100, tileid = None):
    """
    This function takes a table of targets, as well as optional parameters
    including a minimum flux in the r-band, a list of tiles, a buffer in
    arcseconds, and a tileid number and uses blue to plot the targets on the
    GFA satisfying a minimum flux_r on either a specific tileid in the targets
    table, or all the targets on any tileid returned by get_gfa_targets. It
    also uses red to plot all targets on the GFA above a maximum flux_r.

    Parameters:
        targets: table with columns RA, DEC, FLUX_R
    Options:
        rfluxlim: (float) r-band flux limit; default 1000 = rmag 15
        rfluxmax: (float) r-band flux maximum; default 10000
        tiles: table of tiles, default to desimodel.io.load_tiles()
        buffer_arcsec: (float) additional buffer region around GFA to include
        tileid: (int) a unique identifier for a tile pointing
    """
    import matplotlib.pyplot as plt
    # NOTE(review): np is not imported in this cell — it comes from the
    # notebook's global namespace.
    if(tiles is None):
        import desimodel.io
        tiles = desimodel.io.load_tiles()
    # All GFA targets bright enough to be seen (blue); brighter than rfluxmax
    # is re-plotted in red as potentially saturating.
    valid = get_gfa_targets(targets, rfluxlim, tiles, buffer_arcsec)
    if(tileid is None):
        # Plot targets from every tile returned by get_gfa_targets.
        plt.plot(valid['RA'], valid['DEC'], 'b.')
        brightindices = np.where(valid['FLUX_R'] > rfluxmax)
        plt.plot(valid[brightindices[0]]['RA'], valid[brightindices[0]]['DEC'], 'r.')
    else:
        # Restrict the plot to the requested tile only.
        indices = np.where(valid['TILEID'] == tileid)
        plt.plot(valid[indices]['RA'], valid[indices]['DEC'], 'b.')
        brightindices = np.where(valid[indices]['FLUX_R'] > rfluxmax)
        plt.plot(valid[indices][brightindices[0]]['RA'], valid[indices][brightindices[0]]['DEC'], 'r.')
    plt.show()

from astropy.io import fits
from astropy.table import Table
# NOTE(review): os and sweep_dir come from the surrounding notebook namespace.
sweepname = 'sweep-280p050-290p055.fits'
sweep = Table.read(os.path.join(sweep_dir, sweepname))
plot_gfa_targets(sweep, tileid = None)
```

## Conclusions

Further development may be necessary once the GFAs have actually been built, and thus the GFA table should be updated, which can be done manually or through updating the code in build_gfa_table().
A threshold magnitude for flux in the r-band for targets should be determined, and a simple insertion of np.where(valid['FLUX_R'] > rfluxmax) can be put into the get_gfa_targets() function to alert observers of any targets that may potentially over-saturate the images. Since DESI is constantly evolving, I've tried to make my code heavily documented with comments, so that if changes need to be made, it will be simple to do so. Thanks again to Stephen Bailey for helping make my summer productive and engaging! Again, if any part of this notebook is confusing, I'd be happy to clear things up via email at woodywang153@gmail.com. Onwards, Woodrow Wang Stanford University
github_jupyter
# python 2.4 - pep8, clean code - typehints - mypy - enum - pytest # PEP8 * PEP8 is recommendation, not rule (but it's valuable to follow) * there are also tools for * linting code (`pip install pep8`) * autoformatting (`pip install black`) * see more https://realpython.com/python-pep8/ * imports in header of file # clean code * see overview in [python2.4.cleancode.ipynb](python2.4.cleancode.ipynb) # SOLID * *(\<Ctrl> + F, "SOLID")* https://www.pentalog.com/blog/clean-code-with-python # test driven development (TDD) # typehints ``` # %load python2.4.mypy.py from typing import List, Dict # def indent(size, content: List[str]): def indent(size, content): print(f'This is output of {len(content)} lines') for line in content: print(size * '.' + line) indent(3, 'micka masa mirek') indent(3, 'micka masa mirek'.split()) def indent_dict(content: List[Dict[str, int]]): for item in content: for key, value in item.items(): print(key) c = value / 2 print(c) dict_content = [ # vs {'age': '5'}, {'age': 5}, ] indent_dict(dict_content) ``` # mypy - static analysis types ``` !pip install mypy !mypy python2.4.mypy.py !mypy --strict python2.4.mypy.py ``` # enum - since python3.4 #### Example: Simplified traffic lights abstraction. 
```
# without enum
def describe_status(state):
    if state == 'red':
        print('stop')
    elif state == 'orange':
        print('prepare yourself')
    elif state == 'green':
        print('go!')
    else:
        # Fixed typo: 'Uknown' -> 'Unknown' (consistent with the enum version below).
        print(f'Unknown state {state}')

describe_status('red')
describe_status('orange')
describe_status('green')
```

Possible problems:

* invalid value `"yellow"`
* invalid type `5`

```
describe_status('yellow')
describe_status(5)

from enum import Enum, auto

class Colors(Enum):
    red = 'red'
    orange = 'orange'
    green = 'green'
    orangegreen = 'orangegreen'

print(Colors)

Colors.red == Colors.green

list(Colors)

Colors.red in Colors
```

##### exercise

create `FlowerColors` and try yourself: `FlowerColors.red == Colors.red`

```
class FlowerColors(Enum):
    red = 'red'

# Members of different Enum classes are never equal, even with the same value.
FlowerColors.red == Colors.red

# with enum
def describe_status(state):
    if state == Colors.red:
        print('stop')
    elif state == Colors.orange:
        print('prepare yourself')
    elif state == Colors.green:
        print('go!')
    else:
        print(f'Unknown state {state}')
        # print(f'Unknown state {state.name}')

describe_status(Colors.red)
describe_status(FlowerColors.red)
```
* Use long and descriptive names for testing functions
* Make testing code read as much as or even more than the running code.
    * Be explicit, verbose.
    * A unit test whose purpose is unclear is not very helpful in this case.
* Another use of the testing code is as an introduction to new developers

https://docs.python-guide.org/writing/tests/

# how to test - case study

Let's extend previous example from Street light to Traffic semaphore.

Requirements:
1. defined state after creation is `red`
1. `change_status()` to move to another state
1. `get_status()` to return current state
1. `set_status(Colors.*)` to set current state by passing color from enum `Colors`

```
# semaphore.py
from enum import Enum #, auto

class Colors(Enum):
    red = 'red'
    orange = 'orange'
    green = 'green'
    orangered = 'orangered'

class Semaphore:
    # One full traffic-light cycle: red -> red+orange -> green -> orange -> red ...
    sequence = [
        Colors.red,
        Colors.orangered,
        Colors.green,
        Colors.orange
    ]

    def __init__(self):
        # Requirement 1: initial state is red (index 0 of `sequence`).
        self.current_index = 0

    def change_state(self):
        # Advance to the next state, wrapping around so a full cycle returns
        # to red instead of running past the end of `sequence` (IndexError).
        self.current_index = (self.current_index + 1) % len(self.sequence)

    def get_status(self):
        return self.sequence[self.current_index]

    def set_status(self, status):
        # `list.index` raises ValueError when `status` is not in `sequence`.
        index = self.sequence.index(status)
        self.current_index = index

sem = Semaphore()
print(sem.get_status())
sem.change_state()
print(sem.get_status())
sem.set_status(Colors.green)
print(sem.get_status())

status = sem.get_status()
# `status` is a Colors member, not a str, so concatenate its `.value`
# (plain `'Status is ' + status` would raise TypeError).
print('Status is ' + status.value)
```

## Why we need test-runner?
Naive test suite:

```
sem = Semaphore()
status = sem.get_status()
if status == Colors.red:
    print('ok - init status is red')
else:
    print('fail - init status is NOT red')

sem.change_state()
# Read the state back with get_status(); change_state() itself returns None.
status = sem.get_status()
if status == Colors.orangered:
    print('ok - next status is orangered')
else:
    print('fail - next status is NOT orangered')
```

# pytest

- `pytest` is **test runner**
- test case as function
- tests discovery
- junit export
- fixture
- better than builtin `unittests`

**Exercise:** figure out several tests (`def test_...(self): ...`)\
*type single `X` per each test case (as comment in Barevné lístečky)*

```
# 7 test cases so far...
def test_initial_state_is_red(): ...
def test_next_state_after_red(): ...
def test_next_state_after_orangered(): ...
def test_next_state_after_green(): ...
def test_next_state_after_orange(): ...
def test_full_cycle(): ...
def test_set_invalid_color(): ...
```

Notes:
* `initial_state_is_red` is great
* `next_state_after_red` is ok, better is to contain `_is_orangered`
* stdout is captured by default (`-s` to display anyway)
* show verbose output (`-v`) with percents
* `--lf` (`--last-failed`)
* `--setup-plan` (

```
def test_initial_state_is_red():
    sem = Semaphore()
    assert sem.get_status() == Colors.red
```

* PyCharm integration
    * **Run pytest in \<filename\>** from right click in editor
    * execute single test case
    * auto re-run tests after change (💗), also with minimized panel
    * by default passed tests are hidden

### fixtures

```
from pytest import fixture

# Fixtures must be decorated, otherwise pytest treats the parameter name
# as an unknown fixture and errors out.
@fixture
def semaphore():
    return Semaphore()

@fixture
def red_semaphore(semaphore):
    # The class method is set_status (there is no set_state).
    semaphore.set_status(Colors.red)
    return semaphore

def test_red_sem(red_semaphore):
    # Compare the semaphore's state, not the semaphore object itself.
    assert red_semaphore.get_status() == Colors.red

def test_change_from_red(red_semaphore):
    ...
```

### expecting exceptions

```
from pytest import raises

def test_exception():
    with raises(Exception):
        raise KeyError('this was not expected')
        #raise Exception('this was not expected')
```
github_jupyter
``` import dash from dash.dependencies import Input, Output, State import dash_core_components as dcc import dash_html_components as html import dash_table_experiments as dt import json import pandas as pd import numpy as np import plotly from IPython import display import os def show_app(app, port = 9999, width = 700, height = 350, offline = False, in_binder = None): in_binder ='JUPYTERHUB_SERVICE_PREFIX' in os.environ if in_binder is None else in_binder if in_binder: base_prefix = '{}proxy/{}/'.format(os.environ['JUPYTERHUB_SERVICE_PREFIX'], port) url = 'https://hub.mybinder.org{}'.format(base_prefix) app.config.requests_pathname_prefix = base_prefix else: url = 'http://localhost:%d' % port iframe = '<a href="{url}" target="_new">Open in new window</a><hr><iframe src="{url}" width={width} height={height}></iframe>'.format(url = url, width = width, height = height) display.display_html(iframe, raw = True) if offline: app.css.config.serve_locally = True app.scripts.config.serve_locally = True return app.run_server(debug=False, # needs to be false in Jupyter host = '0.0.0.0', port=port) app = dash.Dash() DF_WALMART = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/1962_2006_walmart_store_openings.csv') DF_GAPMINDER = pd.read_csv( 'https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv' ) DF_GAPMINDER = DF_GAPMINDER[DF_GAPMINDER['year'] == 2007] DF_GAPMINDER.loc[0:20] DF_SIMPLE = pd.DataFrame({ 'x': ['A', 'B', 'C', 'D', 'E', 'F'], 'y': [4, 3, 1, 2, 3, 6], 'z': ['a', 'b', 'c', 'a', 'b', 'c'] }) ROWS = [ {'a': 'AA', 'b': 1}, {'a': 'AB', 'b': 2}, {'a': 'BB', 'b': 3}, {'a': 'BC', 'b': 4}, {'a': 'CC', 'b': 5}, {'a': 'CD', 'b': 6} ] app.layout = html.Div([ html.H4('Gapminder DataTable'), dt.DataTable( rows=DF_GAPMINDER.to_dict('records'), # optional - sets the order of columns columns=sorted(DF_GAPMINDER.columns), row_selectable=True, filterable=True, sortable=True, selected_row_indices=[], id='datatable-gapminder' ), 
html.Div(id='selected-indexes'), dcc.Graph( id='graph-gapminder' ), ], className="container") @app.callback( Output('datatable-gapminder', 'selected_row_indices'), [Input('graph-gapminder', 'clickData')], [State('datatable-gapminder', 'selected_row_indices')]) def update_selected_row_indices(clickData, selected_row_indices): if clickData: for point in clickData['points']: if point['pointNumber'] in selected_row_indices: selected_row_indices.remove(point['pointNumber']) else: selected_row_indices.append(point['pointNumber']) return selected_row_indices @app.callback( Output('graph-gapminder', 'figure'), [Input('datatable-gapminder', 'rows'), Input('datatable-gapminder', 'selected_row_indices')]) def update_figure(rows, selected_row_indices): dff = pd.DataFrame(rows) fig = plotly.tools.make_subplots( rows=3, cols=1, subplot_titles=('Life Expectancy', 'GDP Per Capita', 'Population',), shared_xaxes=True) marker = {'color': ['#0074D9']*len(dff)} for i in (selected_row_indices or []): marker['color'][i] = '#FF851B' fig.append_trace({ 'x': dff['country'], 'y': dff['lifeExp'], 'type': 'bar', 'marker': marker }, 1, 1) fig.append_trace({ 'x': dff['country'], 'y': dff['gdpPercap'], 'type': 'bar', 'marker': marker }, 2, 1) fig.append_trace({ 'x': dff['country'], 'y': dff['pop'], 'type': 'bar', 'marker': marker }, 3, 1) fig['layout']['showlegend'] = False fig['layout']['height'] = 800 fig['layout']['margin'] = { 'l': 40, 'r': 10, 't': 60, 'b': 200 } fig['layout']['yaxis3']['type'] = 'log' return fig app.css.append_css({ 'external_url': 'https://codepen.io/chriddyp/pen/bWLwgP.css' }) show_app(app) ```
github_jupyter
<a href="https://colab.research.google.com/github/Ducksss/Project-Cactus/blob/main/Project_Cactus_(Training).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> <div id="top"></div> <!-- PROJECT SHIELDS --> <!-- *** I'm using markdown "reference style" links for readability. *** Reference links are enclosed in brackets [ ] instead of parentheses ( ). *** See the bottom of this document for the declaration of the reference variables *** for contributors-url, forks-url, etc. This is an optional, concise syntax you may use. *** https://www.markdownguide.org/basic-syntax/#reference-style-links --> [![Contributors](https://img.shields.io/github/contributors/Ducksss/Project-Cactus.svg)][contributors-url] [![Forks](https://img.shields.io/github/forks/Ducksss/Project-Cactus.svg)][forks-url] [![Stargazers](https://img.shields.io/github/stars/Ducksss/Project-Cactus.svg)][stars-url] [![MIT License](https://img.shields.io/github/license/Ducksss/Project-Cactus.svg)][license-url] [![Open In Collab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1NBbmGYUZbKq0fjkI2OJIJx_3PV_Rdf7Z?usp=sharing) <!-- PROJECT LOGO --> <br /> <div align="center"> <a href="https://github.com/Ducksss/Project-Cactus"> <img src="assets/cactus-bg.png" alt="Logo" width="80" height="80"> </a> <h3 align="center">Project Cactus</h3> <p align="center"> A cross-platform AI Fake News Detector <br /> <br /> <a href="https://project-cactus-c9549.web.app/">Web Application</a> · <a href="#browser-extension">Browser Extension</a> · <a href="https://github.com/Ducksss/Project-Cactus/issues">Report Bugs</a> · <a href="https://github.com/Ducksss/Project-Cactus/issues">Request Features</a> </p> </div> > Note: Make sure to use a high-ram runtime, else the model may crash due to it's size ## Download the Data Set ``` !git clone https://github.com/Ducksss/FakeNews.git ``` ## Library Imports ``` import pandas 
as pd import tensorflow as tf import tensorflow.keras.layers as tfl import zipfile from tensorflow.keras import Sequential, Input from tensorflow.keras.utils import get_file BATCH_SIZE = 64 ``` ### Settings ``` #@title Define Hyperparameters BATCH_SIZE = 64 #@param {type:"integer"} max_words = 1000000 #@param {type:"integer"} checkpoint_path = "/tmp/checkpoints" #@param {type:"string"} save_dir = "saved_model" #@param {type:"string"} ``` ## Data Ingestion ``` dataset_dir = "data/fakeNews.csv" df = pd.read_csv(dataset_dir) df.head() max_seqlen = df["title"].apply(lambda x : len(x.split())).max() max_seqlen dataset_len = len(df) dataset_len def train_test_split(dataset, dataset_len, val_split=0.2, shuffle=True, shuffle_size=50000): if shuffle: dataset = dataset.shuffle(shuffle_size, seed=42) train_size = int((1-val_split) * dataset_len) val_size = int(val_split * dataset_len) try: train_ds = dataset.take(train_size).map(lambda x : (x["title"], x["isFakeNews"])) val_ds = dataset.skip(train_size).take(val_size).map(lambda x : (x["title"], x["isFakeNews"])) except: train_ds = dataset.take(train_size) val_ds = dataset.skip(train_size).take(val_size) train_ds = train_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE) val_ds = val_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE) return train_ds, val_ds ds = tf.data.experimental.make_csv_dataset(dataset_dir, select_columns=[ "title", "isFakeNews" ], batch_size=BATCH_SIZE) train_ds, val_ds = train_test_split(ds, dataset_len) val_ds, test_ds = train_test_split(val_ds, int(dataset_len * 0.2), val_split=0.5) ``` ## CactusNet ``` @tf.keras.utils.register_keras_serializable() # Decorator to allow us to save the TextVectorizer layer def text_preprocessor(text): punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~' stopwords = {'whom', 'all', 'shouldn', 'wouldn', 'how', 's', 'they', 'were', 'mustn', 'after', 'who', 'its', 'our', 't', 'a', 'very', 'an', 'do', 'be', 'to', 'can', 'had', 'i', 'these', 'himself', 'up', 'just', 'them', 
'now', 'has', 'too', 'below', 'did', 'shan', 'until', 'during', 'him', 'into', 'have', "you'd", 'haven', 'theirs', 'ourselves', 'once', "isn't", 'than', "it's", 'wasn', 'yours', "mightn't", 'here', 'ours', 'her', 'doing', 'd', 'yourself', 'y', 'before', 'does', 'then', 'between', 'some', 'with', "needn't", 'but', 'didn', "shouldn't", 'that', "weren't", 'which', 'or', "hasn't", 'own', 'about', 'what', "aren't", 'couldn', 'doesn', 'as', "wouldn't", 'hasn', 'no', 'm', 'hers', 'hadn', 'aren', 'while', 'will', "don't", "shan't", 'why', 'at', 'mightn', 'themselves', 'weren', "that'll", 'isn', 'only', 'the', 'been', "couldn't", 'don', 'should', 'same', 'both', 'where', 'was', 'me', 'through', "hadn't", 've', 'against', 'if', 'under', 'such', 'is', 'll', "haven't", 'ain', 're', "didn't", 'nor', 'not', 'being', 'are', 'your', 'over', 'off', 'having', 'by', "won't", 'myself', 'out', 'more', "wasn't", "doesn't", 'won', 'this', 'my', 'again', 'ma', 'his', 'when', 'you', 'there', 'herself', 'yourselves', 'itself', 'of', "she's", 'needn', 'we', "mustn't", 'above', "you're", 'so', 'it', "should've", 'am', 'he', 'those', 'further', 'she', 'down', 'on', "you'll", 'for', 'other', 'any', 'their', 'from', 'each', 'most', 'because', 'and', 'few', 'in', "you've", 'o'} text = tf.strings.lower(text) text = tf.strings.strip(text) text = tf.strings.regex_replace(text, "<[^>]+>", "") # remove html tags text = tf.strings.regex_replace(text, '[%s]' % punctuation, "") # remove punctuation for stopword in stopwords: text = tf.strings.regex_replace(text, r"\b%s\b" % stopword, "") # remove stopwards return text sample_text = "<p>I'm very <span class='bold'>mad</span> about the results of the election!!! 
Who agrees with me?</p>" print(text_preprocessor(sample_text)) def create_tokenizer(train_ds, max_words, max_seqlen, output_mode = "int", standardize = "lower_and_strip_punctuation"): train_text = train_ds.map(lambda x, y : x) tokenizer = tfl.TextVectorization( standardize=standardize, max_tokens=max_words, output_sequence_length=max_seqlen, output_mode=output_mode ) tokenizer.adapt(train_text) return tokenizer tokenizer = create_tokenizer(train_ds, max_words, max_seqlen, standardize=text_preprocessor) def load_pretrained_embeddings_v1(url, output_file, embedding_file, embedding_dim, vocabulary, max_words, max_seqlen): embedding_vecs = dict() word_idx = dict(zip(vocabulary, range(len(vocabulary)))) file_dir = get_file(output_file, url) with zipfile.ZipFile(file_dir, "r") as f: f.extractall("/content/") with open(embedding_file, "r") as f: for line in f: values = line.split() word = values[0] embedding_vec = np.asarray(values[1:], dtype='float32') embedding_vecs[word] = embedding_vec embedding_matrix = np.zeros((max_words, embedding_dim)) for word, idx in word_idx.items(): if idx < max_words: embedding_vec = embedding_vecs.get(word) if embedding_vec is not None: embedding_matrix[idx] = embedding_vec embedding = tfl.Embedding(max_words, embedding_dim, embeddings_initializer=tf.keras.initializers.Constant(embedding_matrix), mask_zero=False, input_length=max_seqlen, trainable=False) return embedding vocabulary = tokenizer.get_vocabulary() embedding = load_pretrained_embeddings_v1("https://nlp.stanford.edu/data/glove.twitter.27B.zip", "glove.twitter.27B.zip", "glove.twitter.27B.100d.txt", 100, vocabulary=vocabulary, max_words=max_words, max_seqlen=max_seqlen) def create_cactusnet_v2(tokenizer, embedding_layer, max_words, max_seqlen, optimizer='adam'): model = Sequential( [ tokenizer, embedding_layer, tfl.Bidirectional(tfl.LSTM(128, return_sequences=True, input_shape=(max_words, max_seqlen))), tfl.Bidirectional(tfl.LSTM(128, return_sequences=False)), tfl.Dropout(0.2), 
tfl.Dense(1, activation='sigmoid') ] ) model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics = ['accuracy']) model.summary() return model model = create_cactusnet_v2(tokenizer, embedding, max_words, max_seqlen) ``` ### Model Training ``` from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, TerminateOnNaN, EarlyStopping callbacks = [ ModelCheckpoint(checkpoint_path), ReduceLROnPlateau(), TerminateOnNaN(), EarlyStopping(patience=2) ] def train_model(model, training_ds, validation_ds = None, val_split = 0.2, batch_size = BATCH_SIZE, epochs=5, callbacks=callbacks): if validation_ds is None: history = model.fit(training_ds, validation_split=val_split, batch_size=batch_size, epochs=epochs, callbacks=callbacks) else: history = model.fit(training_ds, validation_data=validation_ds, batch_size=batch_size, epochs=epochs, callbacks=callbacks) return history history = train_model(model, train_ds, val_ds, epochs=1) model.evaluate(test_ds) model.save(save_dir) ```
github_jupyter
# Section I. INTRODUCTION # Chapter 1. What is Robotics? What is a robot? ---------------- It might come as a surprise that it is actually tricky to define the word "robot." Contrast the idea of a science fiction android with a remote control flying drone. The android (appears) to think, feel, and move with the intellect of a human, while the drone simply executes the command of its operator. Yet, most people are in agreement that they can be rightfully called "robots." There are some *necessary* conditions for something to be a robot. A robot is a machine that: - Moves in the physical world, - Senses its environment and/or itself, and - Uses computational elements to generate its movement. However, these are not *sufficient* for qualifying a machine as a robot. For example, your washing machine (if built in the last 10 years) will likely have all of these characteristics. A washing machine certainly moves; it senses the settings of its controls, the weight of your laundry, the incoming water temperature, possibly the dirtiness of your laundry, and so on; and will adjust its cycle speeds and duration accordingly using computation circuits. But there seems to be something missing here, since few people would refer to their washing machine as a robot! Similarly, your (relatively modern) car performs fuel injection, anti-lock braking, cruise control, and airbag deployment using several computers and sensors to monitor fuel efficiency, tire pressure, velocity, seat occupancy, etc. Yet, we are not ready to call it a robot just yet<sup><a href="#footnote1">[1]</a></sup>. Let us propose a couple of other possible criteria to call a system a robot. What about if a robot were required to: - Exhibit autonomy or automation, and - Exhibit apparently intelligent behavior? These provide a sharper boundary, as they would disqualify the washing machine and non-self-driving car from consideration as robots. 
But there exist many robots that are not autonomous, such as the remote-controlled drone mentioned above, or surgical robots under surgeon control, like the Intuitive Surgical Da Vinci robot. An intelligence criterion is also difficult to apply because it is challenging to define "intelligence" without delving into a philosophical minefield! By using the phrase "apparently intelligent", we sidestep the issue by assuming a human judge. But what is agreed upon as "intelligent" may change from year to year; compared to those devices in the 1950's, our modern washing machines and cars are actually quite smart! Perhaps as the control and artificial intelligence technology used in robots becomes more widely adopted, the line dividing robot and non-robot machines will become blurrier and blurrier... until the term "robot" has lost its meaning.

Overall, it may be a pointless exercise to extract a precise definition of a robot. In any case, the layperson's "I know a robot when I see one" should suffice for the purposes of this book.

-------------------------------------------------------

<a name="#footnote1"></a> <sup>[1]</sup>: Presumably, by the time of publication, self-driving cars are not yet widely commercially available.

How to develop a robot
----------------------

A roboticist is a thinker, not a tinkerer. Although many students begin tinkering with robots at a relatively young age, this process is not usually the best way to develop a robot that performs a task well. Robotics is a more *deliberate* way of reaching the end goal that is informed by decades of prior research, analysis, and practical experience. One way to define it would be as follows:

> **Robotics**: the study of systematic, principled techniques to aid in
> the development of robots that perform desired functions.

Although robots are some of the most complex machines in the world, there is a logic to how they should be developed.
A good roboticist will follow this logic whilst using any available techniques at his/her disposal. Specifically, the recipe for developing an intelligent robot must follow these major steps: 1. **Fabrication**: Design and fabricate a mechanism with sensing, actuation, and computing capabilities to fulfill the intended task. 2. **Measurement**: Develop measurement apparatus(es) and a testing protocol to observe the function of the mechanism 3. **Calibration**: Use measurements to calibrate (or learn) a model of the mechanism's dynamics, actuation, and sensing. 4. **Control**: Develop and validate a control sub-system that maintains system stability and provides medium-level functionality for planning. 5. **Knowledge representation**: Decide upon a knowledge representation to be shared between planning and perception sub-systems. 6. **Perception**: Develop and evaluate a perception sub-system that produces knowledge relevant to the task (robot state, maps, object identities) 7. **Planning**: Implement and test a planning sub-system that generates feasible trajectories to accomplish high-level tasks. 8. **Supervisor**: Develop a high-level supervisor that schedules tasks, provides a user interface, etc. 9. **Testing and evaluation**: Test the entire system in the field (on the real task in the real world, and perhaps with human end-users). It is important to note that these steps are almost never followed linearly, because robot design is a cyclic process of trial and error. Any robotics project will incur many, many *design cycles* over its lifetime. For example, unless the team is limited to an established robot platform, or purchasing off-the-shelf parts, redesigns of the mechanism usually occur after steps 3, 4, and 6. Mechanical redesigns may also occur after planning tests to make task execution more successful. On the software side, new knowledge, perception, and planning requirements are bound to arise as tasks are tested more thoroughly. 
After testing with end-users, it is not uncommon to go all the way "back to the drawing board" to build a new mechanism! A wise roboticist will develop their later components to rapidly accommodate minor mechanical changes. Buying an established robot platform can greatly speed up development time by shortcutting steps 1-4, but many vendors sell fairly raw hardware (requiring a rehash of steps 2-4). Also, there may be locked-in decisions that prevent certain functionality to be implemented later. To use a robot as a haptic device, make sure its motor controllers provide high-rate feedback and a force control mode! To have your robot navigate outdoors, make sure that it has a laser sensor or stereo vision rather than a structured light sensor! This makes it very important to examine the technical specs of a robot &mdash; even the most mundane details &mdash; with a fine-toothed comb before making a purchase. The theory, mathematics, and algorithms that are discussed in this book are designed to facilitate the development of functional robots. For example, it is unnecessary to go to "square one" for every component of a robot, as long as that component is deeply understood. A good roboticist will understand which off-the-shelf techniques apply, and where. However, there is no substitute for real-world testing! Testing is one of the most painful parts of robotics, but ultimately one of its most satisfying. Although a given technique might be theoretically beautiful, it will have been largely tested in the lab, making certain idealizations of the real world, or for a slightly different use case. The bulk of time spent doing robotics work is usually tweaking, tuning, and testing these tried-and-true techniques until they fit for the problem at hand. But once the technique is validated by thorough field testing, its merits are unimpeachable! In summary, a good roboticist: - Understands the robot development process. 
- "Minds the gaps:" understands how their sub-system works with other components of the project. - Is not afraid of testing. - Clearly communicates the assumptions, performance measures, and limitations of their sub-system. - Understands the assumptions made by any technique before employing it successfully in practice &mdash; or making the tweaks necessary to make it work. - Is aware of classical and state-of-the-art techniques. - Is up-to-date on current design tools, hardware, software, algorithms, and programming languages used in robotics. ## Abstractions > **System** (n). A thing of such complexity that a human can only begin to comprehend it using cartoons. This statement, although tongue-in-cheek, is actually fairly accurate. A system is composed of many interacting parts, each of which is individually complex, and it would take ages to understand deeply how everything works and interacts. The "cartoons" referred to above are known (especially in the computing world) as **abstractions**. An abstraction is a "black box" with inputs and outputs that performs some understandable function, which lets you ignore, to some extent, what goes on "under the hood". For example, to a first approximation, the basic function of a car is to turn on when a key is turned in the ignition, steer when the steering wheel is turned, accelerate when the accelerator is pressed, and decelerate when the brake is pressed. This is the implied *contract* that the abstraction of a car makes with to a user; it makes little difference what type of material the tires are made of, the amount of wear on the brake pads, the function of the differential, whether the engine's fuel air mixture is controlled by computer or carburetor, whether the car's chassis was welded by robot or by human, or even whether the motor is an electric or combustion engine. 
It can generally be assumed that driving the car will take you from point A to point B, assuming that A and B are connected by roads and that we know how to operate a car. But a car is an incredibly complex machine, comprised of tens of thousands of mechanical parts, and modern cars have millions of lines of computer code. We don't have to understand each of these little components; the car just works as a transportation device. This is the cartoon we tell ourselves, and most of the time the car operates just as the cartoon tells us. Ah, you say, but what if the car is out of gas, or a tire is flat, or a family of bears has broken into your car and set up a den in the back seat? Well, these areas in which the cartoon "breaks down". This is also known as a *leaky abstraction*, because some factors that were unspecified in our abstract, assumed contract have "leaked" into the black box to cause unexpected or undesirable behavior. A famous software engineering principle states that ["all abstractions are, to some extent, leaky"](https://www.joelonsoftware.com/2002/11/11/the-law-of-leaky-abstractions/). This certainly holds true for robotics as well. The development of a robot will certainly generate many interacting components, each of which performs a complex mechanical or software function. Due to this complexity, multiple engineers will work on multiple components, and the only way to tractably comprehend the entire system is through abstraction. A good engineer will properly *label* their component and *summarize* its function in an easy to understand manner. 
E.g., the perception module gives the identity and location of all of the interesting things in view of the robot's cameras; the planning module waits for an instruction from a user and then plans a motion to act as requested; the controller module executes a current motion, and updates the motion whenever the planner requests it; the UI module shows the user the state of the robot, performs speech recognition, and translates words into recognized commands. Moreover, she will *document* the components' specifications in a way that other engineers can use it in a predictable manner. An important function of abstractions is that they let us swap **implementations** of what goes on "under the hood". We will describe several IK solvers, motion planners, trajectory optimizers, state estimators, and object recognizers. All of them, although they may perform the same intended function, will differ in terms of performance characteristics (e.g., for algorithms, computational complexity, failure rates, network usage; for hardware, speed, strength, noise, and power consumption). Different versions of an algorithm's code will also be swappable, and may have vastly different performance characteristics. For example, 3D mapping on a GPU is orders of magnitude faster than doing it on a CPU. It is a general rule that there will be many potential implementations of a given robot component, and more will be developed every year. A good architect of a robotic system will need to research and understand the state-of-the-art and plan how the system is developed to optimize the trade-offs between performance, resource usage, and implementation difficulty. A major goal of this book is to aid in the process of systems engineering by: 1. Defining standard abstractions used in robotics 2. Describing where these abstractions leak, why they leak, and what can be done to plug the leaks to the extent possible 3. Presenting multiple implementations, and explain their performance characteristics
github_jupyter
#CM360 Data Warehouse Deploy a BigQuery dataset mirroring CM360 account structure. Foundation for solutions on top. #License Copyright 2020 Google LLC, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. #Disclaimer This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team. This code generated (see starthinker/scripts for possible source): - **Command**: "python starthinker_ui/manage.py colab" - **Command**: "python starthinker/tools/colab.py [JSON RECIPE]" #1. Install Dependencies First install the libraries needed to execute recipes, this only needs to be done once, then click play. ``` !pip install git+https://github.com/google/starthinker ``` #2. Set Configuration This code is required to initialize the project. Fill in required fields and press play. 1. If the recipe uses a Google Cloud Project: - Set the configuration **project** value to the project identifier from [these instructions](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md). 1. If the recipe has **auth** set to **user**: - If you have user credentials: - Set the configuration **user** value to your user credentials JSON. - If you DO NOT have user credentials: - Set the configuration **client** value to [downloaded client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md). 1. 
# Project and credential configuration for the StarThinker runner.
from starthinker.util.configuration import Configuration

CONFIG = Configuration(
    project="",                   # Google Cloud project identifier
    client={},                    # installed-client credentials (when no user JSON)
    service={},                   # service-account credentials
    user="/content/user.json",    # path to user credentials JSON
    verbose=True,
)

# Recipe parameters; substituted into TASKS later via json_set_fields().
FIELDS = {
    'auth_bigquery': 'service',  # Credentials used for writing data.
    'auth_cm': 'service',        # Credentials used for reading data.
    'recipe_slug': '',           # Name of Google BigQuery dataset to create.
    'accounts': [],              # List of account ids to pull.
}

print("Parameters Set To: %s" % FIELDS)
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields


def _slug_field(order, description):
    """Build a StarThinker 'recipe_slug' field placeholder.

    json_set_fields() later replaces each placeholder with the value of
    FIELDS['recipe_slug'].  A fresh dict is built per call so the in-place
    substitution never aliases two tasks to one object.
    """
    return {'field': {'name': 'recipe_slug', 'kind': 'string', 'order': order,
                      'default': '', 'description': description}}


def _cm_list_task(function, query=None, iterate=True, parameters=None):
    """Build one CM360 'google_api' task writing to a BigQuery table.

    The original generated recipe repeated this dict ~40 times verbatim;
    this builder removes the duplication while producing an equal structure.

    function   -- dfareporting API call, e.g. 'campaigns.list'; the output
                  table name is derived from it ('CM360_<endpoint>').
    query      -- BigQuery query that produces the kwargs for each API call;
                  None means the call takes no kwargs (first seed call only).
    iterate    -- whether the API response is paged/iterated.
    parameters -- optional parameter placeholders for the driving query.
    """
    task = {
        'auth': 'user',
        'api': 'dfareporting',
        'version': 'v3.4',
        'function': function,
        'iterate': iterate,
        'results': {'bigquery': {
            'auth': 'user',
            'dataset': _slug_field(4, 'Name of Google BigQuery dataset to create.'),
            'table': 'CM360_' + function.split('.')[0],
        }},
    }
    if query is None:
        task['kwargs'] = {}
    else:
        source = {
            'auth': 'user',
            'dataset': _slug_field(0, 'Google BigQuery dataset to create tables in.'),
            'query': query,
            'legacy': False,
        }
        if parameters is not None:
            source['parameters'] = parameters
        task['kwargs_remote'] = {'bigquery': source}
    return {'google_api': task}


# Driving queries shared by the per-endpoint tasks.
_PER_ACCOUNT = 'SELECT id AS accountId FROM `CM360_accounts`;'
_ONE_ACCOUNT = 'SELECT id AS accountId FROM `CM360_accounts` LIMIT 1;'
_PER_PROJECT = 'SELECT id AS projectId, accountId FROM `CM360_projects`;'
# Deliberately matches no rows; these endpoints are broken upstream.
_BROKEN = ("SELECT id AS advertiserId, id AS accountId FROM `CM360_accounts` "
           "where name='BROKEN API CALL SEE: b/183547271';")

TASKS = [
    # Create the destination dataset before any table is written.
    {'dataset': {
        'description': 'Create a dataset for bigquery tables.',
        'hour': [4],
        'auth': 'user',
        'dataset': {'field': {'name': 'recipe_slug', 'kind': 'string',
                              'description': 'Place where tables will be created in BigQuery.'}},
    }},
    # User profiles seed the account discovery below.
    _cm_list_task('userProfiles.list'),
    _cm_list_task(
        'accounts.get',
        query=("SELECT DISTINCT accountId AS id FROM `CM360_userProfiles` "
               "WHERE NOT ENDS_WITH(userName, '@dcm') "
               "AND (ARRAY_LENGTH({accounts}) = 0 OR accountId IN UNNEST({accounts})) "),
        iterate=False,
        parameters={'accounts': {'field': {
            'name': 'accounts', 'kind': 'integer_list', 'order': 4,
            'default': [], 'description': 'List of account ids to pull.'}}},
    ),
]

# (endpoint, driving query) in the exact order of the original recipe.
TASKS += [_cm_list_task(fn, query=q) for fn, q in [
    ('subaccounts.list', _PER_ACCOUNT),
    ('advertisers.list', _PER_ACCOUNT),
    ('advertiserGroups.list', _PER_ACCOUNT),
    ('advertiserLandingPages.list', _PER_ACCOUNT),
    ('campaigns.list', _PER_ACCOUNT),
    # NOTE(review): hard-coded account filter kept from the generated
    # recipe; looks like leftover debugging -- confirm upstream intent.
    ('campaignCreativeAssociations.list',
     'SELECT accountId, id AS campaignId FROM `CM360_campaigns` WHERE accountId=10394172;'),
    ('ads.list', _PER_ACCOUNT),
    ('sites.list', _PER_ACCOUNT),
    ('directorySites.list', _PER_ACCOUNT),
    ('placements.list', _PER_ACCOUNT),
    ('placementGroups.list', _PER_ACCOUNT),
    ('placementStrategies.list', _PER_ACCOUNT),
    ('creatives.list', _PER_ACCOUNT),
    ('creativeGroups.list', _PER_ACCOUNT),
    ('sizes.list', _PER_ACCOUNT),
    ('creativeFields.list', _PER_ACCOUNT),
    ('creativeFieldValues.list',
     'SELECT accountId, id AS creativeFieldId FROM `CM360_creativeFields`;'),
    ('browsers.list', _ONE_ACCOUNT),
    ('cities.list', _ONE_ACCOUNT),
    ('languages.list', _ONE_ACCOUNT),
    ('metros.list', _ONE_ACCOUNT),
    ('connectionTypes.list', _ONE_ACCOUNT),
    ('contentCategories.list', _ONE_ACCOUNT),
    ('countries.list', _ONE_ACCOUNT),
    ('regions.list', _ONE_ACCOUNT),
    ('postalCodes.list', _ONE_ACCOUNT),
    ('projects.list', _ONE_ACCOUNT),
    ('videoFormats.list', _ONE_ACCOUNT),
    ('platformTypes.list', _ONE_ACCOUNT),
    ('orders.list', _PER_PROJECT),
    ('orderDocuments.list', _PER_PROJECT),
    ('mobileApps.list', _ONE_ACCOUNT),
    ('mobileCarriers.list', _ONE_ACCOUNT),
    ('operatingSystems.list', _ONE_ACCOUNT),
    ('operatingSystemVersions.list', _ONE_ACCOUNT),
    ('remarketingLists.list', _BROKEN),
    ('targetingTemplates.list', _BROKEN),
    ('targetableRemarketingLists.list', _BROKEN),
    ('inventoryItems.list', _PER_PROJECT),
    ('dynamicTargetingKeys.list', _ONE_ACCOUNT),
]]

# Substitute FIELDS values into the placeholders, then run the recipe.
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
github_jupyter
<table align="center"> <td align="center"><a target="_blank" href="http://introtodeeplearning.com"> <img src="https://i.ibb.co/Jr88sn2/mit.png" style="padding-bottom:5px;" /> Visit MIT Deep Learning</a></td> <td align="center"><a target="_blank" href="https://colab.research.google.com/github/aamini/introtodeeplearning/blob/master/lab1/Part2_Music_Generation.ipynb"> <img src="https://i.ibb.co/2P3SLwK/colab.png" style="padding-bottom:5px;" />Run in Google Colab</a></td> <td align="center"><a target="_blank" href="https://github.com/aamini/introtodeeplearning/blob/master/lab1/Part2_Music_Generation.ipynb"> <img src="https://i.ibb.co/xfJbPmL/github.png" height="70px" style="padding-bottom:5px;" />View Source on GitHub</a></td> </table> # Copyright Information - Copyright 2021 MIT 6.S191 Introduction to Deep Learning. All Rights Reserved. - Licensed under the MIT License. You may not use this file except in compliance with the License. Use and/or modification of this code outside of 6.S191 must reference: - © MIT 6.S191: Introduction to Deep Learning - http://introtodeeplearning.com # Lab 1: Intro to TensorFlow and Music Generation with RNNs # Part 2: Music Generation with RNNs Music generation을 위한 RNN 구축 : ABC notation으로 표현된 악보에서 패턴을 학습하도록 훈련한 후 모델을 이용하여 새로운 음악을 생성하는 것이 과제(task). - ABC noation이란 악보를 A부터 G까지 문자 표기법을 사용하여 나타낸 것을 말합니다. 번호, 제목, 작곡가, 음표, 길이, 음계 등이 포함되어 있고 각 알파벳 별로 의미하는 것이 정해져 있습니다. 모델 향상을 위해서 어떻게 해야 할까? * How does the number of training epochs affect the performance? * What if you alter or augment the dataset? * Does the choice of start string significantly affect the result? 
## 2.1 Dependencies cousre 저장소 다운로드, 실습 관련 패키지 import ``` # Import Tensorflow 2.0 # %tensorflow_version 2.x import tensorflow as tf # Download and import the MIT 6.S191 package !pip install mitdeeplearning import mitdeeplearning as mdl # Import all remaining packages import numpy as np import os import time import functools from IPython import display as ipythondisplay from tqdm import tqdm !apt-get install abcmidi timidity > /dev/null 2>&1 # Import import random from random import sample import matplotlib.pyplot as plt import pandas as pd def save_song_to_abc(song, filename="tmp"): save_name = "{}.abc".format(filename) with open(save_name, "w") as f: f.write(song) return filename def abc2wav(abc_file): suf = abc_file.rstrip('.abc') cmd = "abc2midi {} -o {}".format(abc_file, suf + ".mid") os.system(cmd) cmd = "timidity {}.mid -Ow {}.wav".format(suf, suf) return os.system(cmd) def play_wav(wav_file): return ipythondisplay.Audio(wav_file) def play_song(song): basename = save_song_to_abc(song) ret = abc2wav(basename + '.abc') if ret == 0: #did not suceed return play_wav(basename+'.wav') return None # Check that we are using a GPU, if not switch runtimes # using Runtime > Change Runtime Type > GPU #assert len(tf.config.list_physical_devices('GPU')) > 0 ``` ## 2.2 Dataset 817 songs, 200,679 vectorized songs - ABC notation으로 표현된 수천 개의 아일랜드 민요(Irish folk songs) 데이터 - 단순히 연주되는 음에 대한 정보만이 아니라 추가적으로 노래 제목, 키, 템포와 같은 메타 정보도 포함함 - **텍스트 파일에 존재하는 다양한 문자들이 complexity of the learning problem에 어떤 영향을 미칠까?** - 위 문제는 텍스트 데이터를 수치 데이터로 만들때 중요해짐 : Preprocessing - 음악데이터만 사용하려면 어떻게 해야 할까요? - NLP 적인 처리가 필요하지 않을까? - X : number - T : title, 제목 - Z : ?? 음자리표인감 높은음자리표 이런거.. - M : 뭐지? 첫 시작음? 
- L : 박자표인듯 - K : key, 음계, 다장조 이런거 - z : 악보, 우리가 실제로 훈련해야 할 것, 여기서 | 이거는 마디, !는 뭔지 모르겠음 :| 도돌이표 - **songs** : 817개의 노래가 들어있는 list - **show_music_detail(num)** : num 개의 음악을 자세하게 볼 수 있습니다 - **mdl.lab1.play_song(example_song)** : text형 음악을(ABC notation) audio 파형으로 표현합니다 ``` # Download the dataset # colab으로 실행한다면 주석과 같이 작성할 것 # jupyter notebook으로 실행한다면 아래와 같이 작성할 것 # import pattern과 관련해서 수정해주어야 오류가 해결되는 것 같습니다 from mitdeeplearning import lab1 #songs = mdl.lab1.load_training_data() songs = lab1.load_training_data() # Print one of the songs to instpect it in greater detail! # my function -> Print N of the song... def show_music_detail(songs, num) : num_list = sample(range(0, len(songs)), num) #sample output format is list for i in num_list : example_song = songs[i] print("\nExample song #", i, ":") print(example_song) show_music_detail(songs, 3) example_song = songs[random.randint(0, len(songs))] mdl.lab1.play_song(example_song) ``` ## 2.3 Preprocessing the dataset - Vectorize the text - 텍스트(text) 데이터셋 -> 수치(numerical) 데이터셋 => Vectorize - **vocab** : ABC notation에 있는 unique vocabulary(or character) - **char2idx** : 문자 -> 숫자 mapping, 정수 - **idx2char** : 숫자 -> 문자 mapping - **vectorized_songs** : ABC notation 노래 text에서 벡터화된 노래 - Make input sequence chunk from input text (and target sequence) ``` # Join our list of song strings into a single string containing all songs # 노래 문자열(string) 목록을 이 모든 노래가 포함된 하나의 단일 문자열로 결합함 songs_joined = "\n\n".join(songs) # Find all unique characters in the joined string # 결합된 노래 묶음 문자열에서 유일한(unique) 단어들을 모두 확인 # 데이터셋에는 총 83개의 유일한 문자들이 존재함 vocab = sorted(set(songs_joined)) print("There are", len(vocab), "unique characters in the dataset") ### Define numerical representation of text ### # 문자 -> unique index로 매핑 # 예를 들어, "d"라는 문자의 인덱스는 char2idx["d"]와 같이 작성하여 얻을 것임 char2idx = {u:i for i, u in enumerate(vocab)} # index -> 문자로 매핑 # char2idx의 반대 버전이며 index에서 다시 문자로 되돌리는 역할을 할 것임 idx2char = np.array(vocab) # [python] difference between str() and repr() 
# 숫자를 문자열로 변환시켜주는 함수임은 동일합니다. 하지만 기저에 작동하는 원리가 다른 것 같습니다. print('{') for char,_ in zip(char2idx, range(10)): print(' {:4s}: {:3d},'.format(repr(char), char2idx[char])) print(' ...\n}') ### Vectorize the songs string ### # 문자열 벡터화 함수, return은 무조건 'N(입력 문자열의 문자 개수)'요소의 np.array def vectorize_string(string): # TODO vectorized_list = [] vectorized_list = ([char2idx[_] for _ in string]) return np.array(vectorized_list) # my function -> Print N length of vectorized songs # 나중에 곡 하나를 통째로 vectorized 한 결과를 두 개 (원곡, vectorized) 보여주는 함수도 짜면 좋을 것 같음 def show_music_vectorized(vectorized_songs, length) : print ('\n{} \n---- characters mapped to int ---->\n {}'.format(repr(songs_joined[:length]), vectorized_songs[:length])) vectorized_songs = vectorize_string(songs_joined) print(len(vectorized_songs)) # 200,679 show_music_vectorized(vectorized_songs, 20) ``` ### Create training examples and targets - 실제 텍스트를 훈련 중에 사용할 예제 시퀀스로 나눌 것 - 입력 텍스트를 seq_length 단위로 나눔 - 예를 들어 텍스트는 Hello 이고 seq_length가 4라면 hell가 input, elloo가 target이 됨 - 이로부터 batch 방법을 사용하여 해당 문자 인덱스 스트림을 원하는 크기의 시퀀스로 변환 가능 - 각각의 벡터,각각의 index는 하나의 단일 time step으로 처리 - 즉, time step 0 은 들어온 시퀀스의 첫 번째 문자에 대한 인덱스를 받고 그 다음 인덱스 1의 문자를 맞출 것 ``` ### Batch definition to create training examples ### def get_batch(vectorized_songs, seq_length, batch_size): # the length of the vectorized songs string n = vectorized_songs.shape[0] - 1 # 200679 - 1 = 200678 # randomly choose the starting indices for the examples in the training batch # 0 ~ 200678-seq_length 숫자 사이에서 batch_size개의 임의의 표본 추출 -> 즉 batch size가 n개면 idx도 n개 나옴 idx = np.random.choice(n-seq_length, batch_size) #print("idx: ", idx) '''TODO: construct a list of input sequences for the training batch''' input_batch = [vectorized_songs[i : i+seq_length] for i in idx] #print("input batch: ", len(input_batch)) # batch size 는 seq_length 값과 동일, 그래서 step의 수도 0~seq_length-1 까지임! 
'''TODO: construct a list of output sequences for the training batch''' output_batch = [vectorized_songs[i+1 : i+seq_length+1] for i in idx] #print("output batch: ", len(output_batch)) # x_batch, y_batch provide the true inputs and targets for network training x_batch = np.reshape(input_batch, [batch_size, seq_length]) y_batch = np.reshape(output_batch, [batch_size, seq_length]) return x_batch, y_batch x_batch, y_batch = get_batch(vectorized_songs, seq_length=5, batch_size=1) for i, (input_idx, target_idx) in enumerate(zip(np.squeeze(x_batch), np.squeeze(y_batch))): print("Step {:3d}".format(i)) print(" input: {} ({:s})".format(input_idx, repr(idx2char[input_idx]))) print(" expected output: {} ({:s})".format(target_idx, repr(idx2char[target_idx]))) def graph(pred) : plt.figure(figsize=(16, 9)) plt.plot(pred, label = 'actual') plt.legend() plt.show() #graph(vectorized_songs) df2 = pd.DataFrame(vectorized_songs, columns = ["songs"]) df2.reset_index().plot(x='index', y='songs') df2 ``` ## 2.4 The Recurrent Neural Network (RNN) model 모델은 LSTM 구조를 기반하는데, 이 구조는 연속되는 문자들 사이의 시간 관계(temporal relationships)에 대한 정보를 유지하는 state 벡터를 사용합니다. LSTM의 최종 출력은 fully-connected `Dense` layer로 넘겨지는데, 여기서 vocabulary의 각 문자에 대해 softmax를 계산하고(출력하고) 이로부터 만들어진 불포에서 샘플을 추춘하여 다음 문자를 예측합니다. * keras API [`tf.keras.Sequential`](https://www.tensorflow.org/api_docs/python/tf/keras/models/Sequential)를 사용합니다. 모델을 정의하기 위해 세 개의 레이어가 사용됩니다. * [`tf.keras.layers.Embedding`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Embedding) : 입력 레이어, 각 문자의 숫자들을 `embedding_dim`차원의 벡터로 매핑하는 훈련 가능한 lookup 테이블로 구성됨 * [`tf.keras.layers.LSTM`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM): `units=rnn_units` 크기의 LSTM 신경망 * [`tf.keras.layers.Dense`](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense): `vocab_size` 출력이 있는 출력 레이어 * `Model.summary` : 모델의 내부 작동에 대한 summary를 출력, 모델 내부의 레이어, 각 레이어의 출력 형태(shape), 배치 사이즈 등을 확인해 볼 수 있습니다. 
<img src="https://raw.githubusercontent.com/aamini/introtodeeplearning/2019/lab1/img/lstm_unrolled-01-01.png" alt="Drawing"/> ``` # define model function def LSTM(rnn_units): return tf.keras.layers.LSTM( rnn_units, return_sequences=True, recurrent_initializer='glorot_uniform', recurrent_activation='sigmoid', stateful=True, ) ### Defining the RNN Model ### '''TODO: Add LSTM and Dense layers to define the RNN model using the Sequential API.''' def build_model(vocab_size, embedding_dim, rnn_units, batch_size): model = tf.keras.Sequential([ # Layer 1: Embedding layer to transform indices into dense vectors of a fixed embedding size tf.keras.layers.Embedding(vocab_size, embedding_dim, batch_input_shape=[batch_size, None]), # Layer 2: LSTM with `rnn_units` number of units. # TODO: Call the LSTM function defined above to add this layer. LSTM(rnn_units), # Layer 3: Dense (fully-connected) layer that transforms the LSTM output into the vocabulary size. # TODO: Add the Dense layer. tf.keras.layers.Dense(vocab_size) ]) return model # Build a simple model with default hyperparameters. You will get the chance to change these later. model = build_model(len(vocab), embedding_dim=256, rnn_units=1024, batch_size=32) model.summary() ``` ### Test out the RNN model 모델의 예상 작동을 확인하기 위한 간단한 테스트 시행합니다. 길이 100의 시퀀스를 사용하여 출력물의 차원도 확인해보자. **모델에는 어떤 길이의 입력도 동작할 수 있음을 주의!** ``` x, y = get_batch(vectorized_songs, seq_length=100, batch_size=32) pred = model(x) print("Input shape: ", x.shape, " # (batch_size, sequence_length)") print("Prediction shape: ", pred.shape, "# (batch_size, sequence_length, vocab_size)") ``` ### Predictions from the untrained model 훈련시키지 않은 모델의 예측은 어떨까? 모델의 실제 예측을 얻기 위해서, 우리는 문자 vocabulary를 `softmax`로 정의한 출력 분포에서 샘플을 추출해야 합니다. 이는 우리에게 실제 문자 인덱스틀 제공합니다. 즉, 범주형 분포([categorical distribution](https://en.wikipedia.org/wiki/Categorical_distribution) )을 사용하여 예제 예측을 샘플로 추출함을 의미합니다. 이로써 각 time step에서 다음 문자(특히나 그 인덱스에서의)에 대한 예측을 하게 됩니다. 여기서는 단순히 `argmax`를 취하는 대신 확률 분포에서 표본을 추출합니다. 
(그리고 argmax는 모델이 루프에 빠지게 만들 수 있습니다?) 첫 번째 예시 batch에서 샘플링을 실행해봅시다. ``` sampled_indices = tf.random.categorical(pred[0], num_samples=1) sampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy() sampled_indices ``` 이제 이들을 디코딩하여 훈련되지 않은 모델이 예측한 텍스트를 확인할 수 있습니다. ``` print("Input: \n", repr("".join(idx2char[x[0]]))) print() print("Next Char Predictions: \n", repr("".join(idx2char[sampled_indices]))) ``` ## 2.5 Training the model: loss and training operations 다음 문자를 예측하는 문제를 일반적으로 분류 문제(classification, 여기서는 이진 분류가 아닌 multi-class)로 생각해 볼 수 있습니다. RNN의 이전 state와 주어진 time step에서의 입력을 고려하여 다음 문자에 대한 class를 예측해야 합니다. ### loss 해당 분류 작업에 대해 모델을 훈련시키기 위해 `crossentropy` loss 형태를 사용할 수 있습니다. 특히나, `sparse_categorical_crossentropy`](https://www.tensorflow.org/api_docs/python/tf/keras/backend/sparse_categorical_crossentropy) loss를 사용할 것인데, 해당 loss는 범주형 분류 작업을 위해 정수 타겟을 사용하기 때문입니다. 실제 타겟(`labels`)과 예측 타겟(`logits`)을 사용하여 loss를 계산하는 방식입니다. ### hyper parameter tuning 시작값 제공, 최적의 파라미터는 여러분들이 찾아보세요~ ### optimizer and training operation optimizer와 epochs은 신경망 출력에 영향을 줄 수 있음 - 아래는 시도해볼만 한 optimizer - [`Adam`](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam?version=stable) - [`Adagrad`](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adagrad?version=stable) Backpropagation은 [`tf.GradientTape`](https://www.tensorflow.org/api_docs/python/tf/GradientTape)를 사용 훈련 동안 모델의 진행 상황을 출력하여 loss를 최소화하고 있는지에 대한 여부를 시각화함 ### Benchmarking 1. No-trained model -> scalar loss : 4.418331 ``` ### Defining the loss function ### '''TODO: define the loss function to compute and return the loss between the true labels and predictions (logits). 
Set the argument from_logits=True.''' def compute_loss(labels, logits): loss = tf.keras.losses.sparse_categorical_crossentropy(labels, logits, from_logits=True) # TODO return loss '''TODO: compute the loss using the true next characters from the example batch and the predictions from the untrained model several cells above''' example_batch_loss = compute_loss(y, pred) print("Prediction shape: ", pred.shape, " # (batch_size, sequence_length, vocab_size)") print("scalar_loss: ", example_batch_loss.numpy().mean()) ### Hyperparameter setting and optimization ### # Optimization parameters: num_training_iterations = 2000 # Increase this to train longer batch_size = 4 # Experiment between 1 and 64 seq_length = 100 # Experiment between 50 and 500 learning_rate = 5e-3 # Experiment between 1e-5 and 1e-1 # Model parameters: vocab_size = len(vocab) embedding_dim = 256 rnn_units = 1024 # Experiment between 1 and 2048 # Checkpoint location: checkpoint_dir = './training_checkpoints' checkpoint_prefix = os.path.join(checkpoint_dir, "my_ckpt") ### Define optimizer and training operation ### '''TODO: instantiate a new model for training using the `build_model` function and the hyperparameters created above.''' model = build_model(vocab_size, embedding_dim, rnn_units, batch_size) '''TODO: instantiate an optimizer with its learning rate. Checkout the tensorflow website for a list of supported optimizers. https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/ Try using the Adam optimizer to start.''' optimizer = tf.keras.optimizers.Adam(learning_rate) @tf.function def train_step(x, y): # Use tf.GradientTape() with tf.GradientTape() as tape: '''TODO: feed the current input into the model and generate predictions''' y_hat = model(x) '''TODO: compute the loss!''' loss = compute_loss(y, y_hat) # Now, compute the gradients '''TODO: complete the function call for gradient computation. Remember that we want the gradient of the loss with respect all of the model parameters. 
HINT: use `model.trainable_variables` to get a list of all model parameters.''' grads = tape.gradient(loss, model.trainable_variables) # Apply the gradients to the optimizer so it can update the model accordingly optimizer.apply_gradients(zip(grads, model.trainable_variables)) return loss ################## # Begin training!# ################## history = [] plotter = mdl.util.PeriodicPlotter(sec=2, xlabel='Iterations', ylabel='Loss') if hasattr(tqdm, '_instances'): tqdm._instances.clear() # clear if it exists for iter in tqdm(range(num_training_iterations)): # Grab a batch and propagate it through the network x_batch, y_batch = get_batch(vectorized_songs, seq_length, batch_size) loss = train_step(x_batch, y_batch) # Update the progress bar history.append(loss.numpy().mean()) plotter.plot(history) # Update the model with the changed weights! if iter % 100 == 0: model.save_weights(checkpoint_prefix) # Save the trained model and the weights model.save_weights(checkpoint_prefix) ``` ## 2.6 Generate music using the RNN model : Inference 음악을 만들어낼 때, 모델의 시작을 위해 몇 가지 시드를 공급해야 합니다. 일단 생성된 시드를 가지고 나면, 훈련된 RNN을 사용하여 각각의 연속적인 문자를 반복적으로 예측할 수 있습니다. 보다 구체적으로, RNN은 가능한 연속적인 문자로 `softmax`를 출력한다는 것을 기억하자. 추론(Inference)을 하는 동안, 이런 분포로부터 반복적으로 표본을 추출한 다음 생성된 노래를 ABC 표기법으로 인코딩하는 데 샘플을 사용합니다. ### Restore the latest checkpoint - 추론(Inference) 단계를 간단하게 하기 위해서 공정 batch 크기는 1로 사용 - RNN의 state가 timestep에서 timestep으로 전달되는 방식 때문에, 모델은 오로지 한번 설정된 고정 batch 크기만을 수용할 수 있습니다. - 다른 batch_size 모델을 실행하려면 rebuilding 해야 하고 훈련 중 마지막 checkpoint 이후의 가중치를 복원해야 함 ``` '''TODO: Rebuild the model using a batch_size=1''' model = build_model(vocab_size, embedding_dim, rnn_units, batch_size=1) # Restore the model weights for the last checkpoint after training model.load_weights(tf.train.latest_checkpoint(checkpoint_dir)) model.build(tf.TensorShape([1, None])) model.summary() ``` ### The prediction procedure - "시드" 시작 문자열과 RNN state를 초기화 하고, 생성할 문자 수를 설정합니다. - 시작 문자열과 RNN state를 사용하여 다음 예측 문자에 대한 확률 분포를 얻습니다. 
- 예측 문자의 인덱스를 계산하기 위해 다항 분포 표본으로부터 샘플링합니다. 그런 다음 이 예측 문자를 모델의 다음 입력으로 사용합니다. - 각 time step에서, 업데이트 된 RNN state는 다시 모델에 공급되어, 다음 예측을 만들 때 더 많은 컨텍스트(context)를 갖게 됩니다. 다음 문자를 예측한 후, 업데이트 된 RNN states는 다시 모델에 공급되고, 이는 이전 예측으로부터 더 많은 정보를 얻기 때문에 데이터 속에서 시퀀스 종속성을 학습하게 하는 방법이 됩니다. ![LSTM inference](https://raw.githubusercontent.com/aamini/introtodeeplearning/2019/lab1/img/lstm_inference.png) ``` ### Prediction of a generated song ### def generate_text(model, start_string, generation_length=1000): # Evaluation step (generating ABC text using the learned RNN model) '''TODO: convert the start string to numbers (vectorize)''' #input_eval = [vectorize_string(start_string)] input_eval = [char2idx[s] for s in start_string] input_eval = tf.expand_dims(input_eval, 0) # Empty string to store our results text_generated = [] # Here batch size == 1 model.reset_states() tqdm._instances.clear() for i in tqdm(range(generation_length)): '''TODO: evaluate the inputs and generate the next character predictions''' predictions = model(input_eval) # Remove the batch dimension predictions = tf.squeeze(predictions, 0) '''TODO: use a multinomial distribution to sample''' predicted_id = tf.random.categorical(predictions, num_samples=1)[-1,0].numpy() # Pass the prediction along with the previous hidden state # as the next inputs to the model input_eval = tf.expand_dims([predicted_id], 0) '''TODO: add the predicted character to the generated text!''' # Hint: consider what format the prediction is in vs. the output text_generated.append(idx2char[predicted_id]) return (start_string + ''.join(text_generated)) '''TODO: Use the model and the function defined above to generate ABC format text of length 1000! As you may notice, ABC files start with "X" - this may be a good start string.''' generated_text = generate_text(model, start_string="X", generation_length=1000) # TODO # generated_text = generate_text('''TODO''', start_string="X", generation_length=1000) ``` ### Play back the generated music! 
ABC notation text audio -> audio file 로 변환 후 생성된 노래 확인 ``` ### Play back generated songs ### generated_songs = mdl.lab1.extract_song_snippet(generated_text) for i, song in enumerate(generated_songs): # Synthesize the waveform from a song waveform = mdl.lab1.play_song(song) # If its a valid song (correct syntax), lets play it! if waveform: print("Generated song", i) ipythondisplay.display(waveform) ```
github_jupyter
``` import random from tracery import Grammar, modifiers import tracery_alterations from collections import namedtuple import pycorpora class Question(namedtuple('Question', ['id','questions','answers','additional_tags'])): def instantiate(self, n=2): if n=='lambda': return (self.questions(), self.answers) else: return (self.questions(), [self.answers() for i in range(n)]) def question_set(questions, max_qs=10, answers=2, exclude=[]): r = [] tags = set(exclude) for i in range(max_qs): valid = [q for q in questions if q.id not in tags and all(qt not in tags for qt in q.additional_tags)] if len(valid) == 0: break q = random.choice(valid) r.append(q.instantiate(answers)) tags.add(q.id) for t in q.additional_tags: tags.add(t) return r saint_titles = ['Saint ', 'Pope ', 'King ', 'Mother '] qg = Grammar({ 'animal':pycorpora.animals.common['animals'], 'first_name_en':pycorpora.humans.firstNames['firstNames'], 'last_name_en':pycorpora.humans.lastNames['lastNames'], 'first_name_no':(pycorpora.humans.norwayFirstNamesBoys['firstnames_boys_norwegian'] + pycorpora.humans.norwayFirstNamesGirls['firstnames_girls_norwegian']), 'last_name_no':pycorpora.humans.norwayLastNames['lastnames_norwegian'], 'first_name_es':pycorpora.humans.spanishFirstNames['firstNames'], 'last_name_es':pycorpora.humans.spanishLastNames['lastNames'], 'any_title':pycorpora.humans.englishHonorifics['englishHonorifics'], 'object':[x.strip() for x in pycorpora.objects.objects['objects'] if x.strip()[-1] != 's'],# and len(x.split()) < 2 'cluedo_suspect':pycorpora.games.cluedo['suspects']['Cluedo'], 'cluedo_weapon':pycorpora.games.cluedo['weapons']['Cluedo'], 'cluedo_room':pycorpora.games.cluedo['rooms'], 'clue_suspect':pycorpora.games.cluedo['suspects']['Clue'], 'clue_weapon':pycorpora.games.cluedo['weapons']['Clue'], 'clue_room':pycorpora.games.cluedo['rooms'], 'room':pycorpora.architecture.rooms['rooms'], 'appliance':pycorpora.technology.appliances['appliances'], 
'strange_word':pycorpora.words.strange_words['words'], 'name_suffix':pycorpora.humans.suffixes['suffixes'], 'greek_god':pycorpora.mythology.greek_gods['greek_gods'], 'greek_monster':pycorpora.mythology.greek_monsters['greek_monsters'], 'greek_titan':pycorpora.mythology.greek_titans['greek_titans'], 'celebrity':pycorpora.humans.celebrities['celebrities'], 'street_core':([x.split()[-1] for x in pycorpora.humans.celebrities['celebrities']] + [x.split()[-1] for x in pycorpora.humans.britishActors['britishActors']] + pycorpora.geography.english_towns_cities['towns'] + pycorpora.geography.english_towns_cities['cities'] + pycorpora.geography.countries['countries'] + [x['name'] for x in pycorpora.geography.oceans['oceans']] + [x['name'] for x in pycorpora.geography.rivers['rivers']]), 'saint':[x['saint'] if any(x['saint'].startswith(t) for t in saint_titles) else 'Saint '+x['saint'] for x in pycorpora.religion.christian_saints], 'pet':['#animal.a.capitalize#','#animal.a.capitalize#', '#animal.a.capitalize#','#animal.a.capitalize#', '#animal.a.capitalize#','#animal.a.capitalize#', '#animal.a.capitalize#','#animal.a.capitalize#', '#celebrity#'], 'street_noun':['street','road','street','road','street','road', 'street','road','street','road','street','road', 'lane','avenue','close','way', 'lane','avenue','close','way', 'boulevard','alley','drive','crescent','court', 'hill', 'strand','end','prospect','gate'], 'street_adjective':['old','new','west','east','north','south'], 'small_cardinal':['two','three','four'], 'street':['#street_core# #street_noun#','#street_core# #street_noun#','#street_core# #street_noun#', '#street_core# #street_noun#','#street_core# #street_noun#','#street_core# #street_noun#', '#street_adjective# #street_core# #street_noun#','#street_adjective# #street_core# #street_noun#', '#street_adjective# #street_noun#', '#street_adjective# #street_noun#', '#small_cardinal# #street_core.s# #street_noun#', 'the #street_adjective# #street_noun#', 'the #street_noun#', 
'#rare_street#'], 'rare_street':['#street#','#street#','#street#', '#real_rare_street#'], 'real_rare_street':['whipmawhopma#street_noun#', 'whip-ma-whop-ma-#street_noun#', #'#[street_core:#rude_word#]street#', '#[street_core:#strange_word#]street#'], 'greek_whatever':['#greek_god#','#greek_monster#','#greek_titan#'], 'cluedo':['#cluedo_suspect#, in the #cluedo_room#, with the #cluedo_weapon#', '#clue_suspect#, in the #clue_room#, with the #clue_weapon#'], 'any_pronouns':['{subject}/{object}/{dependentPossessive}/{independentPossessive}/{reflexive}'.format(**pronouns) for pronouns in pycorpora.humans.thirdPersonPronouns['thirdPersonPronouns']], 'simple_pronouns':['he/him/his/his/himself', 'she/her/her/hers/herself', 'they/them/their/theirs/themself'], 'pronouns':['#simple_pronouns#','#simple_pronouns#','#simple_pronouns#','#any_pronouns#'], 'simple_title':['Mr','Mr','Mr','Mrs','Ms','Miss','Mx','Mx','Mx'], 'title':['#simple_title#','#simple_title#','#simple_title#','#any_title#'], 'first_name':['#first_name_en#','#first_name_en#','#first_name_en#', '#first_name_no#','#first_name_es#'], 'single_last_name':['#last_name_en#','#last_name_en#','#last_name_en#', '#last_name_no#','#last_name_es#'], 'last_name':['#single_last_name#','#single_last_name#', '#single_last_name#-#single_last_name#'], 'full_name_no_suffix':['#first_name# #last_name#', '#first_name# #first_name# #last_name#'], 'full_name':['#full_name_no_suffix#','#full_name_no_suffix#','#full_name_no_suffix#', '#full_name_no_suffix# #name_suffix#'], 'title_last':'#title# #last_name#', 'title_full_name':'#title# #full_name#', 'first_name_noun':['first name', 'given name','given name','given name','given name','given name', 'personal name','personal name','personal name','personal name', 'forename', 'Christian name'], 'last_name_noun':['surname','surname','surname','surname', 'family name','family name','family name','family name','family name', 'last name'], 'title_noun':['honorific','title'], 
'low_ordinal_number':['first','second','third','fourth','fifth', 'sixth','seventh','eighth','ninth','tenth', 'eleventh','twelth','thirteenth','fourteenth','fifteenth'], 'numerated_object':['#object.a#','two #object.s#','three #object.s#', 'four #object.s#','five #object.s#','six #object.s#', 'seven #object.s#','eight #object.s#','nine #object.s#'], 'object_collection_head':['#numerated_object#', '#object_collection_head#, #numerated_object#'], 'object_collection':['#object_collection_head#, #numerated_object#, and #numerated_object#'], 'receive_verb':['receive','get'], 'maybe_x':['#x#',''], 'cheese_noun':['cheese','cheese','cheese','cheese', 'curd','fermented dairy product', 'cheese, curd, or #[x:other ]maybe_x#fermented dairy product', 'cheese or #[x:other ]maybe_x#fermented dairy product', 'curd or #[x:other ]maybe_x#fermented dairy product', 'cheese or curd'], 'room_question_clause':['were you born','was your first kiss', 'do you usually eat','do you usually sleep', 'do you keep your #[x:best ]maybe_x##appliance#', 'were you born','was your first kiss', 'do you usually eat','do you usually sleep', 'do you keep your #[x:best ]maybe_x##appliance#', 'do you keep your life savings'], 'room_question':['What kind of room #room_question_clause# in?', 'In what kind of room #room_question_clause#?', 'Where #room_question_clause#?'], 'room_answer':['#room.a.capitalize#', 'The #room#'], 'new_or_emerging':['new', 'emerging', 'new or emerging'], 'fabric_item':['duvet cover','coat','skirt','pair of trousers','pair of pants', 'bandana'], 'fabric_question':['What is your favourite fabric?', 'What is your favourite fabric?', 'What is your favourite fabric?', 'What was your first #fabric_item# made of?', 'What was your first #fabric_item# made out of?', 'Of what fabric was your first #fabric_item# made?'] }) qg.add_modifiers(modifiers.base_english) def add_religion(grammar, name, data): decorated_name = 'religion_{0}'.format(name) if isinstance(data, str): return data elif 
isinstance(data, list): rule = [add_religion(grammar, '{0}_{1}'.format(name,i), x) for (i, x) in enumerate(data)] grammar.push_rules(decorated_name, rule) return decorated_name elif isinstance(data, dict): rule = [k if len(v) == 0 else '#{0}#'.format(add_religion(grammar, k, v)) for (k,v) in data.items()] grammar.push_rules(decorated_name, rule) return decorated_name rg = Grammar({}) add_religion(rg, 'all', {'Atheism':{}, 'Agnosticism':{}, 'Theism':{'all_other':pycorpora.religion.religions, 'Christianity':{}, 'Islam':{}, 'Hinduism':{}, 'Buddhism':{}, 'Sikhism':{}, 'Judaism':{}}}) mg = Grammar({}) for planetish in pycorpora.science.planets['planets']: if len(planetish['moons']) > 0: mg.push_rules('{0}_moon'.format(planetish['name']),planetish['moons']) mg.push_rules('moon',['#{0}_moon#'.format(planetish['name']) for planetish in pycorpora.science.planets['planets'] if len(planetish['moons']) > 0]) questions = [ Question('first_name', lambda:qg.flatten('What is your #first_name_noun#?'), lambda:qg.flatten('#first_name#'), ()), Question('last_name', lambda:qg.flatten('What is your #last_name_noun#?'), lambda:qg.flatten('#last_name#'), ()), Question('title', lambda:qg.flatten('What is your #title_noun#?'), lambda:qg.flatten('#title#'), ()), Question('first_last_name', lambda:qg.flatten('What is your #first_name_noun# and #last_name_noun#?'), lambda:qg.flatten('#first_name# #last_name#'), ('first_name', 'last_name')), Question('full_name', lambda:qg.flatten('What is your full name?'), lambda:qg.flatten('#full_name#'), ('first_name','last_name')), Question('title_last_name', lambda:qg.flatten('What is your #title_noun# and #last_name_noun#?'), lambda:qg.flatten('#title# #last_name#'), ('title','last_name')), Question('title_full_name', lambda:qg.flatten('What is your #title_noun# and full name?'), lambda:qg.flatten('#title# #full_name#'), ('title', 'first_name', 'last_name')), Question('pronouns', lambda:qg.flatten('What are your pronouns?'), 
lambda:qg.flatten('#pronouns#'), ()), Question('birthday_presents', lambda:qg.flatten('What did you #receive_verb# for your #low_ordinal_number# birthday?'), lambda:qg.flatten('#object_collection.capitalize#'), ()), Question('cheese', lambda:qg.flatten('What is your favourite #cheese_noun#?'), lambda:random.choice(pycorpora.foods.curds['curds']).capitalize(), ()), Question('fruit', lambda:qg.flatten('What is your favourite fruit?'), lambda:random.choice(pycorpora.foods.fruits['fruits']).capitalize(), ('vegetable',)), Question('vegetable', lambda:qg.flatten('What is your favourite vegetable?'), lambda:random.choice(pycorpora.foods.vegetables['vegetables']).capitalize(), ()), Question('sandwich', lambda:qg.flatten('What is your favourite type of sandwich?'), lambda:random.choice(pycorpora.foods.sandwiches['sandwiches'])['name'].capitalize(), ('bread',)), Question('bread', lambda:qg.flatten('What is your favourite type of bread?'), lambda:random.choice(pycorpora.foods.breads_and_pastries['breads']).capitalize(), ()), Question('pastry', lambda:qg.flatten('What is your favourite type of pastry?'), lambda:random.choice(pycorpora.foods.breads_and_pastries['pastries']).capitalize(), ('bread',)), Question('pokemon', lambda:qg.flatten('What was the first Pokémon you caught?'), lambda:random.choice(pycorpora.games.pokemon['pokemon'])['name'], ('game',)), Question('wrestling', lambda:qg.flatten('What is your favourite professional wrestling move?'), lambda:random.choice(pycorpora.games.wrestling_moves['moves']).capitalize(), ('game',)), Question('cluedo', lambda:qg.flatten('What is your favourite Clue#[x:do]maybe_x# murder?'), lambda:qg.flatten('#cluedo#'), ('game',)), Question('pet', lambda:qg.flatten('What was your first pet?'), lambda:qg.flatten('#pet#'), ()), Question('dinosaur', lambda:qg.flatten('What is your favourite dinosaur?'), lambda:random.choice(pycorpora.animals.dinosaurs['dinosaurs']), ('pet',)), Question('room', lambda:qg.flatten('#room_question#'), 
lambda:qg.flatten('#room_answer#'), ()), Question('ism', lambda:"What is your favourite style of modern art?", lambda:random.choice(pycorpora.art.isms['isms']).title(), ('art',)), Question('colour', lambda:"What is your favourite colour?", lambda:random.choice(pycorpora.colors.xkcd['colors'])['color'].capitalize(), ('art',)), Question('firework', lambda:"What is your favourite firework?", lambda:random.choice(pycorpora.technology.fireworks['effects']).capitalize(), ('art',)), Question('knot', lambda:"What is your favourite knot?", lambda:random.choice(pycorpora.technology.knots['knots']).capitalize(), ('art',)), Question('car', lambda:"Who was the manufacturer of your first car?", lambda:random.choice(pycorpora.corporations.cars['cars']), ('technology',)), Question('lisp', lambda:"What is your favourite dialect of LISP?", lambda:random.choice(pycorpora.technology.lisp['lisps']), ('technology','geek')), Question('technology', lambda:qg.flatten("What is your favourite #new_or_emerging# technology?"), lambda:random.choice(pycorpora.technology.new_technologies['technologies']).capitalize(), ('technology','geek',)), Question('programming', lambda:'What was the first programming language you learned?', lambda:random.choice(pycorpora.technology.programming_languages), ('technology','geek')), Question('fabric', lambda:qg.flatten('#fabric_question#'), lambda:random.choice(pycorpora.materials.fabrics['fabrics']).capitalize(), ()), Question('gem', lambda:"What is your favourite gemstone?", lambda:random.choice(pycorpora.materials.gemstones['gemstones']).capitalize(), ()), Question('fluid', lambda:"What was the first bodily fluid you had to flush down the toilet?", lambda:random.choice(pycorpora.materials.get_file('abridged-body-fluids')['abridged body fluids']).capitalize(), ()), Question('building', lambda:"What material was the house you grew up in built from?", lambda:random.choice(pycorpora.materials.get_file('building-materials')['building materials']).capitalize(), ()), 
Question('prime', lambda:'What is your favourite prime number?', lambda:str(random.choice(pycorpora.mathematics.primes['primes'][:random.randint(1,999)])), ('maths','geek')), Question('author', lambda:'Who is your favourite author?', lambda:random.choice(pycorpora.humans.authors['authors']), ()), Question('job', lambda:'What is your current occupation?', lambda:random.choice(pycorpora.humans.occupations['occupations']).capitalize(), ()), Question('tv', lambda:'What is your favourite TV show?', lambda:random.choice(getattr(pycorpora,'film-tv').tv_shows['tv_shows']), ()), Question('music', lambda:'What is your favourite style of music?', lambda:random.choice(pycorpora.music.genres['genres']), ()), Question('greek', lambda:'Which figure in Greek mythology do you most identify with?', lambda:qg.flatten('#greek_whatever#'), ()), Question('flower', lambda:'What is your favourite flower?', lambda:random.choice(pycorpora.plants.flowers['flowers']).capitalize(), ()), Question('religion', lambda:'What is your religion?', lambda:rg.flatten('#religion_all#'), ()), Question('saint', lambda:'Who is your favourite Christian saint?', lambda:qg.flatten('#saint#'), ()), Question('element', lambda:'What is your favourite chemical element?', lambda:random.choice(pycorpora.science.elements['elements'])['name'], ('geek',)), Question('minor_planet', lambda:'What is your favourite minor planet?', lambda:random.choice(pycorpora.science.minor_planets['minor_planets']), ('geek','astronomy')), Question('planet', lambda:'What is your favourite planet?', lambda:random.choice(['Mercury','Venus','Earth','Mars', 'Jupiter','Saturn','Uranus','Neptune']), ('astronomy')), Question('moon', lambda:'What is your favourite moon?', lambda:mg.flatten('#moon#'), ('astronomy','geek')), Question('headline', lambda:'What was the front page headline on the day you were born?', lambda:random.choice(pycorpora.words.crash_blossoms['crash_blossoms']), ('birth',)), Question('street', lambda:'What was the name of the 
street you grew up on?', lambda:qg.flatten('#street#').title(), ()), ] ```
github_jupyter
# mmdcornea Cornea cells marking. ## Description This procedure creates a marker for each cell in a very poor quality microscopic image of a cornea. The composition of an opening with the regional maximum is used to create the markers. ``` import numpy as np from PIL import Image import ia870 as ia ``` # Reading and topographic view The gray-scale image of the cornea is read and displayed. A topographic model is also displayed. We can notice that the cells are formed by small hills in the topographic model. We can also notice that the image is very noisy. ``` a_pil = Image.open('data/corneacells.tif').convert('L') # b = mmsurf(a); a_pil a = np.array (a_pil) ``` # Filtering and cell detection The image is filtered by an alternating sequential filtering with size 2. This filter is composed of openings and closings, removing small peaks and valleys. Next, the regional maxima are detected. For illustrative purposes, they are displayed overlaid on the topographic image view. These regional maxima are the markers for each cell. If anything goes wrong in this step, the error will be propagated throughout the process. ``` c = ia.iaasf(a,'OC',ia.iasecross(),2) d = ia.iaregmax(c) Image.fromarray(ia.iagsurf(c)) Image.fromarray(ia.iagshow(ia.iagsurf(c), d).transpose(1,2,0)) ``` # Find the background marker Following the paradigm of segmentation by watershed, the background marker is detected by applying the constrained watershed on the negation of the cells image using the markers detected in the last step. These watershed lines partition the image into regions of influence of each cell. For illustrative display, the negative of the cell image is displayed overlaid by the markers on the left, and also overlaid by the watershed lines on the right.
``` e = ia.ianeg(a); f = ia.iacwatershed(e, d, ia.iasebox()); Image.fromarray(ia.iagshow(e, d).transpose(1, 2, 0)) Image.fromarray(ia.iagshow(e, f, d).transpose(1, 2, 0)) ``` # Labeling the markers and gradient As the internal and external markers can be touching, we combine the external marker with value 1 with the labeling of the internal markers added by 1. The labeled marker image is shown on the left. The final watershed will be applied on the gradient of the original image, which is shown on the right. ``` g = ia.iagray(f, 'uint16', 1); h1 = ia.iaaddm(ia.ialabel(d), np.uint16(1)); h = ia.iaintersec(ia.iagray(d,'uint16'), h1); i = ia.iaunion( g, h); Image.fromarray(ia.iaglblshow(i).transpose(1, 2, 0)) #mmshow(j); j = ia.iagradm( a) Image.fromarray((255.*j/j.max()).astype(np.uint8)) ``` ## Constrained watershed of the gradient from markers Apply the constrained watershed on the gradient from the labeled internal and external markers. Show the watershed lines on the left and the results overlayed on the original image, on the right. ``` k = ia.iacwatershed(j, i) Image.fromarray(k.astype(np.uint8)*255) Image.fromarray(ia.iagshow(a, k, k).transpose(1, 2, 0)) ```
github_jupyter
# Advanced Bayes Search CV Example This is a more advanced example of how the `BayesSearchCV` class can be applied - it's recommended that you first read through the simpler `bayes_search_cv_example`. The `BayesSearchCV` class is used to search for the set of hyperparameters that produce the best decision engine performance for a given Iguanas Pipeline, whilst also reducing the likelihood of overfitting. The process is as follows: * Generate k-fold stratified cross validation datasets. * For each of the training and validation datasets: * Fit the pipeline on the training set using a set of parameters chosen by the Bayesian Optimiser from a given set of ranges. * Apply the pipeline to the validation set to return a prediction. * Use the provided `scorer` to calculate the score of the prediction. * Return the parameter set which generated the highest mean overall score across the validation datasets. In this example, we'll consider the following more advanced workflow (compared to the standard `bayes_search_cv_example` notebook), which considers the generation of a Rules-Based System for a credit card fraud transaction use case: <center><img src="images/complex_example.png"/></center> Here, we have a fraud detection use case, and we're aiming to create two distinct rule sets - one for flagging fraudulent behaviour (which we'll refer to as our **Reject** rule set); one for flagging good behaviour (which we'll refer to as our **Approve** rule set). Each of these rule sets will be comprised of a generated rule set and an existing rule set. We'll optimise and filter these two rule sets separately, then combine and feed them into the decision engine optimiser. **Note:** we optimise the generated rules as they'll be created using the `RuleGeneratorDT` class, which generates rules from the branches of decision trees - these split based on gini or entropy - so we can further optimise them for a specific metric. 
**The decision engine will have the following constraint:** for a given transaction, if any approve rules fire it will be approved; else, if any reject rules fire it will be rejected; else, it will be approved. We'll use the `BayesSearchCV` class to optimise the hyperparameters of the steps in this workflow, **ensuring that we maximise the revenue for our decision engine.** --- ## Import packages ``` from iguanas.rule_generation import RuleGeneratorDT from iguanas.rule_selection import SimpleFilter, CorrelatedFilter, GreedyFilter, BayesSearchCV from iguanas.metrics import FScore, Precision, Revenue, JaccardSimilarity from iguanas.rbs import RBSOptimiser, RBSPipeline from iguanas.correlation_reduction import AgglomerativeClusteringReducer from iguanas.pipeline import LinearPipeline, ParallelPipeline from iguanas.pipeline.class_accessor import ClassAccessor from iguanas.space import UniformFloat, UniformInteger, Choice from iguanas.rules import Rules from iguanas.rule_optimisation import BayesianOptimiser import pandas as pd import numpy as np from sklearn.model_selection import train_test_split from category_encoders.one_hot import OneHotEncoder from sklearn.ensemble import RandomForestClassifier from sklearn.impute import SimpleImputer ``` ## Read in data Let's read in the [credit card fraud dataset](https://www.kaggle.com/mlg-ulb/creditcardfraud) from Kaggle. **Note:** this data has been altered to include some null values in the `V1` column. This is to simulate unprocessed data (the dataset on Kaggle has been processed using PCA, so there are no null values). It has also been randomly sampled to 10% of its original number of records, to reduce the file size. 
``` target_col = 'Class' time_col = 'Time' amt_col = 'Amount' # Ready in data df = pd.read_csv('dummy_data/creditcard.csv') # Sort data by time ascending df.sort_values(time_col, ascending=True) # Create X and y dataframes X = df.drop([target_col, time_col], axis=1) y = df[target_col] X_train_raw, X_test_raw, y_train, y_test = train_test_split( X, y, test_size=0.33, random_state=42 ) ``` To calculate the **Revenue**, we need the monetary amount of each transaction - we'll use these later: ``` amts_train = X_train_raw[amt_col] amts_test = X_test_raw[amt_col] ``` ### Process data Let's impute the null values with the mean: ``` imputer = SimpleImputer(strategy='mean') X_train = pd.DataFrame( imputer.fit_transform(X_train_raw), columns=X_train_raw.columns, index=X_train_raw.index ) X_test = pd.DataFrame( imputer.transform(X_test_raw), columns=X_test_raw.columns, index=X_test_raw.index ) # Check nulls have been imputed X_train.isna().sum().sum(), X_test.isna().sum().sum() ``` ### Existing rules Let's also assume we have the following existing rules, stored in the standard Iguanas string format: ``` reject_rule_strings = { "ExistingReject1": "((X['V1']<0)|(X['V1'].isna()))&(X['V3']<1)", "ExistingReject2": "(X['V2']>3)", } approve_rule_strings = { "ExistingApprove1": "(X['V1']>0)&(X['V3']>1)", "ExistingApprove2": "(X['V2']<3)", "ExistingApprove3": "(X['V4']<3)" } ``` We can create a `Rules` class for each of these: ``` reject_rules = Rules(rule_strings=reject_rule_strings) approve_rules = Rules(rule_strings=approve_rule_strings) ``` Then convert them to the standard Iguanas lambda expression format (we'll need this for the optimisation step): ``` reject_rule_lambdas = reject_rules.as_rule_lambdas( as_numpy=False, with_kwargs=True ) approve_rule_lambdas = approve_rules.as_rule_lambdas( as_numpy=False, with_kwargs=True ) ``` ---- ## Set up pipeline Before we can apply the `BayesSearchCV` class, we need to set up our pipeline. 
To create the workflow shown at the beginning of the notebook, we must use a combination of `LinearPipeline` and `ParallelPipeline` classes as shown below: ![title](images/complex_example_setup.png) Let's begin building the **Reject *LinearPipeline***. ### Reject *LinearPipeline* Let's first instantiate the classes that we'll use in the pipeline: ``` # F1 Score f1 = FScore(beta=1) # Precision p = Precision() # Rule generation reject_gen = RuleGeneratorDT( metric=f1.fit, n_total_conditions=2, tree_ensemble=RandomForestClassifier( n_estimators=10, random_state=0 ), target_feat_corr_types='Infer', rule_name_prefix='Reject' # Set this so generated reject rules distinguishable from approve rules ) # Rule optimisation (for generated rules) reject_gen_opt = BayesianOptimiser( rule_lambdas=ClassAccessor( class_tag='reject_gen', class_attribute='rule_lambdas' ), lambda_kwargs=ClassAccessor( class_tag='reject_gen', class_attribute='lambda_kwargs' ), metric=f1.fit, n_iter=10 ) # Rule optimisation (for existing rules) reject_opt = BayesianOptimiser( rule_lambdas=reject_rule_lambdas, lambda_kwargs=reject_rules.lambda_kwargs, metric=f1.fit, n_iter=10 ) # Rule filter (performance-based) reject_sf = SimpleFilter( threshold=0.1, operator='>=', metric=f1.fit ) # Rule filter (correlation-based) js = JaccardSimilarity() reject_cf = CorrelatedFilter( correlation_reduction_class=AgglomerativeClusteringReducer( threshold=0.9, strategy='top_down', similarity_function=js.fit, metric=f1.fit ), rules=ClassAccessor( class_tag='reject_gen', class_attribute='rules' ) ) ``` Now we can create our **Reject Rule Generation *LinearPipeline***. 
Note that we pass the tag for the optimisation of the generated rules to the `use_init_data` parameter, so that the feature set is passed to the `BayesianOptimiser` class, rather than the output from the `RuleGeneratorDT`: ``` reject_gen_lp = LinearPipeline( steps = [ ('reject_gen', reject_gen), ('reject_gen_opt', reject_gen_opt), ], use_init_data=['reject_gen_opt'] ) ``` And then our **Reject *ParallelPipeline*** (noting that one of the steps in this pipeline is the **Reject Rule Generation *LinearPipeline*** created above): ``` reject_pp = ParallelPipeline( steps = [ ('reject_gen_lp', reject_gen_lp), ('reject_opt', reject_opt), ] ) ``` And then finally, our **Reject *LinearPipeline***: ``` reject_lp = LinearPipeline( steps = [ ('reject_pp', reject_pp), ('reject_sf', reject_sf), ('reject_cf', reject_cf) ] ) ``` Now we can do the same for the **Approve *LinearPipeline***: ### Approve *LinearPipeline* Let's first instantiate the classes that we'll use in the pipeline: ``` # Rule generation approve_gen = RuleGeneratorDT( metric=f1.fit, n_total_conditions=2, tree_ensemble=RandomForestClassifier( n_estimators=10, random_state=0 ), target_feat_corr_types='Infer', rule_name_prefix='Approve' # Set this so generated reject rules distinguishable from approve rules ) # Rule optimisation (for generated rules) approve_gen_opt = BayesianOptimiser( rule_lambdas=ClassAccessor( class_tag='approve_gen', class_attribute='rule_lambdas' ), lambda_kwargs=ClassAccessor( class_tag='approve_gen', class_attribute='lambda_kwargs' ), metric=f1.fit, n_iter=10 ) # Rule optimisation (for existing rules) approve_opt = BayesianOptimiser( rule_lambdas=approve_rule_lambdas, lambda_kwargs=approve_rules.lambda_kwargs, metric=f1.fit, n_iter=10 ) # Rule filter (performance-based) approve_sf = SimpleFilter( threshold=0.1, operator='>=', metric=f1.fit ) # Rule filter (correlation-based) js = JaccardSimilarity() approve_cf = CorrelatedFilter( correlation_reduction_class=AgglomerativeClusteringReducer( 
threshold=0.9, strategy='top_down', similarity_function=js.fit, metric=f1.fit ), rules=ClassAccessor( class_tag='approve_gen', class_attribute='rules' ) ) ``` Now we can create our **Approve Rule Generation *LinearPipeline***. Note that we pass the tag for the optimisation of the generated rules to the `use_init_data` parameter, so that the feature set is passed to the `BayesianOptimiser` class, rather than the output from the `RuleGeneratorDT`: ``` approve_gen_lp = LinearPipeline( steps = [ ('approve_gen', approve_gen), ('approve_gen_opt', approve_gen_opt), ], use_init_data=['approve_gen_opt'] ) ``` And then our **Approve *ParallelPipeline*** (noting that one of the steps in this pipeline is the **Approve Rule Generation *LinearPipeline*** created above): ``` approve_pp = ParallelPipeline( steps = [ ('approve_gen_lp', approve_gen_lp), ('approve_opt', approve_opt), ] ) ``` And then finally, our **Approve *LinearPipeline***: ``` approve_lp = LinearPipeline( steps = [ ('approve_pp', approve_pp), ('approve_sf', approve_sf), ('approve_cf', approve_cf) ] ) ``` Now we can move on to constructing the **Overall Pipelines:** ### Overall Pipelines First, we'll construct our **Overall *ParallelPipeline*** using the **Reject *LinearPipeline*** and **Approve *LinearPipeline***: ``` overall_pp = ParallelPipeline( steps = [ ('reject_lp', reject_lp), ('approve_lp', approve_lp) ] ) ``` Now we can instantiate the decision engine optimiser. 
Since we have a constraint on the decision engine (if any approve rules fire, approve the transaction; else if any reject rules fire, reject the transaction; else approve the transaction), we pass the rules remaining after the filtering stages to the relevant elements in the `config` parameter of the `RBSPipeline` class, using the `ClassAccessor` class: ``` # Decision engine optimisation metric opt_metric = Revenue( y_type='Fraud', chargeback_multiplier=3 ) # Decision engine (to be optimised) rbs_pipeline = RBSPipeline( config=[ [ 0, ClassAccessor( # If any approve rules fire, approve class_tag='approve_cf', class_attribute='rules_to_keep' ), ], [ 1, ClassAccessor( # Else if any reject rules fire, reject class_tag='reject_cf', class_attribute='rules_to_keep' ) ], ], final_decision=0 # Else approve ) # Decision engine optimiser rbs_optimiser = RBSOptimiser( pipeline=rbs_pipeline, metric=opt_metric.fit, rules=ClassAccessor( class_tag='overall_pp', class_attribute='rules' ), n_iter=10 ) ``` Finally, we can instantiate our **Overall *LinearPipeline***: ``` overall_lp = LinearPipeline( steps=[ ('overall_pp', overall_pp), ('rbs_optimiser', rbs_optimiser) ] ) ``` ## Define the search space Now we need to define the search space for each of the relevant parameters of our pipeline. **Note:** this example does not search across all hyperparameters - you should define your own search spaces based on your use case. To do this, we create a dictionary, where each key corresponds to the tag used for the relevant pipeline step. Each value should be a dictionary of the parameters (keys) and their search spaces (values). 
Search spaces should be defined using the classes in the `iguanas.space` module: ``` # Define additional FScores f0dot5 = FScore(beta=0.5) f0dot25 = FScore(beta=0.25) search_spaces = { 'reject_gen': { 'n_total_conditions': UniformInteger(2, 7), }, 'reject_gen_opt': { 'metric': Choice([f0dot25.fit, f0dot5.fit, f1.fit]), }, 'reject_sf': { 'threshold': UniformFloat(0, 1), }, 'reject_cf': { 'correlation_reduction_class': Choice( [ AgglomerativeClusteringReducer( threshold=0.9, strategy='top_down', similarity_function=js.fit, metric=f1.fit ), AgglomerativeClusteringReducer( threshold=0.95, strategy='top_down', similarity_function=js.fit, metric=f1.fit ) ] ) }, 'approve_gen': { 'n_total_conditions': UniformInteger(2, 7), }, 'approve_gen_opt': { 'metric': Choice([f0dot25.fit, f0dot5.fit, f1.fit]), }, 'approve_sf': { 'threshold': UniformFloat(0, 1), }, 'approve_cf': { 'correlation_reduction_class': Choice( [ AgglomerativeClusteringReducer( threshold=0.9, strategy='top_down', similarity_function=js.fit, metric=f1.fit ), AgglomerativeClusteringReducer( threshold=0.95, strategy='top_down', similarity_function=js.fit, metric=f1.fit ) ] ) } } ``` ## Optimise the pipeline hyperparameters Now that we have our pipeline and search spaces defined, we can instantiate the `BayesSearchCV` class. We'll split our data into 3 cross-validation datasets and try 10 different parameter sets. **Note:** since we're using the `Revenue` as the scoring metric for the `BayesSearchCV` class, we need to set the `sample_weight_in_val` parameter to `True`. This ensures that the `sample_weight` passed to the final step in the pipeline is used when applying the `metric` function to the prediction of each validation set (for `Revenue`, the `sample_weight` corresponds to the monetary amount of each transaction, which is required). 
``` bs = BayesSearchCV( pipeline=overall_lp, search_spaces=search_spaces, metric=opt_metric.fit, # Use the same metric as the RBSOptimiser cv=3, n_iter=10, num_cores=3, error_score=0, verbose=1, sample_weight_in_val=True # Set to True ) ``` Finally, we can run the `fit` method to optimise the hyperparameters of the pipeline. **Note the following:** * The existing rules contain conditions that rely on unprocessed data (in this case, there are conditions that check for nulls). So for the rule optimisation steps, we must use the unprocessed training data `X_train_raw`; for the rule generation steps, we must use the processed training data `X_train`. * Since we're generating and optimising rules that flag both positive and negative cases (i.e. reject and approve rules in this example), we need to specify what the target is in each case. For the reject rules, we can just use `y_train`, however for the approve rules, we need to flip `y_train` (so that the rule generator and rule optimisers target the negative cases). * We need the `amts_train` to be passed to the `sample_weight` parameter of the `RBSOptimiser`, as we're optimising the decision engine for the `Revenue`. ``` bs.fit( X={ 'reject_lp': X_train, # Use processed features for rule generation 'reject_opt': X_train_raw, # Use raw features for optimising existing rules 'approve_lp': X_train, # Use processed features for rule generation 'approve_opt': X_train_raw # Use raw features for optimising existing rules }, y={ 'reject_lp': y_train, # Use target for Reject LinearPipeline 'approve_lp': 1-y_train, # Flip target for Approve LinearPipeline 'rbs_optimiser': y_train # Use target for RBSOptimiser }, sample_weight={ 'reject_lp': None, # No sample_weight for Reject LinearPipeline 'approve_lp': None, # No sample_weight for Approve LinearPipeline 'rbs_optimiser': amts_train # sample_weight for RBSOptimiser } ) ``` ### Outputs The `fit` method doesn't return anything. 
See the `Attributes` section in the class docstring for a description of each attribute generated: ``` bs.best_score bs.best_params bs.best_index bs.cv_results.head() ``` To see the final optimised decision engine configuration and rule set, we first return the parameters of the trained pipeline (stored in the attribute `pipeline_`): ``` pipeline_params = bs.pipeline_.get_params() ``` Then, to see the final optimised decision engine configuration, we filter to the `config` parameter of the `rbs_optimiser` step: ``` final_config = pipeline_params['rbs_optimiser']['config'] final_config ``` This shows us which rules should be used for the approval step (decision `0`) and which rules should be used for the rejection step (decision `1`). To see the logic of our final set of rules, we filter to the `rules` parameter of the `rbs_optimiser` step: ``` final_rules = bs.pipeline_.get_params()['rbs_optimiser']['rules'] ``` Then extract the `rule_strings` attribute: ``` final_rules.rule_strings ``` ## Apply the optimised pipeline We can apply our optimised pipeline to a new data set and make a prediction using the `predict` method: ``` y_pred_test = bs.predict(X_test) ``` ### Outputs The `predict` method returns the prediction generated by class in the final step of the pipeline - in this case, the `RBSOptimiser`: ``` y_pred_test ``` We can now calculate the **Revenue** of our optimised pipeline using the test data: ``` rev_opt = opt_metric.fit( y_preds=y_pred_test, y_true=y_test, sample_weight=amts_test ) ``` Comparing this to our original, unoptimised pipeline: ``` overall_lp.fit( X={ 'reject_gen_lp': X_train, 'reject_opt': X_train_raw, 'approve_gen_lp': X_train, 'approve_opt': X_train_raw }, y={ 'reject_lp': y_train, 'approve_lp': 1-y_train, 'rbs_optimiser': y_train }, sample_weight={ 'reject_lp': None, 'approve_lp': None, 'rbs_optimiser': y_train } ) y_pred_test_init = overall_lp.predict(X_test) rev_init = opt_metric.fit( y_preds=y_pred_test_init, y_true=y_test, 
sample_weight=amts_test ) print(f'Revenue of original, unoptimised pipeline: ${round(rev_init)}') print(f'Revenue of optimised pipeline: ${round(rev_opt)}') print(f'Absolute improvement in Revenue: ${round(rev_opt-rev_init)}') print(f'Percentage improvement in Revenue: {round(100*(rev_opt-rev_init)/rev_init, 2)}%') ``` ---
github_jupyter
# Preparing news vectors We want to use [word2vec] pre-computed word vectors to approximate the semantic distance between user queries and dictionary definitions. See Daniel Dacanay, Antti Arppe, and Atticus Harrigan, [Computational Analysis versus Human Intuition: A Critical Comparison of Vector Semantics with Manual Semantic Classification in the Context of Plains Cree][vecpaper1], in Proceedings of the 4th Workshop on the Use of Computational Methods in the Study of Endangered Languages; and Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean, [Efficient Estimation of Word Representations in Vector Space][eewrvs], in Proceedings of Workshop at ICLR, 2013. [eewrvs]: http://arxiv.org/pdf/1301.3781.pdf [word2vec]: https://code.google.com/archive/p/word2vec/ [vecpaper1]: https://computel-workshop.org/wp-content/uploads/2021/02/2021.computel-1.5.pdf But first we are going to massage the precomputed vectors into an easier-to-use form. There are a couple of things to do first: - Use a file format that’s faster to load - Save time and space by pruning keys we’ll never query for ## The upstream files First, we’ll store the files in the `res/vector_models` directory. Let’s make a variable that points at that. ``` import os from pathlib import Path # jupyter does not expose the filename of the notebook, and # the kernel working directory appears to be the directory # containing the first notebook opened in the jupyter session. 
def find_project_root(target_filename='Pipfile'): """Walk upwards from current dir, looking for target_filename""" start_directory = directory = Path(os.getcwd()) while directory.parent != directory: if (directory / target_filename).exists(): return directory directory = directory.parent else: raise Exception(f'Could not find {target_filename!r} in any parent of {start_directory}') return directory ROOT = find_project_root() VECTOR_DIR = ROOT / 'CreeDictionary' / 'res' / 'vector_models' ``` The upstream `GoogleNews-vectors-negative300.bin.gz` file is not checked in here, so you’ll have to get it elsewhere. ``` !env BLOCK_SIZE="'1" ls -s $VECTOR_DIR %%time from gensim.models import KeyedVectors vectors = VECTOR_DIR / 'GoogleNews-vectors-negative300.bin.gz' wv = KeyedVectors.load_word2vec_format(vectors, binary=True) ``` It’s called a keyed vector because it maps keys to vectors. If we run some basic stats, we see: there are 3 million, 300-dimensional vectors. ``` import numpy as np def shortprint(a): with np.printoptions(threshold=10): print(a) shortprint(wv['hello']) len(wv['hello']) len(wv.key_to_index) ``` We can query for similar concepts: ``` wv.similar_by_vector(wv['hello']) ``` And then the deep magic is that this vector model appears to capture semantic relationships. Take the physics away from Einstein, add painting, and what do you get? ``` wv.similar_by_vector(wv['Einstein'] - wv['physics'] + wv['painting']) ``` ### Faster file format Let’s use the built-in gensim file format, which saves the vectors into a memory-mapping numpy array on disk. ``` import os REDUCED_FILE = os.fspath(VECTOR_DIR / 'news_vectors.kv') wv.save(REDUCED_FILE) !env BLOCK_SIZE="'1" ls -s $VECTOR_DIR %%time wv = KeyedVectors.load(REDUCED_FILE, mmap='r') with np.printoptions(threshold=10): print(wv['hello']) wv.similar_by_vector(wv['hello']) %%time wv.similar_by_vector(wv['hello']) ``` This file is *much* faster to load, ~1 second instead of 30 seconds, but it is also much larger. 
#### Float precision Interestingly, if we look inside the original file, we only started out with 16-bit floats, but they’re being stored as 32-bit ones. We can halve the file size by setting the data type correctly. ``` !zcat $VECTOR_DIR/GoogleNews-vectors-negative300.bin.gz \ | head -c 300M | tail -c 256 | hexdump -C wv.vectors.dtype wv2 = KeyedVectors.load(REDUCED_FILE, mmap='r') wv2.vectors = wv2.vectors.astype('float16') shortprint(wv2['hello']) ``` But, sadly, doing so makes lookups take more than **15x** as long, going from a fraction of a second to multiple seconds. This is because modern CPUs do not generally have built-in 16-bit float operations. ``` %%time wv2.similar_by_vector(wv2['hello']) %%time wv.similar_by_vector(wv['hello']) ``` If we need to be careful with disk space, we could save the vectors on disk as float16 and then do `.astype('float32')` on load, which would only takes a few seconds. It would use more disk space, but may be much faster than dealing with a compressed file. However, anything that’s not `mmap`ing a file gets risky in terms of memory use. A few gigs of data in memory isn’t a big deal for a server with lots of RAM, but (1) if the data isn’t all ready *before* the webserver forks worker processes, 10 copies of a few gigs of data adds up quickly, and (2) it could substantially increase the requirements for developer machines, which might not have many gigabytes of spare RAM. So for now I think we’ll stick with the bigger file that can be processed more efficiently both in terms of RAM and CPU. ## Pruning keys ### Keys with punctuation The file is still quite large. There’s probably a *lot* of stuff in there we will never, ever query for. For example, what’s the millionth entry? ``` wv.index_to_key[1_000_000] ``` That’s not something we’ll ever query the dictionary for. What are the top keys? 
``` keys = list(wv.key_to_index.keys()) ", ".join(keys[:100]) ``` Right away we see that `#`—presumably a placeholder for a number—and `$` are common terms. What keys containing punctuation can we drop? ``` from collections import Counter import string [(char, f"{count:,}") for (char, count) in Counter(''.join(keys)).most_common() if char not in string.ascii_letters + string.digits][:30] ``` We also see some duplication in terms of case; both “it” and “It” appear as keys. ``` wv.similar_by_key('It') wv.similar_by_key('it') ``` The distinction would definitely be useful for some purposes, but our dictionary lowercases all queries on input, so that would be lost on us. ``` import re re_double_underscore = re.compile('.*_.*_.*') def figure_out_items_to_keep(): # new_key, vector to_keep = {} # The original data does not seem to include frequencies, but we # assume that the keys are in frequency order, so we will see # the most common term first. for key in keys: pruned_key = key.lower() if pruned_key in to_keep: continue # drop keys with unwanted punctuation if any(c in key for c in "$#.=/'@:,®+&*™•"): continue has_uppercase_char = key != key.lower() if has_uppercase_char: if '_' in key: continue # Skip items like “Dow_Jones_industrial” if re_double_underscore.match(key): continue to_keep[pruned_key] = wv[key] return to_keep items_to_keep = figure_out_items_to_keep() len(items_to_keep) ``` ### Taking a top-$n$ subset That’s still a lot of keys, and the ones toward the end don’t seem very useful. ``` from itertools import islice offset, n = 500_000, 10; print(list(islice(items_to_keep.keys(), offset, offset + n))) ``` At 100,000 keys in, we still seem to have some more common terms: ``` offset, n = 100_000, 10; print(list(islice(items_to_keep.keys(), offset, offset + n))) ``` And 250k isn’t too bad either ``` offset, n = 250_000, 10; print(list(islice(items_to_keep.keys(), offset, offset + n))) ``` But, subjectively, the bits at 300k don’t seem too useful? 
``` offset, n = 300_000, 10; print(list(islice(items_to_keep.keys(), offset, offset + n))) ``` It’s a pretty arbitrary cut-off, but let’s just take the top 300,000 keys. ``` threshold = 300_000 new_wv = KeyedVectors(vector_size=wv.vector_size) new_keys = list(items_to_keep.keys())[:threshold] new_values = list(items_to_keep.values())[:threshold] new_wv.add_vectors(new_keys, new_values) ``` ## The pruned file A quick check that things look ok: ``` new_wv.similar_by_key('hello') ``` Well, that’s disappointingly different—and lower quality—compared to the uppercase version, but it’s actually a fairly uncommon word in news articles. ``` wv.key_to_index['hello'] ``` What about a more common word? ``` wv.key_to_index['train'] new_wv.similar_by_key('train') ``` That seems just fine. Let’s try it for now and revisit it if we run into issues with query quality from not having/trying the uppercase versions, or speed issues from having too many keys. ``` new_wv.save(REDUCED_FILE) !env BLOCK_SIZE="'1" ls -s $VECTOR_DIR ```
github_jupyter
``` # %% import pandas as pd from datetime import datetime import numpy as np from pathlib import Path import matplotlib.pylab as pl # %% # CONFIGS class pathMap(): def __init__(self) -> None: scratch = '/scratch/enis/data/nna/' home = '/home/enis/projects/nna/' self.data_folder = home + 'data/' self.exp_dir = '/home/enis/projects/nna/src/nna/exp/megan/run-3/' self.clipping_results_path = Path(scratch + 'clipping_info/all_data_2021-02-08/') self.output_dir = scratch + 'real/' self.file_properties_df_path = self.data_folder + '/allFields_dataV4.pkl' self.results_folder = home + 'results/' self.vis_output_path = self.results_folder + 'vis/bars_test_V2/' def setup_configs(): pathmap = pathMap() config = {} versiontag = 'multi9-V1' id2name={'1-0-0': 'biophony', '1-1-0': 'bird', '1-1-10': 'songbirds', '1-1-7': 'duck-goose-swan', '0-0-0': 'anthrophony', '1-3-0': 'insect', '1-1-8': 'grouse-ptarmigan', '0-2-0': 'aircraft', '3-0-0': 'silence'} generic_id2name = list(id2name.items()) id2name = {} for k, v in generic_id2name: id2name[f'{versiontag}-{k}'] = v config['id2name'] = id2name config['input_data_freq'] = '10S' # FREQS to reduce results config['output_data_freq'] = '270min' classname2colorindex = { 'anthrophony':14, 'auto':15, 'aircraft':9, 'wind':18, 'running water':0, 'silence':1, 'bird':4, 'songbirds':5, 'duck-goose-swan':16, 'grouse-ptarmigan':17, 'biophony':8, 'insect':10, 'mammal':2, } config['classname2colorindex'] = classname2colorindex # cmap to use aCmap = pl.cm.tab20 norm_cmaps = visutils.add_normal_dist_alpha(aCmap) # cached results src='/home/enis/projects/nna/src/scripts/' csv_file=src+'dalton/03/aggregated/multi9-V1_prob2binary=False_output-data-freq=270min_prob.csv' cached_preds = {('dalton','03'):csv_file} config['cached_preds'] = cached_preds config['norm_cmaps'] = norm_cmaps return pathmap, config def setup(args, pathmap, region_location): file_properties_df = pd.read_pickle(pathmap.file_properties_df_path) #important to keep them in order 
file_properties_df.sort_values(by=['timestamp'], inplace=True) # delete older than 2016 # delete older than 2016 fromtime = datetime(2016, 1, 1, 0) file_properties_df = file_properties_df[ file_properties_df.timestamp >= fromtime] # fromtime = datetime(2019, 5, 1, 0) # file_properties_df = file_properties_df[ # file_properties_df.timestamp >= fromtime] # fromtime = datetime(2019, 5, 30, 0) # file_properties_df = file_properties_df[ # file_properties_df.timestamp <= fromtime] if not region_location: # region_location = [('anwr','49'),('prudhoe','11'),('prudhoe','26')] region_location = tuple( sorted( set( zip(file_properties_df.region.values, file_properties_df.locationId.values)))) return region_location,file_properties_df # %% def sigmoid(data): return 1 / (1 + np.exp(-data)) from nna import visutils def vis_preds_with_clipping_local(region_location, config, file_properties_df, pathmap,norm_cmaps,cached_preds): no_result = {} for region, location_id in region_location: # print(region, all_regions.index(region),'location_id',location_id) filtered_files = file_properties_df[file_properties_df.region == region] filtered_files = filtered_files[filtered_files.locationId == location_id] filtered_files = filtered_files[filtered_files.durationSec > 0] figures_axes, no_result_paths = visutils.vis_preds_with_clipping( region, location_id, filtered_files, config['input_data_freq'], config['output_data_freq'], config['id2name'].keys(), norm_cmaps, pathmap.output_dir, pathmap.clipping_results_path, pathmap.vis_output_path, config['id2name'], clipping_threshold=1.0, pre_process_func=sigmoid, classname2colorindex=config.get('classname2colorindex',None), cached_pred=cached_preds[(region,location_id)]) return figures_axes, no_result_paths pathmap, config = setup_configs() location ='03' region='dalton' config['output_data_freq']='270min' if region!='' and location !='': region_location = [(region,location)] else: print('Region and location are not given, we will do all of them.') 
region_location = None region_location,file_properties_df = setup('', pathmap,region_location) print('now') cached_preds=config['cached_preds'] norm_cmaps=config['norm_cmaps'] figures_axes, no_result_paths = vis_preds_with_clipping_local(region_location, config, file_properties_df, pathmap,norm_cmaps,cached_preds) fig,ax=figures_axes[0] csv_path = 'dalton03labels.csv' (letters,y_multi_labels_by_month, x_multi_label_by_month, classname2colorindex,color_indexes)=load_enis_labels4bars(csv_path,classname2colorindex) norm_cmaps=add_equally_spaced_bars(letters,y_multi_labels_by_month, x_multi_label_by_month,ax,color_indexes,norm_cmaps) add_legend(ax,classname2colorindex,norm_cmaps,legend_ax_index=0,) fig.tight_layout() fig.subplots_adjust(top=0.95) ```
github_jupyter
```
import dgl.nn as dglnn
from dgl import from_networkx
import torch.nn as nn
import torch as th
import torch.nn.functional as F
import dgl.function as fn
from dgl.data.utils import load_graphs
import networkx as nx
import pandas as pd
import socket
import struct
import random
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np

# ---------------------------------------------------------------------------
# Data loading / preparation (BoT-IoT flow records).
# ---------------------------------------------------------------------------
data = pd.read_csv('./bot.csv')
# Drop identifiers and the string duplicates of the *_number columns.
data.drop(columns=['subcategory','pkSeqID','stime','flgs','attack','state','proto','seq'], inplace=True)
data.rename(columns={"category": "label"}, inplace=True)
data.label.value_counts()

# Down-sample the dominant attack classes to 10% to reduce size and
# imbalance; keep all Normal and Theft flows (the rare classes).
DDoS = data[data['label'] == 'DDoS'].sample(frac=0.1)
DoS = data[data['label'] == 'DoS'].sample(frac=0.1)
Reconnaissance = data[data['label'] == 'Reconnaissance'].sample(frac=0.1)
Normal = data[data['label'] == 'Normal']
Theft = data[data['label'] == 'Theft']
data = pd.concat([DDoS, DoS, Reconnaissance, Normal, Theft])
data.label.value_counts()

# Encode string labels to integer class ids (fit and transform in one step).
le = LabelEncoder()
data['label'] = le.fit_transform(data.label.values)

# Node identifiers: randomize the source IP into the 172.16.0.0/12 range
# (anonymization of saddr), then make endpoints unique "ip:port" strings.
data['saddr'] = data.saddr.apply(str)
data['sport'] = data.sport.apply(str)
data['daddr'] = data.daddr.apply(str)
data['dport'] = data.dport.apply(str)
data['saddr'] = data.saddr.apply(
    lambda x: socket.inet_ntoa(struct.pack('>I', random.randint(0xac100001, 0xac1f0001))))
data['saddr'] = data['saddr'] + ':' + data['sport']
data['daddr'] = data['daddr'] + ':' + data['dport']
data.drop(columns=['sport','dport'], inplace=True)

# One-hot encode the remaining categorical columns and clean non-finite values.
data = pd.get_dummies(data, columns=['flgs_number','state_number', 'proto_number'])
data = data.reset_index()
data.replace([np.inf, -np.inf], np.nan, inplace=True)
data.fillna(0, inplace=True)
label_ground_truth = data[["saddr", "daddr", "label"]]
data.drop(columns=['index'], inplace=True)
data

# Standardize every numeric column except the label.
scaler = StandardScaler()
cols_to_norm = list(set(data.iloc[:, 2:].columns) - {'label'})
data[cols_to_norm] = scaler.fit_transform(data[cols_to_norm])

# Stratified 70/30 edge split; X_* carry all columns, y_* only the labels.
X_train, X_test, y_train, y_test = train_test_split(
    data, label_ground_truth, test_size=0.3, random_state=42,
    stratify=label_ground_truth.label)

# Pack the normalized features of each flow into a single list-valued
# column 'h' so they travel as one edge attribute through networkx/DGL.
X_train['h'] = X_train[cols_to_norm].values.tolist()

# Build a directed multigraph whose nodes are "ip:port" endpoints and whose
# edges are flows, then convert to a DGL graph keeping 'h' and 'label'.
# (Alternative: cache with dgl.data.utils save_graphs/load_graphs.)
G = nx.from_pandas_edgelist(X_train, "saddr", "daddr", ['h','label'],
                            create_using=nx.MultiGraph())
G = G.to_directed()
G = from_networkx(G, edge_attrs=['h','label'])

# Eq1: initialize node features as all-ones with the same width as the edge
# features, mark every edge trainable, and add a singleton middle axis
# (N, 1, F) expected by the layers below.  (The original repeated these
# assignments twice; the duplicates were removed.)
G.ndata['h'] = th.ones(G.num_nodes(), G.edata['h'].shape[1])
G.edata['train_mask'] = th.ones(len(G.edata['h']), dtype=th.bool)
G.ndata['h'] = th.reshape(G.ndata['h'], (G.ndata['h'].shape[0], 1, G.ndata['h'].shape[1]))
G.edata['h'] = th.reshape(G.edata['h'], (G.edata['h'].shape[0], 1, G.edata['h'].shape[1]))

class MLPPredictor(nn.Module):
    """Edge classifier: scores an edge from the concatenation of its two
    endpoint embeddings through a single linear layer."""

    def __init__(self, in_features, out_classes):
        super().__init__()
        self.W = nn.Linear(in_features * 2, out_classes)

    def apply_edges(self, edges):
        h_u = edges.src['h']
        h_v = edges.dst['h']
        # NOTE(review): `emb` and `score` are exported as module globals only
        # so the visualization cells at the bottom can reuse the tensors from
        # the most recent forward pass.
        global score
        global emb
        emb = th.cat([h_u, h_v], 1)
        score = self.W(emb)
        return {'score': score}

    def forward(self, graph, h):
        with graph.local_scope():
            graph.ndata['h'] = h
            graph.apply_edges(self.apply_edges)
            return graph.edata['score']

G.ndata['h'].shape

def compute_accuracy(pred, labels):
    """Fraction of rows where the argmax class equals the label."""
    return (pred.argmax(1) == labels).float().mean().item()

class SAGELayer(nn.Module):
    """One E-GraphSAGE layer: messages are built from (src node, edge)
    features, mean-aggregated, then combined with the node's own state."""

    def __init__(self, ndim_in, edims, ndim_out, activation):
        super(SAGELayer, self).__init__()
        # Message MLP forces messages to a fixed output dimension.
        self.W_msg = nn.Linear(ndim_in + edims, ndim_out)
        # Update MLP applied to [own state || aggregated neighborhood].
        self.W_apply = nn.Linear(ndim_in + ndim_out, ndim_out)
        self.activation = activation

    def message_func(self, edges):
        return {'m': self.W_msg(th.cat([edges.src['h'], edges.data['h']], 2))}

    def forward(self, g_dgl, nfeats, efeats):
        with g_dgl.local_scope():
            g = g_dgl
            g.ndata['h'] = nfeats
            g.edata['h'] = efeats
            # Eq4: mean-aggregate messages over incident edges.
            g.update_all(self.message_func, fn.mean('m', 'h_neigh'))
            # Eq5: combine and activate.  Fixed: use the configured
            # `self.activation` instead of a hard-coded F.relu (all callers
            # pass F.relu, so default behavior is unchanged).
            g.ndata['h'] = self.activation(
                self.W_apply(th.cat([g.ndata['h'], g.ndata['h_neigh']], 2)))
            return g.ndata['h']

class SAGE(nn.Module):
    """Two-layer E-GraphSAGE encoder with dropout between layers; the
    singleton middle axis is summed out before returning node embeddings."""

    def __init__(self, ndim_in, ndim_out, edim, activation, dropout):
        super(SAGE, self).__init__()
        self.layers = nn.ModuleList()
        self.layers.append(SAGELayer(ndim_in, edim, 128, activation))
        self.layers.append(SAGELayer(128, edim, ndim_out, activation))
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, g, nfeats, efeats):
        for i, layer in enumerate(self.layers):
            if i != 0:
                nfeats = self.dropout(nfeats)
            nfeats = layer(g, nfeats, efeats)
        return nfeats.sum(1)

class Model(nn.Module):
    """Full pipeline: node encoder followed by the 5-class edge predictor."""

    def __init__(self, ndim_in, ndim_out, edim, activation, dropout):
        super().__init__()
        self.gnn = SAGE(ndim_in, ndim_out, edim, activation, dropout)
        self.pred = MLPPredictor(ndim_out, 5)

    def forward(self, g, nfeats, efeats):
        h = self.gnn(g, nfeats, efeats)
        return self.pred(g, h)

from sklearn.utils import class_weight

# Balanced class weights to counter the residual label imbalance.
# Fixed: use keyword arguments (required by modern scikit-learn).
class_weights = class_weight.compute_class_weight(
    class_weight='balanced',
    classes=np.unique(G.edata['label'].cpu().numpy()),
    y=G.edata['label'].cpu().numpy())
class_weights = th.FloatTensor(class_weights).cuda()
criterion = nn.CrossEntropyLoss(weight=class_weights)
# Fixed: the original then re-assigned `criterion = nn.CrossEntropyLoss()`,
# silently discarding the class weights computed above.

G = G.to('cuda:0')
G.device
G.ndata['h'].device
G.edata['h'].device

node_features = G.ndata['h']
edge_features = G.edata['h']
edge_label = G.edata['label']
train_mask = G.edata['train_mask']

model = Model(G.ndata['h'].shape[2], 128, G.ndata['h'].shape[2], F.relu, 0.2).cuda()
opt = th.optim.Adam(model.parameters())

# Full-graph training; log training accuracy every 100 epochs.
for epoch in range(1, 8000):
    pred = model(G, node_features, edge_features).cuda()
    loss = criterion(pred[train_mask], edge_label[train_mask])
    opt.zero_grad()
    loss.backward()
    opt.step()
    if epoch % 100 == 0:
        print('Epoch:', epoch, ' Training acc:',
              compute_accuracy(pred[train_mask], edge_label[train_mask]))

# ---------------------------------------------------------------------------
# Evaluation on the held-out edge split.
# ---------------------------------------------------------------------------
X_test['h'] = X_test[cols_to_norm].values.tolist()
G_test = nx.from_pandas_edgelist(X_test, "saddr", "daddr", ['h','label'],
                                 create_using=nx.MultiGraph())
G_test = G_test.to_directed()
G_test = from_networkx(G_test, edge_attrs=['h','label'])
actual = G_test.edata.pop('label')

# All-ones node features with the same width as the edge features.
# Fixed: derive the width from the data instead of the hard-coded 52.
feat_dim = G_test.edata['h'].shape[1]
G_test.ndata['feature'] = th.ones(G_test.num_nodes(), feat_dim)
G_test.ndata['feature'] = th.reshape(
    G_test.ndata['feature'],
    (G_test.ndata['feature'].shape[0], 1, G_test.ndata['feature'].shape[1]))
G_test.edata['h'] = th.reshape(
    G_test.edata['h'],
    (G_test.edata['h'].shape[0], 1, G_test.edata['h'].shape[1]))
G_test = G_test.to('cuda:0')
th.cuda.empty_cache()

import timeit
start_time = timeit.default_timer()
node_features_test = G_test.ndata['feature']
edge_features_test = G_test.edata['h']
test_pred = model(G_test, node_features_test, edge_features_test).cuda()
elapsed = timeit.default_timer() - start_time
print(str(elapsed) + ' seconds')

# Map predictions and ground truth back to the original string labels.
test_pred = test_pred.argmax(1)
test_pred = test_pred.cpu().detach().numpy()
edge_label = le.inverse_transform(actual)
test_pred = le.inverse_transform(test_pred)

def plot_confusion_matrix(cm, target_names, title='Confusion matrix', cmap=None, normalize=True):
    """Render a confusion matrix with per-cell counts (or row-normalized
    rates when `normalize` is True) and overall accuracy in the x-label."""
    import itertools

    accuracy = np.trace(cm) / float(np.sum(cm))
    misclass = 1 - accuracy
    if cmap is None:
        cmap = plt.get_cmap('Blues')

    plt.figure(figsize=(12, 12))
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    if target_names is not None:
        tick_marks = np.arange(len(target_names))
        plt.xticks(tick_marks, target_names, rotation=45)
        plt.yticks(tick_marks, target_names)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

    # White text on dark cells, black on light ones.
    thresh = cm.max() / 1.5 if normalize else cm.max() / 2
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        if normalize:
            plt.text(j, i, "{:0.4f}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
        else:
            plt.text(j, i, "{:,}".format(cm[i, j]),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
    plt.show()

from sklearn.metrics import confusion_matrix
plot_confusion_matrix(cm=confusion_matrix(edge_label, test_pred),
                      normalize=False,
                      target_names=np.unique(edge_label),
                      title="Confusion Matrix")

from sklearn.metrics import classification_report
target_names = np.unique(edge_label)
print(classification_report(edge_label, test_pred, target_names=target_names, digits=4))

# ---------------------------------------------------------------------------
# Embedding visualizations. `emb` / `score` are the globals captured during
# the last forward pass of MLPPredictor (the test pass above).
# NOTE(review): 'emb_viz.npy' appears to hold a 2-D projection of the saved
# embeddings produced outside this notebook — confirm the workflow.
# ---------------------------------------------------------------------------
emb_num = emb.cpu().detach().numpy()
np.save('emb_mul.npy', emb_num)
emb_viz = np.load('emb_viz.npy')
df_umap = pd.DataFrame(emb_viz, columns=['comp1', 'comp2'])
df_umap['label'] = edge_label
plt.figure(figsize=(8,8));
sns.scatterplot(x='comp1', y='comp2', data=df_umap, hue='label');
plt.legend(loc='upper left', frameon=False)

# Same visualization using the pre-softmax class scores.
emb_num = score.cpu().detach().numpy()
np.save('emb_mul.npy', emb_num)
emb_viz = np.load('emb_viz.npy')
df_umap = pd.DataFrame(emb_viz, columns=['comp1', 'comp2'])
df_umap['label'] = edge_label
plt.figure(figsize=(8,8));
sns.scatterplot(x='comp1', y='comp2', data=df_umap, hue='label');
plt.legend(loc='upper left', frameon=False)

# Raw-feature baseline for comparison.
# NOTE(review): 'raw.npy' is saved with all feature columns but reloaded into
# a 2-column frame — presumably replaced by an external projection; verify.
np.save('raw.npy', X_test.drop(columns=['saddr','daddr','h']).to_numpy())
raw_viz = np.load('raw.npy')
df_umap = pd.DataFrame(raw_viz, columns=['comp1', 'comp2'])
df_umap['label'] = le.inverse_transform(y_test.label)
plt.figure(figsize=(8,8));
sns.scatterplot(x='comp1', y='comp2', data=df_umap, hue='label');
plt.legend(loc='upper left', frameon=False)
```
github_jupyter
##### Copyright 2019 The TensorFlow Probability Authors. Licensed under the Apache License, Version 2.0 (the "License"); ``` #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" } # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ``` # TFP Probabilistic Layers: Regression <table class="tfo-notebook-buttons" align="left"> <td> <a target="_blank" href="https://www.tensorflow.org/probability/examples/Probabilistic_Layers_Regression"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a> </td> <td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a> </td> <td> <a target="_blank" href="https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a> </td> <td> <a href="https://storage.googleapis.com/tensorflow_docs/probability/tensorflow_probability/examples/jupyter_notebooks/Probabilistic_Layers_Regression.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a> </td> </table> In this example we show how to fit regression models using TFP's "probabilistic layers." 
### Dependencies & Prerequisites ``` #@title Import { display-mode: "form" } from pprint import pprint import matplotlib.pyplot as plt import numpy as np import seaborn as sns import tensorflow.compat.v2 as tf tf.enable_v2_behavior() import tensorflow_probability as tfp sns.reset_defaults() #sns.set_style('whitegrid') #sns.set_context('talk') sns.set_context(context='talk',font_scale=0.7) %matplotlib inline tfd = tfp.distributions ``` ### Make things Fast! Before we dive in, let's make sure we're using a GPU for this demo. To do this, select "Runtime" -> "Change runtime type" -> "Hardware accelerator" -> "GPU". The following snippet will verify that we have access to a GPU. ``` if tf.test.gpu_device_name() != '/device:GPU:0': print('WARNING: GPU device not found.') else: print('SUCCESS: Found GPU: {}'.format(tf.test.gpu_device_name())) ``` Note: if for some reason you cannot access a GPU, this colab will still work. (Training will just take longer.) ## Motivation Wouldn't it be great if we could use TFP to specify a probabilistic model then simply minimize the negative log-likelihood, i.e., ``` negloglik = lambda y, rv_y: -rv_y.log_prob(y) ``` Well not only is it possible, but this colab shows how! (In context of linear regression problems.) ``` #@title Synthesize dataset. w0 = 0.125 b0 = 5. x_range = [-20, 60] def load_dataset(n=150, n_tst=150): np.random.seed(43) def s(x): g = (x - x_range[0]) / (x_range[1] - x_range[0]) return 3 * (0.25 + g**2.) x = (x_range[1] - x_range[0]) * np.random.rand(n) + x_range[0] eps = np.random.randn(n) * s(x) y = (w0 * x * (1. + np.sin(x)) + b0) + eps x = x[..., np.newaxis] x_tst = np.linspace(*x_range, num=n_tst).astype(np.float32) x_tst = x_tst[..., np.newaxis] return y, x, x_tst y, x, x_tst = load_dataset() ``` ### Case 1: No Uncertainty ``` # Build model. model = tf.keras.Sequential([ tf.keras.layers.Dense(1), tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)), ]) # Do inference. 
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik)
model.fit(x, y, epochs=1000, verbose=False);

# Profit.
# Print the fitted weight values.
[print(np.squeeze(w.numpy())) for w in model.weights];
yhat = model(x_tst)
# The model's output is a tfd.Normal distribution, not a plain tensor.
assert isinstance(yhat, tfd.Distribution)

#@title Figure 1: No uncertainty.
# Pull the learned kernel/bias from the Dense layer for inspection.
w = np.squeeze(model.layers[-2].kernel.numpy())
b = np.squeeze(model.layers[-2].bias.numpy())

plt.figure(figsize=[6, 1.5])  # inches
#plt.figure(figsize=[8, 5])  # inches
plt.plot(x, y, 'b.', label='observed');
plt.plot(x_tst, yhat.mean(),'r', label='mean', linewidth=4);
plt.ylim(-0.,17);
plt.yticks(np.linspace(0, 15, 4)[1:]);
plt.xticks(np.linspace(*x_range, num=9));
ax=plt.gca();
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['bottom'].set_smart_bounds(True)
plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5))
plt.savefig('/tmp/fig1.png', bbox_inches='tight', dpi=300)
```

### Case 2: Aleatoric Uncertainty

```
# Build model.
# Dense(1 + 1): one output for the mean, one for the softplus-transformed scale.
model = tf.keras.Sequential([
  tf.keras.layers.Dense(1 + 1),
  tfp.layers.DistributionLambda(
      # scale floor of 1e-3 keeps the Normal's stddev strictly positive.
      lambda t: tfd.Normal(loc=t[..., :1],
                           scale=1e-3 + tf.math.softplus(0.05 * t[...,1:]))),
])

# Do inference.
model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik)
model.fit(x, y, epochs=1000, verbose=False);

# Profit.
# Print the fitted weight values.
[print(np.squeeze(w.numpy())) for w in model.weights];
yhat = model(x_tst)
# The model's output is a tfd.Normal distribution, not a plain tensor.
assert isinstance(yhat, tfd.Distribution)

#@title Figure 2: Aleatoric Uncertainty
plt.figure(figsize=[6, 1.5])  # inches
plt.plot(x, y, 'b.', label='observed');

# Plot the predicted mean with a +/- 2-stddev band.
m = yhat.mean()
s = yhat.stddev()
plt.plot(x_tst, m, 'r', linewidth=4, label='mean');
plt.plot(x_tst, m + 2 * s, 'g', linewidth=2, label=r'mean + 2 stddev');
plt.plot(x_tst, m - 2 * s, 'g', linewidth=2, label=r'mean - 2 stddev');

plt.ylim(-0.,17);
plt.yticks(np.linspace(0, 15, 4)[1:]);
plt.xticks(np.linspace(*x_range, num=9));
ax=plt.gca();
ax.xaxis.set_ticks_position('bottom')
ax.yaxis.set_ticks_position('left')
ax.spines['left'].set_position(('data', 0))
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_smart_bounds(True)
#ax.spines['bottom'].set_smart_bounds(True)
plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5))
plt.savefig('/tmp/fig2.png', bbox_inches='tight', dpi=300)
```

### Case 3: Epistemic Uncertainty

```
# Specify the surrogate posterior over `keras.layers.Dense` `kernel` and `bias`.
def posterior_mean_field(kernel_size, bias_size=0, dtype=None):
  # Mean-field posterior: an independent Normal per weight, parameterized by
  # 2*n trainable values (n locations + n unconstrained scales).
  n = kernel_size + bias_size
  # Shift so softplus(c + 0) == 1, i.e. scales start near 1.
  c = np.log(np.expm1(1.))
  return tf.keras.Sequential([
      tfp.layers.VariableLayer(2 * n, dtype=dtype),
      tfp.layers.DistributionLambda(lambda t: tfd.Independent(
          # 1e-5 floor keeps every stddev strictly positive.
          tfd.Normal(loc=t[..., :n],
                     scale=1e-5 + tf.nn.softplus(c + t[..., n:])),
          reinterpreted_batch_ndims=1)),
  ])

# Specify the prior over `keras.layers.Dense` `kernel` and `bias`.
def prior_trainable(kernel_size, bias_size=0, dtype=None):
  # Trainable prior: independent unit-scale Normals with learnable locations.
  n = kernel_size + bias_size
  return tf.keras.Sequential([
      tfp.layers.VariableLayer(n, dtype=dtype),
      tfp.layers.DistributionLambda(lambda t: tfd.Independent(
          tfd.Normal(loc=t, scale=1),
          reinterpreted_batch_ndims=1)),
  ])

# Build model.
model = tf.keras.Sequential([ tfp.layers.DenseVariational(1, posterior_mean_field, prior_trainable, kl_weight=1/x.shape[0]), tfp.layers.DistributionLambda(lambda t: tfd.Normal(loc=t, scale=1)), ]) # Do inference. model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik) model.fit(x, y, epochs=1000, verbose=False); # Profit. [print(np.squeeze(w.numpy())) for w in model.weights]; yhat = model(x_tst) assert isinstance(yhat, tfd.Distribution) #@title Figure 3: Epistemic Uncertainty plt.figure(figsize=[6, 1.5]) # inches plt.clf(); plt.plot(x, y, 'b.', label='observed'); yhats = [model(x_tst) for _ in range(100)] avgm = np.zeros_like(x_tst[..., 0]) for i, yhat in enumerate(yhats): m = np.squeeze(yhat.mean()) s = np.squeeze(yhat.stddev()) if i < 25: plt.plot(x_tst, m, 'r', label='ensemble means' if i == 0 else None, linewidth=0.5) avgm += m plt.plot(x_tst, avgm/len(yhats), 'r', label='overall mean', linewidth=4) plt.ylim(-0.,17); plt.yticks(np.linspace(0, 15, 4)[1:]); plt.xticks(np.linspace(*x_range, num=9)); ax=plt.gca(); ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.spines['left'].set_position(('data', 0)) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #ax.spines['left'].set_smart_bounds(True) #ax.spines['bottom'].set_smart_bounds(True) plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5)) plt.savefig('/tmp/fig3.png', bbox_inches='tight', dpi=300) ``` ### Case 4: Aleatoric & Epistemic Uncertainty ``` # Build model. model = tf.keras.Sequential([ tfp.layers.DenseVariational(1 + 1, posterior_mean_field, prior_trainable, kl_weight=1/x.shape[0]), tfp.layers.DistributionLambda( lambda t: tfd.Normal(loc=t[..., :1], scale=1e-3 + tf.math.softplus(0.01 * t[...,1:]))), ]) # Do inference. model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=negloglik) model.fit(x, y, epochs=1000, verbose=False); # Profit. 
[print(np.squeeze(w.numpy())) for w in model.weights]; yhat = model(x_tst) assert isinstance(yhat, tfd.Distribution) #@title Figure 4: Both Aleatoric & Epistemic Uncertainty plt.figure(figsize=[6, 1.5]) # inches plt.plot(x, y, 'b.', label='observed'); yhats = [model(x_tst) for _ in range(100)] avgm = np.zeros_like(x_tst[..., 0]) for i, yhat in enumerate(yhats): m = np.squeeze(yhat.mean()) s = np.squeeze(yhat.stddev()) if i < 15: plt.plot(x_tst, m, 'r', label='ensemble means' if i == 0 else None, linewidth=1.) plt.plot(x_tst, m + 2 * s, 'g', linewidth=0.5, label='ensemble means + 2 ensemble stdev' if i == 0 else None); plt.plot(x_tst, m - 2 * s, 'g', linewidth=0.5, label='ensemble means - 2 ensemble stdev' if i == 0 else None); avgm += m plt.plot(x_tst, avgm/len(yhats), 'r', label='overall mean', linewidth=4) plt.ylim(-0.,17); plt.yticks(np.linspace(0, 15, 4)[1:]); plt.xticks(np.linspace(*x_range, num=9)); ax=plt.gca(); ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.spines['left'].set_position(('data', 0)) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #ax.spines['left'].set_smart_bounds(True) #ax.spines['bottom'].set_smart_bounds(True) plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5)) plt.savefig('/tmp/fig4.png', bbox_inches='tight', dpi=300) ``` ### Case 5: Functional Uncertainty ``` #@title Custom PSD Kernel class RBFKernelFn(tf.keras.layers.Layer): def __init__(self, **kwargs): super(RBFKernelFn, self).__init__(**kwargs) dtype = kwargs.get('dtype', None) self._amplitude = self.add_variable( initializer=tf.constant_initializer(0), dtype=dtype, name='amplitude') self._length_scale = self.add_variable( initializer=tf.constant_initializer(0), dtype=dtype, name='length_scale') def call(self, x): # Never called -- this is just a layer so it can hold variables # in a way Keras understands. 
return x @property def kernel(self): return tfp.math.psd_kernels.ExponentiatedQuadratic( amplitude=tf.nn.softplus(0.1 * self._amplitude), length_scale=tf.nn.softplus(5. * self._length_scale) ) # For numeric stability, set the default floating-point dtype to float64 tf.keras.backend.set_floatx('float64') # Build model. num_inducing_points = 40 model = tf.keras.Sequential([ tf.keras.layers.InputLayer(input_shape=[1]), tf.keras.layers.Dense(1, kernel_initializer='ones', use_bias=False), tfp.layers.VariationalGaussianProcess( num_inducing_points=num_inducing_points, kernel_provider=RBFKernelFn(), event_shape=[1], inducing_index_points_initializer=tf.constant_initializer( np.linspace(*x_range, num=num_inducing_points, dtype=x.dtype)[..., np.newaxis]), unconstrained_observation_noise_variance_initializer=( tf.constant_initializer(np.array(0.54).astype(x.dtype))), ), ]) # Do inference. batch_size = 32 loss = lambda y, rv_y: rv_y.variational_loss( y, kl_weight=np.array(batch_size, x.dtype) / x.shape[0]) model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01), loss=loss) model.fit(x, y, batch_size=batch_size, epochs=1000, verbose=False) # Profit. 
yhat = model(x_tst) assert isinstance(yhat, tfd.Distribution) #@title Figure 5: Functional Uncertainty y, x, _ = load_dataset() plt.figure(figsize=[6, 1.5]) # inches plt.plot(x, y, 'b.', label='observed'); num_samples = 7 for i in range(num_samples): sample_ = yhat.sample().numpy() plt.plot(x_tst, sample_[..., 0].T, 'r', linewidth=0.9, label='ensemble means' if i == 0 else None); plt.ylim(-0.,17); plt.yticks(np.linspace(0, 15, 4)[1:]); plt.xticks(np.linspace(*x_range, num=9)); ax=plt.gca(); ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.spines['left'].set_position(('data', 0)) ax.spines['top'].set_visible(False) ax.spines['right'].set_visible(False) #ax.spines['left'].set_smart_bounds(True) #ax.spines['bottom'].set_smart_bounds(True) plt.legend(loc='center left', fancybox=True, framealpha=0., bbox_to_anchor=(1.05, 0.5)) plt.savefig('/tmp/fig5.png', bbox_inches='tight', dpi=300) ```
github_jupyter
Here is the base code for taking paragraphs of DPEF reports and splitting them into sentences, then keeping only the long sentences.

MIN_NB_OF_TOKENS=8 seems to do the trick.

Possible improvements:
- Remove the last filter and inspect what has <8 words: mostly fragments (mainly titles, etc.), but this may reveal parsing errors.
- Questions were kept here; maybe exclude them if they appear among the top sentences, but they seem infrequent.
- The main mistakes are of the shape "['13 21 14 7 25 24 17 15 4 22 23']" --> add a criterion requiring actual words?

```
import pandas as pd

# None (not the deprecated/removed -1 sentinel) disables column truncation.
pd.set_option('display.max_colwidth', None)

# text processing
import spacy  # fixed: `spacy` was used below (spacy.util, spacy.load) but never imported
from spacy.lang.fr import French
from spacy.cli.download import download
from spacy.tokens import Span

# download('fr_core_news_sm')
spacy.util.is_package("fr_core_news_sm")

input_filename = "../../data/processed/DPEFs/dpef_paragraphs_debug.csv"
output_filename = "../../data/processed/DPEFs/dpef_paragraphs_sentences.csv"
output_filename2 = "../../data/processed/DPEFs/dpef_paragraphs_sentences_long_format_debug.csv"

df = pd.read_csv(input_filename, sep=";")
print(df.shape)
df = df[df.paragraph.notna()]  # NaNs created while saving / removing header -> to correct upstream
print(df.shape)
df = df.loc[5000:10000]  # debug subset

# params
MIN_NB_OF_TOKENS = 3  # NOTE(review): the notebook text says 8 works well; confirm the intended value.

def get_nb_words(doc):
    """Count content words (NOUN/PROPN/VERB part-of-speech tags) in a spaCy
    Doc/Span, excluding the literal "%" token."""
    return len([token for token in doc if (token.pos_ in ["NOUN","PROPN","VERB"]) and (token.text!="%")])

def exception_to_split(token):
    """True when the two previous tokens look like 'cf.' (a cross-reference
    that should not start a new sentence)."""
    if 'cf' in token.nbor(-2).text and token.nbor(-1).text == ".":
        return True
    return False

def custom_sentence_boundaries(doc):
    """Pipeline component vetoing sentence starts right after 'cf.'."""
    for i, token in enumerate(doc[2:]):
        if exception_to_split(token):
            token.is_sent_start = False
        # if exception_to_not_split(token):
        #     token.is_sent_start = True
    return doc

# def exception_to_not_split(token):
#     if 'etc' in token.nbor(-2).text and token.nbor(-1).text == ".":
#         return True
#     return False

def load_nlp_sententizer_object():
    """Load the French spaCy model, customize its sentence splitting, and
    register the custom `nb_words` attribute on Span.

    Fixed: this function (and the registration below) is now defined after
    `get_nb_words`/`custom_sentence_boundaries`, so calling it no longer
    raises NameError; `setter=get_nb_words` was also dropped because
    `nb_words` is a computed, read-only attribute (getter only).
    """
    Span.set_extension("nb_words", getter=get_nb_words, force=True)
    if not spacy.util.is_package("fr_core_news_sm"):
        print("Downloading fr_core_news_sm spacy model...")
        download('fr_core_news_sm')
        print("done.")
    nlp = spacy.load('fr_core_news_sm')
    # Custom boundary rule runs before the parser so it can veto splits.
    nlp.add_pipe(custom_sentence_boundaries, before="parser")
    nlp.add_pipe(nlp.create_pipe('sentencizer'))  # default sentencizer, AFTER the custom rule
    return nlp

nlp = load_nlp_sententizer_object()
nlp.pipeline

# Register `nb_words` as a computed (read-only) attribute.
Span.set_extension("nb_words", getter=get_nb_words, force=True)

# Manual pipeline setup: split sentences while ignoring references like "cf."
nlp = spacy.load('fr_core_news_sm')
nlp.add_pipe(nlp.create_pipe('sentencizer'))  # default sentencizer
nlp.add_pipe(custom_sentence_boundaries)  # exception rule for the sententizer

# Split each paragraph, keeping only sentences with enough content words.
df_sent = df["paragraph"].apply(
    lambda x: [sent.text for sent in nlp(x).sents if sent._.nb_words > MIN_NB_OF_TOKENS])

# save
df["paragraph_sentences"] = df_sent.values
print(df.shape)
df = df[df["paragraph_sentences"].apply(lambda x: len(x) > 0)]  # drop paragraphs with no kept sentence
print(df.shape)
# df.to_csv(output_filename, sep=";")  # TODO: remove

# Reshape to long format (one row per sentence), following
# https://stackoverflow.com/questions/53860398/pandas-dataframe-how-do-i-split-one-row-into-multiple-rows-by-multi-value-colum/53860543
# convert list to pd.Series then stack it
df2 = (df
       .set_index(df.columns[:-1].values.tolist())['paragraph_sentences']  # all except last colname as index
       .apply(pd.Series)
       .stack()
       .reset_index()
       .drop('level_{}'.format(len(df.columns)-1), axis=1)
       .rename(columns={0: 'sentence'}))
df2.to_csv(output_filename2, sep=";")
df.paragraph
```
github_jupyter
<a href="https://colab.research.google.com/github/yukinaga/ai_programming/blob/main/lecture_06/03_exercise.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # 演習 Tensorflowベースのアート関連ライブラリ「Magenta」を使います。 Magenta内のモデル「Music VAE」を使って、自由に作曲しましょう。 主に、 曲の最初のNoteSequenceと、最後のNoteSequenceに変更を加えます。 ## ライブラリのインストール ``` !apt-get update -qq && apt-get install -qq libfluidsynth1 fluid-soundfont-gm build-essential libasound2-dev libjack-dev !pip install -qU pyfluidsynth pretty_midi !pip install -qU magenta ``` ## チェックポイントのダウンロード ``` !gsutil -q -m cp -R gs://download.magenta.tensorflow.org/models/music_vae/colab2/checkpoints/mel_2bar_big.ckpt.* /content/ ``` ## モデルの初期化 ``` from magenta.models.music_vae import configs from magenta.models.music_vae.trained_model import TrainedModel # モデルの初期化 music_vae = TrainedModel( configs.CONFIG_MAP["cat-mel_2bar_big"], batch_size=4, # 一度に処理するデータ数 checkpoint_dir_or_path="/content/mel_2bar_big.ckpt") ``` ## NoteSequenceの生成 温度(ランダム度合い)を、好みに合わせて変更しましょう。 ``` import note_seq generated = music_vae.sample(n=5, # 生成数 length=128, # ステップ数 temperature=1.0) # 温度 for ns in generated: note_seq.plot_sequence(ns) note_seq.play_sequence(ns, synth=note_seq.fluidsynth) ``` ## 最初と最後のNoteSequence 曲の最初のNoteSequenceと、最後のNoteSequenceに変更を加えましょう。 ``` import magenta import note_seq from note_seq.protobuf import music_pb2 # 最初のNoteSeqence kira2_start = music_pb2.NoteSequence() kira2_start.notes.add(pitch=60, start_time=0.0, end_time=0.4, velocity=80) kira2_start.notes.add(pitch=60, start_time=0.4, end_time=0.8, velocity=80) kira2_start.notes.add(pitch=67, start_time=0.8, end_time=1.2, velocity=80) kira2_start.notes.add(pitch=67, start_time=1.2, end_time=1.6, velocity=80) kira2_start.notes.add(pitch=69, start_time=1.6, end_time=2.0, velocity=80) kira2_start.notes.add(pitch=69, start_time=2.0, end_time=2.4, velocity=80) kira2_start.notes.add(pitch=67, start_time=2.4, end_time=3.2, velocity=80) 
kira2_start.notes.add(pitch=65, start_time=3.2, end_time=3.6, velocity=80) kira2_start.notes.add(pitch=65, start_time=3.6, end_time=4.0, velocity=80) kira2_start.notes.add(pitch=64, start_time=4.0, end_time=4.4, velocity=80) kira2_start.notes.add(pitch=64, start_time=4.4, end_time=4.8, velocity=80) kira2_start.notes.add(pitch=62, start_time=4.8, end_time=5.2, velocity=80) kira2_start.notes.add(pitch=62, start_time=5.2, end_time=5.6, velocity=80) kira2_start.notes.add(pitch=60, start_time=5.6, end_time=6.4, velocity=80) kira2_start.total_time = 6.4 kira2_start.tempos.add(qpm=75); note_seq.plot_sequence(kira2_start) note_seq.play_sequence(kira2_start, synth=note_seq.fluidsynth) # 最後のNoteSeqence kira2_end = music_pb2.NoteSequence() kira2_end.notes.add(pitch=60, start_time=0.0, end_time=0.4, velocity=80) kira2_end.notes.add(pitch=62, start_time=0.4, end_time=0.8, velocity=80) kira2_end.notes.add(pitch=64, start_time=0.8, end_time=1.2, velocity=80) kira2_end.notes.add(pitch=67, start_time=1.2, end_time=1.6, velocity=80) kira2_end.notes.add(pitch=69, start_time=1.6, end_time=2.0, velocity=80) kira2_end.notes.add(pitch=64, start_time=2.0, end_time=2.4, velocity=80) kira2_end.notes.add(pitch=60, start_time=2.4, end_time=3.2, velocity=80) kira2_end.notes.add(pitch=62, start_time=3.2, end_time=3.6, velocity=80) kira2_end.notes.add(pitch=64, start_time=3.6, end_time=4.0, velocity=80) kira2_end.notes.add(pitch=67, start_time=4.0, end_time=4.4, velocity=80) kira2_end.notes.add(pitch=69, start_time=4.4, end_time=4.8, velocity=80) kira2_end.notes.add(pitch=64, start_time=4.8, end_time=5.2, velocity=80) kira2_end.notes.add(pitch=62, start_time=5.2, end_time=5.6, velocity=80) kira2_end.notes.add(pitch=60, start_time=5.6, end_time=6.4, velocity=80) kira2_end.total_time = 6.4 kira2_end.tempos.add(qpm=75); note_seq.plot_sequence(kira2_end) note_seq.play_sequence(kira2_end, synth=note_seq.fluidsynth) # NoteSequenceの再生 ``` ## NoteSequnce間の補間 どのような曲が生成されるのか、以下のコードにより確認します。 ``` n_seq = 8 
# 曲のNoteSeqence数(最初と最後を含む) # NoteSeqenceを複数生成し、リストに格納 gen_seq = music_vae.interpolate( kira2_start, # 最初のNoteSeqence kira2_end, # 最後のNoteSeqence num_steps=n_seq, length=32) # NoteSeqenceを全て結合し、1つの曲に interp_seq = note_seq.sequences_lib.concatenate_sequences(gen_seq) note_seq.plot_sequence(interp_seq) note_seq.play_sequence(interp_seq, synth=note_seq.fluidsynth) ``` ## MIDIファイルの保存とダウンロード ``` from google.colab import files note_seq.sequence_proto_to_midi_file(interp_seq, "exercise.mid") #MIDI データに変換し保存 files.download("exercise.mid") # ダウンロード ```
github_jupyter
Visualizing NBA Shots with `py-Goldsberry` === One of the coolest features of `py-Goldsberry` is access to raw data for NBA shots. Visualizing NBA shot charts as a method of analytics was proposed by Kirk Goldsberry at the 2012 MIT Sloan Sports Analytics Conference ([read paper](http://www.sloansportsconference.com/wp-content/uploads/2012/02/Goldsberry_Sloan_Submission.pdf)). Given the data `py-Goldsberry` provides access to and the methods proposed by it's namesake, it would be inappropriate to not include some additional functionality that eased the creation of shot charts for NBA data. Thanks to some great work by (Savvas), the `py-Goldsberry` user has the ability to easily acquire and visualize NBA shooting data in Python. ###Getting The Data We start by getting the data that we want to visualize. We can do this by using the `player.shot_chart()` class on a player ID ``` import goldsberry import pandas as pd pd.set_option("display.max_columns", 50) goldsberry.__version__ players_2014 = goldsberry.PlayerList(2014) players_2014 = pd.DataFrame(players_2014) ``` We have a DataFrame of players and their ids that we can use to get information on a specific player. For the sake of simplicity, we are going to study the shooting pattern of James Harden. To get his ID, we can either scroll through the table or do a quick filter using Pandas. 
``` harden_id = players_2014['PERSON_ID'].ix[players_2014['DISPLAY_LAST_COMMA_FIRST'].str.contains("Harden")] harden_id ``` Now that we have the unique **PERSON_ID** for James, we can collect information on his shots using another `py-Goldsberry` class, `player.shot_chart()` ``` harden_shots = goldsberry.player.shot_chart(harden_id, season=2014, clutchtime=8) harden_shots = pd.DataFrame(harden_shots.chart()) harden_shots.head() ``` --- ``` import numpy as np from scipy.stats import binned_statistic_2d import seaborn as sns from bokeh.plotting import figure from math import pi %matplotlib inline import urllib import matplotlib.pyplot as plt from matplotlib.patches import Circle, Rectangle, Arc urllib.__version__ sns.set_style("white") sns.set_color_codes() plt.figure(figsize=(12,11)) plt.scatter(harden_shots.LOC_X, harden_shots.LOC_Y) plt.show() right = harden_shots[harden_shots.SHOT_ZONE_AREA == "Right Side(R)"] plt.figure(figsize=(12,11)) plt.scatter(right.LOC_X, right.LOC_Y) plt.xlim(-300,300) plt.ylim(-100,500) plt.show() from matplotlib.patches import Circle, Rectangle, Arc def draw_court(ax=None, color='black', lw=2, outer_lines=False): # If an axes object isn't provided to plot onto, just get current one if ax is None: ax = plt.gca() # Create the various parts of an NBA basketball court # Create the basketball hoop # Diameter of a hoop is 18" so it has a radius of 9", which is a value # 7.5 in our coordinate system hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False) # Create backboard backboard = Rectangle((-30, -7.5), 60, -1, linewidth=lw, color=color) # The paint # Create the outer box 0f the paint, width=16ft, height=19ft outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color, fill=False) # Create the inner box of the paint, widt=12ft, height=19ft inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color, fill=False) # Create free throw top arc top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, 
theta2=180, linewidth=lw, color=color, fill=False) # Create free throw bottom arc bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color, linestyle='dashed') # Restricted Zone, it is an arc with 4ft radius from center of the hoop restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw, color=color) # Three point line # Create the side 3pt lines, they are 14ft long before they begin to arc corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw, color=color) corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color) # 3pt arc - center of arc will be the hoop, arc is 23'9" away from hoop # I just played around with the theta values until they lined up with the # threes three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw, color=color) # Center Court center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color) center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0, linewidth=lw, color=color) # List of the court elements to be plotted onto the axes court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw, bottom_free_throw, restricted, corner_three_a, corner_three_b, three_arc, center_outer_arc, center_inner_arc] if outer_lines: # Draw the half court line, baseline and side out bound lines outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw, color=color, fill=False) court_elements.append(outer_lines) # Add the court elements onto the axes for element in court_elements: ax.add_patch(element) return ax plt.figure(figsize=(12,11)) draw_court(outer_lines=True, color="red") plt.xlim(-300,300) plt.ylim(-100,500) plt.show() plt.figure(figsize=(12,11)) plt.scatter(harden_shots.LOC_X, harden_shots.LOC_Y) draw_court(outer_lines=True) # Descending values along the axis from left to right plt.xlim(300,-300) plt.show() plt.figure(figsize=(12,11)) plt.scatter(harden_shots.LOC_X, harden_shots.LOC_Y) draw_court() # Adjust plot 
limits to just fit in half court plt.xlim(-300,300) # Descending values along th y axis from bottom to top # in order to place the hoop by the top of plot plt.ylim(422.5, -47.5) # get rid of axis tick labels # plt.tick_params(labelbottom=False, labelleft=False) plt.show() x = harden_id.tolist()[0] def get_player_img(player_id): """Returns the image of the player from stats.nba.com Parameters ---------- player_id: int The player ID used to find the image. """ url = "http://stats.nba.com/media/players/230x185/"+str(player_id)+".png" img_file = str(player_id) + ".png" img = plt.imread(urllib.urlretrieve(url, img_file)[0]) return plt.imshow(img) get_player_img(x) def draw_court(ax=None, color='gray', lw=1, outer_lines=False): """ Returns an axes with a basketball court drawn onto to it. This function draws a court based on the x and y-axis values that the NBA stats API provides for the shot chart data. For example, the NBA stat API represents the center of the hoop at the (0,0) coordinate. Twenty-two feet from the left of the center of the hoop in is represented by the (-220,0) coordinates. So one foot equals +/-10 units on the x and y-axis. 
TODO: explain the parameters """ if ax is None: ax = plt.gca() # Create the various parts of an NBA basketball court # Create the basketball hoop hoop = Circle((0, 0), radius=7.5, linewidth=lw, color=color, fill=False) # Create backboard backboard = Rectangle((-30, -7.5), 60, -1, linewidth=lw, color=color) # The paint # Create the outer box 0f the paint, width=16ft, height=19ft outer_box = Rectangle((-80, -47.5), 160, 190, linewidth=lw, color=color, fill=False) # Create the inner box of the paint, widt=12ft, height=19ft inner_box = Rectangle((-60, -47.5), 120, 190, linewidth=lw, color=color, fill=False) # Create free throw top arc top_free_throw = Arc((0, 142.5), 120, 120, theta1=0, theta2=180, linewidth=lw, color=color, fill=False) # Create free throw bottom arc bottom_free_throw = Arc((0, 142.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color, linestyle='dashed') # Restricted Zone, it is an arc with 4ft radius from center of the hoop restricted = Arc((0, 0), 80, 80, theta1=0, theta2=180, linewidth=lw, color=color) # Three point line # Create the right side 3pt lines, it's 14ft long before it arcs corner_three_a = Rectangle((-220, -47.5), 0, 140, linewidth=lw, color=color) # Create the right side 3pt lines, it's 14ft long before it arcs corner_three_b = Rectangle((220, -47.5), 0, 140, linewidth=lw, color=color) # 3pt arc - center of arc will be the hoop, arc is 23'9" away from hoop three_arc = Arc((0, 0), 475, 475, theta1=22, theta2=158, linewidth=lw, color=color) # Center Court center_outer_arc = Arc((0, 422.5), 120, 120, theta1=180, theta2=0, linewidth=lw, color=color) center_inner_arc = Arc((0, 422.5), 40, 40, theta1=180, theta2=0, linewidth=lw, color=color) # List of the court elements to be plotted onto the axes court_elements = [hoop, backboard, outer_box, inner_box, top_free_throw, bottom_free_throw, restricted, corner_three_a, corner_three_b, three_arc, center_outer_arc, center_inner_arc] if outer_lines: # Draw the half court line, baseline and 
side out bound lines outer_lines = Rectangle((-250, -47.5), 500, 470, linewidth=lw, color=color, fill=False) court_elements.append(outer_lines) # Add the court elements onto the axes for element in court_elements: ax.add_patch(element) return ax ``` Shot_Chart function should take a dataset as an argument and return a shot chart * Scatter should differentiate between ACTION_TYPE, EVENT_TYPE, SHOT_TYPE, SHOT_ZONE_AREA, SHOT_ZONE_BASIC, SHOT_ZONE_RANGE, TEAM_NAME, PLAYER_NAME * Scatter set dot size, color, alpha ``` harden_shot_master = pd.merge(harden_shots, harden_shots_advanced, how='left', left_on=['GAME_ID', 'PERIOD', 'MINUTES_REMAINING', 'SECONDS_REMAINING']) harden_shots_advanced.tail() harden_shots.head() shot_chart(harden_shots, likeness=harden_id.tolist()[0]) def shot_chart(data, x="LOC_X", y="LOC_Y", title="", kind="scatter", color="b", cmap=None, likeness=False, xlim=(-250, 250), ylim=(422.5, -47.5), court_color="gray", outer_lines=False, court_lw=1, flip_court=False, kde_shade=True, hex_gridsize=None, ax=None, **kwargs): """ data: dataset to be visualizaed x = "LOC_X": default column name in dataset with X Coordinates y = "LOC_Y": default column name in dataset with Y Coordinates title="": kind = "scatter": Type of visual, default is "scatter"; takes: "scatter", "kde", "hex" color = "b": default color cmap = None: default color map likeness = None: Logical indicator to determine inclusion of player photo. 
Takes player ID value xlim = (-250,250): default plot limits ylim = (422.5, -47.5), court_color = "gray": outer_lines=False: hex_gridsize=None: ax=None: **kwargs """ if ax is None: ax = plt.gca() if cmap is None: cmap = sns.light_palette(color, as_cmap=True) if not flip_court: ax.set_xlim(xlim) ax.set_ylim(ylim) else: ax.set_xlim(xlim[::-1]) ax.set_ylim(ylim[::-1]) ax.tick_params(labelbottom="off", labelleft="off") ax.set_title(title, fontsize=18) draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines) if kind == "scatter": ax.scatter(data[x], data[y], c=color, **kwargs) elif kind == "kde": sns.kdeplot(data[x], data[y], shade=kde_shade, cmap=cmap, ax=ax, **kwargs) ax.set_xlabel('') ax.set_ylabel('') elif kind == "hex": if hex_gridsize is None: # Get the number of bins for hexbin using Freedman-Diaconis rule # This is idea was taken from seaborn, which got the calculation # from http://stats.stackexchange.com/questions/798/ from seaborn.distributions import _freedman_diaconis_bins x_bin = _freedman_diaconis_bins(data[x]) y_bin = _freedman_diaconis_bins(data[y]) hex_gridsize = int(np.mean([x_bin, y_bin])) ax.hexbin(x, y, gridsize=hex_gridsize, cmap=cmap, **kwargs) else: raise ValueError("kind must be 'scatter', 'kde', or 'hex'.") if likeness is not None: get_player_img(likeness) return ax def heatmap_fgp(x, y, z, bins=20, title="", cmap=plt.cm.YlOrRd, xlim=(-250, 250), ylim=(422.5, -47.5), facecolor='lightgray', facecolor_alpha=0.4, court_color="black", outer_lines=False, court_lw=0.5, flip_court=False, ax=None, **kwargs): """ Returns an AxesImage object that contains a heatmap of the FG% TODO: Explain parameters """ # Bin the FGA (x, y) and Calculcate the mean number of times shot was # made (z) within each bin # mean is the calculated FG percentage for each bin mean, xedges, yedges, binnumber = binned_statistic_2d(x=x, y=y, values=z, statistic='mean', bins=bins) if ax is None: ax = plt.gca() if not flip_court: ax.set_xlim(xlim) ax.set_ylim(ylim) 
else: ax.set_xlim(xlim[::-1]) ax.set_ylim(ylim[::-1]) ax.tick_params(labelbottom="off", labelleft="off") ax.set_title(title, fontsize=18) ax.patch.set_facecolor(facecolor) ax.patch.set_alpha(facecolor_alpha) draw_court(ax, color=court_color, lw=court_lw, outer_lines=outer_lines) heatmap = ax.imshow(mean.T, origin='lower', extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]], interpolation='nearest', cmap=plt.cm.YlOrRd) return heatmap # Bokeh Shot Chart def bokeh_draw_court(figure, line_width=1, line_color='gray'): """Returns a figure with the basketball court lines drawn onto it""" # hoop figure.circle(x=0, y=0, radius=7.5, fill_alpha=0, line_color=line_color, line_width=line_width) # backboard figure.line(x=range(-30,31), y=-7.5, line_color=line_color) # The paint # outerbox figure.rect(x=0, y=47.5, width=160, height=190,fill_alpha=0, line_color=line_color, line_width=line_width) # innerbox # left inner box line figure.line(x=-60, y=np.arange(-47.5, 143.5), line_color=line_color, line_width=line_width) # right inner box line figure.line(x=60, y=np.arange(-47.5, 143.5), line_color=line_color, line_width=line_width) # Restricted Zone figure.arc(x=0, y=0, radius=40, start_angle=pi, end_angle=0, line_color=line_color, line_width=line_width) # top free throw arc figure.arc(x=0, y=142.5, radius=60, start_angle=pi, end_angle=0, line_color=line_color) # bottome free throw arc figure.arc(x=0, y=142.5, radius=60, start_angle=0, end_angle=pi, line_color=line_color, line_dash="dashed") # Three point line # corner three point lines figure.line(x=-220, y=np.arange(-47.5, 92.5), line_color=line_color, line_width=line_width) figure.line(x=220, y=np.arange(-47.5, 92.5), line_color=line_color, line_width=line_width) # # three point arc figure.arc(x=0, y=0, radius=237.5, start_angle=3.528, end_angle=-0.3863, line_color=line_color, line_width=line_width) # add center court # outer center arc figure.arc(x=0, y=422.5, radius=60, start_angle=0, end_angle=pi, line_color=line_color, 
line_width=line_width) # inner center arct figure.arc(x=0, y=422.5, radius=20, start_angle=0, end_angle=pi, line_color=line_color, line_width=line_width) # outer lines, consistting of half court lines and out of bounds # lines figure.rect(x=0, y=187.5, width=500, height=470, fill_alpha=0, line_color=line_color, line_width=line_width) return figure def bokeh_shot_chart(source, x="LOC_X", y="LOC_Y", fill_color="#1f77b4", fill_alpha=0.3, line_alpha=0.3, court_lw=1, court_line_color='gray'): """ Returns a figure with both FGA and basketball court lines drawn onto it. This function expects data to be a ColumnDataSource with the x and y values named "LOC_X" and "LOC_Y". Otherwise specify x and y. """ fig = figure(width=700, height=658, x_range=[-250, 250], y_range=[422.5, -47.5], min_border=0, x_axis_type=None, y_axis_type=None, outline_line_color="black") fig.scatter(x, y, source=source, size=10, fill_alpha=0.3, line_alpha=0.3) bokeh_draw_court(fig, line_color='gray') return fig ```
github_jupyter
# CSE 6040, Fall 2015 [14]: PageRank (still cont'd) > This notebook is identical to [Lab 13](http://nbviewer.ipython.org/github/rvuduc/cse6040-ipynbs/blob/master/13--pagerank-partial-solns.ipynb), but with solutions provided for Part 1 and partial solutions for Part 2. In this notebook, you'll implement the [PageRank algorithm](http://ilpubs.stanford.edu:8090/422/) summarized in class. You'll test it on a real dataset (circa 2005) that consists of [political blogs](http://networkdata.ics.uci.edu/data/polblogs/) and their links among one another. Note that the presentation in class follows the matrix view of the algorithm. Cleve Moler (inventor of MATLAB) has a nice set of notes [here](https://www.mathworks.com/moler/exm/chapters/pagerank.pdf). For today's notebook, you'll need to download the following additional materials: * A `cse6040utils` module, which is a Python module containing some handy routines from previous classes: [link](https://raw.githubusercontent.com/rvuduc/cse6040-ipynbs/master/cse6040utils.py) (Note: This module is already part of the `git` repo for our notebooks if you are pulling from there.) * A SQLite version of the political blogs dataset: http://cse6040.gatech.edu/fa15/poliblogs.db (~ 611 KiB) ## Part 1: Explore the Dataset Let's start by looking at the dataset, to get a feel for what it contains. Incidentally, one of you asked recently how to get the schema for a SQLite database when using Python. Here is some code adapted from a few ideas floating around on the web. Let's use these to inspect the tables available in the political blogs dataset. 
``` import sqlite3 as db import pandas as pd def get_table_names (conn): assert type (conn) == db.Connection # Only works for sqlite3 DBs query = "SELECT name FROM sqlite_master WHERE type='table'" return pd.read_sql_query (query, conn) def print_schemas (conn, table_names=None, limit=0): assert type (conn) == db.Connection # Only works for sqlite3 DBs if table_names is None: table_names = get_table_names (conn) c = conn.cursor () query = "PRAGMA TABLE_INFO ({table})" for name in table_names: c.execute (query.format (table=name)) columns = c.fetchall () print ("=== {table} ===".format (table=name)) col_string = "[{id}] {name} : {type}" for col in columns: print (col_string.format (id=col[0], name=col[1], type=col[2])) print ("\n") conn = db.connect ('poliblogs.db') for name in get_table_names (conn)['name']: print_schemas (conn, [name]) query = '''SELECT * FROM %s LIMIT 5''' % name print (pd.read_sql_query (query, conn)) print ("\n") ``` **Exercise.** Write a snippet of code to verify that the vertex IDs are _dense_ in some interval $[1, n]$. That is, there is a minimum value of $1$, some maximum value $n$, and _no_ missing values between $1$ and $n$. ``` query = ''' SELECT MIN(Id) AS MinId, MAX(Id) AS MaxId, COUNT(DISTINCT Id) AS NumDistinctIds FROM Vertices ''' df = pd.read_sql_query (query, conn) print df assert df.MinId[0] == 1 assert df.MaxId[0] == df.NumDistinctIds[0] print ("\n==> Verified: Vertex ids cover [1, %d] densely." \ % df.NumDistinctIds[0]) ``` **Exercise.** Make sure every edge has its end points in the vertex table. 
``` query = ''' SELECT {col} FROM Edges WHERE {col} NOT IN (SELECT Id FROM Vertices) ''' df_s = pd.read_sql_query (query.format (col='Source'), conn) print (df_s['Source']) df_t = pd.read_sql_query (query.format (col='Target'), conn) print (df_t['Target']) assert df_s['Source'].empty assert df_t['Target'].empty print ("==> Verified: All source and target IDs are vertices.") ``` **Exercise.** Determine which vertices have no incident edges. Store the number of such vertices in a variable, `num_solo_vertices`. ``` query = ''' SELECT Id, Url FROM Vertices WHERE (Id NOT IN (SELECT DISTINCT Source FROM Edges)) AND (Id NOT IN (SELECT DISTINCT Target FROM Edges)) ''' df_solo_vertices = pd.read_sql_query (query, conn) print df_solo_vertices.head () num_solo_vertices = len (df_solo_vertices) # Our testing code follows, assuming your `num_solo_vertices` variable: print ("\n==> %d vertices have no incident edges." % num_solo_vertices) assert num_solo_vertices == 266 ``` **Exercise.** Compute a view called `Outdegrees`, which contains the following columns: 1. `Id`: vertex ID 2. `Degree`: the out-degree of this vertex. To help you test your view, the following snippet includes a second query that selects from your view but adds a Url field and orders the results in descending order of degree. It also prints first few and last few rows of this query, so you can inspect the URLs as a sanity check. (Perhaps it also provides a small bit of entertainment!) 
``` # Complete this query: query = ''' CREATE VIEW IF NOT EXISTS Outdegrees AS SELECT Source AS Id, COUNT(*) AS Degree FROM Edges GROUP BY Source ''' c = conn.cursor () c.execute (query) from IPython.display import display query = ''' SELECT Outdegrees.Id, Degree, Url FROM Outdegrees, Vertices WHERE Outdegrees.Id = Vertices.Id ORDER BY -Degree ''' df_outdegrees = pd.read_sql_query (query, conn) print "==> A few entries with large out-degrees:" display (df_outdegrees.head ()) print "\n==> A few entries with small out-degrees:" display (df_outdegrees.tail ()) ``` **Exercise.** Query the database to extract a report of which URLs point to which URLs. Also include the source vertex out-degree and order the rows in descending order by it. ``` query = ''' SELECT S.Url, T.Url, Out.Degree FROM Edges AS E, (SELECT Id, Url FROM Vertices) AS S, (SELECT Id, Url FROM Vertices) AS T, (SELECT Id, Degree FROM Outdegrees) AS Out WHERE (E.Source=S.Id) AND (E.Target=T.Id) AND (E.Source=Out.Id) ORDER BY -Out.Degree ''' df_G = pd.read_sql_query (query, conn) from IPython.display import display display (df_G.head ()) print ("...") display (df_G.tail ()) ``` ## Part 2: Implement PageRank The following exercises will walk you through a possible implementation of PageRank for this dataset. **Exercise.** Build a sparse matrix, `A_1`, that stores $G^TD^{-1}$, where $G^T$ is the transpose of the connectivity matrix $G$, and $D^{-1}$ is the diagonal matrix of inverse out-degrees. 
``` from cse6040utils import sparse_matrix # Extract entries from the table query = ''' SELECT Target AS Row, Source AS Col, 1.0/Degree AS Val FROM Edges, Outdegrees WHERE Edges.Source = Outdegrees.Id ''' df_A = pd.read_sql_query (query, conn) display (df_A.head (10)) # Copy entries from df_A into A_1 A_1 = sparse_matrix () # Initially all zeros, with no rows or columns for (i, j, a_ij) in zip (df_A['Row'], df_A['Col'], df_A['Val']): A_1[i-1][j-1] += a_ij # "-1" switches to 0-based indexing ``` **Errata: Bug in matrix construction.** Based on questions from students after class, it seems the construction of $A \equiv G^TD^{-1}$ as Prof. Vuduc described it in class has a subtle bug: it does _not_ treat unlinked pages correctly! To see why, suppose you are the random surfer visiting page $i$, and, with probability $\alpha$, you decide to follow an outgoing link. But what if the page has no outgoing link? This scenario corresponds to row $i$ of $G$ being entirely zero. So, the random surfer would just "disappear." The easiest fix to the model to account for this case is to assume that the random surfer stays on the same page, which means we should set $a_{ii}$ to 1. The following code snippet handles this case. ``` # Select all vertices with no outgoing edges query = ''' SELECT Id FROM Vertices WHERE Id NOT IN (SELECT DISTINCT Source FROM Edges) ''' df_anti_social = pd.read_sql_query (query, conn) print ("==> Found %d vertices with no outgoing links." \ % len (df_anti_social)) # Add self-edges for empty rows/columns for i in df_anti_social['Id']: A_1[i-1][i-1] = 1.0 ``` **Exercise.** Implement a function to multiply a sparse matrix by a dense vector, assuming a dense vector defined as follows. ``` def dense_vector (n, init_val=0.0): """ Returns a dense vector of length `n`, with all entries set to `init_val`. 
""" return [init_val] * n def spmv (n, A, x): """Returns a dense vector y of length n, where y = A*x.""" y = dense_vector (n) for (i, A_i) in A.items (): s = 0 for (j, a_ij) in A_i.items (): s += a_ij * x[j] y[i] = s return y ``` As a quick test, let's verify that multiplying $A_1$ by the vector of all ones, $u$, counts the number of vertices. > Why should that be the case? Two of you asked about this after class. ``` n = df.NumDistinctIds[0] # Number of vertices, from Part 1 u = dense_vector (n, 1.0) y = spmv (n, A_1, u) print sum (y) ``` **Exercise.** Complete the PageRank implementation for this dataset. To keep it simple, you may take $\alpha=0.85$, $x(0)$ equal to the vector of all $1/n$ values, and 25 iterations. Additionally, you may find the following functions helpful. > The support code in the next code cell differs _slightly_ from the notebook we posted originally. It renames those functions and provides additional functions (e.g., `vec_2norm`), in case you want to implement a residual-based termination test. ``` # Some helper functions, in case you need them import math def vec_scale (x, alpha): """Scales the vector x by a constant alpha.""" return [x_i*alpha for x_i in x] def vec_add_scalar (x, c): """Adds the scalar value c to every element of x.""" return [x_i+c for x_i in x] def vec_sub (x, y): """Returns x - y""" return [x_i - y_i for (x_i, y_i) in zip (x, y)] def vec_2norm (x): """Returns ||x||_2""" return math.sqrt (sum ([x_i**2 for x_i in x])) # YOUR CODE GOES BELOW. We've provided some scaffolding code, # so you just need to complete it. ALPHA = 0.85 # Probability of following some link MAX_ITERS = 25 n = df.NumDistinctIds[0] # Number of vertices, from Part 1 # Let X[t] store the dense x(t) vector at time t X = [] x_0 = dense_vector (n, 1.0/n) # Initial distribution: 1/n at each page X.append (x_0) for t in range (1, MAX_ITERS): # Complete this implementation X.append (...) 
``` **Exercise.** Check your result by first inserting the _final_ computed PageRank vector back into the database, and then using a SQL query to see the ranked URLs. In your query output, also include _both_ the in-degrees and out-degrees of each vertex. ``` # Write some code here to create a table in the database # called PageRank. It should have one column to hold the # page (vertex) ID, and one for the rank value. # Some helper code to compute a view containing the indegrees. query = ''' CREATE VIEW IF NOT EXISTS Indegrees AS SELECT Target AS Id, COUNT(*) AS Degree FROM Edges GROUP BY Target ''' c = conn.cursor () c.execute (query) # Complete this query: query = ''' ... ''' df_ranks = pd.read_sql_query (query, conn) display (df_ranks) sum (df_ranks['Rank']) ``` **Exercise.** The `Vertices` table includes a column called, `Leaning`, which expresses a political leaning -- either "Left" or "Right". How might you use this column to come up with an alternative ranking scheme? **Exercise (advanced?).** Create an SQL-based implementation of the PageRank algorithm, where you implement the sparse matrix-vector multiply in SQL, rather than in Python as above.
github_jupyter
# Classifying Wines <!-- PELICAN_BEGIN_SUMMARY --> Let's kick off the blog with learning about wines, or rather training classifiers to learn wines for us ;) In this post, we'll take a look at the [UCI Wine data](https://archive.ics.uci.edu/ml/datasets/wine), and then train several scikit-learn classifiers to predict wine classes. <!-- PELICAN_END_SUMMARY --> On a recent 5-hour wifi-less bus trip I learned that scikit-learn comes prepackaged with some interesting [datasets](http://scikit-learn.org/stable/datasets/index.html). Among them are several 'toy datasets' which don't require an internet connection, so I decided that this would be a perfect time to experiment with building classifiers! As a result of not having internet, this post will track how I went about working with a dataset where I didn't initially have any idea how it would be structured or formatted. We'll focus on a small wine database which carries a categorical label for each wine along with several continuous-valued features. There are three different wine 'categories' and our goal will be to classify an unlabeled wine according to its characteristic features such as alcohol content, flavor, hue etc. ## Load and Organize Data First let's import the usual data science modules! ``` %matplotlib inline from sklearn import datasets import matplotlib.pyplot as plt import seaborn as sns import pandas as pd import numpy as np ``` Now from the skearn datasets let's load the 'wine' dataset and see whats all in there. ``` raw_data = datasets.load_wine() raw_data ``` It's a dictionary with some extra info besides the actual data. And one of the keys is `'DESCR'`, so let's start by taking a look at that. By `print`ing it, we can see the formatted text... ``` print(raw_data['DESCR']) ``` Excellent! This is just what we needed to understand how to think about and use this data set. The key points: 1. there are 3 classes (creatively named `'class_0'`, `'class_1'`, and `'class_2'`). 
It's likely these correspond to some typical wine varietals like Pinot Noir, or Cabernet, or Merlot... 2. there are 13 numerical attributes detailing things like alcohol perentage, ash (am I drinking ash in my wine??), Flavanoids, etc. Since this is a dictionary, let's also print out the other key/value pairs so we can decide how we'll format a data structure useful for our needs. The `dict.items()` method allows easy access to the key/values in a for loop. ``` for key,value in raw_data.items(): print(key,'\n',value,'\n') ``` Everything looks good. We are most interested in the `'data'` and `'target'` fields. Let's quickly check their shapes. ``` print('data.shape\t',raw_data['data'].shape, '\ntarget.shape \t',raw_data['target'].shape) ``` That looks good, we've confirmed that we have 178 samples (rows) and 13 features (columns). Now let's build a pandas DataFrame to hold our data so that we can make use of all of its snazzy features. Then we'll take a look at the first few entries. ``` features = pd.DataFrame(data=raw_data['data'],columns=raw_data['feature_names']) data = features data['target']=raw_data['target'] data['class']=data['target'].map(lambda ind: raw_data['target_names'][ind]) data.head() ``` We can also get some summary statistics now that we have a DataFrame. ``` data.describe() ``` What is the distrbution of alcohol content among all of the wines? ``` sns.distplot(data['alcohol'],kde=0) ``` What is the distribution of alcohol content by class? ``` for i in data.target.unique(): sns.distplot(data['alcohol'][data.target==i], kde=1,label='{}'.format(i)) plt.legend() ``` Interestingly, the three classes seem to naturally separate as low/mid/high alcohol distrubtions. What are the distributions of the classes for the rest of the features? We'll use Seaborn's `.kdeplot()` method so we can cleanly distinguish each class. However, note that this scales the y-axis so that the integral under each curve is 1. 
``` import matplotlib.gridspec as gridspec for feature in raw_data['feature_names']: print(feature) #sns.boxplot(data=data,x=data.target,y=data[feature]) gs1 = gridspec.GridSpec(3,1) ax1 = plt.subplot(gs1[:-1]) ax2 = plt.subplot(gs1[-1]) gs1.update(right=0.60) sns.boxplot(x=feature,y='class',data=data,ax=ax2) sns.kdeplot(data[feature][data.target==0],ax=ax1,label='0') sns.kdeplot(data[feature][data.target==1],ax=ax1,label='1') sns.kdeplot(data[feature][data.target==2],ax=ax1,label='2') ax2.yaxis.label.set_visible(False) ax1.xaxis.set_visible(False) plt.show() ``` For some of the features such as flavanoids and total phenols, it is clear the class distributions have quite different means. Thus we could expect that even simple models may be able to distinguish the wines... ## Split data In order to effectively train and test our model, we need to separate the data into a training set which we will feed to our model along the the training labels. Then after we have trained the model, we will test it on the 'test' data, so that we can gauge the real-world applicability of the model. Scikit-learn has a useful functionality here with the `train_test_split()` method. `test_size` governs the proportion of data that is reserved for testing. We want to train on enough data that our model can make good predictions but we also need enough test data to determine if we've overfit the model. We'll use 20% of the data for testing. ``` from sklearn.model_selection import train_test_split data_train, data_test, label_train, label_test = \ train_test_split(raw_data['data'],raw_data['target'], test_size=0.2) print(len(data_train),' samples in training data\n', len(data_test),' samples in test data\n', ) ``` Often the features will require some preprocessing. However, since we are already given distributions of numerical data, we can go straight to training our models. 
## Model selection/validation There are many classifcation algorithms to choose through, so let's throw them all at our problem and pick the one that performs best. To do that we'll create a dict of all of the scikit-learn classifiers. Some classifiers depend on one or more hyperparemeters or regularizatin techniques whose optimal values are not known ahead of time. In order to find the optimal parameters, we can just take a brute force approach where we test all of them, and keep model that has the highest cross-validation score. Below, we'll perform [GridSearchCV](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html) to find the optimal hyperparameters included in the dict below. ``` from sklearn.naive_bayes import GaussianNB from sklearn.svm import SVC, LinearSVC from sklearn.neighbors import KNeighborsClassifier from sklearn.linear_model import LogisticRegression from sklearn import tree from sklearn.neural_network import MLPClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.gaussian_process.kernels import RBF from sklearn.ensemble import RandomForestClassifier dict_classifiers = { "Logistic Regression": {'classifier': LogisticRegression(), 'params' : [ { 'penalty': ['l1','l2'], 'C': [0.001,0.01,0.1,1,10,100,1000] } ] }, "Nearest Neighbors": {'classifier': KNeighborsClassifier(), 'params': [ { 'n_neighbors': [1, 3, 5, 10], 'leaf_size': [3, 30] } ] }, "Linear SVM": {'classifier': SVC(), 'params': [ { 'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['linear'] } ] }, "Gradient Boosting Classifier": {'classifier': GradientBoostingClassifier(), 'params': [ { 'learning_rate': [0.05, 0.1], 'n_estimators' :[50, 100, 200], 'max_depth':[3,None] } ] }, "Decision Tree": {'classifier': tree.DecisionTreeClassifier(), 'params': [ { 'max_depth':[3,None] } ] }, "Random Forest": {'classifier': RandomForestClassifier(), 'params': {} }, "Naive Bayes": {'classifier': GaussianNB(), 'params': {} } } ``` 
When fitting classifiers it is useful to get a sense of their predictive power as a function of the number of training samples. This is visualized as a 'learning curve.' Below is a method from the [scikit-learn documentation](http://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html#sphx-glr-auto-examples-model-selection-plot-learning-curve-py) that plots the training score (the accuracy of the model on the training data) along with the cross-validation score (which measures the accuracy on data that is left out of the training set). A good model should be able to capture most of the complexity of the training data (otherwise we need a model with less bias) and the validation score should increase with more training data. This behavior indicates that the model will generalize well as we collect more data.

```
from sklearn.model_selection import learning_curve


def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.6, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.

    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.

    title : string
        Title for the chart.

    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.

    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.

    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.

    cv : integer, cross-validation generator, optional
        If an integer is passed, it is the number of folds (defaults to 3).
        Specific cross-validation objects can be passed, see
        sklearn.cross_validation module for the list of possible objects

    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # learning_curve re-fits the estimator at each training-set size and
    # returns per-fold scores (one column per CV fold).
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()

    # Shaded bands show +/- one standard deviation across the CV folds.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")

    plt.legend(loc="best")
    return plt
```

Now comes the real training of our models. When learning how to compare classifiers I found [this blog](http://ataspinar.com/2017/05/26/classification-with-scikit-learn/) which is the basis of the 'batch_classify' method below.

For each classifier, we'll perform an exhaustive grid search where each combination of hyperparameters are cross-validated using [StratifiedKfold](http://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html#sklearn.model_selection.StratifiedKFold) with 10 folds. This means, the data is split into 10 sets, trained on 9 of them and tested on the rest which gives the cross-validation score. The grid search then selects the combination of parameters that gives the best cv score. Once the optimal parameters are found, we fit each model to the full training set. Then we calculate the accuracy of predictions on the training set and the test set to see how well the model generalizes to data it has never seen.
We sort the models based on their predictions on the test set--which is the true test of a classifier for 'real world' data. Additionally, we'll plot the learning curve as explained above. ``` import time from sklearn.model_selection import GridSearchCV from sklearn.metrics import accuracy_score num_classifiers = len(dict_classifiers.keys()) def batch_classify(X_train, Y_train, X_test, Y_test, verbose = True): df_results = pd.DataFrame( data=np.zeros(shape=(num_classifiers,4)), columns = ['classifier', 'train_score', 'test_score', 'training_time']) count = 0 for key, classifier in dict_classifiers.items(): t_start = time.clock() grid = GridSearchCV(classifier['classifier'], classifier['params'], refit=True, cv = 10, # 9+1 scoring = 'accuracy', # scoring metric n_jobs = -1 ) estimator = grid.fit(X_train, Y_train) t_end = time.clock() t_diff = t_end - t_start train_score = estimator.score(X_train, Y_train) test_score = estimator.score(X_test, Y_test) df_results.loc[count,'classifier'] = key df_results.loc[count,'train_score'] = train_score df_results.loc[count,'test_score'] = test_score df_results.loc[count,'training_time'] = t_diff if verbose: print("trained {c} in {f:.2f} s".format(c=key, f=t_diff)) count+=1 plot_learning_curve(estimator, "{}".format(key), X_train, Y_train, ylim=(0.75,1.0), cv=10) return df_results df_results = batch_classify(data_train, label_train, data_test, label_test) display(df_results.sort_values(by='test_score', ascending=False)) ``` Looks like all of the classifiers perform reasonably well except for Nearest Neighbors. Interestingly, Naive Bayes performs at almost the same level as the more complex models. Random Forests, LinearSVM, and Gradient Boosting classifiers almost perfectly predict the training set, meaning they have low enough bias in order to capture all of the nuance of the data. 
From the above analysis it seems like Naive Bayes, Random Forest, GradientBoosting, and LinearSVM, would all be adequate choices as 'real-world' models for wine classification that could reliably predict wine classes >95% of the time.

## Conclusions

In this post, we analyzed the UCI Wine Dataset (which is a preloaded dataset included with scikit-learn). Pandas and Seaborn were used to organize and plot the data, which revealed that several of the features naturally separated into classes. Classifiers were trained and tested using the split/train/test paradigm, and we plotted the learning curves for each classifier to get a sense of the stability of the models.

Now that we've learned how to work with scikit-learn classifiers, we'll delve into exactly how they work in future posts.

Thanks for reading! Find this notebook and others at [https://github.com/jbechtel/data_science_notebooks](https://github.com/jbechtel/data_science_notebooks)
github_jupyter
Credits: Prof Bhiksha Raj Course Homework for the course [11-785](https://deeplearning.cs.cmu.edu/) Introduction to Deep Learning, Spring 2020 You will write your own implementation of the backpropagation algorithm for training your own neural network, as well as a few other features such as activation and loss functions. **Note**: It is difficult to verify whether or not your implementation of backprop works. As we have mentioned in the assignment, we have borrowed it from CMU's deep learning course. You can download this homework archive from their website: http://deeplearning.cs.cmu.edu/document/homework/hw1p1_handout.tar Inside the archive, you will find python files where you had to write the code. Paste your code implementation in the respective places, install the dependencies: ``` pip install numpy pip install pytest ``` And run this command from the top-level directory: ``` python3 autograder/hw1_autograder/runner.py ``` You should get a score corresponding to each module. ## Task 1: Activations Implement the `forward` and `derivative` class methods for each activation function. * The identity function has been implemented for you as an example. * The output of the activation should be stored in the `self.state` variable of the class. The `self.state` variable should be used for calculating the derivative during the backward pass. ``` import numpy as np import math class Activation(object): """ Interface for activation functions (non-linearities). In all implementations, the state attribute must contain the result, i.e. the output of forward. """ # No additional work is needed for this class, as it acts like an # abstract base class for the others # Note that these activation functions are scalar operations. I.e, they # shouldn't change the shape of the input. 
def __init__(self): self.state = None def __call__(self, x): return self.forward(x) def forward(self, x): raise NotImplemented def derivative(self): raise NotImplemented class Identity(Activation): """ Identity function (already implemented). """ # This class is a gimme as it is already implemented for you as an example def __init__(self): super(Identity, self).__init__() def forward(self, x): self.state = x return x def derivative(self): return 1.0 class Sigmoid(Activation): """ Sigmoid non-linearity """ # Remember do not change the function signatures as those are needed # to stay the same for AutoLab. def __init__(self): super(Sigmoid, self).__init__() def forward(self, x): self.state=1/(1+np.exp(-x)) return self.state def derivative(self): return self.state*(1-self.state) class Tanh(Activation): """ Tanh non-linearity """ def __init__(self): super(Tanh, self).__init__() def forward(self, x): e_plus=np.exp(x) e_minus=np.exp(-x) numerator=e_plus-e_minus denominator=e_plus+e_minus self.state=numerator/denominator return self.state def derivative(self): return 1-self.state*self.state class ReLU(Activation): """ ReLU non-linearity """ def __init__(self): super(ReLU, self).__init__() def forward(self, x): self.state=np.where(x>0,x,0) return self.state def derivative(self): relu_prime=np.where(self.state>0,1,0) return relu_prime ``` ## Task 2: Loss Implement the forward and derivative methods for `SoftmaxCrossEntropy`. * This class inherits the base `Criterion` class. * We will be using the softmax cross entropy loss detailed in the appendix of this writeup; use the LogSumExp trick to ensure numerical stability. The LogSumExp trick is used to prevent numerical underflow and overflow which can occur when the exponent is very large or very small. 
For example, try looking at the results of trying to exponentiate in python shown below: ```python import math print(math.e**1000) # throws an error print(math.e**(-1000) ``` As you will see, for exponents that are too large, python throws an overflow error, and for exponents that are too small, it rounds down to zero. We can avoid these errors by using the LogSumExp trick: ![alt text](https://imgur.com/download/L0P17iv) You can read more about the derivation of the equivalence [here](https://www.xarg.org/2016/06/the-log-sum-exp-trick-in-machine-learning/) and [here](https://blog.feedly.com/tricks-of-the-trade-logsumexp/) ``` # The following Criterion class will be used again as the basis for a number # of loss functions (which are in the form of classes so that they can be # exchanged easily (it's how PyTorch and other ML libraries do it)) class Criterion(object): """ Interface for loss functions. """ # Nothing needs done to this class, it's used by the following Criterion classes def __init__(self): self.logits = None self.labels = None self.loss = None def __call__(self, x, y): return self.forward(x, y) def forward(self, x, y): raise NotImplemented def derivative(self): raise NotImplemented ``` * Implement the softmax cross entropy operation on a batch of output vectors. * Hint: Add a class attribute to keep track of intermediate values necessary for the backward computation * Calculate the ‘derivative’ of softmax cross entropy using intermediate values saved in the forward pass. 
``` class SoftmaxCrossEntropy(Criterion): """ Softmax loss """ def __init__(self): super(SoftmaxCrossEntropy, self).__init__() def forward(self, x, y): """ Argument: x (np.array): (batch size, 10) y (np.array): (batch size, 10) Return: out (np.array): (batch size, ) """ self.logits = x self.labels = y # softmax mx=np.max(self.logits,axis=1).reshape(-1,1) subtracted=self.logits-mx self.exp_logits=np.exp(subtracted) self.exp_sum=self.exp_logits.sum(axis=1).reshape(-1,1) self.sm=self.exp_logits/self.exp_sum # cross entropy first term=-(self.logits*self.labels.sum(axis=1)) second term=mx+np.log(self.exp_sum) return first term + second term.reshape(-1) def derivative(self): """ Return: out (np.array): (batch size, 10) """ return self.sm-self.labels ``` ## Task 3: Linear Layer Implement the forward and backward methods for the `Linear` class. * Hint: Add a class attribute to keep track of intermediate values necessary for the backward computation. Write the code for the backward method of Linear. * The input delta is the derivative of the loss with respect to the output of the linear layer. It has the same shape as the linear layer output. * Calculate `self.dW` and `self.db` for the backward method. `self.dW` and `self.db` represent the gradients of the loss (averaged across the batch) w.r.t `self.W` and `self.b`. Their shapes are the same as the weight `self.W` and the bias `self.b`. * Calculate the return value for the backward method. `dx` is the derivative of the loss with respect to the input of the linear layer and has the same shape as the input. 
``` class Linear(): def __init__(self, in_feature, out_feature, weight_init_fn, bias_init_fn): """ Argument: W (np.array): (in feature, out feature) dW (np.array): (in feature, out feature) momentum_W (np.array): (in feature, out feature) b (np.array): (1, out feature) db (np.array): (1, out feature) momentum_B (np.array): (1, out feature) """ self.W = weight_init_fn(in_feature, out_feature) self.b = bias_init_fn(out_feature) # TODO: Complete these but do not change the names. self.dW = np.zeros(self.W.shape) self.db = np.zeros(self.b.shape) self.momentum_W = np.zeros(self.W.shape) self.momentum_b = np.zeros(self.b.shape) def __call__(self, x): return self.forward(x) def forward(self, x): """ Argument: x (np.array): (batch size, in feature) Return: out (np.array): (batch size, out feature) """ self.x=x out=np.matmul(self.x,self.W)+self.b return out def backward(self, delta): """ Argument: delta (np.array): (batch size, out feature) Return: out (np.array): (batch size, in feature) """ self.dw=np.dot(delta,self.X.T)/delta.shape[0] self.db=np.sum(delta,axis=0,keepdims=True)/delta.shape[0] dx=np.dot(delta,self.W.T) return dx ``` ## Task 4: Simple MLP In this section of the homework, you will be implementing a Multi-Layer Perceptron with an API similar to popular Automatic Differentiation Libraries like PyTorch. Go through the functions of the given `MLP` class thoroughly and make sure you understand what each function in the class does so that you can create a generic implementation that supports an arbitrary number of layers, types of activations and network sizes. The parameters for the MLP class are: * `input size`: The size of each individual data example. * `output size`: The number of outputs. * `hiddens`: A list with the number of units in each hidden layer. * `activations`: A list of Activation objects for each layer. * `weight init fn`: A function applied to each weight matrix before training. 
* `bias init fn`: A function applied to each bias vector before training. * `criterion`: A Criterion object to compute the loss and its derivative. * `lr`: The learning rate. The attributes of the MLP class are: * `@linear layers`: A list of Linear objects. * `@bn layers`: A list of BatchNorm objects. (Should be None until completing 3.3). The methods of the MLP class are: * `forward`: Forward pass. Accepts a mini-batch of data and return a batch of output activations. * `backward`: Backward pass. Accepts ground truth labels and computes gradients for all parameters. Hint: Use state stored in activations during forward pass to simplify your code. * `zero grads`: Set all gradient terms to 0. * `step`: Apply gradients computed in backward to the parameters. * `train` (Already implemented): Set the mode of the network to train. * `eval` (Already implemented): Set the mode of the network to evaluation. Note: Pay attention to the data structures being passed into the constructor and the class attributes specified initially. Sample constructor call: ```python MLP(784, 10, [64, 64, 32], [Sigmoid(), Sigmoid(), Sigmoid(), Identity()], weight_init_fn, bias_init_fn, SoftmaxCrossEntropy(), 0.008) ``` ``` class MLP(object): """ A simple multilayer perceptron """ def __init__(self, input_size, output_size, hiddens, activations, weight_init_fn, bias_init_fn, criterion, lr): # Don't change this --> self.train_mode = True self.nlayers = len(hiddens) + 1 self.input_size = input_size self.output_size = output_size self.activations = activations self.criterion = criterion self.lr = lr # <--------------------- # Don't change the name of the following class attributes, # the autograder will check against these attributes. But you will need to change # the values in order to initialize them correctly # Initialize and add all your linear layers into the list 'self.linear_layers' # (HINT: self.foo = [ bar(???) for ?? in ? ]) # (HINT: Can you use zip here?) 
self.linear_layers = [Linear(inf, outf, weight_init_fn, bias_init_fn)] for inf,outf in zip([self.input_size]+hiddens,hiddens+[self.output_size]) def forward(self, x): """ Argument: x (np.array): (batch size, input_size) Return: out (np.array): (batch size, output_size) """ # Complete the forward pass through your entire MLP. for i,layer in enumerate(self.linear_layers): x=forward(x) x=self.activations[i](x) return x def zero_grads(self): # Use numpyArray.fill(0.0) to zero out your backpropped derivatives in each # of your linear and batchnorm layers. for layer in self.linear_layers: layer.dw.fill(0.0) layer.db.fill(0.0) def step(self): # Apply a step to the weights and biases of the linear layers. # (You will add momentum later in the assignment to the linear layers) for i in range(len(self.linear_layers)): layer=self.linear_layers[i] layer.W=layer.W-layer.dW layer.b=layer.b-layer.dB def backward(self, labels): final_layer=self.activations[-1] final_outputs=final_layer.state loss=criterion(final_outputs,labels) delta=self.criterion.derivative() for i in range(self.nlayers-1,-1,-1): delta=delta*self.activations[i].derivative() delta=self.linear_layers[i].backward(delta) # Backpropagate through the activation functions, batch norm and # linear layers. # Be aware of which return derivative def error(self, labels): return (np.argmax(self.output, axis = 1) != np.argmax(labels, axis = 1)).sum() def total_loss(self, labels): return self.criterion(self.output, labels).sum() def __call__(self, x): return self.forward(x) def train(self): self.train_mode = True def eval(self): self.train_mode = False ``` ## Task 5: Momentum Modify the `step` function present in the MLP class to include momentum in your gradient descent. We will be using the following momentum update equation: ![alt text](https://imgur.com/download/ZVA66FC) The momentum value will be passed as a parameter to the `MLP`. Copy the rest of your code from above. 
``` class MLP(object): """ A simple multilayer perceptron """ def __init__(self, input_size, output_size, hiddens, activations, weight_init_fn, bias_init_fn, criterion, lr,momentum): # Don't change this --> self.train_mode = True self.nlayers = len(hiddens) + 1 self.input_size = input_size self.output_size = output_size self.activations = activations self.criterion = criterion self.lr = lr # <--------------------- # Don't change the name of the following class attributes, # the autograder will check against these attributes. But you will need to change # the values in order to initialize them correctly # Initialize and add all your linear layers into the list 'self.linear_layers' # (HINT: self.foo = [ bar(???) for ?? in ? ]) # (HINT: Can you use zip here?) self.linear_layers = [Linear(inf, outf, weight_init_fn, bias_init_fn)] for inf,outf in zip([self.input_size]+hiddens,hiddens+[self.output_size]) def forward(self, x): """ Argument: x (np.array): (batch size, input_size) Return: out (np.array): (batch size, output_size) """ # Complete the forward pass through your entire MLP. for i,layer in enumerate(self.linear_layers): x=forward(x) x=self.activations[i](x) return x def zero_grads(self): # Use numpyArray.fill(0.0) to zero out your backpropped derivatives in each # of your linear and batchnorm layers. for layer in self.linear_layers: layer.dw.fill(0.0) layer.db.fill(0.0) def step(self): # Apply a step to the weights and biases of the linear layers. 
# (You will add momentum later in the assignment to the linear layers) for i in range(len(self.linear_layers)): layer=self.linear_layers[i] layer.momentum_W=layer.momentum_W*self.momentum-self.lr*dW layer.W=linear.W+layer.momentum_W layer.momentum_b=layer.momentum_b*self.momentum-self.lr*db layer.b=linear.b+layer.momentum_b def backward(self, labels): final_layer=self.activations[-1] final_outputs=final_layer.state loss=criterion(final_outputs,labels) delta=self.criterion.derivative() for i in range(self.nlayers-1,-1,-1): delta=delta*self.activations[i].derivative() delta=self.linear_layers[i].backward(delta) # Backpropagate through the activation functions, batch norm and # linear layers. # Be aware of which return derivative def error(self, labels): return (np.argmax(self.output, axis = 1) != np.argmax(labels, axis = 1)).sum() def total_loss(self, labels): return self.criterion(self.output, labels).sum() def __call__(self, x): return self.forward(x) def train(self): self.train_mode = True def eval(self): self.train_mode = False ```
github_jupyter
```
%matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
import numpy as np
import os
```

<h3>Plot Target and Predictions

```
# Plot each source f0 contour against its predicted contour and save one
# PNG per utterance into the predictions/ directory.

# open the target and pred textfiles
filepath_source = '/Users/robinson/Downloads/data/pred/20180628_151243/test_source.txt' # use quantised version
# filepath_target = '/Users/robinson/Downloads/data/pred/20180524_114605/test_target.txt' # use quantised version
filepath_pred = '/Users/robinson/Downloads/data/pred/20180628_151243/predictions.txt' # use quantised version
filepath_testlog = '/Users/robinson/Downloads/data/pred/20180628_151243/test_log.txt' # use quantised version

# open files for reading
fs = open(filepath_source, 'r')
# ft = open(filepath_target, 'r')
fp = open(filepath_pred, 'r')
ftl = open(filepath_testlog, 'r')

# read lines (one line per f0 contour) into lists of strings, stripping all whitespace and empty lines
source_strings = fs.read().strip().split('\n')
# target_strings = ft.read().strip().split('\n')
pred_strings = fp.read().strip().split('\n')
test_log_strings = ftl.read().strip().split('\n')

for i in range(len(source_strings)):
    # parse strings and divide all ints by 100 to get real float f0 value (use float(x)/100 for d2p)
    source_array = np.array([float(x) for x in source_strings[i].split(' ')])
    # target_array = np.array([float(x) for x in target_strings[i].split(' ')])
    # if line == 'UNK':
    # [a if C else b for i in items]
    # if pred string is empty, move on to next iteration;
    # if UNK found, replace with 0.0
    if pred_strings[i]:
        pred_array = np.array([float(x) if x != 'UNK' else float(0) for x in pred_strings[i].split(' ')])
    else:
        continue

    # One figure per contour, titled with the matching test-log entry.
    fig = plt.figure(figsize=(16, 8))
    plt.title(test_log_strings[i])
    plt.xlabel('Time (s)')
    plt.ylabel('Freq (Hz)')
    plt.xlim(0, 150)
    plt.ylim(0, 700)
    plt.plot(range(np.shape(source_array)[0]), source_array, color='b', alpha=0.5, linewidth=3, label='source')
    # plt.plot(range(np.shape(target_array)[0]), target_array, color='y', alpha=0.5, linewidth=3, label='target')
    plt.plot(range(np.shape(pred_array)[0]), pred_array, '--', color='y', alpha=0.5, linewidth=3, label='prediction')
    # plt.scatter(range(np.shape(pred_array)[0]), pred_array, color='r', label='pred', s=1)
    plt.legend()
    plt.tight_layout()
    plt.show()

    # Save the figure named after the test-log entry (extension stripped).
    outfilepath = '/Users/robinson/Downloads/data/pred/20180628_151243/predictions/'
    if not os.path.exists(outfilepath):
        os.mkdir(outfilepath)
    outfilename, _ = os.path.splitext(test_log_strings[i])
    fig.savefig(os.path.join(outfilepath, outfilename + '.png'))

# calc diff between the two
# plot the diff on same/another plot

# close the files
fs.close()
# ft.close()
fp.close()
```
github_jupyter
# Image Classification Using Vision Transformer Vision Transformer (ViT) is a new alternative to Convolution Neural Networks (CNNs) in the field of computer vision. The idea of [ViT](https://arxiv.org/abs/2010.11929) was inspired from the success of the [Transformer](https://arxiv.org/abs/1706.03762) and [BERT](https://arxiv.org/abs/1810.04805) architectures in NLP applications. In this example, we will implement a ViT in PyTorch and showcase how to pre-train a ViT and then fine-tune it on a downstream task for good results with minimal downstream training time. ## ViT Model The ViT model is almost the same as the original Transformer except for the following differences: 1. Input image is broken down into small patches, which are used as sequences similar to language. The patching and embedding are implemented by a Convolution2D operation in the `patch_embedding`. 2. Different from original Transformer, the positional embedding is now a trainable parameter. 3. Similar to BERT, a `CLS` token is added before the patch sequence. But in contrast to BERT, the value of the `CLS` token is trainable. 4. After the Transformer encoding, only the embedding corresponding to the `CLS` token will be used as feature for the classification layer. 
```
import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer


class ViTEmbeddings(nn.Module):
    """Patchify an image, prepend a trainable CLS token, and add trainable
    position embeddings (differences 1-3 listed above)."""

    def __init__(self, image_size=224, patch_size=16, num_channels=3, em_dim=768, drop=0.1) -> None:
        super().__init__()
        assert image_size % patch_size == 0, "image size must be an integer multiply of patch size"
        # A conv with stride == kernel_size splits the image into
        # non-overlapping patches and linearly embeds each one in a single op.
        self.patch_embedding = nn.Conv2d(num_channels, em_dim, kernel_size=patch_size, stride=patch_size, bias=False)
        # One position per patch, plus one for the prepended CLS token.
        self.position_embedding = nn.Parameter(torch.zeros(1, (image_size // patch_size)**2 + 1, em_dim))
        self.cls_token = nn.Parameter(torch.zeros(1, 1, em_dim))
        self.dropout = nn.Dropout(drop)

    def forward(self, x):
        x = self.patch_embedding(x).flatten(2).transpose(1, 2)  # [B,C, H, W] -> [B, num_patches, em_dim]
        x = torch.cat([self.cls_token.expand(x.size(0), -1, -1), x], dim=1)  # [B, num_patches+1, em_dim]
        x = x + self.position_embedding
        x = self.dropout(x)
        return x


class ViTEncoder(nn.Module):
    """Embeddings followed by a Transformer encoder; returns the normalized
    CLS-token embedding as the image feature (difference 4 listed above)."""

    def __init__(self, num_layers, image_size, patch_size, num_channels, em_dim, drop, num_heads, ff_dim):
        super().__init__()
        self.embedding = ViTEmbeddings(image_size, patch_size, num_channels, em_dim, drop)
        encoder_layer = TransformerEncoderLayer(em_dim, nhead=num_heads, dim_feedforward=ff_dim, activation='gelu', dropout=drop)
        self.encoder = TransformerEncoder(encoder_layer=encoder_layer, num_layers=num_layers)
        self.layernorm = nn.LayerNorm(em_dim, eps=1e-6)

    def forward(self, x):
        x = self.embedding(x)
        x = x.transpose(0, 1)  # Switch batch and sequence length dimension for pytorch convention
        x = self.encoder(x)
        # After the transpose the sequence dimension is first, so x[0] is the
        # CLS-token embedding for every element of the batch: [B, em_dim].
        x = self.layernorm(x[0])
        return x


class ViTModel(nn.Module):
    """ViT encoder plus a linear classification head over the CLS feature."""

    def __init__(self,
                 num_classes,
                 num_layers=12,
                 image_size=224,
                 patch_size=16,
                 num_channels=3,
                 em_dim=768,
                 drop=0.1,
                 num_heads=12,
                 ff_dim=3072):
        super().__init__()
        self.vit_encoder = ViTEncoder(num_layers=num_layers,
                                      image_size=image_size,
                                      patch_size=patch_size,
                                      num_channels=num_channels,
                                      em_dim=em_dim,
                                      drop=drop,
                                      num_heads=num_heads,
                                      ff_dim=ff_dim)
        self.linear_classifier = nn.Linear(em_dim, num_classes)

    def forward(self, x):
        x = self.vit_encoder(x)
        x = self.linear_classifier(x)  # raw logits: [B, num_classes]
        return x
```

Now let's define some parameters that will be used later:

```
batch_size=128
pretrain_epochs=100
finetune_epochs=1
train_steps_per_epoch=None
eval_steps_per_epoch=None
```

## Upstream Pre-training

We will use CIFAIR 100 as our upstream dataset. The data preprocessing and augmentation is the standard Padded Crop + Dropout used in [this example](https://github.com/fastestimator/fastestimator/blob/master/apphub/image_classification/cifar10_fast/cifar10_fast_torch.py).

```
import tempfile

import fastestimator as fe
from fastestimator.dataset.data import cifair10, cifair100
from fastestimator.op.numpyop.meta import Sometimes
from fastestimator.op.numpyop.multivariate import HorizontalFlip, PadIfNeeded, RandomCrop
from fastestimator.op.numpyop.univariate import ChannelTranspose, CoarseDropout, Normalize
from fastestimator.op.tensorop.loss import CrossEntropy
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
from fastestimator.trace.metric import Accuracy


def pretrain(batch_size, epochs, model_dir=tempfile.mkdtemp(), train_steps_per_epoch=None, eval_steps_per_epoch=None):
    """Pre-train a mini ViT (100-class head) on ciFAIR-100 and return the
    fitted model so its encoder weights can be reused downstream."""
    train_data, eval_data = cifair100.load_data()
    pipeline = fe.Pipeline(
        train_data=train_data,
        eval_data=eval_data,
        batch_size=batch_size,
        ops=[
            # Standard padded-crop + flip + cutout augmentation (train only).
            Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)),
            PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"),
            RandomCrop(32, 32, image_in="x", image_out="x", mode="train"),
            Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")),
            CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1),
            ChannelTranspose(inputs="x", outputs="x")
        ])
    # Mini ViT sized for 32x32 inputs (4x4 patches, 6 layers, 256-dim).
    model = fe.build(
        model_fn=lambda: ViTModel(num_classes=100,
                                  image_size=32,
                                  patch_size=4,
                                  num_layers=6,
                                  num_channels=3,
                                  em_dim=256,
                                  num_heads=8,
                                  ff_dim=512),
        optimizer_fn=lambda x: torch.optim.SGD(x, lr=0.01, momentum=0.9, weight_decay=1e-4))
    network = fe.Network(ops=[
        ModelOp(model=model, inputs="x", outputs="y_pred"),
        CrossEntropy(inputs=("y_pred", "y"), outputs="ce", from_logits=True),
        UpdateOp(model=model, loss_name="ce")
    ])
    traces = [
        Accuracy(true_key="y", pred_key="y_pred")
    ]
    estimator = fe.Estimator(pipeline=pipeline,
                             network=network,
                             epochs=epochs,
                             traces=traces,
                             train_steps_per_epoch=train_steps_per_epoch,
                             eval_steps_per_epoch=eval_steps_per_epoch,
                             log_steps=0)
    estimator.fit(warmup=False)
    return model
```

## Start Pre-training

Let's train the ViT model for 100 epochs, and get the pre-trained weight. This would take ~40 minutes on single GTX 1080 TI GPU. Here we are only training a mini version of the actual ViT model, and the CIFAR100 performance after 100 epochs is similar to the 55% top-1 performance [reported in the community](https://keras.io/examples/vision/image_classification_with_vision_transformer/). However, training the official `ViTModel` model with its original parameters on the JFT-300M dataset would produce much better encoder weights at the cost of a much longer training time. The paper used this strategy to reach near 81% ImageNet downstream top-1 accuracy.

```
pretrained_model = pretrain(batch_size=batch_size,
                            epochs=pretrain_epochs,
                            train_steps_per_epoch=train_steps_per_epoch,
                            eval_steps_per_epoch=eval_steps_per_epoch)
```

## Downstream Fine-tuning

A general rule-of-thumb to ensure successful downstream fine-tuning is to choose a downstream task with less variety and complexity than the upstream training. In this example, given that we used CIFAIR100 as our upstream task, a good candidate for the downstream dataset is CIFAIR10. The official implementation mapped this practice to a larger scale, using JFT-300M as their upstream task and then ImageNet as their downstream task.

Given the similarity between our downstream and upstream datasets, the fine-tuning configuration is almost the same as before.
``` def finetune(pretrained_model, batch_size, epochs, model_dir=tempfile.mkdtemp(), train_steps_per_epoch=None, eval_steps_per_epoch=None): train_data, eval_data = cifair10.load_data() pipeline = fe.Pipeline( train_data=train_data, eval_data=eval_data, batch_size=batch_size, ops=[ Normalize(inputs="x", outputs="x", mean=(0.4914, 0.4822, 0.4465), std=(0.2471, 0.2435, 0.2616)), PadIfNeeded(min_height=40, min_width=40, image_in="x", image_out="x", mode="train"), RandomCrop(32, 32, image_in="x", image_out="x", mode="train"), Sometimes(HorizontalFlip(image_in="x", image_out="x", mode="train")), CoarseDropout(inputs="x", outputs="x", mode="train", max_holes=1), ChannelTranspose(inputs="x", outputs="x") ]) model = fe.build( model_fn=lambda: ViTModel(num_classes=100, image_size=32, patch_size=4, num_layers=6, num_channels=3, em_dim=256, num_heads=8, ff_dim=512), optimizer_fn=lambda x: torch.optim.SGD(x, lr=0.01, momentum=0.9, weight_decay=1e-4)) # load the encoder's weight if hasattr(model, "module"): model.module.vit_encoder.load_state_dict(pretrained_model.module.vit_encoder.state_dict()) else: model.vit_encoder.load_state_dict(pretrained_model.vit_encoder.state_dict()) network = fe.Network(ops=[ ModelOp(model=model, inputs="x", outputs="y_pred"), CrossEntropy(inputs=("y_pred", "y"), outputs="ce", from_logits=True), UpdateOp(model=model, loss_name="ce") ]) traces = [ Accuracy(true_key="y", pred_key="y_pred") ] estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=epochs, traces=traces, train_steps_per_epoch=train_steps_per_epoch, eval_steps_per_epoch=eval_steps_per_epoch) estimator.fit(warmup=False) ``` ## Start the Fine-tuning The downstream ViT is re-using the ViT encoder pre-trained on the CIFAR100 dataset. To illustrate the effect of using the pre-trained encoder, we will only train the downstream task for a **single** epoch. 
```
# Fine-tune for a single epoch, reusing the encoder pre-trained above.
finetune(pretrained_model,
         batch_size=batch_size,
         epochs=finetune_epochs,
         train_steps_per_epoch=train_steps_per_epoch,
         eval_steps_per_epoch=eval_steps_per_epoch)
```
With only one epoch of training, we are able to get 74% top-1 accuracy on the CIFAIR 10 test set. Not bad huh?
github_jupyter
# STELLARSTRUC.IPYNB -- Solve equations of stellar structure ``` ### IMPORT STUFF ### import numpy as np from scipy.interpolate import interp1d from scipy.integrate import odeint import matplotlib.pyplot as plt from crust import crust G = 6.674e-8 # Newton's constant in cgs units c = 2.998e10 # speed of light in cm/s Msun = 1.988e33 # solar mass in g rhonuc = 2.7e14 # nuclear density in g/cm^3 ### MAKE SOME HELPFUL UTILITIES ### def geteos(eospath,eosname): # import tabulated EoS data eos = np.genfromtxt(eospath+eosname+".dat") # EoS data (rho=mass density, p=pressure/c^2, mu=total energy density/c^2) in g/cm^3 [rhodat,mudat,pdat] = crust(eos) # affix low-density crust EoS, return (rho,mu,p) in units of rhonuc return [rhodat, rhodat, pdat] # set [rhodat, rhodat, pdat] such that mu = rho for Newtonian gravity, [rhodat, mudat, pdat] for GR def intpeos(rhodat,mudat,pdat): # interpolate full EoS from tabulated data pmuintp = interp1d(mudat,pdat,kind='linear',bounds_error=False,fill_value=0.) dpdmumuintp = interp1d(mudat,np.gradient(pdat)/np.gradient(mudat),kind='linear',bounds_error=False,fill_value=0.) def p(mu): # pressure as a function of total energy density return pmuintp(mu) def dpdmu(mu): # sound speed squared return dpdmumuintp(mu) murhointp = interp1d(rhodat,mudat,kind='linear',bounds_error=False,fill_value=0.) def Mu(rho): # total energy density as a function of rest-mass energy density, for calculating central value of total energy density return murhointp(rho) return [p, dpdmu, Mu] ### DEFINE KEY FUNCTIONS ### def hydro(y,r): # condition of hydrostatic equilibrium mu, m = y return -mu*m/r # note that we are using G=c=1 units in this code def mass(y,r): # defining equation for the mass mu, m = y return r*mu def struceqs(y,r): # implement equations of stellar structure as a set of coupled ODEs return hydro(y,r), mass(y,r) ### PROVIDE INPUT PARAMETERS ### eosname = "APR4" # SET EQUATION OF STATE HERE rhoc = 1. 
# SET CENTRAL MASS DENSITY HERE eospath = "./" # path to EoS data files stp = 1e-4 # starting step for numerical integration pts = 5e3 # number of points at which to evaluate numerical integration tol = 1e-6 # tolerance for surface finding algorithm ### RUN CODE ### [rhodat,mudat,pdat] = geteos(eospath,eosname) # get tabulated EoS data in units of rhonuc [p, dpdmu, Mu] = intpeos(rhodat,mudat,pdat) # interpolate full EoS p(mu), dpdmu(mu), Mu(rho) from tabulated data y0 = [10.,1e-3] # implement boundary conditions at center of star rlist = np.linspace(stp,10.,int(pts)) # list radial points at which to evaluate numerical integration ys = np.zeros((len(rlist),2)) # create array to store values of functions at evaluation points ys[0] = y0 # store central boundary values Rsol = rlist[-2] # to initialize search, set maximum possible surface location to be furthest radial evaluation point for i in range(len(rlist)-1): # integrate to each radial evaluation point, check if p = 0, continue if not, break if yes rs = [rlist[i],rlist[i+1]] # current integration interval y = odeint(struceqs,ys[i],rs) # do numerical integration ys[i+1] = y[-1] # save solution for functions pressure = ys[i+1][0] # extract pressure if (pressure < tol or pressure != pressure): # check if pressure vanishes Rsol = rs[0] # if so, define stellar surface to lie at current location break rlist = rlist[0:i+1] # truncate list of radial points at surface r=R musoldat = ys[0:i+1,0] # record solution for mu(r) msoldat = ys[0:i+1,1] # record solution for m(r) musol = interp1d(rlist,musoldat,kind='linear') # interpolate full solution for mu(r) from tabulation msol = interp1d(rlist,msoldat,kind='linear') # interpolate full solution for m(r) from tabulation psol = interp1d(rlist,p(musoldat),kind='linear') # interpolate full solution for p(r)=p(mu(r)) from tabulation Msol = msol(Rsol) # evaluate total mass of star M = m(R) ### OUTPUT RESULTS ### plt.figure(1,(15,10)) # plot mu(r), p(r), m(r) 
plt.plot(rlist,rlist**2,c='black',marker='.',label='mu/mu_c') plt.show() R = Rsol*c/(1e5*(G*rhonuc)**0.5) # convert R from code units to km M = Msol*c**3/(G*(G*rhonuc)**0.5*Msun) # convert M from code units to solar masses print 'I am a message that reports the results of the integration.' ```
github_jupyter