code
stringlengths
38
801k
repo_path
stringlengths
6
263
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import sys

# Make the project root importable (this notebook lives one level below it).
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)

import random
import re
import time

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import DataLoader
from gensim.models.word2vec import Word2Vec
from pycparser import c_ast
from pycparser.c_ast import TypeDecl, ArrayDecl

from model import BatchProgramClassifier
# -

# +
root = '/home/david/projects/university/astnn/data/'

# Embedding table for the classifier: one row per word2vec token plus one
# extra all-zero row at the end (used for out-of-vocabulary tokens).
word2vec = Word2Vec.load(root + "train/embedding/node_w2v_128").wv
embeddings = np.zeros((word2vec.vectors.shape[0] + 1, word2vec.vectors.shape[1]),
                      dtype="float32")
embeddings[:word2vec.vectors.shape[0]] = word2vec.vectors

HIDDEN_DIM = 100
ENCODE_DIM = 128
LABELS = 104
EPOCHS = 15
BATCH_SIZE = 1
USE_GPU = False
MAX_TOKENS = word2vec.vectors.shape[0]
EMBEDDING_DIM = word2vec.vectors.shape[1]

model = BatchProgramClassifier(EMBEDDING_DIM, HIDDEN_DIM, MAX_TOKENS + 1, ENCODE_DIM,
                               LABELS, BATCH_SIZE, USE_GPU, embeddings)
model.load_state_dict(torch.load("/home/david/projects/university/astnn/model.pt"))
# -

# # Load Data

# +
word2vec = Word2Vec.load('/home/david/projects/university/astnn/data/train/embedding/node_w2v_128').wv
vocab = word2vec.vocab
ast_data = pd.read_pickle(root + 'test/test_.pkl')     # pycparser ASTs of test programs
block_data = pd.read_pickle(root + 'test/blocks.pkl')  # token-index block encodings
# -

# # Allowed var names

# Maps a leaf token index to its encoder-space vector: embedding lookup
# followed by the encoder's W_c projection.
leaf_embed = nn.Sequential(
    model._modules['encoder']._modules['embedding'],
    model._modules['encoder']._modules['W_c']
)

# +
# Words we won't allow as replacement variable names: C keywords plus a few
# library identifiers that occur in the corpus.
reserved_words = [
    'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do', 'int',
    'long', 'register', 'return', 'short', 'sizeof', 'static', 'struct', 'switch',
    'typedef', 'union', 'unsigned', 'void', 'volatile', 'while', 'double', 'else',
    'enum', 'extern', 'float', 'for', 'goto', 'if', 'printf', 'scanf', 'cos', 'malloc'
]

# Compiled once instead of on every call; matches a full C-style identifier.
_IDENTIFIER_RE = re.compile("([a-z]|[A-Z]|_)+([a-z]|[A-Z]|[0-9]|_)*$")


def allowed_variable(var):
    '''Return True if `var` is a syntactically valid, non-reserved identifier.'''
    return var not in reserved_words and _IDENTIFIER_RE.match(var) is not None


allowed_variable('scanf')

# +
# Candidate replacement names: token index -> encoder-space embedding, for
# every vocabulary entry that is usable as a variable name.
embedding_map = {}
for index in range(len(vocab)):
    if allowed_variable(word2vec.index2word[index]):
        embedding_map[index] = leaf_embed(torch.tensor(index)).detach().numpy()
# -

# # Var replace functions

# +
def replace_index(node, old_i, new_i):
    '''Return a copy of `node` (a nested [token_index, child, ...] list) with
    every occurrence of token index `old_i` replaced by `new_i`.'''
    i = node[0]
    if i == old_i:
        result = [new_i]
    else:
        result = [i]
    children = node[1:]
    for child in children:
        result.append(replace_index(child, old_i, new_i))
    return result


def replace_var(x, old_i, new_i):
    '''Apply `replace_index` to every statement block of the encoded program `x`.'''
    mod_blocks = []
    for block in x:
        mod_blocks.append(replace_index(block, old_i, new_i))
    return mod_blocks
# -

# # Closest Var functions

# +
def l2_norm(a, b):
    '''Euclidean distance between vectors `a` and `b`.'''
    return np.linalg.norm(a - b)


def cos_sim(a, b):
    '''Cosine similarity between vectors `a` and `b`.'''
    return np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))


def closest_index(embedding, embedding_map, metric):
    '''Return the key of `embedding_map` whose vector minimises `metric`
    against `embedding` (a torch tensor).'''
    embedding = embedding.detach().numpy()
    closest_i = list(embedding_map.keys())[0]
    closest_dist = metric(embedding_map[closest_i], embedding)
    for i, e in embedding_map.items():
        d = metric(e, embedding)
        if d < closest_dist:
            closest_dist = d
            closest_i = i
    return closest_i


def normalize(v):
    '''Return `v` scaled to unit length (`v` itself if it is the zero vector).'''
    norm = np.linalg.norm(v)
    if norm == 0:
        return v
    return v / norm
# -

# # Grad locating functions

# +
def get_embedding(indices, node_list):
    '''Collect the rows of `node_list` at the given (post-order) leaf positions.

    `node_list` is a (nodes, dim) tensor for one statement block. Rows that
    are entirely zero are skipped when counting positions (presumably padding
    rows — TODO confirm against the encoder).
    '''
    res = []
    c = 0
    for i in range(node_list.size(0)):
        if not np.all(node_list[i].detach().numpy() == 0):
            if c in indices:
                res.append(node_list[i])
            c += 1
    return res


def post_order_loc(node, var, res, counter):
    '''Append to `res` the post-order position of every leaf of `node` whose
    token index equals `var`; return the updated (res, counter) pair.'''
    index = node[0]
    children = node[1:]
    for child in children:
        res, counter = post_order_loc(child, var, res, counter)
    if var == index and (not children):
        res.append(counter)
    counter += 1
    return res, counter


def get_grad(x, var_index, node_list):
    '''Sum the loss gradient over every leaf occurrence of `var_index` in the
    encoded program `x` and return (summed_grad, leaf_embedding).

    The returned embedding is the first occurrence in the last block that
    contains one. Returns (None, None) when the variable never occurs as a
    leaf. (The previous version used a bare `try/except: pass` here, which
    could leave `node_embedding` unbound and raise NameError at the return.)
    '''
    grads = []
    node_embedding = None
    for i, block in enumerate(x):
        indices, _ = post_order_loc(block, var_index, [], 0)
        grads += get_embedding(indices, node_list.grad[:, i, :])
        occurrences = get_embedding(indices, node_list[:, i, :])
        if occurrences:
            node_embedding = occurrences[0]
    if len(grads) < 1:
        return None, None
    grad = torch.stack(grads).sum(dim=0)
    return grad, node_embedding
# -

# # Var name finder

# +
class declarationFinder(c_ast.NodeVisitor):
    '''Collects the names of variable declarations in a pycparser AST.'''

    def __init__(self):
        self.names = set()

    def visit_Decl(self, node):
        # Only plain variables and arrays; other Decl subtypes are skipped.
        if type(node.type) in [TypeDecl, ArrayDecl]:
            self.names.add(node.name)


def get_var_names(ast):
    '''Return the set of variable names declared anywhere in `ast`.'''
    declaration_finder = declarationFinder()
    declaration_finder.visit(ast)
    return declaration_finder.names
# -

# # FGSM
# # with vars ordered and early exit

# Index used for out-of-vocabulary variable names (maps to the zero row of
# the embedding table).
MAX_TOKEN = word2vec.vectors.shape[0]

# +
import time
import datetime


def evaluate(epsilon, limit=None, sort_vars=True):
    '''Run the FGSM-style variable-renaming attack over the test set.

    For each program: backprop the loss w.r.t. the model's own prediction,
    rank the declared variables by gradient magnitude (when `sort_vars`),
    then try renaming each variable by stepping `epsilon` along the gradient
    sign in embedding space and snapping to the nearest allowed name.
    Stops at the first rename that flips the prediction.

    Parameters
    ----------
    epsilon : float
        FGSM step size.
    limit : int or None
        Stop after roughly this many programs (None = whole test set).
    sort_vars : bool
        Try variables in decreasing gradient-magnitude order.

    Returns
    -------
    tuple
        (fraction of programs NOT flipped, fraction of rename attempts NOT
        flipped) — i.e. the model's surviving accuracy under attack.
    '''
    ast_count = 0  # programs where some rename flipped the prediction
    var_count = 0  # individual rename attempts that flipped the prediction
    ast_total = 0
    var_total = 0
    start = time.time()
    loss_function = torch.nn.CrossEntropyLoss()  # hoisted: loop-invariant
    for code_id in block_data['id'].tolist():
        x, ast = block_data['code'][code_id], ast_data['code'][code_id]
        _, orig_pred = torch.max(model([x]).data, 1)
        orig_pred = orig_pred.item()

        # Backprop the loss against the model's own prediction so that
        # node_list.grad holds d(loss)/d(leaf embeddings).
        labels = torch.LongTensor([orig_pred])
        output = model([x])
        loss = loss_function(output, Variable(labels))
        loss.backward()
        n_list = model._modules['encoder'].node_list

        var_names = get_var_names(ast)
        success = False
        var_weighted = []
        for var in list(var_names):
            orig_index = vocab[var].index if var in vocab else MAX_TOKEN
            grad, node_embedding = get_grad(x, orig_index, n_list)
            if grad is not None:
                h = abs((grad @ torch.sign(grad)).item())
                # BUG FIX: keep orig_index paired with its gradient. The
                # original tuples dropped it, so after sorting the loop below
                # used the leftover orig_index of the LAST variable and
                # renamed the wrong one.
                var_weighted.append((h, grad, node_embedding, orig_index))
        if sort_vars:
            var_weighted = sorted(var_weighted, key=lambda t: t[0], reverse=True)
        for h, grad, node_embedding, orig_index in var_weighted:
            # FGSM step in embedding space, then snap to the closest
            # allowed variable name.
            v = node_embedding + epsilon * torch.sign(grad)
            i = closest_index(v, sampled_embedding_map, l2_norm)
            if i != orig_index:
                new_x_l2 = replace_var(x, orig_index, i)
            else:
                new_x_l2 = x
            if new_x_l2:
                o = model([new_x_l2])
                _, predicted_l2 = torch.max(o.data, 1)
                var_total += 1
                if orig_pred != predicted_l2.item():
                    var_count += 1
                    success = True
                    break
        if success:
            ast_count += 1
        ast_total += 1
        if ast_total % 500 == 499:
            # Periodic progress report with running success rates.
            eval_time = datetime.timedelta(seconds=time.time() - start)
            print(ast_total, ";", eval_time, ";", ast_count / ast_total, ";",
                  var_count / var_total)
        if limit and limit < ast_total:
            break
    return (1 - (ast_count / ast_total), 1 - (var_count / var_total))
# -

# +
# Use the full candidate map. (Earlier experiments sub-sampled it to speed up
# the nearest-neighbour search at some cost in attack quality.)
sampled_embedding_map = embedding_map
# -

evaluate(10, 500)

# +
import time
# Sweep the FGSM step size and record attack performance at each epsilon.
epsilons = np.linspace(1, 100, 30)
ast_performances = []
var_performances = []
for eps in epsilons:
    t0 = time.time()
    ast_perf, var_perf = evaluate(eps, limit=200)
    elapsed = time.time() - t0
    ast_performances.append(ast_perf)
    var_performances.append(var_perf)
    print(eps, elapsed, ast_perf, var_perf)
notebooks/fgsm-heuristic-multivar.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Tutorial: PyTorch __author__ = "<NAME>" __version__ = "CS224U, Stanford, Spring 2020" # ## Contents # # 1. [Motivation](#Motivation) # 1. [Importing PyTorch](#Importing-PyTorch) # 1. [Tensors](#Tensors) # 1. [Tensor creation](#Tensor-creation) # 1. [Operations on tensors](#Operations-on-tensors) # 1. [GPU computation](#GPU-computation) # 1. [Neural network foundations](#Neural-network-foundations) # 1. [Automatic differentiation](#Automatic-differentiation) # 1. [Modules](#Modules) # 1. [Sequential](#Sequential) # 1. [Criteria and loss functions](#Criteria-and-loss-functions) # 1. [Optimization](#Optimization) # 1. [Training a simple model](#Training-a-simple-model) # 1. [Reproducibility](#Reproducibility) # 1. [References](#References) # ## Motivation # PyTorch is a Python package designed to carry out scientific computation. We use PyTorch in a range of different environments: local model development, large-scale deployments on big clusters, and even _inference_ in embedded, low-power systems. While similar in many aspects to NumPy, PyTorch enables us to perform fast and efficient training of deep learning and reinforcement learning models not only on the CPU but also on a GPU or other ASICs (Application Specific Integrated Circuits) for AI, such as Tensor Processing Units (TPU). # ## Importing PyTorch # This tutorial assumes a working installation of PyTorch using your `nlu` environment, but the content applies to any regular installation of PyTorch. If you don't have a working installation of PyTorch, please follow the instructions in [the setup notebook](setup.ipynb). # # To get started working with PyTorch we simply begin by importing the torch module: import torch # **Side note**: why not `import pytorch`? 
The name of the package is `torch` for historical reasons: `torch` is the original name of the ancestor of the PyTorch library that got started back in 2002 as a C library with Lua scripting. It was only much later that the original `torch` was ported to Python. The PyTorch project decided to prefix the Py to make clear that this library refers to the Python version, as it was confusing back then to know which `torch` one was referring to. All the internal references to the library use just `torch`. It's possible that PyTorch will be renamed at some point, as the original `torch` is no longer maintained and there is no longer confusion. # We can see the version installed and determine whether or not we have a GPU-enabled PyTorch install by issuing print("PyTorch version {}".format(torch.__version__)) print("GPU-enabled installation? {}".format(torch.cuda.is_available())) # PyTorch has good [documentation](https://pytorch.org/docs/stable/index.html) but it can take some time to familiarize oneself with the structure of the package; it's worth the effort to do so! # # We will also make use of other imports: import numpy as np # ## Tensors # Tensors are collections of numbers represented as an array, and are the basic building blocks in PyTorch. # # You are probably already familiar with several types of tensors: # # - A scalar, a single number, is a zero-th order tensor. # # - A column vector $v$ of dimensionality $d_c \times 1$ is a tensor of order 1. # # - A row vector $x$ of dimensionality $1 \times d_r$ is a tensor of order 1. # # - A matrix $A$ of dimensionality $d_r \times d_c$ is a tensor of order 2. # # - A cube $T$ of dimensionality $d_r \times d_c \times d_d$ is a tensor of order 3. # # Tensors are the fundamental blocks that carry information in our mathematical models, and they are composed using several operations to create mathematical graphs in which information can flow (propagate) forward (functional application) and backwards (using the chain rule). 
# # We have seen multidimensional arrays in NumPy. These NumPy objects are also a representation of tensors. # **Side note**: what is a tensor _really_? Tensors are important mathematical objects with applications in multiple domains in mathematics and physics. The term "tensor" comes from the usage of these mathematical objects to describe the stretching of a volume of matter under *tension*. They are central objects of study in a subfield of mathematics known as differential geometry, which deals with the geometry of continuous vector spaces. As a very high-level summary (and as first approximation), tensors are defined as multi-linear "machines" that have a number of slots (their order, a.k.a. rank), taking a number of "column" vectors and "row" vectors *to produce a scalar*. For example, a tensor $\mathbf{A}$ (represented by a matrix with rows and columns that you could write in a sheet of paper) can be thought of having two slots. So when $\mathbf{A}$ acts upon a column vector $\mathbf{v}$ and a row vector $\mathbf{x}$, it returns a scalar: # # $$\mathbf{A}(\mathbf{x}, \mathbf{v}) = s$$ # # If $\mathbf{A}$ only acts on the column vector, for example, the result will be another column tensor $\mathbf{u}$ of one order less than the order of $\mathbf{A}$. Thus, when $\mathbf{v}$ acts is similar to "removing" its slot: # # $$\mathbf{u} = \mathbf{A}(\mathbf{v})$$ # # The resulting $\mathbf{u}$ can later interact with another row vector to produce a scalar or be used in any other way. # # This can be a very powerful way of thinking about tensors, as their slots can guide you when writing code, especially given that PyTorch has a _functional_ approach to modules in which this view is very much highlighted. As we will see below, these simple equations above have a completely straightforward representation in the code. 
In the end, most of what our models will do is to process the input using this type of functional application so that we end up having a tensor output and a scalar value that measures how good our output is with respect to the real output value in the dataset. # ### Tensor creation # Let's get started with tensors in PyTorch. The framework supports eight different types ([Lapan 2018](#References)): # # - 3 float types (16-bit, 32-bit, 64-bit): `torch.FloatTensor` is the class name for the commonly used 32-bit tensor. # - 5 integer types (signed 8-bit, unsigned 8-bit, 16-bit, 32-bit, 64-bit): common tensors of these types are the 8-bit unsigned tensor `torch.ByteTensor` and the 64-bit `torch.LongTensor`. # # There are three fundamental ways to create tensors in PyTorch ([Lapan 2018](#References)): # # - Call a tensor constructor of a given type, which will create a non-initialized tensor. So we then need to fill this tensor later to be able to use it. # - Call a built-in method in the `torch` module that returns a tensor that is already initialized. # - Use the PyTorch–NumPy bridge. # #### Calling the constructor # Let's first create a 2 x 3 dimensional tensor of the type `float`: t = torch.FloatTensor(2, 3) print(t) print(t.size()) # Note that we specified the dimensions as the arguments to the constructor by passing the numbers directly – and not a list or a tuple, which would have very different outcomes as we will see below! We can always inspect the size of the tensor using the `size()` method. # # The constructor method allocates space in memory for this tensor. However, the tensor is *non-initialized*. In order to initialize it, we need to call any of the tensor initialization methods of the basic tensor types. 
For example, the tensor we just created has a built-in method `zero_()`: t.zero_() # The underscore after the method name is important: it means that the operation happens _in place_, this is, the returned object is the same object but now with different content. # A very handy way to construct a tensor using the constructor happens when we have available the content we want to put in the tensor in the form of a Python iterable. In this case, we just pass it as the argument to the constructor: torch.FloatTensor([[1, 2, 3], [4, 5, 6]]) # #### Calling a method in the torch module # A very convenient way to create tensors, in addition to using the constructor method, is to use one of the multiple methods provided in the `torch` module. In particular, the `tensor` method allows us to pass a number or iterable as the argument to get the appropriately typed tensor: tl = torch.tensor([1, 2, 3]) t = torch.tensor([1., 2., 3.]) print("A 64-bit integer tensor: {}, {}".format(tl, tl.type())) print("A 32-bit float tensor: {}, {}".format(t, t.type())) # We can create a similar 2x3 tensor to the one above by using the `torch.zeros()` method, passing a sequence of dimensions to it: t = torch.zeros(2, 3) print(t) # There are many methods for creating tensors. We list some useful ones: # + t_zeros = torch.zeros_like(t) # zeros_like returns a new tensor t_ones = torch.ones(2, 3) # creates a tensor with 1s t_fives = torch.empty(2, 3).fill_(5) # creates a non-initialized tensor and fills it with 5 t_random = torch.rand(2, 3) # creates a uniform random tensor t_normal = torch.randn(2, 3) # creates a normal random tensor print(t_zeros) print(t_ones) print(t_fives) print(t_random) print(t_normal) # - # We now see emerging two important paradigms in PyTorch. The _imperative_ approach to performing operations, using _inplace_ methods, is in marked contrast with an additional paradigm also used in PyTorch, the _functional_ approach, where the returned object is a copy of the original object. 
Both paradigms have their specific use cases as we will be seeing below. The rule of thumb is that _inplace_ methods are faster and don't require extra memory allocation in general, but they can be tricky to understand (keep this in mind regarding the computational graph that we will see below). _Functional_ methods make the code referentially transparent, which is a highly desired property that makes it easier to understand the underlying math, but we rely on the efficiency of the implementation: # + # creates a new copy of the tensor that is still linked to # the computational graph (see below) t1 = torch.clone(t) assert id(t) != id(t1), 'Functional methods create a new copy of the tensor' # To create a new _independent_ copy, we do need to detach # from the graph t1 = torch.clone(t).detach() # - # #### Using the PyTorch–NumPy bridge # A quite useful feature of PyTorch is its almost seamless integration with NumPy, which allows us to perform operations on NumPy and interact from PyTorch with the large number of NumPy libraries as well. Converting a NumPy multi-dimensional array into a PyTorch tensor is very simple: we only need to call the `tensor` method with NumPy objects as the argument: # + # Create a new multi-dimensional array in NumPy with the np datatype (np.float32) a = np.array([1., 2., 3.]) # Convert the array to a torch tensor t = torch.tensor(a) print("NumPy array: {}, type: {}".format(a, a.dtype)) print("Torch tensor: {}, type: {}".format(t, t.dtype)) # - # We can also seamlessly convert a PyTorch tensor into a NumPy array: t.numpy() # **Side note**: why not `torch.from_numpy(a)`? The `from_numpy()` method is deprecated in favor of `tensor()`, which is a more capable method in the torch package. `from_numpy()` is only there for backwards compatibility. It can be a little bit quirky, so I recommend using the newer method in PyTorch >= 0.4. 
# #### Indexing # # # Indexing works as expected with NumPy: t = torch.randn(2, 3) t[ : , 0] # PyTorch also supports indexing using long tensors, for example: t = torch.randn(5, 6) print(t) i = torch.tensor([1, 3]) j = torch.tensor([4, 5]) print(t[i]) # selects rows 1 and 3 print(t[i, j]) # selects (1, 4) and (3, 5) # #### Type conversion # Each tensor has a set of convenient methods to convert types. For example, if we want to convert the tensor above to a 32-bit float tensor, we use the method `.float()`: t = t.float() # converts to 32-bit float print(t) t = t.double() # converts to 64-bit float print(t) t = t.byte() # converts to unsigned 8-bit integer print(t) # ### Operations on tensors # Now that we know how to create tensors, let's create some of the fundamental tensors and see some common operations on them: # Scalars =: creates a tensor with a scalar # (zero-th order tensor, i.e. just a number) s = torch.tensor(42) print(s) # **Tip**: a very convenient to access scalars is with `.item()`: s.item() # Let's see higher-order tensors – remember we can always inspect the dimensionality of a tensor using the `.size()` method: # + # Row vector x = torch.randn(1,3) print("Row vector\n{}\nwith size {}".format(x, x.size())) # Column vector v = torch.randn(3,1) print("Column vector\n{}\nwith size {}".format(v, v.size())) # Matrix A = torch.randn(3, 3) print("Matrix\n{}\nwith size {}".format(A, A.size())) # - # A common operation is matrix-vector multiplication (and in general tensor-tensor multiplication). For example, the product $\mathbf{A}\mathbf{v} + \mathbf{b}$ is as follows: u = torch.matmul(A, v) print(u) b = torch.randn(3,1) y = u + b # we can also do torch.add(u, b) print(y) # where we retrieve the expected result (a column vector of dimensions 3x1). We can of course compose operations: s = torch.matmul(x, torch.matmul(A, v)) print(s.item()) # There are many functions implemented for every tensor, and we encourage you to study the documentation. 
Some of the most common ones: # + # common tensor methods (they also have the counterpart in # the torch package, e.g. as torch.sum(t)) t = torch.randn(2,3) t.sum(dim=0) t.t() # transpose t.numel() # number of elements in tensor t.nonzero() # indices of non-zero elements t.view(-1, 2) # reorganizes the tensor to these dimensions t.squeeze() # removes size 1 dimensions t.unsqueeze(0) # inserts a dimension # operations in the package torch.arange(0, 10) # tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) torch.eye(3, 3) # creates a 3x3 matrix with 1s in the diagonal (identity in this case) t = torch.arange(0, 3) torch.cat((t, t)) # tensor([0, 1, 2, 0, 1, 2]) torch.stack((t, t)) # tensor([[0, 1, 2], # [0, 1, 2]]) # - # ## GPU computation # Deep Learning frameworks take advantage of the powerful computational capabilities of modern graphic processing units (GPUs). GPUs were originally designed to perform frequent operations for graphics very efficiently and fast, such as linear algebra operations, which makes them ideal for our interests. PyTorch makes it very easy to use the GPU: the common scenario is to tell the framework that we want to instantiate a tensor with a type that makes it a GPU tensor, or move a given CPU tensor to the GPU. All the tensors that we have seen above are CPU tensors, and PyTorch has the counterparts for GPU tensors in the `torch.cuda` module. Let's see how this works. # # A common way to explicitly declare the tensor type as a GPU tensor is through the use of the constructor method for tensor creation inside the `torch.cuda` module: t_gpu = torch.cuda.FloatTensor(3, 3) # creation of a GPU tensor t_gpu.zero_() # initialization to zero # However, a more common approach that gives us flexibility is through the use of devices. A device in PyTorch refers to either the CPU (indicated by the string "cpu") or one of the possible GPU cards in the machine (indicated by the string "cuda:$n$", where $n$ is the index of the card). 
Let's create a random gaussian matrix using a method from the `torch` package, and set the computational device to be the GPU by specifying the `device` to be `cuda:0`, the first GPU card in our machine (this code will fail if you don't have a GPU, but we will work around that below): # + try: t_gpu = torch.randn(3, 3, device="cuda:0") except: print("Torch not compiled with CUDA enabled") t_gpu = None t_gpu # - # As you can notice, the tensor now has the explicit device set to be a CUDA device, not a CPU device. Let's now create a tensor in the CPU and move it to the GPU: # # we could also state explicitly the device to be the # CPU with torch.randn(3,3,device="cpu") t = torch.randn(3, 3) t # In this case, the device is the CPU, but PyTorch does not explicitly say that given that this is the default behavior. To copy the tensor to the GPU we use the `.to()` method that every tensor implements, passing the device as an argument. This method creates a copy in the specified device or, if the tensor already resides in that device, it returns the original tensor ([Lapan 2018](#References)): t_gpu = t.to("cuda:0") # copies the tensor from CPU to GPU # note that if we do now t_to_gpu.to("cuda:0") it will # return the same tensor without doing anything else # as this tensor already resides on the GPU print(t_gpu) print(t_gpu.device) # **Tip**: When we program PyTorch models, we will have to specify the device in several places (not so many, but definitely more than once). A good practice that is consistent accross the implementation and makes the code more portable is to declare early in the code a device variable by querying the framework if there is a GPU available that we can use. 
We can do this by writing device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") print(device) # We can then use `device` as an argument of the `.to()` method in the rest of our code: # moves t to the device (this code will **not** fail if the # local machine has no access to a GPU) t.to(device) # **Side note**: having good GPU backend support is a critical aspect of a deep learning framework. Some models depend crucially on performing computations on a GPU. Most frameworks, including PyTorch, only provide good support for GPUs manufactured by Nvidia. This is mostly due to the heavy investment this company made on CUDA (Compute Unified Device Architecture), the underlying parallel computing platform that enables this type of scientific computing (and the reason for the device label), with specific implementations targeted to Deep Neural Networks as cuDNN. Other GPU manufacturers, most notably AMD, are making efforts towards enabling ML computing in their cards, but their support is still partial. # ## Neural network foundations # Computing gradients is a crucial feature in deep learning, given that the training procedure of neural networks relies on optimization techniques that update the parameters of the model by using the gradient information of a scalar magnitude – the loss function. How is it possible to compute the derivatives? There are different methods, namely # # - **Symbolic Differentiation**: given a symbolic expression, the software provides the derivative by performing symbolic transformations (e.g. Wolfram Alpha). The benefits are clear, but it is not always possible to compute an analytical expression. # # - **Numerical Differentiation**: computes the derivatives using expressions that are suitable to be evaluated numerically, using the finite differences method to several orders of approximation. A big drawback is that these methods are slow. 
# # - **Automatic Differentiation**: a library adds to the set of functional primitives an implementation of the derivative for each of these functions. Thus, if the library contains the function $sin(x)$, it also implements the derivative of this function, $\frac{d}{dx}sin(x) = cos(x)$. Then, given a composition of functions, the library can compute the derivative with respect a variable by successive application of the chain rule, a method that is known in deep learning as backpropagation. # ### Automatic differentiation # Modern deep learning libraries are capable of performing automatic differentiation. The two main approaches to computing the graph are _static_ and _dynamic_ processing ([Lapan 2018](#References)): # # - **Static graphs**: the deep learning framework converts the computational graph into a static representation that cannot be modified. This allows the library developers to do very aggressive optimizations on this static graph ahead of computation time, pruning some areas and transforming others so that the final product is highly optimized and fast. The drawback is that some models can be really hard to implement with this approach. For example, TensorFlow uses static graphs. Having static graphs is part of the reason why TensorFlow has excellent support for sequence processing, which makes it very popular in NLP. # # - **Dynamic graphs**: the framework does not create a graph ahead of computation, but records the operations that are performed, which can be quite different for different inputs. When it is time to compute the gradients, it unrolls the graph and perform the computations. A major benefit of this approach is that implementing complex models can be easier in this paradigm. This flexibility comes at the expense of the major drawback of this approach: speed. Dynamic graphs cannot leverage the same level of ahead-of-time optimization as static graphs, which makes them slower. 
PyTorch uses dynamic graphs as the underlying paradigm for gradient computation. # Here is simple graph to compute $y = wx + b$ (from [<NAME> 2019](#References-and-Further-Reading)): # <img src="fig/simple_computation_graph.png" width=600 /> # PyTorch computes the graph using the Autograd system. Autograd records a graph when performing the forward pass (function application), keeping track of all the tensors defined as inputs. These are the leaves of the graph. The output tensors are the roots of the graph. By navigating this graph from root to leaves, the gradients are automatically computed using the chain rule. In summary, # # - Forward pass (the successive function application) goes from leaves to root. We use the apply method in PyTorch. # - Once the forward pass is completed, Autograd has recorded the graph and the backward pass (chain rule) can be done. We use the method `backwards()` on the root of the graph. # ### Modules # The base implementation for all neural network models in PyTorch is the class `Module` in the package `torch.nn`: import torch.nn as nn # All our models subclass this base `nn.Module` class, which provides an interface to important methods used for constructing and working with our models, and which contains sensible initializations for our models. Modules can contain other modules (and usually do). # # Let's see a simple, custom implementation of a multi-layer feed forward network. In the example below, our simple mathematical model is # # $$\mathbf{y} = \mathbf{U}(f(\mathbf{W}(\mathbf{x})))$$ # # where $f$ is a non-linear function (a `ReLU`), is directly translated into a similar expression in PyTorch. 
To do that, we simply subclass `nn.Module`, register the two affine transformations and the non-linearity, and implement their composition within the `forward` method: class MyCustomModule(nn.Module): def __init__(self, n_inputs, n_hidden, n_output_classes): # call super to initialize the class above in the hierarchy super(MyCustomModule, self).__init__() # first affine transformation self.W = nn.Linear(n_inputs, n_hidden) # non-linearity (here it is also a layer!) self.f = nn.ReLU() # final affine transformation self.U = nn.Linear(n_hidden, n_output_classes) def forward(self, x): y = self.U(self.f(self.W(x))) return y # Then, we can use our new module as follows: # + # set the network's architectural parameters n_inputs = 3 n_hidden= 4 n_output_classes = 2 # instantiate the model model = MyCustomModule(n_inputs, n_hidden, n_output_classes) # create a simple input tensor # size is [1,3]: a mini-batch of one example, # this example having dimension 3 x = torch.FloatTensor([[0.3, 0.8, -0.4]]) # compute the model output by **applying** the input to the module y = model(x) # inspect the output print(y) # - # As we see, the output is a tensor with its gradient function attached – Autograd tracks it for us. # **Tip**: modules overrides the `__call__()` method, where the framework does some work. Thus, instead of directly calling the `forward()` method, we apply the input to the model instead. # ### Sequential # A powerful class in the `nn` package is `Sequential`, which allows us to express the code above more succinctly: class MyCustomModule(nn.Module): def __init__(self, n_inputs, n_hidden, n_output_classes): super(MyCustomModule, self).__init__() self.network = nn.Sequential( nn.Linear(n_inputs, n_hidden), nn.ReLU(), nn.Linear(n_hidden, n_output_classes)) def forward(self, x): y = self.network(x) return y # As you can imagine, this can be handy when we have a large number of layers for which the actual names are not that meaningful. 
It also improves readability: class MyCustomModule(nn.Module): def __init__(self, n_inputs, n_hidden, n_output_classes): super(MyCustomModule, self).__init__() self.p_keep = 0.7 self.network = nn.Sequential( nn.Linear(n_inputs, n_hidden), nn.ReLU(), nn.Linear(n_hidden, 2*n_hidden), nn.ReLU(), nn.Linear(2*n_hidden, n_output_classes), # dropout argument is probability of dropping nn.Dropout(1 - self.p_keep), # applies softmax in the data dimension nn.Softmax(dim=1) ) def forward(self, x): y = self.network(x) return y # **Side note**: Another important package in `torch.nn` is `Functional`, typically imported as `F`. Functional contains many useful functions, from non-linear activations to convolutional, dropout, and even distance functions. Many of these functions have counterpart implementations as layers in the `nn` package so that they can be easily used in pipelines like the one above implemented using `nn.Sequential`. # + import torch.nn.functional as F y = F.relu(torch.FloatTensor([[-5, -1, 0, 5]])) y # - # ### Criteria and loss functions # PyTorch has implementations for the most common criteria in the `torch.nn` package. You may notice that, as with many of the other functions, there are two implementations of loss functions: the reference functions in `torch.nn.functional` and practical class in `torch.nn`, which are the ones we typically use. Probably the two most common ones are ([Lapan 2018](#References)): # # - `nn.MSELoss` (mean squared error): squared $L_2$ norm used for regression. # - `nn.CrossEntropyLoss`: criterion used for classification as the result of combining `nn.LogSoftmax()` and `nn.NLLLoss()` (negative log likelihood), operating on the input scores directly. When possible, we recommend using this class instead of using a softmax layer plus a log conversion and `nn.NLLLoss`, given that the `LogSoftmax` implementation guards against common numerical errors, resulting in less instabilities. 
# # Once our model produces a prediction, we pass it to the criteria to obtain a measure of the loss: # + # the true label (in this case, 1) from our dataset wrapped # as a tensor of minibatch size of 1 y_gold = torch.tensor([1]) # our simple classification criterion for this simple example criterion = nn.CrossEntropyLoss() # forward pass of our model (remember, using apply instead of forward) y = model(x) # apply the criterion to get the loss corresponding to the pair (x, y) # with respect to the real y (y_gold) loss = criterion(y, y_gold) # the loss contains a gradient function that we can use to compute # the gradient dL/dw (gradient with respect to the parameters # for a given fixed input) print(loss) # - # ### Optimization # Once we have computed the loss for a training example or minibatch of examples, we update the parameters of the model guided by the information contained in the gradient. The role of updating the parameters belongs to the optimizer, and PyTorch has a number of implementations available right away – and if you don't find your preferred optimizer as part of the library, chances are that you will find an existing implementation. Also, coding your own optimizer is indeed quite easy in PyTorch. # # **Side Note** The following is a summary of the most common optimizers. It is intended to serve as a reference (I use this table myself quite a lot). In practice, most people pick an optimizer that has been proven to behave well on a given domain, but optimizers are also a very active area of research on numerical analysis, so it is a good idea to pay some attention to this subfield. 
We recommend using second-order dynamics with an adaptive time step: # # - First-order dynamics # - Search direction only: `optim.SGD` # - Adaptive: `optim.RMSprop`, `optim.Adagrad`, `optim.Adadelta` # # - Second-order dynamics # - Search direction only: Momentum `optim.SGD(momentum=0.9)`, Nesterov, `optim.SGD(nesterov=True)` # - Adaptive: `optim.Adam`, `optim.Adamax` (Adam with $L_\infty$) # ### Training a simple model # In order to illustrate the different concepts and techniques above, let's put them together in a very simple example: our objective will be to fit a very simple non-linear function, a sine wave: # # $$y = a \sin(x + \phi)$$ # # where $a, \phi$ are the given amplitude and phase of the sine function. Our objective is to learn to adjust this function using a feed forward network, this is: # # $$ \hat{y} = f(x)$$ # # such that the error between $y$ and $\hat{y}$ is minimal according to our criterion. A natural criterion is to minimize the squared distance between the actual value of the sine wave and the value predicted by our function approximator, measured using the $L_2$ norm. # # **Side Note**: Although this example is easy, simple variations of this setting can pose a big challenge, and are used currently to illustrate difficult problems in learning, especially in a very active subfield known as meta-learning. 
# Let's import all the modules that we are going to need: import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.data as data import numpy as np import matplotlib.pyplot as plt import math # Early on the code, we define the device that we want to use: device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu") # Let's fix $a=1$, $\phi=1$ and generate traning data in the interval $x \in [0,2\pi)$ using NumPy: # + M = 1200 # sample from the x axis M points x = np.random.rand(M) * 2*math.pi # add noise eta = np.random.rand(M) * 0.01 # compute the function y = np.sin(x) + eta # plot _ = plt.scatter(x,y) # + # use the NumPy-PyTorch bridge x_train = torch.tensor(x[0:1000]).float().view(-1, 1).to(device) y_train = torch.tensor(y[0:1000]).float().view(-1, 1).to(device) x_test = torch.tensor(x[1000:]).float().view(-1, 1).to(device) y_test = torch.tensor(y[1000:]).float().view(-1, 1).to(device) # + class SineDataset(data.Dataset): def __init__(self, x, y): super(SineDataset, self).__init__() assert x.shape[0] == y.shape[0] self.x = x self.y = y def __len__(self): return self.y.shape[0] def __getitem__(self, index): return self.x[index], self.y[index] sine_dataset = SineDataset(x_train, y_train) sine_dataset_test = SineDataset(x_test, y_test) sine_loader = torch.utils.data.DataLoader( sine_dataset, batch_size=32, shuffle=True) sine_loader_test = torch.utils.data.DataLoader( sine_dataset_test, batch_size=32) # - class SineModel(nn.Module): def __init__(self): super(SineModel, self).__init__() self.network = nn.Sequential( nn.Linear(1, 5), nn.ReLU(), nn.Linear(5, 5), nn.ReLU(), nn.Linear(5, 5), nn.ReLU(), nn.Linear(5, 1)) def forward(self, x): return self.network(x) # + # declare the model model = SineModel().to(device) # define the criterion criterion = nn.MSELoss() # select the optimizer and pass to it the parameters of the model it will optimize optimizer = torch.optim.Adam(model.parameters(), lr = 0.01) epochs = 60 # 
training loop for epoch in range(epochs): print(epoch) for i, (x_i, y_i) in enumerate(sine_loader): y_hat_i = model(x_i) # forward pass loss = criterion(y_hat_i, y_i) # compute the loss and perform the backward pass optimizer.zero_grad() # cleans the gradients loss.backward() # computes the gradients optimizer.step() # update the parameters if epoch % 20 == 0: #print(x_i.shape) #print(y_hat_i.shape) plt.scatter(x_i.data.cpu().numpy(), y_hat_i.data.cpu().numpy()) # - print(x_i.shape) print(y_hat_i.shape) # + # testing with torch.no_grad(): model.eval() total_loss = 0. for k, (x_k, y_k) in enumerate(sine_loader_test): y_hat_k = model(x_k) loss_test = criterion(y_hat_k, y_k) total_loss += float(loss_test) print(total_loss) # - # ## Reproducibility # + def enforce_reproducibility(seed=42): # Sets seed manually for both CPU and CUDA torch.manual_seed(seed) # For atomic operations there is currently # no simple way to enforce determinism, as # the order of parallel operations is not known. # # CUDNN torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False # System based np.random.seed(seed) enforce_reproducibility() # - # ## References # <NAME> (2018) *Deep Reinforcement Learning Hands-On*. Birmingham: Packt Publishing # # <NAME> and <NAME> (2019) *Natural Language Processing with PyTorch*. Sebastopol, CA: O'Reilly Media
tutorial_pytorch.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
from datetime import timedelta
import functools
from glob import glob
import inspect
from itertools import product
from multiprocessing import pool
import os
import subprocess
import sys

import numpy as np
import pandas as pd
# -

assert sys.version_info >= (3, 6), 'Use Python ≥3.6'


def upd(d, **kwargs):
    """Return a copy of `d` updated with `kwargs`; the input dict is untouched."""
    d = d.copy()
    d.update(kwargs)
    return d


def run_simulation(lview, func, vals, parameters, fname_i, N=None, overwrite=False):
    """Run a simulation where one loops over `vals`.

    The simulation yields len(vals) results, but by using `N`, you can
    split it up in parts of length N.

    Parameters
    ----------
    lview : ipyparallel.client.view.LoadBalancedView object
        LoadBalancedView for asynchronous map.
    func : function
        Function that takes a list of arguments: `vals`.
    vals : list
        Arguments for `func`.
    parameters : dict
        Dictionary that is saved with the data, used for constant parameters.
    fname_i : str
        Name for the resulting HDF5 files. If the simulation is split up in
        parts by using the `N` argument, it needs to be a formattable string,
        for example 'file_{}'.
    N : int
        Number of results in each pandas.DataFrame.
    overwrite : bool
        Overwrite the file even if it already exists.
    """
    # Deferred import: `toolz` is only used here, so the rest of this module
    # stays importable when toolz is not installed.
    from toolz import partition_all

    if N is None:
        # No explicit chunk size requested: use one (capped) chunk.
        # NOTE(review): the guard below only makes sense inside this branch —
        # an explicit smaller `N` is exactly how callers split large `vals`.
        N = 1000000
        if len(vals) > N:
            raise Exception('You need to split up vals in smaller parts')

    # Number of chunks of size `N`, rounded up.
    N_files = len(vals) // N + (0 if len(vals) % N == 0 else 1)
    print('`vals` will be split in {} files.'.format(N_files))

    time_elapsed = 0
    parts_done = 0
    for i, chunk in enumerate(partition_all(N, vals)):
        fname = fname_i.replace('{}', '{:03d}').format(i)
        print('Busy with file: {}.'.format(fname))
        if not os.path.exists(fname) or overwrite:
            map_async = lview.map_async(func, chunk)
            map_async.wait_interactive()
            result = map_async.result()
            df = pd.DataFrame(result)
            df = df.assign(**parameters)
            # Save the git revision with the data for reproducibility.
            df = df.assign(git_hash=get_git_revision_hash())
            os.makedirs(os.path.dirname(fname), exist_ok=True)
            df.to_hdf(fname, 'all_data', mode='w', complib='zlib', complevel=9)

            # Progress bookkeeping: simple ETA from the mean chunk duration.
            N_files_left = N_files - (i + 1)
            parts_done += 1
            time_elapsed += map_async.elapsed
            time_left = timedelta(
                seconds=(time_elapsed / parts_done) * N_files_left)
            print_str = ('Saved {}, {} more files to go, {} time left '
                         'before everything is done.')
            print(print_str.format(fname, N_files_left, time_left))
        else:
            print('File: {} was already done.'.format(fname))


def change_var_name(func, from_name, to_name):
    """Return a wrapper of `func` whose signature renames `from_name` to `to_name`.

    The wrapper accepts the new name as a keyword argument and forwards it to
    `func` under the old name.
    """
    sig = inspect.signature(func)
    new_pars = []
    for name, par in sig.parameters.items():
        # BUG FIX: the original compared strings with `is not`, which only
        # works by accident of interning; use equality.
        if name != from_name:
            new_pars.append(par)
        else:
            new_pars.append(inspect.Parameter(to_name, par.kind, default=par.default))

    def wrapped(*args, **kwargs):
        # Only translate when the renamed keyword is actually passed, so
        # positional calls keep working.
        if to_name in kwargs:
            kwargs[from_name] = kwargs.pop(to_name)
        return func(*args, **kwargs)

    wrapped.__signature__ = inspect.Signature(parameters=new_pars)
    return wrapped


def parse_params(params):
    """Evaluate string values of `params` as Python expressions, in place.

    Strings that do not evaluate (NameError) are kept as-is.
    SECURITY NOTE: this uses `eval` — only call it on trusted input.
    """
    for k, v in params.items():
        if isinstance(v, str):
            try:
                params[k] = eval(v)
            except NameError:
                pass
    return params


def combine_dfs(pattern, fname=None):
    """Concatenate all HDF5 files matching glob `pattern` into one DataFrame.

    If `fname` is given, the combined frame is also written there.
    """
    files = glob(pattern)
    df = pd.concat([pd.read_hdf(f) for f in sorted(files)])
    df = df.reset_index(drop=True)
    if fname is not None:
        os.makedirs(os.path.dirname(fname), exist_ok=True)
        df.to_hdf(fname, 'all_data', mode='w', complib='zlib', complevel=9)
    return df


def lat_from_syst(syst):
    """Return the unique site family (lattice) used in `syst`.

    Raises if the system mixes more than one lattice. Presumably a kwant
    system — only `syst.sites` and `site.family` are relied upon.
    """
    lats = set(s.family for s in syst.sites)
    if len(lats) > 1:
        raise Exception('No unique lattice in the system.')
    return list(lats)[0]


def memoize(obj):
    """Memoization decorator; the cache is exposed as ``obj.cache``.

    The cache key is the string repr of the arguments, so arguments must
    have a deterministic repr for caching to be reliable.
    """
    cache = obj.cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        key = str(args) + str(kwargs)
        if key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer


def named_product(**items):
    """Cartesian product of keyword arguments, as a list of dicts.

    named_product(a=[1, 2], b=[3]) -> [{'a': 1, 'b': 3}, {'a': 2, 'b': 3}]
    """
    names = items.keys()
    vals = items.values()
    return [dict(zip(names, res)) for res in product(*vals)]


def get_git_revision_hash():
    """Get the git hash to save with data to ensure reproducibility."""
    git_output = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    return git_output.decode("utf-8").replace('\n', '')


def find_nearest(array, value):
    """Find the nearest value in an array to a specified `value`."""
    idx = np.abs(np.array(array) - value).argmin()
    return array[idx]


def remove_unhashable_columns(df):
    """Return a copy of `df` without columns whose values cannot be hashed.

    Only the first row is probed — assumes columns are homogeneous.
    """
    df = df.copy()
    for col in df.columns:
        if not hashable(df[col].iloc[0]):
            df.drop(col, axis=1, inplace=True)
    return df


def hashable(v):
    """Determine whether `v` can be hashed."""
    try:
        hash(v)
    except TypeError:
        return False
    return True


def drop_constant_columns(df):
    """Drop columns whose value is constant over all rows.

    Taken from http://stackoverflow.com/a/20210048/3447047
    """
    df = remove_unhashable_columns(df)
    df = df.reset_index(drop=True)
    # BUG FIX: `.ix` was removed in pandas 1.0; use positional `.iloc`.
    return df.loc[:, (df != df.iloc[0]).any()]
common.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] id="u-0WRhZ8kbAB"
# # Kaggle: Plant Pathology 2021 - FGVC8
#
# [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Borda/kaggle_plant-pathology/blob/main/notebooks/Plant-Pathology-with-Flash.ipynb)

# + [markdown] id="-jQ8XEEDkgpt"
# ## Setup environment
#
# - connect the gDrive with dataset
# - extract data to local
# - install pytorch lightning

# + colab={"base_uri": "https://localhost:8080/"} id="F1Mn0WVLkbAR" outputId="6165b84d-5a53-43ef-ed95-840c90e62539"
from google.colab import drive

# connect to my gDrive
drive.mount('/content/gdrive')

# + colab={"base_uri": "https://localhost:8080/"} id="3obQb89OkmEe" outputId="6913c693-33c2-4241-ed22-795c6371e749"
# # copy the dataset to local drive
# ! rsync -ah --progress /content/gdrive/Shareddrives/Datasets/plant-pathology-2021-fgvc8_640.zip plant-pathology-2021-fgvc8.zip

# + colab={"base_uri": "https://localhost:8080/"} id="pbe7GjyVkmMx" outputId="3431b1c2-e862-4a80-f129-47f9b4847a0a"
# extract dataset to the drive
# ! unzip plant-pathology-2021-fgvc8.zip | awk 'BEGIN {ORS=" "} {if(NR%250==0)print "."}'
# ! ls -l

# + colab={"base_uri": "https://localhost:8080/"} id="YWMSKqjskvCb" outputId="ab03f178-680d-4494-c5df-aaced5ccfa55"
# ! pip install -q "lightning-bolts==0.3.2" "torchtext==0.6"
# ! pip install -q "lightning-flash==0.2.2rc2"
# ! pip list | grep torch
# ! pip list | grep lightning

# + colab={"base_uri": "https://localhost:8080/"} id="mag4jUSukyFP" outputId="f1a3730f-f387-40b3-bdfb-4f2111924292"
# ! nvidia-smi

# + [markdown] id="CTMS2J9MkbAS"
# ## Data exploration
#
# Checking what data do we have available and what is the labels distribution...

# + [markdown] id="dXvPEfaVkbAV"
# Looking in the training dataset table, what columns and what is the data representation...

# + colab={"base_uri": "https://localhost:8080/"} id="3XqU6rABkbAV" outputId="53ce3d43-ec72-422c-d06c-b3baf0b4409b"
# %matplotlib inline

import os
import json
import pandas as pd
from pprint import pprint

base_path = '/content'
path_csv = os.path.join(base_path, 'train.csv')
train_data = pd.read_csv(path_csv)
print(train_data.head())

# + [markdown] id="cM4LvItmkbAW"
# We can see that each image can have multiple labels, so let's check what the most common label count is...
#
# *The target classes, a space delimited list of all diseases found in the image.
# Unhealthy leaves with too many diseases to classify visually will have the complex class, and may also have a subset of the diseases identified.*

# + colab={"base_uri": "https://localhost:8080/"} id="HZaavHwtkbAW" outputId="6154a8fe-fce5-439d-c3d0-677689bca149"
import numpy as np

# number of space-separated labels per image
train_data['nb_classes'] = [len(lbs.split(" ")) for lbs in train_data['labels']]
lb_hist = dict(zip(range(10), np.bincount(train_data['nb_classes'])))
pprint(lb_hist)

# + [markdown] id="T1JtivbfkbAW"
# Browse the label distribution, enrolling all labels in the dataset, so in case an image has two labels both are used in this stat...

# + colab={"base_uri": "https://localhost:8080/", "height": 279} id="rNxT_5HskbAW" outputId="fd196ad2-70bb-4595-f6fe-309220bb7137"
import itertools
import seaborn as sns

labels_all = list(itertools.chain(*[lbs.split(" ") for lbs in train_data['labels']]))
train_data['labels_sorted'] = [" ".join(sorted(lbs.split(" "))) for lbs in train_data['labels']]
ax = sns.countplot(y=labels_all, orient='v')
ax.grid()

# + [markdown] id="8NzaZuB8kbAX"
# ## Dataset adjustment

# + id="xReHeojOvA4O"
# NOTE(review): this commented-out resize helper contains a stray "point"
# token; the command should read 'convert -resize 640 {pimg} {pimg}' if this
# block is ever revived.
# # ! apt-get install -qq -y imagemagick
# # #! mogrify -resize 960 train_images/*.jpg

# import os, tqdm, glob
# import multiprocessing

# ls_images = glob.glob("train_images/*.jpg")
# print(f'found images: {len(ls_images)}')

# def _convert(pimg: str):
#     os.system(f'convert -resize 640 point {pimg} {pimg}')

# nb_cpu = multiprocessing.cpu_count()
# pbar = tqdm.tqdm(total=len(ls_images), desc=f"using {nb_cpu} proc.")
# pool = multiprocessing.Pool(processes=nb_cpu)
# for _ in pool.imap(_convert, ls_images):
#     pbar.update()
# pool.close()
# pool.join()

# + colab={"base_uri": "https://localhost:8080/"} id="iC7L9QVHkbAY" outputId="b7682e63-a63f-4bbf-c293-b34a5591953d"
# ! rm -rf /content/dataset

import os
import tqdm
import shutil
import pandas as pd

data = pd.read_csv(path_csv)
# shuffle data
data = data.sample(frac=1, random_state=42).reset_index(drop=True)

# 80/20 train/valid split
frac = int(0.8 * len(data))
train = data[:frac]
valid = data[frac:]

dataset_path = '/content/dataset'
# creating train and valid folders
for folder, df in [('train', train), ('valid', valid)]:
    folder = os.path.join(dataset_path, folder)
    os.makedirs(folder, exist_ok=True)
    # triage images per class / label
    for _, row in tqdm.tqdm(df.iterrows()):
        img_name, lb = row['image'], row['labels']
        # multi-label images are folded into the single 'complex' class
        if 'complex' in lb or len(lb.split(" ")) > 1:
            lb = 'complex'
        folder_lb = os.path.join(folder, str(lb))
        # create folder for label if it is missing
        if not os.path.isdir(folder_lb):
            os.mkdir(folder_lb)
        # NOTE(review): the copy source is <dataset_path>/train_images, but
        # `dataset_path` is removed by the `rm -rf` above while the CSV is
        # read from `base_path` — confirm the extracted images really live
        # under /content/dataset/train_images rather than /content/train_images.
        shutil.copy(os.path.join(dataset_path, 'train_images', img_name),
                    os.path.join(folder_lb, img_name))

# ! ls -l /content/dataset/train
# ! ls -l /content/dataset/valid

# + [markdown] id="ofdTYMkMkbAY"
# ## Flash finetuning

# + colab={"base_uri": "https://localhost:8080/"} id="RwbmfcTRkbAY" outputId="a064b05a-6e45-4caa-c2cc-8b4482ff3bde"
import flash
import torch
import multiprocessing as mproc
from flash.core.data import download_data
from flash.core.finetuning import FreezeUnfreeze
from flash.vision import ImageClassificationData, ImageClassifier

# 2. Load the data
# BUG FIX: `os.path.join(dataset_path, "/train/")` evaluated to just
# "/train/" — an absolute second component makes os.path.join discard all
# preceding components. Join with relative folder names instead so the
# folders created above are actually used.
datamodule = ImageClassificationData.from_folders(
    train_folder=os.path.join(dataset_path, "train"),
    valid_folder=os.path.join(dataset_path, "valid"),
    batch_size=128,
    num_workers=mproc.cpu_count(),
)

# 3. Build the model
model = ImageClassifier(
    backbone="resnet34",
    optimizer=torch.optim.Adam,
    num_classes=datamodule.num_classes,
)

# + colab={"base_uri": "https://localhost:8080/", "height": 370} id="XK_jeAQikbAY" outputId="4f879701-16c0-4c4d-9316-d8348d239e88"
# (the original cell metadata carried a long, redacted list of colab
# widget-state ids; elided here)
# 4. Create the trainer. Run twice on data
trainer = flash.Trainer(
    gpus=1,
    max_epochs=10,
    precision=16,
    val_check_interval=0.5,
    progress_bar_refresh_rate=1,
)

# 5. Train the model
trainer.finetune(model, datamodule=datamodule, strategy=FreezeUnfreeze(unfreeze_epoch=1))

# 7. Save it!
trainer.save_checkpoint("image_classification_model.pt")

# + [markdown] id="ueGzQXSCkbAa"
# Quick visualization of the training process...
# -

# Start tensorboard.
# %load_ext tensorboard
# %tensorboard --logdir lightning_logs/
notebooks/Plant-Pathology-with-Flash.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from dateutil.relativedelta import relativedelta
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()

# Countries selected for the visual analysis of the dataset
subject_countries = ['BRA','CHN','ESP','DEU','ITA','JPN','KOR','PRT','GBR','USA']

# ## 1. Geographic distribution of COVID-19

# ### 1.1. Original dataset

# Dataset of the worldwide distribution of COVID-19 cases and deaths.
# PORTABILITY FIX: all data paths used Windows-only backslashes ('data\...');
# forward slashes work on Windows as well as POSIX. `sep` is passed as a
# keyword because positional `sep` is deprecated in pandas.
df = pd.read_csv('data/COVID-19-geographic-distribution-worldwide-2020-07-24.csv', sep=';')
df.dropna(inplace=True)
df.head()

# ### 1.2. Accumulated totals of cases and deaths

# Columns with accumulated cases and deaths were added, since the original
# dataset only had the new daily numbers. Some columns were renamed for
# easier reading.
df.drop(columns=['day','month','year','geoId','continentExp','Cumulative_number_for_14_days_of_COVID-19_cases_per_100000'], inplace=True)
df.rename(columns={'dateRep':'date','countriesAndTerritories':'country','countryterritoryCode':'code','popData2019':'pop'}, inplace=True)
df['date'] = pd.to_datetime(df['date'], dayfirst=True)
df.sort_values(by=['code','date'], ascending=True, inplace=True)
df['total_cases'] = df.groupby(by='code')['cases'].cumsum()
df['total_deaths'] = df.groupby(by='code')['deaths'].cumsum()
df.head()

df['country'] = df['country'].str.replace('_', ' ')

# Below, plots of total cases and deaths over time.
def setupDatetimeAxis(ax):
    # Month-level major ticks, day-level minor ticks.
    ax.xaxis.set_major_locator(mdates.MonthLocator())
    ax.xaxis.set_minor_locator(mdates.DayLocator())
    ax.xaxis.set_major_formatter(mdates.DateFormatter("%B-%Y"))

# +
df_vis = df.loc[df['code'].isin(subject_countries)]

fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, figsize=(15,7))

ax1.set_title('Total cases over time')
setupDatetimeAxis(ax1)
sns.lineplot(x='date', y='total_cases', hue='country', data=df_vis, ax=ax1, sort=False)

ax2.set_title('Total deaths over time')
setupDatetimeAxis(ax2)
sns.lineplot(x='date', y='total_deaths', hue='country', data=df_vis, ax=ax2, sort=False)

fig.autofmt_xdate()
plt.show()
# -

# ### 1.3. Weekly evolution of each country's numbers, starting at its first case

# Cases started appearing on different dates in each country, so comparing
# values on the same calendar date makes little sense; we compare the same
# elapsed time since each country's first case instead. For every country we
# compute totals per week counted from that country's first case.

# First, remove the records prior to each country's first case
df2 = df.copy()
df2 = df2[df2['total_cases'] > 0]
df2.sort_values(by=['code','date'], ascending=True, inplace=True)

# Then, for each record, compute the week relative to the country's first case
df2['min_date'] = df2.groupby(by='code')['date'].transform('min')
df2['days_passed'] = df2['date'] - df2['min_date']
df2['week'] = (df2['days_passed'] / np.timedelta64(1, 'W')).astype(int) + 1

# Finally, compute each week's total and drop duplicated records, keeping a
# single record per week with the corresponding totals
df2 = df2.drop(columns=['date','cases','deaths','min_date','days_passed'])
df2['max_total_cases'] = df2.groupby(by=['code','week'])['total_cases'].transform(max)
df2['max_total_deaths'] = df2.groupby(by=['code','week'])['total_deaths'].transform(max)
df2 = df2.drop(columns=['total_cases','total_deaths'])
df2 = df2.rename(columns={'max_total_cases':'total_cases','max_total_deaths':'total_deaths'})
df2.drop_duplicates(inplace=True)
df2[df2['code']=='BRA'].head(10)

# Below, plots of total cases and deaths per week since the first case of each country

# +
# NOTE(review): the boolean mask is built from `df` but applied to `df2`;
# this relies on pandas index alignment — consider df2['code'].isin(...).
df_vis = df2.loc[df['code'].isin(subject_countries)]

fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, figsize=(15,7))

ax1.set_title('Total cases per week since first case')
sns.lineplot(x='week', y='total_cases', hue='country', data=df_vis, ax=ax1)

ax2.set_title('Total deaths per week since first case')
sns.lineplot(x='week', y='total_deaths', hue='country', data=df_vis, ax=ax2)

plt.show()
# -

# ### 1.4. Numbers per 100 thousand inhabitants

# The numbers computed so far are absolute, but these countries have
# different populations, so we compute the numbers per 100 thousand
# inhabitants for a more proportional comparison
df3 = df2.copy()
df3['total_cases_100k'] = df3['total_cases'] * 100000 / df3['pop']
df3['total_deaths_100k'] = df3['total_deaths'] * 100000 / df3['pop']
df3.drop(columns=['pop','total_cases','total_deaths'], inplace=True)
df3.rename(columns={'total_cases_100k':'total_cases','total_deaths_100k':'total_deaths'}, inplace=True)
df3.head(12)

# +
df_vis = df3.loc[df['code'].isin(subject_countries)]

fig, (ax1, ax2) = plt.subplots(1, 2, sharex=True, figsize=(15,7))

ax1.set_title('Total cases (per 100k population), per week since first case')
sns.lineplot(x='week', y='total_cases', hue='country', data=df_vis, ax=ax1)

ax2.set_title('Total deaths (per 100k population), per week since first case')
sns.lineplot(x='week', y='total_deaths', hue='country', data=df_vis, ax=ax2)

plt.show()
# -

df3.to_csv('data/COVID-19-numbers-per-week-since-1st-case-until-2020-07-24.csv', sep=';', index=False)

# ## 2. Indicators

# ### 2.1. GDP

df_gdp = pd.read_csv('data/API_NY.GDP.MKTP.PP.CD_DS2_en_csv_v2_1069861.csv')
df_gdp.head()

df_gdp = df_gdp[['Country Code','2018']]
df_gdp.rename(columns={'Country Code': 'code', '2018': 'gdp'}, inplace=True)
df_gdp.dropna(inplace=True)
df_gdp.set_index(['code'], inplace=True)
df_gdp.head()

df_vis = df_gdp.reset_index()
df_vis = df_vis.loc[df_vis['code'].isin(subject_countries)]
sns.barplot(x='code', y='gdp', data=df_vis)
plt.show()

# ### 2.2. HDI

df_hdi = pd.read_csv('data/Human development index (HDI).csv', sep=';')
df_hdi.head()

df_hdi = df_hdi[['Country Code','2018']]
df_hdi.rename(columns={'Country Code': 'code', '2018': 'hdi'}, inplace=True)
df_hdi.dropna(inplace=True)
df_hdi.set_index(['code'], inplace=True)
df_hdi.head()

df_vis = df_hdi.reset_index()
df_vis = df_vis.loc[df_vis['code'].isin(subject_countries)]
sns.barplot(x='code', y='hdi', data=df_vis)
plt.show()

# ### 2.3. Coverage of essential health services

df_uhc = pd.read_csv('data/WHO_UHC_index_of_essential_service_coverage.csv', sep=';')
df_uhc.head()

# keep only the most recent reporting period
df_uhc = df_uhc.loc[df_uhc['Period'] == df_uhc['Period'].max()]
df_uhc = df_uhc[['Country Code','First Tooltip']]
df_uhc.rename(columns={'Country Code': 'code', 'First Tooltip': 'uhc'}, inplace=True)
df_uhc.dropna(inplace=True)
df_uhc.set_index(['code'], inplace=True)
df_uhc.head()

df_vis = df_uhc.reset_index()
df_vis = df_vis.loc[df_vis['code'].isin(subject_countries)]
sns.barplot(x='code', y='uhc', data=df_vis)
plt.show()

# ### 2.4. Population distribution

# #### 2.4.1. Total population

df_pop_total = pd.read_csv('data/API_SP.POP.TOTL_DS2_en_csv_v2_1217749.csv')
df_pop_total.head()

df_pop_total = df_pop_total[['Country Code','2018']]
df_pop_total.rename(columns={'Country Code': 'code', '2018': 'pop_total'}, inplace=True)
df_pop_total.dropna(inplace=True)
df_pop_total.set_index(['code'], inplace=True)
df_pop_total.head()

# #### 2.4.2. Population aged 0 to 14

df_pop_00_14 = pd.read_csv('data/API_SP.POP.0014.TO_DS2_en_csv_v2_1004418.csv')
df_pop_00_14.head()

df_pop_00_14 = df_pop_00_14[['Country Code','2018']]
df_pop_00_14.rename(columns={'Country Code': 'code', '2018': 'pop_0_14'}, inplace=True)
df_pop_00_14.dropna(inplace=True)
# REMOVED no-op: `df_pop_00_14.sort_values(by=['code'])` discarded its result
# (no assignment and no inplace=True), so it had no effect.
df_pop_00_14.set_index(['code'], inplace=True)
df_pop_00_14.head()

# #### 2.4.3. Population aged 15 to 64

df_pop_15_64 = pd.read_csv('data/API_SP.POP.1564.TO_DS2_en_csv_v2_1004520.csv')
df_pop_15_64.head()

df_pop_15_64 = df_pop_15_64[['Country Code','2018']]
df_pop_15_64.rename(columns={'Country Code': 'code', '2018': 'pop_15_64'}, inplace=True)
df_pop_15_64.dropna(inplace=True)
df_pop_15_64.set_index(['code'], inplace=True)
df_pop_15_64.head()

# #### 2.4.4. Population aged 65 and over

df_pop_65plus= pd.read_csv('data/API_SP.POP.65UP.TO_DS2_en_csv_v2_993674.csv')
df_pop_65plus.head()

df_pop_65plus = df_pop_65plus[['Country Code','2018']]
df_pop_65plus.rename(columns={'Country Code': 'code', '2018': 'pop_65+'}, inplace=True)
df_pop_65plus.dropna(inplace=True)
df_pop_65plus.set_index(['code'], inplace=True)
df_pop_65plus.head()

# #### 2.4.5. Final population distribution dataset

df_pop = df_pop_total.join(df_pop_00_14).join(df_pop_15_64).join(df_pop_65plus)
df_pop.dropna(inplace=True)
df_pop.head()

# convert age-band counts into fractions of the total population
df_pop['pop_0_14'] = df_pop['pop_0_14'] / df_pop['pop_total']
df_pop['pop_15_64'] = df_pop['pop_15_64'] / df_pop['pop_total']
df_pop['pop_65+'] = df_pop['pop_65+'] / df_pop['pop_total']
df_pop.head()

df_vis = df_pop.reset_index().drop(columns='pop_total')
df_vis = df_vis.loc[df_vis['code'].isin(subject_countries)]
df_vis.set_index('code').plot(kind='bar', stacked=True)
plt.show()

# ### 2.5. Final indicators dataset

df_ind = df_uhc.join(df_pop).join(df_gdp).join(df_hdi)
df_ind.dropna(inplace=True)
df_ind.head()

# The original GDP dataset has absolute numbers, but there are big population
# differences between the countries, so we compute GDP per 100 thousand
# inhabitants for a more proportional comparison
df_ind['gdp'] = df_ind['gdp'] * 100000.0 / df_ind['pop_total']
df_ind.drop(columns=['pop_total'], inplace=True)
df_ind.head()

# #### GDP per 100 thousand inhabitants

df_vis = df_gdp[['gdp']]
df_vis = df_vis.join(df_pop_total)
df_vis['gdp'] = df_vis['gdp'] * 100000.0 / df_vis['pop_total']
df_vis.reset_index(inplace=True)
df_vis.drop(columns=['pop_total'], inplace=True)
df_vis = df_vis.loc[df_vis['code'].isin(subject_countries)]
sns.barplot(x='code', y='gdp', data=df_vis)
plt.show()

# ## 3. Final dataset

df4 = df3.join(df_ind, on=['code'])
df4.rename(columns={'total_cases':'cases','total_deaths':'deaths'}, inplace=True)
df4.dropna(inplace=True)
df4.head()

# #### Finally, add to each record the previous week's numbers, for model training

df5 = df4.copy()
df5['prev_cases'] = df5.groupby(['code'])['cases'].shift(1, fill_value=0)
df5['prev_deaths'] = df5.groupby(['code'])['deaths'].shift(1, fill_value=0)
df5.head()

df_final = df5[['code','country','gdp','hdi','uhc','pop_0_14','pop_15_64','pop_65+','week','prev_cases','prev_deaths','cases','deaths']]
df_final.head(10)

df_final.to_csv('data/COVID-19-final.csv', sep=';', index=False)
build_dataset.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import clustertools as ctools
import numpy as np

# # Operations
# Load a snapshot of a cluster from 00000.dat (positions in pc, velocities in
# km/s, clustercentric coordinates; Salpeter-IMF stellar masses in solar units).
cluster = ctools.load_cluster(
    'snapshot',
    filename='00000.dat',
    units='pckms',
    origin='cluster',
    ofilename='orbit.dat',
    ounits='kpckms',
)

# ``load_cluster`` computes key parameters (total mass, half-mass radius, ...)
# automatically. Unit/coordinate changes do NOT recompute them unless
# ``do_key_params=True``. Compare the two ways of getting the mean radius and
# the half-mass radius — they agree because units have not changed yet:

# +
print('Mean radius with numpy =', np.mean(cluster.r))
print('Mean radius =', cluster.rmean)
print('Half mass radius =', cluster.rm)

cluster.rlagrange()
print('50% Lagrange Radius = ', cluster.rn[4])
# -

# Now convert from pc to kpc. Stellar radii change immediately...
cluster.to_kpckms()

# ...but previously computed key parameters keep their old (pc) values until
# recalculated:
print('Mean radius with numpy = ', np.mean(cluster.r))
print('Mean radius with clustertools = ', cluster.rmean)
print('Half mass radius =', cluster.rm)
print('50% Lagrange Radius = ', cluster.rn[4])

# To refresh key parameters, pass ``do_key_params=True`` on the unit change;
# ``cluster.rn`` is only refreshed by calling ``rlagrange()`` again.

# +
cluster.to_kpckms()
print('Mean radius with numpy = ', np.mean(cluster.r))
print('Mean radius with clustertools = ', cluster.rmean)
print('Half mass radius =', cluster.rm)

cluster.rlagrange()
print('50% Lagrange Radius = ', cluster.rn[4])
# -

# Other unit systems include ``galpy`` and N-body units. Converting to N-body
# units requires scaling factors for mass, position, velocity and time:
cluster.reset_nbody_scale()

print('MASS SCALING: ', cluster.zmbar)
print('POSITION SCALING: ', cluster.rbar)
print('VELOCITY SCALING: ', cluster.vbar)
print('TIME SCALING: ', cluster.tbar)

# By default ``rbar`` is 4/3 of the half-mass radius; ``rvirial=True`` instead
# scales the cluster to a virial radius of 1 for a more accurate calculation.
cluster.reset_nbody_scale(rvirial=True)

print('MASS SCALING: ', cluster.zmbar)
print('POSITION SCALING: ', cluster.rbar)
# NOTE(review): the previous cell prints ``cluster.vbar`` here but this one
# prints ``cluster.vstar`` — confirm which attribute holds the velocity scale.
print('VELOCITY SCALING: ', cluster.vstar)
print('TIME SCALING: ', cluster.tbar)

# With scalings in place, convert to N-body units and plot:
cluster.to_nbody()
ctools.starplot(cluster)

# Changing coordinate system likewise leaves key parameters untouched unless
# ``do_key_params=True``. View the cluster in the galaxy's reference frame:
cluster.to_kpckms()
cluster.to_galaxy()

# +
print('Mean radius with numpy = ', np.mean(cluster.r))
print('Mean radius with clustertools = ', cluster.rmean)
print('Half mass radius =', cluster.rm)
print('50% Lagrange Radius = ', cluster.rn[4])

ctools.starplot(cluster)
# -

# The numpy mean radius is now ~10 kpc (the orbital distance), while the
# cached key parameters are unchanged. Recalculate by repeating the move:
cluster.to_galaxy()

# +
print('Mean radius with numpy = ', np.mean(cluster.r))
print('Mean radius with clustertools = ', cluster.rmean)
print('Half mass radius =', cluster.rm)
print('50% Lagrange Radius = ', cluster.rn[4])

ctools.starplot(cluster)
# -

# ``save_cluster``/``return_cluster`` let you stash the current units/origin,
# switch frames to make a calculation, then restore the original state:

# Save the current state
units, origin, rorder, rorder_origin = ctools.save_cluster(cluster)
print(units, origin)

# Move to clustercentric coordinates and measure the half-mass radius
cluster.to_cluster()
print('Half mass radius = ', cluster.rm)

# Restore the saved state
ctools.return_cluster(cluster, units, origin, rorder, rorder_origin)
print(cluster.units, cluster.origin)

# ``virialize`` rescales stellar velocities so the virial parameter is 0.5:

# +
# The cluster is not perfectly in virial equilibrium at the moment:
cluster.energies()
print(cluster.qvir)

# Scale it so that qvir = -0.5:
cluster.virialize()
cluster.energies()
print('New Qv: ', cluster.qvir)
# -

# ``add_rotation`` flips the sign of v_phi for a fraction ``qrot`` of the
# stars with v_phi < 0, adding net rotation:

# +
# Fraction of stars with vtheta < 0 in clustercentric coordinates
cluster.to_cluster()
r, theta, z, vr, vtheta, vz = ctools.cart_to_cyl(
    cluster.x, cluster.y, cluster.z, cluster.vx, cluster.vy, cluster.vz)
print('Fraction of stars with vtheta<0 =', np.sum(vtheta < 0) / cluster.ntot)

# Flip vtheta for 50% of the stars with vtheta < 0
print('Add rotation of 50%')
cluster.add_rotation(qrot=0.5)
r, theta, z, vr, vtheta, vz = ctools.cart_to_cyl(
    cluster.x, cluster.y, cluster.z, cluster.vx, cluster.vy, cluster.vz)
print('Now fraction of stars with vtheta<0 =', np.sum(vtheta < 0) / cluster.ntot)
# -
docs/source/notebooks/operations.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Node and Link analysis: Centrality measures
# Centrality measures appraise the "importance" of network elements. The catch
# is that "importance" is not well-defined and depends on the network's domain.
# This seminar covers two node centralities: *degree* and *closeness*.

# ## Degree Centrality
# Given the adjacency matrix $A$ of an **unweighted**, **undirected** graph
# $G = (V,E)$, the degree centrality of node $v_i$ is
# $$ C_D(i) = \sum_j A_{ji} $$
# To compare nodes across graphs it can be normalized by $\frac{1}{N-1}$.

# ## Closeness Centrality
# Identifies nodes that can reach other nodes quickly:
# $$ C_C(i) = \left[ \sum_{j,\ j\neq i} d(v_i, v_j) \right]^{-1}\text{,} $$
# where $d(v_i, v_j)$ is the shortest-path length between $v_i$ and $v_j$;
# normalized by multiplying with $(N-1)$.

# ## Why?
# Centralities let us understand graph structure without looking at it,
# compare/rank nodes, and compare graphs.

import networkx as nx
import random
import numpy as np
import matplotlib.pyplot as plt
import pprint
pp = pprint.PrettyPrinter(indent=4)
# %matplotlib inline

# ## Example: Zachary's Karate Club
# Small enough to both compute centralities and map them onto the drawing.
G = nx.karate_club_graph()
pos = nx.spring_layout(G)  # Fix node positions for all pictures

# Original network
plt.figure(1, figsize=(7, 7))
nx.draw_networkx(G, pos)

# Degree centrality.
# (Two dead `coord = nx.spring_layout(G)` calls were removed below: each ran a
# fresh — and fairly expensive — layout whose result was never used, which also
# contradicted the intent of fixing `pos` once above.)
dc = nx.degree_centrality(G)
plt.figure(2, figsize=(7, 7))
nx.draw(G, pos,
        nodelist=list(dc.keys()),
        node_size=[d * 7000 for d in list(dc.values())],
        node_color=list(dc.values()),
        font_size=8,
        cmap=plt.cm.Reds,
        )

# Closeness centrality
cl = nx.closeness_centrality(G)
plt.figure(1, figsize=(7, 7))
nx.draw(G, pos,
        nodelist=list(cl.keys()),
        node_size=[d * 3000 for d in list(cl.values())],
        node_color=list(cl.values()),
        font_size=8,
        cmap=plt.cm.Reds,
        )

# Plot degree vs. closeness for every node
xdata = list(dc.values())
ydata = list(cl.values())
plt.figure(1, figsize=(7, 7))
plt.plot(xdata, ydata, '+')
plt.xlabel('Degree Centrality')
plt.ylabel('Closeness Centrality')

# +
# Not clear which point is which node — add node ids:
fig = plt.figure(1, figsize=(14, 7))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
for v in range(len(dc)):
    ax1.text(x=xdata[v], y=ydata[v], s=str(v))
ax1.set_xlim(0, 0.6)
ax1.set_ylim(0.25, 0.6)
ax1.set_xlabel('Degree Centrality')
ax1.set_ylabel('Closeness Centrality')
# draw_networkx returns None; it draws into the current axes (ax2).
ax2 = nx.draw_networkx(G, pos)
# -
Lecture5/centralities.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Network Training
# Having implemented and tested all the components of the final networks in
# steps 1-3, we are now ready to train the network on a large dataset.

# +
import gc
import datetime

import pandas as pd
import numpy as np
from copy import deepcopy
from tqdm import tqdm
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard
from keras import backend as K
import cv2
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
from IPython.display import clear_output

from libs.pconv_model import PConvUnet
from libs.util import random_mask

# %load_ext autoreload
# %autoreload 2
plt.ioff()

# SETTINGS
TRAIN_DIR = r"D:/workspace/PConv-Keras/data/food_images_set/train"
TEST_DIR = r"D:/workspace/PConv-Keras/data/food_images_set/validation"
VAL_DIR = r"D:/workspace/PConv-Keras/data/food_images_set/validation"

BATCH_SIZE = 4
# -

# # Creating train & test data generator

# +
class DataGenerator(ImageDataGenerator):
    """ImageDataGenerator that yields ((masked_image, mask), original) batches
    for inpainting: each original image gets a fresh random mask, and the
    masked copy has the masked-out pixels set to 1 (white)."""

    def flow_from_directory(self, directory, *args, **kwargs):
        generator = super().flow_from_directory(directory, class_mode=None, *args, **kwargs)
        while True:
            # Get augmented image samples
            ori = next(generator)

            # Get masks for each image sample
            mask = np.stack([
                random_mask(ori.shape[1], ori.shape[2])
                for _ in range(ori.shape[0])], axis=0)

            # Apply masks to all image samples
            masked = deepcopy(ori)
            masked[mask == 0] = 1

            # Yield ([ori, mask], ori) training batches
            # print(masked.shape, ori.shape)
            gc.collect()
            yield [masked, mask], ori


# Create training generator
train_datagen = DataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    rescale=1./255,
    horizontal_flip=True
)
train_generator = train_datagen.flow_from_directory(
    TRAIN_DIR, target_size=(256, 256), batch_size=BATCH_SIZE
)

# Create validation generator
val_datagen = DataGenerator(rescale=1./255)
val_generator = val_datagen.flow_from_directory(
    VAL_DIR, target_size=(256, 256), batch_size=BATCH_SIZE, seed=1
)

# Create testing generator
test_datagen = DataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
    TEST_DIR, target_size=(256, 256), batch_size=BATCH_SIZE, seed=1
)

# +
# Pick out an example batch; `masked`, `mask` and `ori` stay module-level and
# are reused by plot_callback below to track progress on a fixed sample.
test_data = next(test_generator)
(masked, mask), ori = test_data

# Show side by side
for i in range(len(ori)):
    _, axes = plt.subplots(1, 3, figsize=(20, 5))
    axes[0].imshow(masked[i, :, :, :])
    axes[1].imshow(mask[i, :, :, :] * 1.)
    axes[2].imshow(ori[i, :, :, :])
    plt.show()
# -

# # Training on PIXNET Food 20. It's only for demo. Please use Command Line

def plot_callback(model):
    """Called at the end of each epoch, displaying our previous test images,
    as well as their masked predictions and saving them to disk.

    NOTE(review): relies on the module-level `masked`, `mask` and `ori`
    sample batch captured above — confirm the sample cell ran first."""

    # Get samples & Display them
    pred_img = model.predict([masked, mask])
    pred_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

    # Clear current output and display test images
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 3, figsize=(20, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :] * 1.)
        axes[2].imshow(ori[i, :, :, :])
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[2].set_title('Original Image')

        plt.savefig(r'data/test_samples/img_{}_{}.png'.format(i, pred_time))
        plt.close()


# ## Phase 1 - with batch normalization

# Instantiate the model and resume from a previous checkpoint
model = PConvUnet(weight_filepath='data/logs/')
model.load(r"C:\Users\MAFG\Documents\Github-Public\PConv-Keras\data\logs\50_weights_2018-06-01-16-41-43.h5")

# Run training for certain amount of epochs
model.fit(
    train_generator,
    steps_per_epoch=10000,
    validation_data=val_generator,
    validation_steps=100,
    epochs=50,
    plot_callback=plot_callback,
    callbacks=[
        TensorBoard(log_dir='../data/logs/initial_training', write_graph=False)
    ]
)

# ## Phase 2 - without batch normalization

# Load weights from previous run; freeze batch norm and lower the LR
model = PConvUnet(weight_filepath='data/logs/')
model.load(
    r"C:\Users\MAFG\Documents\Github-Public\PConv-Keras\data\logs\150_weights_2018-06-26-22-19-32.h5",
    train_bn=False,
    lr=0.00005
)

# Run training for certain amount of epochs
model.fit(
    train_generator,
    steps_per_epoch=10000,
    validation_data=val_generator,
    validation_steps=100,
    epochs=20,
    workers=3,
    plot_callback=plot_callback,
    callbacks=[
        TensorBoard(log_dir='../data/logs/fine_tuning', write_graph=False)
    ]
)

# ## Phase 3 - Generating samples

# Load weights from previous run
model = PConvUnet(weight_filepath='data/logs/')
model.load(
    r"C:\Users\MAFG\Documents\Github-Public\PConv-Keras\data\logs\170_weights_2018-06-28-15-00-38.h5",
    train_bn=False,
    lr=0.00005
)

n = 0
for (masked, mask), ori in tqdm(test_generator):

    # Run predictions for this batch of images
    pred_img = model.predict([masked, mask])
    pred_time = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')

    # Clear current output and display test images
    for i in range(len(ori)):
        _, axes = plt.subplots(1, 2, figsize=(10, 5))
        axes[0].imshow(masked[i, :, :, :])
        axes[1].imshow(pred_img[i, :, :, :] * 1.)
        axes[0].set_title('Masked Image')
        axes[1].set_title('Predicted Image')
        axes[0].xaxis.set_major_formatter(NullFormatter())
        axes[0].yaxis.set_major_formatter(NullFormatter())
        axes[1].xaxis.set_major_formatter(NullFormatter())
        axes[1].yaxis.set_major_formatter(NullFormatter())

        plt.savefig(r'data/test_samples/img_{}_{}.png'.format(i, pred_time))
        plt.close()

    n += 1
    # Only create predictions for about 100 batches (the generator is infinite)
    if n > 100:
        break
Step5 - Food Training.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import jax
# #jax.config.update('jax_platform_name', 'cpu')
from jax import jit, device_put
import numpy as np
import jax.numpy as jnp
import optax
import matplotlib.pyplot as plt
from emlp.reps import T
from emlp.groups import *


def krylov_constraint_solve_upto_r(C, r, tol=1e-5, lr=1e-2):
    """Run the iterative solver for the constraint CQ=0 up to rank r and
    return the per-iteration loss history.

    Uses SGD with momentum on the objective |CW|^2/2, which provably
    converges at an exponential rate. Unlike the library routine this is
    adapted from, this instrumented version returns the trajectory of loss
    values (as a jnp array), NOT the solution basis Q — it exists to plot
    convergence curves.

    Parameters:
        C: constraint matrix (may be complex).
        r: number of columns of the candidate solution W.
        tol: stop once sqrt(loss) < tol.
        lr: SGD learning rate (momentum fixed at 0.9).
    """
    # Random initialization scaled so columns have roughly unit norm.
    W = np.random.randn(C.shape[-1], r) / np.sqrt(C.shape[-1])
    W = device_put(W)

    opt_init, opt_update = optax.sgd(lr, .9)
    opt_state = opt_init(W)  # init stats

    def loss(W):
        # absolute() rather than **2 directly: supports complex C.
        return (jnp.absolute(C @ W) ** 2).sum() / 2

    loss_and_grad = jit(jax.value_and_grad(loss))
    # (An unused warm-up call `lstart, _ = loss_and_grad(W)` and a dead
    # SVD/rank/Q computation after the loop were removed: their results
    # were never used by this instrumented variant.)

    lossvals = []
    for i in range(20000):
        lossval, grad = loss_and_grad(W)
        lossvals.append(lossval)
        updates, opt_state = opt_update(grad, opt_state, W)
        W = optax.apply_updates(W, updates)
        if jnp.sqrt(lossval) < tol:  # check convergence condition
            break  # has converged
    return jnp.array(lossvals)


r = 20
k = 4
C = T(k, G=SO(2)).constraint_matrix()

import pandas as pd

# Collect convergence curves for a range of groups and tensor ranks,
# skipping combinations whose constraint matrices would be too large.
rows = []
groups = [SO(2), O(3), S(5), Z(6), D(4), SO13p(), Sp(2), SU(3), RubiksCube(), S(50)]
for G in groups:
    for k in range(1, 7):
        r = 50
        if r * G.d ** k > 2e8:
            continue
        rep = T(k // 2, (k + 1) // 2, G=G)
        C = rep.constraint_matrix()
        if C.shape[0] * r * 2 > 2e8:
            continue
        x = krylov_constraint_solve_upto_r(C, r)
        rows.append(pd.DataFrame({
            'iterations': np.arange(len(x)),
            r'$\|CW\|_F^2$ error': x,
            'Group': str(G),
            'Tensor Rank': k,
        }))
        plt.plot(np.arange(len(x)), x, label=r"{}-$T_{}$".format(G, k))

df = pd.concat(rows)
#plt.title(r'Projected GD on $\|CW\|_F^2$')
plt.yscale('log')
plt.ylabel(r'$\|CW\|_F^2$ error')
plt.xlabel('iterations')
#plt.legend()
plt.show()

# +
import seaborn as sns

#sns.set(style='whitegrid')
sns.set_context("paper", font_scale=1.5)
plot = sns.relplot(
    data=df, x='iterations', y=r'$\|CW\|_F^2$ error',
    hue="Group", style="Tensor Rank",
    palette=sns.color_palette("cubehelix", len(groups)),  # sns.color_palette("rocket_r"),
    kind="line", alpha=.8, height=3, aspect=16 / 12,  # legend=False,
)
plt.ylabel(r'$\|CX\|_F^2$ error')
plt.yscale('log')
#plt.xlim((-5,270))
plt.ylim((1e-11, 1e3))

# Rebuild the legend by hand: one box for groups, one for tensor ranks.
plot._legend.remove()
axes = plot.axes[0][0]
h, l = axes.get_legend_handles_labels()
l1 = axes.legend(h[:len(groups) + 1], l[:len(groups) + 1],
                 loc='best', bbox_to_anchor=(1, 1), framealpha=0, prop={'size': 10})
l2 = axes.legend(h[len(groups) + 1:], l[len(groups) + 1:],
                 loc='best', framealpha=0, prop={'size': 10})
l2.handletextpad = 0
# Right-align the rank labels and shift their markers for readability.
shift = max([t.get_window_extent().width for t in l2.get_texts()])
for t in l2.get_texts():
    t.set_ha('right')  # ha is alias for horizontalalignment
    t.set_position((shift, 0))
for line in l2.get_lines():  # renamed from `l` to avoid shadowing the label list
    line.set_xdata(np.array(line.get_xdata()) + 55)
axes.add_artist(l1)

plt.tight_layout()
plt.savefig('krylov_convergence.pdf', bbox_inches='tight')  # plain string: no placeholders
plt.show()
experiments/notebooks/additional_appendix_figs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Mathematical functions, strings and objects
# ## This chapter introduces Python functions for common math operations
# - A function is a group of statements that performs a task; in practice a
#   function body is best kept to no more than one screen.
# - Python's built-in functions need no import.
# <img src="../Photo/15.png"></img>

max(1, 2, 3, 4, 6)

for i in range(10):
    print(i)

for i in range(10):
    print(i)

pow(2, 3, 2)

round(10.10, 1)  # keep one decimal place

# ## Practise Python's built-in functions

# ## Python's math module provides many mathematical functions
# <img src="../Photo/16.png"></img>
# <img src="../Photo/17.png"></img>
#
# ## The constants pi and e are available as math.pi and math.e

# ## EP:
# - Using the math library, write a program that reads three vertices (x, y)
#   and returns the three angles of the triangle.
# - Note: Python trigonometry works in radians, so convert to degrees.
# <img src="../Photo/18.png">

# +
import math  # the original cell forgot this import

x1 = eval(input("x1"))
y1 = eval(input("y1"))
x2 = eval(input("x2"))
y2 = eval(input("y2"))
x3 = eval(input("x3"))
y3 = eval(input("y3"))

# BUGFIX: the original computed acos(a**2 - b**2 - c**2) on undefined
# side lengths. First derive each side from the vertices, then apply the
# full law of cosines A = acos((a^2 - b^2 - c^2) / (-2*b*c)).
a = math.sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2)  # side opposite vertex 1
b = math.sqrt((x1 - x3) ** 2 + (y1 - y3) ** 2)  # side opposite vertex 2
c = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)  # side opposite vertex 3

A = math.degrees(math.acos((a * a - b * b - c * c) / (-2 * b * c)))
B = math.degrees(math.acos((b * b - a * a - c * c) / (-2 * a * c)))
C = math.degrees(math.acos((c * c - b * b - a * a) / (-2 * a * b)))
print(A, B, C)
# -

import math  # import the package
a1 = math.fabs(-2)
print(a1)
b2 = math.radians(90)
b1 = math.cos(b2)  # cos takes radians — very important
print(b1)

import math  # import the package
a1 = math.sqrt(4)
print(a1)

import math  # import the package
# NOTE(review): radians(2*pi) converts an already-radian value again;
# kept as in the original lesson.
b2 = math.radians(2 * math.pi)
b1 = math.sin(b2)  # sin takes radians — very important
print(b1)

import math  # import the package
b2 = math.radians(2 * math.pi)
b1 = math.cos(b2)  # cos takes radians — very important
print(b1)

min(2, 2, 1)

import math  # import the package
a1 = math.log(math.e)
print(a1)

import math  # import the package
b2 = math.radians(47)
b1 = b2
print(b1)

import math  # import the package
b2 = math.degrees(math.pi / 7)
b1 = b2
print(b1)

# Simple quiz: note that eval() on user input is unsafe for untrusted
# input — acceptable only in this classroom context.
feet = eval(input("7+3=? "))
if feet == 10:
    print("zhengque")
else:
    print("cuowu")

import random
a = random.randint(0, 9)
b = random.randint(0, 9)
c = a + b
print(a, "+", b, "=")
feet = eval(input("shuru"))
if feet == c:
    print("zhengque")
else:
    print("cuowu")

a = 'joker'
a1 = "hahahahah"

def a():
    """
    docstring
    """
# When a triple-quoted string is not assigned to anything, it acts as a
# multi-line comment.

# ## Strings and characters
# - Strings must be inside single or double quotes; triple quotes allow
#   multi-line strings.
# - A triple-quoted string assigned to a variable is a string; otherwise it
#   serves as a multi-line comment.

# ## ASCII and Unicode
# - <img src="../Photo/19.png"></img>
# - <img src="../Photo/20.png"></img>
# - <img src="../Photo/21.png"></img>

# ## Functions ord and chr
# - ord returns the character's code point
# - chr returns the character for a code point

joker = "q"
ord(joker)

# ## EP:
# - Simple e-mail "encryption" with ord and chr
email = "<EMAIL>"
for i in email:
    test = ord(i) + 1
    test_test = chr(test)
    print(test_test, end="")

a = "he said,\"johon's program is easy to read\""
print(a)

# ## Escape sequences with \
# - a = "He said,"Johon's program is easy to read""

# ## Advanced print
# - Parameter end: what to print at the end (defaults to a newline)

# ## Function str
# - Coerces a value into a string
# - Other types (list, set, tuple, ...) come later

a = 100
str(a)

# ## String concatenation
# - Directly with "+"
# - With the join() function

a1 = "www.baidu.com/page="
a2 = "2h"
print(a1 + a2)

a1 = "www.baidu.com/page="
for i in range(10):
    a2 = a1 + str(i)
    print(a2)

"^".join(("a", "c"))

# %time '^'.join(("a","c","d","f"))

# ## EP:
# - Join "Welcome" "to" "Python"
# - Join the int 100 with "joker is a bad man"
# - Read a name from the console and print a compliment

" ".join(("Welcome", "to", "Python"))

a = 100
b = str(a)
"".join((b, "joker is a bad man"))

pt = input("name")
print("very good" + pt)

# ## Case study: minimal number of coins
# - Read a total amount (dollars and cents as a float) and break it into
#   dollars, quarters, dimes, nickels and pennies.
# <img src="../Photo/22.png"></img>

amount = eval(input('Ennter an amount,for example 11.56:'))
remainingAmount = int(amount * 100)  # work in integer cents
print(remainingAmount)
numberOfOneDollars = remainingAmount // 100
remainingAmount = remainingAmount % 100
numberOfQuarters = remainingAmount // 25
remainingAmount = remainingAmount % 25
numberOfDimes = remainingAmount // 10
remainingAmount = remainingAmount % 10
numberOfNickls = remainingAmount // 5
remainingAmount = remainingAmount % 5
numberOfPenies = remainingAmount
print(numberOfOneDollars, numberOfQuarters, numberOfDimes, numberOfNickls, numberOfPenies)

# - Floating point is a weak spot of plain Python; numeric work usually uses
#   NumPy types instead.
# <img src="../Photo/23.png"></img>

# Same exercise, but the amount is entered directly in cents.
remainingAmount = eval(input('Ennter an amount,for example 11.56:'))
print(remainingAmount)
numberOfOneDollars = remainingAmount // 100
remainingAmount = remainingAmount % 100
numberOfQuarters = remainingAmount // 25
remainingAmount = remainingAmount % 25
numberOfDimes = remainingAmount // 10
remainingAmount = remainingAmount % 10
numberOfNickls = remainingAmount // 5
remainingAmount = remainingAmount % 5
numberOfPenies = remainingAmount
print(numberOfOneDollars, numberOfQuarters, numberOfDimes, numberOfNickls, numberOfPenies)

# ## id and type
# - id shows the memory address, used in identity comparisons
# - type shows the element's type

a = 100
id(a)

# NOTE: identity of int literals is a CPython small-int caching detail;
# use == for value comparison in real code.
100 is 100

a = True
b = False
print()

# ## See the book for other formatting statements

# # Homework
# - 1
# <img src="../Photo/24.png"><img>
# <img src="../Photo/25.png"><img>

import math
r = eval(input("Enter the length from the center to a vertex: "))
s = 2 * r * math.sin(math.pi / 5)
area = 5 * (s ** 2) / (4 * math.tan(math.pi / 5))
print("The area of the pentagon is", area)

# - 2
# <img src="../Photo/26.png"><img>

import math
a1, b1 = eval(input("Enter point 1 (latitude and longitude) in degrees: "))
a2, b2 = eval(input("Enter point 2 (latitude and longitude) in degrees: "))
x1 = math.radians(a1)
x2 = math.radians(a2)
y1 = math.radians(b1)
y2 = math.radians(b2)
# Great-circle distance with the Earth's mean radius of 6371.01 km.
d = 6371.01 * (math.acos(math.sin(x1) * math.sin(x2)
                         + math.cos(x1) * math.cos(x2) * math.cos(y1 - y2)))
print("The distance between the two points is", d, "km")

# - 3
# <img src="../Photo/27.png"><img>

import math
s = eval(input("Enter the side: "))
area = 5 * (s ** 2) / (4 * math.tan(math.pi / 5))
print("The area of the pentagon is", area)

# - 4
# <img src="../Photo/28.png"><img>

import math
n = eval(input("Enter the number of sides: "))
s = eval(input("Enter the sides: "))
area = (n * s ** 2) / (4 * math.tan(math.pi / n))
print("The area of the pentagon is", area)

# - 5
# <img src="../Photo/29.png"><img>
# <img src="../Photo/30.png"><img>

# +
n = eval(input("Enter an ASCII code: "))
r = chr(n)
print("The character is", r)
# -

# - 6
# <img src="../Photo/31.png"><img>

# +
a = (input("Enter employee's name: "))
b = eval(input("Enter number of hours worked in a week: "))
c = eval(input("Enter hourly pay rate: "))
d = eval(input("Enter federal tax withholding rate: "))
e = eval(input("Enter state tax withholding rate: "))
f = b * c  # gross pay
print("Employee Name:", a)
print("Hours Worked", b)
print("Pay Rate", "$" + str(c))
print("Gross Pay", "$" + str(f))
print("Deductions:")
print("  Federal withholding(20.0%):", "$" + str(f * 0.2))
print("  state withholding(9.0%):", "$" + str(f * 0.09))
print("  Total Deduction:", "$" + (str(f * 0.2 + f * 0.09)))
print("Net Pay:", "$" + (str(f - f * 0.2 - f * 0.09)))
# -

# - 7
# <img src="../Photo/32.png"><img>

# Reverse a 4-digit integer by extracting its digits.
n = eval(input("Enter an integer: "))
a1 = n // 1000
a2 = n % 1000 // 100
a3 = n % 100 // 10
a4 = n % 10
print("The reversed number is", "".join((str(a4), str(a3), str(a2), str(a1))))

# - 8 (advanced):
# > Encrypt a text and write the decrypted file to disk
n = "iiiii"
for i in n:
    test = ord(i) + 1
    test_test = chr(test)
    print(test_test, end="")
7.17.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import gym

from EduSim.Envs.KSS import KSSAgent

# Build the KSS environment and an agent over its action space.
env = gym.make('KSS-v0', learner_num=4000)
agent = KSSAgent(env.action_space)

# Experiment configuration.
max_episode_num = 1000   # stop after this many episodes (None = unlimited)
n_step = False           # True: recommend a whole path at once; False: step by step
max_steps = 20           # recommendations per episode
train = True             # tune the agent after each episode
episode = 0

while True:
    # Stop once the episode budget is exhausted.
    if max_episode_num is not None and episode > max_episode_num:
        break

    # Start a new episode; the environment may run out of learners.
    try:
        agent.begin_episode(env.begin_episode())
        episode += 1
    except ValueError:  # pragma: no cover
        break

    # Recommend and learn.
    if n_step is True:
        # Generate the full learning path in one shot.
        learning_path = agent.n_step(max_steps)
        env.n_step(learning_path)
    else:
        # Generate the learning path one item at a time.
        for _ in range(max_steps):
            try:
                learning_item = agent.step()
            except ValueError:  # pragma: no cover
                break
            interaction, _, _, _ = env.step(learning_item)
            agent.observe(**interaction["performance"])

    # Test the learner to measure learning effectiveness, then close out.
    agent.episode_reward(env.end_episode()["reward"])
    agent.end_episode()

    if train is True:
        agent.tune()
docs/tutorial.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Let's take our old friend: the factorial function!
# (Written as a def rather than a named lambda, per PEP 8 E731; same name,
# same recursion, same results.)
def l_factorial(n):
    """Return n! computed recursively (n! = n * (n-1)!, with 0! = 1)."""
    return 1 if n == 0 else n * l_factorial(n - 1)


# ## Chaining functions and combining return values
#
# Say that we want to call this function a number of times, with different
# arguments, and do something with the return values. How can we do that?

# +
def chain_mul(*what):
    """Takes a list of (function, argument) tuples. Calls each function
    with its argument, multiplies up the return values, (starting at 1)
    and returns the total."""
    total = 1
    for (fnc, arg) in what:
        total *= fnc(arg)
    return total


chain_mul((l_factorial, 2), (l_factorial, 3))
# -

# ## Operators as regular functions
#
# The function above is not very general: it can only multiply values, not
# divide or subtract them. Ideally, we would pass an operator to the function
# as well. But `*` is syntax and not an object that we can pass! Fortunately,
# Python's built-in `operator` module offers all operators as regular
# functions.

# +
import operator


def chain(how, *what):
    """Fold (function, argument) tuples with a binary operator.

    Calls each function with its argument and combines the return values
    with `how`, i.e. total = how(total, fnc(arg)) for each pair.

    The accumulator seed defaults to 1 (the original hard-coded value, kept
    for backward compatibility); pass `initial=` to supply the identity of
    a different operator, e.g. initial=0 for operator.add.
    """
    total = initial if (initial := 1) is None else initial  # placeholder, see below
    total = 1
    for (fnc, arg) in what:
        total = how(total, fnc(arg))
    return total


chain(operator.truediv, (l_factorial, 2), (l_factorial, 3))
7181_03_code_ACC_SB/The operator module - operators as regular functions.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [default]
#     language: python
#     name: python2
# ---

# # Glassdoor Salaries
# ## Salary Prediction Model
#
# WIP. Prints use the function form, which behaves identically on Python 2
# and 3 for these single-argument calls.

# ## Model setup

# +
import matplotlib.pyplot as plt
from numpy.random import random, permutation
from scipy import misc, ndimage
from scipy.ndimage.interpolation import zoom
from scipy.io import mmread
from scipy.sparse import coo_matrix, csr_matrix
import numpy as np
import math
import csv
import random  # shadows numpy.random.random above; code below uses random.randint
import keras
import json
from keras import backend as K  # FIX: was imported twice
from keras.utils.data_utils import get_file
from keras.models import Sequential, Model
from keras.layers.core import Flatten, Dense, Dropout, Lambda
from keras.layers import Input
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.optimizers import SGD, RMSprop
from keras.preprocessing import image
# -

# %pwd
# %matplotlib inline

# Location of the data directory; load the training matrix as a sparse matrix.
# The matrix text format is: training row + " " + column (where value is +1).
data_dir = '/home/ubuntu/full'
sparse_training_matrix = mmread('%s/training-matrix.txt' % data_dir)
print("Done")

# Notes:
#   a.todense() / a.M — dense matrix representation (numpy.matrix)
#   a.A             — dense ndarray representation (numpy.array)
print(sparse_training_matrix.shape)
print(sparse_training_matrix.getrow(568370))
print("Done")

# +
def salaryToTarget(salary):
    """Map a salary to a class index: salaries are clamped to
    [$15K, $595K] and bucketed into $5K ranges, 1-based (0 is reserved)."""
    return int(round((max((min((salary * 1.0, 595000.0)), 15000)) - 15000) / 5000, 0)) + 1


def targetToSalary(target):
    """Inverse of salaryToTarget: lower bound of the bucket."""
    return ((target - 1) * 5000) + 15000


def logSalaryToTarget(logSalary):
    """Map a natural-log salary to its class index."""
    return salaryToTarget(math.pow(math.e, logSalary))


print(salaryToTarget(15000))
print(salaryToTarget(25000))
print(salaryToTarget(2500000))
print(targetToSalary(8))
print(logSalaryToTarget(10.3089859934221))

# One-hot (boolean) target matrix: row per example, 118 salary classes.
# FIX: np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
salaries = np.zeros((6516817, 118), dtype=bool)
with open('%s/log-salaries-truncated.csv' % data_dir, 'r') as dest_f:
    data_iter = csv.reader(dest_f)
    for row in data_iter:
        x_index = int(row[0])
        if x_index < 6516817:
            salaries[x_index, logSalaryToTarget(float(row[1]))] = True
print("Created salaries")
# -

# ## Model creation
#
# Features: binary vector of length 1812571 per row.
# Targets: 118-length vector of $5K salary classes ($15K..$600K); index 0
# should be ignored.


def ConvBlock(layers, model, filters):
    """Append `layers` zero-padded 3x3 conv layers followed by a 2x2
    max-pool (VGG-style block). Currently unused by PhillipSalary1."""
    for i in range(layers):
        model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(filters, 3, 3, activation='relu'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))


def FCBlock(model):
    """Append a fully-connected block with dropout. Currently unused."""
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.5))


def PhillipSalary1():
    """A minimal linear classifier: one dense layer into a 118-way softmax.
    (The convolutional blocks are left out for this baseline.)"""
    model = Sequential()
    model.add(Dense(118, input_shape=(1812571,)))  # todo this is definitely not correct
    model.add(Dense(118, activation='softmax'))
    return model


# +
# Recreate the model from scratch on re-runs of this cell.
try:
    del model
except NameError:
    pass
model = PhillipSalary1()
# -

lr = 0.01
model.compile(optimizer=RMSprop(lr=lr), loss='binary_crossentropy',
              metrics=['accuracy'])

csr_sparse_training_matrix = csr_matrix(sparse_training_matrix, dtype=bool)

# Train/evaluate on random 512-row segments.
for n in range(10):
    segment = random.randint(1, 12000)
    start = 512 * segment
    end = start + 512
    test_y = salaries[start:end]
    test_x = csr_sparse_training_matrix[start:end].todense()
    # FIX: was `n % 10 is 0` — identity comparison on ints is an
    # implementation detail; use equality.
    if n % 10 == 0:
        print("Evaluating...")
        print(model.metrics_names)
        print(model.test_on_batch(x=test_x, y=test_y))
    else:
        print("Training...")
        print(model.train_on_batch(x=test_x, y=test_y))

for n in range(1, 2):
    segment = random.randint(1, 12000)
    start = 512 * segment
    end = start + 512
    test_y = salaries[start:end]
    test_x = csr_sparse_training_matrix[start:end].todense()
    print(model.metrics_names)
    print(model.evaluate(x=test_x, y=test_y))

# +
# history = model.fit(x=test_x, y=test_y, shuffle="batch")
# -

# +
# NOTE(review): `history` is only defined if the fit() call above is
# uncommented; as written, this cell raises NameError when executed.
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# -

model.save_weights("model-linear.h5")
experiments/salary-estimates/glassdoor-salaries-linear-2.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## 3. Linear Regression Operations # BaLinear regression implementation with TensorFlow v2 library. # # This example is using a low-level approach to better understand all mechanics behind the training process. # # * Author: [<NAME>](@Snapchat) # * Project: [https://github.com/aymericdamien/TensorFlow-Examples/](https://github.com/aymericdamien/TensorFlow-Examples/) # + from __future__ import absolute_import, division, print_function import tensorflow as tf import numpy as np rng = np.random # Parameters. learning_rate = 0.01 training_steps = 1000 display_step = 50 # Training Data. X = np.array([3.3,4.4,5.5,6.71,6.93,4.168,9.779,6.182,7.59,2.167, 7.042,10.791,5.313,7.997,5.654,9.27,3.1]) Y = np.array([1.7,2.76,2.09,3.19,1.694,1.573,3.366,2.596,2.53,1.221, 2.827,3.465,1.65,2.904,2.42,2.94,1.3]) # + # Weight and Bias, initialized randomly. W = tf.Variable(rng.random(), name="weight") b = tf.Variable(rng.random(), name="bias") # Linear regression (Wx+b) def linear_regression(x): return W * x + b # Mean square error. def mean_square(y_pred, y_true): return tf.reduce_mean(tf.square(y_pred - y_true)) # Stochastic Gradient Descent Optimizer. optimizer = tf.optimizers.SGD(learning_rate) # - # Optimization process. def run_optimization(): # Wrap computation inside a GradientTape for automatic differentiation. with tf.GradientTape() as g: pred = linear_regression(X) loss = mean_square(pred, Y) # Compute gradients. gradients = g.gradient(loss, [W, b]) # Update W and b following gradients. optimizer.apply_gradients(zip(gradients, [W, b])) # Run training for the given number of steps. for step in range(1, training_steps + 1): # Run the optimization to update W and b values. 
run_optimization() if step % display_step == 0: pred = linear_regression(X) loss = mean_square(pred, Y) print("step: %i, loss: %f, W: %f, b: %f" % (step, loss, W.numpy(), b.numpy())) # + import matplotlib.pyplot as plt # Graphic display plt.plot(X, Y, 'ro', label='Original data') plt.plot(X, np.array(W * X + b), label='Fitted line') plt.legend() plt.show() # -
quizs/tensorflow_linear_regression.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from sqlalchemy import event
import pandas as pd
import numpy as np
import sqlalchemy
import time
import matplotlib.pyplot as plt
import psutil
import random
from sqlalchemy import create_engine


def save_results_to_csv(results, file):
    """Persist benchmark tuples (iteration, elapsed time, cpu %, mem %) to a
    timestamped CSV; `file` is a format template with one `{}` slot."""
    from datetime import datetime
    csv_df = pd.DataFrame(results, columns=['Registros', 'Tiempo', 'CPU', 'Memoria'])
    dia = datetime.now().strftime("%d%m%Y_%H_%M_%S")
    csv_df.to_csv(file.format(str(dia)))


# Output file template.
resultados_delete = '../Results/MySQL/MySQLDelete_test_{}.csv'

# NOTE(review): credentials are hard-coded for what looks like a local test
# instance; move them to configuration before sharing this notebook.
engine = create_engine('mysql://root:mysql@127.0.0.1:6446/CurrentAccountDomainSchema')
engine.execute('set global group_replication_transaction_size_limit = 300000000;')


# +
@event.listens_for(engine, "before_cursor_execute")
def receive_before_cursor_execute(
    conn, cursor, statement, params, context, executemany
):
    # Enable the driver's bulk fast path for executemany statements.
    if executemany:
        cursor.fast_executemany = True
# -

# Fetch the distinct PartyIds; we will loop over a random sample of them,
# deleting the matching records and timing each transaction.
result = engine.execute("""SELECT DISTINCT (PartyId) FROM CustomerProfileDomainSchema.CustomerProfile""")
partyid_list = []
for partyId in result.fetchall():
    partyid_list.append(partyId)

len(partyid_list)

partyid_list_dp = random.sample(partyid_list, 100)

# +
registers = []
# FIX: the original called partyid_list_dp.remove(partyId) inside the loop,
# mutating the list while iterating over it and thereby skipping every other
# element. enumerate() also restores the iteration counter, whose increment
# had been commented out (every row recorded iteration 1).
for iteracion, partyId in enumerate(partyid_list_dp, start=1):
    # NOTE(review): string-formatted SQL; the value comes from the database
    # itself here, but a parameterised query would still be safer.
    delete_query = """DELETE FROM CustomerProfileDomainSchema.CustomerProfile
        USING CustomerProfileDomainSchema.CustomerProfile
        JOIN CurrentAccountDomainSchema.CurrentAccount
        JOIN PositionKeepingDomainSchema.PositionKeeping
        WHERE CustomerProfileDomainSchema.CustomerProfile.PartyId = CurrentAccountDomainSchema.CurrentAccount.PartyId
        AND CurrentAccountDomainSchema.CurrentAccount.AccountId = PositionKeepingDomainSchema.PositionKeeping.AccountId
        AND CustomerProfileDomainSchema.CustomerProfile.PartyId = {}""".format(partyId[0])

    time_inicial = time.time()
    engine.execute(delete_query)
    time_final = time.time()

    used_cpu = psutil.cpu_percent()
    mem_used = psutil.virtual_memory().percent

    # Tuple: (iteration number, per-transaction elapsed seconds, cpu %, mem %)
    total_time = round(time_final - time_inicial, 3)
    registers.append((iteracion, total_time, used_cpu, mem_used))
# -

registers

# Store the results as CSV.
save_results_to_csv(registers, resultados_delete)
Scripts/Delete_MySQL.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Advanced techniques
#
# Feature-engineering on the Kaggle house-prices dataset, finishing with
# Deep Feature Synthesis (featuretools). The helper class `Dataset` comes
# from the project-local `dataset` module
# (https://github.com/renero/class_notebooks/tree/master/src).

# ## Setup the dataset

# +
import nbimporter
import numpy as np
import featuretools as ft
import pandas as pd
import matplotlib.pyplot as plt
import scipy.stats as ss
import seaborn as sns
import warnings

from copy import copy
from scipy.stats import skew, boxcox_normmax
from scipy.special import boxcox1p
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score, ShuffleSplit, \
    validation_curve, cross_validate
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline, make_pipeline

warnings.filterwarnings('ignore')
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=DeprecationWarning)
warnings.simplefilter('ignore')

from dataset import Dataset
# -

houses = Dataset('./data/houseprices_prepared.csv.gz')
houses.describe()

# The NA's in the raw file mostly mean "feature does not apply", so replace
# them with explicit category values instead of leaving true missing data.
houses.replace_na(column='Electrical', value='Unknown')
houses.replace_na(column=houses.names('categorical_na'), value='None')
houses.set_target('SalePrice')
houses.describe()

# ### Feature Creation
# Drop the Id and sum up related square-footage / porch / bathroom fields.
houses.drop_columns('Id')
houses.aggregate(['1stFlrSF', '2ndFlrSF', 'BsmtFinSF1', 'BsmtFinSF2'],
                 'House_SF')
houses.aggregate(['OpenPorchSF', '3SsnPorch', 'EnclosedPorch',
                  'ScreenPorch', 'WoodDeckSF'], 'Porch_sf')
houses.aggregate(['FullBath', 'BsmtFullBath', 'HalfBath', 'BsmtHalfBath'],
                 'Total_Baths')
houses.describe()

# ### Scale numerical features
# Center and scale so estimators see roughly standard-normal features.
houses.scale()
houses.describe()

# ### Check skewness
# Power-transform skewed features towards normality.
houses.fix_skewness()
houses.describe()

# ### Check correlation
numericals_to_drop, corr_num = houses.numerical_correlated(threshold=0.7)
print('There are {} correlated columns to remove.'.format(
    len(numericals_to_drop)))
print(numericals_to_drop)
houses.plot_correlation_matrix(corr_num)

categoricals_to_drop, corr_categ = houses.categorical_correlated(threshold=0.7)
print('There are {} correlated columns to remove.'.format(
    len(categoricals_to_drop)))
print(categoricals_to_drop)
houses.plot_correlation_matrix(corr_categ)

houses.drop_columns(categoricals_to_drop + numericals_to_drop)
houses.describe()

# ### Under represented features
urf = houses.under_represented_features()
print('Features with unrepresented categories:\n', urf)
houses.drop_columns(urf)
houses.describe()

# ### OneHot encoding for categorical variables
# Keep a pre-encoding copy for the feature-synthesis step below; one-hot
# encoding must happen AFTER measuring correlation, not before.
houses_prepared = copy(houses)
houses.onehot_encode()
houses.describe()

# ### Baseline basic all-numeric features
X, y = houses.split(test_size=0.2)

model = LinearRegression()
cv = ShuffleSplit(n_splits=1000, test_size=0.2, random_state=666)
scores = cross_val_score(model, X.train, y.train, cv=cv, scoring='r2')
print('Obtained {} positive R2 scores'.format(len(scores[scores > 0.0])))
print('Best Validation R2: {:.2f}'.format(max(scores)))
print('Avg. Validation R2: {:.2f}'.format(np.mean(scores[scores > 0.0])))

sns.distplot(scores[scores > 0.0], hist=False, kde=True,
             kde_kws={'shade': True, 'linewidth': 3})
plt.title('Distribution of R2 scores')
plt.show()

# The degree-1 model is too simple (high bias): add a degree-2 polynomial
# expansion in front of the linear regression.
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=123)
pipeline = make_pipeline(
    PolynomialFeatures(degree=2, include_bias=False),
    LinearRegression(n_jobs=-1)).fit(X.train, y.train)
scores = cross_val_score(pipeline, X.train, y.train, scoring="r2", cv=cv)
print('Obtained {} positive R2 scores'.format(len(scores[scores > 0.0])))
print('Best CV R2: {:.2f}'.format(max(scores)))
print('Avg. CV R2: {:.2f} +/- {:.02}'.format(
    np.mean(scores[scores > 0.0]), np.std(scores[scores > 0.0])))
print('R2 in hold-out dataset: {:.2f}'.format(
    pipeline.score(X.test, y.test)))

# Compare a single fit on the whole training set against the best of the
# per-split estimators returned by cross_validate().
pipeline = Pipeline([
    ('polynomials', PolynomialFeatures(degree=2, include_bias=False)),
    ('linear_regression', LinearRegression(n_jobs=-1))])
pipeline.fit(X.train, y.train)
training_score = pipeline.score(X.test, y.test)
print('R2 from entire-dataset estimator: {:.2f}'.format(training_score))

scores = cross_validate(pipeline, X.train, y.train, scoring=['r2'], cv=5,
                        return_estimator=True)
# FIX: the original used range(len(scores)) — that is the number of KEYS of
# the cross_validate result dict, not the number of fitted estimators.
split_scores = [est.score(X.test, y.test) for est in scores['estimator']]
index_best = split_scores.index(max(split_scores))
print('Best estimator R2 score: {:.2f}'.format(split_scores[index_best]))

# ## Deep Feature Synthesis

# ### Build the EntitySet
es = ft.EntitySet()
es = es.entity_from_dataframe(entity_id='houses',
                              dataframe=pd.concat([houses_prepared.features,
                                                   houses_prepared.target],
                                                  axis=1),
                              index='Id')
es

# ### Normalize the entity
es.normalize_entity(base_entity_id='houses', new_entity_id='houses_norm',
                    index='Id')
es

# ### Deep feature synthesis
f_matrix, f_defs = ft.dfs(entityset=es, target_entity='houses_norm',
                          verbose=1, n_jobs=-1)

# Remove synthesized variables derived from the target (data leakage).
drop_cols = [col for col in f_matrix
             if houses_prepared.target.name in col
             and col != houses_prepared.target.name]
print('Need to drop columns:', drop_cols)
f_matrix = f_matrix[[x for x in f_matrix if x not in drop_cols]]

# Drop near-duplicate synthesized features (upper triangle, |corr| >= 0.99).
corr_matrix = f_matrix.corr().abs()
# FIX: np.bool was removed in NumPy 1.24; the builtin bool is equivalent.
upper = corr_matrix.where(
    np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
to_drop = [column for column in upper.columns if any(upper[column] >= 0.99)]
print('Need to remove {} columns with >= 0.99 correlation.'.format(len(to_drop)))
f_matrix = f_matrix[[x for x in f_matrix if x not in to_drop]]

# Build a dataframe with the synthesized and the original features.
fs_df = pd.concat(
    [f_matrix, houses_prepared.features, houses_prepared.target], axis=1)
fs = Dataset.from_dataframe(fs_df)
fs.describe()

# Fix some parts that are not correct after the join.
fs.replace_na(column=fs.names('numerical_na'), value=0)
fs.set_target(houses_prepared.target.name)
fs.onehot_encode()
fs.describe()

# ### Get the score!
X, y = fs.split()
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=123)
pipeline = make_pipeline(
    PolynomialFeatures(degree=2, include_bias=False),
    LinearRegression(n_jobs=-1)).fit(X.train, y.train)
scores = cross_val_score(pipeline, X.train, y.train, scoring="r2", cv=cv)
print('Obtained {} positive R2 scores'.format(len(scores[scores > 0.0])))
print('Best CV R2: {:.2f}'.format(max(scores)))
print('Avg. CV R2: {:.2f} +/- {:.02}'.format(
    np.mean(scores[scores > 0.0]), np.std(scores[scores > 0.0])))
print('R2 in hold-out dataset: {:.2f}'.format(
    pipeline.score(X.test, y.test)))
FE2 Advanced techniques.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import pandas as pd
import numpy as np

# # Distances

# +
# The three datasets are compared pairwise in a cycle:
# mnist->handsign, handsign->fashion, fashion->mnist.
_DATASETS = ('mnist', 'handsign_mnist', 'fashion_mnist')


def _load_correlation_distances(activator, n_models=10):
    """Return three lists of normalised correlation distances, one entry per
    model index 0..n_models-1, for the given activation function name.

    For each model index i, reads `<dataset>/results/<activator>/cnn_K<i>.csv`
    for the three datasets, then computes the norm of the pairwise
    column-correlations, normalised by the self-correlation norm of the
    mnist frame.

    FIX: this helper replaces three byte-identical copy-pasted loops (one
    per activator) from the original notebook.
    """
    mnist_d, hands_d, fashn_d = [], [], []
    for i in range(n_models):
        df_mnist, df_hands, df_fashn = (
            pd.read_csv(ds + "/results/" + activator + "/cnn_K" + str(i) + ".csv")
            for ds in _DATASETS)
        # Self-correlation norm of the mnist frame as normalisation constant.
        max_norm = np.linalg.norm(df_mnist.corrwith(df_mnist))
        mnist_d.append(np.linalg.norm(df_mnist.corrwith(df_hands)) / max_norm)
        hands_d.append(np.linalg.norm(df_hands.corrwith(df_fashn)) / max_norm)
        fashn_d.append(np.linalg.norm(df_fashn.corrwith(df_mnist)) / max_norm)
    return mnist_d, hands_d, fashn_d


mnist_sgmd, hands_sgmd, fashn_sgmd = _load_correlation_distances("sgmd")
mnist_tanh, hands_tanh, fashn_tanh = _load_correlation_distances("tanh")
mnist_relu, hands_relu, fashn_relu = _load_correlation_distances("relu")
# -
Analysis-master-corrwith-self.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## 0. Utility


def check_fun(ref_fun, fun, input_gen, ntimes=10, **kwargs):
    """Check `fun` against the reference implementation `ref_fun` on
    `ntimes` random inputs produced by `input_gen(**kwargs)`."""
    for _ in range(ntimes):
        input_ = input_gen(**kwargs)
        assert fun(*input_) == ref_fun(*input_)
    print("Tests passed")


# +
import random


def select_input_gen(n, MAX=(1 << 32 - 1)):
    """Generate a random (array, k) input pair for the selection algorithm.

    NOTE(review): `1 << 32 - 1` parses as `1 << 31` (shift binds looser than
    subtraction); kept as-is since it only bounds the random values.
    """
    array = [random.randint(0, MAX) for _ in range(n)]
    return array, random.randint(0, n - 1)
# -

# ## 1. Selection

import math


def select(A, k):
    """Return the k-th (0-indexed) smallest element of A.

    Deterministic linear-time selection (median of medians) with group
    width 5. A is not modified; duplicates are handled by sending elements
    equal to the pivot to neither partition.
    """
    w = 5
    n = len(A)
    if n < w:
        return sorted(A)[k]
    # FIX: was math.ceil(n // w) — the floor division happens first, so the
    # ceil was a no-op and the final partial group of up to w-1 elements was
    # silently excluded from the pivot computation, weakening the
    # worst-case guarantee.
    ngroups = math.ceil(n / w)
    medians = [None] * ngroups
    for i in range(ngroups):
        group = sorted(A[i * w : (i + 1) * w])
        medians[i] = group[len(group) // 2]
    # Median of medians: guaranteed a constant-fraction split.
    median = select(medians, ngroups // 2)
    L, R = [], []
    for num in A:
        if num < median:
            L.append(num)
        elif num > median:
            R.append(num)
    if k < len(L):
        return select(L, k)
    elif k < n - len(R):
        # k falls inside the run of elements equal to the pivot.
        return median
    else:
        return select(R, k - (n - len(R)))


check_fun((lambda array, k: sorted(array)[k]), select, select_input_gen,
          ntimes=10, n=1000)
divide-and-conquer/selection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Scrape closed issues of the GitHub repos listed in googlearchivesample.txt
# and export one CSV row per issue. (Colab cell metadata trimmed.)

# Originally a bare Colab shell cell; a bare `pip install` line is a syntax
# error in a .py script, so it is kept as a comment:
# !pip install PyGithub

import os

from github import Github

# SECURITY(review): several personal access tokens were hard-coded here.
# Committed tokens must be treated as leaked and revoked; the token is now
# read from the environment instead.
s = Github(os.environ.get("GITHUB_TOKEN", ""))

from datetime import datetime
import pandas as pd

from google.colab import drive
drive.mount('/content/drive/')

# cd drive/My Drive/artigo_ia/artigoVEM/0-buscandoCodeSamples


def getAllIssuesRequests(sample):
    """Return the closed issues of repo `sample` (``owner/name``),
    excluding pull requests (which get_issues() also yields)."""
    repo = s.get_repo(sample)
    print(sample)
    issues = []
    for issue in repo.get_issues(state='closed'):
        # True issues have "/issues/" in their html_url; PRs have "/pull/".
        if "/issues/" in issue.html_url:
            issues.append(issue)
    return issues


def getDataframe(dataframe, framework, sample, issues):
    """Return `dataframe` with one row appended per issue in `issues`.

    FIX: uses pd.concat — DataFrame.append was deprecated in pandas 1.4 and
    removed in 2.0.
    """
    rows = [
        {
            "framework": framework,
            "sample": sample,
            "data de fechamento": Issue.closed_at,
            "data de criação": Issue.created_at,
            "tempo para fechamento": (Issue.closed_at - Issue.created_at).days
                if Issue.closed_at != '' and Issue.closed_at is not None else "",
            "labels": [label.name for label in Issue.labels],
            "state": Issue.state,
            "update_at": Issue.updated_at,
            "titulo": Issue.title,
            "number": Issue.number,
            "html-url": Issue.html_url,
            "creator user cargo": Issue.user.role,
            "creator user empresa": Issue.user.company,
            "creator user local": Issue.user.location,
            "creator user login": Issue.user.login,
            "creator user email": Issue.user.email,
            "creator user seguidores": Issue.user.followers,
            "creator user tempo no GitHub": (Issue.created_at - Issue.user.created_at).days,
            "closed user cargo": Issue.closed_by.role
                if Issue.state == "closed" and Issue.closed_by is not None else "",
            "closed user empresa": Issue.closed_by.company
                if Issue.state == "closed" and Issue.closed_by is not None else "",
            "closed user local": Issue.closed_by.location
                if Issue.state == "closed" and Issue.closed_by is not None else "",
            "closed user login": Issue.closed_by.login
                if Issue.state == "closed" and Issue.closed_by is not None else "",
            "closed user email": Issue.closed_by.email
                if Issue.state == "closed" and Issue.closed_by is not None else "",
            "closed user seguidores": Issue.closed_by.followers
                if Issue.state == "closed" and Issue.closed_by is not None else "",
            "closed user tempo no GitHub": (Issue.closed_at - Issue.closed_by.created_at).days
                if Issue.state == "closed" and Issue.closed_by is not None else "",
        }
        for Issue in issues
    ]
    return pd.concat([dataframe, pd.DataFrame(rows)], ignore_index=True)


def export(dataframe):
    """Write the collected issue rows to CSV."""
    dataframe.to_csv("GoogleArchiveIssues37.csv")


def main():
    """Read the sample list and collect issue metadata for each repo."""
    samples = pd.read_csv("googlearchivesample.txt", names=["sample"])
    dataframe = pd.DataFrame(columns=[
        "framework", "sample", "data de fechamento", "data de criação",
        "tempo para fechamento", "labels", "state", "update_at",
        "titulo", "number", "html-url", "creator user cargo",
        "creator user empresa", "creator user local", "creator user login",
        "creator user email", "creator user seguidores",
        "creator user tempo no GitHub", "closed user cargo",
        "closed user empresa", "closed user local", "closed user login",
        "closed user email", "closed user seguidores",
        "closed user tempo no GitHub"])
    for sample in samples["sample"]:
        # The framework is the repo owner (text before the first "/").
        framework = sample.split("/")[0]
        issues = getAllIssuesRequests(sample)
        dataframe = getDataframe(dataframe, framework, sample, issues)
    export(dataframe)


if __name__ == "__main__":
    main()
0-buscandoCodeSamples/buscandoIssues.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# NumPy practice script: array handling, broadcasting and reductions.

import numpy as np

# Section 1 : Array Handling (creation, insert, append and delete), Attributes

# Create a numpy array of dimension 4x3
a = np.arange(12).reshape(4, 3)
b = np.array([1, 2, 3])
c = np.arange(1, 5)
# BUG FIX: the original printed `c.tolist` (the bound method object itself);
# it must be *called* to show the list.
print("->", c.tolist())
print("a=\n{}\nb=\n{}\nc=\n{}".format(a, b, c))

# conversion between list and numpy array
d = np.array(list(b))  # list(b): ndarray -> list; np.array(...): list -> ndarray
print("list(b) = {} , type(list(b)) = {}\nd={} , type(d) ={}\n".format(list(b), type(list(b)), d, type(d)))

# convert type of array
fa = a.astype(np.float64)
print("fa type is {} with content =\n".format(fa.dtype), fa)
print("Python builtin type for a =", type(a), " and element of a=", type(a[0][0]))

# Attributes of array
print("Shape of a =", a.shape)
print("Shape of row (axis=0) of a = ", a.shape[0])
print("Shape of col (axis=1) of a = ", a.shape[1])
print("Size/Number of elements of a =", a.size)
print("Dimension of a =", a.ndim)
print("Element type a = ", a.dtype)

print("Create Zeros Matrix with builtin function : \n", np.zeros((4, 3), dtype=np.int64))
print("Create Ones Matrix with builtin function : \n", np.ones((4, 3), dtype=np.int64))
# note: eye(4,3) is rectangular -- ones on the main diagonal, not a square identity
print("Create Identity Matrix with builtin function : \n", np.eye(4, 3, dtype=np.int64))
# note: np.diag on a 2-D input *extracts* the diagonal; it builds a diagonal
# matrix only from a 1-D input
print("Create Diagonal Matrix with builtin function : \n", np.diag(a))
print("Create matrix with 4 elements, linearly spaced between 1 and 11 :", np.linspace(1, 11, 4, dtype=np.int64))
print("Create matrix with from 0 less then 12", np.arange(0, 12))
print("Create matrix with from 0 less then 12 at step of 2", np.arange(0, 12, 2))

# np.delete / np.append / np.insert all return new arrays; `a` is unchanged
print("Original array=\n", a)
print("Delete 2nd row : \n", np.delete(a, 1, axis=0))
print("Delete 1st column : \n", np.delete(a, 0, axis=1))
print("Delete 1st and 2nd row : \n", np.delete(a, [0, 1], axis=0))
print('Appended row a =\n', np.append(a, [[12, 13, 14]], axis=0), '\n')
print('Insert row at 1st a =\n', np.insert(a, 1, [[12, 13, 14]], axis=0), '\n')
print('Insert col at 1st a =\n', np.insert(a, 1, c, axis=1), '\n')

# Boolean indexing
q = a  # q is an *alias* of a (no copy is made here)
print('Get elements with condition (a > 4)', q[q > 4])
print(q, '\n', a)
t = a.copy()  # independent copy, so modifying t won't modify a
t[t > 4] = -1
print('Assign 0 for elements with condition a=\n{}\nt = (a > 4)=\n{}'.format(a, t))

t = np.random.randint(1, 200, size=(4, 3))
print("t =\n", t)
t1 = t.copy()
t1.sort(axis=1)
print("Sorted t axis=1\n", t1)
t2 = t.copy()
t2.sort(axis=0)
print("Sorted t axis=0\n", t2)
t3 = t.copy()
t3.sort()
print("Sorted t \n", t3)

# Section 2 : Broadcast operations
a = np.arange(12).reshape(4, 3)
b = [1, 2, 3]
c = np.reshape(b, (3, 1))
print("a=\n{}\nb=\n{}\nc=\n{}".format(a, b, c))
print("a=\n{}\n\nScalar Addition with constant a+2=\n{}".format(a, a + 2))
print("Scalar Addition with constant a+2 using method=\n{}".format(np.add(a, 2)))
print("a=\n{}\nb=\n{}\n\nScalar Addition a+b=\n{}".format(a, b, a + b))
print("Scalar Addition a+b using method=\n{}".format(np.add(a, b)))
print("a=\n{}\n\nScalar multiplication with constant a*2=\n{}".format(a, a * 2))
print("Scalar multiplication with constant using method=\n{}".format(np.multiply(a, 2)))
print("a=\n{}\nb=\n{}\n\nScalar multiplication a*b=\n{}".format(a, b, a * b))
print("Scalar multiplication using method=\n{}".format(np.multiply(a, b)))
print("a=\n{}\n\nScalar multiplication with constant a*2=\n{}".format(a, a * 2))
print("Matrix/Dot multiplication with constant using method=\n{}".format(np.dot(a, 2)))
print("Matrix/Dot multiplication using method=\n{}".format(np.dot(a, b)))
t = [1, 2, 3]
print("Matrix/Cross multiplication using method=\n{}".format(np.cross(a, t)))
print("a=\n{}\nb={}\n\nSqrt of a=\n{}".format(a, b, np.sqrt(a)))
print("Power of a^2=\n{}".format(np.power(a, 2)))
print("Power of a^b=\n{}".format(np.power(a, b)))

# Section 3 : reductions along axes
print("a=\n{}\nSum of column elements (Along row) (a, axis=0)={}".format(a, np.sum(a, axis=0)))
print("Sum of row elements (Along col) (a, axis=1)={}".format(np.sum(a, axis=1)))
print("Sum of all axis ={}".format(np.sum(a)))

print("a=\n{}\nProduct of column elements (Along row) (a, axis=0)={}".format(a, np.prod(a, axis=0)))
print("Product of row elements (Along col) (a, axis=1)={}".format(np.prod(a, axis=1)))
a[0, 0] = 1  # Modify to avoid product resulting in zero
print("\na=\n{}\nProduct of all axis={}".format(a, np.prod(a)))
a[0, 0] = 0  # Restore

print("a=\n{}\n\nMean of column elements (Along row) (a, axis=0)={}".format(a, np.mean(a, axis=0)))
print("Mean of row elements (Along col) (a, axis=1)={}".format(np.mean(a, axis=1)))
print("Mean of all axis={}".format(np.mean(a)))

print("a=\n{}\n\nMedian of column elements (Along row) (a, axis=0)={}".format(a, np.median(a, axis=0)))
print("Median of row elements (Along col) (a, axis=1)={}".format(np.median(a, axis=1)))
print("Median of all axis={}".format(np.median(a)))

t = np.random.randint(100, 400, size=12)
t = t.reshape(4, 3)
# BUG FIX: the original format string contained the garbled escape "\n\Min"
print("t=\n{}\n\nMin of column elements (Along row) (t, axis=0)={}".format(t, np.min(t, axis=0)))
print("Min of row elements (Along col) (t, axis=1)={}".format(np.min(t, axis=1)))
# BUG FIX: the original computed np.median(t) here although the label says Min
print("Min of all axis={}".format(np.min(t)))
practice-ML-lib-tools/1 numpy-matrix.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Time Series - Lecture 2: Lags, Differencing, AR, and MA
#
# The goal of time series is similar to regressions in that we are trying to separate **trends/patterns/model** from **residuals/noise**. The difference is that time is a useful component in finding trends/patterns.
#
# Today, we will study **how** the time variable is used in finding patterns.
#
# ## Agenda
#
# 1. Lags and Differencing
# 2. Autoregression (AR)
# 3. Rolling Window or moving average (MA)
#
# Let us use the same example to explore these concepts:

import pandas as pd
import numpy as np
import matplotlib.pylab as plt

# Google-Trends style export: the real header is on the second row, hence skiprows=1
df = pd.read_csv('data/multiTimeline.csv', skiprows=1)
df.columns = ['month', 'diet', 'gym', 'finance']
df.month = pd.to_datetime(df.month)
df.set_index('month', inplace=True)
df.plot(figsize=(9, 9), linewidth=2, fontsize=15)

# ## Lags and Differencing
#
# ### Lags
# - Normally, "lag of $X_t$" is denoted $LX_t = X_{t-1}$, $L^2X_t=X_{t-2}$, and etc.
# - In data, we usually create lags by shifting the index by one, and normally you lose one data point

df.diet
df.diet.shift(-1)

# diet1 holds the shifted (lagged) diet series
df['diet1'] = df.diet.shift(-1)

pd.plotting.lag_plot(df.diet, lag=1)

# the same relationship drawn by hand as a scatter plot
plt.scatter(df['diet'], df['diet1'])

# ### Differencing
#
# - Change between consecutive observations, and can be written as: $$y'_t=y_t-y_{t-1}$$
# - Second-order differencing: $$ \begin{align} y''_t & = y'_t-y'_{t-1} \\ & = (y_t-y_{t-1}) - (y_{t-1}-y_{t-2}) \end{align} $$
# - Seasonal (or "lag-m differencing"): $$y'_t=y_t-y_{t-m}$$ where $m$ is the period for a season

# BUG FIX: the original cells referenced an undefined name `diet` and raised a
# NameError; it is meant to be the diet column of df.
diet = df['diet']

pd.merge(diet, diet.diff(), left_index=True, right_index=True)

pd.merge(diet, diet.diff(periods=2), left_index=True, right_index=True)

# - Differencing can eliminate (or reduce) trend and seasonality

df['diet'].plot()

diet.diff().plot()

# See that you have removed much of the trend and you can really see the peaks in January every year. Note: You can also perform additional data manipulations (e.g., 2nd order differencing) if the trend is not yet entirely removed. See [here](https://otexts.com/fpp2/stationarity.html) for more on differencing.
#
# Differencing can be helpful in turning your time series into a stationary time series.
#
# > Stationarity: properties of the data generating process do not depend on the time at which the series is observed.
#
# Stationary time series are useful because many time series forecasting methods are based on the assumption that the time series is approximately stationary.
#
# A more noticeable example can be found in asset prices. For example, let's look into 200 consecutive days of Google stock price and the differenced values.

from PIL import Image
Image.open("images/GOOGdiff.png")

# ## Autoregression
#
# - The model predicts the variable of interest using a linear combination of **historical** values of the variable.
# - An **AR(p) model** is written as:
# $$y_t = \beta_0 + \beta_1y_{t-1} + \beta_2y_{t-2} + \cdots + \beta_py_{t-p}+\epsilon_t$$
#
# For an AR(1) model, when:
#
# - $\beta_1=0$, the model, or $y_t$ is equivalent to noise
# - $\beta_1=1$ and $\beta_0=0$, the model, or $y_t$ is equivalent to random walk
# - $\beta_1=1$ and $\beta_0 \neq 0$, the model, or $y_t$ is equivalent to random walk with a drift
# - $\beta_1<0$, $y_t$ tends to oscillate around the mean
#
# We normally use autoregressive models for the following stationary cases:
# - for an AR(1) model, $-1<\beta_1<1$
# - for an AR(2) model, $-1<\beta_2<1,\beta_1+\beta_2<1,\beta_2-\beta_1<1$
#
# when $p\geq3$, the conditions are more complicated.

# give the index a regular monthly frequency so the model can infer it
df.index = pd.DatetimeIndex(df.index).to_period('M')

# NOTE(review): `statsmodels.tsa.arima_model.ARMA` was deprecated and then
# removed in statsmodels 0.13; on a modern install use
# `from statsmodels.tsa.arima.model import ARIMA` with order=(p, 0, q).
from statsmodels.tsa.arima_model import ARMA

# AR(1) fit of the diet series
model = ARMA(df.diet, order=(1, 0))
result = model.fit()
print(result.summary())

result.plot_predict(start='2004-01-01', end='2020-08-01')
plt.show()

df.index

# ## Moving Average
# - Rather than using the past values, a moving average model uses past **errors** in a regression.
# - An **MA(q) model** is written as: $$y_t=\beta_0+\epsilon_t+\beta_1\epsilon_{t-1}+\cdots+\beta_q\epsilon_{t-q}$$
#
# We normally use moving average models for the following (invertibility) cases:
# - for an MA(1) model, $-1<\beta_1<1$
# - for an MA(2) model, $-1<\beta_2<1,\beta_1+\beta_2>-1,\beta_1-\beta_2<1$
#
# when $p\geq3$, the conditions are more complicated.

# MA(7) fit of the diet series
model = ARMA(df.diet, order=(0, 7))
result = model.fit()
print(result.summary())

result.plot_predict(start='2004-01-01', end='2020-08-01')
plt.show()

# ## Recap:
# - You've learned the concepts of lags, autocorrelation, and differences.
# - You've learned to visualize the relationships for easier interpretation/understanding.
# - You've learned the theory of AR and MA models.
# - You've implemented, interpreted, and visualized the AR and MA models in Python.
#
#
# ## Up next:
# A deeper look at time series modeling by learning, in addition to the AR and MA models:
# - ARMA models
# - ARIMA models

# # Activities
#
# All of the activities below should be done with your chosen data set.
#
# 1. Graph lag, ACF, PACF, and difference (play around with different hyperparameters such as lags) to create interpretation/intuition about your data set.
# 2. Which model do you think will be appropriate to estimate given your data? Briefly talk through your logic.
# 3. Code AR models (play around with different hyperparameters), and visualize.
# 4. Code MA models (play around with different hyperparameters), and visualize.

# ## References
#
# The materials presented here are inspired and modified from the following sources:
# - https://otexts.com/fpp2/
# - https://github.com/CodeOp-tech/tsa-soyhyoj/blob/master/New_Years_Resolutions_Workshop.ipynb
# - https://machinelearningmastery.com/gentle-introduction-autocorrelation-partial-autocorrelation/
# - https://s3.amazonaws.com/assets.datacamp.com/production/course_4267/slides/chapter3.pdf
Time Series Analysis - Lecture 2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Solving problems by Searching
#
# This notebook serves as supporting material for topics covered in **Chapter 3 - Solving Problems by Searching** and **Chapter 4 - Beyond Classical Search** from the book *Artificial Intelligence: A Modern Approach.* This notebook uses implementations from the [search.py](https://github.com/aimacode/aima-python/blob/master/search.py) module. Let's start by importing everything from the search module.

from search import *

# ## Review
#
# Here, we learn about problem solving. Building goal-based agents that can plan ahead to solve problems, in particular the navigation problem / route-finding problem. First, we will start the problem solving by precisely defining **problems** and their **solutions**. We will look at several general-purpose search algorithms. Broadly, search algorithms are classified into two types:
#
# * **Uninformed search algorithms**: Search algorithms which explore the search space without having any information about the problem other than its definition.
#     * Examples:
#         1. Breadth First Search
#         2. Depth First Search
#         3. Depth Limited Search
#         4. Iterative Deepening Search
#
#
# * **Informed search algorithms**: These types of algorithms leverage any information (heuristics, path cost) on the problem to search through the search space and find the solution efficiently.
#     * Examples:
#         1. Best First Search
#         2. Uniform Cost Search
#         3. A\* Search
#         4. Recursive Best First Search
#
# *Don't miss the visualisations of these algorithms solving the route-finding problem defined on the Romania map at the end of this notebook.*

# ## Problem
#
# Let's see how we define a Problem. Run the next cell to see how the abstract class `Problem` is defined in the search module.
# %psource Problem

# The `Problem` class has six methods.
#
# * `__init__(self, initial, goal)` : This is what is called a `constructor` and is the first method called when you create an instance of the class. `initial` specifies the initial state of our search problem. It represents the start state from where our agent begins its task of exploration to find the goal state(s), which is given in the `goal` parameter.
#
#
# * `actions(self, state)` : This method returns all the possible actions the agent can execute in the given state `state`.
#
#
# * `result(self, state, action)` : This returns the resulting state if action `action` is taken in the state `state`. This `Problem` class only deals with deterministic outcomes. So we know for sure what every action in a state would result in.
#
#
# * `goal_test(self, state)` : Given a graph state, it checks if it is a terminal state. If the state is indeed a goal state, a value of `True` is returned. Else, of course, `False` is returned.
#
#
# * `path_cost(self, c, state1, action, state2)` : Return the cost of the path that arrives at `state2` as a result of taking `action` from `state1`, assuming total cost of `c` to get up to `state1`.
#
#
# * `value(self, state)` : This acts as a bit of extra information in problems where we try to optimize a value when we cannot do a goal test.

# We will use the abstract class `Problem` to define our real **problem** named `GraphProblem`. You can see how we define `GraphProblem` by running the next cell.

# %psource GraphProblem

# Now it's time to define our problem. We will define it by passing `initial`, `goal`, `graph` to `GraphProblem`. So, our problem is to find the goal state starting from the given initial state on the provided graph. Have a look at our romania_map, which is an Undirected Graph containing a dict of nodes as keys and neighbours as values.
# + romania_map = UndirectedGraph(dict( Arad=dict(Zerind=75, Sibiu=140, Timisoara=118), Bucharest=dict(Urziceni=85, Pitesti=101, Giurgiu=90, Fagaras=211), Craiova=dict(Drobeta=120, Rimnicu=146, Pitesti=138), Drobeta=dict(Mehadia=75), Eforie=dict(Hirsova=86), Fagaras=dict(Sibiu=99), Hirsova=dict(Urziceni=98), Iasi=dict(Vaslui=92, Neamt=87), Lugoj=dict(Timisoara=111, Mehadia=70), Oradea=dict(Zerind=71, Sibiu=151), Pitesti=dict(Rimnicu=97), Rimnicu=dict(Sibiu=80), Urziceni=dict(Vaslui=142))) romania_map.locations = dict( Arad=(91, 492), Bucharest=(400, 327), Craiova=(253, 288), Drobeta=(165, 299), Eforie=(562, 293), Fagaras=(305, 449), Giurgiu=(375, 270), Hirsova=(534, 350), Iasi=(473, 506), Lugoj=(165, 379), Mehadia=(168, 339), Neamt=(406, 537), Oradea=(131, 571), Pitesti=(320, 368), Rimnicu=(233, 410), Sibiu=(207, 457), Timisoara=(94, 410), Urziceni=(456, 350), Vaslui=(509, 444), Zerind=(108, 531)) # - # It is pretty straight forward to understand this `romania_map`. The first node **Arad** has three neighbours named **Zerind**, **Sibiu**, **Timisoara**. Each of these nodes are 75, 140, 118 units apart from **Arad** respectively. And the same goes with other nodes. # # And `romania_map.locations` contains the positions of each of the nodes. We will use the straight line distance (which is different from the one provided in `romania_map`) between two cities in algorithms like A\*-search and Recursive Best First Search. # # **Define a problem:** # Hmm... say we want to start exploring from **Arad** and try to find **Bucharest** in our romania_map. So, this is how we do it. romania_problem = GraphProblem('Arad', 'Bucharest', romania_map) # # Romania map visualisation # # Let's have a visualisation of Romania map [Figure 3.2] from the book and see how different searching algorithms perform / how frontier expands in each search algorithm for a simple problem named `romania_problem`. # Have a look at `romania_locations`. It is a dictionary defined in search module. 
# We will use these location values to draw the romania graph using **networkx**.

romania_locations = romania_map.locations
print(romania_locations)

# Let's start the visualisations by importing necessary modules. We use
# networkx and matplotlib to show the map in the notebook and we use ipywidgets
# to interact with the map to see how the searching algorithm works.

# +
# %matplotlib inline
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib import lines
from ipywidgets import interact
import ipywidgets as widgets
from IPython.display import display
import time
# -

# Let's get started by initializing an empty graph. We will add nodes, place
# the nodes in their location as shown in the book, add edges to the graph.

# +
# initialise a graph
G = nx.Graph()

# use this while labeling nodes in the map
node_labels = dict()
# use this to modify colors of nodes while exploring the graph.
# This is the only dict we send to `show_map(node_colors)` while drawing the map
node_colors = dict()
for n, p in romania_locations.items():
    # add nodes from romania_locations (p, the coordinate pair, is unused here)
    G.add_node(n)
    # add nodes to node_labels
    node_labels[n] = n
    # node_colors to color nodes while exploring romania map
    node_colors[n] = "white"

# we'll save the initial node colors to a dict to use later
initial_node_colors = dict(node_colors)

# positions for node labels: slightly below the node itself
node_label_pos = {k: [v[0], v[1]-10] for k, v in romania_locations.items()}

# use this while labeling edges
edge_labels = dict()

# add edges between cities in romania map - UndirectedGraph defined in search.py
for node in romania_map.nodes():
    connections = romania_map.get(node)
    for connection in connections.keys():
        distance = connections[connection]
        # add edges to the graph
        G.add_edge(node, connection)
        # add distances to edge_labels
        edge_labels[(node, connection)] = distance
# -

# We have completed building our graph based on romania_map and its locations.
# It's time to display it here in the notebook.
# This function `show_map(node_colors)` helps us do that. We will be calling
# this function later on to display the map at each and every interval step
# while searching, using a variety of algorithms from the book.

def show_map(node_colors):
    """Draw the Romania graph, colouring each city with node_colors[city]."""
    # set the size of the plot
    plt.figure(figsize=(18,13))
    # draw the graph (both nodes and edges) with locations from romania_locations
    nx.draw(G, pos = romania_locations, node_color = [node_colors[node] for node in G.nodes()])

    # draw labels for nodes
    node_label_handles = nx.draw_networkx_labels(G, pos = node_label_pos, labels = node_labels, font_size = 14)
    # add a white bounding box behind the node labels
    [label.set_bbox(dict(facecolor='white', edgecolor='none')) for label in node_label_handles.values()]

    # add edge labels to the graph
    nx.draw_networkx_edge_labels(G, pos = romania_locations, edge_labels=edge_labels, font_size = 14)

    # add a legend: one proxy artist per node colour used during the search
    white_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="white")
    orange_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="orange")
    red_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="red")
    gray_circle = lines.Line2D([], [], color="white", marker='o', markersize=15, markerfacecolor="gray")
    plt.legend((white_circle, orange_circle, red_circle, gray_circle),
               ('Un-explored', 'Frontier', 'Currently exploring', 'Explored'),
               numpoints=1,prop={'size':16}, loc=(.8,.75))

    # show the plot. No need to use in notebooks. nx.draw will show the graph itself.
    plt.show()

# We can simply call the function with node_colors dictionary object to display it.

show_map(node_colors)

# Voila! You see, the romania map as shown in the Figure[3.2] in the book.
# Now, see how different searching algorithms perform with our problem statements.

# ## Searching algorithms visualisations
#
# In this section, we have visualisations of the following searching algorithms:
#
# 1.
# Breadth First Tree Search - Implemented
# 2. Depth First Tree Search
# 3. Depth First Graph Search
# 4. Breadth First Search - Implemented
# 5. Best First Graph Search
# 6. Uniform Cost Search - Implemented
# 7. Depth Limited Search
# 8. Iterative Deepening Search
# 9. A\*-Search - Implemented
# 10. Recursive Best First Search
#
# We add the colors to the nodes to have a nice visualisation when displaying.
# So, these are the different colors we are using in these visuals:
# * Un-explored nodes - <font color='black'>white</font>
# * Frontier nodes - <font color='orange'>orange</font>
# * Currently exploring node - <font color='red'>red</font>
# * Already explored nodes - <font color='gray'>gray</font>
#
# Now, we will define some helper methods to display interactive buttons and
# sliders when visualising search algorithms.

# +
def final_path_colors(problem, solution):
    "returns a node_colors dict of the final path provided the problem and solution"
    # get initial node colors
    final_colors = dict(initial_node_colors)
    # color all the nodes in solution and starting node to green
    final_colors[problem.initial] = "green"
    for node in solution:
        final_colors[node] = "green"
    return final_colors

def display_visual(user_input, algorithm=None, problem=None):
    """Display the ipywidgets UI (slider + Visualize button, and dropdowns when
    user_input is True) that steps through the colour snapshots produced by the
    visualised search algorithms."""
    if user_input == False:
        def slider_callback(iteration):
            # don't show graph for the first time running the cell calling this function
            try:
                show_map(all_node_colors[iteration])
            except:
                pass
        def visualize_callback(Visualize):
            if Visualize is True:
                button.value = False
                # all_node_colors is module-level: the slider callback reads it too
                global all_node_colors
                iterations, all_node_colors, node = algorithm(problem)
                solution = node.solution()
                all_node_colors.append(final_path_colors(problem, solution))
                slider.max = len(all_node_colors) - 1
                # sweep the slider to replay every snapshot automatically
                for i in range(slider.max + 1):
                    slider.value = i
                    # time.sleep(.5)
        slider = widgets.IntSlider(min=0, max=1, step=1, value=0)
        slider_visual = widgets.interactive(slider_callback, iteration = slider)
        display(slider_visual)

        button = widgets.ToggleButton(value = False)
        button_visual = widgets.interactive(visualize_callback, Visualize = button)
        display(button_visual)

    if user_input == True:
        node_colors = dict(initial_node_colors)
        # no fixed algorithm given: let the user pick one from a dropdown
        if algorithm == None:
            algorithms = {"Breadth First Tree Search": breadth_first_tree_search,
                          "Breadth First Search": breadth_first_search,
                          "Uniform Cost Search": uniform_cost_search,
                          "A-star Search": astar_search}
            algo_dropdown = widgets.Dropdown(description = "Search algorithm: ",
                                             options = sorted(list(algorithms.keys())),
                                             value = "Breadth First Tree Search")
            display(algo_dropdown)

        def slider_callback(iteration):
            # don't show graph for the first time running the cell calling this function
            try:
                show_map(all_node_colors[iteration])
            except:
                pass

        def visualize_callback(Visualize):
            if Visualize is True:
                button.value = False
                # build the problem from the currently selected start/goal cities
                problem = GraphProblem(start_dropdown.value, end_dropdown.value, romania_map)
                global all_node_colors
                if algorithm == None:
                    user_algorithm = algorithms[algo_dropdown.value]
                # print(user_algorithm)
                # print(problem)
                iterations, all_node_colors, node = user_algorithm(problem)
                solution = node.solution()
                all_node_colors.append(final_path_colors(problem, solution))
                slider.max = len(all_node_colors) - 1
                for i in range(slider.max + 1):
                    slider.value = i
                    # time.sleep(.5)

        start_dropdown = widgets.Dropdown(description = "Start city: ",
                                          options = sorted(list(node_colors.keys())),
                                          value = "Arad")
        display(start_dropdown)

        end_dropdown = widgets.Dropdown(description = "Goal city: ",
                                        options = sorted(list(node_colors.keys())),
                                        value = "Fagaras")
        display(end_dropdown)

        button = widgets.ToggleButton(value = False)
        button_visual = widgets.interactive(visualize_callback, Visualize = button)
        display(button_visual)

        slider = widgets.IntSlider(min=0, max=1, step=1, value=0)
        slider_visual = widgets.interactive(slider_callback, iteration = slider)
        display(slider_visual)
# -

# ## Breadth first tree search
#
# We have a working implementation in search module.
# But as we want to interact with the graph while it is searching, we need to
# modify the implementation. Here's the modified breadth first tree search.
#
#

# +
def tree_search(problem, frontier):
    """Search through the successors of a problem to find a goal.
    The argument frontier should be an empty queue.
    Don't worry about repeated paths to a state. [Figure 3.7]

    Returns (iterations, all_node_colors, goal_node) on success, None on
    failure.  all_node_colors is one colour-dict snapshot per visual step.
    """
    # we use these two variables at the time of visualisations
    iterations = 0
    all_node_colors = []
    node_colors = dict(initial_node_colors)

    frontier.append(Node(problem.initial))
    node_colors[Node(problem.initial).state] = "orange"
    iterations += 1
    all_node_colors.append(dict(node_colors))

    while frontier:
        node = frontier.pop()

        # modify the currently searching node to red
        node_colors[node.state] = "red"
        iterations += 1
        all_node_colors.append(dict(node_colors))

        if problem.goal_test(node.state):
            # modify goal node to green after reaching the goal
            node_colors[node.state] = "green"
            iterations += 1
            all_node_colors.append(dict(node_colors))
            return (iterations, all_node_colors, node)

        # FIX: the original called node.expand(problem) twice -- once to fill
        # the frontier and once to colour the children -- expanding every node
        # a second time for no reason.  Expand once and reuse the list.
        children = node.expand(problem)
        frontier.extend(children)

        for n in children:
            node_colors[n.state] = "orange"
            iterations += 1
            all_node_colors.append(dict(node_colors))

        # modify the color of explored nodes to gray
        node_colors[node.state] = "gray"
        iterations += 1
        all_node_colors.append(dict(node_colors))

    return None


def breadth_first_tree_search(problem):
    "Search the shallowest nodes in the search tree first."
    # FIFO frontier makes the generic tree_search behave breadth-first
    iterations, all_node_colors, node = tree_search(problem, FIFOQueue())
    return (iterations, all_node_colors, node)
# -

# Now, we use ipywidgets to display a slider, a button and our romania map.
# By sliding the slider we can have a look at all the intermediate steps of a
# particular search algorithm. By pressing the button **Visualize**, you can
# see all the steps without interacting with the slider. These two helper
# functions are the callback functions which are called when we interact with
# the slider and the button.
# # all_node_colors = [] romania_problem = GraphProblem('Arad', 'Fagaras', romania_map) display_visual(user_input = False, algorithm = breadth_first_tree_search, problem = romania_problem) # ## Breadth first search # # Let's change all the node_colors to starting position and define a different problem statement. def breadth_first_search(problem): "[Figure 3.11]" # we use these two variables at the time of visualisations iterations = 0 all_node_colors = [] node_colors = dict(initial_node_colors) node = Node(problem.initial) node_colors[node.state] = "red" iterations += 1 all_node_colors.append(dict(node_colors)) if problem.goal_test(node.state): node_colors[node.state] = "green" iterations += 1 all_node_colors.append(dict(node_colors)) return(iterations, all_node_colors, node) frontier = FIFOQueue() frontier.append(node) # modify the color of frontier nodes to blue node_colors[node.state] = "orange" iterations += 1 all_node_colors.append(dict(node_colors)) explored = set() while frontier: node = frontier.pop() node_colors[node.state] = "red" iterations += 1 all_node_colors.append(dict(node_colors)) explored.add(node.state) for child in node.expand(problem): if child.state not in explored and child not in frontier: if problem.goal_test(child.state): node_colors[child.state] = "green" iterations += 1 all_node_colors.append(dict(node_colors)) return(iterations, all_node_colors, child) frontier.append(child) node_colors[child.state] = "orange" iterations += 1 all_node_colors.append(dict(node_colors)) node_colors[node.state] = "gray" iterations += 1 all_node_colors.append(dict(node_colors)) return None all_node_colors = [] romania_problem = GraphProblem('Arad', 'Bucharest', romania_map) display_visual(user_input = False, algorithm = breadth_first_search, problem = romania_problem) # ## Uniform cost search # # Let's change all the node_colors to starting position and define a different problem statement. 
# +
def best_first_graph_search(problem, f):
    """Search the nodes with the lowest f scores first.
    You specify the function f(node) that you want to minimize; for example,
    if f is a heuristic estimate to the goal, then we have greedy best
    first search; if f is node.depth then we have breadth-first search.
    There is a subtlety: the line "f = memoize(f, 'f')" means that the f
    values will be cached on the nodes as they are computed. So after doing
    a best first search you can examine the f values of the path returned."""
    # we use these two variables at the time of visualisations
    iterations = 0
    all_node_colors = []
    node_colors = dict(initial_node_colors)

    f = memoize(f, 'f')
    node = Node(problem.initial)

    node_colors[node.state] = "red"
    iterations += 1
    all_node_colors.append(dict(node_colors))

    if problem.goal_test(node.state):
        node_colors[node.state] = "green"
        iterations += 1
        all_node_colors.append(dict(node_colors))
        return (iterations, all_node_colors, node)

    frontier = PriorityQueue(min, f)
    frontier.append(node)

    node_colors[node.state] = "orange"
    iterations += 1
    all_node_colors.append(dict(node_colors))

    explored = set()
    while frontier:
        node = frontier.pop()

        node_colors[node.state] = "red"
        iterations += 1
        all_node_colors.append(dict(node_colors))

        # Unlike BFS, graph search goal-tests a node when it is *popped*,
        # which is required for optimality with a priority queue.
        if problem.goal_test(node.state):
            node_colors[node.state] = "green"
            iterations += 1
            all_node_colors.append(dict(node_colors))
            return (iterations, all_node_colors, node)

        explored.add(node.state)

        for child in node.expand(problem):
            if child.state not in explored and child not in frontier:
                frontier.append(child)
                node_colors[child.state] = "orange"
                iterations += 1
                all_node_colors.append(dict(node_colors))
            elif child in frontier:
                # A cheaper path to a node already on the frontier replaces
                # the incumbent entry.
                incumbent = frontier[child]
                if f(child) < f(incumbent):
                    del frontier[incumbent]
                    frontier.append(child)
                    node_colors[child.state] = "orange"
                    iterations += 1
                    all_node_colors.append(dict(node_colors))

        node_colors[node.state] = "gray"
        iterations += 1
        all_node_colors.append(dict(node_colors))

    return None


def uniform_cost_search(problem):
    "[Figure 3.14]"
    iterations, all_node_colors, node = best_first_graph_search(
        problem, lambda node: node.path_cost)
    return (iterations, all_node_colors, node)
# -

all_node_colors = []
romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)
display_visual(user_input=False, algorithm=uniform_cost_search,
               problem=romania_problem)

# ## A* search
#
# Let's change all the node_colors to starting position and define a different problem statement.

# +
# FIX: the original notebook repeated a byte-identical copy of
# best_first_graph_search in this cell.  Rebinding the name to identical
# code was a no-op, so the duplicate has been removed; astar_search reuses
# the definition above.


def astar_search(problem, h=None):
    """A* search is best-first graph search with f(n) = g(n)+h(n).
    You need to specify the h function when you call astar_search, or
    else in your Problem subclass."""
    h = memoize(h or problem.h, 'h')
    iterations, all_node_colors, node = best_first_graph_search(
        problem, lambda n: n.path_cost + h(n))
    return (iterations, all_node_colors, node)
# -

all_node_colors = []
romania_problem = GraphProblem('Arad', 'Bucharest', romania_map)
display_visual(user_input=False, algorithm=astar_search,
               problem=romania_problem)

all_node_colors = []
# display_visual(user_input = True, algorithm = breadth_first_tree_search)
display_visual(user_input=True)
aimacode/search.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import pandas as pd
import numpy as np
import sqlalchemy
import oursql
import matplotlib.pyplot as plt
import seaborn as sns

# to display the notebook's plots (no more --pylab inline)
# #%matplotlib inline


# +
def load_from_mysql():
    """
    Read the data from mysql
    remark: python3/mysql/sqlalchemy connection is a pain,
    I had to install a fork of "oursql" to get this to work
    """
    conn = sqlalchemy.create_engine("mysql+oursql://steve:zissou@localhost/torque")
    df = pd.read_sql('raw_logs', conn)
    return df


def load_torque_keys(keyf='/home/aahu/chinar/ryancompton.net/assets/torque/torque_keys.csv'):
    """
    Read the mapping between human-readable variable names
    and what Torque uses
    """
    d = {}
    with open(keyf, 'r') as fin:
        for line in fin:
            # each line is "<torque_key>,<readable name>"
            d[line.split(',')[0]] = line.split(',')[1].replace('\n', '')
    return d


def load_from_file():
    # Offline snapshot of the same table, with columns renamed readably.
    df = pd.read_csv('/home/aahu/Desktop/torque_data.tsv', sep='\t')
    df = df.rename(columns=load_torque_keys())
    return df
# -

df = load_from_file()

# +
"""
Draw the histogram of my observed mpg data
compare against 24–26 city / 28–32 hwy
"""
dfmpg = df[df['Miles Per Gallon(Instant)'] > 0]
dfmpg = dfmpg[dfmpg['Miles Per Gallon(Instant)'] < 200]  # outliers..
mpg = dfmpg['Miles Per Gallon(Instant)']

mpg.hist(bins=100, label='observed mpg')
plt.axvspan(24, 26, alpha=0.5, color='red', label='24–26 (advertised city mpg)')
plt.axvspan(28, 32, alpha=0.5, color="orange", label='28-32 (advertised hwy mpg)')
plt.xlabel('Miles per gallon')
plt.ylabel('Frequency')
plt.title('Histogram of instantaneous mpg readouts\n\
2003 Suzuki Aerio SX 5sp manual\n\
573 miles traveled. Data collected in Los Angeles using Open Torque Viewer.')
plt.legend()
plt.savefig('/home/aahu/chinar/ryancompton.net/assets/torque/mpg_hist.png')
plt.close()
# -

"""
restrict to speed data that has GPS measurements
and convert to freedom units
"""
dfgps = df[df['Speed (GPS)'] > 0]
df_spd = dfgps[['Speed (GPS)', 'Speed (OBD)']] * 0.621  # convert kph to mph
spd_discrep = abs(df_spd['Speed (GPS)'] - df_spd['Speed (OBD)'])

# +
# plot 1D speed histogram
sns.distplot(spd_discrep, bins=200)
plt.xlim([0, 10])

props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
mu = spd_discrep.mean()
median = spd_discrep.median()
sigma = spd_discrep.std()
textstr = '$\mu=%.2f$\n$\mathrm{median}=%.2f$\n$\sigma=%.2f$' % (mu, median, sigma)
plt.text(8.12, .29, textstr, bbox=props)

plt.xlabel('Discrepancy (mph)')
plt.ylabel('Normalized frequency')
plt.title('Discrepancies between GPS-measured speed and my speedometer')
plt.savefig('/home/aahu/chinar/ryancompton.net/assets/torque/speed_hist.png')
plt.close()
# -

# plot 2D speed histogram
sns.jointplot('Speed (GPS)', 'Speed (OBD)', df_spd, joint_kws={'alpha': 0.25})
plt.savefig('/home/aahu/chinar/ryancompton.net/assets/torque/speed_joint.png')
plt.close()

# +
# conejo grade study
# box that defines when I'm on the grade
lat0 = 34.209165
lng0 = -118.99
lat1 = 34.195597
lng1 = -118.950455
df_conejo = df[(lng0 < df['GPS Longitude']) & (df['GPS Longitude'] < lng1) &
               (lat1 < df['GPS Latitude']) & (df['GPS Latitude'] < lat0)]
df_conejo = df_conejo[df_conejo['Miles Per Gallon(Instant)'] > 0]

# +
"""
2D plot
"""
g = sns.JointGrid('Miles Per Gallon(Instant)', 'Throttle Position(Manifold)',
                  df_conejo, space=0)
g.plot_marginals(sns.distplot, bins=20)  # , shade=True)
g.plot_joint(sns.kdeplot, shade=True, n_levels=20, alpha=.8)
g.plot_joint(plt.scatter, alpha=.5)
plt.xlim([0, 190])
plt.ylim([0, 70])

props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
textstr = 'Downhill'
plt.text(130, 20, textstr, bbox=props)
textstr = 'Uphill'
plt.text(40, 35, textstr, bbox=props)

# seaborn jointplot/jointgrid can't do titles
g.fig.suptitle('Throttle position vs. mpg while driving the Conejo Grade', y=.995)
plt.savefig('/home/aahu/chinar/ryancompton.net/assets/torque/conejo_joint.png')
plt.close()

# +
"""
1D plot
"""
dfmpg = df_conejo[df_conejo['Miles Per Gallon(Instant)'] > 0]
dfmpg = dfmpg[dfmpg['Miles Per Gallon(Instant)'] < 200]
mpg = dfmpg['Miles Per Gallon(Instant)']

mpg.hist(bins=100, label='observed mpg')
plt.axvspan(24, 26, alpha=0.5, color='red', label='24–26 (advertised city mpg)')
plt.axvspan(28, 32, alpha=0.5, color="orange", label='28-32 (advertised hwy mpg)')
plt.xlabel('Miles per gallon')
plt.ylabel('Frequency')
plt.title('Histogram of instantaneous mpg readouts while driving the Conejo Grade')
plt.legend()

props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
textstr = 'Downhill'
plt.text(160, 25, textstr, bbox=props)
textstr = 'Uphill'
plt.text(1, 25, textstr, bbox=props)
plt.savefig('/home/aahu/chinar/ryancompton.net/assets/torque/mpg_hist_conejo.png')
plt.close()

# +
"""
I could not figure from the table how much distance I've traveled in total??
Figure distance traveled by integrating speed...
"""
from scipy import integrate
import pandas as pd
import numpy as np


def integrate_method(self, how='trapz', unit='s'):
    '''Numerically integrate the time series.

    @param how: the method to use (trapz by default)
    @return

    Available methods:
     * trapz - trapezoidal
     * cumtrapz - cumulative trapezoidal
     * simps - Simpson's rule
     * romb - Romberger's rule

    See http://docs.scipy.org/doc/scipy/reference/integrate.html for the method details.
    or the source code
    https://github.com/scipy/scipy/blob/master/scipy/integrate/quadrature.py
    '''
    available_rules = set(['trapz', 'cumtrapz', 'simps', 'romb'])
    if how in available_rules:
        rule = integrate.__getattribute__(how)
    else:
        print('Unsupported integration rule: %s' % (how))
        print('Expecting one of these sample-based integration rules: %s'
              % (str(list(available_rules))))
        raise AttributeError
    # Index is ns since epoch; divide by 1e9 to integrate over seconds.
    result = rule(self.values, self.index.astype(np.int64) / 10**9)
    # result = rule(self.values)
    return result


# FIX: `pd.TimeSeries` was a deprecated alias that has been removed from
# pandas; a time series *is* a Series, so attach the helper there instead.
pd.Series.integrate = integrate_method

# integrate to get total distance...
ts = df[['Timestamp', 'Speed (OBD)']]

import datetime

# FIX: work on an explicit copy so the Timestamp rewrite does not trigger
# chained-assignment (SettingWithCopy) behavior on a view of `df`.
df2 = df[['Timestamp', 'Speed (OBD)']].copy()
df2['Timestamp'] = df2['Timestamp'].map(lambda x: datetime.datetime.fromtimestamp(x // 1000))

# FIX: old pandas let `.resample('s')` aggregate implicitly; current pandas
# requires an explicit aggregation, with empty bins filled afterwards.
ts = df2.set_index('Timestamp')['Speed (OBD)'].resample('s').mean().fillna(0)
# mph sampled once per second -> miles per sample
ts = ts / (60 * 60)

ts.integrate('simps')  # total miles traveled
# -
assets/torque/mpg_plots.py.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# Goal: regular experiments with 2 factors
# ===========================================
#
# Increase sales from a small online business.
#
# Data available:
#
# * **S** = free shipping over €30 or over €50 [numeric factor]
# * **P** = purchaser must provide a profile [categorical factor]

from process_improve import *
from bokeh.plotting import output_notebook

output_notebook()

# +
# S = Free shipping if order amount is €30 or more [-1], or if order amount is over €50 [+1]
S = c(-1, +1, -1, +1, -1, +1, -1, +1, name='Free shipping amount')

# P = Does the purchaser need to create a profile first [+1] or not [-1]?
P = c(-1, -1, +1, +1, -1, -1, +1, +1, name='Create profile: No/Yes')

# Response: daily sales amount
y = c(348, 359, 327, 243, 356, 363, 296, 257, units="€ sales")

# Linear model using S, P and S*P to predict the response
expt = gather(S=S, P=P, y=y, title='Experiments to boost sales')
model_sales = lm("y ~ S*P", expt)
# -

summary(model_sales);

# The model shows that:
#
# * The $R^2$ value is ____
# * The standard error has a value of _____ <units?> which indicates _____
# * Compare this standard error of ____ to the spread (the deviation) between repeated experiments run under the same conditions.
#
# Interpretation of the model coefficients
# ----------------------------------------
#
# * Coefficient **S** has a value of _____, which shows that sales ______ (increase/decrease) by €13 for every € ____ increase in the free-shipping threshold, keeping all other factors constant.
# * Coefficient **P** has a value of _____, which shows that sales ______ (increase/decrease) by ____ if ______________________, keeping all other factors constant.
#
#

contour_plot(model_sales, dpi=80);

# From the contour plot, and the model above, this advice can be given:
#
# * ___
# * ___

# Experiments with mistakes
# ===========================
#
# Imagine one experiment was run incorrectly by mistake:
#
# * free shipping was set to €60, and required a profile.
# * Instead of € 2570 sales, a value of €2200 was recorded.
#
# *Original model*:
#
# * Intercept: ___
# * Effect of **S**: ____
# * Effect of **P**: ____
#
# *With the mistake*:
#
# * Intercept: ___
# * Effect of **S**: ____
# * Effect of **P**: ____

# +
# S = Free shipping if order amount is €30 or more [-1], or if order amount is over €50 [+1]
# Notice that a mistake was made with the last experiment: order minimum for
# free shipping was €60 [+2].
S = c(-1, +1, -1, +1, -1, +1, -1, +2, name='Free shipping amount')

# P = Does the purchaser need to create a profile first [+1] or not [-1]?
P = c(-1, -1, +1, +1, -1, -1, +1, +1, name='Create profile: No/Yes')

# Response: daily sales amount
y = c(348, 359, 327, 243, 356, 363, 296, 220, units="€ sales")

# Linear model using S, P and S*P to predict the response
expt = gather(S=S, P=P, y=y, title='Experiments to boost sales')
model_sales_mistake = lm("y ~ S*P", expt)

summary(model_sales_mistake)
contour_plot(model_sales_mistake, dpi=80);
Samples/doe-notebooks-master/Week4--Full-factorials-2-factors.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # <center> # <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/Logos/organization_logo/organization_logo.png" width="300" alt="cognitiveclass.ai logo" /> # </center> # # <h1 align=center><font size = 5>Assignment: Notebook for Peer Assignment</font></h1> # # # Introduction # # Using this Python notebook you will: # # 1. Understand three Chicago datasets # 2. Load the three datasets into three tables in a Db2 database # 3. Execute SQL queries to answer assignment questions # # ## Understand the datasets # # To complete the assignment problems in this notebook you will be using three datasets that are available on the city of Chicago's Data Portal: # # 1. <a href="https://data.cityofchicago.org/Health-Human-Services/Census-Data-Selected-socioeconomic-indicators-in-C/kn9c-c2s2">Socioeconomic Indicators in Chicago</a> # 2. <a href="https://data.cityofchicago.org/Education/Chicago-Public-Schools-Progress-Report-Cards-2011-/9xs2-f89t">Chicago Public Schools</a> # 3. <a href="https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2">Chicago Crime Data</a> # # ### 1. Socioeconomic Indicators in Chicago # # This dataset contains a selection of six socioeconomic indicators of public health significance and a “hardship index,” for each Chicago community area, for the years 2008 – 2012. 
# # A detailed description of this dataset and the original dataset can be obtained from the Chicago Data Portal at: # [https://data.cityofchicago.org/Health-Human-Services/Census-Data-Selected-socioeconomic-indicators-in-C/kn9c-c2s2](https://data.cityofchicago.org/Health-Human-Services/Census-Data-Selected-socioeconomic-indicators-in-C/kn9c-c2s2?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) # # ### 2. Chicago Public Schools # # This dataset shows all school level performance data used to create CPS School Report Cards for the 2011-2012 school year. This dataset is provided by the city of Chicago's Data Portal. 
# # A detailed description of this dataset and the original dataset can be obtained from the Chicago Data Portal at: # [https://data.cityofchicago.org/Education/Chicago-Public-Schools-Progress-Report-Cards-2011-/9xs2-f89t](https://data.cityofchicago.org/Education/Chicago-Public-Schools-Progress-Report-Cards-2011-/9xs2-f89t?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) # # ### 3. Chicago Crime Data # # This dataset reflects reported incidents of crime (with the exception of murders where data exists for each victim) that occurred in the City of Chicago from 2001 to present, minus the most recent seven days. # # A detailed description of this dataset and the original dataset can be obtained from the Chicago Data Portal at: # [https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2](https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ) # # ### Download the datasets # # This assignment requires you to have these three tables populated with a subset of the whole datasets. 
# # In many cases the dataset to be analyzed is available as a .CSV (comma separated values) file, perhaps on the internet. Click on the links below to download and save the datasets (.CSV files): # # - <a href="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/FinalModule_Coursera_V5/data/ChicagoCensusData.csv" target="_blank">Chicago Census Data</a> # # - <a href="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/FinalModule_Coursera_V5/data/ChicagoPublicSchools.csv" target="_blank">Chicago Public Schools</a> # # - <a href="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/FinalModule_Coursera_V5/data/ChicagoCrimeData.csv" target="_blank">Chicago Crime Data</a> # # **NOTE:** Ensure you have downloaded the datasets using the links above instead of directly from the Chicago Data Portal. The versions linked here are subsets of the original datasets and have some of the column names modified to be more database friendly which will make it easier to complete this assignment. # # ### Store the datasets in database tables # # To analyze the data using SQL, it first needs to be stored in the database. # # While it is easier to read the dataset into a Pandas dataframe and then PERSIST it into the database as we saw in Week 3 Lab 3, it results in mapping to default datatypes which may not be optimal for SQL querying. For example a long textual field may map to a CLOB instead of a VARCHAR. # # Therefore, **it is highly recommended to manually load the table using the database console LOAD tool, as indicated in Week 2 Lab 1 Part II**. The only difference with that lab is that in Step 5 of the instructions you will need to click on create "(+) New Table" and specify the name of the table you want to create and then click "Next". 
# #
# <img src = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork/labs/FinalModule_Coursera_V5/images/LoadingData.png">
#
# ##### Now open the Db2 console, open the LOAD tool, Select / Drag the .CSV file for the first dataset, Next create a New Table, and then follow the steps on-screen instructions to load the data. Name the new tables as follows:
#
# 1. **CENSUS_DATA**
# 2. **CHICAGO_PUBLIC_SCHOOLS**
# 3. **CHICAGO_CRIME_DATA**
#
# ### Connect to the database
#
# Let us first load the SQL extension and establish a connection with the database
#

# !pip install ipython-sql
# !pip install ibm-db-sa
# !pip install sqlalchemy==1.3.9

import os

# Hard-restart the kernel so the freshly pip-installed packages are picked up.
os._exit(00)

import ibm_db

# +
# NOTE(review): credentials are hard-coded in the notebook (and the password
# also appears in plain text in the %sql connection string below) — move them
# to environment variables before sharing this notebook.
dsn_hostname = "dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net"  # e.g.: "dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net"
dsn_uid = "dhk96058"        # e.g. "abc12345"
dsn_pwd = "<PASSWORD>"      # e.g. "<PASSWORD>"

dsn_driver = "{IBM DB2 ODBC DRIVER}"
dsn_database = "BLUDB"      # e.g. "BLUDB"
dsn_port = "50000"          # e.g. "50000"
dsn_protocol = "TCPIP"      # i.e. "TCPIP"

# +
dsn = (
    "DRIVER={0};"
    "DATABASE={1};"
    "HOSTNAME={2};"
    "PORT={3};"
    "PROTOCOL={4};"
    "UID={5};"
    "PWD={6};").format(dsn_driver, dsn_database, dsn_hostname, dsn_port,
                       dsn_protocol, dsn_uid, dsn_pwd)

# print the connection string to check correct values are specified
print(dsn)

# +
# Create database connection
try:
    conn = ibm_db.connect(dsn, "", "")
    print("Connected to database: ", dsn_database, "as user: ", dsn_uid, "on host: ", dsn_hostname)
# FIX: was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt.
except Exception:
    print("Unable to connect: ", ibm_db.conn_errormsg())
# -

# %load_ext sql

# %sql ibm_db_sa://dhk96058:5v6tm9g40kwlc%405v@dashdb-txn-sbox-yp-dal09-08.services.dal.bluemix.net:50000/BLUDB

# ## Problems
#
# Now write and execute SQL queries to solve assignment problems
#
# ### Problem 1
#
# ##### Find the total number of crimes recorded in the CRIME table.
#

# %sql SELECT COUNT(ID) FROM chicago_crime_data;

# ### Problem 2
#
# ##### List community areas with per capita income less than 11000.
#

# FIX: the original query filtered on `< 110000` (one zero too many, so it
# matched nearly every area) and queried `chicago_socioeconomic_data`, a table
# this notebook never loads — the census CSV was loaded as CENSUS_DATA, the
# table every other problem uses.
# %sql SELECT community_area_name FROM census_data WHERE per_capita_income_ < 11000;

# ### Problem 3
#
# ##### List all case numbers for crimes involving minors?
#

# %sql SELECT case_number FROM chicago_crime_data WHERE description LIKE '%MINOR%';

# ### Problem 4
#
# ##### List all kidnapping crimes involving a child?
#

# %sql SELECT * FROM chicago_crime_data WHERE primary_type = 'KIDNAPPING' AND description LIKE '%CHILD%';

# ### Problem 5
#
# ##### What kinds of crimes were recorded at schools?
#

# %sql SELECT DISTINCT(primary_type) FROM chicago_crime_data WHERE location_description LIKE '%SCHOOL%';

# ### Problem 6
#
# ##### List the average safety score for all types of schools.
#

# %sql SELECT AVG(safety_score) AS average_score FROM chicago_public_schools;

# ### Problem 7
#
# ##### List 5 community areas with highest % of households below poverty line
#

# + magic_args="SELECT community_area_name, percent_households_below_poverty" language="sql"
# FROM census_data
# ORDER BY percent_households_below_poverty DESC nulls last LIMIT 5;
# -

# ### Problem 8
#
# ##### Which community area is most crime prone?
# # + language="sql" # SELECT cd.community_area_name AS most_criminal_community # FROM census_data cd, (SELECT ccd.community_area_number AS community_area_number, COUNT(ccd.community_area_number) AS crime_number # FROM chicago_crime_data ccd # GROUP BY ccd.community_area_number # ORDER BY crime_number DESC LIMIT 1) mostcrimearea # WHERE cd.community_area_number = mostcrimearea.community_area_number; # - # ### Problem 9 # # ##### Use a sub-query to find the name of the community area with highest hardship index # # + magic_args="SELECT community_area_name,hardship_index FROM census_data" language="sql" # WHERE hardship_index = (SELECT MAX(hardship_index) FROM census_data); # - # ### Problem 10 # # ##### Use a sub-query to determine the Community Area Name with most number of crimes? # # + language="sql" # SELECT cd.community_area_name AS most_criminal_community, mostcrimearea.crime_number # FROM census_data cd, (SELECT ccd.community_area_number AS community_area_number, COUNT(ccd.community_area_number) AS crime_number # FROM chicago_crime_data ccd # GROUP BY ccd.community_area_number # ORDER BY crime_number DESC LIMIT 1) mostcrimearea # WHERE cd.community_area_number = mostcrimearea.community_area_number; # - # Copyright © 2020 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). 
This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license?cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ&cm_mmc=Email_Newsletter-_-Developer_Ed%2BTech-_-WW_WW-_-SkillsNetwork-Courses-IBMDeveloperSkillsNetwork-DB0201EN-SkillsNetwork-20127838&cm_mmca1=000026UJ&cm_mmca2=10006555&cm_mmca3=M12345678&cvosrc=email.Newsletter.M12345678&cvo_campaign=000026UJ). #
Final_Assignment.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# # EDA of N2 400mM 10s ISI data
# <NAME> | July 4, 2020
#
# Some data seems to be ethanol data. Let's take a look with neural network machine learning predictions

import os, glob, sys
import numpy as np
import pandas as pd
import scipy.integrate as integrate
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns

# + tags=[]
# keep going up parent until find config.py or hit Dropbox/CA or Code/
dir_path = os.getcwd()
config_file = dir_path + '/config.py'
dir_top = 'Dropbox/CA'
while not os.path.isfile(config_file):
    # go up a level
    parent = os.path.dirname(dir_path)
    # FIX: the original loop only stopped when dir_path equalled the relative
    # string 'Dropbox/CA', which an absolute dirname() result never does, so
    # a missing config.py spun forever at the filesystem root.  Also break
    # once dirname() stops changing (i.e. we reached the root).
    if parent == dir_path or dir_path == dir_top:
        print(f'reaching dir_top: {dir_top}')
        break
    dir_path = parent
    # get next level config
    config_file = dir_path + '/config.py'
    print(config_file)
print(f'found config here: {config_file}')

# import config
sys.path.insert(0, dir_path)
import config

# get measures
config.MEASURES

# +
# plot to see individuals
# NOTE(review): `df` is not created anywhere in this chunk — presumably a
# loading cell exists elsewhere in the notebook; confirm before running
# top-to-bottom.
msr = 'RevFreq'
plt.figure()
normal = df.index.get_level_values(1) == '0mM'
alcohol = df.index.get_level_values(1) == '400mM'
plt.plot(df[msr].loc[normal].values.transpose(), color='black', alpha=0.4)
plt.plot(df[msr].loc[alcohol].values.transpose(), color='red', alpha=0.1)
plt.show()
# FIX: the original cell contained a second, byte-identical copy of the
# plotting code above (a paste error); the duplicate has been removed.

# + tags=[]
# -
3-Results/Fig2-wt 400mM/eda.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda root]
#     language: python
#     name: conda-root-py
# ---

# # Self-Driving Car Engineer Nanodegree
#
# ## Deep Learning
#
# ## Project: Build a Traffic Sign Recognition Classifier

# ---
#
# ## Step 1: Dataset Exploration
#
#
# The pickled data is a dictionary with 4 key/value pairs:
#
# - features -> the images pixel values, (width, height, channels)
# - labels -> the label of the traffic sign
# - sizes -> the original width and height of the image, (width, height)
# - coords -> coordinates of a bounding box around the sign in the image, (x1, y1, x2, y2). Based the original image (not the resized version).

# +
# Load pickled data
import pickle
import os

training_file = "./train.p"
testing_file = "./test.p"

with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)

X_train, y_train = train['features'], train['labels']
X_test, y_test = test['features'], test['labels']

# +
### To start off let's do a basic data summary.
import random
import numpy as np

# Fraction of each split to keep; 1 keeps (a bootstrap resample of) everything.
keep_ratio = 1
image_shape = X_train[0].shape

# Sample indices with replacement, then sub-sample both splits the same way.
train_idx = np.random.randint(0, X_train.shape[0], size=(X_train.shape[0] * keep_ratio))
n_train = int(X_train.shape[0] * keep_ratio)
test_idx = np.random.randint(0, X_test.shape[0], size=(X_test.shape[0] * keep_ratio))
n_test = int(X_test.shape[0] * keep_ratio)

X_train, y_train = X_train[train_idx], y_train[train_idx]
X_test, y_test = X_test[test_idx], y_test[test_idx]

# Labels are assumed to be 0..max contiguous class ids.
n_classes = y_train.max() + 1

print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)

# +
### Data exploration visualization goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
import random
# %matplotlib inline

# Show 8 random training images in a 2x4 grid as a quick sanity check.
fig = plt.figure()
for i in range(1, 9):
    a=fig.add_subplot(2,4,i)
    idx = random.randint(0, n_train)
    plt.imshow(X_train[idx])
# -

# ----
#
# ## Step 2: Design and Test a Model Architecture
#
# The model is trained and tested on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).

# +
### Preprocess the data here.
### Feel free to use as many code cells as needed.
import cv2
import numpy as np
from sklearn import preprocessing


def rgb_to_grayscale(images, flatten=0):
    """
    images: matrix of RGB images
    return: flattened grayscale images

    Grayscale is the plain mean over the channel axis. With flatten truthy
    each (H, W) plane is flattened into a single vector per image; otherwise
    a trailing singleton channel axis is kept.
    """
    image_shape = images.shape
    if flatten:
        return np.average(images, axis=3).reshape(image_shape[0], image_shape[1] * image_shape[2])
    else:
        return np.average(images, axis=3).reshape(image_shape[0], image_shape[1], image_shape[2], 1)


def normalize(images, flatten=0):
    """
    images: matrix of grayscale
    return: mean subtracted, scaled between -1 and 1

    NOTE(review): the early `return images` below makes everything after it
    dead code, so this function is currently the IDENTITY — the docstring
    above does not match the actual behavior. This looks like a deliberate
    toggle to disable normalization (the saved checkpoints were trained this
    way), so it is documented here rather than changed.
    """
    return images
    n_train = images.shape[0]
    if flatten:
        subtracted_mean = images - np.mean(images, axis=1).reshape(n_train, 1)
    else:
        subtracted_mean = images - np.mean(images)
    return subtracted_mean
    #return preprocessing.scale(images)
    #min_max_scaler = preprocessing.MinMaxScaler(feature_range=(-1,1))
    #return min_max_scaler.fit_transform(subtracted_mean)
# -

# ### Question 1
#
# _Describe the techniques used to preprocess the data._

# **Answer:**
#
# I initially tried preprocessing the images by obtaining the grayscale, and flattening the images into a single color channel. Additionally, I applied min-max scaling to have the values between 0 and 1 to ensure that all features (or pixels) are treated equally, and hence improves the accuracy of the classifier.
#
# I then decided to keep the 3 color channels, as color is important in determining the meaning of a traffic sign.
# # The labels use one-hot encoding since that is the input to the model, and was computed with the sklearn module. # + ### Generate data additional (if you want to!) ### and split the data into training/validation/testing sets here. ### Feel free to use as many code cells as needed. from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelBinarizer """ X_train_gray = rgb_to_grayscale(X_train) X_train_normalized = normalize(X_train_gray) X_test_gray = rgb_to_grayscale(X_test) test_features = normalize(X_test_gray) """ train_features = normalize(X_train) test_features = normalize(X_test) encoder = LabelBinarizer() encoder.fit(y_train) train_labels = encoder.transform(y_train) test_labels = encoder.transform(y_test) train_labels = train_labels.astype(np.float32) test_labels = test_labels.astype(np.float32) # Get randomized datasets for training and validation train_features, valid_features, train_labels, valid_labels = train_test_split( train_features, train_labels, test_size=0.05, random_state=832289) # + import os pickle_file = 'traffic_signs_preprocessed.pickle' if not os.path.isfile(pickle_file): print('Saving data to pickle file...') try: with open(pickle_file, 'wb') as pfile: pickle.dump( { 'train_dataset': train_features, 'train_labels': train_labels, 'valid_dataset': valid_features, 'valid_labels': valid_labels, 'test_dataset': test_features, 'test_labels': test_labels, }, pfile, pickle.HIGHEST_PROTOCOL) except Exception as e: print('Unable to save data to', pickle_file, ':', e) raise print('Data cached in pickle file.') # - # ### Question 2 # # _Describe how you set up the training, validation and testing data for your model. If you generated additional data, why?_ # **Answer:** # # The training set was divided into two groups: the training and the validation data. Using sklearn's train_test_split, I chose 5% of the total training data to be used for validation, ensuring that we aren't overfitting to the training data. 
# The test set is kept as is.
#
# To improve the model, fake data can be generated by manipulating the current training data and applying some image processing to simulate new data. Since traffic signs in the real world can be affected by different lighting conditions, obstructed and many more things, we can fake these conditions by changing the luminosity of the image, adding shadows, rotating the signs, adding noise and cropping out the images to have more data.

# +
import tensorflow as tf

# Start from a clean graph so re-running this cell doesn't duplicate ops
# (pre-TF-1.0 API is used throughout this notebook).
tf.reset_default_graph()


def get_weight(shape):
    # Truncated-normal init keeps initial activations small.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))


def get_bias(shape, constant=1):
    # NOTE(review): with the default constant=1 this returns tf.constant(0.1),
    # i.e. a NON-trainable bias; only constant != 1 yields a trainable
    # Variable (zeros). Most layers below use the default, so their biases
    # never train — confirm this is intentional.
    if constant != 1:
        return tf.Variable(tf.zeros(shape))
    else:
        return tf.constant(0.1, shape=shape)


def get_conv2d(x, W, stride):
    # 2-D convolution with SAME padding (output spatial size = input / stride).
    return tf.nn.conv2d(x, W, [1, stride, stride, 1], padding='SAME')


def get_loss(logits, y_true):
    # Mean softmax cross-entropy over the batch (old positional-arg API).
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits, y_true)
    #cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits, y_true)
    loss = tf.reduce_mean(cross_entropy)
    return loss


def get_maxpool2d(x, k=2):
    # k x k max-pooling with stride k (halves H and W for k=2).
    return tf.nn.max_pool(
        x,
        ksize=[1, k, k, 1],
        strides=[1, k, k, 1],
        padding='SAME')


def inference(images, keep_prob):
    """Build the forward graph: 3 conv+pool layers, then 3 fully connected.

    images: batch of 32x32x3 inputs; keep_prob: dropout keep probability.
    Returns (logits, L2 regularization term over the FC weights/biases).
    """
    n_features = image_shape[0] * image_shape[1]   # unused below
    filter_size_width = 5
    filter_size_height = 5
    color_channels = 3
    k_output = [32, 64, 192, 256]
    learning_rate = 0.001   # unused below (optimizer is built outside)

    # conv0 layer : 32 x 32 x 3
    with tf.name_scope('conv0'):
        W_conv0 = get_weight([filter_size_width, filter_size_height, color_channels, k_output[0]])
        b_conv0 = get_bias([k_output[0]], constant=0)
        conv0 = get_conv2d(images, W_conv0, stride=1)
        h_conv0 = tf.nn.relu(conv0 + b_conv0)
        h_conv0 = get_maxpool2d(h_conv0, k=2)

    # conv1 layer : 16 x 16 x 32
    with tf.name_scope('conv1'):
        W_conv1 = get_weight([filter_size_width, filter_size_height, k_output[0], k_output[1]])
        b_conv1 = get_bias([k_output[1]])
        conv1 = get_conv2d(h_conv0, W_conv1, stride=1)
        h_conv1 = tf.nn.relu(conv1 + b_conv1)
        h_conv1 = get_maxpool2d(h_conv1, k=2)

    # conv2 layer : 8 x 8 x 64
    with tf.name_scope('conv2'):
        W_conv2 = get_weight([filter_size_width, filter_size_height, k_output[1], k_output[2]])
        b_conv2 = get_bias([k_output[2]])
        conv2 = get_conv2d(h_conv1, W_conv2, stride=1)
        h_conv2 = tf.nn.relu(conv2 + b_conv2)
        h_conv2 = get_maxpool2d(h_conv2, k=2)

    # fc1 layer : 4 x 4 x 192
    with tf.name_scope('fc1'):
        prev_layer_shape = h_conv2.get_shape().as_list()
        prev_dim = prev_layer_shape[1] * prev_layer_shape[2] * prev_layer_shape[3]
        W_fc1 = get_weight([prev_dim, 512])
        b_fc1 = get_bias([512])
        h_conv2_flat = tf.reshape(h_conv2, [-1, prev_dim])
        # 1 x 1 x 3072
        fc1 = tf.matmul(h_conv2_flat, W_fc1) + b_fc1
        fc1 = tf.nn.relu(fc1)

    # fc2 layer : 1 x 1 x 512
    with tf.name_scope('fc2'):
        W_fc2 = get_weight([512, 256])
        b_fc2 = get_bias([256])
        fc2 = tf.matmul(fc1, W_fc2) + b_fc2
        fc2 = tf.nn.relu(fc2)
        # Fixed seed keeps dropout reproducible across runs.
        fc2 = tf.nn.dropout(fc2, keep_prob=keep_prob, seed=66478)

    # fc3 layer : 1 x 1 x 256
    with tf.name_scope('fc3'):
        W_fc3 = get_weight([256, n_classes])
        b_fc3 = get_bias([n_classes])
        fc3 = tf.matmul(fc2, W_fc3) + b_fc3
        #fc3 = tf.nn.relu(fc3)

    # 1 x 1 x 43
    # L2 regularization for the fully connected parameters.
    regularizers = (tf.nn.l2_loss(W_fc1) + tf.nn.l2_loss(b_fc1) +
                    tf.nn.l2_loss(W_fc2) + tf.nn.l2_loss(b_fc2) +
                    tf.nn.l2_loss(W_fc3) + tf.nn.l2_loss(b_fc3))

    return fc3, regularizers


# Graph inputs: image batch, one-hot labels, dropout keep probability.
x = tf.placeholder(tf.float32, [None, image_shape[0], image_shape[1], image_shape[2]])
y = tf.placeholder(tf.float32, [None, n_classes])
keep_prob = tf.placeholder(tf.float32)

logits, regularizers = inference(x, keep_prob)

######## testing ########
# NOTE(review): unused — the AdamOptimizer below is built with a 5e-4 literal.
learning_rate = 0.0001
loss = get_loss(logits, y)
# Add the regularization term to the loss.
loss += 5e-4 * regularizers

with tf.name_scope('accuracy'):
    # Determine if the predictions are correct
    is_correct_prediction = tf.equal(tf.argmax(tf.nn.softmax(logits), 1), tf.argmax(y, 1))
    # Calculate the accuracy of the predictions
    accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))

# TensorBoard summaries (pre-TF-1.0 API names).
tf.scalar_summary('accuracy', accuracy)
# Add a scalar summary for the snapshot loss.
tf.scalar_summary("loss_value", loss)

# Create a variable to track the global step.
global_step = tf.Variable(0, name='global_step', trainable=False)

#optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)#, global_step=global_step)
# NOTE(review): the effective learning rate is the 5e-4 literal here, not the
# `learning_rate` variable defined earlier.
optimizer = tf.train.AdamOptimizer(5e-4).minimize(loss)

# Build the summary Tensor based on the TF collection of Summaries.
summary = tf.merge_all_summaries()

init = tf.initialize_all_variables()

# Create a saver for writing training checkpoints.
saver = tf.train.Saver()

# +
import time

# Training hyperparameters and per-batch history buffers for later plotting.
training_epochs = 50
batch_size = 100
display_step = 1       # unused below
log_batch_step = 50    # record loss/accuracy every 50 batches
dropout_keep_prob = 0.5

batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []

# Feed dicts for training, validation, and test session
train_feed_dict = {x: train_features, y: train_labels, keep_prob: dropout_keep_prob}
valid_feed_dict = {x: valid_features, y: valid_labels, keep_prob: 1.0}
test_feed_dict = {x: test_features, y: test_labels, keep_prob: 1.0}

log_dir = "data3"
# Instantiate a SummaryWriter to output summaries and the Graph.
with tf.Session() as sess: summary_writer = tf.train.SummaryWriter(log_dir, sess.graph) #sess.run(init) saver.restore(sess, "data3/checkpoint-14") print("Model restored.") total_batches = int(len(train_features)/batch_size) for epoch in range(training_epochs): start_time = time.time() for i in range(total_batches): batch_start = i * batch_size batch_features = train_features[batch_start:batch_start + batch_size] batch_labels = train_labels[batch_start:batch_start + batch_size] _, l = sess.run( [optimizer, loss], feed_dict={x: batch_features, y: batch_labels, keep_prob: 0.8}) if i % log_batch_step == 0: previous_batch = batches[-1] if batches else 0 batches.append(log_batch_step + previous_batch) training_accuracy = sess.run(accuracy, feed_dict={x: batch_features, y: batch_labels, keep_prob: 1.0}) validation_accuracy = sess.run(accuracy, feed_dict=valid_feed_dict) loss_batch.append(l) train_acc_batch.append(training_accuracy) valid_acc_batch.append(validation_accuracy) duration = time.time() - start_time print("Epoch:", '%04d' % (epoch+1), "Step: %d" % (epoch * batch_size + i), "loss =", \ "{:.9f}".format(l), "Accuracy: %.7f" % (validation_accuracy),"duration = ", duration) summary_str = sess.run(summary, feed_dict=valid_feed_dict) summary_writer.add_summary(summary_str, epoch) summary_writer.flush() checkpoint_file = os.path.join(log_dir, 'checkpoint') saver.save(sess, checkpoint_file, global_step=epoch) # Check accuracy against Validation data validation_accuracy = sess.run(accuracy, feed_dict=valid_feed_dict) print("Validation Accuracy:", validation_accuracy) #test_accuracy = sess.run(accuracy, feed_dict=test_feed_dict) #print("Test Accuracy:", test_accuracy) # - # ### Question 3 # # _What does your final architecture look like? (Type of model, layers, sizes, connectivity, etc.) 
For reference on how to build a deep neural network using TensorFlow, see [Deep Neural Network in TensorFlow # ](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/b516a270-8600-4f93-a0a3-20dfeabe5da6/concepts/83a3a2a2-a9bd-4b7b-95b0-eb924ab14432) from the classroom._ # # **Answer:** # # My final architecture has 3 convolutional layers which use a ReLu as activation function, followed by a maxpool with a stride of 2. Spatially, the input image dimensions (width and height) are reduced progressively, but increase in depth. # # The network then includes 3 fully connected layers, where the final layer has a size of 43 classes. Before the third and final fully connected layer, a dropout component was added with a certain probability. # + ### Train your model here. ### Feel free to use as many code cells as needed. # Parameters # - # ### Question 4 # # _How did you train your model? (Type of optimizer, batch size, epochs, hyperparameters, etc.)_ # # **Answer:** # # Initially, I used the GradientDescentOptimizer, and tried with various step sizes (0.001 and 0.0001 for example). However, I would either hit a plateau around ~3.00 loss and always obtain less than 10% accuracy. # # Using AdamOptimizer obtained significantly better results. I got passed the initial local minimum where I seemed to get stuck with the previous optimizer. One down side of this optimizer is that it is slightly longer to compute. # # In both cases, I experimented with batch sizes of 50 and 100 (the difference was not thoroughly compared). It took approximately 20 epochs to obtain a loss of 0.50 and accuracy over 90%. The plots from Tensorboard are attached below. 
#
# <img src="http://i.imgur.com/Wbgkihh.png">

# ### Question 5
#
# _What approach did you take in coming up with a solution to this problem?_

# **Answer:**
#
# To come up with the model architecture, I was inspired by AlexNet which has proven to be effective at classifying images (see ImageNet results) and is relatively simple to implement. Since I am limited by my computing resources, I tried to simplify the architecture as much as I could, without compromising performance. In the end, my model includes convolutional layers to capture enough complexity in the image data, fully connected layers to finally end up with the class predictions and pooling to downscale and reduce the dimensions of the images.
#
# This model is a simplified version of AlexNet which allowed for reasonable training time given my resources, but still resulted in good performance (> 96%) as seen above.
#
# Finally, to prevent overfitting, I added dropout layers with 50% probability, and added regularization to penalize weights with higher values.
#
# As mentioned above, I tried to play around with the learning rate to overcome the issues I had. For instance, my learning rate value was too high, which caused the loss to diverge (it was reaching Inf/NaN). After lowering the learning rate, I got into a lot of plateaus and local minima. Tweaking the learning rate was enough to get past the difficulties. In the above plot, we can see the past tries where I struggled to improve the loss/accuracy.

# ---
#
# ## Step 3: Test a Model on New Images
#
# Take several pictures of traffic signs that you find on the web or around you (at least five), and run them through your classifier on your computer to produce example results. The classifier might not recognize some local signs but it could prove interesting nonetheless.
#
# You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
# ### Implementation
#
# Use the code cell (or multiple code cells, if necessary) to implement the first step of your project. Once you have completed your implementation and are satisfied with the results, be sure to thoroughly answer the questions that follow.

# +
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
import matplotlib.image as mpimg
from PIL import Image

# Candidate photos of traffic signs, with their true class ids
# (7 = 100 km/h, 3 = 60 km/h, 14 = stop, 13 = yield, 12 = priority road).
# The five identical open/resize/convert stanzas were collapsed into one loop;
# each image is resized to the 32x32 network input size.
image_files = ['100km.jpg', '60km.jpg', 'stop-quebec.JPG', 'yield.jpg', 'priority-road.jpg']
new_labels = [7, 3, 14, 13, 12]

new_images = []
for image_file in image_files:
    image = Image.open(image_file)
    image = image.resize((32, 32), Image.ANTIALIAS)
    new_images.append(np.array(image))

# printing out some stats and plotting
fig = plt.figure()
for i in range(1, 6):
    a = fig.add_subplot(1, 5, i)
    a.set_title(str(new_labels[i-1]))
    plt.imshow(new_images[i-1])

# One-hot encode the true labels with the encoder fitted on the training set.
new_labels = encoder.transform(new_labels)
new_labels = new_labels.astype(np.float32)

# +
# Predict the class of each candidate image with the trained model.
test_feed_dict = {x: new_images, y: new_labels, keep_prob: 1.0}
prediction = tf.argmax(tf.nn.softmax(logits), 1)

with tf.Session() as sess:
    saver.restore(sess, "data3/checkpoint-17")
    print("Model restored.")
    prediction = sess.run(prediction, feed_dict=test_feed_dict)
    print(prediction)
# -

# ### Question 6
#
# _Choose five candidate images of traffic signs and provide them in the report. Are there any particular qualities of the image(s) that might make classification difficult? It would be helpful to plot the images in the notebook._
#
# **Answer:**
#
# My algorithm properly classified the stop sign, the yield and the priority road sign.
# However, for speed limit signs, it had a false prediction, thinking it was a "right of way at the next intersection" sign. If we look at the speed limit signs from the training set, it is clear that the speed limit sign shape is different than the ones that I selected online (Quebec road signs). Indeed, the German speed limit signs have circular shape, and have a red outline. Without any training images that have square shaped speed limit signs, it is very likely that the classifier does not predict them properly.

# +
# Show a few training-set examples of classes 7, 3 and 11 for visual
# comparison with the misclassified web images above. (test_features is the
# un-normalized image data, since normalize() is a no-op.)
fig = plt.figure()
for i in range(1, 5):
    a=fig.add_subplot(3,4,i)
    plt.imshow(test_features[y_test == 7][i])
    a=fig.add_subplot(3,4,i+4)
    plt.imshow(test_features[y_test == 3][i])
    a=fig.add_subplot(3,4,i+8)
    plt.imshow(test_features[y_test == 11][i])
# -

# Below are results from the testing data

# +
### Run the predictions here.
### Feel free to use as many code cells as needed.
# Overall accuracy on the held-out test set.
test_feed_dict = {x: test_features, y: test_labels, keep_prob: 1.0}

with tf.Session() as sess:
    saver.restore(sess, "data3/checkpoint-17")
    print("Model restored.")
    validation_accuracy = sess.run(accuracy, feed_dict=test_feed_dict)
    print("Test Accuracy:", validation_accuracy)

# +
#test_feed_dict = {x: test_features, y: test_labels, keep_prob: 1.0}
# Plot a handful of test images with their true labels as titles.
fig = plt.figure()
for i in range(1, 6):
    a = fig.add_subplot(1,5,i)
    a.set_title(str(np.argmax(test_labels[i+1])))
    plt.imshow(test_features[i+1])

# +
# Predict on the same five test images shown above.
test_feed_dict = {x: test_features[2:7], y: test_labels[2:7], keep_prob: 1.0}
prediction = tf.argmax(tf.nn.softmax(logits), 1)

with tf.Session() as sess:
    saver.restore(sess, "data3/checkpoint-17")
    print("Model restored.")
    prediction = sess.run(prediction, feed_dict=test_feed_dict)
    print(prediction)
# -

# As seen from the output above, the algorithm correctly predicted 4 out of 5 signs, but had trouble with the first sign (true label = 12 or "Priority road"). The image is in fact of poor quality and would be difficult even for a human driver.
# It is too bright and the content of the sign is unreadable, which explains the incorrect prediction.

# ### Question 7
#
# _Is your model able to perform equally well on captured pictures or a live camera stream when compared to testing on the dataset?_
#
# **Answer:** It would depend on the source of the captured images or the live camera stream. Additionally, these images are cropped out to only have the road sign in the image. In a live camera stream there are lots of other images, hence one would need to detect the objects, crop them out and only pass those to the classifier.
#
# The current classifier would perform fairly well on captured pictures/live camera stream of someone driving in Germany, since that is what the classifier was trained mostly with.
#
# However, it was seen from above that the results from the testing set that the performance was inferior to the training set scores, which yielded ~90% accuracy compared to >96% accuracy.
#
# When comparing with new images, the captured pictures had an accuracy of 80% which is close to the testing set's score of 89%. As mentioned previously, this would be resolved with a larger training set that goes beyond the German traffic sign dataset (since the captured images were from Quebec, Canada).

# +
### Visualize the softmax probabilities here.
### Feel free to use as many code cells as needed.
# Top-5 class probabilities for each of the five captured images.
test_feed_dict = {x: new_images, y: new_labels, keep_prob: 1.0}
probs = tf.nn.softmax(logits)
top_5 = tf.nn.top_k(probs, 5)

with tf.Session() as sess:
    saver.restore(sess, "data3/checkpoint-17")
    print("Model restored.")
    probs = sess.run(top_5, feed_dict=test_feed_dict)
    print(probs)
# -

# ### Question 8
#
# *Use the model's softmax probabilities to visualize the **certainty** of its predictions, [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.11/api_docs/python/nn.html#top_k) could prove helpful here. Which predictions is the model certain of? Uncertain?
# If the model was incorrect in its initial prediction, does the correct prediction appear in the top k? (k should be 5 at most)*
#
# **Answer:** As seen from above, the classifier is almost 100% certain about its predictions for the 3 last signs. However, for the speed limits (class 7 and 3), it first predicted class 11 with above 90% confidence in both cases. Unfortunately, the correct classes do not appear in the top 5 results, which somewhat indicates that perhaps my classifier is not very strong at identifying the letters in the road sign content (since the number layout of the speed limit is consistent despite the shape).
p2-traffic-sign-classification/Traffic_Signs_Recognition.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # READING DATA

# +
import os
import zipfile
import urllib.request

# Signed CloudFront URL for the course's zipped data (expires; key placeholders).
DOWNLOAD_ROOT="https://d3c33hcgiwev3.cloudfront.net/zVO-XrdgTM-Tvl63YDzP_Q_127c70857e6a4cc2aebc05de76a94b07_01a_DEMO_Reading_Data.zip?Expires=1638144000&Signature=<KEY>&Key-Pair-Id=<KEY>"
DATA_PATH=os.path.join("IBM_DATA")

def loadZipDataToLocalDrive(url=DOWNLOAD_ROOT,drive_path=DATA_PATH,rar="file.rar",extract_folder_name=DATA_PATH):
    """Download a zip archive from `url` into drive_path/`rar` and extract it.

    extract_folder_name is appended to drive_path to form the extraction
    directory, so callers pass a leading '/' (e.g. "/C1W1DATA").
    NOTE(review): with the default extract_folder_name the target becomes
    "IBM_DATAIBM_DATA" — always pass an explicit value.
    """
    os.makedirs(drive_path,exist_ok=True)
    zip_path=os.path.join(drive_path,rar)
    urllib.request.urlretrieve(url,zip_path)
    # BUG FIX: the original left the ZipFile handle open (resource leak);
    # the context manager guarantees it is closed after extraction. The
    # duplicate `import zipfile` and the unused urlretrieve binding were
    # also removed.
    with zipfile.ZipFile(zip_path, 'r') as data_zip:
        data_zip.extractall(path=drive_path+extract_folder_name)

loadZipDataToLocalDrive(extract_folder_name="/C1W1DATA")

# +
#SECURING CONNECTION TO DATABASE [Here SQLite Database, other databse has got their own packages]
import sqlite3 as sq3
import pandas.io.sql as pds
import pandas as pd

path=os.path.join(DATA_PATH,"C1W1DATA","data","classic_rock.db")
print("DATABASE PATH: ",path)
con=sq3.Connection(path)
print(con)

# +
#WRITE DOWN QUERY
query='''
SELECT *
FROM rock_songs;
'''

#Execute the query
observations=pds.read_sql(query,con)
observations.head()

# +
#Any query is supported
query='''
Select Artist,Release_Year,COUNT(*) AS num_of_songs, AVG(PlayCount) AS avg_plays
FROM rock_songs
GROUP BY Artist,Release_Year
ORDER BY num_of_songs desc;
'''

observations=pd.read_sql(query,con)
observations.head()
# -

# Stream the result in chunks of 5 rows instead of loading it all at once.
observations_generator=pds.read_sql(query,
                                    con,
                                    coerce_float=True,
                                    parse_dates=["Release_Year"],
                                    chunksize=5
                                   )
for index,observations in enumerate(observations_generator):
    if index<5:
        # BUG FIX: the original mixed f-string and str.format —
        # f'Observations index: {index}.format(index)' printed a literal
        # ".format(index)" suffix after every index.
        print(f'Observations index: {index}')
        # display() is the notebook's rich-output builtin.
        display(observations)

# # 01b_LAB_READING_DATA

# + active=""
# Lab Exercise: Reading in database files
# Create a variable, path, containing
# the path to the baseball.db contained in resources/
# Create a connection, con, that is connected to database at path
# Create a variable, query, containing a SQL query which reads in all data from the allstarfull table
# Create a variable, observations, by using pandas' read_sql
#
# Optional
# Create a variable, tables, which reads in all data from the table sqlite_master
# Pretend that you were interested in creating a new baseball hall of fame. Join and analyze the tables to evaluate the top 3 all time best baseball players.
# -

# Signed CloudFront URL for the lab's zipped data (expires).
url="https://d3c33hcgiwev3.cloudfront.net/5Y1nHm5xSouNZx5ucaqLZg_c93a585749804b1e87603f707f04bd3f_01b_LAB_Reading_Data.zip?Expires=1638144000&Signature=joQ0zrR~MmJJUKWNulSa53bFy9AlWQB8iAGkZreEgWu~F25OmgCh2O4VEBPpKvbf70o062vOBEt-Iv-8YtsoNuyNBgyBz0C~l0-WFVjaPZ1VpKcTC4zGBpWqKTC4cKkKoaXao~B6AAqPiQlWRoiELUIZRYREHeFP1EQEk04KvBc_&Key-Pair-Id=<KEY>"
loadZipDataToLocalDrive(url=url,rar="file2.rar",extract_folder_name="/C1W1LAB01b")

# +
import sqlite3 as sq3
import pandas.io.sql as pds
import pandas as pd

# Connect to the lab's baseball database.
path=os.path.join(DATA_PATH,"C1W1LAB01b","baseball.db")
print("DATABASE PATH: ",path)
con=sq3.Connection(path)
print(con)
# -

# All rows of the all-star appearances table.
query='''
SELECT *
FROM allstarfull;
'''
observations=pds.read_sql(query,con)
observations.tail()

# sqlite_master lists every table/index defined in the database.
query='''
SELECT *
FROM sqlite_master;
'''
all_tables=pds.read_sql(query,con)
all_tables.head()

# + active=""
#
# -
IBM_C1_W1.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# !pip install tldextract
# !pip install sweetviz

# +
# Importing Packages
import pandas as pd
import numpy as np
import spacy
import sys
# Make the bundled readability package importable.
sys.path = [
    '../input/readability-package/',
] + sys.path
import readability
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk import pos_tag, pos_tag_sents
from urllib.parse import urlparse
import re
from tldextract import extract
from sklearn import metrics, preprocessing, model_selection
import lightgbm as lgb
import copy
import sweetviz as sv
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
import random
# -

# Reading Data
pd.options.display.max_rows = 4000
train = pd.read_csv('../input/commonlitreadabilityprize/train.csv', low_memory=False)
test = pd.read_csv('../input/commonlitreadabilityprize/test.csv', low_memory=False)


# +
# Taken this from https://www.kaggle.com/ravishah1/readability-feature-engineering-non-nn-baseline
def readability_measurements(passage: str):
    """
    This function uses the readability library for feature engineering.
    It includes textual statistics, readability scales and metrics, and some pos stats
    """
    results = readability.getmeasures(passage, lang='en')

    # Sentence-level statistics.
    chars_per_word = results['sentence info']['characters_per_word']
    syll_per_word = results['sentence info']['syll_per_word']
    words_per_sent = results['sentence info']['words_per_sentence']

    # Classic readability grade scores.
    kincaid = results['readability grades']['Kincaid']
    ari = results['readability grades']['ARI']
    coleman_liau = results['readability grades']['Coleman-Liau']
    flesch = results['readability grades']['FleschReadingEase']
    gunning_fog = results['readability grades']['GunningFogIndex']
    lix = results['readability grades']['LIX']
    smog = results['readability grades']['SMOGIndex']
    rix = results['readability grades']['RIX']
    dale_chall = results['readability grades']['DaleChallIndex']

    # Word-usage counts.
    tobeverb = results['word usage']['tobeverb']
    auxverb = results['word usage']['auxverb']
    conjunction = results['word usage']['conjunction']
    pronoun = results['word usage']['pronoun']
    preposition = results['word usage']['preposition']
    nominalization = results['word usage']['nominalization']

    # Sentence-beginning counts.
    pronoun_b = results['sentence beginnings']['pronoun']
    interrogative = results['sentence beginnings']['interrogative']
    article = results['sentence beginnings']['article']
    subordination = results['sentence beginnings']['subordination']
    conjunction_b = results['sentence beginnings']['conjunction']
    preposition_b = results['sentence beginnings']['preposition']

    # Order must match the column list used in extract_features below.
    return [chars_per_word, syll_per_word, words_per_sent,
            kincaid, ari, coleman_liau, flesch, gunning_fog, lix, smog, rix, dale_chall,
            tobeverb, auxverb, conjunction, pronoun, preposition, nominalization,
            pronoun_b, interrogative, article, subordination, conjunction_b, preposition_b]


# Taken this from https://www.kaggle.com/ravishah1/readability-feature-engineering-non-nn-baseline
def spacy_features(df: pd.DataFrame):
    """
    This function generates features using spacy en_core_web_lg
    I learned about this from these resources:
    https://www.kaggle.com/konradb/linear-baseline-with-cv
    https://www.kaggle.com/anaverageengineer/comlrp-baseline-for-complete-beginners
    """
    nlp = spacy.load('en_core_web_lg')
    with nlp.disable_pipes():
        # One 300-dim document vector per excerpt.
        vectors = np.array([nlp(text).vector for text in df.excerpt])
    return vectors


def get_spacy_col_names():
    # Column names spacy_0 .. spacy_299 for the 300-dim document vectors.
    names = list()
    for i in range(300):
        names.append(f"spacy_{i}")
    return names


# Taken this from https://www.kaggle.com/ravishah1/readability-feature-engineering-non-nn-baseline
def pos_tag_features(passage: str):
    """
    This function counts the number of times different parts of speech occur in an excerpt
    """
    pos_tags = ["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS", "MD",
                "NN", "NNS", "NNP", "NNPS", "PDT", "POS", "PRP", "RB", "RBR", "RBS",
                "RP", "TO", "UH", "VB", "VBD", "VBG", "VBZ", "WDT", "WP", "WRB"]

    tags = pos_tag(word_tokenize(passage))
    tag_list= list()
    for tag in pos_tags:
        tag_list.append(len([i[0] for i in tags if i[1] == tag]))
    return tag_list


# Taken this from https://www.kaggle.com/ravishah1/readability-feature-engineering-non-nn-baseline
def generate_other_features(passage: str):
    """
    This function is where I test miscellaneous features
    This is experimental
    """
    # punctuation count
    periods = passage.count(".")
    commas = passage.count(",")
    semis = passage.count(";")
    exclaims = passage.count("!")
    questions = passage.count("?")

    # Some other stats
    num_char = len(passage)
    num_words = len(passage.split(" "))
    unique_words = len(set(passage.split(" ") ))
    word_diversity = unique_words/num_words

    word_len = [len(w) for w in passage.split(" ")]
    longest_word = np.max(word_len)
    avg_len_word = np.mean(word_len)

    return [periods, commas, semis, exclaims, questions,
            num_char, num_words, unique_words, word_diversity, longest_word, avg_len_word]


def extract_features(df):
    # Merge readability, spacy-vector, POS-count and misc features onto df,
    # aligned by row index.
    scores_df = pd.DataFrame(df["excerpt"].apply(lambda p : readability_measurements(p)).tolist(),
                             columns=["chars_per_word", "syll_per_word", "words_per_sent",
                                      "kincaid", "ari", "coleman_liau", "flesch", "gunning_fog", "lix", "smog", "rix", "dale_chall",
                                      "tobeverb", "auxverb", "conjunction", "pronoun", "preposition", "nominalization",
                                      "pronoun_b", "interrogative", "article", "subordination", "conjunction_b", "preposition_b"])
    df = pd.merge(df, scores_df, left_index=True, right_index=True)

    spacy_df = pd.DataFrame(spacy_features(df), columns=get_spacy_col_names())
    df = pd.merge(df, spacy_df, left_index=True, right_index=True)

    pos_df = pd.DataFrame(df["excerpt"].apply(lambda p : pos_tag_features(p)).tolist(),
                          columns=["CC", "CD", "DT", "EX", "FW", "IN", "JJ", "JJR", "JJS", "LS", "MD",
                                   "NN", "NNS", "NNP", "NNPS", "PDT", "POS", "PRP", "RB", "RBR", "RBS",
                                   "RP", "TO", "UH", "VB", "VBD", "VBG", "VBZ", "WDT", "WP", "WRB"])
    df = pd.merge(df, pos_df, left_index=True, right_index=True)

    other_df = pd.DataFrame(df["excerpt"].apply(lambda p : generate_other_features(p)).tolist(),
                            columns=["periods", "commas", "semis", "exclaims", "questions",
                                     "num_char", "num_words", "unique_words", "word_diversity", "longest_word", "avg_len_word"])
    df = pd.merge(df, other_df, left_index=True, right_index=True)

    return df


def extract_url_license_feat(df):
    # Derive categorical features from the source URL and license string.
    # NOTE(review): the `x is np.nan` identity checks rely on pandas using the
    # np.nan singleton for missing values — pd.isna(x) would be the robust test.
    temp = pd.DataFrame()

    temp['article_year'] = df['url_legal'].apply(lambda x : x if x is np.nan else re.search('(2\d{3})|$', urlparse(x).path).group())
    temp['subdomain'] = df['url_legal'].apply(lambda x : x if x is np.nan else extract(x)[0])
    temp['domain'] = df['url_legal'].apply(lambda x : x if x is np.nan else extract(x)[1])
    temp['suffix'] = df['url_legal'].apply(lambda x : x if x is np.nan else extract(x)[2])
    temp['is_pdf'] = df['url_legal'].apply(lambda x : x if x is np.nan else ('Y' if '.pdf' in str(x) else 'N'))

    temp['is_cc'] = df['license'].apply(lambda x : x if x is np.nan else ('Y' if 'CC' in str(x) else 'N'))
    temp['is_by'] = df['license'].apply(lambda x : x if x is np.nan else ('Y' if 'BY' in str(x) else 'N'))
    temp['is_sa'] = df['license'].apply(lambda x : x if x is np.nan else ('Y' if 'SA' in str(x) else 'N'))
    temp['is_nc'] = df['license'].apply(lambda x : x if x is np.nan else ('Y' if 'NC' in str(x) else 'N'))
    temp['is_nd'] = df['license'].apply(lambda x : x if x is np.nan else ('Y' if 'ND' in str(x) else 'N'))
    temp['is_gnu'] = df['license'].apply(lambda x : x if x is np.nan else ('Y' if 'GNU' in str(x) else 'N'))
    temp['license_version'] = df['license'].apply(lambda x : x if x is np.nan else(float(0) if re.search('([0-9][.][0-9])|$', urlparse(x).path).group() == '' else float(re.search('([0-9][.][0-9])|$', urlparse(x).path).group())))

    df = pd.concat([df, temp], axis = 1)
    return df


def handle_cate_NA(df, columns_to_ignore=[]):
    # Fill missing categorical values with 'ABS' and add a Y/N indicator
    # column per filled column.
    # NOTE(review): mutable default argument — harmless here (never mutated),
    # but a None default would be the safer idiom.
    temp = copy.deepcopy(df)
    cate_cols = list(set(temp.select_dtypes('object').columns.tolist()) - set(columns_to_ignore))
    for col in cate_cols:
        if temp[col].isna().sum() > 0:
            column_name = 'NA_POS_'+col
            col_values = ['Y' if pd.isna(value[1]) else 'N' for value in df[col].items()]
            temp[col].fillna(value='ABS', inplace=True)
            temp[column_name] = col_values
    return temp


def handle_cont_NA(df, method='mean'):
    # Fill missing numeric values with the column mean (or median for any
    # other `method` value) and add a Y/N indicator column per filled column.
    action = ''.join(c.lower() for c in method if not c.isspace())
    temp = copy.deepcopy(df)
    num_cols = temp.select_dtypes(include='number')
    for col in num_cols:
        if temp[col].isna().sum() > 0:
            column_name = 'NA_POS_'+col
            col_values = ['Y' if pd.isna(value[1]) else 'N' for value in df[col].items()]
            #value_if_true if condition else value_if_false
            fill_value = np.mean(temp[col]) if 'mean' == action else np.median(temp[col])
            temp[col].fillna(value = fill_value, inplace=True)
            temp[column_name] = col_values
    return temp


def train_pca(df, list_of_columns, column_prefix):
    # Fit a 2-component PCA (after standard scaling) on the given columns,
    # append the components as <prefix>_1/<prefix>_2, and return the fitted
    # transformers for reuse on test data.
    temp = copy.deepcopy(df)
    x = temp.loc[:, list_of_columns].values
    ss = StandardScaler().fit(x)
    x = ss.transform(x)
    pca = PCA(n_components=2)
    pca.fit(x)
    principalComponents = pca.transform(x)
    print(column_prefix, pca.explained_variance_ratio_)
    principalDf = pd.DataFrame(data = principalComponents, columns = [column_prefix+'_1', column_prefix+'_2'])
    # temp.drop(columns=list_of_columns, axis=1, inplace=True)
    temp = pd.concat([temp, principalDf], axis = 1)
    result_dict = {
        'pca': pca,
        'ss': ss,
        'list_of_columns': list_of_columns,
        'column_prefix': column_prefix
    }
    return result_dict, temp


def apply_pca(trained_pca, df):
    # Apply a transformer dict produced by train_pca to new data.
    temp = copy.deepcopy(df)
    x = temp.loc[:, trained_pca.get('list_of_columns')].values
    x = trained_pca.get('ss').transform(x)
    principalComponents = trained_pca.get('pca').transform(x)
    principalDf = pd.DataFrame(data = principalComponents, columns = [trained_pca.get('column_prefix')+'_1', trained_pca.get('column_prefix')+'_2'])
    # temp.drop(columns=trained_pca.get('list_of_columns'), axis=1, inplace=True)
    temp = pd.concat([temp, principalDf], axis = 1)
    return temp


# +
# Feature extraction is slow, so precomputed features are loaded from CSV.
# train_feat = extract_features(train)
# train_feat = extract_url_license_feat(train_feat)
# train_feat = handle_cate_NA(train_feat)
# train_feat = handle_cont_NA(train_feat)
# train_feat.head()
train_feat = pd.read_csv('../input/commonlitfe/train_feat.csv', low_memory=False)

# +
# test_feat = extract_features(test)
# test_feat = extract_url_license_feat(test_feat)
# test_feat = handle_cate_NA(test_feat)
# test_feat = handle_cont_NA(test_feat)
# test_feat.head()
test_feat = pd.read_csv('../input/commonlitfe/test_feat.csv', low_memory=False)

# +
# Groups of features, each to be collapsed to 2 PCA components.
pca_groups = [['smog', 'syll_per_word', 'spacy_29'],
              ['coleman_liau', 'nominalization', 'IN'],
              ['spacy_68', 'spacy_86', 'spacy_208', 'spacy_262', 'spacy_147', 'spacy_261'],
              ['spacy_110', 'spacy_114', 'spacy_298', 'spacy_269', 'spacy_151'],
              ['spacy_76', 'spacy_122', 'periods', 'spacy_72', 'spacy_196'],
              ['spacy_4', 'spacy_214', 'spacy_101', 'flesch', 'periods'],
              ['pronoun', 'spacy_269', 'spacy_294', 'spacy_151', 'spacy_147', 'spacy_110', 'spacy_196'],
              ['spacy_264', 'spacy_134', 'spacy_122', 'spacy_86', 'spacy_254', 'spacy_72'],
              ['spacy_76', 'spacy_114', 'spacy_298', 'spacy_69'],
              ['spacy_28', 'spacy_269', 'spacy_151', 'spacy_122', 'spacy_72', 'spacy_69', 'spacy_134', 'spacy_9', 'spacy_254'],
              ['spacy_101', 'spacy_214', 'spacy_262', 'spacy_89', 'spacy_110',
'spacy_208'], ['spacy_86', 'spacy_105', 'spacy_249', 'spacy_294', 'VBD', 'spacy_147', 'flesch', 'periods'], ['spacy_28', 'pronoun', 'spacy_122', 'spacy_101', 'spacy_110', 'periods', 'spacy_9'], ['spacy_249', 'PRP', 'spacy_68', 'spacy_294', 'VBD', 'spacy_261', 'spacy_4', 'spacy_298'], ['spacy_76', 'spacy_72', 'spacy_208', 'spacy_89', 'flesch', 'spacy_196', 'spacy_69'], ['spacy_249', 'spacy_68', 'spacy_76', 'spacy_122', 'spacy_208', 'spacy_214', 'spacy_101', 'spacy_254'], ['PRP', 'spacy_114', 'spacy_28', 'spacy_151', 'spacy_4', 'spacy_110', 'spacy_279', 'spacy_232'], ['VBD', 'spacy_264', 'spacy_134', 'spacy_269', 'spacy_261'], ['spacy_249', 'spacy_110', 'spacy_68', 'spacy_9', 'spacy_122', 'spacy_28', 'spacy_147'], ['spacy_269', 'PRP', 'spacy_151', 'spacy_298', 'spacy_101', 'spacy_198', 'spacy_72'], ['spacy_198', 'spacy_28', 'spacy_151', 'spacy_269', 'spacy_261'], ['spacy_249', 'PRP', 'spacy_114', 'spacy_122', 'spacy_110', 'spacy_264', 'spacy_208', 'spacy_133'], ['spacy_214', 'PRP', 'spacy_114', 'spacy_86', 'periods', 'spacy_28'], ['pronoun', 'spacy_76', 'spacy_68', 'spacy_262', 'VBD', 'spacy_122', 'spacy_105', 'spacy_298'], ['spacy_151', 'spacy_134', 'spacy_269', 'spacy_279', 'periods', 'spacy_89', 'spacy_133', 'spacy_147', 'spacy_232'], ['spacy_249', 'spacy_86', 'PRP', 'spacy_114', 'spacy_122', 'spacy_69', 'spacy_294', 'spacy_68', 'spacy_254', 'spacy_110'], ['PRP', 'spacy_214', 'pronoun', 'VBD', 'spacy_114', 'spacy_254', 'spacy_294', 'spacy_261', 'spacy_208', 'spacy_134', 'spacy_4', 'spacy_89', 'spacy_298'], ['spacy_269', 'spacy_249', 'spacy_151', 'spacy_76', 'spacy_122', 'spacy_101', 'periods'], ['spacy_294', 'spacy_214', 'spacy_76', 'spacy_28', 'spacy_86', 'spacy_264', 'spacy_232', 'spacy_122', 'spacy_9'], ['spacy_294', 'spacy_68', 'spacy_122', 'spacy_4', 'spacy_264', 'spacy_261', 'spacy_196', 'spacy_9'], ['spacy_114', 'spacy_249', 'spacy_86', 'spacy_151', 'spacy_134', 'spacy_101', 'spacy_76', 'spacy_254', 'spacy_262'], ['spacy_249', 'PRP', 'pronoun', 'spacy_28', 
'spacy_269', 'spacy_114', 'spacy_68', 'spacy_294', 'spacy_122', 'spacy_261', 'spacy_196', 'spacy_72', 'spacy_133'], ['spacy_86', 'spacy_76', 'flesch', 'spacy_4', 'spacy_89', 'spacy_110', 'spacy_9', 'spacy_151'], ['spacy_110', 'spacy_86', 'spacy_208', 'spacy_214', 'spacy_134'], ['spacy_114', 'spacy_269', 'spacy_249', 'spacy_101', 'spacy_76', 'spacy_198', 'spacy_9', 'flesch'], ['spacy_122', 'spacy_294', 'spacy_72'], ['spacy_298', 'spacy_214', 'VBD', 'spacy_114', 'spacy_264', 'spacy_68', 'spacy_9', 'spacy_134', 'spacy_262', 'spacy_4', 'spacy_147', 'flesch'], ['spacy_249', 'spacy_69', 'spacy_105', 'spacy_89', 'spacy_110'], ['preposition', 'smog'], ['spacy_14', 'chars_per_word', 'spacy_29'], ['IN', 'spacy_107'], ['spacy_160', 'nominalization', 'spacy_2'], ['syll_per_word', 'NN', 'spacy_200'], ['coleman_liau', 'NN', 'spacy_60', 'spacy_263'], ['spacy_14', 'dale_chall', 'JJ', 'spacy_182'], ['nominalization', 'spacy_46', 'spacy_155', 'spacy_107'], ['spacy_203', 'smog', 'spacy_149', 'spacy_24'], ['spacy_103', 'avg_len_word', 'num_char', 'spacy_200'], ['spacy_146', 'rix'], ['syll_per_word', 'IN', 'spacy_159', 'spacy_10'], ['preposition', 'spacy_29', 'syll_per_word', 'IN', 'spacy_10'], ['IN', 'spacy_29', 'num_char', 'spacy_182', 'spacy_240', 'spacy_10'], ['chars_per_word', 'avg_len_word', 'nominalization', 'rix', 'spacy_149', 'JJ', 'spacy_2'], ['smog', 'spacy_60', 'spacy_146', 'spacy_97', 'spacy_162'], ['spacy_50', 'spacy_14', 'spacy_38', 'spacy_192', 'dale_chall', 'spacy_24'], ['spacy_203', 'coleman_liau', 'spacy_107', 'preposition'], ['spacy_14', 'coleman_liau', 'spacy_43', 'spacy_182', 'spacy_258'], ['smog', 'rix', 'dale_chall', 'spacy_197', 'spacy_251', 'spacy_107', 'spacy_240'], ['spacy_160', 'IN', 'spacy_27', 'spacy_192', 'JJ', 'nominalization'], ['chars_per_word', 'spacy_7', 'spacy_148', 'spacy_97', 'spacy_159', 'spacy_217', 'lix', 'spacy_200'], ['spacy_155', 'spacy_203', 'avg_len_word', 'spacy_60', 'spacy_252', 'NN', 'spacy_146', 'gunning_fog'], ['syll_per_word', 
'num_char', 'spacy_38', 'spacy_24', 'preposition'], ['coleman_liau', 'spacy_2', 'spacy_160', 'spacy_107', 'spacy_162'], ['smog', 'spacy_43', 'spacy_197', 'dale_chall', 'spacy_155'], ['spacy_29', 'IN', 'chars_per_word', 'spacy_252', 'spacy_24', 'preposition', 'lix', 'spacy_200', 'spacy_50'], ['spacy_14', 'syll_per_word', 'spacy_7', 'spacy_27', 'nominalization', 'spacy_211', 'gunning_fog', 'spacy_10'], ['avg_len_word', 'spacy_251', 'rix', 'spacy_30', 'spacy_217', 'spacy_149'], ['spacy_97', 'spacy_149', 'spacy_200', 'spacy_263', 'spacy_162', 'spacy_182', 'gunning_fog'], ['spacy_14', 'spacy_203', 'spacy_43', 'JJ', 'spacy_30', 'dale_chall', 'spacy_146', 'spacy_50', 'spacy_10'], ['coleman_liau', 'spacy_251', 'nominalization', 'spacy_7', 'spacy_46', 'spacy_155', 'preposition', 'spacy_240', 'spacy_266', 'lix'], ['chars_per_word', 'syll_per_word', 'smog', 'IN', 'spacy_38', 'spacy_107', 'spacy_2'], ['nominalization', 'spacy_251', 'IN', 'spacy_155', 'NN', 'spacy_24', 'gunning_fog'], ['smog', 'coleman_liau', 'spacy_14', 'spacy_146', 'spacy_266', 'spacy_240', 'spacy_182'], ['syll_per_word', 'spacy_43', 'spacy_197', 'spacy_46', 'spacy_217', 'spacy_38', 'dale_chall', 'spacy_192', 'rix', 'spacy_200', 'spacy_162'], ['num_char', 'spacy_29', 'lix', 'spacy_10'], ['avg_len_word', 'spacy_10', 'preposition'], ['spacy_217', 'spacy_149', 'spacy_97', 'spacy_24', 'num_char', 'syll_per_word', 'spacy_266'], ['spacy_252', 'spacy_29', 'spacy_14', 'spacy_30', 'spacy_211', 'spacy_192', 'dale_chall', 'spacy_107', 'spacy_60', 'lix'], ['coleman_liau', 'smog', 'spacy_197', 'spacy_251', 'rix', 'spacy_27'], ['spacy_103', 'spacy_160', 'chars_per_word', 'spacy_7', 'spacy_146', 'spacy_240'], ['chars_per_word', 'spacy_29', 'spacy_30', 'spacy_217', 'spacy_197', 'spacy_155', 'spacy_24', 'spacy_97', 'spacy_162', 'spacy_182', 'spacy_10'], ['coleman_liau', 'avg_len_word', 'smog', 'spacy_258', 'spacy_7', 'preposition', 'JJ', 'spacy_240'], ['IN', 'num_char', 'spacy_38', 'lix'], ['spacy_29', 'spacy_2', 
'dale_chall', 'spacy_203', 'spacy_263', 'spacy_107', 'spacy_160'], ['coleman_liau', 'syll_per_word', 'avg_len_word', 'nominalization', 'spacy_103', 'num_char', 'spacy_38', 'spacy_192', 'JJ'], ['chars_per_word', 'spacy_252', 'spacy_14', 'spacy_197', 'spacy_211', 'spacy_182', 'spacy_240', 'lix', 'gunning_fog', 'spacy_266'], ['chars_per_word', 'spacy_27', 'spacy_149', 'spacy_197', 'NN', 'preposition', 'spacy_97', 'gunning_fog'], ['nominalization', 'rix', 'spacy_14', 'num_char'], ['coleman_liau', 'syll_per_word', 'avg_len_word', 'smog', 'spacy_103', 'JJ', 'spacy_251', 'dale_chall'], ['spacy_43', 'spacy_2', 'spacy_103', 'spacy_192', 'spacy_240', 'spacy_200', 'spacy_146', 'spacy_149', 'spacy_24', 'spacy_107', 'spacy_263'], ['coleman_liau', 'avg_len_word', 'nominalization', 'smog', 'spacy_7', 'rix', 'spacy_46', 'spacy_29', 'spacy_30', 'spacy_162', 'lix', 'gunning_fog', 'spacy_10'], ['spacy_251', 'spacy_38', 'spacy_60', 'dale_chall', 'num_char', 'spacy_252', 'spacy_266', 'JJ', 'spacy_217', 'spacy_24', 'spacy_182'], ['nominalization', 'spacy_97', 'spacy_148', 'spacy_240'], ['spacy_7', 'spacy_29', 'spacy_14', 'spacy_43', 'spacy_2'], ['coleman_liau', 'chars_per_word', 'syll_per_word', 'rix', 'spacy_146', 'spacy_197', 'spacy_192'], ['spacy_148', 'spacy_258', 'spacy_7', 'spacy_149', 'rix', 'spacy_107', 'dale_chall'], ['syll_per_word', 'avg_len_word', 'smog', 'nominalization', 'spacy_263', 'spacy_203', 'spacy_211', 'preposition', 'spacy_200', 'spacy_162'], ['spacy_251', 'spacy_46', 'spacy_43', 'IN', 'spacy_155', 'gunning_fog'], ['spacy_29', 'spacy_30', 'spacy_38', 'JJ', 'spacy_107', 'spacy_162', 'lix', 'gunning_fog'], ['coleman_liau', 'spacy_46', 'spacy_252', 'spacy_14', 'spacy_251', 'spacy_200', 'nominalization', 'rix', 'num_char', 'spacy_155'], ['syll_per_word', 'chars_per_word', 'spacy_146', 'spacy_217', 'spacy_159', 'spacy_266'], ['chars_per_word', 'syll_per_word', 'IN', 'spacy_146', 'smog'], ['rix', 'spacy_14', 'spacy_263'], ['coleman_liau', 'avg_len_word', 'spacy_43', 
'nominalization', 'spacy_203', 'dale_chall'], ['spacy_203', 'spacy_2', 'dale_chall', 'spacy_217', 'spacy_266', 'preposition', 'spacy_159', 'spacy_162', 'lix'], ['num_char', 'spacy_149', 'JJ', 'spacy_46', 'gunning_fog'], ['syll_per_word', 'coleman_liau', 'chars_per_word', 'smog', 'spacy_43', 'nominalization', 'num_char', 'dale_chall', 'spacy_197', 'spacy_192', 'spacy_149', 'spacy_97', 'spacy_146', 'lix'], ['spacy_27', 'rix', 'spacy_103', 'IN', 'spacy_162', 'NN', 'gunning_fog', 'spacy_10'], ['spacy_107', 'preposition', 'spacy_266'], ['chars_per_word', 'avg_len_word', 'smog', 'spacy_103', 'nominalization', 'spacy_46', 'spacy_14'], ['spacy_155', 'spacy_97', 'spacy_27', 'spacy_107', 'spacy_182', 'lix', 'gunning_fog', 'spacy_266'], ['coleman_liau', 'chars_per_word', 'nominalization', 'rix', 'smog', 'spacy_263', 'spacy_2'], ['syll_per_word', 'avg_len_word', 'spacy_103', 'spacy_149'], ['spacy_27', 'dale_chall', 'spacy_211', 'NN', 'spacy_24'], ['spacy_155', 'spacy_27', 'dale_chall', 'spacy_211', 'NN', 'avg_len_word', 'spacy_162', 'spacy_24'], ['spacy_14', 'avg_len_word', 'spacy_160', 'spacy_29', 'spacy_197', 'spacy_30', 'spacy_155', 'spacy_27', 'dale_chall']] drop_other_fe = ['periods', 'commas', 'semis', 'exclaims', 'questions', 'num_char', 'num_words', 'unique_words', 'word_diversity', 'longest_word', 'avg_len_word'] for index, group in enumerate(pca_groups): key = 'f'+str(index) pca_res, train_feat = train_pca(train_feat, group, key) test_feat = apply_pca(pca_res, test_feat) train_feat.drop(columns = drop_other_fe, inplace=True, axis = 1) test_feat.drop(columns = drop_other_fe, inplace=True, axis = 1) # - std_error = copy.deepcopy(train_feat['standard_error']) train_feat.drop(columns=['standard_error'], axis=1, inplace=True) # + from sklearn.preprocessing import LabelEncoder ignore_cols = ['id','url_legal','license','excerpt', 'target'] for col in train_feat.select_dtypes('object').columns.tolist(): if col not in ignore_cols: lbl = LabelEncoder() train_feat[col] = 
lbl.fit_transform(train_feat[col]) test_feat[col] = lbl.transform(test_feat[col]) # - X_train = train_feat[[i for i in train_feat.columns if i not in ignore_cols]] y_train = train_feat['target'] test_X = test_feat[[i for i in test_feat.columns if i not in ignore_cols]] [i for i in train_feat.columns if i not in test_feat.columns] print(X_train.shape) print(test_X.shape) # + from sklearn import metrics, preprocessing, model_selection import lightgbm as lgb def runLGB_reg(train_X, train_y, test_X, sample_weight, test_y=None, test_X2=None, dep=8, seed=0, data_leaf=50, rounds=20000): params = {} params["objective"] = "regression" params['metric'] = 'rmse' params["max_depth"] = dep params["num_leaves"] = 30 params["min_data_in_leaf"] = data_leaf # params["min_sum_hessian_in_leaf"] = 50 params["learning_rate"] = 0.01 params["bagging_fraction"] = 0.8 params["feature_fraction"] = 0.2 params["feature_fraction_seed"] = seed params["bagging_freq"] = 1 params["bagging_seed"] = seed params["lambda_l2"] = 3 params["lambda_l1"] = 3 params["verbosity"] = -1 num_rounds = rounds plst = list(params.items()) lgtrain = lgb.Dataset(train_X, label=train_y) if test_y is not None: lgtest = lgb.Dataset(test_X, label=test_y) model = lgb.train(params, lgtrain, num_rounds, valid_sets=[lgtest], early_stopping_rounds=200, verbose_eval=500) else: lgtest = lgb.DMatrix(test_X) model = lgb.train(params, lgtrain, num_rounds) pred_test_y = model.predict(test_X, num_iteration=model.best_iteration) pred_test_y2 = model.predict(test_X2, num_iteration=model.best_iteration) loss = 0 if test_y is not None: loss = np.sqrt(metrics.mean_squared_error(test_y, pred_test_y)) print(loss) return model, loss, pred_test_y, pred_test_y2 else: return model, loss, pred_test_y, pred_test_y2 # - print("Building model..") cv_scores = [] pred_test_full = 0 pred_train = np.zeros(X_train.shape[0]) n_splits = 5 kf = model_selection.KFold(n_splits=n_splits, shuffle=True, random_state=7988) model_name = "lgb" for dev_index, 
val_index in kf.split(X_train, y_train): dev_X, val_X = X_train.iloc[dev_index,:], X_train.iloc[val_index,:] dev_y, val_y = y_train[dev_index], y_train[val_index] std_error_x = std_error[dev_index] pred_val = 0 pred_test = 0 n_models = 0. model, loss, pred_v, pred_t = runLGB_reg(dev_X, dev_y, val_X, std_error_x, val_y, test_X, dep=6, data_leaf=200, seed=2019) pred_val += pred_v pred_test += pred_t n_models += 1 model, loss, pred_v, pred_t = runLGB_reg(dev_X, dev_y, val_X, std_error_x, val_y, test_X, dep=7, data_leaf=180, seed=9873) pred_val += pred_v pred_test += pred_t n_models += 1 pred_val /= n_models pred_test /= n_models loss = np.sqrt(metrics.mean_squared_error(val_y, pred_val)) pred_train[val_index] = pred_val pred_test_full += pred_test / n_splits cv_scores.append(loss) print(cv_scores) print(np.mean(cv_scores))
notebook/models/clrpb.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# -----------------------------------------------------------------------------
# UFRJ - Universidade Federal do Rio de Janeiro
# IM  - Instituto de Matematica
# DMA - Departamento de Matematica Aplicada
#
# TMAA - MAE 353 - Programming I
#
# Test 2 - June 18, 2019
#
# Student: <NAME> 115037241
# Student: <NAME> 116043481
# Student: <NAME> 117050512
# Student: <NAME> 119039091
# ------------------
# -

import csv

# Sum the voter counts (column 8) grouped by the value of column 1 in the
# electorate-profile file (latin-1 encoded, ';'-separated).
with open("./perfil_eleitorado_ATUAL.txt", encoding='latin-1') as voter_file:
    totals = {}
    for record in csv.reader(voter_file, delimiter=';'):
        group = record[1]
        totals[group] = totals.get(group, 0) + int(record[8])

# Raw dump of the aggregated dictionary.
print(totals)

# One "key: value" line per group — easier to read.
for group, voters in totals.items():
    print(f'{group}: {voters}')
Teste2/analysis.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Initialization # #### Notebook stuff # + init_cell=true from IPython.display import display, Latex, HTML display(HTML(open('01.css').read())) # - # #### Numpy and Scipy # + init_cell=true import numpy as np from numpy import array, cos, diag, eye, linspace, pi from numpy import poly1d, sign, sin, sqrt, where, zeros from scipy.linalg import eigh, inv, det # - # #### Matplotlib # + init_cell=true # %matplotlib inline import matplotlib.pyplot as plt plt.style.use('seaborn-paper') plt.rcParams['figure.dpi'] = 115 plt.rcParams['figure.figsize'] = (7.5, 2.5) plt.rcParams['axes.grid'] = True # - # #### Miscellaneous definitions # # In the following `ld` and `pmat` are used to display mathematical formulas generated by the program, `rounder` ensures that a floating point number _close_ to an integer will be rounded correctly when formatted as an integer, `p` is a shorthand to calling `poly1d` that is long and requires a single argument, `vw` computes the virtual work done by moments `m` for the curvatures `c`, when the lengths of the beams are `l` and eventually # `p0_p1` given an array of values `p` returns first `p[0], p[1]` then `p[1], p[2]` then... 
# + init_cell=true
def ld(*items):
    """Join the given fragments and render them as one display-mode LaTeX equation."""
    body = ' '.join(items)
    display(Latex('$$' + body + '$$'))


def pmat(mat, env='bmatrix', fmt='%+f'):
    """Format a 2-D array as a LaTeX matrix environment (default `bmatrix`),
    each entry formatted with the %-style format `fmt`."""
    rows = ['&'.join(fmt % entry for entry in row) for row in mat]
    inner = '\\\\\n '.join(rows)
    return '\\begin{' + env + '}\n ' + inner + '\n\\end{' + env + '}'


def rounder(mat):
    """Nudge every entry away from zero by 0.01 so that near-integer floats
    are rounded correctly when formatted as integers."""
    return mat + 0.01 * sign(mat)


def p(*coeffs):
    """Shorthand for numpy.poly1d built from high-to-low coefficients."""
    return poly1d(coeffs)


def vw(emme, chi, L):
    """Virtual work done by the moments `emme` on the curvatures `chi`,
    integrated over the corresponding beam lengths `L`."""
    total = 0
    for m, c, l in zip(emme, chi, L):
        primitive = (m * c).integ()
        total += primitive(l) - primitive(0)
    return total


def p0_p1(p):
    """Yield consecutive overlapping pairs (p[0], p[1]), (p[1], p[2]), ..."""
    from itertools import tee
    first, second = tee(p)
    next(second, None)
    return zip(first, second)
# -

# # 3 DOF System
# ## Input motion
#
# We need the imposed displacement, the imposed velocity (an intermediate result) and the imposed acceleration. It is convenient to express these quantities in terms of an adimensional time coordinate $a = \omega_0 t$,
#
# \begin{align}
# u &= \frac{4/3\omega_0 t - \sin(4/3\omega_0 t)}{2\pi}
# = \frac{\lambda_0 a- \sin(\lambda_0 a)}{2\pi},\\
# \dot{u} &= \frac{4}{3}\omega_0 \frac{1-\cos(4/3\omega_0t)}{2\pi}
# = \lambda_0 \omega_0 \frac{1-\cos(\lambda_0 a)}{2\pi},\\
# \ddot{u} &= \frac{16}{9}\omega_0^2 \frac{\sin(4/3\omega_0t)}{2\pi}
# = \lambda_0^2\omega_0^2 \frac{\sin(\lambda_0 a)}{2\pi},
# \end{align}
#
# with $\lambda_0=4/3$.
#
# The equations above are valid in the interval
#
# $$ 0 \le t \le \frac{2\pi}{4/3 \omega_0} \rightarrow
# 0 \le a \le \frac{3\pi}2 $$
#
# (we have multiplied all terms by $\omega_0$ and simplified the last term).
# Following a similar reasoning, the plotting interval is equal to $0\le a\le2\pi$.
l0 = 4/3 # define a function to get back the time array and the 3 dependent vars def a_uA_vA_aA(t0, t1, npoints): a = linspace(t0, t1, npoints) uA = where(a<3*pi/2, (l0*a-sin(l0*a))/2/pi, 1) vA = where(a<3*pi/2, (1-cos(l0*a))/2/pi, 0) aA = where(a<3*pi/2, 16*sin(l0*a)/18/pi, 0) return a, uA, vA, aA # and use it a, uA, vA, aA = a_uA_vA_aA(0, 2*pi, 501) # #### The plots # + plt.plot(a/pi, uA) plt.xlabel(r'$\omega_0 t/\pi$') plt.ylabel(r'$u_A/\delta$') plt.title('Imposed support motion'); # - plt.plot(a/pi, vA) plt.xlabel(r'$\omega_0 t/\pi$') plt.ylabel(r'$\dot u_A/\delta\omega_0$') plt.title('Imposed support velocity'); plt.plot(a/pi, aA) plt.xlabel(r'$\omega_0 t/\pi$') plt.ylabel(r'$\ddot u_A/\delta\omega_0^2$') plt.title('Imposed support acceleration'); # ## Equation of Motion # # The EoM expressed in adimensional coordinates and using adimensional structural matrices is # # $$ m\omega_0^2\hat{\boldsymbol M} \frac{\partial^2\boldsymbol x}{\partial a^2} # + \frac{EJ}{L^3}\hat{\boldsymbol K}\boldsymbol x = # m \hat{\boldsymbol M} \boldsymbol e \omega_0^2 \frac{\partial^2 u_A}{\partial a^2} # $$ # # using the dot notation to denote derivatives with respect to $a$, if we divide both members by $m\omega_0^2$ we have # # $$ \hat{\boldsymbol M} \ddot{\boldsymbol x} # + \hat{\boldsymbol K}\boldsymbol x = # \hat{\boldsymbol M} \boldsymbol e \ddot{u}_A. # $$ # # We must determine the influence vector $\boldsymbol e$ and the adimensional structural matrices # # ### Influence vector # # To impose a horizontal displacement in $A$ we must remove one constraint, so that the structure has 1 DOF as a rigid system and the influence vector must be determined by a kinematic analysis. 
display(HTML(open('figures/trab1kin_conv.svg').read())) # The left beam is constrained by a roller and by the right beam, the first requires that the Centre of Instantaneous Rotation (CIR) belongs to the vertical line in $A$, while the second requires that the CIR belongs to the line that connects the hinges # of the right beam. # # The angles of rotation are $\theta_\text{left} = u_A/L$ and $\theta_\text{right} # = -2 u_A/L$ and eventually we have $x_1=x_2=x_3=2u_A$ and # # $$ \boldsymbol e = \begin{Bmatrix}2\\2\\2\end{Bmatrix}.$$ e = array((2.0, 2.0, 2.0)) # ### Structural Matrices display(HTML(open('figures/trab1_conv.svg').read())) # Compute the 3x3 flexibility using the Principle of Virtual Displacements and the 3x3 stiffness using inversion, while the mass matrix is directly assembled with the understanding that the lumped mass on $x_1$ is $2m$. # # The code uses a structure `m` where each of the three rows contains the # computational represention (as polynomial coefficients) of the bending moments due to # a unit load applied in the position of each of the three degrees of freedom, # in each row six groups of polynomial coefficients, one group for each of the six # intervals of definition in which the structure has been subdivided (a possible seventh interval is omitted because the bending moment is always zero for every possible unit load). 
# + l = [1, 2, 2, 1, 1, 1] h = 0.5 ; t = 3*h m = [[p(2,0),p(h,0),p(h,1),p(h,0),p(h,h),p(1,0)], [p(2,0),p(1,0),p(0,2),p(1,0),p(1,1),p(2,0)], [p(2,0),p(h,0),p(h,1),p(h,0),p(t,h),p(2,0)]] F = array([[vw(emme, chi, l) for emme in m] for chi in m]) K = inv(F) M = array(((2.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0))) iM = inv(M) ld('\\boldsymbol F = \\frac{L^3}{12EJ}\\,', pmat(rounder(F*12), fmt='%+d')) ld('\\boldsymbol K = \\frac{3 EJ}{1588L^3}\\,', pmat(rounder(K*1588/3), fmt='%+d'), '= \\frac{EJ}{L^3}\\;\\hat{\\boldsymbol K}.') ld('\\boldsymbol M = m\\,', pmat(M, fmt='%d'), '= m\\;\\hat{\\boldsymbol M}.') # - # ### The eigenvalues problem # # We solve immediately the eigenvalue problem because when we know the shortest modal period of vibration it is possible to choose the integration time step $h$ to avoid numerical unstability issues with the linear acceleration algorithm. # + wn2, Psi = eigh(K, M) wn = sqrt(wn2) li = wn Lambda2 = diag(wn2) Lambda = diag(wn) # eigenvectors are normalized → M* is a unit matrix, as well as its inverse Mstar, iMstar = eye(3), eye(3) ld(r'\boldsymbol\Omega^2 = \omega_0^2\,', pmat(Lambda2), r'=\omega_0^2\,\boldsymbol\Lambda^2.') ld(r'\boldsymbol\Omega=\omega_0\,', pmat(Lambda), r'=\omega_0\,\boldsymbol\Lambda.') ld(r'\boldsymbol T_\text{n}=\frac{2\pi}{\omega_0}\,', pmat(inv(Lambda)), r'= t_0\,\boldsymbol\Theta.') ld(r'\Psi=', pmat(Psi), '.') # - # ## Numerical Integration # # The shortest period is $T_3 = 2\pi\,0.562/\omega_0 \rightarrow A_3 = 1.124 \pi$ hence to avoid unstability of the linear acceleration algorithm we shall use a non dimensional time step $h<0.55A_3\approx0.6\pi$. We can anticipate that the modal response associated with mode 2 is important ($\lambda_2\approx\lambda_0$) so we choose an adimensional time step $h=A_2/20=2\pi\,0.760/20\approx0.08\pi$ that is much smaller than the maximum time step for which we have a stable behaviour. 
# # ### Initialization # # First a new, longer adimensional time vector and the corresponding support acceleration, then the efficace load vector (`peff` is an array with 2001 rows and 3 columns, each row corresponding to the force vector in a particular instant of time) nsppi = 200 a, _, _, aA = a_uA_vA_aA(0, 16*pi, nsppi*16+1) peff = (- M @ e) * aA[:,None] # The constants that we need in the linear acceleration algorithm — note that we have an undamped system or, in other words, $\boldsymbol C = \boldsymbol 0$ h = pi/nsppi K_ = K + 6*M/h**2 F_ = inv(K_) dp_v = 6*M/h dp_a = 3*M # ### The integration loop # # First we initialize the containers where to save the new results with the initial values at $a=0$, next the loop on the values of the load at times $t_i$ and $t_{i+1}$ with $i=0,\ldots,1999$. Xl, Vl = [zeros(3)], [zeros(3)] for p0, p1 in p0_p1(peff): x0, v0 = Xl[-1], Vl[-1] a0 = iM @ (p0 -K@x0) dp = (p1-p0) + dp_a@a0 + dp_v@v0 dx = F_@dp dv = 3*dx/h - 3*v0 - a0*h/2 Xl.append(x0+dx), Vl.append(v0+dv) Xl = array(Xl) ; Vl = array(Vl) # #### Plotting for i, line in enumerate(plt.plot(a/pi, Xl), 1): line.set_label(r'$x_{%d}$'%i) plt.xlabel(r'$\omega_0 t/\pi$') plt.ylabel(r'$x_i/\delta$') plt.title('Response — numerical integration — lin.acc.') plt.legend(); # ## Equation of Motion # # Denoting with $\boldsymbol x$ the dynamic component of the displacements, with $\boldsymbol x_\text{tot} = \boldsymbol x + \boldsymbol x_\text{stat} = \boldsymbol x + \boldsymbol e \;u_\mathcal{A}$ the equation of motion is (the independent variable being $a=\omega_0t$) # # $$ \hat{\boldsymbol M} \ddot{\boldsymbol x} + # \hat{\boldsymbol K} \boldsymbol x = # - \hat{\boldsymbol M} \boldsymbol e \ddot u_\mathcal{A}. 
$$ # # Using mass-normalized eigenvectors, with $\boldsymbol x = \delta\boldsymbol\Psi\boldsymbol q$ we have # # $$ \boldsymbol I \ddot{\boldsymbol q} + # \boldsymbol\Lambda^2\boldsymbol q = # \boldsymbol\Psi^T\hat{\boldsymbol M} \boldsymbol e \frac{\ddot u_A}{\delta}.$$ # # It is $$\frac{\ddot u_A}{\delta} = \frac{1}{2\pi}\,\lambda_0^2\,\sin(\lambda_0a)$$ # # and $$ \ddot q_i + \lambda_i^2 q_i = # \frac{\Gamma_i}{2\pi}\,\lambda_0^2\,\sin(\lambda_0 a),\qquad\text{with } # \Gamma_i = -\boldsymbol\psi_i^T \hat{\boldsymbol M} \boldsymbol e\text{ and } # \lambda_0 = \frac43.$$ G = - Psi.T @ M @ e # Substituting a particular integral $\xi_i=C_i\sin(\lambda_0 a)$ in the # modal equation of motion we have # # $$(\lambda^2_i-\lambda^2_0)\,C_i\sin(\lambda_0 a) = # \frac{\Gamma_i}{2\pi}\,\lambda_0^2\,\sin(\lambda_0 a)$$ # # and solving w/r to $C_i$ we have # # $$ C_i = \frac{\Gamma_i}{2\pi}\,\frac{\lambda_0^2}{\lambda_i^2-\lambda_0^2}$$ C = G*l0**2/(li**2-l0**2)/2/pi # The modal response, taking into account that we start from rest conditions, is # # $$ q_i = C_i\left(\sin(\lambda_0 a) - # \frac{\lambda_0}{\lambda_i}\,\sin(\lambda_i a)\right)$$ # $$ \dot q_i = \lambda_0 C_i \left( # \cos(\lambda_0 a) - \cos(\lambda_i a) \right).$$ for n in range(3): i = n+1 ld(r'q_%d=%+10f\left(\sin\frac43a-%10f\sin%1fa\right)' % (i,C[n],l0/li[n],li[n]), r'\qquad\text{for }0 \le a \le \frac32\pi') # ### Free vibration phase, $a\ge 3\pi/2 = a_1$ # # When the forced phase end, the system is in free vibrations and we can determine the constants of integration requiring that the displacements and velocities of the free vibration equal the displacements and velocities of the forced response at $t=t_0$. 
# # \begin{align} # + (\cos\lambda_i a_1)\, A_i + (\sin\lambda_i a_1)\, B_i &= # q_i(a_1) \\ # - (\sin\lambda_i a_1)\, A_i + (\cos\lambda_i a_1)\, B_i &= # \frac{\dot q_i(a_1)}{\lambda_i} # \end{align} # # Because the coefficients form an othogonal matrix, # # \begin{align} # A_i &= + (\cos\lambda_i a_1)\, q_i(a_1) # - (\sin\lambda_i a_1)\, \frac{\dot q_i(a_1)}{\lambda_i}\\ # B_i &= + (\sin\lambda_i a_1)\, q_i(a_1) # + (\cos\lambda_i a_1)\, \frac{\dot q_i(a_1)}{\lambda_i}. # \end{align} # + a1 = 3*pi/2 q_a1 = C*(sin(l0*a1)-l0*sin(li*a1)/li) v_a1 = C*l0*(cos(l0*a1)-cos(li*a1)) ABs = [] for i in range(3): b = array((q_a1[i], v_a1[i]/li[i])) A = array(((+cos(li[i]*a1), -sin(li[i]*a1)), (+sin(li[i]*a1), +cos(li[i]*a1)))) ABs.append(A@b) ABs = array(ABs) # - # #### Analytical expressions display(Latex(r'Modal responses for $a_1 \le a$.')) for n in range(3): i, l, A_, B_ = n+1, li[n], *ABs[n] display(Latex((r'$$q_{%d} = '+ r'%+6.3f\cos%6.3fa '+ r'%+6.3f\sin%6.3fa$$')%(i, A_, l, B_, l))) # #### Stitching the two responses # # We must evaluate numerically the analytical responses # + ac = a[:,None] q = where(ac<=a1, C*(sin(l0*ac)-l0*sin(li*ac)/li), ABs[:,0]*cos(li*ac) + ABs[:,1]*sin(li*ac)) # - # #### Plotting the Analytical Response # First, we zoom around $a_1$ to verify the continuity of displacements and velocities # #### Plot zooming around a1 low, hi = int(0.8*a1*nsppi/pi), int(1.2*a1*nsppi/pi) for i, line in enumerate(plt.plot(a[low:hi]/pi, q[low:hi]), 1): line.set_label('$q_{%d}$'%i) plt.title('Modal Responses, zoom on transition zone') plt.xlabel(r'$\omega_0 t/\pi$') plt.legend(loc='best') plt.show() # next, the modal responses over the interval $0 \le a \le 16\pi$ # #### Plot in 0 ≤ a ≤ 16 pi for i, line in enumerate(plt.plot(a/pi, q), 1): line.set_label('$q_{%d}$'%i) plt.title('Modal Responses') plt.xlabel(r'$\omega_0 t/\pi$') plt.legend(loc='best'); plt.xticks() plt.show(); # ### Nodal responses x = q@Psi.T # Why `x = q@Psi.T` rather than `x = Psi@q`? 
Because for different reasons (mostly, ease of use with the plotting libraries) we have all the response arrays organized in the shape of `(Nsteps × 3)`. # # That's equivalent to say that `q` and `x`, the Pyton objects, are isomorph to $\boldsymbol q^T$ and $\boldsymbol x^T$ and because it is $$\boldsymbol x^T = (\boldsymbol\Psi \boldsymbol q)^T = \boldsymbol q^T \boldsymbol \Psi^T,$$ # in Python to write `x = q@Psi.T` we have. # # That said. here are the plot of the nodal responses. Compare with the numerical solutions. for i, line in enumerate(plt.plot(a/pi, x), 1): line.set_label('$x_{%d}/\delta$'%i) plt.title('Normalized Nodal Displacements — analytical solution') plt.xlabel(r'$\omega_0 t / \pi$') plt.legend(loc='best') plt.show();
dati_2017/hw03/01.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # # Step 1 - Prepare roads # This notebook merges roads/paths and prepares speeds based on their input data parameters. It is set up for transport networks in Bhutan but can be adapted to other contexts import os, sys import pandas as pd import geopandas as gpd import numpy as np import pyproj as pyp # Projection dest_crs = 'EPSG:32642' # #### Background: essential data prep # Extracting raster values to lines in python is cumbersome and for our purposes, pointlessly so\: the increased control yields no gains in accuracy. Therefore I prefer to join in average slopes per road segment in QGIS / ArcGIS ahead of time and process the resulting slope categories here. You can use the Add Surface Information tool in ArcGIS to accomplish (theoretically the SAGA toolkit has similar tools in QGIS but I wasn't able to maek them work). Compute the Average Slope for every layer this way. 
# #### Set parameters # + # Production date for outputs being created prod_date = '210923' print(prod_date) # + tags=[] input_pth = r'P:\PAK\GEO' osm_pth = r'P:\PAK\\Code\Accessibility\osm_kpk_final_sep' transport_pth = r'P:\PAK\\Code\Accessibility\Pak_Official_Roads_Combined' ghs_pth = r'P:\PAK\\Code\Accessibility\GHS' # - # #### OSM data # + # Load in the latest OSM data # Assumes data to have been downloaded from Geofabrik to a folder with today's date osm = gpd.read_file(os.path.join(osm_pth,'osm_kpk_roads_split.shp')) # - # Rename Geofabrik's default 'flcass' column to the standard 'highway' osm.rename({'fclass':'highway'},axis=1,inplace=True) osm.highway.unique() # + # dicts containing lists of values to replace, with the new key listed lasts track_dct = dict.fromkeys(['track_grade1','track_grade2','track_grade3','track_grade4','track_grade5'], 'track') minor_rd_dct = dict.fromkeys(['unclassified','road','service','residential', 'living_street'], 'minor_road') pth_dct = dict.fromkeys(['path','footway','steps','pedestrian', 'bridleway'], 'path') # + # Update the original dict with these new dicts highway_replace_dct = {} highway_replace_dct.update(track_dct) highway_replace_dct.update(minor_rd_dct) highway_replace_dct.update(pth_dct) # - highway_replace_dct # streamline highway values to a few key types using the above dictionary osm['highway'] = osm['highway'].replace(highway_replace_dct) osm.highway.unique() # Filter out any lingering highway types we don't want using a list of values accepted_road_types = ['path',\ 'track','minor_road',\ 'tertiary','secondary','primary','trunk','motorway',\ 'tertiary_link','secondary_link','primary_link','trunk_link','motorway_link'] osm = osm[osm['highway'].isin(accepted_road_types)] osm.highway.unique() osm.head(2) # Rename OSM data categories to gov categories # + # dicts containing lists of values to replace, with the new key listed lasts provincial_dct = 
dict.fromkeys(['primary','primary_link','trunk','trunk_link','motorway', 'motorway_link'], 'Provincial') district_dct = dict.fromkeys(['secondary','secondary_link','tertiary','tertiary_link'], 'District') access_dct = dict.fromkeys(['track'], 'Access') collector_dct = dict.fromkeys(['minor_road'], 'Collector') # + gov_align_dct = {} gov_align_dct.update(provincial_dct) gov_align_dct.update(district_dct) gov_align_dct.update(access_dct) gov_align_dct.update(collector_dct) # - gov_align_dct osm['Road_Class'] = osm['highway'].map(gov_align_dct).fillna('Path') osm.tail() # Spatial join on administrative information # + #osm = gpd.sjoin(osm,adm2[['geometry','Dzongkhag','Gewog','adm1_code','adm2_code','hourly_wage']],how='left',op='within') # + #osm.drop('index_right',axis=1,inplace=True) # + #osm.head() # - # Slim down to just the key columns to join osm[['Surface','MNT_Num','District_RONET','Road_Class_RONET','Surface1_RONET','Road_Condition_RONET','Surface2_RONET','TYPE']] = np.nan # + #osm.head(2) # - osm.head(2) osm_slim = osm[['geometry','Road_Class','Surface','MNT_Num','District_RONET','Road_Class_RONET','Surface1_RONET','Road_Condition_RONET','Surface2_RONET','TYPE','Avg_Slope','Z_Mean']] # + [markdown] tags=[] # #### Gov data # - gov = gpd.read_file(os.path.join(transport_pth,'combined_KPK_mnt_splt_utm.shp')) # + #gov.rename({'GEWOG':'Gewog','DZONGKHAG':'Dzongkhag','SURFACETYP':'Surface','ROAD_CLASS':'Road_class'},axis=1,inplace=True) # - gov.head(2) # Check out the data gov.Road_Class.unique() gov.Pavement_T.unique() gov.MNT_RONET_.unique() # + # dicts containing lists of values to replace, with the new key listed lasts provi_dct = dict.fromkeys(['PKHA'], 'Provincial') dis_dct = dict.fromkeys(['A','S'], 'District') acc_dct = dict.fromkeys(['B'], 'Access') # + new_gov_dct = {} new_gov_dct.update(provi_dct) new_gov_dct.update(dis_dct) new_gov_dct.update(acc_dct) # - new_gov_dct gov['Road_Class'] = gov['Road_Class'].replace(new_gov_dct).fillna('Collector') 
gov.Road_Class.unique() gov.head(2) gov.rename({'MNT_Distri':'District_RONET','MNT_Road_2':'Road_Class_RONET',\ 'MNT_Paveme':'Surface1_RONET','MNT_RONET_':'Road_Condition_RONET',\ 'MNT_RONE_2':'Surface2_RONET','Pavement_T':'Surface'},axis=1,inplace=True) gov.head(2) gov_slim = gov[['geometry','Road_Class','Surface','MNT_Num','District_RONET','Road_Class_RONET','Surface1_RONET','Road_Condition_RONET','Surface2_RONET','TYPE','Avg_Slope','Z_Mean']] gov.dtypes # ### Merge datasets # Merge together the two slimmed down datasets into one master road file for analysis pre_master = gpd.GeoDataFrame(pd.concat([gov_slim,osm_slim],ignore_index=True)) pre_master.head() # + #Create Default Road Condition # - pre_master.Surface.unique() pre_master.Surface1_RONET.unique() pre_master.Road_Class.unique() pre_master.Road_Condition_RONET.unique() pre_master.Surface2_RONET.unique() # + # dicts containing lists of values to replace, with the new key listed lasts #good_dct = dict.fromkeys(['Asphaltic Mix'], 'Good') #fair_dct = dict.fromkeys(['Cement Concrete','Earth'], 'Fair') #poor_dct = dict.fromkeys(['Surface Treatment','Gravel'], 'Poor') # + #road_cond_dct = {} #road_cond_dct.update(good_dct) #road_cond_dct.update(fair_dct) #road_cond_dct.update(poor_dct) # + #master['Temp_Road_Cond'] = master['Surface2_RONET'].map(road_cond_dct) # + #Add GHS dataset to distinguish urban and rural datasets # - ghs = gpd.read_file(os.path.join(ghs_pth,'ghs_smod_32642.shp')) ghs.head() ghs = ghs.rename(columns={'fid':'Number'}) ghs.head() master = gpd.sjoin(pre_master, ghs, how = "left", op = "intersects") master.head() # + #Save the file to check the spatial joints -- optional # + #master.to_file('P:\PAK\Code\Accessibility\GHS\master_check_Aug28.gpkg',layer='master_layer',driver='GPKG') # - # + [markdown] tags=[] # ### Manipulate datasets to arrive at final speeds # - # Slope information has already been joined in to the input shapefiles in a pre-processing step. Thefore, proceed to... 
# Using slope, generate terrain category # + # old #master['Terrain'] = pd.cut(master['Z_Mean'], [-np.inf, 1500, 2299, np.inf], # labels = ['Plains', 'Hills', 'Mountains']) # change labels herelabels = ['Plains', 'Hills', 'Mountains']) # change labels here # new master['Terrain'] = pd.cut(master['Avg_Slope'], [-np.inf, 8, 16, np.inf], labels = ['Plains', 'Hills', 'Mountains']) # change labels herelabels = ['Plains', 'Hills', 'Mountains']) # change labels here # - # Generate dry season speed based on terrain category and road type terrain_class_filter = [master['Terrain'].str.contains('Plains') & master['Road_Class'].str.contains('Provincial'), master['Terrain'].str.contains('Plains') & master['Road_Class'].str.contains('District'), master['Terrain'].str.contains('Plains') & master['Road_Class'].str.contains('Access'), master['Terrain'].str.contains('Plains') & master['Road_Class'].str.contains('Collector'), master['Terrain'].str.contains('Hills') & master['Road_Class'].str.contains('Provincial'), master['Terrain'].str.contains('Hills') & master['Road_Class'].str.contains('District'), master['Terrain'].str.contains('Hills') & master['Road_Class'].str.contains('Access'), master['Terrain'].str.contains('Hills') & master['Road_Class'].str.contains('Collector'), master['Terrain'].str.contains('Mountains') & master['Road_Class'].str.contains('Provincial'), master['Terrain'].str.contains('Mountains') & master['Road_Class'].str.contains('District'), master['Terrain'].str.contains('Mountains') & master['Road_Class'].str.contains('Access'), master['Terrain'].str.contains('Mountains') & master['Road_Class'].str.contains('Collector')] # Corresponding list of speeds, with line breaks by terrain type for editing and readability speeds_lst = [80,50,40,20,\ 60,40,30,15,\ 40,30,20,10] master['base_speed'] = np.select(terrain_class_filter,speeds_lst,default=0.5) # very low default as path speeds will be re-calculated separately using terrain down the road # must convert the Terrain 
CategoricalDType to String to export as a geopackage master['Terrain'] = master['Terrain'].astype(str) master.head() # Reclassify various surface types into simpler categories master.Surface2_RONET.unique() master.Road_Class.unique() master['Temp_Surface'] = np.where((master['Road_Class'] =='Provincial'),master['Road_Class'],master['Surface']) master.Temp_Surface.unique() master.Surface.unique() master['Temp_Surface_Consolidated'] = np.where((master['Surface2_RONET'].isnull()==False),master['Surface2_RONET'],master['Temp_Surface']) master.head(2) master.Temp_Surface_Consolidated.unique() # + #Reclassify Temp_Surface_Consolidated categories # - pave_dct = dict.fromkeys(['Provincial','Black Topping','Triple Surface Treatment','Cement Concrete','AC/TST'],'Paved') gravel_dct = dict.fromkeys(['Asphaltic Mix','Surface Treatment','TSR','A','C','D','DO','RP','RP= 1.5','RP= 1.25','RP = 1','RP= 1','Gravel'],'Gravel') earth_dct = dict.fromkeys(['E','F','SD','Earth','None','nan'],'Earthen') master.Temp_Surface_Consolidated = master.Temp_Surface_Consolidated.replace(pave_dct) master.Temp_Surface_Consolidated = master.Temp_Surface_Consolidated.replace(gravel_dct) master.Temp_Surface_Consolidated = master.Temp_Surface_Consolidated.replace(earth_dct) master.Temp_Surface_Consolidated = master.Temp_Surface_Consolidated.fillna('Earthen') master.Temp_Surface_Consolidated.unique() master['Surface_Final'] = np.where((master['NAME_MAIN'].isnull()==False), 'Paved', master['Temp_Surface_Consolidated']) master.Surface_Final.unique() master.head(2) master.Surface1_RONET.unique() master.Surface2_RONET.unique() master.Road_Condition_RONET.unique() # + #paved1_dct = dict.fromkeys(['AC','MobileAsphlt','Asphalt','M/A','PCC'],'Paved') #gravel1_dct = dict.fromkeys(['Gravel','TST','GR','RP','BT','Premix'],'Gravel') #earth1_dct = dict.fromkeys(['Kacha','Subgrade','Earth Work','SD','Shingle','None','nan'],'Earthen') # + #master.Surface1_RONET = master.Surface1_RONET.replace(paved1_dct) 
#master.Surface1_RONET = master.Surface1_RONET.replace(gravel1_dct) #master.Surface1_RONET = master.Surface1_RONET.replace(earth1_dct) #master.Surface1_RONET = master.Surface1_RONET.fillna('Earthen') # - # Road condition based on Road_Class and Terrain master['Terrain'] = master['Terrain'].astype(str) road_condition_filter1 = [master['Terrain'].str.contains('Plains') & master['Road_Class'].str.contains('Provincial'), master['Terrain'].str.contains('Plains') & master['Road_Class'].str.contains('District'), master['Terrain'].str.contains('Plains') & master['Road_Class'].str.contains('Access'), master['Terrain'].str.contains('Plains') & master['Road_Class'].str.contains('Collector'), master['Terrain'].str.contains('Hills') & master['Road_Class'].str.contains('Provincial'), master['Terrain'].str.contains('Hills') & master['Road_Class'].str.contains('District'), master['Terrain'].str.contains('Hills') & master['Road_Class'].str.contains('Access'), master['Terrain'].str.contains('Hills') & master['Road_Class'].str.contains('Collector'), master['Terrain'].str.contains('Mountains') & master['Road_Class'].str.contains('Provincial'), master['Terrain'].str.contains('Mountains') & master['Road_Class'].str.contains('District'), master['Terrain'].str.contains('Mountains') & master['Road_Class'].str.contains('Access'), master['Terrain'].str.contains('Mountains') & master['Road_Class'].str.contains('Collector')] default_cond1 = ['Good','Poor','Poor','Poor',\ 'Good','Poor','Poor','Good',\ 'Fair','Poor','Poor','Poor'] master['default_road_cond1'] = np.select(road_condition_filter1, default_cond1) master.dtypes # + # Road Condition based on Road Surface (Surface2_RONET) and Terrain # - master['Terrain'] = master['Terrain'].astype(str) road_condition_filter2 = [master['Terrain'].str.contains('Plains') & master['Surface2_RONET'].str.contains('Asphaltic Mix'), master['Terrain'].str.contains('Plains') & master['Surface2_RONET'].str.contains('Cement Concrete'), 
master['Terrain'].str.contains('Plains') & master['Surface2_RONET'].str.contains('Earthen'), master['Terrain'].str.contains('Plains') & master['Surface2_RONET'].str.contains('Gravel'), master['Terrain'].str.contains('Plains') & master['Surface2_RONET'].str.contains('Surface Treatment'), master['Terrain'].str.contains('Hills') & master['Surface2_RONET'].str.contains('Asphaltic Mix'), master['Terrain'].str.contains('Hills') & master['Surface2_RONET'].str.contains('Cement Concrete'), master['Terrain'].str.contains('Hills') & master['Surface2_RONET'].str.contains('Earthen'), master['Terrain'].str.contains('Hills') & master['Surface2_RONET'].str.contains('Gravel'), master['Terrain'].str.contains('Hills') & master['Surface2_RONET'].str.contains('Surface Treatment'), master['Terrain'].str.contains('Mountains') & master['Surface2_RONET'].str.contains('Asphaltic Mix'), master['Terrain'].str.contains('Mountains') & master['Surface2_RONET'].str.contains('Cement Concrete'), master['Terrain'].str.contains('Mountains') & master['Surface2_RONET'].str.contains('Earthen'), master['Terrain'].str.contains('Mountains') & master['Surface2_RONET'].str.contains('Gravel'), master['Terrain'].str.contains('Mountains') & master['Surface2_RONET'].str.contains('Surface Treatment')] default_cond2 = ['Good','Poor','Poor','Poor','Poor',\ 'Good','Poor','None','Poor','Poor',\ 'Fair','Poor','None','Poor','Fair'] master['default_road_cond2'] = np.select(road_condition_filter2, default_cond2, default = None) master.default_road_cond2.unique() # + # overwrite the default_cond1 with default_cond2 # - master['Temp_Road_Cond'] = np.where((master['default_road_cond2'].isnull()==False),master['default_road_cond2'],master['default_road_cond1']) # + #overwrite Temp_Road_Cond with Road_Condition_RONET to arrive at "Road_Cond_Final" # - master.Temp_Road_Cond.unique() master.head() master.Road_Condition_RONET.unique() master['Road_Condition_RONET'] = np.where(master['Road_Condition_RONET'] == 'poor','Poor', 
master['Road_Condition_RONET']) master['Road_Cond_Final'] = np.where((master['Road_Condition_RONET'].isnull()==False),master['Road_Condition_RONET'],master['Temp_Road_Cond']) master.Road_Cond_Final.unique() master.head() # ### Calculate speeds speed_adj_class_filter = [master['Surface_Final'].str.contains('Earthen') & master['Road_Cond_Final'].str.contains('Good'), master['Surface_Final'].str.contains('Earthen') & master['Road_Cond_Final'].str.contains('Fair'), master['Surface_Final'].str.contains('Earthen') & master['Road_Cond_Final'].str.contains('Poor'), master['Surface_Final'].str.contains('Gravel') & master['Road_Cond_Final'].str.contains('Good'), master['Surface_Final'].str.contains('Gravel') & master['Road_Cond_Final'].str.contains('Fair'), master['Surface_Final'].str.contains('Gravel') & master['Road_Cond_Final'].str.contains('Poor'), master['Surface_Final'].str.contains('Paved') & master['Road_Cond_Final'].str.contains('Good'), master['Surface_Final'].str.contains('Paved') & master['Road_Cond_Final'].str.contains('Fair'), master['Surface_Final'].str.contains('Paved') & master['Road_Cond_Final'].str.contains('Poor')] # Dry season modeling: dry season speeds should also be (less dramatically) modified by the surface/class combination. 
# + # Corresponding list of dry season speed modifiers, with line breaks by terrain type for editing and readability dry_mods_lst = [ 0.6, 0.5, 0.4,\ 0.75, 0.6, 0.5,\ 1, 0.8, 0.6 ] master['dry_mod'] = np.select(speed_adj_class_filter,dry_mods_lst,default=0.5) # very low default as path speeds will be re-calculated separately using terrain down the road master['dry_speed'] = master.dry_mod * master.base_speed # - # Generate monsoon season speed based on terrain category, road type, and surface # + # Corresponding list of monsoon speed modifiers, with line breaks by terrain type for editing and readability msn_mods_lst = [ 0.4, 0.3, 0.2,\ 0.7, 0.5, 0.4,\ 0.9, 0.75, 0.5 ] # - master['msn_mod'] = np.select(speed_adj_class_filter,msn_mods_lst,default=0.5) # very low default as path speeds will be re-calculated separately using terrain down the road master['msn_speed'] = master.msn_mod * master.base_speed master.head() # Winter Speed Modelling # List of winter speed modifiers, with line breaks by terrain type for editing and readability winter_mods_lst = [ 0.4, 0.3, 0.2,\ 0.7, 0.5, 0.4,\ 1, 0.75, 0.5 ] master['winter_mod'] = np.select(speed_adj_class_filter,winter_mods_lst,default=0.5) # + # Define which winter mods to apply to which road based on their terrain winter_cutoff_lst = [master['Terrain'] == 'Plains', \ master['Terrain'] == 'Hills', \ master['Terrain'] == 'Mountains'] # + # Corresponding list of monsoon speed modifiers, with line breaks by terrain type for editing and readability winter_mods_lst = [ 0.4, 0.3, 0.2,\ 0.7, 0.5, 0.4,\ 1, 0.75, 0.5, 1, 0.9, 0.75] # - winter_mod_revised = [ master['dry_speed'], \ (master['dry_speed'] * (master['winter_mod'] + (1 - master['winter_mod']) /2) ), \ (master['base_speed'] * master['winter_mod'])] master['winter_speed'] = np.select(winter_cutoff_lst, winter_mod_revised, default=master['dry_speed']) master['winter_speed_final'] = np.where((master['TYPE'] == 'Snow Bound'), (master['dry_speed'] * master['winter_mod']), 
master['winter_speed']) master.head(2) master.head(2) # Checking final outputs before export master[master['Terrain'] =='Mountains'] master[master['Terrain']=='Hills'] # ## Export final data # Export master.to_file('P:\PAK\Code\Accessibility\Inputs\master_transport_Oct19.gpkg',layer='master_transport_all',driver='GPKG') #master_rds.to_file('P:\PAK\Code\Accessibility\Inputs\master_transport.gpkg',layer='master_roads',driver='GPKG') #master_pths.to_file('P:\PAK\Code\Accessibility\Inputs\master_transport.gpkg',layer='master_paths',driver='GPKG') master.to_csv('P:\PAK\Code\Accessibility\Inputs\master_transport_Oct19.csv')
notebooks/Friction_Surface/Step 1 - Prepare Roads.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import numpy as np #Creating sample array arr = np.arange(0,11) #Show arr arr[8] arr[1:5] #Get values in a range arr[0:5] # + arr[0:5]=100 #Show arr # + arr = np.arange(0,11) #Show arr # + #Important notes on Slices slice_of_arr = arr[0:6] #Show slice slice_of_arr # + #Change Slice slice_of_arr[:]=99 #Show Slice again slice_of_arr # - arr # + #To get a copy, need to be explicit arr_copy = arr.copy() arr_copy # + arr_2d = np.array(([5,10,15],[20,25,30],[35,40,45])) #Show arr_2d # - #Indexing row arr_2d[1] # + # Format is arr_2d[row][col] or arr_2d[row,col] # Getting individual element value arr_2d[1][0] # - # Getting individual element value arr_2d[1,0] # + # 2D array slicing #Shape (2,2) from top right corner arr_2d[:2,1:] # - #Shape bottom row arr_2d[2] #Shape bottom row arr_2d[2,:] #Set up matrix arr2d = np.zeros((10,10)) #Length of array arr_length = arr2d.shape[1] # + #Set up array for i in range(arr_length): arr2d[i] = i arr2d # - arr2d[[2,4,6,8]] #Allows in any order arr2d[[6,4,2,7]] arr = np.arange(1,11) arr arr > 4 bool_arr = arr>4 bool_arr arr[bool_arr] arr[arr>2] x = 2 arr[arr>x]
Numpy/Numpy Indexing and selection.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: ipykernel_py2 # --- # ## Dictionaries # *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).* # This is the menu of a close-by restaurant: Menu = {'meal_1':'Spaghetti', 'meal_2':'Fries', 'meal_3':'Hamburger', 'meal_4':'Lasagna'} # What is the second meal in the list? # Add a new meal - "Soup". # Replace the Hamburger with a Cheeseburger. # Attach the Desserts list in the form of a sixth meal. # Create a new dictionary that contains the first five meals as keys and assign the following five values as prices (in dollars): # 10, 5, 8, 12, 5. # Start by *Price_list = {}*. # Use the *.get()* method to check the price of the Spaghetti.
Python for Finance - Code Files/37 Dictionaries/Python 2/Dictionaries - Exercise_Py2.ipynb
def tenure_quit(row, as_of=None):
    """Return an employee's tenure at the company as a pandas Timedelta.

    Tenure runs from ``join_date`` to ``quit_date``. Employees who have not
    quit (null ``quit_date``) are censored at *as_of*, which defaults to the
    module-level ``todays_date`` snapshot (12/13/2015 per the data-challenge
    description).

    Parameters
    ----------
    row : mapping (e.g. a DataFrame row) with 'join_date' and 'quit_date'
    as_of : date-like, optional
        Censoring date for employees still at the company; defaults to the
        module-level ``todays_date``.

    Returns
    -------
    pandas.Timedelta
    """
    end = row['quit_date']
    if pd.isnull(end):
        # Still employed: measure up to the snapshot date instead.
        end = todays_date if as_of is None else as_of
    return pd.to_datetime(end) - pd.to_datetime(row['join_date'])
Therefore, this problem is best modelled as a survival analysis (i.e., time to event), which I explore in the next section #Look at quit rates by company ax = sns.catplot(x="company_id", y='quit_binary', data=employee_data.groupby('company_id').agg('mean').reset_index(), kind="bar") ax.set(xlabel='Company', ylabel='Proportion that quit') ax.set_xticklabels(rotation=30) #Look at quit rates by department ax = sns.catplot(x="dept", y='quit_binary', data=employee_data.groupby('dept').agg('mean').reset_index(), kind="bar") ax.set(xlabel='', ylabel='Proportion that quit') ax.set_xticklabels(rotation=30) #Look at quit rates by seniority ax = sns.catplot(x="seniority", y='quit_binary', data=employee_data.groupby('seniority').agg('mean').reset_index(), kind="bar") ax.set(xlabel='Seniority', ylabel='Proportion that quit') ax.set_xticklabels(rotation=90) #Seniority is missing for two records (set at 99 and 98 years, which cannot be accurate) #Take these records out employee_data = employee_data[(employee_data.seniority != 98) & (employee_data.seniority != 99)] #Seniority and days at company scatterplot ax = sns.scatterplot(x =round(pd.to_numeric(employee_data.tenure)/1e14), y='seniority', hue='quit_binary', data=employee_data) ax.set(xlabel='Days at company', ylabel='Seniority level') #Look at quit rates by salary ax = sns.boxplot(x='quit_binary', y='salary', data=employee_data) ax.set(xlabel='0 = Still at company, 1 = Quit', ylabel='Salary') #Look at quit rates by salary (scatterplot) ax = sns.scatterplot(x =round(pd.to_numeric(employee_data.tenure)/1e14), y='salary', hue='quit_binary', data=employee_data) ax.set(xlabel='Days at company', ylabel='Salary') #Look at quit rates by salary level #Low = < 94,000 #Mid = 94,000 - 163,000 #High = 163,000 + employee_data['salary_level'] = pd.qcut(employee_data.salary, 3, labels=(0,1,2)) ax = sns.scatterplot(x =round(pd.to_numeric(employee_data.tenure)/1e14), y='salary_level', hue='quit_binary', data=employee_data) ax.set(xlabel='Days 
# Overall Kaplan-Meier fit: probability that an employee is still employed
# as a function of days at the company.
from lifelines import KaplanMeierFitter
from lifelines.utils import datetimes_to_durations  # noqa: F401 (imported but unused)

# Duration: integer days of tenure, computed above.
T = employee_data['tenure_int']
# Event indicator: 1 if the employee quit, 0 if censored (still employed).
# FIX: the column created earlier in this notebook is 'quit_binary';
# 'survival' was never defined and raised a KeyError here.
E = employee_data['quit_binary']

kmf = KaplanMeierFitter()
kmf.fit(T, event_observed=E)
kmf.survival_function_.plot()
plt.title('Probability that you are still at your job')
plt.xlabel('Days Worked at Job')
event_observed=E[data_science], label="Data Science") kmf.plot(ax=ax) kmf.fit(T[engineer], event_observed=E[engineer], label="Engineer") kmf.plot(ax=ax) kmf.fit(T[sales], event_observed=E[sales], label="Sales") kmf.plot(ax=ax) kmf.fit(T[design], event_observed=E[design], label="Design") kmf.plot(ax=ax) # + #Probability that you are still at your job by salary level low_salary = (employee_data["salary_level"] == 0) mid_salary = (employee_data["salary_level"] == 1) high_salary = (employee_data["salary_level"] == 2) ax = plt.subplot(111) kmf.fit(T[low_salary], event_observed=E[low_salary], label="< 94k Salary ") print("Low Salary: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[mid_salary], event_observed=E[mid_salary], label="94-163k Salary") print("Mid Salary: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[high_salary], event_observed=E[high_salary], label=">163k Salary") print("High Salary: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) # + #Probability that you are still at your job by company company_1 = (employee_data["company_id"] == 1) company_2 = (employee_data["company_id"] == 2) company_3 = (employee_data["company_id"] == 3) company_4 = (employee_data["company_id"] == 4) company_5 = (employee_data["company_id"] == 5) company_6 = (employee_data["company_id"] == 6) company_7 = (employee_data["company_id"] == 7) company_8 = (employee_data["company_id"] == 8) company_9 = (employee_data["company_id"] == 9) company_10 = (employee_data["company_id"] == 10) company_11 = (employee_data["company_id"] == 11) company_12 = (employee_data["company_id"] == 12) ax = plt.subplot(111) kmf.fit(T[company_1], event_observed=E[company_1], label="Company 1") print("Company 1: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_2], event_observed=E[company_2], label="Company 2") print("Company 2: 
50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_3], event_observed=E[company_3], label="Company 3") print("Company 3: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_4], event_observed=E[company_4], label="Company 4") print("Company 4: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_5], event_observed=E[company_5], label="Company 5") print("Company 5: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_6], event_observed=E[company_6], label="Company 6") print("Company 6: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_7], event_observed=E[company_7], label="Company 7") print("Company 7: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_8], event_observed=E[company_8], label="Company 8") print("Company 8: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_9], event_observed=E[company_9], label="Company 9") print("Company 9: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_10], event_observed=E[company_10], label="Company 10") print("Company 10: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_11], event_observed=E[company_11], label="Company 11") print("Company 11: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) kmf.fit(T[company_12], event_observed=E[company_12], label="Company 12") print("Company 12: 50% chance of employee quitting after " + str(kmf.median_) + ' days') kmf.plot(ax=ax) # + #Probability that you are still at your job by department at Company 11 ax = plt.subplot(111) kmf.fit(T[(cust_service) & (company_11)], 
event_observed=E[(cust_service) & (company_11)], label="Customer Service") kmf.plot(ax=ax) kmf.fit(T[(marketing) & (company_11)], event_observed=E[(marketing) & (company_11)], label="Marketing") kmf.plot(ax=ax) kmf.fit(T[(data_science) & (company_11)], event_observed=E[(data_science) & (company_11)], label="Data Science") kmf.plot(ax=ax) kmf.fit(T[(engineer) & (company_11)], event_observed=E[(engineer) & (company_11)], label="Engineer") kmf.plot(ax=ax) # + #Probability that you are still at your job by department at Company 12 ax = plt.subplot(111) kmf.fit(T[(cust_service) & (company_12)], event_observed=E[(cust_service) & (company_12)], label="Customer Service") kmf.plot(ax=ax) kmf.fit(T[(marketing) & (company_12)], event_observed=E[(marketing) & (company_12)], label="Marketing") kmf.plot(ax=ax) kmf.fit(T[(data_science) & (company_12)], event_observed=E[(data_science) & (company_12)], label="Data Science") kmf.plot(ax=ax) kmf.fit(T[(engineer) & (company_12)], event_observed=E[(engineer) & (company_12)], label="Engineer") kmf.plot(ax=ax) kmf.fit(T[(sales) & (company_12)], event_observed=E[(sales) & (company_12)], label="Sales") kmf.plot(ax=ax) kmf.fit(T[(design) & (company_12)], event_observed=E[(design) & (company_12)], label="Design") kmf.plot(ax=ax) # - # ## Model # #### The model below is a cox proportional hazards model that evaluates the likelihood of an employee still being at their job after X days of employement using the following features: # 1. The log of their salary # 2. The log of their years of seniority # 3. The type of department they belong to # 4. The company they work for # 5. An interaction term between company and department # 6. 
The number of employees in the company # # #### It is trained on 80% of the data, and then predicts the likelihood that the other 20% of employees will still be working at the company in 1, 2, 3, and 4 years, given the characteristics above # #### The main assumption of the model is that time is the most important predictor of when an employee will leave, which makes sense. Other than that, the cox model shows that salary and seniority also matter. If I could have one other variable, it would be employee satisifcation (e.g., Glassdoor) or the number of times an employee has been promoted. #Some variation by company and department #Make interaction variable between department and company number employee_data['ln_salary'] = np.log(employee_data.salary) employee_data['ln_seniority'] = np.log(employee_data.seniority) employee_data['dept_code'] = employee_data.dept.astype('category').cat.codes+1 employee_data['dept_company_interaction'] = employee_data.company_id*employee_data.dept.astype('category').cat.codes employee_data = employee_data.merge(pd.DataFrame(employee_data.groupby('company_id').agg('count').dept).rename(columns={'dept': 'num_employees'}).reset_index(), on='company_id') # + from lifelines import CoxPHFitter from sklearn.model_selection import train_test_split X_train, X_test = train_test_split(employee_data[['ln_salary', 'ln_seniority','dept_code', 'dept_company_interaction', 'company_id','tenure_int', 'quit_binary', 'num_employees']], test_size=0.2, random_state=42) cph = CoxPHFitter() cph.fit(X_train, duration_col='tenure_int', event_col='quit_binary', show_progress=True) cph.print_summary() # - pd.DataFrame(cph.predict_survival_function(X_test, times=[365, 730, 1095, 1460]).T).rename(columns={365:'1 year', 730:'2 years',1095:'3 years',1460:'4 years' })
others/Data_Challenge1/Mike_Munsell_EmployeeRetention.ipynb
# bivCI: compute m (x, y) points tracing the boundary of the (1 - alpha)
# joint confidence ellipse for a bivariate mean.
#   s     - 2x2 sample covariance matrix
#   xbar  - length-2 vector of sample means (centre of the ellipse)
#   n     - number of observations
#   alpha - 1 minus the confidence level (e.g. 0.05 for a 95% ellipse)
#   m     - number of points used to draw the ellipse
bivCI <- function(s, xbar, n, alpha, m){
  # returns m (x,y) coordinates of 1-alpha
  # joint confidence ellipse of mean
  # start from m points on a unit circle
  x <- sin(2 * pi * (0 : (m - 1)) / (m - 1))
  y <- cos(2 * pi * (0 : (m - 1)) / (m - 1))
  # chisquared critical value (2 df for the bivariate mean)
  cv <- qchisq(1 - alpha, 2)
  # value of quadratic form, scaled by the sample size
  cv <- cv / n
  for (i in 1 : m){
    pair <- c(x[i], y[i]) # ith (x,y) pair on the unit circle
    q <- pair %*% solve(s, pair) # quadratic form pair' s^-1 pair
    # rescale the unit-circle point onto the ellipse boundary,
    # then shift it so the ellipse is centred at the mean
    x[i] <- x[i] * sqrt(cv / q) + xbar[1]
    y[i] <- y[i] * sqrt(cv / q) + xbar[2]
  }
  cbind(x, y) # m x 2 matrix of ellipse boundary coordinates
}
# + library(repr) options(repr.plot.width=4, repr.plot.height=4) plot(bivCI(var(housing), colMeans(housing), dim(housing)[1], .05, 2000), lwd = 3, type = "l", xlab = colnames(housing)[1], ylab = colnames(housing)[2], col = 2, xlim = aptci * c(.95, 1.05), ylim = hoci * c(.95, 1.05)) lines(colMeans(housing)[1], colMeans(housing)[2], pch = 3, cex = 2, type = "p", col = 4, lwd = 3) lines(aptci[c(1, 2, 2, 1, 1)], hoci[c(1, 1, 2, 2, 1)], type = "l", col = 3, lwd = 3) # - # El área elíptica y el rectángulo se superponen. También hay áreas que están incluidas en una figura, pero no en la otra. Más importante aún, notemos que el área de la elipse es más pequeña que la del rectángulo. Esta diferencia en el área ilustra el beneficio de usar métodos multivariados sobre el enfoque marginal. # # Si utilizáramos métodos univariantes y obtuviéramos intervalos de confianza para cada variable individualmente, entonces la región de confianza resultante sería mayor que la región que tiene en cuenta la relación bivariante de alquileres de apartamentos y precios de vivienda. Esta figura proporciona una ilustración gráfica de los beneficios de usar métodos multivariados sobre el uso de una serie de análisis univariados. # # ## Reducción de datos o simplificación estructural # # Aunque los expertos que realizan un estudio recogen los medidas que se consideran más útiles, la primera tarea del analista consiste en determinar cuáles variables son las que requieren nuestra atención. Muchos de los datos recabados pueden ser redundantes. Un objetivo del análisis de datos consiste en identificar los datos/variables que se deben mantener y los que pueden descartarse de forma segura. # # Consideremos los datos de los resultados de las pruebas estandarizadas PISA para distintos países o economías. La prueba PISA básicamente evalúa las capacidades académicas de estudiantes de 15 años de edad en 70 países diferentes. 
#
# La puntuación total se divide en 5 subescalas diferentes que miden destrezas específicas. La puntuación de matemáticas y ciencia se indica aparte. ¿Qué beneficio ofrecen tener las distintas subescalas?, ¿Es posible eliminar o combinar subescalas sin que se pierdan muchos detalles?
#
# A continuación graficamos una matriz de diagramas de dispersión de las ocho puntuaciones para los países OECD del año 2019:

# +
options(repr.plot.width=6, repr.plot.height=6)
PISA <- read.table(file = "data/OECD PISA.txt", row.names= 1, header=TRUE)
pairs(PISA, gap = 0, col="red", pch=16, xaxt="n", yaxt="n")
# -

# La impresión inmediata que obtenemos de esta figura es que están altamente correlacionadas todas las mediciones entre sí.
#
# Hay algunos valores atípicos y algunos pares de mediciones están más estrechamente correlacionados, por supuesto, pero está claro que cualquier medida académica en los datos podría ser una buena representación para la mayoría de las demás.
#
# Es evidente que se pueden simplificar estos datos en gran medida sin una pérdida significativa de información.

# ## Agrupación y clasificación de observaciones
#
# En el 2006 Plutón dejó de ser considerado un planeta de nuestro sistema solar. La argumentación consistió en el hecho que hay una gran cantidad de objetos mayores y menores orbitando el sol a una distancia similar.
#
# Uno de estos grupos recibe el nombre de *Cinturón de Kuiper*, cuyos objetos están a partir de la órbita de Neptuno, de 30 a 55 unidades astronómicas (una unidad astronómica equivale a la distancia de la tierra al sol).
#
# El *Albedo* se refiere a la relación porcentual entre la luz reflejada y la absorbida. La *magnitud absoluta* es una medida del brillo aparente, corregida por la distancia del objeto, medida en una escala logarítmica.
# Una magnitud absoluta mayor indica objetos más oscuros. El *eje semimayor* es la mayor distancia de la órbita al sol, medida en UA.
# # Muchos de los valores de esta tabla son, en el mejor de los casos, estimaciones aproximadas de estos objetos distantes y poco comprendidos. # # ¿Es Plutón realmente algo diferente, o es similar al resto? ¿Merece volver obtener su estatus de planeta o solamente es miembro de un club más grande? # # ¿Puede esta información resumirse de manera que retenga información útil? ¿Dónde cae Plutón desde este punto de vista más amplio? kuiper <- read.table( "data//Kuiper.txt", header=TRUE) kuiper # Otro ejemplo de agrupación son las tasas de cáncer en los 50 estados de los EEUU. En la siguiente tabla se listan las tasas de cáncer de cada uno de los 50 estados de los EEUU y para algunos de los tipos de cáncer más comunes. Estas tasas se reportan como los casos por cada 100 mil personas. Cada estado tiene distintas distribuciones de edades en su población por lo que estas tasas están *ajustadas por la edad*. cancer_rates <- read.csv( "data//US cancer rates.csv", header=TRUE) head(cancer_rates) # ¿Cómo se relacionan los distintos tipos de cáncer entre ellos? ¿Cómo se podrían agrupar los distintos tipos de cáncer con patrones similares? # # También se podrían buscar agrupaciones entres estados, ¿Cuáles estados son similares entre ellos?, ¿Tienen tasas similares estados vecinos?, ¿Cómo se comparan los estados del sur con los del norte? # # ¿Cómo se podrían agrupar todos los datos conjuntamente?, ¿Es la tasa de todos los cánceres un resumen razonable de las tasas para cada estado?, ¿Los estados que se agrupan también tienen tasas comparables de sus "todos los cánceres"?. En lugar de una simple suma de todas las tasas individuales, ¿sería mejor construir un promedio ponderado, donde algunos cánceres reciben más énfasis que otros? ¿Deberían los cánceres más raros (hígado y cuello uterino, por ejemplo) tener más o menos peso que los cánceres más comunes?. 
# ## Examinar la dependencia entre variables # # La tabla a continuación es un resumen de los lugares recomendados de inversión para las principales empresas de gestión financiera a principios del 2011. Los valores representan las asignaciones porcentuales para sus *portafolios modelo*, pero también podrían haber recomendaciones específicas para circunstancias individuales. # # Las acciones y bonos se dividen en tres categorías: Estados Unidos, países industrializados no estadounidenses y países en desarrollo. # # Las inversiones alternativas incluyen arrendamientos, sociedades de petróleo y gas, propiedades inmobiliarias, metales preciosos e inversiones similares. El efectivo incluye inversiones a corto plazo como el mercado monetario, depósitos bancarios y certificados de depósito. # # Es de esperar una pequeña correlación entre las recomendaciones. Se pueden encontrar relaciones entre los porcentajes asignados a las acciones y bonos tradicionales, y también en relación a las empresas especializadas en empresas nacionales y extranjeras. # + invest_alloc <- read.csv( "data//investment allocations.csv", header=TRUE) head(invest_alloc) # - # Otro ejemplo son los datos nutricionales de la colección de algunas de las hamburguesas con más calorías servidas en cadenas de restaurantes que se encuentran en el sitio web fatsecret.com. La nutrición es un concepto intrínsecamente multivariado. No podemos hablar sólo de las calorías cuando hablamos de nutrición, sino que también necesitamos incluir datos sobre cuánto del valor calórico se deriva de las grasas. Las cantidades de sodio (sal) y proteína no forman parte del recuento de calorías y proporcionan diferentes calidades al contenido nutricional total. # # El contenido nutricional se obtuvo de las páginas web de cada restaurante. Cada hamburguesa tiene una lista de calorías y calorías de grasa, grasa, grasa saturada, sodio, carbohidratos y contenido de proteínas. 
El contenido nutricional de cada elemento del menú no es una medida única, sino que se expresa como estos siete componentes separados. Los siete valores están relacionados entre sí y deben ser tomados como un todo. Este es el concepto fundamental de los datos multivariados: cada elemento individual se caracteriza por un conjunto de varias mediciones relacionadas con cada uno de ellos.

# +
burguers <- read.csv( "data//MY burger.csv", header=TRUE)
head(burguers)
# -

# ## Describiendo relaciones entre grupos de variables
#
# Volviendo a la tabla de precios de alquiler de apartamentos y compra de viviendas, ¿Cómo se pueden describir las diferencias de los costos asociados con vivir en cada uno de los estados?. Si añadimos el índice del costo de la vida, aunque las tres variables estén correlacionadas, el uso de regresiones separadas haría que se perdieran las interacciones entre las variables. ¿Qué parte del costo puede explicarse por la población y el ingreso?, y una vez corregido por esta relación, ¿Cuánta diferencia entre los estados puede atribuirse al costo de la vivienda?
#
# ## Formulación de hipótesis y pruebas
#
# En disciplinas como la astronomía en el que se adquieren grandes cantidades de datos, se generan hipótesis que buscan confirmación estadística. En el siguiente gráfico se muestra la magnitud visual de 3858 galaxias en una parte del cielo. Las posiciones en el cielo se establecen en coordenadas polares del mismo modo en que se indican los lugares en la tierra: la ascensión recta es similar a la longitud, y la declinación similar a la latitud.

# +
# Shapley galaxy data
galaxy <- read.table( "data//Shapley_galaxy.dat", header=TRUE)
galaxy <- galaxy[galaxy$Mag > 0, ] # Omit missing values?
head(galaxy) # - plot(galaxy$R.A., galaxy$Dec., pch = 16, cex = .5, xlab = "Right ascension", ylab = "Declination", col=c("blue", "red")[1 + (galaxy$Mag < 16)]) # La magnitud es el brillo visual medido en una escala de registro invertida: los números más grandes son más oscuros. Cada incremento en una unidad es aproximadamente 2.5 veces más débil. Una magnitud de 6 es aproximadamente el límite del ojo humano; esto se extiende a unos 10 con binoculares. La magnitud 27 es el límite aproximado de los telescopios terrestres más grandes, y el Telescopio Espacial Hubble puede detectar la magnitud 32. En el gráfico, las galaxias más brillantes (y generalmente más cercanas) están en rojo y las más tenues en azul. # # Las galaxias más brillantes en rojo parecen estar distribuidas uniformemente a través de este campo, pero las más tenues en azul parecen agruparse en la parte superior izquierda de esta figura. Se sabe que las galaxias no están distribuidas uniformemente, sino más bien, agrupadas como la superficie de las burbujas, en paredes curvas, centradas alrededor de vastos espacios aparentemente vacíos. # # Por supuesto, esta figura sólo muestra los objetos registrados en el momento en que se recopilaron los datos. Las galaxias más oscuras, que aún no han sido detectadas, no están. En la medida que mejore la tecnología, habría muchos más objetos que se dibujarían en azul.
docs/01introduccion.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Interdigitated Transmon Qubits # This demo notebook goes over how to use the interdigitated transmon component, similar to those describedin Gambetta et. al., IEEE Trans. on Superconductivity Vol. 27, No. 1 (2007). # # First, let's import the key libraries for qiskit metal: # Demo notebook for interdigitatd transmon qubit design import qiskit_metal as metal from qiskit_metal import designs, draw from qiskit_metal import MetalGUI, Dict #, open_docs # Next, let's fire up the GUI: design = designs.DesignPlanar() gui = MetalGUI(design) # The name of the component located in the qlibrary is "Transmon_Interdigitated" and we can take a look at the various input options: from qiskit_metal.qlibrary.qubits.Transmon_Interdigitated import TransmonInterdigitated TransmonInterdigitated.default_options # Now let's create three transmons, each centered at a specific (x,y) coordinate: from qiskit_metal.qlibrary.qubits.Transmon_Interdigitated import TransmonInterdigitated design.overwrite_enabled = True q1 = TransmonInterdigitated(design, 'qubit1', options=dict(pos_x='-2.0mm',orientation='-90')) gui.rebuild() gui.autoscale() gui.zoom_on_components(['qubit1']) #Can also gui.zoom_on_components([q1.name]) #Save screenshot as a .png formatted file. gui.screenshot() # + tags=["nbsphinx-thumbnail"] # Screenshot the canvas only as a .png formatted file. gui.figure.savefig('shot.png') from IPython.display import Image, display _disp_ops = dict(width=500) display(Image('shot.png', **_disp_ops)) # - # ## Closing the Qiskit Metal GUI gui.main_window.close()
docs/circuit-examples/A.Qubits/04-Interdigitated_Transmon.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + [markdown] _uuid="eefe260736ba6ae7070912a2cb2f0179593974dc" _cell_guid="ceb3b54f-4199-46e1-acce-7e5d79432f2e" # # Introduction # # You have learned how to select relevant data from `DataFrame` and `Series` objects. Plucking the right data out of our data representation is critical to getting work done. # # However, the data does not always come in the format we want. Sometimes we have to do some more work ourselves to reformat it for our desired task. # # The remainder of this tutorial will cover different operations we can apply to our data to get the input "just right". We'll start off in this section by looking at the most commonly looked built-in reshaping operations. Along the way we'll cover data `dtypes`, a concept essential to working with `pandas` effectively. # + [markdown] _uuid="c9843421bc63a259dd07cf4d20f7b375d24c9f7c" _cell_guid="27cc0f37-f9b5-40da-b1e4-1eca4c26b8df" # # Relevant Resources # * **[Summary functions and maps](https://www.kaggle.com/residentmario/summary-functions-and-maps-reference)** # * [Official pandas cheat sheet](https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf) # # # Set Up # Run the code cell below to load your data and the necessary utility functions. 
# + _uuid="f93afaffdb6993c6fbd1a0229fdb130cf372e8a4" _cell_guid="fde3f674-6fd0-41f2-bf55-b2af076c065f" import pandas as pd pd.set_option('max_rows', 5) import numpy as np from learntools.advanced_pandas.summary_functions_maps import * reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0) # + [markdown] _uuid="f928b1b5d733f9a5abca1f60a5cf84518438d768" _cell_guid="872d5b77-dc25-41ac-8127-93da91565712" # Look at an overview of your data by running the line below: # + [markdown] _uuid="ffa2d47df5c3119929fee7234fdfdc53ac15071c" _cell_guid="6a5f3cbe-2b0a-4ef6-816f-65adb866f63c" # # Checking Answers # # **Check your answers in each exercise using the `check_qN` function** (replacing `N` with the number of the exercise). For example here's how you would check an incorrect answer to exercise 1: # + _uuid="42bad74646c49cec61bf5f0014200bf37eed46f8" _cell_guid="547ec9e3-02f9-4b09-885a-3f97fc0bedbe" check_q1(pd.DataFrame()) # + [markdown] _uuid="aebe052470e69f579b33bfa7c9dbfa5c1b59e31b" _cell_guid="50b1febe-4440-4c6a-8ea0-9290360d332a" # If you get stuck, **use the `answer_qN` function to see the code with the correct answer.** # # For the first set of questions, running the `check_qN` on the correct answer returns `True`. # # For the second set of questions, using this function to check a correct answer will present an informative graph! # # + [markdown] _uuid="a921868d97a94733892f77700c894272bfd5c215" _cell_guid="97d5f969-1ce2-43f5-aab2-97c617fb73a9" # ## Exercises # # Look at your data by running the cell below: # + _uuid="39e56884c533d7c31ef912eace787a0d2dfb2fe3" _cell_guid="e1b2acaf-1ec9-42cf-a732-7a13fa5131cd" reviews.head() # + [markdown] _uuid="ddf41baa215da4eac422b653716d028d86619f29" _cell_guid="27108510-72e4-4b96-9a9f-91ca467ae69e" # **Exercise 1**: What is the median of the `points` column? 
# + _uuid="b0cedffda6b609ce018b7bf58ad511307458dd6e" _cell_guid="57257af5-6a8d-4585-9708-d9293ef5fdd0" q1 = reviews.points.median() print(check_q1(q1)) answer_q1() # + [markdown] _uuid="f91395e1752525f0d8d73700407c95e7d8ad35af" _cell_guid="5d18dd56-6cb9-4a31-994c-19cfdc67c0d9" # **Exercise 2**: What countries are represented in the dataset? # + _uuid="19219235053287a1fb1ae3f9c15f112e8625f14e" _cell_guid="2219317d-6627-4082-930d-ab1962b444e4" q2 = reviews.country.unique() print(check_q2(q2)) answer_q2() # + [markdown] _uuid="0b788fafed337639a804c96afab89b92fb8ec56e" _cell_guid="505f59af-ab2a-4648-81b1-cb5ae2c1ae74" # **Exercise 3**: What countries appear in the dataset most often? # + _uuid="3d485ec9b3177ec9b70ebeac7914f51500faa878" _cell_guid="89283723-297f-4850-a179-6295a6615683" q3 = reviews.country.value_counts() print(check_q3(q3)) answer_q3() # + [markdown] _uuid="a885a5e54b1fe2955098a10637eac3af9118f8b9" _cell_guid="ce551e25-c3cc-4db4-8a12-ff0aaff0df37" # **Exercise 4**: Remap the `price` column by subtracting the median price. Use the `Series.map` method. # + _uuid="db1648614ffab480292ec81009339267d9c5d42b" _cell_guid="c334f6f6-76b3-42c0-8716-43d9319dac46" q4 = reviews.price - reviews.price.median() #median_price = reviews.price.median() #q4 = reviews.price.map(lambda p: p - median_price) print(check_q4(q4)) answer_q4() # + [markdown] _uuid="4c7dccfc6e6dcc09f97c38d29e016f84c6607062" _cell_guid="2f1a49c1-87a2-4e0e-8f90-3d0770c6935a" # **Exercise 5**: I"m an economical wine buyer. Which wine in is the "best bargain", e.g., which wine has the highest points-to-price ratio in the dataset? # # Hint: use a map and the [`argmax` function](http://pandas.pydata.org/pandas-docs/version/0.19.2/generated/pandas.Series.argmax.html). 
# + _uuid="a64044cabdda4291bec09c7ded3de3c62f9df8b7" _cell_guid="7d8cd2ad-c123-41b9-a6f4-9a69d95d836b"
# idxmax (the modern spelling of argmax) gives the index label of the review
# with the highest points-to-price ratio; .loc pulls that full row.
ratio_ind = (reviews.points / reviews.price).idxmax()
q5 = reviews.loc[ratio_ind, :]
# FIX: from this exercise onward the checker numbering is offset by one
# (exercises 6 and 7 below use check_q7/answer_q7 and check_q8/answer_q8),
# so this answer is validated by check_q6, matching answer_q6() below —
# check_q5 checks the previous exercise's answer.
check_q6(q5)
answer_q6()

# + [markdown] _uuid="161b8f55577ad163e0f4406536f31f4a4847e706" _cell_guid="433eaff6-4e2b-4304-8694-e70a11bf05df"
# Now it's time for some visual exercises. In the questions that follow, generate the data that we will need to have in order to produce the plots that follow. These exercises will use skills from this workbook as well as from previous ones. They look a lot like questions you will actually be asking when working with your own data!

# + [markdown] _uuid="fa295d216882dadfbe127ad369efb67f521e47c6" _cell_guid="94e7f19d-ccf9-45a8-b2bf-432d1f9b7d90"
# <!--
# **Exercise 6**: Sometimes the `province` and `region_1` provided in the dataset is the same value. Create a `Series` whose values counts how many times this occurs (`True`) and doesn't occur (`False`).
# -->

# + [markdown] _uuid="cb263542035a146fe49efe78d7e1bb18ae874b22" _cell_guid="ea01da04-cd01-45bf-9592-6dab2d6f991c"
# **Exercise 6**: Is a wine more likely to be "tropical" or "fruity"? Create a `Series` counting how many times each of these two words appears in the `description` column in the dataset.
#
# Hint: use a map to check each description for the string `tropical`, then count up the number of times this is `True`. Repeat this for `fruity`. Create a `Series` combining the two values at the end.
# + _uuid="0dc4c1ea5cc2646e7cebcf905a00fce71d7740e9" _cell_guid="a033c426-1e02-4462-8d5a-9924a2773758" reviews.description[:5] tropical_wine = reviews.description.map(lambda d: "tropical" in d).value_counts() fruity_wine = reviews.description.map(lambda d: "fruity" in d).value_counts() #q6 = pd.Series([tropical_wine[True], fruity_wine[True]], index=["tropical", "fruity"]) q6 = pd.Series({"tropical": tropical_wine[True], "fruity": fruity_wine[True]}) check_q7(q6) answer_q7() # + [markdown] _uuid="20cf342b102dee91b36aecfa5d213fb9a3fee8b9" _cell_guid="e64c1c0d-dd13-4945-a74b-4b64f2e65186" # **Exercise 7**: What combination of countries and varieties are most common? # # Create a `Series` whose index consists of strings of the form `"<Country> - <Wine Variety>"`. For example, a pinot noir produced in the US should map to `"US - Pinot Noir"`. The values should be counts of how many times the given wine appears in the dataset. Drop any reviews with incomplete `country` or `variety` data. # # Hint: you can do this in three steps. First, generate a `DataFrame` whose `country` and `variety` columns are non-null. Then use a map to create a series whose entries are a `str` concatenation of those two columns. Finally, generate a `Series` counting how many times each label appears in the dataset. # + _uuid="5301cd803c94500d641fad28c8617d1f7b4b35d1" _cell_guid="a8c2749e-4996-41bf-9031-31b9c3bf2cc2" country_variety = (reviews.loc[:, ["country", "variety"]]).dropna() series = country_variety.country + " - " + country_variety.variety q7 = series.value_counts() check_q8(q7) answer_q8() # + [markdown] _uuid="a38c20f7ad13253cd75df2a97e0a0d2e182b22df" _cell_guid="7484fe30-6db2-4a0a-8b1f-67db1f52012b" # # Keep Going # **[Continue to grouping and sorting](https://www.kaggle.com/kernels/fork/598715).**
notebooks/kaggle/pandas/03 - Summary functions and maps.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import pandas as pd import matplotlib.pyplot as plt all_news = pd.read_csv('all-the-news-2-1.csv') all_news.head() data = pd.read_csv('./COVID_news_with_dom_topics.csv',index_col='date',parse_dates=['date']) data.head() all_news.publication.value_counts().plot(kind='bar') #data.publication.value_counts().plot(kind='bar') data.publication.value_counts().plot(kind='bar') (data.publication.value_counts()/all_news[all_news.year ==2020].publication.value_counts()).sort_values(ascending = False).plot(kind='bar') data[data.dom_topic==17].groupby('date').count().dom_topic.plot() data[data.dom_topic==23].groupby('date').count().dom_topic.plot() data[data.dom_topic==19].groupby('date').count().dom_topic.plot() data[data.dom_topic==12].groupby('date').count().dom_topic.plot() # China Wuhan data[data.dom_topic==11].groupby('date').count().dom_topic.plot()
notebooks/EDA_news.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Randomized Benchmarking # # # ## Introduction # # **Randomization benchmarking (RB)** is a well-known technique to measure average gate performance by running sequences of random Clifford gates that should return the qubits to the initial state. # Qiskit Ignis has tools to generate one- and two-qubit Clifford gate sequences simultaneously. # # This notebook gives an example for how to use the ``ignis.verification.randomized_benchmarking`` module. This particular example shows how to run 2-qubit randomized benchmarking (RB) simultaneous with 1-qubit RB. There are also examples on how to use some of the companion functions for predicting RB fidelity. # # + #Import general libraries (needed for functions) import numpy as np import matplotlib.pyplot as plt from IPython import display #Import Qiskit classes import qiskit from qiskit.providers.aer.noise import NoiseModel from qiskit.providers.aer.noise.errors.standard_errors import depolarizing_error, thermal_relaxation_error #Import the RB Functions import qiskit.ignis.verification.randomized_benchmarking as rb # - # ## 1) Select the Parameters of the RB Run <a name='select_params_RB'></a> # # First, wee need to choose the following parameters: # # - **nseeds:** The number of seeds. For each seed you will get a separate list of output circuits in rb_circs. # - **length_vector:** The length vector of Clifford lengths. Must be in ascending order. RB sequences of increasing length grow on top of the previous sequences. # - **rb_pattern:** A list of the form [[i,j],[k],...] which will make simultaneous RB sequences where Qi,Qj are a 2-qubit RB sequence and Qk is a 1-qubit sequence, etc. The number of qubits is the sum of the entries. For 'regular' RB the qubit_pattern is just [[0]],[[0,1]]. 
# - **length_multiplier:** If this is an array it scales each rb_sequence by the multiplier. # - **seed_offset:** What to start the seeds at (e.g. if we want to add more seeds later). # - **align_cliffs:** If true adds a barrier across all qubits in rb_pattern after each set of cliffords. # # In this example we have 3 qubits Q0,Q1,Q2. # We are running 2Q RB (on qubits Q0,Q2) and 1Q RB (on qubit Q1) simultaneously, # where there are twice as many 1Q Clifford gates. #Number of qubits nQ = 3 #There are 3 qubits: Q0,Q1,Q2. #Number of seeds (random sequences) nseeds = 5 #Number of Cliffords in the sequence (start, stop, steps) nCliffs = np.arange(1,200,20) #2Q RB on Q0,Q2 and 1Q RB on Q1 rb_pattern = [[0,2],[1]] #Do three times as many 1Q Cliffords length_multiplier = [1,3] # ## 2) Generate the RB sequences <a name='gen_RB_seq'></a> # # We generate RB sequences. We start with a small example (so it doesn't take too long to run). # # In order to generate the RB sequences **rb_circs**, which is a list of lists of quantum circuits, # we run the function `rb.randomized_benchmarking_seq`. # # This function returns: # # - **rb_circs:** A list of lists of circuits for the rb sequences (separate list for each seed). # - **xdata:** The Clifford lengths (with multiplier if applicable). rb_opts = {} rb_opts['length_vector'] = nCliffs rb_opts['nseeds'] = nseeds rb_opts['rb_pattern'] = rb_pattern rb_opts['length_multiplier'] = length_multiplier rb_circs, xdata = rb.randomized_benchmarking_seq(**rb_opts) # As an example, we print the circuit corresponding to the first RB sequence: print(rb_circs[0][0]) # ## Look at the Unitary for 1 Circuit # The Unitary representing each RB circuit should be the identity (with a global phase), # since we multiply random Clifford elements, including a computed reversal gate. We simulate this using an Aer unitary simulator. 
#Create a new circuit without the measurement qc = qiskit.QuantumCircuit(*rb_circs[0][-1].qregs,*rb_circs[0][-1].cregs) for i in rb_circs[0][-1][0:-nQ]: qc.data.append(i) #The Unitary is an identity (with a global phase) backend = qiskit.Aer.get_backend('unitary_simulator') basis_gates = ['u1', 'u2', 'u3', 'cx'] # use U,CX for now job = qiskit.execute(qc, backend=backend, basis_gates=basis_gates) print(np.around(job.result().get_unitary(), 3)) # ## Define the noise model # We define a noise model for the simulator. To simulate decay, we add depolarizing error probabilities to the CNOT and U gates. noise_model = NoiseModel() p1Q = 0.002 p2Q = 0.01 noise_model.add_all_qubit_quantum_error(depolarizing_error(p1Q, 1), 'u2') noise_model.add_all_qubit_quantum_error(depolarizing_error(2*p1Q, 1), 'u3') noise_model.add_all_qubit_quantum_error(depolarizing_error(p2Q, 2), 'cx') # ## 3) Execute the RB sequences on Aer simulator <a name='ex_RB_seq'></a> # # We can execute the RB sequences either using a Qiskit Aer Simulator (with some noise model) or using an IBMQ provider, # and obtain a list of results, `result_list`. 
backend = qiskit.Aer.get_backend('qasm_simulator') basis_gates = ['u1','u2','u3','cx'] # use U,CX for now shots = 200 result_list = [] transpile_list = [] import time for rb_seed,rb_circ_seed in enumerate(rb_circs): print('Compiling seed %d'%rb_seed) rb_circ_transpile = qiskit.transpile(rb_circ_seed, basis_gates=basis_gates) print('Simulating seed %d'%rb_seed) job = qiskit.execute(rb_circ_transpile, noise_model=noise_model, shots=shots, backend=backend, backend_options={'max_parallel_experiments': 0}) result_list.append(job.result()) transpile_list.append(rb_circ_transpile) print("Finished Simulating") # ## 4) Fit the RB results and calculate the gate fidelity <a name='fit_RB'></a> # # ### Get statistics about the survival probabilities # # The results in **result_list** should fit to an exponentially decaying function $A \cdot \alpha ^ m + B$, where $m$ is the Clifford length. # # From $\alpha$ we can calculate the **Error per Clifford (EPC)**: # $$ EPC = \frac{2^n-1}{2^n} (1-\alpha)$$ # (where $n=nQ$ is the number of qubits). #Create an RBFitter object with 1 seed of data rbfit = rb.fitters.RBFitter(result_list[0], xdata, rb_opts['rb_pattern']) # ### Plot After 1 Seed # + plt.figure(figsize=(15, 6)) for i in range(2): ax = plt.subplot(1, 2, i+1) pattern_ind = i # Plot the essence by calling plot_rb_data rbfit.plot_rb_data(pattern_ind, ax=ax, add_label=True, show_plt=False) # Add title and label ax.set_title('%d Qubit RB'%(len(rb_opts['rb_pattern'][i])), fontsize=18) plt.show() # - # ### Plot with the Rest of the Seeds # The plot is being updated after each seed. 
# + rbfit = rb.fitters.RBFitter(result_list[0], xdata, rb_opts['rb_pattern']) for seed_num, data in enumerate(result_list):#range(1,len(result_list)): plt.figure(figsize=(15, 6)) axis = [plt.subplot(1, 2, 1), plt.subplot(1, 2, 2)] # Add another seed to the data rbfit.add_data([data]) for i in range(2): pattern_ind = i # Plot the essence by calling plot_rb_data rbfit.plot_rb_data(pattern_ind, ax=axis[i], add_label=True, show_plt=False) # Add title and label axis[i].set_title('%d Qubit RB - after seed %d'%(len(rb_opts['rb_pattern'][i]), seed_num), fontsize=18) # Display display.display(plt.gcf()) # Clear display after each seed and close display.clear_output(wait=True) time.sleep(1.0) plt.close() # - # ### Add more shots to the data shots = 200 result_list = [] transpile_list = [] for rb_seed,rb_circ_seed in enumerate(rb_circs): print('Compiling seed %d'%rb_seed) rb_circ_transpile = qiskit.transpile(rb_circ_seed, basis_gates=basis_gates) print('Simulating seed %d'%rb_seed) job = qiskit.execute(rb_circ_transpile, noise_model=noise_model, shots=shots, backend=backend, backend_options={'max_parallel_experiments': 0}) result_list.append(job.result()) transpile_list.append(rb_circ_transpile) print("Finished Simulating") # + #Add this data to the previous fit rbfit.add_data(result_list) #Replot plt.figure(figsize=(15, 6)) for i in range(2): ax = plt.subplot(1, 2, i+1) pattern_ind = i # Plot the essence by calling plot_rb_data rbfit.plot_rb_data(pattern_ind, ax=ax, add_label=True, show_plt=False) # Add title and label ax.set_title('%d Qubit RB'%(len(rb_opts['rb_pattern'][i])), fontsize=18) plt.show() # - # ### Predicted Gate Fidelity # From the known depolarizing errors on the simulation we can predict the **fidelity**. # First we need to count the number of **gates per Clifford**. # # The function **gates_per_clifford** takes a list of transpiled RB circuits and outputs the number of basis gates in each circuit. 
#Count the number of single and 2Q gates in the 2Q Cliffords gates_per_cliff = rb.rb_utils.gates_per_clifford(transpile_list,xdata[0],basis_gates,rb_opts['rb_pattern'][0]) for basis_gate in basis_gates: print("Number of %s gates per Clifford: %f "%(basis_gate , np.mean([gates_per_cliff[0][basis_gate], gates_per_cliff[2][basis_gate]]))) # The function **calculate_2q_epc** gives measured errors in the basis gates that were used to construct the Clifford. # It assumes that the error in the underlying gates is depolarizing. It outputs the error per a 2-qubit Clifford. # # The input to this function is: # - **gate_per_cliff:** dictionary of gate per Clifford. # - **epg_2q:** EPG estimated by error model. # - **qubit_pair:** index of two qubits to calculate EPC. # - **list_epgs_1q:** list of single qubit EPGs of qubit listed in ``qubit_pair``. # - **two_qubit_name:** name of two qubit gate in ``basis gates`` (default is ``cx``). # + # Error per gate from noise model epgs_1q = {'u1': 0, 'u2': p1Q/2, 'u3': 2*p1Q/2} epg_2q = p2Q*3/4 pred_epc = rb.rb_utils.calculate_2q_epc( gate_per_cliff=gates_per_cliff, epg_2q=epg_2q, qubit_pair=[0, 2], list_epgs_1q=[epgs_1q, epgs_1q]) # Calculate the predicted epc print("Predicted 2Q Error per Clifford: %e"%pred_epc) # - # ## Run an RB Sequence with T1,T2 Errors # # We now choose RB sequences that contain only 2-qubit Cliffords. # # We execute these sequences as before, but with a noise model extended with T1/T2 thermal relaxation error, and fit the exponentially decaying curve. # + rb_opts2 = rb_opts.copy() rb_opts2['rb_pattern'] = [[0,1]] rb_opts2['length_multiplier'] = 1 rb_circs2, xdata2 = rb.randomized_benchmarking_seq(**rb_opts2) noise_model2 = NoiseModel() #Add T1/T2 noise to the simulation t1 = 100. t2 = 80. 
gate1Q = 0.1  # single-qubit gate duration (same time units as t1/t2)
gate2Q = 0.5  # two-qubit gate duration

# Attach thermal-relaxation (T1/T2) errors to the basis gates:
# u2 lasts one single-qubit gate time, u3 lasts two, and cx applies the
# two-qubit relaxation error to both qubits (tensor product of two
# single-qubit channels).
noise_model2.add_all_qubit_quantum_error(thermal_relaxation_error(t1,t2,gate1Q), 'u2')
noise_model2.add_all_qubit_quantum_error(thermal_relaxation_error(t1,t2,2*gate1Q), 'u3')
noise_model2.add_all_qubit_quantum_error(
    thermal_relaxation_error(t1,t2,gate2Q).tensor(thermal_relaxation_error(t1,t2,gate2Q)), 'cx')
# -

backend = qiskit.Aer.get_backend('qasm_simulator')
basis_gates = ['u1','u2','u3','cx'] # use U,CX for now
shots = 500
result_list2 = []
transpile_list2 = []
for rb_seed,rb_circ_seed in enumerate(rb_circs2):
    print('Compiling seed %d'%rb_seed)
    rb_circ_transpile = qiskit.transpile(rb_circ_seed, basis_gates=basis_gates)
    print('Simulating seed %d'%rb_seed)
    # BUG FIX: this experiment must run under the T1/T2 noise model built
    # above (noise_model2), not the depolarizing noise_model from the
    # earlier experiment -- otherwise the thermal-relaxation errors are
    # never applied and the fit below measures the wrong noise.
    job = qiskit.execute(rb_circ_transpile, noise_model=noise_model2, shots=shots,
                         backend=backend,
                         backend_options={'max_parallel_experiments': 0})
    result_list2.append(job.result())
    transpile_list2.append(rb_circ_transpile)
print("Finished Simulating")

# + tags=["nbsphinx-thumbnail"]
#Create an RBFitter object for the 2-qubit T1/T2 experiment
rbfit = rb.RBFitter(result_list2, xdata2, rb_opts2['rb_pattern'])

plt.figure(figsize=(10, 6))
ax = plt.gca()

# Plot the essence by calling plot_rb_data
rbfit.plot_rb_data(0, ax=ax, add_label=True, show_plt=False)

# Add title and label
ax.set_title('2 Qubit RB with T1/T2 noise', fontsize=18)

plt.show()
# -

# We count again the number of **gates per Clifford** as before, and calculate the **two-qubit Clifford gate error**, using the predicted primitive gate errors from the coherence limit.
#Count the number of single and 2Q gates in the 2Q Cliffords
# BUG FIX: use xdata2 (the Clifford lengths of the T1/T2 experiment) so
# that the lengths match transpile_list2 and rb_opts2 -- xdata belongs to
# the earlier depolarizing-noise experiment and may differ.
gates_per_cliff = rb.rb_utils.gates_per_clifford(transpile_list2, xdata2[0], basis_gates, rb_opts2['rb_pattern'][0])
for basis_gate in basis_gates:
    # Average the per-Clifford gate count over the two qubits of the pair.
    print("Number of %s gates per Clifford: %f "%(basis_gate,
                                                  np.mean([gates_per_cliff[0][basis_gate],
                                                           gates_per_cliff[1][basis_gate]])))

# +
# Predicted primitive gate errors from the coherence limit:
# single-qubit errors for u2 (one gate time) and u3 (two gate times),
# and the two-qubit error for cx.
u2_error = rb.rb_utils.coherence_limit(1, [t1], [t2], gate1Q)
u3_error = rb.rb_utils.coherence_limit(1, [t1], [t2], 2*gate1Q)
epg_2q = rb.rb_utils.coherence_limit(2, [t1, t1], [t2, t2], gate2Q)
epgs_1q = {'u1': 0, 'u2': u2_error, 'u3': u3_error}

# Combine the per-gate errors into a predicted 2-qubit error per Clifford.
pred_epc = rb.rb_utils.calculate_2q_epc(
    gate_per_cliff=gates_per_cliff,
    epg_2q=epg_2q,
    qubit_pair=[0, 1],
    list_epgs_1q=[epgs_1q, epgs_1q])

# Calculate the predicted epc
print("Predicted 2Q Error per Clifford: %e"%pred_epc)
# -

import qiskit.tools.jupyter
# %qiskit_version_table
# %qiskit_copyright
tutorials/noise/4_randomized_benchmarking.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## Task # 1. Read the data set (advertisement.csv) # 2. Perform analysis on data set # 3. Handling the duplicates and null values in the data set # 4. Standardise the data set by using different scalling methods # 5. Plot the dataset(include all fetaures) # 6. visualise the relation ship between any input features and target # 7. visualise the relation ship between all input features and target # ## Day agenda # - cubhelix palette observations using heat map # - Diffrent styles of plots in seaborn # - catagorical scatter plots # - strip plots # - swarm plots # - catagorical distributed plots # - histogram plots # - BoX # - kde plots # - voilin plots # - Categorical Estimator plots # - bar plot # - point plot # - count plot # - joint plot # - pair plot # import seaborn as sns import numpy as np x=np.random.randint(1,20,10).reshape(2,5) c=sns.cubehelix_palette(as_cmap=True) sns.heatmap(x,cmap=c) sns.heatmap(x,annot=True,linewidths=2) # ## catagorical scatter plots # - strip plots # - swarm plots # ## Get datasets from seaborn # import seaborn as sns sns.get_dataset_names() # ## Load the dataset df=sns.load_dataset("iris") df.head() df.info() df.describe() df['species'].value_counts() df.groupby(by='species').sum() # ## catplot # - sns.catplot() # - interface for drawing categorical plots # sns.catplot(data=df) df.columns sns.catplot(x='sepal_length',y='petal_length',data=df) sns.catplot(x='sepal_length',y='petal_length',data=df,hue='petal_width') sns.catplot(x='sepal_length',y='petal_length',data=df,hue='species') sns.catplot(x='sepal_length',y='species',data=df,hue='species',kind='box') sns.catplot(x='sepal_length',y='species',data=df,hue='species',kind='bar',palette=['r','g','b']) 
sns.catplot(x='sepal_length',y='species',data=df,kind='bar',palette=['r','g','b'],col='species') sns.catplot(x='sepal_length',y='species',data=df,kind='violin',palette=['r','g','b'],col='species') sns.catplot(x='sepal_length',y='species',data=df,palette=['r','g','b'],marker='^') sns.catplot(x='sepal_length',y='species',kind='boxen',data=df,palette=['r','g','b']) sns.catplot(x='sepal_length',y='species',kind='swarm',data=df,palette=['r','g','b']) # ## catagorical distributed plots # - histogram plots # - BoX # - kde plots # - voilin plots # ## histogram plots # - Plot univariate or bivariate histograms to show distributions of datasets. # # - A histogram is a classic visualization tool that represents the distribution of one or more variables by counting the number of observations that fall within disrete bins. df1=sns.load_dataset("tips") df1.head() df1.info() df1.columns sns.histplot(data=df1) sns.histplot(x='sex',data=df1) sns.histplot(x='total_bill',data=df1) sns.histplot(x='total_bill',data=df1,bins=5) sns.histplot(y='total_bill',data=df1,binwidth=3) sns.histplot(x='total_bill',data=df1,kde=True) sns.histplot(x='total_bill',data=df1,hue="sex") import seaborn as sns sns.histplot(df1) sns.__version__
Notebooks/Day29-Seaborn Plots/Seaborn style plots.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ## MongoDB Testing # Examples from: http://api.mongodb.com/python/current/tutorial.html import datetime import pymongo from pprint import pprint from pymongo import MongoClient client = MongoClient('localhost', 27017) db = client['chriscoin'] users = db.blockchain print(users.find_one()) # + db = client['test-database'] post = { "author": "Mike", "text": "My first blog post!", "tags": ["mongodb", "python", "pymongo"], "date": datetime.datetime.utcnow() } posts = db.posts post_id = posts.insert_one(post).inserted_id post_id # - import pprint pprint.pprint(posts.find_one()) posts.find_one({"author": "Eliot"}) pprint.pprint(posts.find_one({"_id": post_id})) post_id_as_str = str(post_id) posts.find_one({"_id": post_id_as_str}) # + new_posts = [ { "author": "Mike", "text": "Another post!", "tags": ["bulk", "insert"], "date": datetime.datetime(2009, 11, 12 , 11, 14) }, { "author": "Eliot", "title": "MongoDB is fun", "text": "and pretty easy too!", "date": datetime.datetime(2009, 11, 10 , 10, 45) } ] result = posts.insert_many(new_posts) result.inserted_ids # - for post in posts.find(): pprint.pprint(post) posts.count() posts.find({"author": "Mike"}).count() d = datetime.datetime(2009, 11, 12, 12) for post in posts.find({"date": {"$lt": d}}).sort("author"): pprint.pprint(post) result = db.profiles.create_index([('user_id', pymongo.ASCENDING)], unique=True) sorted(list(db.profiles.index_information())) user_profiles = [ {'user_id': 211, 'name': 'Luke'}, {'user_id': 212, 'name': 'Ziltoid'} ] result = db.profiles.insert_many(user_profiles) new_profile = {'user_id': 213, 'name': 'Drew'} duplicate_profile = {'user_id': 212, 'name': 'Tommy'} result = db.profiles.insert_one(new_profile) result = db.profiles.insert_one(duplicate_profile) users = db.users result 
= db.users.create_index([('username', pymongo.ASCENDING)], unique=True) # + new_user = { 'username': 'Alice', 'password': 'password' } users.insert_one(new_user) # + # for user in users.find(): # pprint.pprint(type(user)) user = users.delete_many({'username': 'Alice'}) # - for user in users.find(): pprint.pprint(user) user = users.find_one({'username': 'Alice'}) print(user) db = client['chriscoin_database'] users = db.users print(users.find_one())
notebooks/pymongo_testing.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.8.9 64-bit (''research_track-0UGwfk25'': venv)'
#     language: python
#     name: python3
# ---

# ## Plots

# +
import matplotlib.pyplot as plt
from matplotlib import rcParams
import pandas as pd
import seaborn as sns

PHYSICAL_CORES=64

def plot(p_data, p_yId, p_xId, p_hueId, p_styleId, p_logScale=False, p_core_marker=False):
    '''Draw a seaborn line plot of p_yId over p_xId from DataFrame p_data.

    p_hueId / p_styleId select the columns used for line colour and style.
    If p_logScale is True, both axes use a logarithmic scale.
    If p_core_marker is True, a dashed red vertical line marks the
    physical-core count so over-subscription is visible on the x axis.
    '''
    rcParams['figure.figsize'] = 11.7,8.27
    plot = sns.lineplot(x=p_xId, y=p_yId, hue=p_hueId, style=p_styleId, data=p_data)
    if p_logScale == True:
        plot.set_yscale('log')
        plot.set_xscale('log')
    plot.set(xlabel=p_xId, ylabel=p_yId)
    plt.grid(True,which="both",ls="--",c='lightgray')
    if(p_core_marker == True):
        # BUG FIX: use the PHYSICAL_CORES constant instead of a
        # hard-coded 64, so changing the constant updates the marker too.
        plt.axvline(PHYSICAL_CORES, linestyle='--', color='red')
    plt.figure(figsize=(1, 1), dpi=80)
    plt.show()
# -

# ### Gauss3

# #### Efficiency by threads

# +
import pandas as pd
import seaborn as sns
sns.set_theme()
sns.set_style("ticks")

data_frame = pd.read_csv('./e_efficiency_by_threads.csv')
data_frame = data_frame[data_frame.region_id == 'apply']
data_frame['id'] = data_frame['impl_id']+data_frame['func_id']
# display(data_frame)
plot(p_data=data_frame, p_yId='runtime', p_xId='threads', p_hueId='id', p_styleId=None, p_logScale=True, p_core_marker=True)
plot(p_data=data_frame, p_yId='efficiency', p_xId='threads', p_hueId='id', p_styleId=None, p_logScale=True, p_core_marker=True)
src_optimization/40_nonlinear_cg/e_plot.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Testing page elements import numpy as np import matplotlib.pyplot as plt square = np.random.randn(100, 100) wide = np.random.randn(100, 1000) # ## Hiding inputs # + tags=["hide_input"] # Hide input square = np.random.randn(100, 100) wide = np.random.randn(100, 1000) fig, ax = plt.subplots() ax.imshow(square) fig, ax = plt.subplots() ax.imshow(wide) # - # ## Hiding outputs # + tags=["hide_output"] # Hide input square = np.random.randn(100, 100) wide = np.random.randn(100, 1000) fig, ax = plt.subplots() ax.imshow(square) fig, ax = plt.subplots() ax.imshow(wide) # - # ## Hiding markdown # + [markdown] tags=["hide_input"] # ```{note} # This is a hidden markdown cell # # It should be hidden! # ``` # - # ## Hiding both inputs and outputs # + tags=["hide_output", "hide_input"] square = np.random.randn(100, 100) wide = np.random.randn(100, 1000) fig, ax = plt.subplots() ax.imshow(square) fig, ax = plt.subplots() ax.imshow(wide) # - # ## Hiding the whole cell # + tags=["hide_cell"] square = np.random.randn(100, 100) wide = np.random.randn(100, 1000) fig, ax = plt.subplots() ax.imshow(square) fig, ax = plt.subplots() ax.imshow(wide) # - # ## Removing inputs / outputs # + tags=["remove_input"] # Remove input square = np.random.randn(100, 100) wide = np.random.randn(100, 1000) fig, ax = plt.subplots() ax.imshow(square) fig, ax = plt.subplots() ax.imshow(wide) # + tags=["remove_output"] # Remove output square = np.random.randn(100, 100) wide = np.random.randn(100, 1000) fig, ax = plt.subplots() ax.imshow(square) fig, ax = plt.subplots() ax.imshow(wide) # - # ## Removing markdown cells # # Remove the markdown cell below (below should not be there) # + [markdown] tags=["remove_input"] # # This markdown should be removed # - # ## Full width # + 
tags=["full_width"] ## A full-width square figure fig, ax = plt.subplots() ax.imshow(square) # + tags=["full_width"] ## A full-width wide figure fig, ax = plt.subplots() ax.imshow(wide) # - # Now here's the same figure at regular width fig, ax = plt.subplots() ax.imshow(wide) # + [markdown] tags=["full_width"] # ## Full-width markdown # # This is some markdown that should be shown at full width. # # Here's the Jupyter logo: # # ![](https://raw.githubusercontent.com/adebar/awesome-jupyter/master/logo.png) # - # ## Sidebar # + tags=["popout"] ## code cell in the sidebar with output fig, ax = plt.subplots() ax.imshow(wide) # + [markdown] tags=["popout"] # Markdown cell with code in sidebar # # ```python # a = 2 # b = 3 # def aplusb(a, b): # return a+b # ``` # and now r # # ```r # a <- 2 # b <- 4 # a+b # ``` # # how does it look? # # Markdown cell with images in sidebar # # <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" style="max-width:200px" /> # - # # More content after the popouts # # This is extra content after the popouts to see if cells overlap and such. # Also to make sure you can still interact with the popout content. # This is extra content after the popouts to see if cells overlap and such. # Also to make sure you can still interact with the popout content. # # ```python # a = 2 # ``` # # This is extra content after the popouts to see if cells overlap and such. # Also to make sure you can still interact with the popout content. # This is extra content after the popouts to see if cells overlap and such. # Also to make sure you can still interact with the popout content. # This is extra content after the popouts to see if cells overlap and such. # Also to make sure you can still interact with the popout content. # # Quotations and epigraphs # # This last section shows quotations and epigraphs. 
First off, we'll have a quotation: # # A quote with no attribution: # # > Here's my quote, it's pretty neat. # > I wonder how many lines I can create with # > a single stream-of-consciousness quote. # > I could try to add a list of ideas to talk about. # > I suppose I could just keep going on forever, # > but I'll stop here. # # A quote with attribution # # > Here's my quote, it's pretty neat. # > I wonder how many lines I can create with # > a single stream-of-consciousness quote. # > I could try to add a list of ideas to talk about. # > I suppose I could just keep going on forever, # > but I'll stop here. # > # > - <NAME> # + [markdown] tags=["epigraph"] # And now here's the same thing with an epigraph! # # A quote with no attribution: # # > Here's my quote, it's pretty neat. # > I wonder how many lines I can create with # > a single stream-of-consciousness quote. # > I could try to add a list of ideas to talk about. # > I suppose I could just keep going on forever, # > but I'll stop here. # # A quote with attribution # # > Here's my quote, it's pretty neat. # > I wonder how many lines I can create with # > a single stream-of-consciousness quote. # > I could try to add a list of ideas to talk about. # > I suppose I could just keep going on forever, # > but I'll stop here. # > - <NAME> # -
docs/test_pages/layout_elements.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: SageMath 8.7 # language: '' # name: sagemath # --- # + [markdown] deletable=false # # [Introduction to Data Science: A Comp-Math-Stat Approach](https://lamastex.github.io/scalable-data-science/as/2019/) # ## YOIYUI001, Summer 2019 # &copy;2019 <NAME>. [Attribution 4.0 International (CC BY 4.0)](https://creativecommons.org/licenses/by/4.0/) # - # # 10. Convergence of Limits of Random Variables, Confidence Set Estimation and Testing # # # Inference and Estimation: The Big Picture # # - Limits # - Limits of Sequences of Real Numbers # - Limits of Functions # - Limit of a Sequence of Random Variables # - Convergence in Distribution # - Convergence in Probability # - Some Basic Limit Laws in Statistics # - Weak Law of Large Numbers # - Central Limit Theorem # - Asymptotic Normality of the Maximum Likelihood Estimator # - Set Estimators - Confidence Intervals and Sets from Maximum Likelihood Estimators # - Parametric Hypothesis Test - From Confidence Interval to Wald test # # # ### Inference and Estimation: The Big Picture # # The Models and their maximum likelihood estimators we discussed earlier fit into our Big Picture, which is about inference and estimation and especially inference and estimation problems where computational techniques are helpful. 
# # <table border="1" cellspacing="2" cellpadding="2" align="center"> # <tbody> # <tr> # <td style="background-color: #ccccff;" align="center">&nbsp;</td> # <td style="background-color: #ccccff;" align="center"><strong>Point estimation</strong></td> # <td style="background-color: #ccccff;" align="center"><strong>Set estimation</strong></td> # <td style="background-color: #ccccff;" align="center"><strong>Hypothesis Testing</strong></td> # </tr> # <tr> # <td style="background-color: #ccccff;"> # <p><strong>Parametric</strong></p> # <p>&nbsp;</p> # </td> # <td style="background-color: #ccccff;" align="center"> # <p>MLE of finitely many parameters<br /><span style="color: #3366ff;"><em>done</em></span></p> # </td> # <td style="background-color: #ccccff;" align="center"> # <p>Asymptotically Normal Confidence Intervals<br /><span style="color: #3366ff;"><em>about to see ...</em></span></p> # </td> # <td style="background-color: #ccccff;" align="center"> # <p>Wald Test from Confidence Interval<br /><span style="color: #3366ff;"><em>about to see ...</em></span></p> # </td> # </tr> # <tr> # <td style="background-color: #ccccff;"> # <p><strong>Non-parametric</strong><br /> (infinite-dimensional parameter space)</p> # </td> # <td style="background-color: #ccccff;" align="center"><strong><em><span style="color: #3366ff;">coming up ... </span></em></strong></td> # <td style="background-color: #ccccff;" align="center"><strong><em><span style="color: #3366ff;">coming up ... </span></em></strong></td> # <td style="background-color: #ccccff;" align="center"><strong><em><span style="color: #3366ff;">coming up ... </span></em></strong></td> # </td> # </tr> # </tbody> # </table> # # But before we move on we have to discuss what makes it all work: the idea of limits - where do you get to if you just keep going? # # ## Limits # # We talked about the likelihood function and maximum likelihood estimators for making point estimates of model parameters. 
For example for the $Bernoulli(\theta^*)$ RV (a $Bernoulli$ RV with true but possibly unknown parameter $\theta^*$, we found that the likelihood function was $L_n(\theta) = \theta^{t_n}(1-\theta)^{(n-t_n)}$ where $t_n = \displaystyle\sum_{i=1}^n x_i$. We also found the maxmimum likelihood estimator (MLE) for the $Bernoulli$ model, $\widehat{\theta}_n = \frac{1}{n}\displaystyle\sum_{i=1}^n x_i$. # # We demonstrated these ideas using samples simulated from a $Bernoulli$ process with a secret $\theta^*$. We had an interactive plot of the likelihood function where we could increase $n$, the number of simulated samples or the amount of data we had to base our estimate on, and see the effect on the shape of the likelihood function. The animation belows shows the changing likelihood function for the Bernoulli process with unknown $\theta^*$ as $n$ (the amount of data) increases. # # # <table style="width:100%"> # <tr> # <th>Likelihood function for Bernoulli process, as $n$ goes from 1 to 1000 in a continuous loop.</th> # </tr> # <tr> # <th><img src="images/bernoulliLikelihoodAnim.gif" width=300></th> # </tr> # </table> # # For large $n$, you can probably make your own guess about the true value of $\theta^*$ even without knowing $t_n$. As the animation progresses, we can see the likelihood function 'homing in' on $\theta = 0.3$. # # We can see this in another way, by just looking at the sample mean as $n$ increases. An easy way to do this is with running means: generate a very large sample and then calculate the mean first over just the first observation in the sample, then the first two, first three, etc etc (running means were discussed in an earlier worksheet if you want to go back and review them in detail in your own time). Here we just define a function so that we can easily generate sequences of running means for our $Bernoulli$ process with the unknown $\theta^*$. # #### Preparation: Let's just evaluate the next cel and focus on concepts. 
# # You can see what they are as you need to. # + def likelihoodBernoulli(theta, n, tStatistic): '''Bernoulli likelihood function. theta in [0,1] is the theta to evaluate the likelihood at. n is the number of observations. tStatistic is the sum of the n Bernoulli observations. return a value for the likelihood of theta given the n observations and tStatistic.''' retValue = 0 # default return value if (theta >= 0 and theta <= 1): # check on theta mpfrTheta = RR(theta) # make sure we use a Sage mpfr retValue = (mpfrTheta^tStatistic)*(1-mpfrTheta)^(n-tStatistic) return retValue def bernoulliFInverse(u, theta): '''A function to evaluate the inverse CDF of a bernoulli. Param u is the value to evaluate the inverse CDF at. Param theta is the distribution parameters. Returns inverse CDF under theta evaluated at u''' return floor(u + theta) def bernoulliSample(n, theta, simSeed=None): '''A function to simulate samples from a bernoulli distribution. Param n is the number of samples to simulate. Param theta is the bernoulli distribution parameter. Param simSeed is a seed for the random number generator, defaulting to 30. Returns a simulated Bernoulli sample as a list.''' set_random_seed(simSeed) us = [random() for i in range(n)] set_random_seed(None) return [bernoulliFInverse(u, theta) for u in us] # use bernoulliFInverse in a list comprehension def bernoulliSampleSecretTheta(n, theta=0.30, simSeed=30): '''A function to simulate samples from a bernoulli distribution. Param n is the number of samples to simulate. Param theta is the bernoulli distribution parameter. Param simSeed is a seed for the random number generator, defaulting to 30. 
Returns a simulated Bernoulli sample as a list.''' set_random_seed(simSeed) us = [random() for i in range(n)] set_random_seed(None) return [bernoulliFInverse(u, theta) for u in us] # use bernoulliFInverse in a list comprehension def bernoulliRunningMeans(n, myTheta, mySeed = None): '''Function to give a list of n running means from bernoulli with specified theta. Param n is the number of running means to generate. Param myTheta is the theta for the Bernoulli distribution Param mySeed is a value for the seed of the random number generator, defaulting to None.''' sample = bernoulliSample(n, theta=myTheta, simSeed = mySeed) from pylab import cumsum # we can import in the middle of code csSample = list(cumsum(sample)) samplesizes = range(1, n+1,1) return [RR(csSample[i])/samplesizes[i] for i in range(n)] #return a plot object for BernoulliLikelihood using the secret theta bernoulli generator def plotBernoulliLikelihoodSecretTheta(n): '''Return a plot object for BernoulliLikelihood using the secret theta bernoulli generator. Param n is the number of simulated samples to generate and do likelihood plot for.''' thisBSample = bernoulliSampleSecretTheta(n) # make sample tn = sum(thisBSample) # summary statistic from pylab import arange ths = arange(0,1,0.01) # get some values to plot against liks = [likelihoodBernoulli(t,n,tn) for t in ths] # use the likelihood function to generate likelihoods redshade = 1*n/1000 # fancy colours blueshade = 1 - redshade return line(zip(ths, liks), rgbcolor = (redshade, 0, blueshade)) def cauchyFInverse(u): '''A function to evaluate the inverse CDF of a standard Cauchy distribution. Param u is the value to evaluate the inverse CDF at.''' return RR(tan(pi*(u-0.5))) def cauchySample(n): '''A function to simulate samples from a standard Cauchy distribution. 
Param n is the number of samples to simulate.''' us = [random() for i in range(n)] return [cauchyFInverse(u) for u in us] def cauchyRunningMeans(n): '''Function to give a list of n running means from standardCauchy. Param n is the number of running means to generate.''' sample = cauchySample(n) from pylab import cumsum csSample = list(cumsum(sample)) samplesizes = range(1, n+1,1) return [RR(csSample[i])/samplesizes[i] for i in range(n)] def twoRunningMeansPlot(nToPlot, iters): '''Function to return a graphics array containing plots of running means for Bernoulli and Standard Cauchy. Param nToPlot is the number of running means to simulate for each iteration. Param iters is the number of iterations or sequences of running means or lines on each plot to draw. Returns a graphics array object containing both plots with titles.''' xvalues = range(1, nToPlot+1,1) for i in range(iters): shade = 0.5*(iters - 1 - i)/iters # to get different colours for the lines bRunningMeans = bernoulliSecretThetaRunningMeans(nToPlot) cRunningMeans = cauchyRunningMeans(nToPlot) bPts = zip(xvalues, bRunningMeans) cPts = zip(xvalues, cRunningMeans) if (i < 1): p1 = line(bPts, rgbcolor = (shade, 0, 1)) p2 = line(cPts, rgbcolor = (1-shade, 0, shade)) cauchyTitleMax = max(cRunningMeans) # for placement of cauchy title else: p1 += line(bPts, rgbcolor = (shade, 0, 1)) p2 += line(cPts, rgbcolor = (1-shade, 0, shade)) if max(cRunningMeans) > cauchyTitleMax: cauchyTitleMax = max(cRunningMeans) titleText1 = "Bernoulli running means" # make title text t1 = text(titleText1, (nToGenerate/2,1), rgbcolor='blue',fontsize=10) titleText2 = "Standard Cauchy running means" # make title text t2 = text(titleText2, (nToGenerate/2,ceil(cauchyTitleMax)+1), rgbcolor='red',fontsize=10) return graphics_array((p1+t1,p2+t2)) def pmfPointMassPlot(theta): '''Returns a pmf plot for a point mass function with parameter theta.''' ptsize = 10 linethick = 2 fudgefactor = 0.07 # to fudge the bottom line drawing pmf = 
points((theta,1), rgbcolor="blue", pointsize=ptsize) pmf += line([(theta,0),(theta,1)], rgbcolor="blue", linestyle=':') pmf += points((theta,0), rgbcolor = "white", faceted = true, pointsize=ptsize) pmf += line([(min(theta-2,-2),0),(theta-0.05,0)], rgbcolor="blue",thickness=linethick) pmf += line([(theta+.05,0),(theta+2,0)], rgbcolor="blue",thickness=linethick) pmf+= text("Point mass f", (theta,1.1), rgbcolor='blue',fontsize=10) pmf.axes_color('grey') return pmf def cdfPointMassPlot(theta): '''Returns a cdf plot for a point mass function with parameter theta.''' ptsize = 10 linethick = 2 fudgefactor = 0.07 # to fudge the bottom line drawing cdf = line([(min(theta-2,-2),0),(theta-0.05,0)], rgbcolor="blue",thickness=linethick) # padding cdf += points((theta,1), rgbcolor="blue", pointsize=ptsize) cdf += line([(theta,0),(theta,1)], rgbcolor="blue", linestyle=':') cdf += line([(theta,1),(theta+2,1)], rgbcolor="blue", thickness=linethick) # padding cdf += points((theta,0), rgbcolor = "white", faceted = true, pointsize=ptsize) cdf+= text("Point mass F", (theta,1.1), rgbcolor='blue',fontsize=10) cdf.axes_color('grey') return cdf def uniformFInverse(u, theta1, theta2): '''A function to evaluate the inverse CDF of a uniform(theta1, theta2) distribution. u, u should be 0 <= u <= 1, is the value to evaluate the inverse CDF at. theta1, theta2, theta2 > theta1, are the uniform distribution parameters.''' return theta1 + (theta2 - theta1)*u def uniformSample(n, theta1, theta2): '''A function to simulate samples from a uniform distribution. n > 0 is the number of samples to simulate. theta1, theta2 (theta2 > theta1) are the uniform distribution parameters.''' us = [random() for i in range(n)] return [uniformFInverse(u, theta1, theta2) for u in us] def exponentialFInverse(u, lam): '''A function to evaluate the inverse CDF of a exponential distribution. u is the value to evaluate the inverse CDF at. 
lam is the exponential distribution parameter.''' # log without a base is the natural logarithm return (-1.0/lam)*log(1 - u) def exponentialSample(n, lam): '''A function to simulate samples from an exponential distribution. n is the number of samples to simulate. lam is the exponential distribution parameter.''' us = [random() for i in range(n)] return [exponentialFInverse(u, lam) for u in us] # - # To get back to our running means of Bernoullin RVs: def bernoulliSecretThetaRunningMeans(n, mySeed = None): '''Function to give a list of n running means from Bernoulli with unknown theta. Param n is the number of running means to generate. Param mySeed is a value for the seed of the random number generator, defaulting to None Note: the unknown theta parameter for the Bernoulli process is defined in bernoulliSampleSecretTheta Return a list of n running means.''' sample = bernoulliSampleSecretTheta(n, simSeed = mySeed) from pylab import cumsum # we can import in the middle of code csSample = list(cumsum(sample)) samplesizes = range(1, n+1,1) return [RR(csSample[i])/samplesizes[i] for i in range(n)] # Now we can use this function to look at say 5 different sequences of running means (they will be different, because for each iteration, we will simulate a different sample of $Bernoulli$ observations). nToGenerate = 1500 iterations = 5 xvalues = range(1, nToGenerate+1,1) for i in range(iterations): redshade = 0.5*(iterations - 1 - i)/iterations # to get different colours for the lines bRunningMeans = bernoulliSecretThetaRunningMeans(nToGenerate) pts = zip(xvalues,bRunningMeans) if (i == 0): p = line(pts, rgbcolor = (redshade,0,1)) else: p += line(pts, rgbcolor = (redshade,0,1)) show(p, figsize=[5,3], axes_labels=['n','sample mean']) # What we notice is how the different lines **converge** on a sample mean of close to 0.3. # # Is life always this easy? Unfortunately no. 
In the plot below we show the well-behaved running means for the $Bernoulli$ and beside them the running means for simulated standard $Cauchy$ random variables. They are all over the place, and each time you re-evaluate the cell you'll get different all-over-the-place behaviour. nToGenerate = 1500 iterations = 5 g = twoRunningMeansPlot(nToGenerate, iterations) # uses above function to make plot show(g,figsize=[10,5]) # We talked about the Cauchy in more detail in an earlier notebook. If you cannot recall the detail and are interested, go back to that in your own time. The message here is that although with the Bernoulli process, the sample means converge as the number of observations increases, with the Cauchy they do not. # # # # # Limits of a Sequence of Real Numbers # # A sequence of real numbers $x_1, x_2, x_3, \ldots $ (which we can also write as $\{ x_i\}_{i=1}^\infty$) is said to converge to a limit $a \in \mathbb{R}$, # # $$\underset{i \rightarrow \infty}{\lim} x_i = a$$ # # if for every natural number $m \in \mathbb{N}$, a natural number $N_m \in \mathbb{N}$ exists such that for every $j \geq N_m$, $\left|x_j - a\right| \leq \frac{1}{m}$ # # What is this saying? $\left|x_j - a\right|$ is measuring the closeness of the $j$th value in the sequence to $a$. If we pick bigger and bigger $m$, $\frac{1}{m}$ will get smaller and smaller. The definition of the limit is saying that if $a$ is the limit of the sequence then we can get the sequence to become as close as we want ('arbitrarily close') to $a$, and to stay that close, by going far enough into the sequence ('for every $j \geq N_m$, $\left|x_j - a\right| \leq \frac{1}{m}$') # # ($\mathbb{N}$, the natural numbers, are just the 'counting numbers' $\{1, 2, 3, \ldots\}$.) 
# # # # Take a trivial example, the sequence $\{x_i\}_{i=1}^\infty = 17, 17, 17, \ldots$ # # Clearly, $\underset{i \rightarrow \infty}{\lim} x_i = 17$, but let's do this formally: # # For every $m \in \mathbb{N}$, take $N_m =1$, then # # $\forall$ $j \geq N_m=1, \left|x_j -17\right| = \left|17 - 17\right| = 0 \leq \frac{1}{m}$, as required. # # ($\forall$ is mathspeak for 'for all' or 'for every') # # # # What about $\{x_i\}_{i=1}^\infty = \displaystyle\frac{1}{1}, \frac{1}{2}, \frac{1}{3}, \ldots$, i.e., $x_i = \frac{1}{i}$? # # $\underset{i \rightarrow \infty}{\lim} x_i = \underset{i \rightarrow \infty}{\lim}\frac{1}{i} = 0$ # # For every $m \in \mathbb{N}$, take $N_m = m$, then $\forall$ $j \geq m$, $\left|x_j - 0\right| \leq \left |\frac{1}{m} - 0\right| = \frac{1}{m}$ # # ### YouTry # # Think about $\{x_i\}_{i=1}^\infty = \frac{1}{1^p}, \frac{1}{2^p}, \frac{1}{3^p}, \ldots$ with $p > 0$. The limit$\underset{i \rightarrow \infty}{\lim} \displaystyle\frac{1}{i^p} = 0$, provided $p > 0$. # # You can draw the plot of this very easily using the Sage symbolic expressions we have already met (`f.subs(...)` allows us to substitute a particular value for one of the symbolic variables in the symbolic function `f`, in this case a value to use for $p$). var('i, p') f = 1/(i^p) # make and show plot, note we can use f in the label plot(f.subs(p=1), (x, 0.1, 3), axes_labels=('i',f)).show(figsize=[6,3]) # What about $\{x_i\}_{i=1}^\infty = 1^{\frac{1}{1}}, 2^{\frac{1}{2}}, 3^{\frac{1}{3}}, \ldots$. The limit$\underset{i \rightarrow \infty}{\lim} i^{\frac{1}{i}} = 1$. # # This one is not as easy to see intuitively, but again we can plot it with SageMath. var('i') f = i^(1/i) n=500 p=plot(f.subs(p=1), (x, 0, n), axes_labels=('i',f)) # main plot p+=line([(0,1),(n,1)],linestyle=':') # add a dotted line at height 1 p.show(figsize=[6,3]) # show the plot # Finally, $\{x_i\}_{i=1}^\infty = p^{\frac{1}{1}}, p^{\frac{1}{2}}, p^{\frac{1}{3}}, \ldots$, with $p > 0$. 
# The limit $\underset{i \rightarrow \infty}{\lim} p^{\frac{1}{i}} = 1$ provided $p > 0$.
#
# You can cut and paste (with suitable adaptations) to try to plot this one as well ...

x

# (end of You Try)
#
# ---
#
# *back to the real stuff ...*
#
# # Limits of Functions
#
# We say that a function $f(x): \mathbb{R} \rightarrow \mathbb{R}$ has a limit $L \in \mathbb{R}$ as $x$ approaches $a$:
#
# $$\underset{x \rightarrow a}{\lim} f(x) = L$$
#
# provided $f(x)$ is arbitrarily close to $L$ for all ($\forall$) values of $x$ that are sufficiently close to but not equal to $a$.
#
# For example
#
# Consider the function $f(x) = (1+x)^{\frac{1}{x}}$
#
# $\underset{x \rightarrow 0}{\lim} f(x) = \underset{x \rightarrow 0}{\lim} (1+x)^{\frac{1}{x}} = e \approx 2.71828\cdots$
#
# even though $f(0) = (1+0)^{\frac{1}{0}}$ is undefined!

# x is defined as a symbolic variable by default by Sage so we do not need var('x')
f = (1+x)^(1/x)
# uncomment and try evaluating next line
#f.subs(x=0) # this will give you an error message

# BUT: If you are interested in the "Art of dividing by zero" talk to Professor <NAME> in Maths Department!

# You can get some idea of what is going on with two plots on different scales
f = (1+x)^(1/x)
n1=5
p1=plot(f.subs(p=1), (x, 0.001, n1), axes_labels=('x',f)) # main plot
t1 = text("Large scale plot", (n1/2,e), rgbcolor='blue',fontsize=10)
n2=0.1
p2=plot(f.subs(p=1), (x, 0.0000001, n2), axes_labels=('x',f)) # main plot
p2+=line([(0,e),(n2,e)],linestyle=':') # add a dotted line at height e
t2 = text("Small scale plot", (n2/2,e+.01), rgbcolor='blue',fontsize=10)
show(graphics_array((p1+t1,p2+t2)),figsize=[6,3]) # show the plot

# all this has been laying the groundwork for the topic of real interest to us ...
#
# # Limit of a Sequence of Random Variables
#
# We want to be able to say things like $\underset{i \rightarrow \infty}{\lim} X_i = X$ in some sensible way.
# $X_i$ are some random variables, $X$ is some 'limiting random variable', but what do we mean by 'limiting random variable'?
#
# To help us, lets introduce a very very simple random variable, one that puts all its mass in one place.

theta = 2.0
# pmfPointMassPlot and cdfPointMassPlot are helpers defined earlier in the notebook
show(graphics_array((pmfPointMassPlot(theta),cdfPointMassPlot(theta))),\
     figsize=[8,2]) # show the plots

# This is known as the $Point\,Mass(\theta)$ random variable, $\theta \in \mathbb{R}$: the density $f(x)$ is 1 if $x=\theta$ and 0 everywhere else
#
# $$
# f(x;\theta) =
# \begin{cases}
# 0 & \text{ if } x \neq \theta \\
# 1 & \text{ if } x = \theta
# \end{cases}
# $$
#
# $$
# F(x;\theta) =
# \begin{cases}
# 0 & \text{ if } x < \theta \\
# 1 & \text{ if } x \geq \theta
# \end{cases}
# $$
#
# So, if we had some sequence $\{\theta_i\}_{i=1}^\infty$ and $\underset{i \rightarrow \infty}{\lim} \theta_i = \theta$
#
# and we had a sequence of random variables $X_i \sim Point\,Mass(\theta_i)$, $i = 1, 2, 3, \ldots$
#
# then we could talk about a limiting random variable as $X \sim Point\,Mass(\theta)$:
#
# i.e., we could talk about $\underset{i \rightarrow \infty}{\lim} X_i = X$

# mock up a picture of a sequence of point mass rvs converging on theta = 0
ptsize = 20
i = 1
theta_i = 1/i
p = points((theta_i,1), rgbcolor="blue", pointsize=ptsize)
p += line([(theta_i,0),(theta_i,1)], rgbcolor="blue", linestyle=':')
# keep adding spikes at 1/i until they are visually indistinguishable from 0
while theta_i > 0.01:
    i+=1
    theta_i = 1/i
    p += points((theta_i,1), rgbcolor="blue", pointsize=ptsize)
    p += line([(theta_i,0),(theta_i,1)], rgbcolor="blue", linestyle=':')
# the limiting Point Mass(0) spike, drawn in red
p += points((0,1), rgbcolor="red", pointsize=ptsize)
p += line([(0,0),(0,1)], rgbcolor="red", linestyle=':')
p.show(xmin=-1, xmax = 2, ymin=0, ymax = 1.1, axes=false, gridlines=[None,[0]], \
       figsize=[7,2])

# Now, we want to generalise this notion of a limit to other random variables (that are not necessarily $Point\,Mass(\theta_i)$ RVs)
#
# What about one many of you will be familiar with - the 'bell-shaped curve'
#
# ## The $Gaussian(\mu, \sigma^2)$ or
# $Normal(\mu, \sigma^2)$ RV?
#
# The probability density function (PDF) $f(x)$ is given by
#
# $$
# f(x ;\mu, \sigma) = \displaystyle\frac{1}{\sigma\sqrt{2\pi}}\exp\left(\frac{-1}{2\sigma^2}(x-\mu)^2\right)
# $$
#
# The two parameters, $\mu \in \mathbb{R} := (-\infty,\infty)$ and $\sigma \in (0,\infty)$, are sometimes referred to as the location and scale parameters.
#
# To see why this is, use the interactive plot below to have a look at what happens to the shape of the density function $f(x)$ when you change $\mu$ or increase or decrease $\sigma$:

@interact
def _(my_mu=input_box(0, label='mu') ,my_sigma=input_box(1,label='sigma')):
    '''Interactive function to plot the Normal(my_mu, my_sigma^2) pdf.'''
    if my_sigma > 0: # sigma, the scale parameter, must be strictly positive
        html('<h4>Normal('+str(my_mu)+','+str(my_sigma)+'<sup>2</sup>)</h4>')
        var('mu sigma')
        # symbolic normal density; the chosen mu and sigma are substituted in below
        f = (1/(sigma*sqrt(2.0*pi)))*exp(-1.0/(2*sigma^2)*(x - mu)^2)
        # plot over mean +/- 3 standard deviations, padded by 2
        p1=plot(f.subs(mu=my_mu,sigma=my_sigma), \
                (x, my_mu - 3*my_sigma - 2, my_mu + 3*my_sigma + 2),\
                axes_labels=('x','f(x)'))
        show(p1,figsize=[8,3])
    else:
        print "sigma must be greater than 0"

# Consider the sequence of random variables $X_1, X_2, X_3, \ldots$, where
#
# - $X_1 \sim Normal(0, 1)$
# - $X_2 \sim Normal(0, \frac{1}{2})$
# - $X_3 \sim Normal(0, \frac{1}{3})$
# - $X_4 \sim Normal(0, \frac{1}{4})$
# - $\vdots$
# - $X_i \sim Normal(0, \frac{1}{i})$
# - $\vdots$
#
# We can use the animation below to see how the PDF $f_{i}(x)$ looks as we move through the sequence of $X_i$ (the animation only goes to $i = 25$, $\sigma = 0.04$ but you get the picture ...)
# # <table style="width:100%"> # <tr> # <th>Normal curve animation, looping through $\sigma = \frac{1}{i}$ for $i = 1, \dots, 25$</th> # </tr> # <tr> # <th><img src="images/normalDecreasing.gif" width=300></th> # </tr> # </table> # # We can see that the probability mass of $X_i \sim Normal(0, \frac{1}{i})$ increasingly concentrates about 0 as $i \rightarrow \infty$ and $\frac{1}{i} \rightarrow 0$ # # Does this mean that $\underset{i \rightarrow \infty}{\lim} X_i = Point\,Mass(0)$? # # No, because for any $i$, however large, $P(X_i = 0) = 0$ because $X_i$ is a continuous RV (for any continuous RV $X$, for any $x \in \mathbb{R}$, $P(X=x) = 0$). # # So, we need to refine our notions of convergence when we are dealing with random variables # # # Convergence in Distribution # # Let $X_1, X_2, \ldots$ be a sequence of random variables and let $X$ be another random variable. Let $F_i$ denote the distribution function (DF) of $X_i$ and let $F$ denote the distribution function of $X$. # # Now, if for any real number $t$ at which $F$ is continuous, # # $$\underset{i \rightarrow \infty}{\lim} F_i(t) = F(t)$$ # # (in the sense of the convergence or limits of functions we talked about earlier) # # Then we can say that the sequence of RVs $X_i$, $i = 1, 2, \ldots$ **converges to $X$ in distribution** and write $X_i \overset{d}{\rightarrow} X$. # # An equivalent way of defining convergence in distribution is to go right back to the meaning of the probabilty space 'under the hood' of a random variable, a random variable $X$ as a mapping from the sample space $\Omega$ to the real line ($X: \Omega \rightarrow \mathbb{R}$), and the sample points or outcomes in the sample space, the $\omega \in \Omega$. For $\omega \in \Omega$, $X(\omega)$ is the mapping of $\omega$ to the real line $\mathbb{R}$. We could look at the set of $\omega$ such that $X(\omega) \leq t$, i.e. the set of $\omega$ that map to some value on the real line less than or equal to $t$, $\{\omega: X(\omega) \leq t \}$.
# # Saying that for any $t \in \mathbb{R}$, $\underset{i \rightarrow \infty}{\lim} F_i(t) = F(t)$ is the equivalent of saying that for any $t \in \mathbb{R}$, # # $$\underset{i \rightarrow \infty}{\lim} P\left(\{\omega:X_i(\omega) \leq t \}\right) = P\left(\{\omega: X(\omega) \leq t\}\right)$$ # # Armed with this, we can go back to our sequence of $Normal$ random variables $X_1, X_2, X_3, \ldots$, where # # - $X_1 \sim Normal(0, 1)$ # - $X_2 \sim Normal(0, \frac{1}{2})$ # - $X_3 \sim Normal(0, \frac{1}{3})$ # - $X_4 \sim Normal(0, \frac{1}{4})$ # - $\vdots$ # - $X_i \sim Normal(0, \frac{1}{i})$ # - $\vdots$ # # and let $X \sim Point\,Mass(0)$, # # and say that the $X_i$ **converge in distribution** to the $X \sim Point\,Mass$ RV $X$, # # $$X_i \overset{d}{\rightarrow} X$$ # # What we are saying with convergence in distribution, informally, is that as $i$ increases, we increasingly expect to see the next outcome in a sequence of random experiments becoming better and better modeled by the limiting random variable. In this case, as $i$ increases, the $Point\,Mass(0)$ is becoming a better and better model for the next outcome of a random experiment with outcomes $\sim Normal(0,\frac{1}{i})$.
# mock up a picture of a sequence of converging normal distributions my_mu = 0 upper = my_mu + 5; lower = -upper; # limits for plot var('mu sigma') stop_i = 12 html('<h4>N(0,1) to N(0, 1/'+str(stop_i)+')</h4>') f = (1/(sigma*sqrt(2.0*pi)))*exp(-1.0/(2*sigma^2)*(x - mu)^2) p=plot(f.subs(mu=my_mu,sigma=1.0), (x, lower, upper), rgbcolor = (0,0,1)) for i in range(2, stop_i, 1): # just do a few of them shade = 1-11/i # make them different colours p+=plot(f.subs(mu=my_mu,sigma=1/i), (x, lower, upper), rgbcolor = (1-shade, 0, shade)) textOffset = -0.2 # offset for placement of text - may need adjusting p+=text("0",(0,textOffset),fontsize = 10, rgbcolor='grey') p+=text(str(upper.n(digits=2)),(upper,textOffset),fontsize = 10, rgbcolor='grey') p+=text(str(lower.n(digits=2)),(lower,textOffset),fontsize = 10, rgbcolor='grey') p.show(axes=false, gridlines=[None,[0]], figsize=[7,3]) # #### There is an interesting point to note about this convergence: # # We have said that the $X_i \sim Normal(0,\frac{1}{i})$ with distribution functions $F_i$ converge in distribution to $X \sim Point\,Mass(0)$ with distribution function $F$, which means that we must be able to show that for any real number $t$ at which $F$ is continuous, # # $$\underset{i \rightarrow \infty}{\lim} F_i(t) = F(t)$$ # # Note that for any of the $X_i \sim Normal(0, \frac{1}{i})$, $F_i(0) = \frac{1}{2}$, and also note that for $X \sim Point,Mass(0)$, $F(0) = 1$, so clearly $F_i(0) \neq F(0)$. # # What has gone wrong? # # Nothing: we said that we had to be able to show that $\underset{i \rightarrow \infty}{\lim} F_i(t) = F(t)$ for any $t \in \mathbb{R}$ at which $F$ is continuous, but the $Point\,Mass(0)$ distribution function $F$ is not continous at 0! theta = 0.0 # show the plots show(graphics_array((pmfPointMassPlot(theta),cdfPointMassPlot(theta))),figsize=[8,2]) # # Convergence in Probability # # Let $X_1, X_2, \ldots$ be a sequence of random variables and let $X$ be another random variable. 
Let $F_i$ denote the distribution function (DF) of $X_i$ and let $F$ denote the distribution function of $X$. # # Now, if for any real number $\varepsilon > 0$, # # $$\underset{i \rightarrow \infty}{\lim} P\left(|X_i - X| > \varepsilon\right) = 0$$ # # Then we can say that the sequence $X_i$, $i = 1, 2, \ldots$ **converges to $X$ in probability** and write $X_i \overset{P}{\rightarrow} X$. # # Or, going back again to the probability space 'under the hood' of a random variable, we could look at the way the $X_i$ maps each outcome $\omega \in \Omega$, $X_i(\omega)$, which is some point on the real line, and compare this to mapping $X(\omega)$. # # Saying that for any $\varepsilon \in \mathbb{R}$, $\underset{i \rightarrow \infty}{\lim} P\left(|X_i - X| > \varepsilon\right) = 0$ is the equivalent of saying that for any $\varepsilon \in \mathbb{R}$, # # $$\underset{i \rightarrow \infty}{\lim} P\left(\{\omega:|X_i(\omega) - X(\omega)| > \varepsilon \}\right) = 0$$ # # Informally, we are saying $X$ is a limit in probabilty if, by going far enough into the sequence $X_i$, we can ensure that the mappings $X_i(\omega)$ and $X(\omega)$ will be arbitrarily close to each other on the real line for all $\omega \in \Omega$. # # **Note** that convergence in distribution is implied by convergence in probability: convergence in distribution is the weakest form of convergence; any sequence of RV's that converges in probability to some RV $X$ also converges in distribution to $X$ (but not necessarily vice versa).
# mock up a picture of a sequence of converging normal distributions my_mu = 0 var('mu sigma') upper = 0.2; lower = -upper i = 20 # start part way into the sequence lim = 100 # how far to go stop_i = 12 html('<h4>N(0,1/'+str(i)+') to N(0, 1/'+str(lim)+')</h4>') f = (1/(sigma*sqrt(2.0*pi)))*exp(-1.0/(2*sigma^2)*(x - mu)^2) p=plot(f.subs(mu=my_mu,sigma=1.0/i), (x, lower, upper), rgbcolor = (0,0,1)) for j in range(i, lim+1, 4): # just do a few of them shade = 1-(j-i)/(lim-i) # make them different colours p+=plot(f.subs(mu=my_mu,sigma=1/j), (x, lower,upper), rgbcolor = (1-shade, 0, shade)) textOffset = -1.5 # offset for placement of text - may need adjusting p+=text("0",(0,textOffset),fontsize = 10, rgbcolor='grey') p+=text(str(upper.n(digits=2)),(upper,textOffset),fontsize = 10, rgbcolor='grey') p+=text(str(lower.n(digits=2)),(lower,textOffset),fontsize = 10, rgbcolor='grey') p.show(axes=false, gridlines=[None,[0]], figsize=[7,3]) # For our sequence of $Normal$ random variables $X_1, X_2, X_3, \ldots$, where # # - $X_1 \sim Normal(0, 1)$ # - $X_2 \sim Normal(0, \frac{1}{2})$ # - $X_3 \sim Normal(0, \frac{1}{3})$ # - $X_4 \sim Normal(0, \frac{1}{4})$ # - $\vdots$ # - $X_i \sim Normal(0, \frac{1}{i})$ # - $\vdots$ # # and $X \sim Point\,Mass(0)$, # # It can be shown that the $X_i$ converge in probability to $X \sim Point\,Mass(0)$ RV $X$, # # $$X_i \overset{P}{\rightarrow} X$$ # # (the formal proof of this involves Markov's Inequality, which is beyond the scope of this course). # # # Some Basic Limit Laws in Statistics # # Intuition behind Law of Large Numbers and Central Limit Theorem # # Take a look at the Khan academy videos on the Law of Large Numbers and the Central Limit Theorem. This will give you a working idea of these theorems. In the sequel, we will strive for a deeper understanding of these theorems on the basis of the two notions of convergence of sequences of random variables we just saw. 
# # # ## Weak Law of Large Numbers # # Remember that a statistic is a random variable, so a sample mean is a random variable. If we are given a sequence of independent and identically distributed RVs, $X_1,X_2,\ldots \overset{IID}{\sim} X_1$, then we can also think of a sequence of random variables $\overline{X}_1, \overline{X}_2, \ldots, \overline{X}_n, \ldots$ ($n$ being the sample size). # # Since $X_1, X_2, \ldots$ are $IID$, they all have the same expectation, say $E(X_1)$ by convention. # # If $E(X_1)$ exists, then the sample mean $\overline{X}_n$ converges in probability to $E(X_1)$ (i.e., to the expectation of any one of the individual RVs): # # $$ # \text{If} \quad X_1,X_2,\ldots \overset{IID}{\sim} X_1 \ \text{and if } \ E(X_1) \ \text{exists, then } \ \overline{X}_n \overset{P}{\rightarrow} E(X_1) \ . # $$ # # Going back to our definition of convergence in probability, we see that this means that for any real number $\varepsilon > 0$, $\underset{n \rightarrow \infty}{\lim} P\left(|\overline{X}_n - E(X_1)| > \varepsilon\right) = 0$ # # Informally, this means that, by taking larger and larger samples we can make the probability that the average of the observations is more than $\varepsilon$ away from the expected value get smaller and smaller. # # Proof of this is beyond the scope of this course, but we have already seen it in action when we looked at the $Bernoulli$ running means. Have another look, this time with only one sequence of running means. You can increase $n$, the sample size, and change $\theta$. Note that the seed for the random number generator is also under your control. This means that you can get replicable samples: in particular, in this interact, when you increase the sample size it looks as though you are just adding more to an existing sample rather than starting from scratch with a new one.
@interact
def _(nToGen=slider(1,1500,1,100,label='n'),my_theta=input_box(0.3,label='theta'),rSeed=input_box(1234,label='random seed')):
    '''Interactive function to plot running mean for a Bernoulli with specified n, theta and random number seed.'''
    if my_theta >= 0 and my_theta <= 1: # theta must be a valid Bernoulli probability
        html('<h4>Bernoulli('+str(my_theta.n(digits=2))+')</h4>')
        xvalues = range(1, nToGen+1,1)
        # bernoulliRunningMeans is defined earlier in the notebook
        bRunningMeans = bernoulliRunningMeans(nToGen, myTheta=my_theta, mySeed=rSeed)
        pts = zip(xvalues, bRunningMeans)
        p = line(pts, rgbcolor = (0,0,1))
        # dotted horizontal reference line at the true theta
        p+=line([(0,my_theta),(nToGen,my_theta)],linestyle=':',rgbcolor='grey')
        show(p, figsize=[5,3], axes_labels=['n','sample mean'],ymax=1)
    else:
        print 'Theta must be between 0 and 1'

# # Central Limit Theorem
#
# You have probably all heard of the Central Limit Theorem before, but now we can relate it to our definition of convergence in distribution.
#
# Let $X_1,X_2,\ldots \overset{IID}{\sim} X_1$ and suppose $E(X_1)$ and $V(X_1)$ both exist,
#
# then
#
# $$
# \overline{X}_n = \frac{1}{n} \sum_{i=1}^n X_i \overset{d}{\rightarrow} X \sim Normal \left(E(X_1),\frac{V(X_1)}{n} \right)
# $$
#
# And remember $Z \sim Normal(0,1)$?
# # Consider $Z_n := \displaystyle\frac{\overline{X}_n-E(\overline{X}_n)}{\sqrt{V(\overline{X}_n)}} = \displaystyle\frac{\sqrt{n} \left( \overline{X}_n -E(X_1) \right)}{\sqrt{V(X_1)}}$ # # If $\overline{X}_n = \displaystyle\frac{1}{n} \displaystyle\sum_{i=1}^n X_i \overset{d}{\rightarrow} X \sim Normal \left(E(X_1),\frac{V(X_1)}{n} \right)$, then $\overline{X}_n -E(X_1) \overset{d}{\rightarrow} X-E(X_1) \sim Normal \left( 0,\frac{V(X_1)}{n} \right)$ # # and $\sqrt{n} \left( \overline{X}_n -E(X_1) \right) \overset{d}{\rightarrow} \sqrt{n} \left( X-E(X_1) \right) \sim Normal \left( 0,V(X_1) \right)$ # # so $Z_n := \displaystyle \frac{\overline{X}_n-E(\overline{X}_n)}{\sqrt{V(\overline{X}_n)}} = \displaystyle\frac{\sqrt{n} \left( \overline{X}_n -E(X_1) \right)}{\sqrt{V(X_1)}} \overset{d}{\rightarrow} Z \sim Normal \left( 0,1 \right)$ # # Thus, for sufficiently large $n$ (say $n>30$), probability statements about $\overline{X}_n$ can be approximated using the $Normal$ distribution. # # The beauty of the CLT, as you have probably seen from other courses, is that $\overline{X}_n \overset{d}{\rightarrow} Normal \left( E(X_1), \frac{V(X_1)}{n} \right)$ does not require the $X_i$ to be normally distributed. # # We can try this with our $Bernoulli$ RV generator. First, a small number of samples: theta, n, samples = 0.6, 10, 5 # concise way to set some variable values sampleMeans=[] # empty list for i in range(0, samples, 1): # loop thisMean = QQ(sum(bernoulliSample(n, theta)))/n # get a sample and find the mean sampleMeans.append(thisMean) # add mean to the list of means sampleMeans # disclose the sample means # You can use the interactive plot to increase the number of samples and make a histogram of the sample means. According to the CLT, for lots of reasonably-sized samples we should get a nice symmetric bell-curve-ish histogram centred on $\theta$. You can adjust the number of bins in the histogram as well as the number of samples, sample size, and $\theta$. 
import pylab
@interact
def _(replicates=slider(1,3000,1,100,label='replicates'), \
      nToGen=slider(1,1500,1,100,label='sample size n'),\
      my_theta=input_box(0.3,label='theta'),Bins=5):
    '''Interactive function to plot distribution of replicates of sample means for n IID Bernoulli trials.'''
    if my_theta >= 0 and my_theta <= 1 and replicates > 0:
        sampleMeans=[] # empty list
        for i in range(0, replicates, 1):
            # one sample mean per replicate; RR() coerces the rational mean to a real
            thisMean = RR(sum(bernoulliSample(nToGen, my_theta)))/nToGen
            sampleMeans.append(thisMean)
        pylab.clf() # clear current figure
        n, bins, patches = pylab.hist(sampleMeans, Bins, density=true)
        pylab.ylabel('normalised count')
        pylab.title('Normalised histogram for Bernoulli sample means')
        pylab.savefig('myHist') # to actually display the figure
        pylab.show()
        #show(p, figsize=[5,3], axes_labels=['n','sample mean'],ymax=1)
    else:
        print 'Theta must be between 0 and 1, and samples > 0'

# Increase the sample size and the number of bins in the above interact and see if the histograms of the sample means are looking more and more normal as the CLT would have us believe.

# But although the $X_i$ do not have to be $\sim Normal$ for $\overline{X}_n = \overset{d}{\rightarrow} X \sim Normal\left(E(X_1),\frac{V(X_1)}{n} \right)$, remember that we said "Let $X_1,X_2,\ldots \overset{IID}{\sim} X_1$ and suppose $E(X_1)$ and $V(X_1)$ both exist", then,
#
# $$
# \overline{X}_n = \frac{1}{n} \sum_{i=1}^n X_i \overset{d}{\rightarrow} X \sim Normal \left(E(X_1),\frac{V(X_1)}{n} \right)
# $$
#
# This is where is all goes horribly wrong for the standard $Cauchy$ distribution (any $Cauchy$ distribution in fact): neither the expectation nor the variance exist for this distribution. The Central Limit Theorem cannot be applied here. In fact, if $X_1,X_2,\ldots \overset{IID}{\sim}$ standard $Cauchy$, then $\overline{X}_n = \displaystyle \frac{1}{n} \sum_{i=1}^n X_i \sim$ standard $Cauchy$.
# # ### YouTry # # Try looking at samples from two other RVs where the expectation and variance do exist, the $Uniform$ and the $Exponential$: import pylab @interact def _(replicates=input_box(100,label='replicates'), \ nToGen=slider(1,1500,1,100,label='sample size n'),\ my_theta1=input_box(2,label='theta1'),\ my_theta2=input_box(4,label='theta1'),Bins=5): '''Interactive function to plot distribution of sample means for n IID Uniform(theta1, theta2) trials.''' if (my_theta1 < my_theta2) and replicates > 0: sampleMeans=[] # empty list for i in range(0, replicates, 1): thisMean = RR(sum(uniformSample(nToGen, my_theta1, my_theta2)))/nToGen sampleMeans.append(thisMean) pylab.clf() # clear current figure n, bins, patches = pylab.hist(sampleMeans, Bins, density=true) pylab.ylabel('normalised count') pylab.title('Normalised histogram for Uniform sample means') pylab.savefig('myHist') # to actually display the figure pylab.show() #show(p, figsize=[5,3], axes_labels=['n','sample mean'],ymax=1) else: print 'theta1 must be less than theta2, and samples > 0' import pylab @interact def _(replicates=input_box(100,label='replicates'), \ nToGen=slider(1,1500,1,100,label='sample size n'),\ my_lambda=input_box(0.1,label='lambda'),Bins=5): '''Interactive function to plot distribution of \ sample means for an Exponential(lambda) process.''' if my_lambda > 0 and replicates > 0: sampleMeans=[] # empty list for i in range(0, replicates, 1): thisMean = RR(sum(exponentialSample(nToGen, my_lambda)))/nToGen sampleMeans.append(thisMean) pylab.clf() # clear current figure n, bins, patches = pylab.hist(sampleMeans, Bins, density=true) pylab.ylabel('normalised count') pylab.title('Normalised histogram for Exponential sample means') pylab.savefig('myHist') # to actually display the figure pylab.show() #show(p, figsize=[5,3], axes_labels=['n','sample mean'],ymax=1) else: print 'lambda must be greater than 0, and samples > 0' # # Properties of the MLE # # The LLN (law of large numbers) and CLT 
(central limit theorem) are statements about the limiting distribution of the sample mean of IID random variables whose expectation and variance exists. How does this apply to the MLE (maximum likelihood estimator)? # # Consider the following generic parametric model for our data or observations: # # $$ # X_1,X_2,\ldots,X_n \overset{IID}{\sim} F(x; \theta^*) \ \text{ or } \ f(x; \theta^*) # $$ # # We do not know the true parameter $\theta^*$ under the model for our data. Our task is to estimate the unknown parameter $\theta^*$ using the MLE: # # $$\widehat{\Theta}_n = argmax_{\theta \in \mathbf{\Theta}} l(\theta)$$ # # The amazing thing about the MLE is its following properties: # # ### 1. The MLE is *asymptotically consistent* # # $$\boxed{\widehat{\Theta}_n \overset{P}{\rightarrow} \theta^*}$$ # # So when the number of observations (sample size $n$) goes to infinity, our MLE converges in probability to the true parameter $\theta^* \in \mathbf{\Theta}$. # # Interestingly, one can work out the details and find that the MLE $\widehat{\Theta}_n$, which is also a random variable based on $n$ IID samples that takes values in the parameter space $\mathbf{\Theta}$, is also normally distributed for large sample sizes. # # ### 2. The MLE is *equivariant* # # $$\boxed{\text{If } \ \widehat{\Theta}_n \ \text{ is the MLE of } \ \theta^* \ \text{ then } \ g(\widehat{\Theta}_n) \ \text{ is the MLE of } \ g(\theta^*)}$$ # # This is a very useful property, since any function $g : \mathbf{\Theta} \to \mathbb{R}$ of interest is at our disposal by merely applying $g$ to the MLE. Often $g$ is some sort of reward that depends on the unknown parameter $\theta^*$. # # ### 3.
The MLE is *asymptotically normal* # # $$\boxed{ \frac{\left(\widehat{\Theta}_n - \theta^*\right)}{\widehat{se}_n} \overset{d}{\rightarrow} Normal(0,1) } # \quad \text{ or equivalently, } \quad # \boxed{ \widehat{\Theta}_n \overset{d}{\rightarrow} Normal( \theta^*, \widehat{se}_n^2) } # $$ # # where, $\widehat{se}_n$ is the *estimated standard error* of the MLE: # # $$\boxed{ \widehat{se}_n \ \text{ is an estimate of the } \ \sqrt{V\left(\widehat{\Theta}_n \right)}}$$ # # We can compute $\widehat{se}_n$ with the following formula: # # $$\boxed{\widehat{se}_n = \sqrt{\frac{1}{ \left. n E \left(-\frac{\partial^2 \log f(X;\theta)}{\partial \theta^2} \right) \right\vert_{\theta=\widehat{\theta}_n} } }}$$ # # where, the expectation is called the *Fisher information* of one sample or $I_1$: # # $$\boxed{ I_1 := E \left(-\frac{\partial^2 \log f(X;\theta)}{\partial \theta^2} \right) = # \begin{cases} # \displaystyle{\int{\left(-\frac{\partial^2 \log f(x;\theta)}{\partial \theta^2} \right) f(x; \theta)} dx} & \text{ for continuous RV } X\\ # \displaystyle{\sum_x{\left(-\frac{\partial^2 \log f(x;\theta)}{\partial \theta^2} \right) f(x; \theta)}}& \text{ for discrete RV } X # \end{cases} # } # $$ # # Other two properties (not needed for this course) include: # # - *asymptotic efficiency*, i.e., among a class of well-behaved estimators, the MLE has the smallest variance at least for large samples, and # - *approximately Bayes*, i.e., the MLE is approximately the *Bayes estimator* (some of you may see Bayesian methods of estimation in advanced courses in statistical machine learning or in latest AI methods). # # Confidence Interval and Set Estimation from MLE # # An immediate implication of the asymptotic normality of the MLE, which informally states that the distribution of the MLE can be approximated by a Normal random variable, is to obtain confidence intervals for the unkown parameter $\theta^*$. 
# # Recall that in set estimation, as opposed to point estimation, we estimate the unknown parameter using a random set based on the data (typically intervals in 1D) that "traps" the true parameter $\theta^*$ with a very high probability, say $0.95$. We typically express such probability in terms of $1-\alpha$, so the $95\%$ confidence interval is seen as a $1-\alpha$ confidence interval with $\alpha=0.05$. From the asymptotic normality of the MLE, we get the following confidence interval for the unknown $\theta^*$: # # # $$ # \boxed{\text{If } \quad # \displaystyle{C_n := \left( \widehat{\Theta}_n - z_{\alpha/2} \widehat{se}_n, \, \widehat{\Theta}_n + z_{\alpha/2} \widehat{se}_n \right)} \quad \text{ then } \quad P \left( \{ \theta^* \in C_n \} ; \theta^* \right) \underset{n \to \infty}{\longrightarrow} 1-\alpha , \quad \text{ where } z_{\alpha/2} = \Phi^{[-1]}(1-\alpha/2). # } # $$ # # Recall that $P \left( \{ \theta^* \in C_n \} ; \theta^* \right)$ is simply the probability of the event that $\theta^*$ will be in $C_n$, the $1-\alpha$ confidence interval, given the data is distributed according to the model with true parameter $\theta^*$. # # NOTE: $\Phi^{[-1]}(1-\alpha/2)$ is merely the inverse distribution function (CDF) of the standard normal RV. # # $$ # \text{For } \alpha=0.05, z_{\alpha/2}=1.96 \approxeq 2, \text{ so: } \quad \boxed{\widehat{\Theta}_n \pm 2 \widehat{se}_n} \quad \text{ is an approximate 95% confidence interval.} # $$ # ## Example of Confidence Interval for IID $Bernoulli(\theta)$ Trials # # We already know that the MLE for the model with $n$ IID $Bernoulli(\theta)$ Trials is the sample mean, i.e., # # $$X_1,X_2,\ldots, X_n \overset{IID}{\sim} Bernoulli(\theta^*) \implies \widehat{\Theta}_n = \overline{X}_n$$ # # Our task now is to obtain the $1-\alpha$ confidence interval based on this MLE.
# # To get the confidence interval we need to obtain $\widehat{se}_n$ by computing the following: # # $$ # \begin{array}{cc} # \widehat{se}_n &=& \displaystyle{\sqrt{\frac{1}{ \left. n E \left(-\frac{\partial^2 \log f(X;\theta)}{\partial \theta^2} \right) \right\vert_{\theta=\widehat{\theta}_n} } }} # \end{array} # $$ # $I_1 := E \left(-\frac{\partial^2 \log f(X;\theta)}{\partial \theta^2} \right)$ is called the Fisher Information of one sample. # Since our IID samples are from a discrete distribution with # # $$ # \begin{array}{cc} # f(x; \theta) = \theta^x (1-\theta)^{1-x} # &\implies& \displaystyle{\log \left( f(x;\theta) \right) = x \log(\theta) +(1-x) \log(1-\theta)}\\ # &\implies& \displaystyle{\frac{\partial}{\partial \theta} \left(\log \left( f(x;\theta) \right)\right)} # = \displaystyle{\frac{x}{\theta} -\frac{1-x}{1-\theta}} \\ # &\implies& \displaystyle{\frac{\partial^2}{\partial \theta^2} \left(\log \left( f(x;\theta) \right)\right)} # = \displaystyle{-\frac{x}{\theta^2} - \frac{1-x}{(1-\theta)^2}}\\ # &\implies& \displaystyle{E \left( - \frac{\partial^2}{\partial \theta^2} \left(\log \left( f(x;\theta) \right)\right) \right)} # = \displaystyle{\sum_{x\in\{0,1\}} \left( \frac{x}{\theta^2} + \frac{1-x}{(1-\theta)^2} \right) f(x; \theta) = \frac{\theta}{\theta^2} + \frac{1-\theta}{(1-\theta)^2} = \frac{1}{\theta(1-\theta)}} # \end{array} # $$ # # Note that we have implicitly assumed that the $x$ values are only $0$ or $1$ by ignoring the indicator term $\mathbf{1}_{\{0,1\}}(x)$ in $f(x;\theta)$. But this is okay as we are carefully doing the sums over just $x \in \{0,1\}$. # # Now, by using the formula for $\widehat{se}_n$, we can obtain: # # $$ # \begin{array}{cc} # \widehat{se}_n # &=& \displaystyle{\sqrt{\frac{1}{ \left. n E \left(-\frac{\partial^2 \log f(X;\theta)}{\partial \theta^2} \right) \right\vert_{\theta=\widehat{\theta}_n} } }}\\ # &=& \displaystyle{\sqrt{\frac{1}{ \left. 
n \frac{1}{\theta(1-\theta)} \right\vert_{\theta=\widehat{\theta}_n} } }}\\
# &=& \displaystyle{\sqrt{\frac{\widehat{\theta}_n(1-\widehat{\theta}_n)}{n}}}
# \end{array}
# $$
#
# Finally, we can complete our task by obtaining the 95% confidence interval for $\theta^*$ as follows:
#
# $$
# \displaystyle{ \widehat{\theta}_n \pm 2 \widehat{se}_n = \widehat{\theta}_n \pm 2 \sqrt{\frac{\widehat{\theta}_n(1-\widehat{\theta}_n)}{n}} = \overline{x}_n \pm 2 \sqrt{\frac{\overline{x}_n(1-\overline{x}_n)}{n}} }
# $$

# Demo: repeat the Bernoulli experiment `replicates` times.  For each replicate,
# plot the running sample mean over n trials (the secret theta* lives inside
# bernoulliSecretThetaRunningMeans, defined earlier in the notebook) and then
# draw the approximate 95% CI, mle +/- 2*se, as a short vertical bar at the end.
nToGenerate = 100  # number of IID Bernoulli trials per replicate
replicates = 20  # number of independent replications of the whole experiment
xvalues = range(1, nToGenerate+1,1)  # x-axis: sample sizes 1..n
for i in range(replicates):
    redshade = 0.5*(replicates - 1 - i)/replicates # to get different colours for the lines
    bRunningMeans = bernoulliSecretThetaRunningMeans(nToGenerate)
    pts = zip(xvalues,bRunningMeans)
    if (i == 0):
        p = line(pts, rgbcolor = (redshade,0,1))  # first replicate creates the Sage plot object
    else:
        p += line(pts, rgbcolor = (redshade,0,1))  # later replicates are overlaid on it
    # the MLE after all n trials is the last running mean
    mle=bRunningMeans[nToGenerate-1]
    # 95% half-width: 2 * estimated standard error sqrt(mle*(1-mle)/n)
    se95Correction=2.0*sqrt(mle*(1-mle)/nToGenerate)
    lower95CI = mle-se95Correction
    upper95CI = mle+se95Correction
    # vertical CI bar, shifted right by i so the bars of the 20 replicates do not overlap
    p += line([(nToGenerate+i,lower95CI),(nToGenerate+i,upper95CI)], rgbcolor = (redshade,0,1), thickness=0.5)
# horizontal black reference line at 0.3 -- presumably the secret theta*; TODO confirm
p += line([(1,0.3),(nToGenerate+replicates,0.3)], rgbcolor='black', thickness='2')
p += text('sample mean up to n='+str(nToGenerate)+' and their 95% confidence intervals',(nToGenerate/1.5,1),fontsize=16)
show(p, figsize=[10,6])

# # Sample Exam Problem 5
#
# Obtain the 95% Confidence Interval for the $\lambda^*$ from the experiment based on $n$ IID $Exponential(\lambda)$ trials.
#
# Write down your answer by returning the right answer in the function `SampleExamProblem5` in the next cell.

# Your function call `SampleExamProblem5(sampleWaitingTimes)` on the Orbiter waiting times data should return the 95% confidence interval for the unknown parameter $\lambda^*$.
# + # Sample Exam Problem 5 # Only replace the XXX below, do not change the function naemes or parameters sampleWaitingTimes = np.array([8,3,7,18,18,3,7,9,9,25,0,0,25,6,10,0,10,8,16,9,1,5,16,6,4,1,3,21,0,28,3,8,6,6,11,\ 8,10,15,0,8,7,11,10,9,12,13,8,10,11,8,7,11,5,9,11,14,13,5,8,9,12,10,13,6,11,13,0,\ 0,11,1,9,5,14,16,2,10,21,1,14,2,10,24,6,1,14,14,0,14,4,11,15,0,10,2,13,2,22,10,5,\ 6,13,1,13,10,11,4,7,9,12,8,16,15,14,5,10,12,9,8,0,5,13,13,6,8,4,13,15,7,11,6,23,1]) def SampleExamProblem5(exponentialSamples): '''return the 95% confidence interval as a 2-tuple for the unknown rate parameter lambda* from n IID Exponential(lambda*) trials in the input numpy array called exponentialSamples''' XXX XXX XXX lower95CI=XXX upper95CI=XXX return (lower95CI,upper95CI) # do NOT change anything below lowerCISampleExamProblem5,upperCISampleExamProblem5 = SampleExamProblem5(sampleWaitingTimes) print "The 95% CI for lambda in the Orbiter Waiting time experiment = " print (lowerCISampleExamProblem5,upperCISampleExamProblem5) # - # # Sample Exam Problem 5 Solution # # We can obtain the 95% Confidence Interval for the $\lambda^*$ for the experiment based on $n$ IID $Exponential(\lambda)$ trials, by hand or using SageMath symbolic computations (typically both). # # Let $X_1,X_2,\ldots,X_n \overset{IID}{\sim} Exponential(\lambda^*)$. # # We saw that the ML estimator of $\lambda^* \in (0,\infty)$ is $\widehat{\Lambda}_n = 1/\, \overline{X}_n$ and its ML estimate is $\widehat{\lambda}_n=1/\, \overline{x}_n$, where $x_1,x_2,\ldots,x_n$ are our observed data. # # Let us obtain $I_1$, the Fisher Information of one sample, for this experiment to find the standard error: # # $$ # \widehat{\mathsf{se}}_n(\widehat{\Lambda}_n) = \frac{1}{\sqrt{n \left. I_1 \right\vert_{\lambda=\widehat{\lambda}_n}}} # $$ # # and construct an approximate $95\%$ confidence interval for $\lambda^*$ using the asymptotic normality of its ML estimator $\widehat{\Lambda}_n$. 
#
# Since the probability density function $f(x;\lambda)=\lambda e^{-\lambda x}$, for $x\in [0,\infty)$, we have,
#
# $$
# \begin{align}
# I_1 &= - E \left( \frac{\partial^2 \log f(X;\lambda)}{\partial^2 \lambda} \right) = - \int_{x \in [0,\infty)} \left( \frac{\partial^2 \log \left( \lambda e^{-\lambda x} \right)}{\partial^2 \lambda} \right) \lambda e^{-\lambda x} \ dx
# \end{align}
# $$
#
# Let us compute the above integrand next.

# $$
# \begin{align}
# \frac{\partial^2 \log \left( \lambda e^{-\lambda x} \right)}{\partial^2 \lambda}
# &:=
# \frac{\partial}{\partial \lambda} \left( \frac{\partial}{\partial \lambda} \left( \log \left( \lambda e^{-\lambda x} \right) \right) \right)
# = \frac{\partial}{\partial \lambda} \left( \frac{\partial}{\partial \lambda} \left( \log(\lambda) + \log(e^{-\lambda x}) \right) \right) \\
# &= \frac{\partial}{\partial \lambda} \left( \frac{\partial}{\partial \lambda} \left( \log(\lambda) -\lambda x \right) \right)
# = \frac{\partial}{\partial \lambda} \left( {\lambda}^{-1} - x \right) = - \lambda^{-2} - 0 = -\frac{1}{\lambda^2}
# \end{align}
# $$

# Now, let us evaluate the integral by recalling that the expectation of the constant $1$ is 1 for any RV $X$ governed by some parameter, say $\theta$. For instance when $X$ is a continuous RV, $E_{\theta}(1) = \int_{x \in \mathbb{X}} 1 \ f(x;\theta) \ dx = \int_{x \in \mathbb{X}} f(x;\theta) \ dx = 1$.
Therefore, the Fisher Information of one sample is
# $$
# \begin{align}
# I_1(\lambda) = - \int_{x \in \mathbb{X} = [0,\infty)} \left( \frac{\partial^2 \log \left( \lambda e^{-\lambda x} \right)}{\partial^2 \lambda} \right) \lambda e^{-\lambda x} \ dx
# &= - \int_{0}^{\infty} \left(-\frac{1}{\lambda^2} \right) \lambda e^{-\lambda x} \ dx \\
# & = - \left(-\frac{1}{\lambda^2} \right) \int_{0}^{\infty} \lambda e^{-\lambda x} \ dx = \frac{1}{\lambda^2} \ 1 = \frac{1}{\lambda^2}
# \end{align}
# $$

# Now, we can compute the desired estimated standard error, by substituting in the ML estimate $\widehat{\lambda}_n = 1/\,\overline{x}_n := n \, / \left( \sum_{i=1}^n x_i \right)$ of $\lambda^*$, as follows:
# $$
# \widehat{\mathsf{se}}_n(\widehat{\Lambda}_n)
# = \frac{1}{\sqrt{n \left. I_1 \right\vert_{\lambda=\widehat{\lambda}_n}}}
# = \frac{1}{\sqrt{n \frac{1}{\widehat{\lambda}_n^2} }}
# = \frac{\widehat{\lambda}_n}{\sqrt{n}}
# = \frac{1}{\sqrt{n} \ \overline{x}_n}
# $$

# Using $\widehat{\mathsf{se}}_n(\widehat{\lambda}_n)$ we can construct an approximate $95\%$ confidence interval $C_n$ for $\lambda^*$, due to the asymptotic normality of the ML estimator of $\lambda^*$, as follows:
# $$
# C_n
# = \widehat{\lambda}_n \pm 2 \frac{\widehat{\lambda}_n}{\sqrt{n}}
# = \frac{1}{\overline{x}_n} \pm 2 \frac{1}{\sqrt{n} \ \overline{x}_n} .
# $$

# Let us compute the ML estimate and the $95\%$ confidence interval for the rate parameter for the waiting times at the Orbiter bus-stop. The sample mean $\overline{x}_{132}=9.0758$ and the ML estimate is:
# $$\widehat{\lambda}_{132}=1/\,\overline{x}_{132}=1/9.0758=0.1102 ,$$
# and the $95\%$ confidence interval is:
# $$
# C_n
# = \widehat{\lambda}_{132} \pm 2 \frac{\widehat{\lambda}_{132}}{\sqrt{132}}
# = \frac{1}{\overline{x}_{132}} \pm 2 \frac{1}{\sqrt{132} \, \overline{x}_{132}} = 0.1102 \pm 2 \cdot 0.0096 = [0.091, 0.129] .
# $$
#

# +
# Sample Exam Problem 5 Solution
# solution is straightforward by following these steps symbolically
# or you can do it by hand with pen/paper or do both to be safe

## STEP 1 - define the variables you need
lam,x,n = var('lam','x','n')
## STEP 2 - get symbolic expression for the likelihood of one sample
logfx = log(lam*exp(-lam*x)).full_simplify()
print "logfx = ", logfx
## STEP 3 - find second derivative of expression from STEP 2 w.r.t. parameter
d2logfx = logfx.diff(lam,2).full_simplify()
print "d2logfx = ", d2logfx
## STEP 4 - to get Fisher Information of one sample
## integrate d2logfx * f(x) over x in [0,Infinity), f(x) is the PDF lam*exp(-lam*x)
assume(lam>0) # usually you need make such assume's for integrate to work - see suggestions in error messages
FisherInformation1 = -integrate(d2logfx*lam*exp(-lam*x),x,0,Infinity)
print "FisherInformation1 = ",FisherInformation1
## STEP 5 - get Standard Error from FisherInformation1
StdErr = 1/sqrt(n*FisherInformation1)
print "StdErr = ",StdErr
## STEP 6 - get estimated Standard Error from Standard Error and MLE or lamHat
# lamHat = 1/xBar = 1/sampleMean; know from before
lamHat,sampMean = var('lamHat','sampMean')
lamHat = 1/sampMean
EstStdErr = StdErr.subs(lam=lamHat)
print "EstStdErr = ",EstStdErr
## STEP 7 - Get lower and upper 95% CI
(lamHat-2*EstStdErr, lamHat+2*EstStdErr)

# +
# Sample Exam Problem 5 Solution
# Only replace the XXX below, do not change the function names or parameters
import numpy as np

# Orbiter bus-stop waiting times (minutes), n = 132 observations
sampleWaitingTimes = np.array([8,3,7,18,18,3,7,9,9,25,0,0,25,6,10,0,10,8,16,9,1,5,16,6,4,1,3,21,0,28,3,8,6,6,11,\
                               8,10,15,0,8,7,11,10,9,12,13,8,10,11,8,7,11,5,9,11,14,13,5,8,9,12,10,13,6,11,13,0,\
                               0,11,1,9,5,14,16,2,10,21,1,14,2,10,24,6,1,14,14,0,14,4,11,15,0,10,2,13,2,22,10,5,\
                               6,13,1,13,10,11,4,7,9,12,8,16,15,14,5,10,12,9,8,0,5,13,13,6,8,4,13,15,7,11,6,23,1])

def SampleExamProblem5(exponentialSamples):
    '''return the 95% confidence interval as a 2-tuple for the unknown rate parameter lambda*
       from n IID Exponential(lambda*) trials in the input numpy array called exponentialSamples'''
    sampleMean = exponentialSamples.mean()
    n=len(exponentialSamples)
    # MLE of lambda is 1/sampleMean and its estimated standard error is lambdaHat/sqrt(n),
    # so the 95% CI half-width is 2*lambdaHat/sqrt(n) = 2/(sqrt(n)*sampleMean)
    correction=RR(2/(sqrt(n)*sampleMean)) # you can also replace RR by float here or you get expressions
    lower95CI=1.0/sampleMean - correction
    upper95CI=1.0/sampleMean + correction
    return (lower95CI,upper95CI)

# do NOT change anything below
lowerCISampleExamProblem5,upperCISampleExamProblem5 = SampleExamProblem5(sampleWaitingTimes)
print "The 95% CI for lambda in the Orbiter Waiting time experiment = "
print (lowerCISampleExamProblem5,upperCISampleExamProblem5)
# -

# # Sample Exam Problem 5 Solution
#
# We can obtain the 95% Confidence Interval for the $\lambda^*$ for the experiment based on $n$ IID $Exponential(\lambda)$ trials, by hand or using SageMath symbolic computations (typically both).
#
# Let $X_1,X_2,\ldots,X_n \overset{IID}{\sim} Exponential(\lambda^*)$.
#
# We saw that the ML estimator of $\lambda^* \in (0,\infty)$ is $\widehat{\Lambda}_n = 1/\, \overline{X}_n$ and its ML estimate is $\widehat{\lambda}_n=1/\, \overline{x}_n$, where $x_1,x_2,\ldots,x_n$ are our observed data.
#
# Let us obtain $I_1$, the Fisher Information of one sample, for this experiment to find the standard error:
#
# $$
# \widehat{\mathsf{se}}_n(\widehat{\Lambda}_n) = \frac{1}{\sqrt{n \left. I_1 \right\vert_{\lambda=\widehat{\lambda}_n}}}
# $$
#
# and construct an approximate $95\%$ confidence interval for $\lambda^*$ using the asymptotic normality of its ML estimator $\widehat{\Lambda}_n$.

# + [markdown] lx_assignment_number="3" lx_problem_cell_type="PROBLEM"
# ---
# ## Assignment 3, PROBLEM 5
# Maximum Points = 3

# + [markdown] deletable=false lx_assignment_number="3" lx_assignment_type="ASSIGNMENT" lx_assignment_type2print="Assignment" lx_problem_cell_type="PROBLEM" lx_problem_number="5" lx_problem_points="3"
#
# Obtain the 95% CI based on the asymptotic normality of the MLE for the mean parameter $\lambda$ based on $n$ IID $Poisson(\lambda^*)$ trials.
#
# Recall that a random variable $X \sim Poisson(\lambda)$ if its probability mass function is:
#
# $$
# f(x; \lambda) = \exp{(-\lambda)} \frac{\lambda^x}{x!}, \quad \lambda > 0, \quad x \in \{0,1,2,\ldots\}
# $$
#
# The MLE $\widehat{\lambda}_n = \overline{x}_n$, the sample mean.
#
# Work out your answer and express it in the next cell by replacing `XXX`s.
# + deletable=false lx_assignment_number="3" lx_assignment_type="ASSIGNMENT" lx_assignment_type2print="Assignment" lx_problem_cell_type="PROBLEM" lx_problem_number="5" lx_problem_points="3" # Only replace the XXX below, do not change the function naemes or parameters import numpy as np samplePoissonCounts = np.array([0,5,11,5,6,8,9,0,1,14,2,4,4,11,2,12,10,5,6,1,7,9,8,0,5,7,11,6,0,1]) def Assignment3Problem5(poissonSamples): '''return the 95% confidence interval as a 2-tuple for the unknown parameter lambda* from n IID Poisson(lambda*) trials in the input numpy array called samplePoissonCounts''' XXX XXX XXX lower95CI=XXX upper95CI=XXX return (lower95CI,upper95CI) # do NOT change anything below lowerCISampleExamProblem5,upperCISampleExamProblem5 = Assignment3Problem5(samplePoissonCounts) print "The 95% CI for lambda based on IID Poisson(lambda) data in samplePoissonCounts = " print (lowerCISampleExamProblem5,upperCISampleExamProblem5) # - # # Hypothesis Testing # # The subset of *all posable hypotheses* that have the property of *[falsifiability](https://en.wikipedia.org/wiki/Falsifiability)* constitute the space of *scientific hypotheses*. # Roughly, a falsifiable statistical hypothesis is one for which a statistical experiment can be designed to produce data or empirical observations that an experimenter can use to falsify or reject it. # In the *statistical decision problem of hypothesis testing*, we are interested in empirically falsifying a scientific hypothesis, i.e. we attempt to reject a hypothesis on the basis of empirical observations or data. # Thus, hypothesis testing has its roots in the *philosophy of science* and is based on *Karl Popper's falsifiability criterion for demarcating scientific hypotheses from the set of all posable hypotheses*. # # ## Introduction # Usually, the hypothesis we **attempt to reject or falsify** is called the **null hypothesis** or $H_0$ and its complement is called the **alternative hypothesis** or $H_1$. 
# For example, consider the following two hypotheses:
#
# - $H_0$: The average waiting time at an Orbiter bus stop *is less than or equal to* $10$ minutes.
# - $H_1$: The average waiting time at an Orbiter bus stop *is more than* $10$ minutes.
#
# If the sample mean $\overline{x}_n$ is much larger than $10$ minutes then we may be inclined to reject the null hypothesis that the average waiting time is less than or equal to $10$ minutes.
#
# Suppose we are interested in the following slightly different hypothesis test for the Orbiter bus stop problem:
#
# - $H_0$: The average waiting time at an Orbiter bus stop *is equal to* $10$ minutes.
# - $H_1$: The average waiting time at an Orbiter bus stop *is not* $10$ minutes.
#
# Once again we can use the sample mean as the test statistic, but this time we may be inclined to reject the null hypothesis if the sample mean $\overline{x}_n$ is much larger than *or* much smaller than $10$ minutes.
# The procedure for rejecting such a null hypothesis is called the **Wald test** we are about to see.
#
# More generally, suppose we have the following parametric experiment based on $n$ IID trials:
# $$
# X_1,X_2,\ldots,X_n \overset{IID}{\sim} F(x_1;\theta^*), \quad \text{ with an unknown (and fixed) } \theta^* \in \mathbf{\Theta} \ .
# $$
#
# Let us partition the parameter space $\mathbf{\Theta}$ into $\mathbf{\Theta}_0$, the null parameter space, and $\mathbf{\Theta}_1$, the alternative parameter space, i.e.,
# $$\mathbf{\Theta}_0 \cup \mathbf{\Theta}_1 = \mathbf{\Theta}, \qquad \text{and} \qquad \mathbf{\Theta}_0 \cap \mathbf{\Theta}_1 = \emptyset \ .$$
#
# Then, we can formalise testing the null hypothesis versus the alternative as follows:
# $$
# H_0 : \theta^* \in \mathbf{\Theta}_0 \qquad \text{versus} \qquad H_1 : \theta^* \in \mathbf{\Theta}_1 \ .
# $$ # # The basic idea involves finding an appropriate **rejection region** $\mathbb{X}_R$ within the **data space** $\mathbb{X}$ and rejecting $H_0$ if the observed data $x:=(x_1,x_2,\ldots,x_n)$ falls inside the rejection region $\mathbb{X}_R$, # $$ # \text{If $x:=(x_1,x_2,\ldots,x_n) \in \mathbb{X}_R \subset \mathbb{X}$, then reject $H_0$, else do not reject $H_0$.} # $$ # Typically, the rejection region $\mathbb{X}_R$ is of the form: # $$ # \mathbb{X}_R := \{ x:=(x_1,x_2,\ldots,x_n) : T(x) > c \} # $$ # where, $T$ is the **test statistic** and $c$ is the **critical value**. Thus, the problem of finding $\mathbb{X}_R$ boils down to that of finding $T$ and $c$ that are appropriate. Once the rejection region is defined, the possible outcomes of a hypothesis test are summarised in the following table. # # # The outcomes of a hypothesis test, in general, are: # # <table border="1" cellspacing="2" cellpadding="2" align="center"> # <tbody> # <tr> # <td align="center">'true state of nature'</td> # <td align="center"><strong>Do not reject $H_0$<br /></strong></td> # <td align="center"><strong>Reject $H_0$<br /></strong></td> # </tr> # <tr> # <td> # <p><strong>$H_0$ is true<br /></strong></p> # <p>&nbsp;</p> # </td> # <td align="center"> # <p>OK<span style="color: #3366ff;">&nbsp;</span></p> # </td> # <td align="center"> # <p>Type I error</p> # </td> # </tr> # <tr> # <td> # <p><strong>$H_0$ is false</strong></p> # </td> # <td align="center">Type II error</td> # <td align="center">OK</td> # </tr> # </tbody> # </table> # # So, intuitively speaking, we want a small probability that we reject $H_0$ when $H_0$ is true (minimise Type I error). Similarly, we want to minimise the probability that we fail to reject $H_0$ when $H_0$ is false (type II error). Let us formally see how to achieve these goals. 
#
# ## Power, Size and Level of a Test
#
# ### Power Function
#
# The **power function** of a test with rejection region $\mathbb{X}_R$ is
# $$
# \boxed{
# \beta(\theta) := P_{\theta}(x \in \mathbb{X}_R)
# }
# $$
# So $\beta(\theta)$ is the power of the test if the data were generated under the parameter value $\theta$, i.e. the probability that the observed data $x$, sampled from the distribution specified by $\theta$, falls in the rejection region $\mathbb{X}_R$ and thereby leads to a rejection of the null hypothesis.
#
# ### Size of a test
# The $\mathsf{size}$ of a test with rejection region $\mathbb{X}_R$ is the supreme power under the null hypothesis, i.e. the supreme probability of rejecting the null hypothesis when the null hypothesis is true:
# $$
# \boxed{
# \mathsf{size} := \sup_{\theta \in \mathbf{\Theta}_0} \beta(\theta) := \sup_{\theta \in \mathbf{\Theta}_0} P_{\theta}(x \in \mathbb{X}_R) \ .
# }
# $$
# The $\mathsf{size}$ of a test is often denoted by $\alpha$. A test is said to have $\mathsf{level}$ $\alpha$ if its $\mathsf{size}$ is less than or equal to $\alpha$.
#
#
# ## Wald test
#
# The Wald test is based on a direct relationship between the $1-\alpha$ confidence interval and a $\mathsf{size}$ $\alpha$ test. It can be used for testing simple hypotheses involving a scalar parameter.
#
# ### Definition
#
# Let $\widehat{\Theta}_n$ be an asymptotically normal estimator of the fixed and possibly unknown parameter $\theta^* \in \mathbf{\Theta} \subset \mathbb{X}$ in the parametric IID experiment:
#
# $$
# X_1,X_2,\ldots,X_n \overset{IID}{\sim} F(x_1;\theta^*) \enspace .
# $$ # # Consider testing: # # $$ # \boxed{H_0: \theta^* = \theta_0 \qquad \text{versus} \qquad H_1: \theta^* \neq \theta_0 \enspace .} # $$ # # Suppose that the null hypothesis is true and the estimator $\widehat{\Theta}_n$ of $\theta^*=\theta_0$ is asymptotically normal: # # $$ # \boxed{ # \theta^*=\theta_0, \qquad \frac{\widehat{\Theta}_n - \theta_0}{\widehat{\mathsf{se}}_n} \overset{d}{\to} Normal(0,1) \enspace .} # $$ # # Then, **the Wald test based on the test statistic $W$** is: # $$ # \boxed{ # \text{Reject $H_0$ when $|W|>z_{\alpha/2}$, where $W:=W((X_1,\ldots,X_n))=\frac{\widehat{\Theta}_n ((X_1,\ldots,X_n)) - \theta_0}{\widehat{\mathsf{se}}_n}$.} # } # $$ # The rejection region for the Wald test is: # $$ # \boxed{ # \mathbb{X}_R = \{ x:=(x_1,\ldots,x_n) : |W (x_1,\ldots,x_n) | > z_{\alpha/2} \} \enspace . # } # $$ # # ### Asymptotic $\mathsf{size}$ of a Wald test # # As the sample size $n$ approaches infinity, the $\mathsf{size}$ of the Wald test approaches $\alpha$ : # # $$ # \boxed{ # \mathsf{size} = P_{\theta_0} \left( |W| > z_{\alpha/2} \right) \to \alpha \enspace .} # $$ # # **Proof:** Let $Z \sim Normal(0,1)$. The $\mathsf{size}$ of the Wald test, i.e.~the supreme power under $H_0$ is: # # $$ # \begin{align} # \mathsf{size} # & := \sup_{\theta \in \mathbf{\Theta}_0} \beta(\theta) := \sup_{\theta \in \{\theta_0\}} P_{\theta}(x \in \mathbb{X}_R) = P_{\theta_0}(x \in \mathbb{X}_R) \\ # & = P_{\theta_0} \left( |W| > z_{\alpha/2} \right) = P_{\theta_0} \left( \frac{|\widehat{\theta}_n - \theta_0|}{\widehat{\mathsf{se}}_n} > z_{\alpha/2} \right) \\ # & \to P \left( |Z| > z_{\alpha/2} \right)\\ # & = \alpha \enspace . # \end{align} # $$ # # Next, let us look at the power of the Wald test when the null hypothesis is false. # # ### Asymptotic power of a Wald test # # Suppose $\theta^* \neq \theta_0$. 
The power $\beta(\theta^*)$, which is the probability of correctly rejecting the null hypothesis, is approximately equal to:
#
# $$
# \boxed{
# \Phi \left( \frac{\theta_0-\theta^*}{\widehat{\mathsf{se}}_n} - z_{\alpha/2} \right) +
# \left( 1- \Phi \left( \frac{\theta_0-\theta^*}{\widehat{\mathsf{se}}_n} + z_{\alpha/2} \right) \right) \enspace ,
# }
# $$
# where, $\Phi$ is the DF of $Normal(0,1)$ RV. Since ${\widehat{\mathsf{se}}_n} \to 0$ as $n \to \infty$, the power increases with sample $\mathsf{size}$ $n$. Also, the power increases when $|\theta_0-\theta^*|$ is large.
#
# Now, let us make the connection between the $\mathsf{size}$ $\alpha$ Wald test and the $1-\alpha$ confidence interval explicit.
#
# ### The $\mathsf{size}$ $\alpha$ Wald test
#
# The $\mathsf{size}$ $\alpha$ Wald test rejects:
#
# $$
# \boxed{
# \text{ $H_0: \theta^*=\theta_0$ versus $H_1: \theta^* \neq \theta_0$ if and only if $\theta_0 \notin C_n := (\widehat{\theta}_n-{\widehat{\mathsf{se}}_n} z_{\alpha/2}, \widehat{\theta}_n+{\widehat{\mathsf{se}}_n} z_{\alpha/2})$.
# }}
# $$
#
# $$\boxed{\text{Therefore, testing the hypothesis is equivalent to verifying whether the null value $\theta_0$ is in the confidence interval.}}$$
#
#
# ### Example: Wald test for the mean waiting times at our Orbiter bus-stop
#
# Let us use the Wald test to attempt to reject the null hypothesis that the mean waiting time at our Orbiter bus-stop is $10$ minutes under an IID $Exponential(\lambda^*)$ model. Let $\alpha=0.05$ for this test. We can formulate this test as follows:
# $$
# H_0: \lambda^* = \lambda_0= \frac{1}{10} \quad \text{versus} \quad H_1: \lambda^* \neq \frac{1}{10}, \quad \text{where, } \quad X_1\ldots,X_{132} \overset{IID}{\sim} Exponential(\lambda^*) \enspace .
# $$
# We already obtained the $95\%$ confidence interval based on its MLE's asymptotic normality property to be $[0.0914, 0.1290]$.
# # $$\boxed{\text{Since our null value $\lambda_0=0.1$ belongs to this confidence interval, we fail to reject the null hypothesis from a $\mathsf{size}$ $\alpha=0.05$ Wald test.}}$$ # # We will revisit this example in a more computationally explicit fasion soon below. # ### A Live Example: Simulating Bernoulli Trials to understand Wald Tests # # Let's revisit the MLE for the $Bernoulli(\theta^*)$ model with $n$ IID trails, we have already seen, and test the null hypothesis that the unknown $\theta^* = \theta_0 = 0.5$. # # Thus, we are interested in the null hypothesis $H_0$ versus the alternative hypothesis $H_1$: # # $$\displaystyle{H_0: \theta^*=\theta_0 \quad \text{ versus } \quad H_1: \theta^* \neq \theta_0, \qquad \text{ with }\theta_0=0.5}$$ # # We can test this hypothesis with Type I error at $\alpha$ using the **size-$\alpha$ Wald Test** that builds on the asymptotic normality of the MLE, i.e., # $$\displaystyle{ \frac{\widehat{\theta}_n - \theta_0}{\widehat{se}_n} \overset{d}{\to} Normal(0,1)}$$ # # The size-$\alpha$ Wald test is: # # $$ # \boxed{ # \text{Reject } \ H_0 \quad \text{ when } |W| > z_{\alpha/2}, \quad \text{ where, } \quad W = \frac{\widehat{\theta}_n - \theta_0}{\widehat{se}_n} # } # $$ import numpy as np # do a live simulation ... to implement this test... # simulate from Bernoulli(theta0) n samples # make mle # construct Wald test # make a decision - i.e., decide if you will reject or fail to reject the H0: theta0=0.5 trueTheta=0.45 n=20 myBernSamples=np.array([floor(random()+trueTheta) for i in range(0,n)]) #myBernSamples mle=myBernSamples.mean() # 1/mean mle NullTheta=0.5 se=sqrt(mle*(1.0-mle)/n) W=(mle-NullTheta)/se print abs(W) alpha = 0.05 abs(W) > 2 # alpha=0.05, so z_{alpha/2} =1.96 approx=2 # # Sample Exam Problem 6 # # Consider the following model for the parity (odd=1, even=0) of the first Lotto ball to pop out of the NZ Lotto machine. 
We had $n=1114$ IID trials:
#
# $$\displaystyle{X_1,X_2,\ldots,X_{1114} \overset{IID}{\sim} Bernoulli(\theta^*)}$$
#
# and know from this dataset that the number of odd balls is $546=\sum_{i=1}^{1114} x_i$.
#
# Your task is to perform a Wald Test of size $\alpha=0.05$ to try to reject the null hypothesis that the chance of seeing an odd ball out of the NZ Lotto machine is exactly $1/2$, i.e.,
#
# $$\displaystyle{H_0: \theta^*=\theta_0 \quad \text{ versus } \quad H_1: \theta^* \neq \theta_0, \qquad \text{ with }\theta_0=0.5}$$
#
# Show your work by replacing `XXX`s with the right expressions in the next cell.

# +
# Sample Exam Problem 6 Problem
# exercise template: the student replaces the XXX placeholders with working code

## STEP 1: get the MLE thetaHat
thetaHat=XXX
print "mle thetaHat = ",thetaHat
## STEP 2: get the NullTheta or theta0
NullTheta=XXX
print "Null value of theta under H0 = ", NullTheta
## STEP 3: get estimated standard error
seTheta=XXX # for Bernoulli trials from earlier in 10.ipynb
print "estimated standard error",seTheta
# STEP 4: get Wald Statistic
W=XXX
print "Wald staatistic = ",W
# STEP 5: conduct the size alpha=0.05 Wald test
# do NOT change anything below
rejectNullSampleExamProblem6 = abs(W) > 2.0 # alpha=0.05, so z_{alpha/2} =1.96 approx=2.0
if (rejectNullSampleExamProblem6):
    print "we reject the null hypothesis that theta_0=0.5"
else:
    print "we fail to reject the null hypothesis that theta_0=0.5"

# +
# Sample Exam Problem 6 Solution

## STEP 1: get the MLE thetaHat
n=1114 # sample size
# NOTE(review): in Sage 546/n is an exact rational (273/557); under plain Python 2
# this would be integer division yielding 0 -- this cell assumes a Sage kernel
thetaHat=546/n # MLE is sample mean for IID Bernoulli trials
print "mle thetaHat = ",thetaHat
## STEP 2: get the NullTheta or theta0
NullTheta=0.5
print "Null value of theta under H0 = ", NullTheta
## STEP 3: get estimated standard error
seTheta=sqrt(thetaHat*(1.0-thetaHat)/n) # for Bernoulli trials from earlier in 10.ipynb
print "estimated standard error",seTheta
# STEP 4: get Wald Statistic
# NOTE(review): 'staatistic' typo below is inside a printed string; left unchanged to preserve output
W=(thetaHat-NullTheta)/seTheta
print "Wald staatistic = ",W
# STEP 5: conduct the size alpha=0.05 Wald test
rejectNullSampleExamProblem6 = abs(W) > 2.0 # alpha=0.05, so z_{alpha/2} =1.96 approx=2.0
if (rejectNullSampleExamProblem6):
    print "we reject the null hypothesis that theta_0=0.5"
else:
    print "we fail to reject the null hypothesis that theta_0=0.5"

# + [markdown] lx_assignment_number="3" lx_problem_cell_type="PROBLEM"
# ---
# ## Assignment 3, PROBLEM 6
# Maximum Points = 3

# + [markdown] deletable=false lx_assignment_number="3" lx_assignment_type="ASSIGNMENT" lx_assignment_type2print="Assignment" lx_problem_cell_type="PROBLEM" lx_problem_number="6" lx_problem_points="3"
#
# For the Orbiter waiting time problem, assuming IID trials as follows:
#
# $$\displaystyle{X_1,X_2,\ldots,X_{n} \overset{IID}{\sim} Exponential(\lambda^*)}$$
#
# Your task is to perform a Wald Test of size $\alpha=0.05$ to try to reject the null hypothesis that the waiting time at the Orbiter bus-stop, i.e., the inter-arrival time between buses, is exactly $10$ minutes:
#
# $$\displaystyle{H_0: \lambda^*=\lambda_0 \quad \text{ versus } \quad H_1: \lambda^* \neq \lambda_0, \qquad \text{ with }\lambda_0=0.1}$$
#
# Show your work by replacing `XXX`s with the right expressions in the next cell.

# + deletable=false lx_assignment_number="3" lx_assignment_type="ASSIGNMENT" lx_assignment_type2print="Assignment" lx_problem_cell_type="PROBLEM" lx_problem_number="6" lx_problem_points="3"
import numpy as np

# Orbiter bus-stop waiting times (minutes), n = 132 observations
sampleWaitingTimes = np.array([8,3,7,18,18,3,7,9,9,25,0,0,25,6,10,0,10,8,16,9,1,5,16,6,4,1,3,21,0,28,3,8,6,6,11,\
                               8,10,15,0,8,7,11,10,9,12,13,8,10,11,8,7,11,5,9,11,14,13,5,8,9,12,10,13,6,11,13,0,\
                               0,11,1,9,5,14,16,2,10,21,1,14,2,10,24,6,1,14,14,0,14,4,11,15,0,10,2,13,2,22,10,5,\
                               6,13,1,13,10,11,4,7,9,12,8,16,15,14,5,10,12,9,8,0,5,13,13,6,8,4,13,15,7,11,6,23,1])

#test H0: lambda=0.1
## STEP 1: get the MLE lambdaHat
lambdaHat=XXX # you need to use sampleWaitingTimes here!
print "mle lambdaHat = ",lambdaHat
## STEP 2: get the NullLambda or lambda0
NullLambda=XXX
print "Null value of lambda under H0 = ", NullLambda
## STEP 3: get estimated standard error
seLambda=XXX # see Sample Exam Problem 5 in 10.ipynb
print "estimated standard error",seLambda
# STEP 4: get Wald Statistic
W=XXX
print "Wald statistic = ",W
# STEP 5: conduct the size alpha=0.05 Wald test
# do NOT change anything below
rejectNullAssignment3Problem6 = abs(W) > 2.0 # alpha=0.05, so z_{alpha/2} =1.96 approx=2.0
if (rejectNullAssignment3Problem6):
    print "we reject the null hypothesis that lambda0=0.1"
else:
    print "we fail to reject the null hypothesis that lambda0=0.1"
# -

# ## P-value
#
# It is desirable to have a more informative decision than simply reporting "reject $H_0$" or "fail to reject $H_0$."
#
# For instance, we could ask whether the test rejects $H_0$ for each $\mathsf{size}=\alpha$.
# Typically, if the test rejects at $\mathsf{size}$ $\alpha$ it will also reject at a larger $\mathsf{size}$ $\alpha' > \alpha$.
# Therefore, there is a smallest $\mathsf{size}$ $\alpha$ at which the test rejects $H_0$ and we call this $\alpha$ the $\text{p-value}$ of the test.
#
# $$\boxed{\text{The smallest $\alpha$ at which a $\mathsf{size}$ $\alpha$ test rejects the null hypothesis $H_0$ is the $\text{p-value}$.}}$$
#

# Sage schematic sketch of the p-value as the smallest size at which the test rejects:
# labels and axes drawn as text/line primitives on one plot object p
p=text('Reject $H_0$?',(12,12)); p+=text('No',(30,10)); p+=text('Yes',(30,15)); p+=text('p-value',(70,10))
p+=text('size',(65,4)); p+=text('$0$',(40,4)); p+=text('$1$',(90,4)); p+=points((59,5),rgbcolor='red',size=50)
# axes, the step function of the decision, and the red marker line for the p-value
p+=line([(40,17),(40,5),(95,5)]); p+=line([(40,10),(59,10),(59,15),(90,15)]); p+=line([(68,9.5),(59.5,5.5)],rgbcolor='red');
p.show(axes=False)

# ### Definition of p-value

# Suppose that for every $\alpha \in (0,1)$ we have a $\mathsf{size}$ $\alpha$ test with rejection region $\mathbb{X}_{R,\alpha}$ and test statistic $T$. Then,
# $$
# \text{p-value} := \inf \{ \alpha: T(X) \in \mathbb{X}_{R,\alpha} \} \enspace .
# $$ # That is, the p-value is the smallest $\alpha$ at which a $\mathsf{size}$ $\alpha$ test rejects the null hypothesis. # # ### Understanding p-value # If the evidence against $H_0$ is strong then the p-value will be small. However, a large p-value is not strong evidence in favour of $H_0$. This is because a large p-value can occur for two reasons: # # - $H_0$ is true. # - $H_0$ is false but the test has low power (i.e., high Type II error). # # Finally, it is important to realise that *p-value is not the probability that the null hypothesis is true*, i.e. $\text{p-value} \, \neq P(H_0|x)$, where $x$ is the data. The following itemisation of implications for the evidence scale is useful. # # The scale of the evidence against the null hypothesis $H_0$ in terms of the range of the p-values has the following interpretation that is commonly used: # # - P-value $\in (0.00, 0.01]$ $\implies$ Very strong evidence against $H_0$ # - P-value $\in (0.01, 0.05]$ $\implies$ Strong evidence against $H_0$ # - P-value $\in (0.05, 0.10]$ $\implies$ Weak evidence against $H_0$ # - P-value $\in (0.10, 1.00]$ $\implies$ Little or no evidence against $H_0$ # Next we will see a convenient expression for the p-value for certain tests. # # ### The p-value of a hypothesis test # # Suppose that the $\mathsf{size}$ $\alpha$ test based on the test statistic $T$ and critical value $c_{\alpha}$ is of the form: # # $$ # \text{Reject $H_0$ if and only if $T:=T((X_1,\ldots,X_n))> c_{\alpha}$,} # $$ # # then # # $$ # \boxed{ # \text{p-value} \, = \sup_{\theta \in \mathbf{\Theta}_0} P_{\theta}(T((X_1,\ldots,X_n)) \geq t:=T((x_1,\ldots,x_n))) \enspace ,} # $$ # # where, $(x_1,\ldots,x_n)$ is the observed data and $t$ is the observed value of the test statistic $T$. 
# # In words, **the p-value is the supreme probability under $H_0$ of observing a value of the test statistic the same as or more extreme than what was actually observed.** # # # Let us revisit the Orbiter waiting times example from the p-value perspective. # # ### Example: p-value for the parametric Orbiter bus waiting times experiment # # Let the waiting times at our bus-stop be $X_1,X_2,\ldots,X_{132} \overset{IID}{\sim} Exponential(\lambda^*)$. Consider the following testing problem: # # $$ # H_0: \lambda^*=\lambda_0=\frac{1}{10} \quad \text{versus} \quad H_1: \lambda^* \neq \lambda_0 \enspace . # $$ # # We already saw that the Wald test statistic is: # # $$ # W:=W(X_1,\ldots,X_n)= \frac{\widehat{\Lambda}_n-\lambda_0}{\widehat{\mathsf{se}}_n(\widehat{\Lambda}_n)} = \frac{\frac{1}{\overline{X}_n}-\lambda_0}{\frac{1}{\sqrt{n}\overline{X}_n}} \enspace . # $$ # # The observed test statistic is: # # $$ # w=W(x_1,\ldots,x_{132})= # \frac{\frac{1}{\overline{X}_{132}}-\lambda_0}{\frac{1}{\sqrt{132}\overline{X}_{132}}} # = \frac{\frac{1}{9.0758}-\frac{1}{10}}{\frac{1}{\sqrt{132} \times 9.0758}} = 1.0618 \enspace . # $$ # Since, $W \overset{d}{\to} Z \sim Normal(0,1)$, the p-value for this Wald test is: # # $$ # \begin{align} # \text{p-value} \, # &= \sup_{\lambda \in \mathbf{\Lambda}_0} P_{\lambda} (|W|>|w|)= \sup_{\lambda \in \{\lambda_0\}} P_{\lambda} (|W|>|w|) = P_{\lambda_0} (|W|>|w|) \\ # & \to P (|Z|>|w|)=2 \Phi(-|w|)=2 \Phi(-|1.0618|)=2 \times 0.1442=0.2884 \enspace . # \end{align} # $$ # # Therefore, there is little or no evidence against $H_0$ that the mean waiting time under an IID $Exponential$ model of inter-arrival times is exactly ten minutes. 
# # ## Preparation for Nonparametric Estimation and Testing
# ### YouTry Later
#
# Python's `random` for sampling and sequence manipulation
#
# The Python `random` module, available in SageMath, provides a useful way of
# taking samples if you have already generated a 'population' to sample from,
# or otherwise playing around with the elements in a sequence. See
# https://docs.python.org/3/library/random.html for more details. Here we will
# try a few of them.
#
# The aptly-named `sample` function allows us to take a sample of a specified
# size from a sequence. We will use a list as our sequence:

# `sample` is not a Python builtin (it is auto-exposed only inside Sage), so
# import it explicitly to keep this cell runnable under plain Python 3.
from random import sample

popltn = list(range(1, 101))  # make a population

sample(popltn, 10)  # sample 10 elements from it at random

# Each call to `sample` selects elements WITHOUT replacement within that call
# (note that it will not select the element at any particular position in the
# list more than once, but if there are duplicate elements in the list, such as
# with a list [1,2,4,2,5,3,1,3], then you may well get any of the repeated
# elements in your sample more than once). Successive, independent calls to
# `sample` may of course give you samples containing some of the same elements.

# In Python 3 a `range` object cannot be multiplied, so materialise it as a
# list before repeating it.
popltnWithDuplicates = list(range(1, 11)) * 4  # make a population with repeated elements
print(popltnWithDuplicates)
for i in range(5):
    # `print` is a function in Python 3 (the original used the Python 2
    # print statement, which is a SyntaxError under Python 3).
    print(sample(popltnWithDuplicates, 10))

# Try experimenting with `choice`, which allows you to select one element at
# random from a sequence, and `shuffle`, which shuffles the sequence in place
# (i.e., the ordering of the sequence itself is changed rather than you being
# given a re-ordered copy of the list). It is probably easiest to use lists for
# your sequences. See how `shuffle` is creating permutations of the list. You
# could use `sample` and `shuffle` to emulate *permutations of k objects out of
# n* ...
#
# You may need to check the documentation to see how to use these functions.

# +
# #?sample

# +
# #?shuffle

# +
# #?choice
# -
_in/2019/jp/10.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Pipeline AI

import os

# ### Variables

# Shared settings interpolated into each CLI invocation below via
# `' '.join(command).format(**env)`.
env = {
    'MODEL_NAME': 'safedriver',
    'MODEL_TAG': 'v1',
    'MODEL_TYPE': 'scikit',
    'MODEL_RUNTIME': 'python',
    'MODEL_PATH': './model/',
    'INPUT_HOST_PATH': './model/',
    'OUTPUT_HOST_PATH': './model/',
    'TRAINING_RUNS_HOST': './model/',
    'TRAIN_ARGS': '',
    # NOTE(review): 'PREDCIT_PORT' is a misspelling of PREDICT_PORT, but the
    # deployment cells later in this notebook reference this exact spelling,
    # so the key is kept unchanged; renaming requires updating both places.
    'PREDCIT_PORT': '8989',
}

# ## Training
#
# [Train a Scikit-Learn Model with PipelineAI](https://github.com/PipelineAI/pipeline/blob/master/docs/quickstart/docker/README-training.md#train-a-scikit-learn-model)

# ### Build Training Server

# + cell_style="center"
command = \
    [
        'pipeline',
        'train-server-build',
        '--model-name={MODEL_NAME}',
        '--model-tag={MODEL_TAG}',
        '--model-type={MODEL_TYPE}',
        '--model-path={MODEL_PATH}',
    ]

out = os.popen(' '.join(command).format(**env)).read()
print(out)
# -

# ### Start Training Server

# +
command = \
    [
        'pipeline',
        'train-server-start',
        '--model-name={MODEL_NAME}',
        '--model-tag={MODEL_TAG}',
        '--input-host-path={INPUT_HOST_PATH}',
        '--output-host-path={OUTPUT_HOST_PATH}',
        '--training-runs-host-path={TRAINING_RUNS_HOST}',
        '--train-args={TRAIN_ARGS}',
    ]

out = os.popen(' '.join(command).format(**env)).read()
print(out)
# -

# ### View the Training Logs

# +
command = \
    [
        'pipeline',
        # BUG FIX: this entry was 'pipeline train-server-logs', which joined
        # into the malformed shell command
        # "pipeline pipeline train-server-logs ...".
        'train-server-logs',
        '--model-name={MODEL_NAME}',
        '--model-tag={MODEL_TAG}',
    ]

out = os.popen(' '.join(command).format(**env)).read()
print(out)
# -

# ### Stop Training Server

# +
command = \
    [
        'pipeline',
        'train-server-stop',
        '--model-name={MODEL_NAME}',
        '--model-tag={MODEL_TAG}'
    ]

out = os.popen(' '.join(command).format(**env)).read()
print(out)
# -

# ## Deployment
#
# [Deploy a Scikit-Learn Model with PipelineAI](https://github.com/PipelineAI/pipeline/tree/master/docs/quickstart/docker#deploy-a-scikit-learn-model)

# + [markdown] cell_style="center"
# ### Build the Model into a Runnable Docker Image # + command = \ [ 'pipeline', 'predict-server-build', '--model-name={MODEL_NAME}', '--model-tag={MODEL_TAG}', '--model-type={MODEL_TYPE}', '--model-runtime={MODEL_RUNTIME}', '--model-path={MODEL_PATH}', ] out = os.popen(' '.join(command).format(**env)).read() print(out) # - # ### Start the Model Server # + command = \ [ 'pipeline', 'predict-server-start', '--model-name={MODEL_NAME}', '--model-tag={MODEL_TAG}', '--predict-port={PREDCIT_PORT}', ] out = os.popen(' '.join(command).format(**env)).read() print(out) # - # ### Monitor Runtime Logs # + command = \ [ 'pipeline', 'predict-server-logs', '--model-name={MODEL_NAME}', '--model-tag={MODEL_TAG}', ] out = os.popen(' '.join(command).format(**env)).read() print(out) # - # ## PipelineCLI Predict # + command = \ [ 'pipeline', 'predict-server-test', '--endpoint-url=http://localhost:{PREDCIT_PORT}/invoke', '--test-request-path=./model/pipeline_test_request.json', ] out = os.popen(' '.join(command).format(**env)).read() print(out) # - # ### Stop the Model Server # + command = \ [ 'pipeline', 'predict-server-stop', '--model-name={MODEL_NAME}', '--model-tag={MODEL_TAG}', ] out = os.popen(' '.join(command).format(**env)).read() print(out)
notebooks/1-workflow/06-deployment.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="view-in-github" colab_type="text" # <a href="https://colab.research.google.com/github/OHDSI/ClinicalTrialsWGETL/blob/master/notebooks/phuse_source.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> # + [markdown] id="sH3NEgxWNDv9" # # **PHUSE dataset** # + id="gex47eJkMqfO" cellView="form" #@title **This block of code will install PosgreSQL** # %%capture # !wget -qO- https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - # !echo "deb http://apt.postgresql.org/pub/repos/apt/ bionic-pgdg main" >/etc/apt/sources.list.d/pgdg.list # !apt -qq update # !apt -yq install postgresql-12 postgresql-client-12 # !service postgresql start # make calling psql shorter # !sudo -u postgres psql -c "CREATE USER root WITH SUPERUSER" # !psql postgres -c "CREATE DATABASE root" # now just !psql -c "..." 
# load SQL extensions # %load_ext sql # %config SqlMagic.feedback=False # %config SqlMagic.autopandas=True # %sql postgresql+psycopg2://@/postgres # + id="6p3GrsecPLOe" cellView="form" #@title **Download the PHUSE data to local environment** # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/ae.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/cm.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/dm.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/ds.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/ex.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/lbch.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/lbhe.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/lbur.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/mh.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/qsco.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/qsda.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/qsgi.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/qshi.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/qsmm.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/qsni.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/relrec.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/sc.csv # !wget -q 
https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/se.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/suppae.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/suppdm.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/suppds.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/supplbch.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/supplbhe.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/supplbur.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/sv.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/ta.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/te.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/ti.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/ts.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/tv.csv # !wget -q https://raw.githubusercontent.com/OHDSI/ClinicalTrialsWGETL/master/data/source/csv/vs.csv # !mkdir -p source # !mv *.csv source/ # + id="ivfm-5tCUMCg" cellView="form" #@title **Check the files** # !ls source # + id="kXA7ijdSPYd9" cellView="form" #@title **Create source tables** # %%sql DROP SCHEMA IF EXISTS src CASCADE; CREATE SCHEMA src; -- DROP TABLE IF EXISTS src.ae; CREATE TABLE src.ae ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), aeseq NUMERIC, aespid VARCHAR(3), aeterm VARCHAR(200), aellt VARCHAR(100), aelltcd INTEGER, aedecod VARCHAR(200), aeptcd INTEGER, aehlt VARCHAR(100), aehltcd INTEGER, aehlgt VARCHAR(100), aehlgtcd 
INTEGER, aebodsys VARCHAR(67), aebdsycd INTEGER, aesoc VARCHAR(100), aesoccd INTEGER, aesev VARCHAR(8), aeser VARCHAR(1), aeacn VARCHAR(30), aerel VARCHAR(8), aeout VARCHAR(200), aescan VARCHAR(1), aescong VARCHAR(1), aesdisab VARCHAR(1), aesdth VARCHAR(1), aeshosp VARCHAR(1), aeslife VARCHAR(1), aesod VARCHAR(1), epoch VARCHAR(9), aedtc VARCHAR(10), aestdtc VARCHAR(10), aeendtc VARCHAR(10), aedy INTEGER, aestdy INTEGER, aeendy INTEGER ); COPY src.ae( studyid, domain, usubjid, aeseq, aespid, aeterm, aellt, aelltcd, aedecod, aeptcd, aehlt, aehltcd, aehlgt, aehlgtcd, aebodsys, aebdsycd, aesoc, aesoccd, aesev, aeser, aeacn, aerel, aeout, aescan, aescong, aesdisab, aesdth, aeshosp, aeslife, aesod, epoch, aedtc, aestdtc, aeendtc, aedy, aestdy, aeendy ) FROM '/content/source/ae.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.cm; CREATE TABLE src.cm ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), cmseq NUMERIC, cmspid VARCHAR(2), cmtrt VARCHAR(44), cmdecod VARCHAR(24), cmindc VARCHAR(34), cmclas VARCHAR(42), cmdose NUMERIC, cmdosu VARCHAR(17), cmdosfrq VARCHAR(15), cmroute VARCHAR(200), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), cmdtc VARCHAR(10), -- for incomplete dates cmstdtc VARCHAR(10), -- for incomplete dates cmendtc VARCHAR(10), -- for incomplete dates cmdy INTEGER, cmstdy INTEGER, cmendy INTEGER ); COPY src.cm( studyid, domain, usubjid, cmseq, cmspid, cmtrt, cmdecod, cmindc, cmclas, cmdose, cmdosu, cmdosfrq, cmroute, visitnum, visit, visitdy, epoch, cmdtc, cmstdtc, cmendtc, cmdy, cmstdy, cmendy ) FROM '/content/source/cm.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.dm; CREATE TABLE src.dm ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), subjid VARCHAR(4), rfstdtc VARCHAR(10), -- instead of DATE for possible incomplete dates rfendtc VARCHAR(10), -- instead of DATE for possible incomplete dates rfxstdtc VARCHAR(20), -- instead of TIMESTAMP for possible incomplete dates rfxendtc 
VARCHAR(20), -- instead of TIMESTAMP for possible incomplete dates rficdtc VARCHAR(20), -- instead of TIMESTAMP for possible incomplete dates rfpendtc VARCHAR(20), -- instead of TIMESTAMP for possible incomplete dates dthdtc VARCHAR(20), -- instead of TIMESTAMP for possible incomplete dates dthfl VARCHAR(1), siteid VARCHAR(3), age INTEGER, ageu VARCHAR(6), sex VARCHAR(1), race VARCHAR(78), ethnic VARCHAR(25), armcd VARCHAR(8), arm VARCHAR(20), actarmcd VARCHAR(8), actarm VARCHAR(20), country VARCHAR(3), dmdtc VARCHAR(10), -- instead of DATE for possible incomplete dates dmdy INTEGER ); COPY src.dm( studyid, domain, usubjid, subjid, rfstdtc, rfendtc, rfxstdtc, rfxendtc, rficdtc, rfpendtc, dthdtc, dthfl, siteid, age, ageu, sex, race, ethnic, armcd, arm, actarmcd, actarm, country, dmdtc, dmdy ) FROM '/content/source/dm.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.ds; CREATE TABLE src.ds ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), dsseq NUMERIC, dsspid VARCHAR(2), dsterm VARCHAR(63), dsdecod VARCHAR(63), dscat VARCHAR(17), visitnum NUMERIC, visit VARCHAR(17), epoch VARCHAR(9), dsdtc VARCHAR(20), -- instead of TIMESTAMP for possible incomplete dates dsstdtc VARCHAR(10), -- instead of DATE for possible incomplete dates dsdy INTEGER, dsstdy INTEGER ); COPY src.ds( studyid, domain, usubjid, dsseq, dsspid, dsterm, dsdecod, dscat, visitnum, visit, epoch, dsdtc, dsstdtc, dsdy, dsstdy ) FROM '/content/source/ds.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.ex; CREATE TABLE src.ex ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), exseq NUMERIC, -- in define it's INTEGER extrt VARCHAR(10), exdose NUMERIC, -- in define it's INTEGER exdosu VARCHAR(2), exdosfrm VARCHAR(5), exdosfrq VARCHAR(2), exroute VARCHAR(11), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), exstdtc VARCHAR(10), -- instead of DATE for possible incomplete dates exendtc VARCHAR(10), -- instead of DATE for possible incomplete dates 
exstdy INTEGER, exendy INTEGER ); COPY src.ex( studyid, domain, usubjid, exseq, extrt, exdose, exdosu, exdosfrm, exdosfrq, exroute, visitnum, visit, visitdy, epoch, exstdtc, exendtc, exstdy, exendy ) FROM '/content/source/ex.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.lbch; CREATE TABLE src.lbch ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), lbseq INTEGER, -- in define it's INTEGER lbtestcd VARCHAR(7), lbtest VARCHAR(200), lbcat VARCHAR(10), lborres VARCHAR(5), lborresu VARCHAR(8), lbornrlo VARCHAR(5), -- in defite it's text with lenght 5 lbornrhi VARCHAR(7), lbstresc VARCHAR(8), lbstresn NUMERIC, -- in define it's INTEGER lbstresu VARCHAR(8), lbstnrlo NUMERIC, -- in define it's INTEGER lbstnrhi NUMERIC, -- in define it's INTEGER lbnrind VARCHAR(200), lbblfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), lbdtc VARCHAR(16), -- instead of TIMESTAMP for possible incomplete dates lbdy INTEGER ); COPY src.lbch( studyid, domain, usubjid, lbseq, lbtestcd, lbtest, lbcat, lborres, lborresu, lbornrlo, lbornrhi, lbstresc, lbstresn, lbstresu, lbstnrlo, lbstnrhi, lbnrind, lbblfl, visitnum, visit, visitdy, epoch, lbdtc, lbdy ) FROM '/content/source/lbch.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.lbhe; CREATE TABLE src.lbhe ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), lbseq INTEGER, -- in define it's INTEGER lbtestcd VARCHAR(7), lbtest VARCHAR(200), lbcat VARCHAR(10), lborres VARCHAR(5), lborresu VARCHAR(8), lbornrlo VARCHAR(5), lbornrhi VARCHAR(7), lbstresc VARCHAR(8), lbstresn NUMERIC, -- in define it's INTEGER lbstresu VARCHAR(8), lbstnrlo NUMERIC, -- in define it's INTEGER lbstnrhi NUMERIC, -- in define it's INTEGER lbnrind VARCHAR(200), lbblfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), lbdtc VARCHAR(16), -- instead of TIMESTAMP for possible incomplete dates lbdy INTEGER ); COPY src.lbhe ( studyid, domain, usubjid, lbseq, lbtestcd, 
lbtest, lbcat, lborres, lborresu, lbornrlo, lbornrhi, lbstresc, lbstresn, lbstresu, lbstnrlo, lbstnrhi, lbnrind, lbblfl, visitnum, visit, visitdy, epoch, lbdtc, lbdy ) FROM '/content/source/lbhe.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.lbur; CREATE TABLE src.lbur ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), lbseq NUMERIC, -- in define it's INTEGER lbtestcd VARCHAR(7), lbtest VARCHAR(200), lbcat VARCHAR(10), lborres VARCHAR(5), lborresu VARCHAR(8), lbornrlo VARCHAR(5), lbornrhi VARCHAR(7), lbstresc VARCHAR(8), lbstresn NUMERIC, -- in define it's INTEGER lbstresu VARCHAR(8), lbstnrlo NUMERIC, -- in define it's INTEGER lbstnrhi NUMERIC, -- in define it's INTEGER lbnrind VARCHAR(200), lbblfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), lbdtc VARCHAR(16), -- instead of TIMESTAMP for possible incomplete dates lbdy INTEGER ); COPY src.lbur ( studyid, domain, usubjid, lbseq, lbtestcd, lbtest, lbcat, lborres, lborresu, lbornrlo, lbornrhi, lbstresc, lbstresn, lbstresu, lbstnrlo, lbstnrhi, lbnrind, lbblfl, visitnum, visit, visitdy, epoch, lbdtc, lbdy ) FROM '/content/source/lbur.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.mh; CREATE TABLE src.mh ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), mhseq NUMERIC, -- in define it's INTEGER mhspid VARCHAR(3), mhterm VARCHAR(19), mhllt VARCHAR(200), mhdecod VARCHAR(44), mhhlt VARCHAR(200), mhhlgt VARCHAR(200), mhcat VARCHAR(34), mhbodsys VARCHAR(67), mhsev VARCHAR(8), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, mhdtc VARCHAR(10), -- instead of DATE for possible incomplete dates mhstdtc VARCHAR(10), -- instead of DATE for possible incomplete dates mhdy INTEGER ); COPY src.mh ( studyid, domain, usubjid, mhseq, mhspid, mhterm, mhllt, mhdecod, mhhlt, mhhlgt, mhcat, mhbodsys, mhsev, visitnum, visit, visitdy, mhdtc, mhstdtc, mhdy ) FROM '/content/source/mh.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.qsco; CREATE 
TABLE src.qsco ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), qsseq NUMERIC, -- in define it's INTEGER qstestcd VARCHAR(8), qstest VARCHAR(40), qscat VARCHAR(70), qsscat VARCHAR(26), qsorres VARCHAR(20), qsorresu VARCHAR(7), qsstresc VARCHAR(4), qsstresn NUMERIC, -- in define it's INTEGER qsstresu VARCHAR(7), qsstat VARCHAR(8), qsreasnd VARCHAR(17), qsblfl VARCHAR(1), qsdrvfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), qsdtc VARCHAR(10), -- instead of DATE for possible incomplete dates qsdy INTEGER ); COPY src.qsco ( studyid, domain, usubjid, qsseq, qstestcd, qstest, qscat, qsscat, qsorres, qsorresu, qsstresc, qsstresn, qsstresu, qsstat, qsreasnd, qsblfl, qsdrvfl, visitnum, visit, visitdy, epoch, qsdtc, qsdy ) FROM '/content/source/qsco.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.qsda; CREATE TABLE src.qsda ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), qsseq NUMERIC,-- in define it's INTEGER qstestcd VARCHAR(8), qstest VARCHAR(40), qscat VARCHAR(70), qsscat VARCHAR(26), qsorres VARCHAR(20), qsorresu VARCHAR(7), qsstresc VARCHAR(4), qsstresn NUMERIC, -- in define it's INTEGER qsstresu VARCHAR(7), qsstat VARCHAR(8), qsreasnd VARCHAR(17), qsblfl VARCHAR(1), qsdrvfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), qsdtc VARCHAR(10), -- instead of DATE for possible incomplete dates qsdy INTEGER ); COPY src.qsda ( studyid, domain, usubjid, qsseq, qstestcd, qstest, qscat, qsscat, qsorres, qsorresu, qsstresc, qsstresn, qsstresu, qsstat, qsreasnd, qsblfl, qsdrvfl, visitnum, visit, visitdy, epoch, qsdtc, qsdy ) FROM '/content/source/qsda.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.qsgi; CREATE TABLE src.qsgi ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), qsseq NUMERIC, -- in define it's INTEGER qstestcd VARCHAR(8), qstest VARCHAR(40), qscat VARCHAR(70), qsscat VARCHAR(70), qsorres VARCHAR(20), qsorresu VARCHAR(7), qsstresc 
VARCHAR(4), qsstresn NUMERIC, -- in define it's INTEGER qsstresu VARCHAR(7), qsstat VARCHAR(8), qsreasnd VARCHAR(17), qsblfl VARCHAR(1), qsdrvfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), qsdtc VARCHAR(10), -- instead of DATE for possible incomplete dates qsdy INTEGER ); COPY src.qsgi ( studyid, domain, usubjid, qsseq, qstestcd, qstest, qscat, qsscat, qsorres, qsorresu, qsstresc, qsstresn, qsstresu, qsstat, qsreasnd, qsblfl, qsdrvfl, visitnum, visit, visitdy, epoch, qsdtc, qsdy ) FROM '/content/source/qsgi.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.qshi; CREATE TABLE src.qshi ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), qsseq NUMERIC,-- in define it's INTEGER qstestcd VARCHAR(8), qstest VARCHAR(40), qscat VARCHAR(70), qsscat VARCHAR(26), qsorres VARCHAR(20), qsorresu VARCHAR(7), qsstresc VARCHAR(4), qsstresn NUMERIC,-- in define it's INTEGER qsstresu VARCHAR(7), qsstat VARCHAR(8), qsreasnd VARCHAR(17), qsblfl VARCHAR(1), qsdrvfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), qsdtc VARCHAR(10),-- instead of DATE for possible incomplete dates qsdy INTEGER ); COPY src.qshi ( studyid, domain, usubjid, qsseq, qstestcd, qstest, qscat, qsscat, qsorres, qsorresu, qsstresc, qsstresn, qsstresu, qsstat, qsreasnd, qsblfl, qsdrvfl, visitnum, visit, visitdy, epoch, qsdtc, qsdy ) FROM '/content/source/qshi.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.qsmm; CREATE TABLE src.qsmm ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), qsseq NUMERIC,-- in define it's INTEGER qstestcd VARCHAR(8), qstest VARCHAR(40), qscat VARCHAR(70), qsscat VARCHAR(26), qsorres VARCHAR(20), qsorresu VARCHAR(7), qsstresc VARCHAR(4), qsstresn NUMERIC,-- in define it's INTEGER qsstresu VARCHAR(7), qsstat VARCHAR(8), qsreasnd VARCHAR(17), qsblfl VARCHAR(1), qsdrvfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), qsdtc VARCHAR(10),-- 
instead of DATE for possible incomplete dates qsdy INTEGER ); COPY src.qsmm ( studyid, domain, usubjid, qsseq, qstestcd, qstest, qscat, qsscat, qsorres, qsorresu, qsstresc, qsstresn, qsstresu, qsstat, qsreasnd, qsblfl, qsdrvfl, visitnum, visit, visitdy, epoch, qsdtc, qsdy ) FROM '/content/source/qsmm.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.qsni; CREATE TABLE src.qsni ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), qsseq NUMERIC,-- in define it's INTEGER qstestcd VARCHAR(8), qstest VARCHAR(40), qscat VARCHAR(70), qsscat VARCHAR(26), qsorres VARCHAR(20), qsorresu VARCHAR(7), qsstresc VARCHAR(4), qsstresn NUMERIC,-- in define it's INTEGER qsstresu VARCHAR(7), qsstat VARCHAR(8), qsreasnd VARCHAR(17), qsblfl VARCHAR(1), qsdrvfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), qsdtc VARCHAR(10),-- instead of DATE for possible incomplete dates qsdy INTEGER ); COPY src.qsni ( studyid, domain, usubjid, qsseq, qstestcd, qstest, qscat, qsscat, qsorres, qsorresu, qsstresc, qsstresn, qsstresu, qsstat, qsreasnd, qsblfl, qsdrvfl, visitnum, visit, visitdy, epoch, qsdtc, qsdy ) FROM '/content/source/qsni.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.relrec; CREATE TABLE src.relrec ( studyid VARCHAR(12), rdomain VARCHAR(2), usubjid VARCHAR(11), idvar VARCHAR(8), idvarval VARCHAR(200), reltype VARCHAR(30), relid VARCHAR(200) ); COPY src.relrec ( studyid, rdomain, usubjid, idvar, idvarval, reltype, relid ) FROM '/content/source/relrec.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.sc; CREATE TABLE src.sc ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), scseq NUMERIC,-- in define it's INTEGER sctestcd VARCHAR(8), sctest VARCHAR(27), sccat VARCHAR(9), scorres VARCHAR(2), scorresu VARCHAR(5), scstresc VARCHAR(2), scstresn NUMERIC,-- in define it's INTEGER scstresu VARCHAR(5), scdtc VARCHAR(10),-- instead of DATE for possible incomplete dates scdy INTEGER ); COPY src.sc ( studyid, 
domain, usubjid, scseq, sctestcd, sctest, sccat, scorres, scorresu, scstresc, scstresn, scstresu, scdtc, scdy ) FROM '/content/source/sc.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.se; CREATE TABLE src.se ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), seseq NUMERIC, -- in define it's INTEGER etcd VARCHAR(200), element VARCHAR(200), seupdes VARCHAR(200), epoch VARCHAR(9), sestdtc VARCHAR(10), -- instead of DATE for possible incomplete dates seendtc VARCHAR(10), -- instead of DATE for possible incomplete dates sestdy INTEGER, seendy INTEGER ); COPY src.se ( studyid, domain, usubjid, seseq, etcd, element, seupdes, epoch, sestdtc, seendtc, sestdy, seendy ) FROM '/content/source/se.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.suppae; CREATE TABLE src.suppae ( studyid VARCHAR(12), rdomain VARCHAR(2), usubjid VARCHAR(11), idvar VARCHAR(8), idvarval VARCHAR(200), qnam VARCHAR(8), qlabel VARCHAR(40), qval VARCHAR(200), qorig VARCHAR(200), qeval VARCHAR(200) ); COPY src.suppae ( studyid, rdomain, usubjid, idvar, idvarval, qnam, qlabel, qval, qorig, qeval ) FROM '/content/source/suppae.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.suppdm; CREATE TABLE src.suppdm ( studyid VARCHAR(12), rdomain VARCHAR(2), usubjid VARCHAR(11), idvar VARCHAR(8), idvarval VARCHAR(200), qnam VARCHAR(8), qlabel VARCHAR(40), qval VARCHAR(200), qorig VARCHAR(200), qeval VARCHAR(200) ); COPY src.suppdm ( studyid, rdomain, usubjid, idvar, idvarval, qnam, qlabel, qval, qorig, qeval ) FROM '/content/source/suppdm.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.suppds; CREATE TABLE src.suppds ( studyid VARCHAR(12), rdomain VARCHAR(2), usubjid VARCHAR(11), idvar VARCHAR(8), idvarval VARCHAR(200), qnam VARCHAR(8), qlabel VARCHAR(40), qval VARCHAR(200), qorig VARCHAR(200), qeval VARCHAR(200) ); COPY src.suppds ( studyid, rdomain, usubjid, idvar, idvarval, qnam, qlabel, qval, qorig, qeval ) FROM '/content/source/suppds.csv' DELIMITER ',' CSV 
HEADER; -- DROP TABLE IF EXISTS src.supplbch; CREATE TABLE src.supplbch ( studyid VARCHAR(12), rdomain VARCHAR(2), usubjid VARCHAR(11), idvar VARCHAR(8), idvarval VARCHAR(200), qnam VARCHAR(8), qlabel VARCHAR(40), qval VARCHAR(200), qorig VARCHAR(200), qeval VARCHAR(200) ); COPY src.supplbch ( studyid, rdomain, usubjid, idvar, idvarval, qnam, qlabel, qval, qorig, qeval ) FROM '/content/source/supplbch.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.supplbhe; CREATE TABLE src.supplbhe ( studyid VARCHAR(12), rdomain VARCHAR(2), usubjid VARCHAR(11), idvar VARCHAR(8), idvarval VARCHAR(200), qnam VARCHAR(8), qlabel VARCHAR(40), qval VARCHAR(200), qorig VARCHAR(200), qeval VARCHAR(200) ); COPY src.supplbhe ( studyid, rdomain, usubjid, idvar, idvarval, qnam, qlabel, qval, qorig, qeval ) FROM '/content/source/supplbhe.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.supplbur; CREATE TABLE src.supplbur ( studyid VARCHAR(12), rdomain VARCHAR(2), usubjid VARCHAR(11), idvar VARCHAR(8), idvarval VARCHAR(200), qnam VARCHAR(8), qlabel VARCHAR(40), qval VARCHAR(200), qorig VARCHAR(200), qeval VARCHAR(200) ); COPY src.supplbur ( studyid, rdomain, usubjid, idvar, idvarval, qnam, qlabel, qval, qorig, qeval ) FROM '/content/source/supplbur.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.sv; CREATE TABLE src.sv ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), visitnum NUMERIC, visit VARCHAR(19), visitdy NUMERIC, -- in define it's INTEGER epoch VARCHAR(9), svstdtc VARCHAR(10), -- instead of DATE for possible incomplete dates svendtc VARCHAR(10), -- instead of DATE for possible incomplete dates svstdy INTEGER, svendy INTEGER, svupdes VARCHAR(51) ); COPY src.sv ( studyid, domain, usubjid, visitnum, visit, visitdy, epoch, svstdtc, svendtc, svstdy, svendy, svupdes ) FROM '/content/source/sv.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.ta; CREATE TABLE src.ta ( studyid VARCHAR(12), domain VARCHAR(2), armcd VARCHAR(8), arm 
VARCHAR(20), taetord INTEGER, etcd VARCHAR(200), element VARCHAR(200), tabranch VARCHAR(200), tatrans VARCHAR(200), epoch VARCHAR(200) ); COPY src.ta ( studyid, domain, armcd, arm, taetord, etcd, element, tabranch, tatrans, epoch ) FROM '/content/source/ta.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.te; CREATE TABLE src.te ( studyid VARCHAR(12), domain VARCHAR(2), etcd VARCHAR(200), element VARCHAR(200), testrl VARCHAR(200), teenrl VARCHAR(200), tedur VARCHAR(200) ); COPY src.te ( studyid, domain, etcd, element, testrl, teenrl, tedur ) FROM '/content/source/te.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.ti; CREATE TABLE src.ti ( studyid VARCHAR(12), domain VARCHAR(2), ietestcd VARCHAR(16), ietest VARCHAR(166), iecat VARCHAR(9) ); COPY src.ti ( studyid, domain, ietestcd, ietest, iecat ) FROM '/content/source/ti.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.ts; CREATE TABLE src.ts ( studyid VARCHAR(12), domain VARCHAR(2), tsseq NUMERIC, -- in define it's INTEGER tsparmcd VARCHAR(200), tsparm VARCHAR(200), tsval VARCHAR(200), tsvalnf VARCHAR(4), tsvalcd VARCHAR(11), tsvcdref VARCHAR(8), tsvcdver VARCHAR(18) ); COPY src.ts ( studyid, domain, tsseq, tsparmcd, tsparm, tsval, tsvalnf, tsvalcd, tsvcdref, tsvcdver ) FROM '/content/source/ts.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.tv; CREATE TABLE src.tv ( studyid VARCHAR(12), domain VARCHAR(2), visitnum NUMERIC, visit VARCHAR(90), visitdy INTEGER, armcd VARCHAR(8), tvstrl VARCHAR(200), tvenrl VARCHAR(200) ); COPY src.tv ( studyid, domain, visitnum, visit, visitdy, armcd, tvstrl, tvenrl ) FROM '/content/source/tv.csv' DELIMITER ',' CSV HEADER; -- DROP TABLE IF EXISTS src.vs; CREATE TABLE src.vs ( studyid VARCHAR(12), domain VARCHAR(2), usubjid VARCHAR(11), vsseq NUMERIC, -- in define it's INTEGER vstestcd VARCHAR(6), vstest VARCHAR(24), vspos VARCHAR(8), vsorres VARCHAR(5), vsorresu VARCHAR(9), vsstresc VARCHAR(6), vsstresn NUMERIC, -- in define it's INTEGER 
vsstresu VARCHAR(9), vsstat VARCHAR(8), vsloc VARCHAR(11), vsblfl VARCHAR(1), visitnum NUMERIC, visit VARCHAR(19), visitdy INTEGER, epoch VARCHAR(9), vsdtc VARCHAR(10), -- instead of DATE for possible incomplete dates vsdy INTEGER, vstpt VARCHAR(30), vstptnum INTEGER, vseltm VARCHAR(4), vstptref VARCHAR(16) ); COPY src.vs ( studyid, domain, usubjid, vsseq, vstestcd, vstest, vspos, vsorres, vsorresu, vsstresc, vsstresn, vsstresu, vsstat, vsloc, vsblfl, visitnum, visit, visitdy, epoch, vsdtc, vsdy, vstpt, vstptnum, vseltm, vstptref ) FROM '/content/source/vs.csv' DELIMITER ',' CSV HEADER; # + [markdown] id="Y4hvcSq2i5Oq" # ## Adverse Events (AE) # + id="HlmldLRiSJXq" language="sql" # # SELECT * # FROM src.ae # LIMIT 5; # + [markdown] id="ruVBU91ei5Os" # ## Supplemental Qualifiers for AE (SUPPAE) # + id="5hji4UuAi5Os" language="sql" # # SELECT * # FROM src.suppae # LIMIT 5; # + [markdown] id="ZkB_SqV7i5Ot" # ## Concomitant Medications (CM) # + id="TzJ5F3yPw8Fz" language="sql" # # SELECT * # FROM src.cm # LIMIT 5; # + [markdown] id="ErTzN3dXi5Ou" # ## Demographics (DM) # + id="UlcCMomti5Ov" language="sql" # # SELECT * # FROM src.dm # LIMIT 5; # + [markdown] id="68tFydNfi5Ov" # ## Supplemental Qualifiers for DM (SUPPDM) # + id="W5h4tA3Ai5Ow" language="sql" # # SELECT * # FROM src.suppdm # LIMIT 5; # + [markdown] id="kYaGGLVli5Ow" # ## Disposition (DS) # + id="kyRkQFRHi5Ox" language="sql" # # SELECT * # FROM src.ds # LIMIT 5; # + [markdown] id="49KdjhS0i5Oz" # ## Supplemental Qualifiers for DS (SUPPDS) # + id="zYekKKWdi5O0" language="sql" # # SELECT * # FROM src.suppds # LIMIT 5; # + [markdown] id="FMmRNOlbi5O0" # ## Exposure (EX) # + id="ao21olJ3i5O1" language="sql" # # SELECT * # FROM src.ex # LIMIT 5; # + [markdown] id="YHNB31Vri5O1" # ## Laboratory Tests Results (LBCH) # + id="xNDCsOtzi5O2" language="sql" # # SELECT * # FROM src.lbch # LIMIT 5; # + [markdown] id="GbTTTCwei5O3" # ## Supplemental Qualifiers for LB (SUPPLBCH) # + id="INIILnVai5O4" language="sql" # # 
SELECT * # FROM src.supplbch # LIMIT 5; # + [markdown] id="HgApHoESi5O5" # ## Laboratory Tests Results (LBHE) # + id="fTSe3AL_i5O6" language="sql" # # SELECT * # FROM src.lbhe # LIMIT 5; # + [markdown] id="83124qfZi5O7" # ## Supplemental Qualifiers for LB (SUPPLBHE) # + id="DEK7jDCwi5O7" language="sql" # # SELECT * # FROM src.supplbhe # LIMIT 5; # + [markdown] id="iVxW1EWFi5O8" # ## Laboratory Tests Results (LBUR) # + id="SZUS6D2wi5O8" language="sql" # # SELECT * # FROM src.lbur # LIMIT 5; # + [markdown] id="GyQ8T_3Ri5O8" # ## Supplemental Qualifiers for LB (SUPPLBUR) # + id="qRcWwkgOi5O9" language="sql" # # SELECT * # FROM src.supplbur # LIMIT 5; # + [markdown] id="_UePnzNji5O-" # ## Medical History (MH) # + id="4zaTf1l4i5PB" language="sql" # # SELECT * # FROM src.mh # LIMIT 5; # + [markdown] id="tpcYu_bhi5PC" # ## Questionnaires (QSCO) # + id="IhLDq0zfi5PD" language="sql" # # SELECT * # FROM src.qsco # LIMIT 5; # + [markdown] id="7NDglemgi5PE" # ## Questionnaires (QSDA) # + id="SHiUZmcBi5PE" language="sql" # # SELECT * # FROM src.qsda # LIMIT 5; # + [markdown] id="qYv5Y6ggi5PF" # ## Questionnaires (QSGI) # + id="oRWzQaTSi5PF" language="sql" # # SELECT * # FROM src.qsgi # LIMIT 5; # + [markdown] id="mOx4PQ5gi5PG" # ## Questionnaires (QSHI) # + id="M8Cf8f0ii5PH" language="sql" # # SELECT * # FROM src.qshi # LIMIT 5; # + [markdown] id="xZpEBkyqi5PI" # ## Questionnaires (QSMM) # + id="a74ay0Vmi5PK" language="sql" # # SELECT * # FROM src.qsmm # LIMIT 5; # + [markdown] id="FoEgbQTji5PL" # ## Questionnaires (QSNI) # + id="ZgFHQpX7i5PL" language="sql" # # SELECT * # FROM src.qsni # LIMIT 5; # + [markdown] id="Vunx6tYEi5PL" # ## Related Records (RELREC) # + id="axNSmPx9i5PM" language="sql" # # SELECT * # FROM src.relrec # LIMIT 5; # + [markdown] id="6_6U7LBgi5PM" # ## Subject Characteristics (SC) # + id="lFiKyK4Yi5PM" language="sql" # # SELECT * # FROM src.sc # LIMIT 5; # + [markdown] id="sJyoZWs-i5PN" # ## Subject Elements (SE) # + id="rsxeVbv4i5PN" language="sql" # # 
SELECT * # FROM src.se # LIMIT 5; # + [markdown] id="0chHj3Jbi5PN" # ## Subject Visits (SV) # + id="_Pn-bL4qi5PN" language="sql" # # SELECT * # FROM src.sv # LIMIT 5; # + [markdown] id="s3sH36_Vi5PN" # ## Trial Arms (TA) # + id="FgZ9ZH5Yi5PO" language="sql" # # SELECT * # FROM src.ta # LIMIT 5; # + [markdown] id="TpnjgO5Oi5PO" # ## Trial Elements (TE) # + id="f5hF2Qrii5PO" language="sql" # # SELECT * # FROM src.te # LIMIT 5; # + [markdown] id="L7wsKDSWi5PO" # ## Trial Inclusion/ Exclusion Criteria (TI) # + id="SlLDmLwOi5PO" language="sql" # # SELECT * # FROM src.ti # LIMIT 5; # + [markdown] id="mQFsLGJPi5PP" # ## Trial Summary (TS) # + id="MSdq5uU2i5PP" language="sql" # # SELECT * # FROM src.ts # LIMIT 5; # + [markdown] id="-GFUw9V-i5PP" # ## Trial Visits (TV) # + id="ojlLSNhNi5PP" language="sql" # # SELECT * # FROM src.tv # LIMIT 5; # + [markdown] id="2ajbd0dHi5PQ" # ## Vital Signs (VS) # + id="jbVFG2rdi5PQ" language="sql" # # SELECT * # FROM src.vs # LIMIT 5;
notebooks/phuse_source.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: PY37
#     language: python
#     name: py37
# ---

# + [markdown] toc="true"
# # Creating a simple Auto-encoders from scratch with Fashion-MNIST dataset.
# -

# ## 1) Import modules

# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import seaborn as sns

import warnings
warnings.filterwarnings('ignore')

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.datasets import mnist
from tensorflow.keras.datasets import fashion_mnist
from tensorflow.keras.regularizers import l1
from tensorflow.keras.optimizers import Adam
# -

import tensorflow as tf
tf.__version__

# ## 2) Utility Function

def plot_autoencoder_outputs(autoencoder, n, dims):
    """Plot the first `n` test images next to the autoencoder's reconstructions.

    Parameters
    ----------
    autoencoder : trained Keras model mapping flattened images to flattened images.
    n : int
        Number of test images to display. BUG FIX: the original body overwrote
        this parameter with a hard-coded ``n = 5``, silently ignoring the caller's
        argument; the parameter is now honoured (all existing calls pass 5, so
        their output is unchanged).
    dims : tuple
        Shape used to un-flatten each image for display, e.g. ``(28, 28)``.

    NOTE(review): relies on the module-level global ``x_test`` defined in the
    data-loading cell below — call only after the dataset has been loaded.
    """
    plt.figure(figsize=(10, 4.5))
    decoded_imgs = autoencoder.predict(x_test)
    for i in range(n):
        # plot original image
        ax = plt.subplot(2, n, i + 1)
        plt.imshow(x_test[i].reshape(*dims))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        if i == n/2:
            ax.set_title('Original Images')

        # plot reconstruction
        ax = plt.subplot(2, n, i + 1 + n)
        plt.imshow(decoded_imgs[i].reshape(*dims))
        plt.gray()
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        if i == n/2:
            ax.set_title('Reconstructed Images')
    plt.show()

# ## 3) Loading and preparing the dataset

# +
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()

# Scale pixel values into [0, 1] and flatten each 28x28 image to a 784-vector.
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

print(x_train.shape)
print(x_test.shape)
# -

# ## 4) Building the Auto-Encoder

# +
input_size = 784  # 28 * 28 flattened pixels
n_neurons = 64    # size of the bottleneck (code) layer

print('tf version', tf.__version__)

# Shallow autoencoder: 784 -> 64 -> 784.
input_img = Input(shape=(input_size,))
code = Dense(n_neurons, activation='relu')(input_img)
output_img = Dense(input_size, activation='sigmoid')(code)

autoencoder = Model(input_img, output_img)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train, epochs=5)
# -

# ## 5) Visualize the results Original vs Reconstructed Images

plot_autoencoder_outputs(autoencoder, 5, (28, 28))

# +
# Visualize the learned encoder weights as 28x28 "filter" images.
weights = autoencoder.get_weights()[0].T

n = 10
plt.figure(figsize=(20, 5))
for i in range(n):
    ax = plt.subplot(1, n, i + 1)
    plt.imshow(weights[i].reshape(28, 28))
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
# -

# # Deep Auto-Encoder

# ## 4) Building the Deep Auto-Encoder

# +
input_size = 784
hidden_size = 128
code_size = 128

# Deep autoencoder: 784 -> 128 -> 128 -> 128 -> 784.
input_img = Input(shape=(input_size,))
hidden_1 = Dense(hidden_size, activation='relu')(input_img)
code = Dense(code_size, activation='relu')(hidden_1)
hidden_2 = Dense(hidden_size, activation='relu')(code)
output_img = Dense(input_size, activation='sigmoid')(hidden_2)

autoencoder = Model(input_img, output_img)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train, epochs=3)
# -

# ## 5) Visualize the results Original vs Reconstructed Images

plot_autoencoder_outputs(autoencoder, 5, (28, 28))

# # Denoising Autoencoder

# ## 1) Generating Noisy Images

# +
noise_factor = 0.4

# Add Gaussian noise, then clip back into the valid [0, 1] pixel range.
x_train_noisy = x_train + noise_factor * np.random.normal(size=x_train.shape)
x_test_noisy = x_test + noise_factor * np.random.normal(size=x_test.shape)
x_train_noisy = np.clip(x_train_noisy, 0.0, 1.0)
x_test_noisy = np.clip(x_test_noisy, 0.0, 1.0)

n = 5
plt.figure(figsize=(10, 4.5))
for i in range(n):
    # plot original image
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    if i == n/2:
        ax.set_title('Original Images')

    # plot noisy image
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(x_test_noisy[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    if i == n/2:
        ax.set_title('Noisy Input')
# -

# ## 2) Building the Deep Auto-Encoder for Image Denoising

# +
input_size = 784
hidden_size = 128
code_size = 32

input_img = Input(shape=(input_size,))
hidden_1 = Dense(hidden_size, activation='relu')(input_img)
code = Dense(code_size, activation='relu')(hidden_1)
hidden_2 = Dense(hidden_size, activation='relu')(code)
output_img = Dense(input_size, activation='sigmoid')(hidden_2)

autoencoder = Model(input_img, output_img)
autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
# Train the network to map noisy inputs back to the clean originals.
autoencoder.fit(x_train_noisy, x_train, epochs=10)
# -

# ## 3) Visualize the results Original vs Reconstructed Images

# +
n = 5
plt.figure(figsize=(10, 7))
images = autoencoder.predict(x_test_noisy)

for i in range(n):
    # plot original image
    ax = plt.subplot(3, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    if i == n/2:
        ax.set_title('Original Images')

    # plot noisy image
    ax = plt.subplot(3, n, i + 1 + n)
    plt.imshow(x_test_noisy[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    if i == n/2:
        ax.set_title('Noisy Input')

    # plot denoised reconstruction
    ax = plt.subplot(3, n, i + 1 + 2*n)
    plt.imshow(images[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    if i == n/2:
        ax.set_title('Autoencoder Output')
# -
Section 06/Section 06 - Auto-encoders.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Hyperparameter optimization with Ray Tune # <a target="_blank" href="https://recognai.github.io/biome-text/v3.2.1/documentation/tutorials/3-Hyperparameter_optimization_with_Ray_Tune.html"><img class="icon" src="https://recognai.github.io/biome-text/v3.2.1/assets/img/biome-isotype.svg" width=24 /></a> # [View on recogn.ai](https://recognai.github.io/biome-text/v3.2.1/documentation/tutorials/3-Hyperparameter_optimization_with_Ray_Tune.html) # # <a target="_blank" href="https://colab.research.google.com/github/recognai/biome-text/blob/v3.2.1/docs/docs/documentation/tutorials/3-Hyperparameter_optimization_with_Ray_Tune.ipynb"><img class="icon" src="https://www.tensorflow.org/images/colab_logo_32px.png" width=24 /></a> # [Run in Google Colab](https://colab.research.google.com/github/recognai/biome-text/blob/v3.2.1/docs/docs/documentation/tutorials/3-Hyperparameter_optimization_with_Ray_Tune.ipynb) # # <a target="_blank" href="https://github.com/recognai/biome-text/blob/v3.2.1/docs/docs/documentation/tutorials/3-Hyperparameter_optimization_with_Ray_Tune.ipynb"><img class="icon" src="https://github.githubassets.com/images/modules/logos_page/GitHub-Mark.png" width=24 /></a> # [View source on GitHub](https://github.com/recognai/biome-text/blob/v3.2.1/docs/docs/documentation/tutorials/3-Hyperparameter_optimization_with_Ray_Tune.ipynb) # When running this tutorial in Google Colab, make sure to install *biome.text* and *ray tune* first: # !pip install -U pip # !pip install -U biome-text exit(0) # Force restart of the runtime # If you want to log your runs with [WandB](https://wandb.ai), don't forget to install its client and log in. # !pip install wandb # !wandb login # ::: tip Note # # In this tutorial we will use a GPU by default. 
# So when running this tutorial in Google Colab, make sure that you request one (*Edit -> Notebook settings*).
#
# :::

# ## Introduction

# Here we will optimize the hyperparameters of the short-text classifier from [this tutorial](https://recognai.github.io/biome-text/v3.2.1/documentation/tutorials/1-Training_a_text_classifier.html), hence we recommend to have a look at it first before going through this tutorial.
# For the Hyper-Parameter Optimization (HPO) we rely on the awesome [Ray Tune library](https://docs.ray.io/en/latest/tune.html#tune-index).
#
# For a short introduction to HPO with Ray Tune you can have a look at this nice [talk](https://www.youtube.com/watch?v=VX7HvEoMrsA) by <NAME>.
# We will follow his terminology and use the term *trial* to refer to a training run of one set of hyperparameters.

# ### Imports
#
# First let's import all the stuff we need for this tutorial:

import os

from biome.text import Pipeline, Dataset
from biome.text.configuration import TrainerConfiguration
from biome.text.hpo import TuneExperiment
from ray import tune

# ## Creating the datasets

# As a first step we will download the training and validation data to our local machine, and create our datasets.

# !curl -O https://biome-tutorials-data.s3-eu-west-1.amazonaws.com/text_classifier/business.cat.train.csv

# !curl -O https://biome-tutorials-data.s3-eu-west-1.amazonaws.com/text_classifier/business.cat.valid.csv

train_ds = Dataset.from_csv("business.cat.train.csv")
valid_ds = Dataset.from_csv("business.cat.valid.csv")

# ## Defining the pipeline and the search space

# As mentioned in the introduction we will use the same pipeline configuration as used in the [base tutorial](https://recognai.github.io/biome-text/v3.2.1/documentation/tutorials/1-Training_a_text_classifier.html).
# # To perform a random hyperparameter search (as well as a grid search) we simply have to replace the parameters we want to optimize with methods from the [Random Distributions API](https://docs.ray.io/en/latest/tune/api_docs/search_space.html#random-distributions-api) or the [Grid Search API](https://docs.ray.io/en/latest/tune/api_docs/search_space.html#grid-search-api) in our configuration dictionaries. # For a complete description of both APIs and how they interplay with each other, see the corresponding section in the [Ray Tune docs](https://docs.ray.io/en/latest/tune/api_docs/search_space.html). # # In our case we will tune 9 parameters: # - the output dimensions of our `word` and `char` features # - the dropout of our `char` feature # - the architecture of our pooler (*GRU* versus *LSTM*) # - number of layers and hidden size of our pooler, as well as if it should be bidirectional # - hidden dimension of our feed forward network # - and the learning rate # # For most of the parameters we will provide discrete values from which Tune will sample randomly, while for the dropout and learning rate we will provide a continuous linear and logarithmic range, respectively. # Since we want to directly compare the outcome of the optimization with the base configuration of the [underlying tutorial](https://recognai.github.io/biome-text/v3.2.1/documentation/tutorials/1-Training_a_text_classifier.html), we will fix the number of epochs to 3. # # Not all of the parameters above are worth tuning, but we want to stress the flexibility that *Ray Tune* and *biome.text* offers you. # # ::: tip Tip # # Keep in mind that the learning rate "*is often the single most important hyper-parameter and one should always make sure that it has been tuned (up to approximately a factor of 2). ... If there is only time to optimize one hyper-parameter and one uses stochastic gradient descent, then this is the hyper-parameter that is worth tuning.*" ([<NAME>](https://arxiv.org/abs/1206.5533)). 
# # ::: # # In the following configuration dictionaries we replaced the relevant parameters with tune's search spaces. # + # defining the search spaces in our pipeline config pipeline_config = { "name": "german_business_names", "tokenizer": { "text_cleaning": { "rules": ["strip_spaces"] } }, "features": { "word": { "embedding_dim": tune.choice([32, 64]), "lowercase_tokens": True, }, "char": { "embedding_dim": 32, "lowercase_characters": True, "encoder": { "type": "gru", "num_layers": 1, "hidden_size": tune.choice([32, 64]), "bidirectional": True, }, "dropout": tune.uniform(0, 0.5), }, }, "head": { "type": "TextClassification", "labels": list(set(train_ds["label"])), "pooler": { "type": tune.choice(["gru", "lstm"]), "num_layers": tune.choice([1, 2]), "hidden_size": tune.choice([32, 64]), "bidirectional": tune.choice([True, False]), }, "feedforward": { "num_layers": 1, "hidden_dims": tune.choice([32, 64]), "activations": ["relu"], "dropout": [0.0], }, }, } # defining the search spaces in our trainer config trainer_config = TrainerConfiguration( optimizer={ "type": "adam", "lr": tune.loguniform(0.001, 0.01) }, max_epochs=3, monitor="validation_accuracy", monitor_mode="max" ) # - # We also want to make sure that the model with the highest accuracy is saved in the end, that is why we specified the `monitor` and `monitor_mode` argument in the trainer configuration. # # ::: tip Note # # By default we will use a GPU if available. # If you do not want to use your GPU, just set `gpus=0` in your `TrainerConfiguration` above. # # ::: # ## Starting the random search # Before starting our random hyperparameter search we first have to create a `TuneExperiment` instance with our configurations dicts and our datasets. # We also set a name that will mainly be used as project and experiment name for the integrated WandB and MLFlow logger, respectively. 
# # Furthermore, we can pass on all the parameters available for the underlying [`tune.Experiment`](https://docs.ray.io/en/master/tune/api_docs/execution.html#tune-experiment) class: # # - The number of trials our search will go through depends on the `num_samples` parameter. # In our case, a random search, it equals the number of trials, whereas in the case of a grid search the total number of trials is `num_samples` times the grid configurations (see the [Tune docs](https://docs.ray.io/en/latest/tune/api_docs/search_space.html#overview) for illustrative examples). # # - The `local_dir` parameter defines the output directory of the HPO results and will also contain the training results of each trial (that is the model weights and metrics). # # - The number of parallel running trials depends on your `resources_per_trial` configuration and your local resources. # The default value is `{"cpu": 1, "gpu": 0}` and results, for example, in 8 parallel running trials on a machine with 8 CPUs. # You can also use fractional values. To share a GPU between 2 trials, for example, pass on `{"gpu": 0.5}`. # # ::: tip Note # # Keep in mind: to run your HPO on GPUs, you have to specify them in the `resources_per_trial` parameter when calling `tune.run()`. # If you do not want to use a GPU, just set the value to 0 `{"cpu": 1, "gpu": 0}`. # # ::: # # my_random_search = TuneExperiment( pipeline_config=pipeline_config, trainer_config=trainer_config, train_dataset=train_ds, valid_dataset=valid_ds, name="My first random search", # `tune.Experiment` kwargs: num_samples=50, local_dir="tune_runs", resources_per_trial={"cpu": 1, "gpu": 0.5}, ) # With our `TuneExperiment` object at hand, we simply have to pass it on to the [`tune.run`](https://docs.ray.io/en/master/tune/api_docs/execution.html#tune-run) function to start our random search. 
# # In this tutorial we will perform a random search together with the [Asynchronous Successive Halving Algorithm (ASHA)](https://blog.ml.cmu.edu/2018/12/12/massively-parallel-hyperparameter-optimization/) to schedule our trials. # The Ray Tune developers advocate this `scheduler` as a good starting point for its aggressive termination of low-performing trials. # You can look up the available configurations in the [ASHAScheduler docs](https://docs.ray.io/en/latest/tune/api_docs/schedulers.html#asha-tune-schedulers-ashascheduler), here we will just use the default parameters. # # We also have to specify on what `metric` to optimize and its `mode` (should the metric be *minimized* (`min`) or *maximized* (`max`) ). # This should be the same as the `validation_metric` specified in your trainer configuration. This guarantees the alignment of the patience mechanism and the trial scheduler, and also makes sure that the best model weights correspond to the best metrics reported by ray tune. # # The `progress_reporter` is a nice feature to keep track of the progress inside a Jupyter Notebook, for example. # ::: tip Tip # # You can reduce the size of the training output by specifying `progress_bar_refresh_rate=0` in your `TrainingConfiguration` to disable the progress bar. # # ::: analysis = tune.run( my_random_search, scheduler=tune.schedulers.ASHAScheduler(), metric="validation_accuracy", mode="max", progress_reporter=tune.JupyterNotebookReporter(overwrite=True) ) # ::: tip Tip # # You can also create an `Analysis` object from the output directory of the HPO run, once it has finished: # ```python # from ray.tune.analysis.experiment_analysis import Analysis # analysis = Analysis( # experiment_dir='tune_runs/My first random search") # default_metric="validation_accuracy", # default_mode="max", # ) # ``` # # ::: # ### Following the progress with tensorboard (optional) # Ray Tune automatically logs its results with [TensorBoard](https://www.tensorflow.org/tensorboard/). 
# We can take advantage of this and launch a TensorBoard instance before starting the hyperparameter search to follow its progress.
# The `RayTuneTrainable` class will also log the metrics to MLFlow and WandB, if you prefer those platforms.

# %load_ext tensorboard

# %tensorboard --logdir ./runs/tune

# ![Screenshot of TensorBoard with Ray Tune](./img/hpo_tensorboard.png)
# *Screenshot of TensorBoard*

# ## Checking the results

# The *analysis* object returned by `tune.run` can be accessed through a *pandas DataFrame*.

analysis.dataframe()

# Even though with 50 trials we visit just a small space of our possible configurations, we should have achieved an accuracy of ~0.94, an increase of roughly 3 points compared to the original configuration of the [base tutorial](https://recognai.github.io/biome-text/v3.2.1/documentation/tutorials/1-Training_a_text_classifier.html).
#
# In a real-life example, though, you probably should increase the number of epochs, since the validation loss in general seems to be decreasing further.
#
# A next step could be to fix some of the tuned parameters to the preferred value, and tune other parameters further or limit their value space.
#
# ::: tip Tip
#
# To obtain insights about the importance and tendencies of each hyperparameter for the model, we recommend using TensorBoard's *HPARAM* section and follow <NAME>'s suggestions at the end of his [talk](https://www.youtube.com/watch?v=VX7HvEoMrsA).
# Another very useful tool is the parallel coordinates panel in [Weights & Biases](https://wandb.ai/site), see this [quick walkthrough](https://youtu.be/91HhNtmb0B4).
#
# :::

# ### Evaluating the best performing model

# The *analysis* object also provides some convenient methods to obtain the best performing configuration, as well as the `logdir` where the results of the trial are saved.
analysis.get_best_config("validation_accuracy", "max") # We can use the `best_logdir` to create a pipeline with the best performing model and start making predictions. best_logdir = analysis.get_best_logdir("validation_accuracy", "max") best_model = os.path.join(best_logdir, "output", "model.tar.gz") pl_trained = Pipeline.from_pretrained(best_model) pl_trained.predict(text="Autohaus Recognai") # + [markdown] pycharm={"name": "#%% md\n"} # ::: tip Note # # For an unbiased evaluation of the model you should use a test dataset that was not used during the HPO! # # :::
docs/docs/documentation/tutorials/3-Hyperparameter_optimization_with_Ray_Tune.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown]
# Q1. Write a Python Program to Display Fibonacci Sequence Using Recursion?
#
# Solution :-

# +
def recur(n):
    """Return the n-th Fibonacci number (0-indexed) by naive recursion."""
    if n <= 1:
        return n
    return recur(n - 1) + recur(n - 2)

nterms = 10
if nterms <= 0:
    print("Please enter a positive integer")
else:
    print("Fibonacci sequence:")
    for i in range(nterms):
        # BUG FIX: the original called the undefined name `recur_fibo`,
        # which raised a NameError -- the function above is named `recur`.
        print(recur(i))

# + [markdown]
# Q2. Write a Python Program to Find Factorial of Number Using Recursion?
#
# Solution :-

# +
def recursion(n):
    """Return n! computed recursively (valid for n >= 1)."""
    if n == 1:
        return n
    return n * recursion(n - 1)

num = 10
if num < 0:
    print("Sorry, factorial does not exist for negative numbers")
elif num == 0:
    print("The factorial of 0 is 1")
else:
    print("The factorial of", num, "is", recursion(num))

# + [markdown]
# Q3. Write a Python Program to calculate your Body Mass Index?
#
# Solution :-
# -

height = float(input("Input your height in Feet: "))
weight = float(input("Input your weight in Kilogram: "))
# NOTE(review): the standard BMI formula expects height in metres; with feet
# the result is not a standard BMI -- confirm the intended units.
print("Your body mass index is: ", round(weight / (height * height), 2))

# + [markdown]
# Q4. Write a Python Program to calculate the natural logarithm of any number?
#
# Solution :-

# +
import math

print("Natural logarithm of 14 is : ", end="")
print(math.log(14))

# + [markdown]
# Q5. Write a Python Program for cube sum of first n natural numbers?
#
# Solution :-

# +
n = 3

def cube_sum(n):
    """Return 1^3 + 2^3 + ... + n^3 via the closed form (n(n+1)/2)^2.

    Renamed from `sum`, which shadowed the built-in of the same name.
    """
    x = n * (n + 1) / 2
    return int(x * x)

print(cube_sum(n))
# -
Python Programming Basic Assignments/Code 6.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Collocate CCMP v2.0 winds with the MMD drifter matchups
#
# Reads the MMD drifter extract files, then for every matchup linearly
# interpolates the 6-hourly CCMP wind analyses in time at the nearest grid
# point, writing the collocated u/v winds out incrementally.

from netCDF4 import Dataset  # http://code.google.com/p/netcdf4-python/
import os

####################you will need to change some paths here!#####################
# np.savez checkpoint file, MMD input extracts, and the CCMP data tree
filename_ccmp = 'f:/data/mmd/mmd06c_post_processed/MMD6c_drifter_yearly_extract_2010_ccmp2.np'
fname_mmdb = 'f:/data/mmd/mmd06c_post_processed/MMD6c_drifter_yearly_extract_2010_pos.nc'
fname_mmdb2 = 'f:/data/mmd/mmd06c_post_processed/MMD6c_drifter_yearly_extract_2010_dtime.nc'
fname_mmdb3 = 'f:/data/mmd/mmd06c_post_processed/MMD6c_drifter_yearly_extract_2010_wnd.nc'
dir_ccmp = 'F:/data/sat_data/ccmp/v02.0/Y'
#################################################################################

import datetime as dt
import xarray as xr
from datetime import datetime
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import math

# +
date_1993 = dt.datetime(1993, 1, 1, 0, 0, 0)  # epoch of the MMD time axis

# CCMP grid spacing/origin (kept for reference; the search below uses the
# coordinate arrays read from the files directly)
dx = 0.25
dy = 0.25
dx_offset = -179.875
dy_offset = -78.3750

with xr.open_dataset(fname_mmdb2) as dsx:
    drifter_dysince = dsx.time.T

with xr.open_dataset(fname_mmdb) as dsx:
    lats_amsr = dsx.lat.T
    lons_amsr = dsx.lon.T        # longitudes are 0..360 in CCMP
    amsr_dysince = dsx.time.T    # seconds since 1993-01-01

with xr.open_dataset(fname_mmdb3) as dsx:
    nwp_uwnd = dsx.uwnd
    nwp_vwnd = dsx.vwnd

tdim = lats_amsr.shape[0]  # number of matchups to collocate
print(tdim)
# -

plt.plot(amsr_dysince);
plt.title('amsr');
plt.xlabel('index');
plt.ylabel('seconds');

# +
def read_ccmp_day(day):
    """Read the daily CCMP v2.0 analysis file covering `day` (a datetime).

    Returns (uwnd, vwnd, lat, lon, times) where uwnd/vwnd are the
    (time, lat, lon) wind arrays and times is the list of analysis datetimes
    (6-hourly, normally 4 per file).

    Extracted helper: this read was previously duplicated verbatim in the
    window-initialization and window-advance branches below.
    """
    syr = str(day.year).zfill(4)
    smon = str(day.month).zfill(2)
    sdym = str(day.day).zfill(2)
    fname_tem = '/CCMP_Wind_Analysis_' + syr + smon + sdym + '_V02.0_L3.0_RSS.nc'
    ccmp_filename = dir_ccmp + syr + '/M' + smon + fname_tem
    print(ccmp_filename)
    nc_fid = Dataset(ccmp_filename, 'r')
    wndu = nc_fid.variables['uwnd'][:]
    wndv = nc_fid.variables['vwnd'][:]
    mlat = nc_fid.variables['latitude'][:]
    mlon = nc_fid.variables['longitude'][:]
    t = nc_fid.variables['time'][:]  # units: hours since 1987-01-01 00:00:00
    times = [dt.datetime(1987, 1, 1, 0, 0, 0) + dt.timedelta(hours=float(h)) for h in t]
    nc_fid.close()
    return wndu, wndv, mlat, mlon, times


idysv = 0   # day currently centered in the rolling 3-day window
istart = 0  # 0 until the window has been initialized
col_wndu = [0] * tdim
col_wndv = [0] * tdim

for i in range(0, tdim):
    # matchup time: seconds since 1993-01-01
    amsr_date = date_1993 + dt.timedelta(seconds=float(amsr_dysince[i].values))

    if istart == 0:
        # Initialize a 3-day window (previous/current/next day).
        # NOTE(review): the window arithmetic below assumes exactly 4 time
        # steps per daily file (6-hourly analyses) -- confirm for this dataset.
        for incr in range(-1, 2):
            wndu, wndv, mlat_ccmp, mlon_ccmp, time_ccmp = read_ccmp_day(
                amsr_date + dt.timedelta(days=float(incr)))
            if incr == -1:
                wndu2 = wndu
                wndv2 = wndv
                time_ccmp2 = time_ccmp
            else:
                wndu2 = np.append(wndu2, wndu, axis=0)
                wndv2 = np.append(wndv2, wndv, axis=0)
                time_ccmp2 = np.append(time_ccmp2, time_ccmp, axis=0)
        idysv = amsr_date.day
        istart = 1

    if amsr_date.day != idysv:
        # The matchups advanced to a new day: slide the window forward one day.
        wndu, wndv, mlat_ccmp, mlon_ccmp, time_ccmp = read_ccmp_day(
            amsr_date + dt.timedelta(days=float(1)))
        idysv = amsr_date.day
        wndu2[0:8, :, :] = wndu2[4:12, :, :]
        wndv2[0:8, :, :] = wndv2[4:12, :, :]
        time_ccmp2[0:8] = time_ccmp2[4:12]
        wndu2[8:12, :, :] = wndu[:]
        wndv2[8:12, :, :] = wndv[:]
        time_ccmp2[8:12] = time_ccmp[:]

    # Nearest grid point, then linear interpolation in time between the two
    # bracketing 6-hourly analyses.
    alat = lats_amsr[i]
    alon = lons_amsr[i]
    if alon < 0:
        alon = alon + 360
    latli = np.argmin(np.abs(mlat_ccmp - alat))
    lonli = np.argmin(np.abs(mlon_ccmp - alon))
    timei = np.argmin(np.abs(time_ccmp2 - amsr_date))
    dttime = abs(time_ccmp2[timei] - amsr_date)
    f2 = dttime.seconds / (6. * 60 * 60)  # fraction of the 6 h spacing
    f1 = abs(f2 - 1.)
    # NOTE(review): at the very edges of the window timej could fall outside
    # 0..11; the 3-day window makes this unlikely but it is not guarded.
    if time_ccmp2[timei] < amsr_date:
        timej = timei + 1
    else:
        timej = timei - 1
    col_wndu[i] = f1 * wndu2[timei, latli, lonli] + f2 * wndu2[timej, latli, lonli]
    col_wndv[i] = f1 * wndv2[timei, latli, lonli] + f2 * wndv2[timej, latli, lonli]

    if i % 50000 == 0:
        # checkpoint: dump everything collocated so far
        np.savez(filename_ccmp, name1=col_wndu, name2=col_wndv)
        filename_ccmp_nc = 'f:/data/mmd/mmd06c_post_processed/MMD6c_drifter_yearly_extract_2010_ccmp.nc'
        fnc = Dataset(filename_ccmp_nc, 'w', format='NETCDF4')
        fnc.createDimension('t', len(col_wndu))
        u_netcdf = fnc.createVariable('wndu', 'f4', ('t'))
        v_netcdf = fnc.createVariable('wndv', 'f4', ('t'))
        u_netcdf[:] = col_wndu
        v_netcdf[:] = col_wndv
        fnc.close()
        print(i, 'output np.save')
# -

# sanity checks on the last matchup processed
print(latli, mlat_ccmp[latli], alat.data)
print(wndv2[timei, latli, lonli])

# +
print(nwp_uwnd[0, i].data)
print(wndv2.shape)
# -
mmdb/amsr_ccmp.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # COMP9318 Lab2 # ## Instructions # 1. This note book contains instructions for **COMP9318-lab2**. # # * You are required to complete your implementation in a file `submission.py` provided along with this notebook. # # * You are not allowed to print out unnecessary stuff. We will not consider any output printed out on the screen. All results should be returned in appropriate data structures return by corresponding functions. # # * You need to submit the code for **lab2** via following link: http://kg.cse.unsw.edu.au:8318/lab2/ . # # * For each question, we have provided you with detailed instructions along with question headings. In case of any problem, you can post your query @ Piazza. # # * If you choose to skip a question, leave the corresponding function body as it is (i.e., keep the `pass` line), otherwise it may affect your mark for other questions. # # * You are allowed to add other functions and/or import additional modules (you may have to in this lab), but you are not allowed to define global variables. **Only functions are allowed** in `submission.py`. # # * You should not import unnecessary modules/libraries, failing to import such modules at test time will lead to errors. # # * We will provide immediate feedback on your submission. You can access your scores using the online submission portal on the same day. # # * For **Final Evaluation** we will be using a different dataset, so your final scores may vary. # # * You are allowed to submit as many times as you want before the deadline, but **ONLY the latest version will be kept and marked**. # # * Submission deadline for this assignment is **11:59:59 PM on 11th April, 2018**. We will **not** accept any late submissions. 
# ### Question 1: Optimized BUC algorithm (50 points)
#
# You need to implement the full `buc_rec_optimized` algorithm with the single-tuple optimization (as described below). Given an input dataframe:
#
# A | B | M 
# ---|---|---
# 1 | 2 | 100
# 2 | 1 | 20
#
# Invoking  `buc_rec_optimized` on this data will result in the following dataframe:
#
#
# A | B | M
# ---|---|---
# 1 | 2 | 100
# 1 |ALL| 100
# 2 | 1 | 20
# 2 |ALL| 20
# ALL| 1 | 20
# ALL| 2 | 100
# ALL|ALL| 120
#
# In the file `submission.py`, we have pre-defined the `buc_rec_optimized` function and its helper functions.
#
# ## Input and output
#
# Both input and output are dataframes.
#
# The input dataframe (i.e., the base cuboid) is directly generated from the input file. Given that the dimensionality of the base cuboid is $d$, each row is like:
#
# <pre>
# v_1  v_2 ...  v_d  m
# </pre>
#
# where v_i is the cell's value on the i-th dimension, and m is the measure value.
#
# The output dataframe contains $n$ rows, each for a non-empty cell in the computed data cube derived from the input base cuboid. Each row is formatted like the input:
#
# <pre>
# v_1  v_2 ...  v_d  m
# </pre>
#
# where v_i is the cell's value on the i-th dimension, and m is the measure value.
#
#
# ## The single-tuple optimization
#
# Consider the naive recursive implementation of the BUC algorithm: you will notice that it uses several recursive calls to compute all the derived results from an input that consists of only one tuple. This is certainly a waste of computation.
#
# For example, if we are asked to compute the cube given the following input
#
# B | C | M 
# ---|---|---
# 1 | 2 | 100
#
# We can immediately output the following, **without** using any recursive calls.
# # <pre> # 1 2 100 # * 2 100 # 1 * 100 # * * 100 # </pre> # # # ** Note: For lab-2, you are allowed to use only two libraries, i.e., pandas, and numpy.** import pandas as pd import numpy as np # + ##============================================================ # Data file format: # * tab-delimited input file # * 1st line: dimension names and the last dimension is assumed to be the measure # * rest of the lines: data values. def read_data(filename): df = pd.read_csv(filename, sep='\t') return (df) # helper functions def project_data(df, d): # Return only the d-th column of INPUT return df.iloc[:, d] def select_data(df, d, val): # SELECT * FROM INPUT WHERE input.d = val col_name = df.columns[d] return df[df[col_name] == val] def remove_first_dim(df): # Remove the first dim of the input return df.iloc[:, 1:] def slice_data_dim0(df, v): # syntactic sugar to get R_{ALL} in a less verbose way df_temp = select_data(df, 0, v) return remove_first_dim(df_temp) # - def buc_rec_optimized(df):# do not change the heading of the function pass # **replace** this line with your code # + ## You can test your implementation using the following code... import submission as submission input_data = read_data('./asset/a_.txt') output = submission.buc_rec_optimized(input_data) output # - # # Question 2: Optimal binning algorithm using dynamic programming (50 points) # You need to implement the optimal binning algorithm using the dynamic programming algorithm we discussed in the lecture. You are allowed to use $O(n^2)$ space. # # ## Input # # The input contains data (in a list) and the number of bins (an integer). # # ## Output # # You are required to output the binning result and the matrix computed by the algorithm. # # The matrix entries record optimal binning cost for a suffix of the input array using a certain number of bins. You should assign -1 to all the invalid solutions. 
# Example input for Question 2: the data to bin and the number of bins.
x = [3, 1, 18, 11, 13, 17]
num_bins = 4


def v_opt_dp(x, b):# do not change the heading of the function
    """Optimal (v-optimal) binning of list `x` into `b` bins via dynamic programming.

    Expected to return (matrix, bins): the DP cost matrix with -1 for all
    invalid entries, and the resulting binning.  (Stub: to be implemented by
    the student in submission.py — keep the `pass` line if skipped.)
    """
    pass # **replace** this line with your code


# +
## You can test your implementation using the following code...

import submission as submission

matrix, bins = submission.v_opt_dp(x, num_bins)
print("Bins = {}".format(bins))
print("Matrix =")
for row in matrix:
    print(row)
# -
Labs/Lab2-Specs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 2
#     language: python
#     name: python2
# ---

# +
import os, sys, inspect

# Prepend the repository root to sys.path so the local `transitions`
# package (two directories up from this notebook) is imported.
cmd_folder = os.path.realpath(
    os.path.dirname(
        os.path.abspath(os.path.split(inspect.getfile( inspect.currentframe() ))[0])))
if cmd_folder not in sys.path:
    sys.path.insert(0, cmd_folder)

from transitions import *
from transitions.extensions import GraphMachine
from IPython.display import Image, display, display_png


class Matter(object):
    # Guard callbacks referenced by name in the transition table below.
    def is_valid(self):
        return True

    def is_not_valid(self):
        return False

    def is_also_valid(self):
        return True

    # graph object is created by the machine
    def show_graph(self, **kwargs):
        # Render the machine's graph to state.png via graphviz 'dot'
        # and display it inline in the notebook.
        self.get_graph(**kwargs).draw('state.png', prog='dot')
        display(Image('state.png'))


# +
# Transition table: 'conditions' must all return truthy and 'unless'
# must return falsy for the trigger to fire.
transitions = [
    { 'trigger': 'melt', 'source': 'solid', 'dest': 'liquid' },
    { 'trigger': 'evaporate', 'source': 'liquid', 'dest': 'gas', 'conditions':'is_valid' },
    { 'trigger': 'sublimate', 'source': 'solid', 'dest': 'gas', 'unless':'is_not_valid' },
    { 'trigger': 'ionize', 'source': 'gas', 'dest': 'plasma', 'conditions':['is_valid','is_also_valid'] }
]

states=['solid', 'liquid', 'gas', 'plasma']

model = Matter()
machine = GraphMachine(model=model,
                       states=states,
                       transitions=transitions,
                       initial='solid',
                       show_auto_transitions=True,  # default value is False
                       title="Matter is Fun!",
                       show_conditions=True)
model.show_graph()
# -

machine.show_auto_transitions = False  # hide auto transitions
model.show_graph(force_new=True)  # rerender graph

# Walk the machine through its states, re-rendering after each trigger.
model.melt()
model.show_graph()

model.evaporate()
model.show_graph()

model.ionize()
model.show_graph()

# multimodel test
model1 = Matter()
model2 = Matter()
machine = GraphMachine(model=[model1, model2], states=states, transitions=transitions,
                       initial='solid', title="Matter is Fun!", show_conditions=True)

model1.melt()
model1.show_graph()

model2.sublimate()
# show only region of interest which is previous state, active state and all reachable states
model2.show_graph(show_roi=True)
examples/Graph MIxin Demo.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + """Load an existing vtkStructuredGrid and draw the lines of the velocity field joining them in ribbons""" import vtk from vtkplotter import * ######################## vtk # Read the data and specify which scalars and vectors to read. pl3d = vtk.vtkMultiBlockPLOT3DReader() pl3d.SetXYZFileName(datadir+"combxyz.bin") pl3d.SetQFileName(datadir+"combq.bin") pl3d.SetScalarFunctionNumber(100) pl3d.SetVectorFunctionNumber(202) pl3d.Update() # this vtkStructuredData already contains a vector field: domain = pl3d.GetOutput().GetBlock(0) ######################## vtkplotter box = Actor(domain, c=None, alpha=0.1) probe = Line([9,0,28], [11,0,33], res=11).color('k') stream = streamLines(domain, probe, direction='backwards', ribbons=2) show(box, probe, stream) # -
examples/volumetric/streamribbons.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Layout and Styling of Jupyter widgets # # This section presents how to layout and style Jupyter interactive widgets to build rich and *reactive* widget-based applications. # ## The `layout` attribute. # # Jupyter interactive widgets have a `layout` attribute exposing a number of CSS properties that impact how widgets are laid out. # # ### Exposed CSS properties # # <div class="alert alert-info" style="margin: 20px"> # The following properties map to the values of the CSS properties of the same name (underscores being replaced with dashes), applied to the top DOM elements of the corresponding widget. # </div> # # # #### Sizes # # - `height` # - `width` # - `max_height` # - `max_width` # - `min_height` # - `min_width` # # #### Display # # - `visibility` # - `display` # - `overflow` # - `overflow_x` (deprecated in `7.5`, use `overflow` instead) # - `overflow_y` (deprecated in `7.5`, use `overflow` instead) # # #### Box model # # - `border` # - `margin` # - `padding` # # #### Positioning # # - `top` # - `left` # - `bottom` # - `right` # # #### Image/media # # - `object_fit` # - `object_position` # # #### Flexbox # # - `order` # - `flex_flow` # - `align_items` # - `flex` # - `align_self` # - `align_content` # - `justify_content` # - `justify_items` # # #### Grid layout # # - `grid_auto_columns` # - `grid_auto_flow` # - `grid_auto_rows` # - `grid_gap` # - `grid_template_rows` # - `grid_template_columns` # - `grid_template_areas` # - `grid_row` # - `grid_column` # - `grid_area` # # ### Shorthand CSS properties # # You may have noticed that certain CSS properties such as `margin-[top/right/bottom/left]` seem to be missing. The same holds for `padding-[top/right/bottom/left]` etc. 
#
# In fact, you can atomically specify `[top/right/bottom/left]` margins via the `margin` attribute alone by passing the string `'100px 150px 100px 80px'` for, respectively, `top`, `right`, `bottom` and `left` margins of `100`, `150`, `100` and `80` pixels.
#
# Similarly, the `flex` attribute can hold values for `flex-grow`, `flex-shrink` and `flex-basis`. The `border` attribute is a shorthand property for `border-width`, `border-style (required)`, and `border-color`.

# ## Simple examples

# The following example shows how to resize a `Button` so that its views have a height of `80px` and a width of `50%` of the available space. It also includes an example of setting a CSS property that requires multiple values (a border, in this case):

# +
from ipywidgets import Button, Layout

b = Button(description='(50% width, 80px height) button',
           layout=Layout(width='50%', height='80px', border='2px dotted blue'))
b
# -

# The `layout` property can be shared between multiple widgets and assigned directly.

Button(description='Another button with the same layout', layout=b.layout)

# ### Is simple layout really simple?
#
# The cell below adds a `min_width` and `max_width` to the button layout. The effect may be surprising; in CSS if max/min width are present they override the width.

b.layout.min_width='10%'
b.layout.max_width='20%'

# ### Natural sizes, and arrangements using HBox and VBox
#
# Most of the core-widgets have default heights and widths that tile well together. This allows simple layouts based on the `HBox` and `VBox` helper functions to align naturally:

# +
from ipywidgets import Button, HBox, VBox

words = ['correct', 'horse', 'battery', 'staple']
items = [Button(description=w) for w in words]
left_box = VBox([items[0], items[1]])
right_box = VBox([items[2], items[3]])

HBox([left_box, right_box])
# -

# ## Flexbox and Grid
#
# The *Flexbox* CSS specification is great for laying out items in a single direction, horizontally or vertically.
As we saw in the previous example, two dimensional layout can be done with flexbox by using a combination of horizontal and vertical components.
#
# The *Grid* CSS specification is designed to be used for two dimensional layout. There are properties for specifying the number of items in each row or column, how they should be sized, and how items should be aligned.
#
# ### For more information about Flexbox and Grid
#
# There are notebooks with more detail about [widgets and the Flexbox model](reference_guides/guide-flex-box.ipynb) and [widgets and the Grid model](reference_guides/guide-grid-box.ipynb). The code examples from each of those notebooks are included here also.
#
# If you want to learn more about CSS layout after this tutorial, take a look at this [excellent set of articles on CSS layout at MDN](https://developer.mozilla.org/en-US/docs/Learn/CSS/CSS_layout). The Flexbox and Grid articles each have links to more extensive guides at the end of the article.

# ## The Flexbox layout
#
# The `HBox` and `VBox` classes above are special cases of the `Box` widget.
#
# The `Box` widget enables the entire CSS flexbox spec as well as the Grid layout spec, enabling rich reactive layouts in the Jupyter notebook. It aims at providing an efficient way to lay out, align and distribute space among items in a container.
#
# Again, the whole flexbox spec is exposed via the `layout` attribute of the container widget (`Box`) and the contained items. One may share the same `layout` attribute among all the contained items.
#
# ### Acknowledgement
#
# The following flexbox tutorial on the flexbox layout follows the lines of the article [A Complete Guide to Flexbox](https://css-tricks.com/snippets/css/a-guide-to-flexbox/) by <NAME>, and uses text and various images from the article [with permission](https://css-tricks.com/license/).
#
# ### Basics and terminology
#
# The flexbox layout spec is excellent for laying out items in a single direction, either horizontally or vertically.
# # Since flexbox is a whole module and not a single property, it involves a lot of things including its whole set of properties. Some of them are meant to be set on the container (parent element, known as "flex container") whereas the others are meant to be set on the children (known as "flex items"). # If regular layout is based on both block and inline flow directions, the flex layout is based on "flex-flow directions". Please have a look at this figure from the specification, explaining the main idea behind the flex layout. # # ![Flexbox](./images/flexbox.png) # # Basically, items will be laid out following either the `main axis` (from `main-start` to `main-end`) or the `cross axis` (from `cross-start` to `cross-end`). # # - `main axis` - The main axis of a flex container is the primary axis along which flex items are laid out. Beware, it is not necessarily horizontal; it depends on the flex-direction property (see below). # - `main-start | main-end` - The flex items are placed within the container starting from main-start and going to main-end. # - `main size` - A flex item's width or height, whichever is in the main dimension, is the item's main size. The flex item's main size property is either the ‘width’ or ‘height’ property, whichever is in the main dimension. # cross axis - The axis perpendicular to the main axis is called the cross axis. Its direction depends on the main axis direction. # - `cross-start | cross-end` - Flex lines are filled with items and placed into the container starting on the cross-start side of the flex container and going toward the cross-end side. # - `cross size` - The width or height of a flex item, whichever is in the cross dimension, is the item's cross size. The cross size property is whichever of ‘width’ or ‘height’ that is in the cross dimension. # # ### The VBox and HBox helpers # # The `VBox` and `HBox` helper classes provide simple defaults to arrange child widgets in vertical and horizontal boxes. 
They are roughly equivalent to: # # ```Python # def VBox(*pargs, **kwargs): # """Displays multiple widgets vertically using the flexible box model.""" # box = Box(*pargs, **kwargs) # box.layout.display = 'flex' # box.layout.flex_flow = 'column' # box.layout.align_items = 'stretch' # return box # # def HBox(*pargs, **kwargs): # """Displays multiple widgets horizontally using the flexible box model.""" # box = Box(*pargs, **kwargs) # box.layout.display = 'flex' # box.layout.align_items = 'stretch' # return box # ``` # # # # ### Examples # # **Four buttons in a VBox. Items stretch to the maximum width, in a vertical box taking `50%` of the available space.** # + from ipywidgets import Layout, Button, Box items_layout = Layout(width='auto') # override the default width of the button to 'auto' to let the button grow box_layout = Layout(display='flex', flex_flow='column', align_items='stretch', border='solid', width='50%') words = ['correct', 'horse', 'battery', 'staple'] items = [Button(description=word, layout=items_layout, button_style='danger') for word in words] box = Box(children=items, layout=box_layout) box # - # **Three buttons in an HBox. 
Items flex proportionally to their weight.** # + from ipywidgets import Layout, Button, Box, VBox # Items flex proportionally to the weight and the left over space around the text items_auto = [ Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'), Button(description='weight=3; auto', layout=Layout(flex='3 1 auto', width='auto'), button_style='danger'), Button(description='weight=1; auto', layout=Layout(flex='1 1 auto', width='auto'), button_style='danger'), ] # Items flex proportionally to the weight items_0 = [ Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'), Button(description='weight=3; 0%', layout=Layout(flex='3 1 0%', width='auto'), button_style='danger'), Button(description='weight=1; 0%', layout=Layout(flex='1 1 0%', width='auto'), button_style='danger'), ] box_layout = Layout(display='flex', flex_flow='row', align_items='stretch', width='70%') box_auto = Box(children=items_auto, layout=box_layout) box_0 = Box(children=items_0, layout=box_layout) VBox([box_auto, box_0]) # - # **A more advanced example: a reactive form.** # # The form is a `VBox` of width '50%'. Each row in the VBox is an HBox, that justifies the content with space between.. 
# + from ipywidgets import Layout, Button, Box, FloatText, Textarea, Dropdown, Label, IntSlider form_item_layout = Layout( display='flex', flex_flow='row', justify_content='space-between' ) form_items = [ Box([Label(value='Age of the captain'), IntSlider(min=40, max=60)], layout=form_item_layout), Box([Label(value='Egg style'), Dropdown(options=['Scrambled', 'Sunny side up', 'Over easy'])], layout=form_item_layout), Box([Label(value='Ship size'), FloatText()], layout=form_item_layout), Box([Label(value='Information'), Textarea()], layout=form_item_layout) ] form = Box(form_items, layout=Layout( display='flex', flex_flow='column', border='solid 2px', align_items='stretch', width='50%' )) form # - # **A more advanced example: a carousel.** # + from ipywidgets import Layout, Button, Box, Label item_layout = Layout(height='100px', min_width='40px') items = [Button(layout=item_layout, description=str(i), button_style='warning') for i in range(40)] box_layout = Layout(overflow_x='scroll', border='3px solid black', width='500px', height='', flex_flow='row', display='flex') carousel = Box(children=items, layout=box_layout) VBox([Label('Scroll horizontally:'), carousel]) # - # #### *Compatibility note* # # The `overflow_x` and `overflow_y` options are deprecated in ipywidgets `7.5`. Instead, use the shorthand property `overflow='scroll hidden'`. The first part specificies overflow in `x`, the second the overflow in `y`. # ## A widget for exploring layout options # # Use the dropdowns and sliders in the widget to change the layout of the box containing the colored buttons. Many of the CSS layout options described above are available, and the Python code to generate a `Layout` object reflecting the settings is in a `TextArea` in the widget. # # A few questions to answer after the demonstration of this (see the [detailed flexbox guide for a longer discussion](reference_guides/guide-flex-box.ipynb)): # # 1. What does changing `justify_content` affect? 
You may find it easier to answer this if you set `wrap` to `wrap`. # 2. What does `align_items` affect? # 3. How is `align_content` different than `align_items`? # from layout_preview import layout layout # ### Exercises # **Four buttons in a box revisted: Change order and orientation** # # This example, from earlier in this notebook, lays out 4 buttons vertically. # # Flexbox allows you to change the order and orientation of the children items in the flexbox without changing the children themselves. # # 1. Change the `flex_flow` so that the buttons are displayed in a single column in *reverse order*. # 2. Change the `flex_flow` so that the buttons are displayed in a single *row* instead of a column. # 3. Try setting a few values of `align_items` and describe how it affects the display of the buttons. # 4. Make the box narrower by changing the `width`, then change `flex_flow` to lay out the buttons in rows that wrap so that there is a 2x2 grid of buttons. # # Feel free to figure out the layout using the tool above and copy/paste the layout here! # + from ipywidgets import Layout, Button, Box items_layout = Layout(width='auto') # override the default width of the button to 'auto' to let the button grow box_layout = Layout(display='flex', flex_flow='column', align_items='stretch', border='solid', width='20%') words = ['correct', 'horse', 'battery', 'staple'] items = [Button(description=word, layout=items_layout, button_style='danger') for word in words] box = Box(children=items, layout=box_layout) box # - # **Carousel revisted: item layout** # # The code that generated the carousel is reproduced below. Run the cell, then continue reading. 
# + from ipywidgets import Layout, Button, Box, Label item_layout = Layout(height='100px', min_width='40px') items = [Button(layout=item_layout, description=str(i), button_style='warning') for i in range(40)] box_layout = Layout(overflow_x='scroll', border='3px solid black', width='500px', height='', flex_flow='row', display='flex') carousel = Box(children=items, layout=box_layout) VBox([Label('Scroll horizontally:'), carousel]) # - # **To do:** # # + Change the `min_width` for *one* of the `items`, say the first one. Does it affect only the first one, or all of them? Why? # + Change the `height` of *only* the first button. *Hint:* It needs its own `Layout`. items[0].layout.min_width = 'FILL IN WITH A WIDTH' # ## The Grid layout # # The `GridBox` class is a special case of the `Box` widget. # # The `Box` widget enables the entire CSS flexbox spec, enabling rich reactive layouts in the Jupyter notebook. It aims at providing an efficient way to lay out, align and distribute space among items in a container. # # A more detailed description of the [Grid layout is available](reference_guides/guide-grid-box.ipynb). # # The whole grid layout spec is exposed via the `layout` attribute of the container widget (`Box`) and the contained items. One may share the same `layout` attribute among all the contained items. # # The following flexbox tutorial on the flexbox layout follows the lines of the article [A Complete Guide to Grid](https://css-tricks.com/snippets/css/complete-guide-grid/) by <NAME>, and uses text and various images from the article [with permission](https://css-tricks.com/license/). # # ### Basics # # To get started you have to define a container element as a grid with display: grid, set the column and row sizes with grid-template-rows, grid-template-columns, and grid_template_areas, and then place its child elements into the grid with grid-column and grid-row. Similarly to flexbox, the source order of the grid items doesn't matter. 
Your CSS can place them in any order, which makes it super easy to rearrange your grid with media queries. Imagine defining the layout of your entire page, and then completely rearranging it to accommodate a different screen width all with only a couple lines of CSS. Grid is one of the most powerful CSS modules ever introduced. # # ### Important terminology # # Before diving into the concepts of Grid it's important to understand the terminology. Since the terms involved here are all kinda conceptually similar, it's easy to confuse them with one another if you don't first memorize their meanings defined by the Grid specification. But don't worry, there aren't many of them. # # **Grid Container** # # The element on which `display: grid` is applied. It's the direct parent of all the grid items. In this example container is the grid container. # # ```html # <div class="container"> # <div class="item item-1"></div> # <div class="item item-2"></div> # <div class="item item-3"></div> # </div> # ``` # # **Grid Item** # # The children (e.g. direct descendants) of the grid container. Here the item elements are grid items, but sub-item isn't. # # ```html # <div class="container"> # <div class="item"></div> # <div class="item"> # <p class="sub-item"></p> # </div> # <div class="item"></div> # </div> # ``` # # **Grid Line** # # The dividing lines that make up the structure of the grid. They can be either vertical ("column grid lines") or horizontal ("row grid lines") and reside on either side of a row or column. Here the yellow line is an example of a column grid line. # # ![grid-line](images/grid-line.png) # # **Grid Track** # # The space between two adjacent grid lines. You can think of them like the columns or rows of the grid. Here's the grid track between the second and third row grid lines. # # ![grid-track](images/grid-track.png) # # A more detailed description of the [Grid layout is available](reference_guides/guide-grid-box.ipynb). 
The [Grid layout guide on MDN](https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Grid_Layout#Guides) is also excellent. from ipywidgets import Button, GridBox, Layout, ButtonStyle # The first example defines a 3x3 grid and places 9 buttons into the grid. GridBox(children=[Button(description=str(i), layout=Layout(width='auto', height='auto'), style=ButtonStyle(button_color='darkseagreen')) for i in range(9) ], layout=Layout( width='50%', grid_template_columns='100px 50px 100px', grid_template_rows='80px auto 80px', grid_gap='5px 10px') ) # ### Exercises # **Add more buttons** # # Modify the code above to place more buttons in the `GridBox` (do *not* modify the layout). Any number of buttons larger than 9 is fine. # # 1. What happens to the extra buttons? Are they laid out like the first 9 buttons? # # The grid template defines a 3x3 grid. If additional children are placed in the grid their properties are determined by the layout properties `grid_auto_columns`, `grid_auto_rows` and `grid_auto_flow` properties. # # 2. Set `grid_auto_rows="10px"` and rerun the example with more than 9 buttons. # # 3. Set `grid_auto_rows` so that the automatically added rows have the same format as the templated rows. # ### An alternate way of defining the grid # # The grid can also be set up using a description words. The layout below defines a grid with 4 columns and 3 rows. The first row is a header, the bottom row is a footer, and the middle row has content in the first two columns, then an empty cell, followed by a sidebar. # # Widgets are assigned to each of these areas by setting the widgets's layout `grid_area` to the name of the area. # # ``` # "header header header header" # "main main . 
sidebar " # "footer footer footer footer" # ``` # + header = Button(description='Header', layout=Layout(width='auto', grid_area='header'), style=ButtonStyle(button_color='lightblue')) main = Button(description='Main', layout=Layout(width='auto', grid_area='main'), style=ButtonStyle(button_color='moccasin')) sidebar = Button(description='Sidebar', layout=Layout(width='auto', grid_area='sidebar'), style=ButtonStyle(button_color='salmon')) footer = Button(description='Footer', layout=Layout(width='auto', grid_area='footer'), style=ButtonStyle(button_color='olive')) GridBox(children=[header, main, sidebar, footer], layout=Layout( width='50%', grid_template_rows='auto auto auto', grid_template_columns='25% 25% 25% 25%', grid_template_areas=''' "header header header header" "main main . sidebar " "footer footer footer footer" ''') ) # - # ### Exercises # **Make the main area larger** # # 1. Add another row or two to the template area so that the main area is 3 rows high and 2 columns wide.
jupyter_notebooks/jupyter/widgets_tutorial/06.01-Widget_Layout.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# %pylab inline
pylab.rcParams['figure.figsize'] = (10.0, 8.0)

import os, sys


def plot_current_traces(path='.'):
    """Plot current-vs-time traces from every ``.npy`` file in *path*.

    Each file is expected to hold a structured array with ``time`` and
    ``current`` fields (assumed from the indexing below — TODO confirm
    against the acquisition script).  File names are printed before
    plotting so each trace can be matched to its source file.
    """
    # sorted() makes the trace order deterministic (os.listdir order is
    # platform-dependent); str.endswith replaces the fragile x[-4:] slice.
    for fname in sorted(f for f in os.listdir(path) if f.endswith('.npy')):
        print(fname)
        data = np.load(os.path.join(path, fname))
        plt.plot(data['time'], data['current'])


# This measurement directory (32s) ...
plot_current_traces('.')

# ... and the two sibling runs.
plot_current_traces('../128s')

plot_current_traces('../600s')
measurements/pbs/faradaic/diode/32s/Untitled.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # (autocorr)= # # # Autocorrelation analysis & convergence # # In this tutorial, we will discuss a method for convincing yourself that your chains are sufficiently converged. # This can be a difficult subject to discuss because it isn't formally possible to guarantee convergence for any but the simplest models, and therefore any argument that you make will be circular and heuristic. # However, some discussion of autocorrelation analysis is (or should be!) a necessary part of any publication using MCMC. # # With emcee, we follow [<NAME> (2010)](https://msp.org/camcos/2010/5-1/p04.xhtml) and recommend using the *integrated autocorrelation time* to quantify the effects of sampling error on your results. # The basic idea is that the samples in your chain are not independent and you must estimate the effective number of independent samples. # There are other convergence diagnostics like the [Gelman–Rubin statistic](http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/305.pdf) (*Note: you should not compute the G–R statistic using multiple chains in the same emcee ensemble because the chains are not independent!*) but, since the integrated autocorrelation time directly quantifies the Monte Carlo error (and hence the efficiency of the sampler) on any integrals computed using the MCMC results, it is the natural quantity of interest when judging the robustness of an MCMC analysis. 
# + tags=["hide-cell"] # %config InlineBackend.figure_format = "retina" from matplotlib import rcParams rcParams["savefig.dpi"] = 100 rcParams["figure.dpi"] = 100 rcParams["font.size"] = 20 # - # ## Monte Carlo error # # The goal of every MCMC analysis is to evaluate integrals of the form # # $$ # \mathrm{E}_{p(\theta)}[f(\theta)] = \int f(\theta)\,p(\theta)\,\mathrm{d}\theta \quad. # $$ # # If you had some way of generating $N$ samples $\theta^{(n)}$ from the probability density $p(\theta)$, then you could approximate this integral as # # $$ # \mathrm{E}_{p(\theta)}[f(\theta)] \approx \frac{1}{N} \sum_{n=1}^N f(\theta^{(n)}) # $$ # # where the sum is over the samples from $p(\theta)$. # If these samples are independent, then the sampling variance on this estimator is # # $$ # \sigma^2 = \frac{1}{N}\,\mathrm{Var}_{p(\theta)}[f(\theta)] # $$ # # and the error decreases as $1/\sqrt{N}$ as you generate more samples. # In the case of MCMC, the samples are not independent and the error is actually given by # # $$ # \sigma^2 = \frac{\tau_f}{N}\,\mathrm{Var}_{p(\theta)}[f(\theta)] # $$ # # where $\tau_f$ is the *integrated autocorrelation time* for the chain $f(\theta^{(n)})$. # In other words, $N/\tau_f$ is the effective number of samples and $\tau_f$ is the number of steps that are needed before the chain "forgets" where it started. # This means that, if you can estimate $\tau_f$, then you can estimate the number of samples that you need to generate to reduce the relative error on your target integral to (say) a few percent. # # **Note:** It is important to remember that $\tau_f$ depends on the specific function $f(\theta)$. # This means that there isn't just *one* integrated autocorrelation time for a given Markov chain. # Instead, you must compute a different $\tau_f$ for any integral you estimate using the samples. 
# ## Computing autocorrelation times # # There is a great discussion of methods for autocorrelation estimation in [a set of lecture notes by <NAME>](https://pdfs.semanticscholar.org/0bfe/9e3db30605fe2d4d26e1a288a5e2997e7225.pdf) and the interested reader should take a look at that for a more formal discussion, but I'll include a summary of some of the relevant points here. # The integrated autocorrelation time is defined as # # $$ # \tau_f = \sum_{\tau=-\infty}^\infty \rho_f(\tau) # $$ # # where $\rho_f(\tau)$ is the normalized autocorrelation function of the stochastic process that generated the chain for $f$. # You can estimate $\rho_f(\tau)$ using a finite chain $\{f_n\}_{n=1}^N$ as # # $$ # \hat{\rho}_f(\tau) = \hat{c}_f(\tau) / \hat{c}_f(0) # $$ # # where # # $$ # \hat{c}_f(\tau) = \frac{1}{N - \tau} \sum_{n=1}^{N-\tau} (f_n - \mu_f)\,(f_{n+\tau}-\mu_f) # $$ # # and # # $$ # \mu_f = \frac{1}{N}\sum_{n=1}^N f_n \quad. # $$ # # (Note: In practice, it is actually more computationally efficient to compute $\hat{c}_f(\tau)$ using a fast Fourier transform than summing it directly.) # # Now, you might expect that you can estimate $\tau_f$ using this estimator for $\rho_f(\tau)$ as # # $$ # \hat{\tau}_f \stackrel{?}{=} \sum_{\tau=-N}^{N} \hat{\rho}_f(\tau) = 1 + 2\,\sum_{\tau=1}^N \hat{\rho}_f(\tau) # $$ # # but this isn't actually a very good idea. # At longer lags, $\hat{\rho}_f(\tau)$ starts to contain more noise than signal and summing all the way out to $N$ will result in a very noisy estimate of $\tau_f$. # Instead, we want to estimate $\tau_f$ as # # $$ # \hat{\tau}_f (M) = 1 + 2\,\sum_{\tau=1}^M \hat{\rho}_f(\tau) # $$ # # for some $M \ll N$. # As discussed by Sokal in the notes linked above, the introduction of $M$ decreases the variance of the estimator at the cost of some added bias and he suggests choosing the smallest value of $M$ where $M \ge C\,\hat{\tau}_f (M)$ for a constant $C \sim 5$. 
# Sokal says that he finds this procedure to work well for chains longer than $1000\,\tau_f$, but the situation is a bit better with emcee because we can use the parallel chains to reduce the variance and we've found that chains longer than about $50\,\tau$ are often sufficient.

# ## A toy problem
#
# To demonstrate this method, we'll start by generating a set of "chains" from a process with known autocorrelation structure.
# To generate a large enough dataset, we'll use [celerite](http://celerite.readthedocs.io):

# +
import numpy as np
import matplotlib.pyplot as plt

np.random.seed(1234)

# Build the celerite model: a sum of two exponential (RealTerm) kernels.
import celerite
from celerite import terms

kernel = terms.RealTerm(log_a=0.0, log_c=-6.0)
kernel += terms.RealTerm(log_a=0.0, log_c=-2.0)

# The true autocorrelation time can be calculated analytically:
# each RealTerm contributes 2*a/c, weighted by its amplitude a.
true_tau = sum(2 * np.exp(t.log_a - t.log_c) for t in kernel.terms)
true_tau /= sum(np.exp(t.log_a) for t in kernel.terms)
true_tau

# Simulate a set of 32 parallel chains, each 2e6 steps long:
gp = celerite.GP(kernel)
steps = np.arange(2000000)
gp.compute(steps)
y = gp.sample(size=32)

# Let's plot a little segment with a few samples:
plt.plot(y[:3, :300].T)
plt.xlim(0, 300)
plt.xlabel("step number")
plt.ylabel("$f$")
plt.title("$\\tau_\mathrm{{true}} = {0:.0f}$".format(true_tau), fontsize=14);
# -

# Now we'll estimate the empirical autocorrelation function for each of these parallel chains and compare this to the true function.
# +
def next_pow_two(n):
    """Return the smallest power of two that is >= n."""
    i = 1
    while i < n:
        i = i << 1
    return i


def autocorr_func_1d(x, norm=True):
    """Estimate the autocorrelation function of a 1-D chain using the FFT.

    When ``norm`` is True the result is scaled so that lag zero equals one.
    """
    x = np.atleast_1d(x)
    if len(x.shape) != 1:
        raise ValueError("invalid dimensions for 1D autocorrelation function")
    # Zero-pad to twice the next power of two: keeps the FFT fast and avoids
    # wrap-around (circular correlation) artifacts.
    n_fft = next_pow_two(len(x))

    # Compute the FFT and then (from that) the auto-correlation function
    transformed = np.fft.fft(x - np.mean(x), n=2 * n_fft)
    acf = np.fft.ifft(transformed * np.conjugate(transformed))[: len(x)].real
    # Constant rescaling; it cancels when the function is normalized below
    # (or when callers divide by the lag-zero value themselves).
    acf /= 4 * n_fft

    # Optionally normalize
    if norm:
        acf /= acf[0]

    return acf


# Make plots of ACF estimate for a few different chain lengths
window = int(2 * true_tau)
tau = np.arange(window + 1)
f0 = kernel.get_value(tau) / kernel.get_value(0.0)

# Loop over chain lengths:
fig, axes = plt.subplots(1, 3, figsize=(12, 4), sharex=True, sharey=True)
for n, ax in zip([10, 100, 1000], axes):
    chain_len = int(true_tau * n)
    ax.plot(tau / true_tau, f0, "k", label="true")
    ax.plot(
        tau / true_tau,
        autocorr_func_1d(y[0, :chain_len])[: window + 1],
        label="estimate",
    )
    ax.set_title(r"$N = {0}\,\tau_\mathrm{{true}}$".format(n), fontsize=14)
    ax.set_xlabel(r"$\tau / \tau_\mathrm{true}$")
axes[0].set_ylabel(r"$\rho_f(\tau)$")
axes[-1].set_xlim(0, window / true_tau)
axes[-1].set_ylim(-0.05, 1.05)
axes[-1].legend(fontsize=14);
# -

# This figure shows how the empirical estimate of the normalized autocorrelation function changes as more samples are generated.
# In each panel, the true autocorrelation function is shown as a black curve and the empirical estimator is shown as a blue line.
#
# Instead of estimating the autocorrelation function using a single chain, we can assume that each chain is sampled from the same stochastic process and average the estimate over ensemble members to reduce the variance.
# It turns out that we'll actually do this averaging later in the process below, but it can be useful to show the mean autocorrelation function for visualization purposes.
# +
fig, axes = plt.subplots(1, 3, figsize=(12, 4), sharex=True, sharey=True)
for n, ax in zip([10, 100, 1000], axes):
    chain_len = int(true_tau * n)
    ax.plot(tau / true_tau, f0, "k", label="true")
    # Average the unnormalized ACF over all walkers, then normalize the mean.
    mean_acf = np.mean(
        [autocorr_func_1d(walker[:chain_len], norm=False)[: window + 1] for walker in y],
        axis=0,
    )
    mean_acf /= mean_acf[0]
    ax.plot(tau / true_tau, mean_acf, label="estimate")
    ax.set_title(r"$N = {0}\,\tau_\mathrm{{true}}$".format(n), fontsize=14)
    ax.set_xlabel(r"$\tau / \tau_\mathrm{true}$")
axes[0].set_ylabel(r"$\rho_f(\tau)$")
axes[-1].set_xlim(0, window / true_tau)
axes[-1].set_ylim(-0.05, 1.05)
axes[-1].legend(fontsize=14);
# -

# Now let's estimate the autocorrelation time using these estimated autocorrelation functions.
# Goodman & Weare (2010) suggested averaging the ensemble over walkers and computing the autocorrelation function of the mean chain to lower the variance of the estimator and that was what was originally implemented in emcee.
# Since then, @fardal on GitHub [suggested that other estimators might have lower variance](https://github.com/dfm/emcee/issues/209).
# This is absolutely correct and, instead of the Goodman & Weare method, we now recommend computing the autocorrelation time for each walker (it's actually possible to still use the ensemble to choose the appropriate window) and then average these estimates.
#
# Here is an implementation of each of these methods and a plot showing the convergence as a function of the chain length:

# +
# Automated windowing procedure following Sokal (1989)
def auto_window(taus, c):
    """Return the smallest window M satisfying M >= c * tau_hat(M)."""
    in_window = np.arange(len(taus)) < c * taus
    if np.any(in_window):
        # argmin of a boolean array returns the first False entry, i.e. the
        # first index where the Sokal window condition is met.
        return np.argmin(in_window)
    return len(taus) - 1


# Following the suggestion from Goodman & Weare (2010)
def autocorr_gw2010(y, c=5.0):
    """Estimate tau from the ACF of the walker-averaged chain."""
    acf = autocorr_func_1d(np.mean(y, axis=0))
    taus = 2.0 * np.cumsum(acf) - 1.0
    return taus[auto_window(taus, c)]


def autocorr_new(y, c=5.0):
    """Estimate tau by averaging the per-walker ACFs before windowing."""
    acf = np.zeros(y.shape[1])
    for walker in y:
        acf += autocorr_func_1d(walker)
    acf /= len(y)
    taus = 2.0 * np.cumsum(acf) - 1.0
    return taus[auto_window(taus, c)]


# Compute the estimators for a few different chain lengths
N = np.exp(np.linspace(np.log(100), np.log(y.shape[1]), 10)).astype(int)
gw2010 = np.empty(len(N))
new = np.empty(len(N))
for i, n in enumerate(N):
    gw2010[i] = autocorr_gw2010(y[:, :n])
    new[i] = autocorr_new(y[:, :n])

# Plot the comparisons
plt.loglog(N, gw2010, "o-", label="G&W 2010")
plt.loglog(N, new, "o-", label="new")
ylim = plt.gca().get_ylim()
plt.plot(N, N / 50.0, "--k", label=r"$\tau = N/50$")
plt.axhline(true_tau, color="k", label="truth", zorder=-100)
plt.ylim(ylim)
plt.xlabel("number of samples, $N$")
plt.ylabel(r"$\tau$ estimates")
plt.legend(fontsize=14);
# -

# In this figure, the true autocorrelation time is shown as a horizontal line and it should be clear that both estimators give outrageous results for the short chains.
# It should also be clear that the new algorithm has lower variance than the original method based on Goodman & Weare.
# In fact, even for moderately long chains, the old method can give dangerously over-confident estimates.
# For comparison, we have also plotted the $\tau = N/50$ line to show that, once the estimate crosses that line, the estimates start to get more reasonable.
# This suggests that you probably shouldn't trust any estimate of $\tau$ unless you have more than $F\times\tau$ samples for some $F \ge 50$.
# Larger values of $F$ will be more conservative, but they will also (obviously) require longer chains.

# ## A more realistic example
#
# Now, let's run an actual Markov chain and test these methods using those samples.
# So that the sampling isn't completely trivial, we'll sample a multimodal density in three dimensions.

# +
import emcee


def log_prob(p):
    """Log-density of an equal mixture of two unit Gaussians at 0 and 4."""
    mode_a = -0.5 * np.sum(p ** 2)
    mode_b = -0.5 * np.sum((p - 4.0) ** 2)
    return np.logaddexp(mode_a, mode_b)


# 32 walkers in 3 dimensions, initialized half at each mode.
sampler = emcee.EnsembleSampler(32, 3, log_prob)
sampler.run_mcmc(
    np.concatenate(
        (np.random.randn(16, 3), 4.0 + np.random.randn(16, 3)), axis=0
    ),
    500000,
    progress=True,
);
# -

# Here's the marginalized density in the first dimension.

# +
chain = sampler.get_chain()[:, :, 0].T

plt.hist(chain.flatten(), 100)
plt.gca().set_yticks([])
plt.xlabel(r"$\theta$")
plt.ylabel(r"$p(\theta)$");
# -

# And here's the comparison plot showing how the autocorrelation time estimates converge with longer chains.

# +
# Compute the estimators for a few different chain lengths
N = np.exp(np.linspace(np.log(100), np.log(chain.shape[1]), 10)).astype(int)
gw2010 = np.empty(len(N))
new = np.empty(len(N))
for i, n in enumerate(N):
    gw2010[i] = autocorr_gw2010(chain[:, :n])
    new[i] = autocorr_new(chain[:, :n])

# Plot the comparisons
plt.loglog(N, gw2010, "o-", label="G&W 2010")
plt.loglog(N, new, "o-", label="new")
ylim = plt.gca().get_ylim()
plt.plot(N, N / 50.0, "--k", label=r"$\tau = N/50$")
plt.ylim(ylim)
plt.xlabel("number of samples, $N$")
plt.ylabel(r"$\tau$ estimates")
plt.legend(fontsize=14);
# -

# As before, the short chains give absurd estimates of $\tau$, but the new method converges faster and with lower variance than the old method.
# The $\tau = N/50$ line is also included as above as an indication of where we might start trusting the estimates.

# ## What about shorter chains?
#
# Sometimes it just might not be possible to run chains that are long enough to get a reliable estimate of $\tau$ using the methods described above.
# In these cases, you might be able to get an estimate using parametric models for the autocorrelation.
# One example would be to fit an [autoregressive model](https://en.wikipedia.org/wiki/Autoregressive_model) to the chain and use that to estimate the autocorrelation time.
#
# As an example, we'll use [celerite](http://celerite.readthedocs.io) to fit for the maximum likelihood autocorrelation function and then compute an estimate of $\tau$ based on that model.
# The celerite model that we're using is equivalent to a second-order ARMA model and it appears to be a good choice for this example, but we're not going to promise anything here about the general applicability and we advise caution whenever estimating autocorrelation times using short chains.
#
# :::{note}
# To run this part of the tutorial, you'll need to install [celerite](https://celerite.readthedocs.io) and [autograd](https://github.com/HIPS/autograd).
# :::

# +
from scipy.optimize import minimize


def autocorr_ml(y, thin=1, c=5.0):
    """Maximum-likelihood estimate of tau from a (walkers, steps) chain.

    Fits a two-term celerite (exponential) kernel to the thinned chain and
    derives tau analytically from the fitted coefficients.
    """
    # Compute the initial estimate of tau using the standard method
    tau_init = autocorr_new(y, c=c)
    thinned = y[:, ::thin]
    n_samples = thinned.shape[1]

    # Build the GP model: two RealTerms seeded at 90% / 10% of the variance.
    tau_guess = max(1.0, tau_init / thin)
    ar_kernel = terms.RealTerm(
        np.log(0.9 * np.var(thinned)),
        -np.log(tau_guess),
        bounds=[(-5.0, 5.0), (-np.log(n_samples), 0.0)],
    )
    ar_kernel += terms.RealTerm(
        np.log(0.1 * np.var(thinned)),
        -np.log(0.5 * tau_guess),
        bounds=[(-5.0, 5.0), (-np.log(n_samples), 0.0)],
    )
    gp = celerite.GP(ar_kernel, mean=np.mean(thinned))
    gp.compute(np.arange(n_samples))

    # Define the objective: negative log-likelihood summed over all walkers.
    def nll(p):
        # Update the GP model
        gp.set_parameter_vector(p)

        # Loop over the chains and compute likelihoods
        v, g = zip(*(gp.grad_log_likelihood(z0, quiet=True) for z0 in thinned))

        # Combine the datasets
        return -np.sum(v), -np.sum(g, axis=0)

    # Optimize the model
    x0 = gp.get_parameter_vector()
    bounds = gp.get_parameter_bounds()
    soln = minimize(nll, x0, jac=True, bounds=bounds)
    gp.set_parameter_vector(soln.x)

    # Compute the maximum likelihood tau from the fitted kernel coefficients
    # (named to avoid shadowing the `c` parameter above).
    amps, rates = ar_kernel.coefficients[:2]
    return thin * 2 * np.sum(amps / rates) / np.sum(amps)


# Calculate the estimate for a set of different chain lengths
ml = np.empty(len(N))
ml[:] = np.nan
for i, n in enumerate(N[1:8], start=1):
    thin = max(1, int(0.05 * new[i]))
    ml[i] = autocorr_ml(chain[:, :n], thin=thin)
# -

# Plot the comparisons
plt.loglog(N, gw2010, "o-", label="G&W 2010")
plt.loglog(N, new, "o-", label="new")
plt.loglog(N, ml, "o-", label="ML")
ylim = plt.gca().get_ylim()
plt.plot(N, N / 50.0, "--k", label=r"$\tau = N/50$")
plt.ylim(ylim)
plt.xlabel("number of samples, $N$")
plt.ylabel(r"$\tau$ estimates")
plt.legend(fontsize=14);

# This figure is the same as the previous one, but we've added the maximum likelihood estimates for $\tau$ in green.
# In this case, this estimate seems to be robust even for very short chains with $N \sim \tau$.
docs/tutorials/autocorr.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Dirichlet Distribution

# + [markdown] slideshow={"slide_type": "slide"}
# ## Imports

# + slideshow={"slide_type": "fragment"}
import warnings
from collections import OrderedDict
from pathlib import Path

import numpy as np
import pandas as pd

# Visualization
from ipywidgets import interact, FloatSlider
import matplotlib.pyplot as plt
import seaborn as sns
# -

# %matplotlib inline
plt.style.use('ggplot')
plt.rcParams['figure.figsize'] = (14.0, 8.7)
warnings.filterwarnings('ignore')
pd.options.display.float_format = '{:,.2f}'.format

# + [markdown] slideshow={"slide_type": "slide"}
# ## Simulate Dirichlet Distribution
# -

# The Dirichlet distribution produces probability vectors that can be used with discrete distributions. That is, it randomly generates a given number of values that are positive and sum to one. It has a positive, real-valued parameter 𝜶 that controls the concentration of the probabilities. Values closer to zero mean that only a few values will be positive and receive most of the probability mass.
#
# The following simulation lets you interactively explore how different parameter values affect the resulting probability distributions.
# + hide_input=false slideshow={"slide_type": "fragment"}
f = FloatSlider(value=1, min=1e-2, max=1e2, step=1e-2,
                continuous_update=False, description='Alpha')


@interact(alpha=f)
def sample_dirichlet(alpha):
    """Draw ``draws`` samples from a symmetric Dirichlet distribution over
    ``topics`` categories with concentration ``alpha`` and plot each sample
    as a bar chart on a 3x3 grid."""
    topics = 10  # dimensionality of each probability vector
    draws = 9    # number of sampled vectors (must fill the 3x3 grid)
    alphas = np.full(shape=topics, fill_value=alpha)
    samples = np.random.dirichlet(alpha=alphas, size=draws)
    fig, axes = plt.subplots(nrows=3, ncols=3, sharex=True, sharey=True)
    axes = axes.flatten()
    plt.setp(axes, ylim=(0, 1))
    for i, sample in enumerate(samples):
        # Use `topics` instead of a hard-coded 10 so the bars and the color
        # palette stay in sync if the number of topics is ever changed.
        axes[i].bar(x=list(range(topics)), height=sample,
                    color=sns.color_palette("Set2", topics))
    # Derive the title from the same constants for the same reason.
    fig.suptitle('Dirichlet Allocation | {0} Topics, {1} Samples'.format(topics, draws))
    fig.tight_layout()
    plt.subplots_adjust(top=.95)
# -
Chapter14/03_dirichlet_distribution.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: xaitools # language: python # name: xaitools # --- # # (Example 2) Help developers understand why a file is predicted as defective # # ## Motivation # Traditionally, the predictions of defect models can help developers prioritize which files are the most risky. # However, developers do not understand why a file is predicted as defective, leading to a lack of trust in the predictions and hindering the adoption of defect prediction models in practice. # Thus, a lack of explainability of defect prediction models remains an extremely challenging problem. # # ## Approach # To address this problem, we proposed to use a model-agnostic technique called LIME{cite}`ribeiro2016model` to explain the predictions of file-level defect prediction models. # In particular, we first build file-level defect prediction models that are trained using traditional software features (e.g., lines of code, code complexity, the number of developers who edited a file) with a random forest classification technique. # For each prediction, we apply LIME to understand the prediction. # This approach allows us to identify which features contribute to the prediction of each file. # This will help developers understand why a file is predicted as defective. # # ## Results # # ```{figure} /xai4se/images/defect-explanation.png # --- # name: figure-defect-explanation # --- # An example of a visual explanation generated by LIME. # ``` # # {numref}`figure-defect-explanation` presents an example of a visual explanation generated by LIME to understand why a file is predicted as defective. # According to this visual explanation, this file is predicted as defective with a risk score of 70%. 
# The top-3 important factors that support this prediction are (1) the high number of class and method declaration lines, (2) the high number of distinct developers that contributed to the file, and (3) the low proportion of code ownership. # Thus, to mitigate the risk of having defects for this file, developers should consider decreasing the number of class and method declaration lines, reducing the number of distinct developers, and increasing the proportion of code ownership. #
docs/xai4se/local-defect-explanation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# # Exploring and Processing Data

# imports
import pandas as pd
import numpy as np
import os

# ## Import Data

# set the path of the raw data
raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
train_file_path = os.path.join(raw_data_path, 'train.csv')
test_file_path = os.path.join(raw_data_path, 'test.csv')

# read the data with all default parameters
train_df = pd.read_csv(train_file_path, index_col='PassengerId')
test_df = pd.read_csv(test_file_path, index_col='PassengerId')

# get the type
type(train_df)

# ## Basic Structure

# use .info() to get brief information about the dataframe
train_df.info()

test_df.info()

# sentinel value marking test rows once the two frames are combined
test_df['Survived'] = -888

df = pd.concat((train_df, test_df), axis=0)

df.info()

# Number of male passengers
male_passengers = df.loc[df.Sex == 'male', :]
print('Number of male passengers : {0}'.format(len(male_passengers)))

# Number of male passengers in first class
male_passengers_first_class = df.loc[((df.Sex == 'male') & (df.Pclass == 1)), :]
print('Number of male passengers in first class: {0}'.format(len(male_passengers_first_class)))

# ## Summary Statistics

df.describe()

# Mean and Median fares
print('Mean fare : {0}'.format(df.Fare.mean()))      # mean
print('Median fare : {0}'.format(df.Fare.median()))  # median

# dispersion measures
print('Min fare : {0}'.format(df.Fare.min()))                    # minimum
print('Max fare : {0}'.format(df.Fare.max()))                    # maximum
print('Fare range : {0}'.format(df.Fare.max() - df.Fare.min()))  # range
print('25 percentile : {0}'.format(df.Fare.quantile(.25)))       # 25 percentile
print('50 percentile : {0}'.format(df.Fare.quantile(.5)))        # 50 percentile
print('75 percentile : {0}'.format(df.Fare.quantile(.75)))       # 75 percentile
print('Variance fare : {0}'.format(df.Fare.var()))               # variance
print('Standard deviation fare : {0}'.format(df.Fare.std()))  # standard deviation

# %matplotlib inline

# box-whisker plot
df.Fare.plot(kind='box')

# categorical column : Counts
df.Sex.value_counts()

# categorical column : Proportions
df.Sex.value_counts(normalize=True)

# apply on other columns (excluding the -888 sentinel test rows)
df[df.Survived != -888].Survived.value_counts()

# count : Passenger class
df.Pclass.value_counts()

# visualize counts
df.Pclass.value_counts().plot(kind='bar')

# title : to set title, color : to set color, rot : to rotate labels
df.Pclass.value_counts().plot(kind='bar', rot=0, title='Class wise passenger count', color='c');

# ## Distributions

# Histogram plot of Age
df.Age.plot(kind='hist', title='histogram for Age', color='c', bins=20);

# Density plot for Age
df.Age.plot(kind='kde', title='Density plot for Age', color='c');

# histogram for fare
df.Fare.plot(kind='hist', title='histogram for Fare', color='c', bins=20);

print('skewness for age : {0:.2f}'.format(df.Age.skew()))
print('skewness for fare : {0:.2f}'.format(df.Fare.skew()))

# Age vs Fare scatter plot
df.plot.scatter(x='Age', y='Fare', color='c', title='scatter plot : Age vs Fare');

# Median values of Age based on Gender
df.groupby('Sex').Age.median()

# Median values of Fare based on Passenger class
df.groupby(['Pclass']).Fare.median()

# Median values of Age based on Passenger class
df.groupby(['Pclass']).Age.median()

# Select multiple columns with a list of labels; the bare tuple form
# df.groupby(...)['Fare','Age'] was deprecated and removed in pandas 1.0.
df.groupby(['Pclass'])[['Fare', 'Age']].median()

df.groupby(['Pclass']).agg({'Fare': 'mean', 'Age': 'median'})

# Named aggregation (pandas >= 0.25). The original nested-dict renaming form
# ({'Fare': {'mean_Fare': 'mean', ...}}) was deprecated in 0.20 and removed
# in 1.0 (raises SpecificationError), so use output_name=(column, func) pairs.
aggregations = {
    'mean_Fare': ('Fare', 'mean'),      # get the mean fare
    'median_Fare': ('Fare', 'median'),  # get median fare
    'max_Fare': ('Fare', max),
    'min_Fare': ('Fare', np.min),
    'median_Age': ('Age', 'median'),
    'min_Age': ('Age', min),
    'max_Age': ('Age', max),
    'range_Age': ('Age', lambda x: max(x) - min(x)),  # age range per group
}
df.groupby(['Pclass']).agg(**aggregations)

df.groupby(['Pclass', 'Embarked']).Fare.median()
df.groupby(['Sex','Pclass']).Age.mean() df.groupby(['Sex','Pclass']).Age.mean().unstack() # # ## Data Munging : Working with missing values # use .info() to detect missing values (if any) df.info() # ### Feature : Embarked # extract rows with Embarked as Null df[df.Embarked.isnull()] # how many people embarked at different points df.Embarked.value_counts() # which embarked point has higher survival count pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].Embarked) # explore the fare of each class for each embarkment point df.groupby(['Pclass', 'Embarked']).Fare.median() # replace the missing values with 'C' df.Embarked.fillna('C', inplace=True) # check if any null value remaining df[df.Embarked.isnull()] # check info again df.info() # ### Feature : Fare df[df.Fare.isnull()] median_fare = df.loc[(df.Pclass == 3) & (df.Embarked == 'S'),'Fare'].median() print(median_fare) df.Fare.fillna(median_fare, inplace=True) # check info again df.info() # ### Feature : Age # set maximum number of rows to be displayed pd.options.display.max_rows = 15 # return null rows df[df.Age.isnull()] # #### replace with median age of gender # median values df.groupby('Sex').Age.median() # visualize using boxplot df[df.Age.notnull()].boxplot('Age','Sex'); # #### option 2 : replace with median age of Pclass df[df.Age.notnull()].boxplot('Age','Pclass'); # + # replace : # pclass_age_median = df.groupby('Pclass').Age.transform('median') # df.Age.fillna(pclass_age_median , inplace=True) # - # #### option 3 : replace with median age of title df.Name # Function to extract the title from the name def GetTitle(name): first_name_with_title = name.split(',')[1] title = first_name_with_title.split('.')[0] title = title.strip().lower() return title df.Name.map(lambda x : GetTitle(x)) df.Name.map(lambda x : GetTitle(x)).unique() # + # Function to extract the title from the name def GetTitle(name): title_group = {'mr' : 'Mr', 'mrs' : 'Mrs', 'miss' : 'Miss', 'master' : 'Master', 'don' : 
'Sir', 'rev' : 'Sir', 'dr' : 'Officer', 'mme' : 'Mrs', 'ms' : 'Mrs', 'major' : 'Officer', 'lady' : 'Lady', 'sir' : 'Sir', 'mlle' : 'Miss', 'col' : 'Officer', 'capt' : 'Officer', 'the countess' : 'Lady', 'jonkheer' : 'Sir', 'dona' : 'Lady' } first_name_with_title = name.split(',')[1] title = first_name_with_title.split('.')[0] title = title.strip().lower() return title_group[title] # - # create Title feature df['Title'] = df.Name.map(lambda x : GetTitle(x)) # head df.head() # Box plot of Age with title df[df.Age.notnull()].boxplot('Age','Title'); # replace missing values title_age_median = df.groupby('Title').Age.transform('median') df.Age.fillna(title_age_median , inplace=True) # check info again df.info() # ## Working with outliers # ### Age # use histogram to get understand the distribution df.Age.plot(kind='hist', bins=20, color='c'); df.loc[df.Age > 70] # ### Fare # histogram for fare df.Fare.plot(kind='hist', title='histogram for Fare', bins=20, color='c'); # box plot to indentify outliers df.Fare.plot(kind='box'); # look into the outliers df.loc[df.Fare == df.Fare.max()] # Try some transformations to reduce the skewness LogFare = np.log(df.Fare + 1.0) # Adding 1 to accomodate zero fares : log(0) is not defined # Histogram of LogFare LogFare.plot(kind='hist', color='c', bins=20); # binning pd.qcut(df.Fare, 4) pd.qcut(df.Fare, 4, labels=['very_low','low','high','very_high']) # discretization pd.qcut(df.Fare, 4, labels=['very_low','low','high','very_high']).value_counts().plot(kind='bar', color='c', rot=0); # create fare bin feature df['Fare_Bin'] = pd.qcut(df.Fare, 4, labels=['very_low','low','high','very_high']) # ## Feature Engineering # ### Feature : Age State ( Adult or Child ) # AgeState based on Age df['AgeState'] = np.where(df['Age'] >= 18, 'Adult','Child') # AgeState Counts df['AgeState'].value_counts() # crosstab pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].AgeState) # ### Feature : FamilySize # Family : Adding Parents with 
Siblings df['FamilySize'] = df.Parch + df.SibSp + 1 # 1 for self # explore the family feature df['FamilySize'].plot(kind='hist', color='c'); # further explore this family with max family members df.loc[df.FamilySize == df.FamilySize.max(),['Name','Survived','FamilySize','Ticket']] pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].FamilySize) # ### Feature : IsMother # a lady aged more thana 18 who has Parch >0 and is married (not Miss) df['IsMother'] = np.where(((df.Sex == 'female') & (df.Parch > 0) & (df.Age > 18) & (df.Title != 'Miss')), 1, 0) # Crosstab with IsMother pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].IsMother) # ### Deck # explore Cabin values df.Cabin # use unique to get unique values for Cabin feature df.Cabin.unique() # look at the Cabin = T df.loc[df.Cabin == 'T'] # set the value to NaN df.loc[df.Cabin == 'T', 'Cabin'] = np.NaN # look at the unique values of Cabin again df.Cabin.unique() # extract first character of Cabin string to the deck def get_deck(cabin): return np.where(pd.notnull(cabin),str(cabin)[0].upper(),'Z') df['Deck'] = df['Cabin'].map(lambda x : get_deck(x)) # check counts df.Deck.value_counts() # use crosstab to look into survived feature cabin wise pd.crosstab(df[df.Survived != -888].Survived, df[df.Survived != -888].Deck) # info command df.info() # ### Categorical Feature Encoding # sex df['IsMale'] = np.where(df.Sex == 'male', 1, 0) # columns Deck, Pclass, Title, AgeState df = pd.get_dummies(df,columns=['Deck', 'Pclass','Title', 'Fare_Bin', 'Embarked','AgeState']) df.info() # ### Drop and Reorder Columns # drop columns df.drop(['Cabin','Name','Ticket','Parch','SibSp','Sex'], axis=1, inplace=True) # reorder columns columns = [column for column in df.columns if column != 'Survived'] columns = ['Survived'] + columns df = df[columns] # check info again df.info() # ## Save Processed Dataset processed_data_path = os.path.join(os.path.pardir,'data','processed') write_train_path = 
os.path.join(processed_data_path, 'train.csv') write_test_path = os.path.join(processed_data_path, 'test.csv') # train data df.loc[df.Survived != -888].to_csv(write_train_path) # test data columns = [column for column in df.columns if column != 'Survived'] df.loc[df.Survived == -888, columns].to_csv(write_test_path) # ### Building the data processing script get_processed_data_script_file = os.path.join(os.path.pardir,'src','data','get_processed_data.py') # + # %%writefile $get_processed_data_script_file import numpy as np import pandas as pd import os def read_data(): # set the path of the raw data raw_data_path = os.path.join(os.path.pardir,'data','raw') train_file_path = os.path.join(raw_data_path, 'train.csv') test_file_path = os.path.join(raw_data_path, 'test.csv') # read the data with all default parameters train_df = pd.read_csv(train_file_path, index_col='PassengerId') test_df = pd.read_csv(test_file_path, index_col='PassengerId') test_df['Survived'] = -888 df = pd.concat((train_df, test_df), axis=0) return df def process_data(df): # using the method chaining concept return (df # create title attribute - then add this .assign(Title = lambda x: x.Name.map(get_title)) # working missing values - start with this .pipe(fill_missing_values) # create fare bin feature .assign(Fare_Bin = lambda x: pd.qcut(x.Fare, 4, labels=['very_low','low','high','very_high'])) # create age state .assign(AgeState = lambda x : np.where(x.Age >= 18, 'Adult','Child')) .assign(FamilySize = lambda x : x.Parch + x.SibSp + 1) .assign(IsMother = lambda x : np.where(((x.Sex == 'female') & (x.Parch > 0) & (x.Age > 18) & (x.Title != 'Miss')), 1, 0)) # create deck feature .assign(Cabin = lambda x: np.where(x.Cabin == 'T', np.nan, x.Cabin)) .assign(Deck = lambda x : x.Cabin.map(get_deck)) # feature encoding .assign(IsMale = lambda x : np.where(x.Sex == 'male', 1,0)) .pipe(pd.get_dummies, columns=['Deck', 'Pclass','Title', 'Fare_Bin', 'Embarked','AgeState']) # add code to drop unnecessary columns 
.drop(['Cabin','Name','Ticket','Parch','SibSp','Sex'], axis=1) # reorder columns .pipe(reorder_columns) ) def get_title(name): title_group = {'mr' : 'Mr', 'mrs' : 'Mrs', 'miss' : 'Miss', 'master' : 'Master', 'don' : 'Sir', 'rev' : 'Sir', 'dr' : 'Officer', 'mme' : 'Mrs', 'ms' : 'Mrs', 'major' : 'Officer', 'lady' : 'Lady', 'sir' : 'Sir', 'mlle' : 'Miss', 'col' : 'Officer', 'capt' : 'Officer', 'the countess' : 'Lady', 'jonkheer' : 'Sir', 'dona' : 'Lady' } first_name_with_title = name.split(',')[1] title = first_name_with_title.split('.')[0] title = title.strip().lower() return title_group[title] def get_deck(cabin): return np.where(pd.notnull(cabin),str(cabin)[0].upper(),'Z') def fill_missing_values(df): # embarked df.Embarked.fillna('C', inplace=True) # fare median_fare = df[(df.Pclass == 3) & (df.Embarked == 'S')]['Fare'].median() df.Fare.fillna(median_fare, inplace=True) # age title_age_median = df.groupby('Title').Age.transform('median') df.Age.fillna(title_age_median , inplace=True) return df def reorder_columns(df): columns = [column for column in df.columns if column != 'Survived'] columns = ['Survived'] + columns df = df[columns] return df def write_data(df): processed_data_path = os.path.join(os.path.pardir,'data','processed') write_train_path = os.path.join(processed_data_path, 'train.csv') write_test_path = os.path.join(processed_data_path, 'test.csv') # train data df[df.Survived != -888].to_csv(write_train_path) # test data columns = [column for column in df.columns if column != 'Survived'] df[df.Survived == -888][columns].to_csv(write_test_path) if __name__ == '__main__': df = read_data() df = process_data(df) write_data(df) # - # !python $get_processed_data_script_file train_df = pd.read_csv(write_train_path) train_df.info()
notebooks/exploring-processing-data.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## 3. ETH Modeling

# - Ethereum Energy Consumption Index
#   https://digiconomist.net/ethereum-energy-consumption
# - Ethereum hashrate
#   https://etherscan.io/chart/hashrate
# - Ethereum difficulty
#   https://etherscan.io/chart/difficulty
# - Ethereum Market Price
#   https://www.coindesk.com/price/ethereum

# ### Importing Necessary Libraries

import pandas as pd
import matplotlib.pyplot as plt
# import numpy as np
# from scipy.interpolate import interp1d
import os

# os.getcwd()

bc = pd.read_csv('/Users/lemontreeran/VENVPythonProj/cyptomodeling/ETH-DATAMODEL.csv')
bc.tail()

# ### Converting Dates into a Datetime Format

bc['Date'] = pd.to_datetime(bc.Date)
bc.dtypes

# ### Setup the constant variables:
# #### 1. hrs_day (number of hours in a day)
# #### 2. electricity_cost (13.5 cents per kWh)
# #### 3. sec_hr (number of seconds in an hour)
# #### 4.
block_reward(2 since the data range after 17/01/2019) hrs_day = 24 sec_hr = 3600 electricity_cost = 0.135 # ### Calculate model price bc['model price'] = bc.apply(lambda x: ((x['Hashpower'] / 1000) * (electricity_cost * x['Energy efficiency'] * hrs_day)) / (((x['Block Reward'] * x['Hashpower'] * sec_hr) / (x['Difficulty'] * 1000 / 10**9 * 2**32)) * hrs_day), axis=1) bc['ratio'] = bc.apply(lambda x: (x['model price'] / x['Market Price']), axis=1) bc['baseline'] = bc.apply(lambda x: 1, axis=1) bc.head() # ### Setting dates as the index bc.set_index('Date', inplace=True) bc.index bc['Market Price'] model_bc = bc[['Market Price','model price']] model_bc.head() model_bc.tail() # ### Plotting Bitcoin Model Prices bc_reindexed = model_bc.reindex(pd.date_range(start=model_bc.index.min(),end=model_bc.index.max(),freq='1D')) bc_reindexed.interpolate(method='cubic') # + model_bc.plot(figsize=(16,5)) plt.xlabel('Date') plt.ylabel('Price in USD') plt.title('Ethereum Price Model - Marginal Cost of Product') plt.savefig('ethmodelprice.png') plt.show() # + ratio_bc = bc[['ratio', 'baseline']] ratio_bc.plot(figsize=(16,5)) plt.xlabel('Date') plt.ylabel('ratio') plt.title('Ratio Ether Price Model - Marginal Cost of Product') plt.savefig('ethmodelpriceratio.png') plt.show() # -
ETHModeling.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3 (ipykernel)
#     language: python
#     name: python3
# ---

# + [markdown] id="view-in-github"
# <a href="https://colab.research.google.com/github/butchland/fastai_xla_extensions/blob/master/samples/MNIST_TPU_demo.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>

# + [markdown] id="7lDQKIEBHMbN"
# # FastAI XLA Extensions MNIST Demo (Single Core TPU)

# + [markdown] id="yJAjBc_8HMbT"
# ## How to use

# + [markdown] id="xv3xARMbHMbW"
# ### Configure the Pytorch XLA package
#
# The Pytorch xla package requires an environment supporting TPUs (Kaggle kernels, GCP or Colab environments required)
#
# If running on Colab, make sure the Runtime Type is set to TPU.

# + id="p2g1lEnkOXg5"
# Pin CUDA-build torch versions matching the torch_xla 1.7 wheel installed below.
# !pip install -qqq --no-cache-dir torch==1.7.1+cu101 torchvision==0.8.2+cu101 torchtext==0.8.0 -f https://download.pytorch.org/whl/torch_stable.html

# + id="d_0If38OHMbr"
#hide_input
#colab
import os
# Fail fast when the notebook is not running on a Colab TPU runtime.
assert os.environ['COLAB_TPU_ADDR'], 'Make sure to select TPU from Edit > Notebook settings > Hardware accelerator'

# + [markdown] id="29dsMCEVHMb_"
# Install fastai2 and the fastai_xla_extensions packages

# + id="ZbPDbj0oaBoO"
# !pip install -Uqq fastcore --upgrade

# + id="6UVX0iopHMbx"
#hide
#colab
# !curl -s https://course19.fast.ai/setup/colab | bash

# + id="SDUSw4PFHMcA"
# !pip install -Uqq fastai==2.3.0

# + id="KELFH2r4HMb4"
# !pip install -Uqq cloud-tpu-client==0.10 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.7-cp37-cp37m-linux_x86_64.whl

# for nightly versions
# VERSION = "20200325"  #@param ["1.5" , "20200325", "nightly"]
# # !curl https://raw.githubusercontent.com/pytorch/xla/master/contrib/scripts/env-setup.py -o pytorch-xla-env-setup.py
# # !python pytorch-xla-env-setup.py --version $VERSION

# + id="qsnrEAeiHMcG"
# !pip install -Uqq fastai_xla_extensions
# # !pip install -Uqq git+https://github.com/butchland/fastai_xla_extensions.git

# + id="7TSmVCkWgICi"
# !pip freeze | grep torch
# !pip freeze | grep fast

# + [markdown] id="UcLKp-m0HMcV"
# ### Import the libraries
# Import the fastai2 and fastai_xla_extensions libraries

# + id="XkVauzS0HMcd"
#colab
from fastai.vision.all import *
from fastai_xla_extensions.all import *

# + [markdown] id="O-5CdtT2HMcm"
# ### Example
# Build a MNIST classifier -- adapted from fastai course [Lesson 4 notebook](https://github.com/fastai/course-v4/blob/master/nbs/04_mnist_basics.ipynb)

# + [markdown] id="OI564wSCHMcq"
# Load MNIST dataset

# + id="f7V3P4bpHMcs"
path = untar_data(URLs.MNIST)

# + id="v8DG9UGJHMcz"
#hide
Path.BASE_PATH = path

# + id="9Ki0w-i2imL2"
path.ls()

# + [markdown] id="5pXtb0u2HMc3"
# Create Fastai DataBlock
#
#
# _Note that batch transforms are currently
# set to none as they seem to slow the training
# on the TPU (for investigation)._

# + id="YspZqFB4HMc5"
datablock = DataBlock(
    blocks=(ImageBlock,CategoryBlock),
    get_items=get_image_files,
    splitter=GrandparentSplitter(train_name='training',valid_name='testing'),
    get_y=parent_label,
    item_tfms=Resize(28),
    batch_tfms=[*aug_transforms(do_flip=False, min_scale=0.9), Normalize.from_stats(*imagenet_stats)]
)

# + id="ybNFQ98YHMc9"
#colab
# datablock.summary(path)

# + [markdown] id="GbjmFmFJHMdB"
# Create the dataloader

# + id="siIb-eqcHMdC"
dls = datablock.dataloaders(path,bs=256)

# + id="47hpELWTHMdH"
dls.device

# + id="euAYHv4zHMdL"
dls.show_batch()

# + [markdown] id="_09Ilvp5HMdP"
# Create a Fastai CNN Learner

# + id="BG2ew3YfHMdR"
learner = cnn_learner(dls, resnet18, metrics=accuracy, concat_pool=False)

# + id="pQ4lp95b_Ezf"
# Move the learner (model + dataloaders) to the XLA/TPU device.
learner.to_xla()

# + id="mgZR8Nz6FV15"
learner.dls.device

# + id="9y65hL2-olxf"
learner.xla_opt

# + id="5t0VEz_diyQF"
# learner.fit(1,1e-3)

# + [markdown] id="xDfjq0z3HMdU"
# Using the `lr_find` works

# + id="JHok2FWTHMdW"
#colab
learner.lr_find()

# + [markdown] id="oAnRYPZoHMdb"
# Fine tune model

# + id="GNeUgzFXHMdc"
#colab
learner.fine_tune(3,freeze_epochs=3, base_lr=1e-3)

# + [markdown] id="2yaPvcZEHMdg"
# Unfreeze the model

# + id="zPWfQMzgHMdh"
#colab
learner.unfreeze()

# + [markdown] id="HgWpfg1KHMdm"
# Run the LR Finder again.

# + id="At3Z1MoNHMdn"
#colab
learner.lr_find()

# + [markdown] id="sbIG1OArHMdq"
# Further fine-tuning

# + id="lGz4fgUdHMds"
#colab
learner.fit_one_cycle(1,slice(1e-5))

# + id="AdogJJXIHMdw"
#hide
#colab
learner.save('stage-1')

# + id="3mnx2DOkHMdz"
#hide
#colab
learner.load('stage-1')

# + [markdown] id="RMXK_lu5HMd4"
# Model params are using TPU

# + id="r8noSy1iHMd5"
#colab
one_param(learner.model).device

# + [markdown] id="Cq5kT-Qb-r1j"
# Dataloader device is None _(DeviceMoverTransform)_ is actually the one moving the batch input to the TPU

# + id="U9ApCcZm-l2p"
#colab
learner.dls.device is None

# + id="BqGFwCrz-6mk"
#colab
learner.dls.train.after_batch.fs

# + id="Xdb3qz7zMtBU"
learner.recorder.plot_loss()

# + id="uClF6e_DHMd9"
#colab
learner.recorder.plot_sched()

# + [markdown] id="YWDGI0REHMeK"
# Plot loss seems to be working fine.

# + id="pdpFbBy1QX_z"
interp = ClassificationInterpretation.from_learner(learner)

# + id="H5gQkQb7QeRI"
interp.plot_confusion_matrix()

# + id="uqNCkdOFQqrW"
interp.most_confused(4)

# + id="9AokGWUBQ1Cm"
interp.plot_top_losses(9)

# + id="ZvhT4AJaQ74e"
samples/MNIST_TPU_demo.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3.8.3 64-bit
#     name: python383jvsc74a57bd0a70ade87c18bf9df7701d35ff9479870395d7462b82e94d46211d02695a62577
# ---

# # Numpy ufuncs
# A universal function is a function that operates on ndarrays in an element-by-element fashion.
# A ufunc is a “vectorized” wrapper for a function that takes a fixed number of specific inputs and produces a fixed number of specific outputs.

# +
from typing import Iterable

import numpy as np

np.random.seed(0)
# -

# ## Native Python Loops

def reciprocal(values: Iterable[float]) -> Iterable[float]:
    """Element-wise 1/x computed with an explicit Python loop.

    Deliberately naive: it is the slow baseline that the %timeit cells
    below compare against the vectorized ufunc (1.0 / values).
    """
    output = np.empty(len(values))
    for i in range(len(values)):
        output[i] = 1.0 / values[i]
    return output

small_array = np.random.randint(low=1, high=10, size=5)
print(reciprocal(small_array))

# | Multiple of a second | Unit | Symbol | Definition | Comparative examples & common units |
# |-|-|-|-|-|
# | 10−9 | 1 nanosecond | ns | One billionth of one second | 1 ns: Time to execute one machine cycle by a 1 GHz microprocessor 1 ns: Light travels 30 cm |
# | 10−6 | 1 microsecond | µs | One millionth of one second | 1 µs: Time to execute one machine cycle by an Intel 80186 microprocessor 2.2 µs: Lifetime of a muon 4–16 µs: Time to execute one machine cycle by a 1960s minicomputer |
# | 10−3 | 1 millisecond | ms | One thousandth of one second | 1 ms: time for a neuron in human brain to fire one impulse and return to rest 4–8 ms: typical seek time for a computer hard disk |

# +
small_array = np.random.randint(low=1, high=10, size=5)
# %timeit reciprocal(small_array)

# +
big_array = np.random.randint(low=1, high=10, size=100_000)
# %timeit reciprocal(big_array)
# -

# rough speedup estimate from the hand-copied timings above
200*1000 / 11.8

# ## Introducing UFuncs

print(reciprocal(small_array))
print(1.0 / small_array)

# %timeit (1.0 / big_array)

# %timeit np.reciprocal(big_array)

200*1000 / 103  # Speedup

# +
x = np.arange(4)
print(x)
# -

print(x + 2)
print(x - 2)
print(x * 2)
print(x / 2)

print(x + 2.0)
print(x - 2.0)
print(x * 2.0)
print(x / 2.0)

print(np.add(x, 2))

# | Name | Description |
# |-|-|
# | add | Add arguments element-wise. |
# | subtract | Subtract arguments, element-wise. |
# | multiply | Multiply arguments element-wise. |
# | matmul | Matrix product of two arrays. |
# | divide | Returns a true division of the inputs, element-wise. |
# | negative | Numerical negative, element-wise. |
# | positive | Numerical positive, element-wise. |
# | power | First array elements raised to powers from second array, element-wise. |
# | mod | Return element-wise remainder of division. |
# | absolute | Calculate the absolute value element-wise. |
# | fabs | Compute the absolute values element-wise. |
# | sign | Returns an element-wise indication of the sign of a number. |
# | exp | Calculate the exponential of all elements in the input array. |
# | log | Natural logarithm, element-wise. |
# | sqrt | Return the non-negative square-root of an array, element-wise. |
# | square | Return the element-wise square of the input. |
# | reciprocal | Return the reciprocal of the argument, element-wise. |
# | gcd | Returns the greatest common divisor of \|x1\| and \|x2\| |
# | lcm | Returns the lowest common multiple of \|x1\| and \|x2\| |

x = np.array([-2, -1, 0, 1, 2])

abs(x)

print(np.abs(x))
print(np.fabs(x))

# +
x = np.array([-2, -1, 0, 1, 2], dtype=np.float32)

print(np.abs(x))
print(np.fabs(x))
# -

# ### Trigonometric functions

# | Name | Description |
# |-|-|
# | sin | Trigonometric sine, element-wise. |
# | cos | Cosine element-wise. |
# | tan | Compute tangent element-wise. |

# +
theta = np.linspace(0, 2.0 * np.pi, 7)
print(theta)
# -

print(np.sin(theta))
print(np.cos(theta))

# ### Bit-twiddling functions

# | Name | Description |
# |-|-|
# | bitwise_and | Compute the bit-wise AND of two arrays element-wise. |
# | bitwise_or | Compute the bit-wise OR of two arrays element-wise. |
# | bitwise_xor | Compute the bit-wise XOR of two arrays element-wise. |

print(np.bitwise_and(14, 13))
# 14 & 13 == 12; show its binary representation
print(np.binary_repr(12))

print(np.bitwise_and([14, 3], 13))

# ### Comparison functions

# | Name | Description |
# |-|-|
# | greater | Return the truth value of (x1 > x2) element-wise. |
# | greater_equal | Return the truth value of (x1 >= x2) element-wise. |
# | less | Return the truth value of (x1 < x2) element-wise. |
# | less_equal | Return the truth value of (x1 <= x2) element-wise. |
# | not_equal | Return (x1 != x2) element-wise. |
# | equal | Return (x1 == x2) element-wise. |

print(np.greater([4, 2], [2, 2]))

# +
a = np.array([4, 2])
b = np.array([2, 2])

print(a > b)
2_Numpy/03_Ufuncs.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: conda_braket
#     language: python
#     name: conda_braket
# ---

# + id="HoLmJLkIX810"
# # %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import to_categorical

# + [markdown] id="vZFNOwFXoY8N"
# # Loading Raw Data

# + id="LwP9RGQpgLhg"
# every feature in the dataset (column index -> name)
header_list = {
    0: 'label',
    1: 'lepton 1 pT',
    2: 'lepton 1 eta',
    3: 'lepton 1 phi',
    4: 'lepton 2 pT',
    5: 'lepton 2 eta',
    6: 'lepton 2 phi',
    7: 'missing energy magnitude',
    8: 'missing energy phi',
    9: 'MET_rel',
    10: 'axial MET',
    11: 'M_R',
    12: 'M_TR_2',
    13: 'R',
    14: 'MT2',
    15: 'S_R',
    16: 'M_Delta_R',
    17: 'dPhi_r_b',
    18: 'cos(theta_r1)'
}

# best features according to reference [1]
important_features = [
    'lepton 1 pT', 'lepton 2 pT', 'missing energy magnitude',
    'M_TR_2', 'M_Delta_R', 'lepton 1 eta', 'lepton 2 eta'
]

# column positions (in the label-less feature matrix) of the features above
important_features_id = np.array([
    0, 3, 6, 11, 15, 1, 4
])

# +
num_sample = 5000

X_train = np.loadtxt('./SUSY Dataset/X_train_' + str(num_sample) + '.txt')
Y_train = np.loadtxt('./SUSY Dataset/Y_train_' + str(num_sample) + '.txt')
X_test = np.loadtxt('./SUSY Dataset/X_test_' + str(num_sample) + '.txt')
Y_test = np.loadtxt('./SUSY Dataset/Y_test_' + str(num_sample) + '.txt')

print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)

# + [markdown] id="F_4AcCtNsC8o"
# # Dataset Preprocessing (Standardization + PCA)

# + [markdown] id="QP-6XdVPsfxC"
# ## Standardization

# + id="EDtzCvTur6BR"
def normalize(X, use_params=False, params=None):
    """Standardize the dataset X to zero mean and unit variance.

    Args:
        X: 2-d array, rows are samples, columns are features.
        use_params: when True, reuse previously computed statistics
            from `params` instead of computing them from X (e.g. to
            apply training-set statistics to a test set).
        params: sequence (mean, std); required when use_params=True.

    Returns:
        (Xbar, mean, std): the standardized data and the statistics
        actually used.

    Note:
        1e-8 is added to std so zero-variance dimensions do not
        produce NaNs in the division.
    """
    if use_params:
        # BUGFIX: this branch previously assigned only `mu` (and a dead
        # `std_filled = [1]`), so `std` was undefined and the division
        # below raised NameError. Reuse both supplied statistics.
        mu, std = params[0], params[1]
    else:
        mu = np.mean(X, axis=0)
        std = np.std(X, axis=0)
    Xbar = (X - mu) / (std + 1e-8)
    return Xbar, mu, std

# + id="Ma2r_EXQsHYJ"
X_train, mu_train, std_train = normalize(X_train)
X_train.shape, Y_train.shape

# + id="p8DrXKvutdFL"
# apply the training-set statistics to the test set (same epsilon as normalize)
X_test = (X_test - mu_train)/(std_train + 1e-8)
X_test.shape, Y_test.shape

# + [markdown] id="BwikC3mysh4L"
# ## PCA

# + id="0AHFK0_lsdzh"
from sklearn.decomposition import PCA
from matplotlib import pyplot as plt
# + id="CzdxRenYstM_"
num_component = 9
pca = PCA(n_components=num_component, svd_solver='full')

# + id="qUHn6-Zvsz2X"
pca.fit(X_train)

# + id="dyj975-ttIbe"
np.cumsum(pca.explained_variance_ratio_)

# + id="g2Cv6r9utNKg"
X_train = pca.transform(X_train)
X_test = pca.transform(X_test)

# + id="ahC8V_QzuDzV"
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)

# + [markdown] id="CoGE5F_Gc7Bp"
# ## Norm

# + id="00-l6454QeXB"
# rescale every sample (row) to unit Euclidean norm
X_train = (X_train.T / np.sqrt(np.sum(X_train ** 2, -1))).T
X_test = (X_test.T / np.sqrt(np.sum(X_test ** 2, -1))).T

# + id="voxkEj1BuJB6"
plt.scatter(X_train[:100, 0], X_train[:100, 1])
plt.scatter(X_train[100:200, 0], X_train[100:200, 1])
plt.scatter(X_train[200:300, 0], X_train[200:300, 1])

# + [markdown] id="Aah4i8VuoqAI"
# ## Standard Scaling

# + id="2sEElTQSovC7"
# min-max scale each feature into [-1, 1]
X_train = 2*((X_train - np.min(X_train, axis=0))/(np.max(X_train, axis=0) - np.min(X_train, axis=0))) - 1
X_test = 2*((X_test - np.min(X_test, axis=0))/(np.max(X_test, axis=0) - np.min(X_test, axis=0))) - 1

# + [markdown] id="q-9-xvgMYnaj"
# ## Best Features

# + id="fYMoTPuZAyWv"
# NOTE(review): important_features_id refers to columns of the ORIGINAL
# feature matrix, but at this point X has been PCA-transformed, so these
# indices select principal components, not the named features — verify
# this is intentional.
X_train = X_train[:, important_features_id[:6]]
X_test = X_test[:, important_features_id[:6]]
X_train.shape, X_test.shape

# + [markdown] id="ytKzDR1JuKub"
# # Quantum

# + id="J8PDpZ5IuM0l"
import pennylane as qml
from pennylane import numpy as np
from pennylane.optimize import AdamOptimizer, GradientDescentOptimizer

qml.enable_tape()

# Set a random seed
np.random.seed(42)

# + id="UvNlkVfXFyHE"
# Define output labels as quantum state vectors
# def density_matrix(state):
#     """Calculates the density matrix representation of a state.
#     Args:
#         state (array[complex]): array representing a quantum state vector
#     Returns:
#         dm: (array[complex]): array representing the density matrix
#     """
#     return state * np.conj(state).T

label_0 = [[1], [0]]
label_1 = [[0], [1]]

def density_matrix(state):
    """Calculates the density matrix representation of a state.

    Args:
        state (array[complex]): array representing a quantum state vector

    Returns:
        dm: (array[complex]): array representing the density matrix
    """
    return np.outer(state, np.conj(state))

state_labels = [label_0, label_1]
#state_labels = np.loadtxt('./tetra_states.txt', dtype=np.complex_)

# + id="CqFqFejff6Lq"
dm_labels = [density_matrix(state_labels[i]) for i in range(2)]
len(dm_labels)

# + id="vPoezRrsf6Lq"
dm_labels

# + id="OiWplucFf6Lt"
binary_class = np.array([[1, 0], [0, 1]])
class_labels = binary_class

# + id="puDSt_FZWotb"
num_fc_layer = 5
#params_fix = np.random.uniform(size=(2, num_fc_layer, 6))

# +
device_arn = "arn:aws:braket:::device/quantum-simulator/amazon/sv1"
device_rigetti = "arn:aws:braket:::device/qpu/rigetti/Aspen-9"

# Please enter the S3 bucket you created during onboarding
# (or any other S3 bucket starting with 'amazon-braket-' in your account) in the code below
my_bucket = f"amazon-braket-edo"  # the name of the bucket
my_prefix = "edo-testing"  # the name of the folder in the bucket
s3_folder = (my_bucket, my_prefix)

# + id="2WqqMVHbF2B8"
n_samples = 10
n_qubits = n_samples # number of class

#dev_fc = qml.device("default.qubit", wires=n_qubits)
#layer_id = 4
#dev_braket_local = qml.device("braket.local.qubit", wires=1)
#dev_braket_aws = qml.device('braket.aws.qubit', device_arn=device_arn, wires=1, s3_destination_folder=s3_folder)
dev_rigetti_aws = qml.device('braket.aws.qubit', device_arn=device_rigetti, shots=10000, wires=n_qubits, s3_destination_folder=s3_folder)

@qml.qnode(dev_rigetti_aws)
def q_fc(params, inputs):
    """A variational quantum circuit representing the DRC.

    Each of the (up to n_qubits) input samples is encoded on its own wire
    by a stack of data-re-uploading Rot layers; the circuit returns one
    PauliZ expectation value per wire.

    Args:
        params (array[float]): array of parameters
        inputs = [x, y]
        x (array[float]): 1-d input vector
        y (array[float]): single output state density matrix

    Returns:
        float: fidelity between output state and input
    """
    #print(len(inputs))
    #print(len(params[0]))
    #print(int(len(inputs[0])/3))
    # data sample iteration
    for data in range(len(inputs)):
        # layer iteration
        for l in range(len(params[0])):
            # qubit iteration
            for q in range(1):
                # gate iteration: three input entries feed each Rot gate
                for g in range(int(len(inputs[0])/3)):
                    qml.Rot(*(params[0][l][3*g:3*(g+1)] * inputs[data][3*g:3*(g+1)] + params[1][l][3*g:3*(g+1)]), wires=data)
    #return [qml.expval(qml.Hermitian(dm_labels[i%2], wires=[i])) for i in range(n_qubits)]
    return [qml.expval(qml.PauliZ(wires=i)) for i in range(n_qubits)]
    #return [qml.expval(qml.Identity(wires=i)) for i in range(n_qubits)]
    #return qml.probs(wires=[i for i in range(n_qubits)])

# + id="jx7U8pzif6Lr"
# parameters of a previously trained model, frozen for QPU evaluation
best_params = np.array([[[ 0.7079311 ,  0.37632585, -0.33214605, -0.13929626, -0.7721329 , -0.5372071 ],
        [-0.29678243,  0.01558876, -0.24696895,  0.17431983, -0.12074132,  0.37094465],
        [-0.40574065, -0.04427808,  1.0555736 , -0.81905687, -1.0446428 ,  0.05147649],
        [-0.49339634, -0.57406455,  0.50634474, -0.4733748 ,  0.29814383,  0.3295614 ],
        [ 1.2720922 ,  0.2524813 ,  0.7091267 , -0.12986897,  0.20881489, -0.7476898 ]],
       [[ 0.5376858 , -1.1241286 ,  2.0380347 ,  2.3011663 , -0.61580783,  0.5203443 ],
        [ 0.7615255 , -2.1535773 , -0.9438669 , -0.7446632 , -0.8937272 ,  0.5636517 ],
        [-0.15026595,  1.9657563 , -0.31717998, -0.6812052 , -1.3080239 , -0.09160299],
        [ 0.981377  ,  1.6538823 , -0.38690042,  0.7046689 ,  0.03279989, -0.15615936],
        [-0.532189  , -0.27468985, -0.04790952, -1.2638885 , -0.93331784, -0.3911027 ]]])

alpha = np.array([0.94681466, 1.0617169 ])

print(best_params.shape, alpha.shape)

#result = q_fc(best_params, X_test[Y_test==1][1*n_samples:2*n_samples])
# -

# evaluate the QPU circuit on the test set in batches of 10 samples,
# writing one file per batch and per class
for i in range(100):
    result_0 = q_fc(best_params, X_test[Y_test==0][i*10:(i+1)*10])
    np.savetxt('./QPU Probs/Testing_label0_sample' + str(i) + '.txt', result_0)
    result_1 = q_fc(best_params, X_test[Y_test==1][i*10:(i+1)*10])
    np.savetxt('./QPU Probs/Testing_label1_sample' + str(i) + '.txt', result_1)

# +
# reload the per-batch QPU expectation values into two 1000-entry vectors
z_label_0 = np.zeros((1000,))
z_label_1 = np.zeros((1000,))

for i in range(100):
    temp = np.loadtxt('./QPU Probs/Testing_label0_sample' + str(i) + '.txt')
    z_label_0[i*10:(i+1)*10] = temp
    temp = np.loadtxt('./QPU Probs/Testing_label1_sample' + str(i) + '.txt')
    z_label_1[i*10:(i+1)*10] = temp

# +
# map PauliZ expectations to class-1 fidelities, scaled by alpha[1]
# and clipped into [0, 1]
fidel_1_label_0 = np.clip(alpha[1]*(1 - z_label_0)/2, 0, 1)
fidel_1_label_1 = np.clip(alpha[1]*(1 - z_label_1)/2, 0, 1)

Y_pred_1 = np.concatenate((fidel_1_label_0, fidel_1_label_1), axis=0)
Y_label = np.concatenate((np.zeros((1000,), dtype=int), np.ones((1000,), dtype=int)), axis=0)
# -

from sklearn.metrics import roc_auc_score
roc_auc_score(Y_label, Y_pred_1)

# accuracy with a 0.5 decision threshold
((fidel_1_label_0 <= 0.5).sum() + (fidel_1_label_1 > 0.5).sum())/len(Y_pred_1)

# # Identity

result # test 31, label = 1

result # test 31 next, label = 1

result # test 31, label = 0

result # test 31 next, label = 0

result.shape

# ### Pauli Z

result # test 31, label = 1

result # test 31 next, label = 1

result # test 31, label = 0

result # test 31 next, label = 0

# +
# <NAME>:
# NOTE(review): this reassigns z_label_0 with hard-coded values,
# shadowing the QPU results loaded above.
z_label_0 = np.array([ 0.2052,  0.3892, -0.4704,  0.221 , -0.2536,  0.197 ,
        0.676 ,  0.6612,  0.621 ,  0.519 , -0.5854,  0.5732,  0.164 ,
       -0.1676,  0.6956,  0.125 ,  0.4456, -0.167 ,  0.4002,  0.6422,
        0.4086,  0.3668,  0.0866, -0.8304,  0.2616,  0.1554,  0.3014,
        0.1458,  0.0584,  0.494 , -0.1356,  0.2262,  0.3538, -0.2864,
        0.2752,  0.465 , -0.1544,  0.3892,  0.466 ,  0.3724,  0.3596,
        0.5772,  0.6406,  0.6022,  0.4172,  0.5914,  0.17  ,  0.3742,
       -0.0356,  0.1512,  0.2936,  0.3358,  0.5774,  0.2392, -0.0954,
-0.273 , 0.4048, 0.5102, 0.3164, 0.0484, 0.271 , 0.601]) z_label_1 = np.array([-0.0988, -0.4582, -0.75 , 0.1434, -0.1888, -0.0612, 0.6464, 0.1216, 0.0044, 0.1916, 0.0478, 0.538 , 0.0216, -0.8128, 0.0892, -0.581 , -0.7138, -0.7486, -0.8212, -0.4582, -0.0726, -0.695 , 0.1646, -0.6738, -0.172 , 0.1552, -0.7156, -0.1188, 0.0474, -0.5956, 0.4076, -0.0098, 0.1326, -0.5032, -0.5214, -0.518 , -0.3026, -0.344 , -0.8128, -0.1418, 0.5018, 0.5552, 0.057 , 0.363 , -0.8772, 0.223 , -0.4446, -0.5378, -0.3864, -0.3078, -0.2358, 0.5728, -0.0344, -0.6686, -0.364 , -0.173 , 0.4984, 0.2596, 0.1948, 0.5128, -0.052 , -0.6116]) z_label_0.shape, z_label_1.shape # - Y_test np.concatenate((((1 + z_label_0)/2).reshape(-1,1), ((1 - z_label_1)/2).reshape(-1,1)), axis=1) np.concatenate((((z_label_0 + 1)/2).reshape(-1,1), ((1 - z_label_1)/2)).reshape(-1,1), axis=1) # # ------------------------------- result.shape result.reshape(5,2) # + def decimalToBinary(n, length): binary = bin(n).replace("0b", "") if len(binary) != length: for i in range(length - len(binary)): binary = "0" + binary return binary # qpu_prob = np.array([0.00023841, 0.00086494, 0.0007704, 0.00279498, 0.00035357, 0.00128275, # 0.00114254, 0.00414509, 0.00815735, 0.02959442, 0.02635975, 0.09563172, # 0.01209772, 0.04388988, 0.03909271, 0.1418262, 0.00034558, 0.00125376, # 0.00111672, 0.0040514, 0.00051252, 0.00185938, 0.00165615, 0.00600841, # 0.01182428, 0.04289785, 0.03820911, 0.13862055, 0.01753595, 0.06361947, # 0.05666585, 0.20558059]) qpu_prob = result qpu_qubit = int(np.log2(len(qpu_prob))) bit_length = qpu_qubit qubit_prob = np.zeros((qpu_qubit, 2)) for i in range(len(qpu_prob)): bit = decimalToBinary(i, bit_length) for j in range(len(bit)): qubit_prob[j, int(bit[j])] += qpu_prob[i] print(qubit_prob) # - alpha*qubit_prob np.sum(qubit_prob, axis=1) # + andrei_qubit = int(np.log2(len(andrei))) bit = decimalToBinary(30, andrei_qubit) bit # - int(bit[4]) # + executionInfo={"elapsed": 50040, "status": "ok", "timestamp": 
1614176434506, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="mAF1ZCCkf6Lu" from keras import backend as K # Alpha Custom Layer class class_weights(tf.keras.layers.Layer): def __init__(self): super(class_weights, self).__init__() w_init = tf.random_normal_initializer() self.w = tf.Variable( initial_value=w_init(shape=(1, 2), dtype="float32"), trainable=True, ) def call(self, inputs): return (inputs * self.w) # + executionInfo={"elapsed": 49503, "status": "ok", "timestamp": 1614176436092, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="epPDK5i9hOGH" n_component = 6 X = tf.keras.Input(shape=(n_component,), name='Input_Layer') # Quantum FC Layer, trainable params = 18*L*n_class + 2, output size = 2 num_fc_layer = 5 q_fc_layer_0 = qml.qnn.KerasLayer(q_fc, {"params": (2, num_fc_layer, n_component)}, output_dim=2)(X) # Alpha Layer alpha_layer_0 = class_weights()(q_fc_layer_0) model = tf.keras.Model(inputs=X, outputs=alpha_layer_0) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 50040, "status": "ok", "timestamp": 1614176437207, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="u1ozqS33W66k" outputId="b64d1dae-c71a-4bfc-ddd8-8e8ef091d9a9" model(X_train[0:32]) # - model.load_weights('./Best Model/Best6_layer5(layer_id=all)_set5000_saved-model-08.hdf5') model.get_weights() # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 48376, "status": "ok", "timestamp": 1614176437208, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="HDAO9VoF_7Wj" outputId="8de36e3e-99da-4593-a18f-7b671f9a8ce6" model.summary() # + executionInfo={"elapsed": 722, "status": "ok", "timestamp": 1614112963269, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="-EwdaREVAFS4" #model.get_layer('keras_layer_2').set_weights([params_fix]) # + executionInfo={"elapsed": 498, "status": "ok", "timestamp": 1614112963521, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="YE2m-zLeNVWD" opt = tf.keras.optimizers.Adam(learning_rate=0.05) model.compile(opt, loss='mse', metrics=["accuracy"]) # + executionInfo={"elapsed": 746, "status": "ok", "timestamp": 1614112979452, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="Je4XMuzff6Lx" filepath = "./Model/Best6_layer5_set5000_20epoch_saved-model-{epoch:02d}.hdf5" checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath, monitor='val_acc', verbose=1, save_weights_only=True, save_best_only=False, mode='auto') # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 4672116, "status": "ok", "timestamp": 1614130475968, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="FA6P7sl9f6Lx" outputId="7b479355-33b0-4304-d669-5cd35847ba6f" H = model.fit(X_train, to_categorical(Y_train), epochs=20, batch_size=128, initial_epoch=0, validation_data=(X_test, to_categorical(Y_test)), verbose=1, 
callbacks=[checkpoint]) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1467, "status": "ok", "timestamp": 1614095915268, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="Q3gtpjc3u3JI" outputId="3fc8d729-6f77-41d4-c17c-f152c0afbc89" model.get_weights() # + executionInfo={"elapsed": 916, "status": "ok", "timestamp": 1614083799794, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="LfrS-Yewu3Q4" params_fix = np.array([[[ 0.7079311 , 0.5172549 , -0.9970402 , -0.07831129, -0.9837008 , -0.29626024], [ 0.34591857, 0.34577498, -0.17645611, 0.33760414, -0.69614476, 0.06621677], [ 0.85546058, 0.70365786, 0.47417383, 0.09783416, 0.49161588, 0.47347177], [ 0.17320187, 0.43385165, 0.39850473, 0.6158501 , 0.63509365, 0.04530401], [ 0.37461261, 0.62585992, 0.50313626, 0.85648984, 0.65869363, 0.16293443]], [[ 0.5376858 , -1.4710503 , 1.9514682 , 2.2145982 , -0.42923906, 0.31407434], [ 0.55525535, -1.8778402 , -0.7275799 , -0.52837616, -1.497912 , 0.46619168], [ 0.96119056, 0.90535064, 0.19579113, 0.0693613 , 0.100778 , 0.01822183], [ 0.09444296, 0.68300677, 0.07118865, 0.31897563, 0.84487531, 0.02327194], [ 0.81446848, 0.28185477, 0.11816483, 0.69673717, 0.62894285, 0.87747201]]]) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1026, "status": "ok", "timestamp": 1614085608956, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="UApKcfvPu3Lt" outputId="d9c7a987-5bc2-4338-f435-2db544b603b3" params_layer_0 = np.array([[ 0.7079311 , 0.5172549 , -0.9970402 , -0.07831129, -0.9837008 , -0.29626024], [ 0.5376858 , 
-1.4710503 , 1.9514682 , 2.2145982 , -0.42923906, 0.31407434]]) params_layer_1 = np.array([[ 0.34591857, 0.34577498, -0.17645611, 0.33760414, -0.69614476, 0.06621677], [ 0.55525535, -1.8778402 , -0.7275799 , -0.52837616, -1.497912 , 0.46619168]]) params_layer_2 = np.array([[-0.91766816, -0.01087031, 1.1789958 , -0.5871541 , -2.0162444 , -0.2134564 ], [-0.24772605, 1.9002616 , -0.3421518 , -0.70617735, -1.1227049 , -0.03500864]]) params_layer_3 = np.array([[-0.9082992 , -0.6364117 , 0.64269185, -0.0499308 , 0.79538846, 0.08005117], [ 1.0379716 , 1.7597849 , -0.17306192, 0.918507 , 0.37375253, -0.0367587 ]]) params_layer_4 = np.array([[ 0.3021399 , 0.11904235, 0.9860843 , 0.06097979, 0.01730645, -0.7476898 ], [-0.4127887 , 0.09943755, 0.06011434, -1.1558647 , -1.1234 , -0.3911027 ]]) params_layer_0.shape, params_layer_1.shape, params_layer_2.shape, params_layer_3.shape, params_layer_4.shape # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 599, "status": "ok", "timestamp": 1614085608957, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="IQt0amPKu3OV" outputId="7a1bfd50-2b69-463e-cb9a-610d6a9e04e7" params_fix[:, 0, :] = params_layer_0 params_fix[:, 1, :] = params_layer_1 params_fix[:, 2, :] = params_layer_2 params_fix[:, 3, :] = params_layer_3 params_fix[:, 4, :] = params_layer_4 params_fix.shape # + id="4D9AqesLDP9H" # + id="04KTmFA4DP_q" # + executionInfo={"elapsed": 929, "status": "ok", "timestamp": 1614099127401, "user": {"displayName": "<NAME>", "photoUrl": "https://<KEY>", "userId": "03770692095188133952"}, "user_tz": -420} id="X6ZwTvYuVt1A" model.load_weights('./Model_Layerwise/Best6_layer5(layer_id=all)_set5000_saved-model-08.hdf5') # + id="WsGbB_MQnBq-" model.load_weights('./Model/Best6_layer5_set5000_20epoch_saved-model-20.hdf5') # + colab={"base_uri": "https://localhost:8080/"} 
executionInfo={"elapsed": 929, "status": "ok", "timestamp": 1614099132903, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="Xo9fL3LDIV7H" outputId="40515a44-ff6d-4046-8c85-afd9cb9bbca8" model.get_weights() # + executionInfo={"elapsed": 1823, "status": "ok", "timestamp": 1614176621728, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="ETlX7VgEf6MB" from sklearn.metrics import roc_auc_score # + executionInfo={"elapsed": 513255, "status": "ok", "timestamp": 1614177134888, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="DsJh2RXaQDe0" Y_pred_train = model.predict(X_train) Y_pred_test = model.predict(X_test) # + executionInfo={"elapsed": 1994, "status": "ok", "timestamp": 1614182533251, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="85jL2HwGduVn" model.load_weights('./Model/Best6_layer5_set5000_saved-model-10.hdf5') # + executionInfo={"elapsed": 121127, "status": "ok", "timestamp": 1614180291549, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="o2kdTMA8nKnB" Y_pred_train_10epoch = model.predict(X_train) Y_pred_test_10epoch = model.predict(X_test) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1554, "status": "ok", "timestamp": 1614182588542, "user": {"displayName": "<NAME>", "photoUrl": 
"https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="oju5zAx9x3_Q" outputId="323d407e-cd74-4adf-ca06-970f96a9bd8f" Y_pred_train_10epoch # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1373, "status": "ok", "timestamp": 1614182596891, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="-Ya_6wuox6e4" outputId="a063399b-ba44-480b-99cb-806bc35faa3e" Y_pred_train # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 709, "status": "ok", "timestamp": 1614182727759, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="BiS4o8EpyBHO" outputId="bdb206c5-3aa2-4450-e8c2-43fb84c867e8" Y_pred_train_all = np.concatenate((Y_pred_train, Y_pred_train_10epoch), axis=1) Y_pred_test_all = np.concatenate((Y_pred_test, Y_pred_test_10epoch), axis=1) Y_pred_train_all.shape, Y_pred_test_all.shape # + executionInfo={"elapsed": 1314, "status": "ok", "timestamp": 1614184417673, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="dBIhN2323bPn" def check_pred(pred_1, pred_2): class_pred_1 = np.argmax(pred_1, axis=1) class_pred_2 = np.argmax(pred_2, axis=1) return class_pred_1 == class_pred_2, class_pred_1, class_pred_2 # + executionInfo={"elapsed": 1354, "status": "ok", "timestamp": 1614185472670, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="oiDAMBGG3ruH" pred_train_is_same, pred_1, pred_2 = 
check_pred(Y_pred_train, Y_pred_train_10epoch) pred = np.zeros((len(Y_pred_train),)) for i in range(len(pred)): if pred_train_is_same[i]: if pred_1[i] == 0: pred[i] = min(1-Y_pred_train[i,0], Y_pred_train[i,1], 1-Y_pred_train_10epoch[i,0], Y_pred_train_10epoch[i,1]) else: pred[i] = max(1-Y_pred_train[i,0], Y_pred_train[i,1], 1-Y_pred_train_10epoch[i,0], Y_pred_train_10epoch[i,1]) else: prob_1 = max(Y_pred_train[i,1], Y_pred_train_10epoch[i,1]) prob_0 = max(Y_pred_train[i,0], Y_pred_train_10epoch[i,0]) if prob_0 >= prob_1: pred[i] = min(1-Y_pred_train[i,0], Y_pred_train[i,1], 1-Y_pred_train_10epoch[i,0], Y_pred_train_10epoch[i,1]) else: pred[i] = max(1-Y_pred_train[i,0], Y_pred_train[i,1], 1-Y_pred_train_10epoch[i,0], Y_pred_train_10epoch[i,1]) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 976, "status": "ok", "timestamp": 1614185472671, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="IWWbtvEt7g0i" outputId="9b487413-1d81-47ec-ccb3-b81d3d99a62c" roc_auc_score(Y_train, pred) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 750, "status": "ok", "timestamp": 1614185479755, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="PJwhEjFa85Sd" outputId="a9a5b5a9-78f0-4c7a-b361-ab6a883df103" np.sum((pred > 0.5) == Y_train)/len(Y_train) # + executionInfo={"elapsed": 1368, "status": "ok", "timestamp": 1614185360672, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="yOwTv8eH8Or8" pred_test_is_same, pred_1, pred_2 = check_pred(Y_pred_test, Y_pred_test_10epoch) pred = np.zeros((len(Y_pred_test),)) for i in 
range(len(pred)): if pred_test_is_same[i]: if pred_1[i] == 0: pred[i] = min(1-Y_pred_test[i,0], Y_pred_test[i,1], 1-Y_pred_test_10epoch[i,0], Y_pred_test_10epoch[i,1]) else: pred[i] = max(1-Y_pred_test[i,0], Y_pred_test[i,1], 1-Y_pred_test_10epoch[i,0], Y_pred_test_10epoch[i,1]) else: prob_1 = max(Y_pred_test[i,1], Y_pred_test_10epoch[i,1]) prob_0 = max(Y_pred_test[i,0], Y_pred_test_10epoch[i,0]) if prob_0 >= prob_1: pred[i] = min(1-Y_pred_test[i,0], Y_pred_test[i,1], 1-Y_pred_test_10epoch[i,0], Y_pred_test_10epoch[i,1]) else: pred[i] = max(1-Y_pred_test[i,0], Y_pred_test[i,1], 1-Y_pred_test_10epoch[i,0], Y_pred_test_10epoch[i,1]) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 863, "status": "ok", "timestamp": 1614185367012, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="SvZe9_RX76R3" outputId="f57f65d8-ecf2-4e9a-af55-ddb6187b0ac6" roc_auc_score(Y_test, pred) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1272, "status": "ok", "timestamp": 1614185464735, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="cHXhqIUq8nxU" outputId="2153f9c1-0597-4755-e3cb-d58dad0ba43a" np.sum((pred > 0.5) == Y_test)/len(Y_test) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1985, "status": "ok", "timestamp": 1614182577426, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="ZI-hifztttNr" outputId="3e2b2c01-aa4c-4e3a-be79-9e18f0f4df51" print(np.sum(np.argmax(Y_pred_train, axis=1) == Y_train)/len(Y_train)) print(np.sum(np.argmax(Y_pred_test, axis=1) == Y_test)/len(Y_test)) # + 
colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1197, "status": "ok", "timestamp": 1614185524069, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="dbu3k61-QcfY" outputId="bdd64b86-9d02-4abb-b0e9-b59dd2d716f6" roc_auc_score(Y_train, Y_pred_train[:,1]) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 730, "status": "ok", "timestamp": 1614185525509, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="bdwzSw2IQ0dO" outputId="d0246a69-4814-4656-c7e7-1d5a66f64f76" roc_auc_score(Y_test, Y_pred_test[:,1]) # + executionInfo={"elapsed": 1207, "status": "ok", "timestamp": 1614185626286, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="1mshtwhDp6ro" pred = np.zeros((len(Y_pred_train),)) for i in range(len(pred)): id = np.argmax(Y_pred_train[i, :]) if id == 0: pred[i] = min(Y_pred_train[i, 1], 1-Y_pred_train[i, 0]) elif id == 1: pred[i] = max(Y_pred_train[i, 1], 1-Y_pred_train[i, 0]) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1232, "status": "ok", "timestamp": 1614185628275, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="73fKab8Gv_N8" outputId="104f59c4-cb02-4dd1-b45b-26d2db04e184" roc_auc_score(Y_train, pred) # + executionInfo={"elapsed": 1534, "status": "ok", "timestamp": 1614179716871, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": 
"03770692095188133952"}, "user_tz": -420} id="-kaD47jswBzv" pred = np.zeros((len(Y_pred_test),)) for i in range(len(pred)): id = np.argmax(Y_pred_test[i, :]) if id == 0: if (1 - Y_pred_test[i, 0]) < Y_pred_test[i, 1]: pred[i] = (1 - Y_pred_test[i, 0]) else: pred[i] = Y_pred_test[i, 1] elif id == 1: if Y_pred_test[i, 1] > (1 - Y_pred_test[i, 0]): pred[i] = Y_pred_test[i, 1] else: pred[i] = (1 - Y_pred_test[i, 0]) # + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1909, "status": "ok", "timestamp": 1614179717605, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Ggpw7xw-lyk6u6l92QjpI7MlI7qjJuuciCpwrUd=s64", "userId": "03770692095188133952"}, "user_tz": -420} id="tBzzs61Kw3JE" outputId="656840ed-bc64-42cd-d17c-9d786b3f042d" roc_auc_score(Y_test, pred) # + id="IIsG_RUdw453"
BestEdu/Layerwise DRC Keras SUSY Real Hardware.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python # name: python3 # --- # ## Data Science Academy - Python Fundamentos - Capítulo 4 # # ### Download: http://github.com/dsacademybr # ### Expressões Lambda # + # Definindo uma função, que neste caso terá 3 linhas de código def potencia(num): result = num * 2 return result # Solicitando a execução da função potencia(2) # + # Definindo uma função, que neste caso terá 2 linhas de código def potencia(num): return num ** 2 # Solicitando a execução da função potencia(2) # + # Definindo uma função, que neste caso terá 1 linha de código def potencia(num): return num ** 2 # Solicitando a execução da função potencia(2) # + # Definindo uma expressão lambda # Notar que o primeiro num é a entrada, e o segundo num, que tem a expressão (num ** 2) é a saída da expressão potencia = lambda num: num ** 2 # Solicitando a execução da função potencia(2) # + # Definindo uma expressão lambda # Notar que neste função teremos um retorno booleano, ou seja, true ou false # Faça o teste. 
Troque o número da chamada da função e irá notar os resultados com true ou false # Lembre-se, operadores de comparação retornam boolean, true ou false Par = lambda x: x % 2 == 0 # Solicitando a execução da função Par(3) # + # Definindo uma expressão lambda # Notar que nessa expressão, a entrada é o primeiro (s) e a saída é (s[0]) # Essa expressão irá retornar o primeiro caractere da string que for informado no parâmetro da expressão first = lambda s: s[0] # Solicitando a execução da função first('Python') # + # Definindo uma expressão lambda # Notar que nessa expressão, a entrada é o primeiro (s) e a saída é (s[::-1]) # Essa expressão irá retorna o valor reverso reverso = lambda s: s[::-1] # Solicitando a execução da função reverso('Python') # + # Definindo uma expressão lambda # Notar que nessa expressão, a entrada é o primeiro (x, y) e a saída é (x + y), onde utilizamos mais de uma variável de entrada # Essa expressão irá somar os dois números que informados na chamda da função addNum = lambda x, y: x + y # Solicitando a execução da função addNum(2, 3) # - # ### Fim # # #### Obrigado - Data Science Academy - facebook.com/dsacademybr
trainings/data science academy/python fundamentals for data analysis/2 - loops, conditionals, methods and functions/10 - lambda expressions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import os import astropy import numpy as np from astropy.table import Table from astropy.table import Column import glob import matplotlib.pyplot as plt import pandas as pd from collections import Counter from mpl_toolkits.mplot3d import Axes3D directory = r"C:\Users\20xha\Documents\Caltech\Research\SNID\snid_outputs\\" final_rcf_table = Table.from_pandas(pd.read_hdf("C:/Users/20xha/Documents/Caltech/Research/final_rcf_table.h5")) SEDM_ML_sample = Table.read("C:/Users/20xha/Documents/Caltech/Research/SEDM_ML_sample.ascii", format = "ascii") SEDM_ML_sample.rename_column('col1', 'ZTF_Name') SEDM_ML_sample.rename_column('col2', "Class") SEDM_ML_sample.rename_column('col8', "Version") run_list = os.listdir(r"C:\Users\20xha\Documents\Caltech\Research\SNID\snid_outputs/")[1::] Table_List_Numpy = np.load("SNID_results_all_rlap3.npy", allow_pickle=True) Max = 0 Min = 100 for i in Table_List_Numpy: Temp_Max = np.max(i[1]["rlap"]) Temp_Min = np.min(i[1]["rlap"]) if(Temp_Max > Max): Max = Temp_Max if(Temp_Min < Min): Min = Temp_Min ranges = np.linspace(0, 25, 25) # + ResultsTable_List_both = [] count = 0 for rlap in ranges: for agree in range(0,16): ResultsTable = Table( names=("ZTF_Name", "Both" ), meta={"name": "Spectrum Results after SNID"}, dtype=("U64", "U64" ) ) for j in Table_List_Numpy: row = [] row.append(j[0]) good = j[1][np.where(j[1]["grade"] == "good")] if(np.max(good["rlap"]) > rlap): Top15 = good[0:15] c = Counter(Top15["type"]) if(c.most_common()[0][1] >= agree): row.append(c.most_common()[0][0]) ResultsTable.add_row(row) count += 1 if(len(ResultsTable) != 0): ResultsTable_List_both.append([rlap,agree,ResultsTable]) if(count % 10 == 0): print(count) # - ResultsTable_List_both_numpy = np.asarray(ResultsTable_List_both) 
# Evaluate SNID classifications against the spectroscopic ground truth.
# NOTE(review): this chunk continues from earlier cells -- it relies on
# SEDM_ML_sample, Table_List_Numpy, ResultsTable_List_both_numpy and
# `directory` defined above.

# Cache the (rlap, agreements) grid-search results to disk and reload them.
np.save("ResultsTable_List_both_numpy", ResultsTable_List_both_numpy)
ResultsTable_List_both_numpy = np.load("ResultsTable_List_both_numpy.npy", allow_pickle=True)

len(np.unique(Table_List_Numpy[:, 0]))
len(np.unique(SEDM_ML_sample["ZTF_Name"]))

# Ground-truth table: one classification per ZTF object.
Classification = Table(
    names=("ZTF_Name", "Class", "Version"),
    meta={"name": "Basic ZTF Name Data"},
    dtype=("U64", "U64", "U64"),
)
for i in np.unique(SEDM_ML_sample["ZTF_Name"]):
    # [-1]: keep the last entry per object, i.e. the most recent version.
    row = SEDM_ML_sample["ZTF_Name", "Class", "Version"][
        np.where(i == SEDM_ML_sample["ZTF_Name"])
    ][-1]
    Classification.add_row(row)

# Score every (rlap, agreements) grid point: count Ia/non-Ia agreement
# between the SNID decision ("Both") and the catalogue class.
counters_both = []
for i in ResultsTable_List_both_numpy:
    counter = 0
    wrong = []
    JoinedResults = astropy.table.join(i[2], Classification)
    for j in JoinedResults:
        if j["Class"] != '-':
            correct_1a = "Ia" in j["Class"]
            classified_1a = "Ia" in j["Both"]
            if correct_1a == classified_1a:
                counter += 1
            else:
                wrong.append([j["ZTF_Name"], j["Class"], j["Both"]])
    wrong = np.asarray(wrong)
    counters_both.append([i[0], i[1], counter, len(JoinedResults), wrong])
counters_both = np.asarray(counters_both)

# Best accuracy among grid points with rlap < 20, and the fully-correct points.
low_rlap = np.where(counters_both[:, 0] < 20)
counters_both[low_rlap][np.argmax(counters_both[low_rlap][:, 2] / counters_both[low_rlap][:, 3])]
counters_both[np.where(counters_both[:, 2] == counters_both[:, 3])]

# 3-D view of accuracy over the (rlap, agreements) grid.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot(counters_both[:, 0], counters_both[:, 1], counters_both[:, 2] / counters_both[:, 3])
ax.set_xlabel("rlap")
ax.set_ylabel("agreements")
ax.set_zlabel("accuracy")

# +
# Single pass at the chosen working point: best rlap above 11.5 and at least
# 15 of the top-15 "good" templates agreeing on a type.
count = 0
rlap = 11.5
agree = 15
ResultsTable = Table(
    names=("ZTF_Name", "SNID_Class"),
    meta={"name": "Spectrum Results after SNID"},
    dtype=("U64", "U64"),
)
for j in Table_List_Numpy:
    row = []
    row.append(j[0])
    good = j[1][np.where(j[1]["grade"] == "good")]
    if np.max(good["rlap"]) > rlap:
        Top15 = good[0:15]
        c = Counter(Top15["type"])
        if c.most_common()[0][1] >= agree:
            row.append(c.most_common()[0][0])
            ResultsTable.add_row(row)
    count += 1
    if count % 100 == 0:
        print(count)
# -

# Compare the working-point classifications with the ground truth.
counter = 0  # BUGFIX: counter was never reset here, so it carried a stale
             # value over from the grid-search loop above.
wrong = []
JoinedResults = astropy.table.join(ResultsTable, Classification)
for j in JoinedResults:
    if j["Class"] != '-' and j["Class"] != "0.0":
        correct_1a = "Ia" in j["Class"]
        classified_1a = "Ia" in j["SNID_Class"]
        if correct_1a == classified_1a:
            counter += 1
        else:
            wrong.append([j["ZTF_Name"], j["Class"], j["SNID_Class"]])
wrong = np.asarray(wrong)
JoinedResults.to_pandas().to_csv("SNID_Results.csv")
wrong

# Re-read the per-object SNID ".readableoutput" summary files.
Table_List_Summary = []
count = 0
for i in np.unique(SEDM_ML_sample["ZTF_Name"]):
    row = []
    row.append(i)
    files = glob.glob(directory + i + "\\*.readableoutput")
    if len(files) != 0:
        # NOTE(review): files[1] raises IndexError whenever only one file
        # matches -- confirm whether files[0] was intended here.
        row.append(files[1].split("\\")[-1])
        templates_list = Table.read(files[1], format="ascii")
        Table_List_Summary.append([i, templates_list])
    count += 1
    if count % 100 == 0:
        print(count)
Table_List_Summary = np.asarray(Table_List_Summary)

# Summary classification: the type with the largest template count per object.
count = 0
ResultsTable_Summary = Table(
    names=("ZTF_Name", "SNID_Class"),
    meta={"name": "Spectrum Results after SNID"},
    dtype=("U64", "U64"),
)
for j in Table_List_Summary:
    row = []
    row.append(j[0])
    row.append(j[1]["type"][np.argmax(j[1]["ntemp"])])
    ResultsTable_Summary.add_row(row)
    count += 1
    if count % 100 == 0:
        print(count)

# Score the summary classifications against the ground truth.
wrong = []
counter = 0
total_counter = 0
JoinedResults_Summary = astropy.table.join(ResultsTable_Summary, Classification)
# BUGFIX: this loop previously iterated JoinedResults (the join from the
# previous section) instead of the JoinedResults_Summary computed just above.
for j in JoinedResults_Summary:
    if j["Class"] != '-':
        total_counter += 1
        correct_1a = "Ia" in j["Class"]
        classified_1a = "Ia" in j["SNID_Class"]
        if correct_1a == classified_1a:
            counter += 1
        else:
            wrong.append([j["ZTF_Name"], j["Class"], j["SNID_Class"]])
wrong = np.asarray(wrong)

SEDM_ML_sample[0]

# Best-rlap SNID type per object, joined back onto the full sample.
Rlap_and_SNIDClass = Table(
    names=("ZTF_Name", "SNID_Class", "rlap"),
    meta={"name": "Basic ZTF Name Data"},
    dtype=("U64", "U64", "U64"),
)
for i in Table_List_Numpy:
    good = i[1][np.where(i[1]["grade"] == "good")]
    row = [i[0]]
    best_row = np.argmax(good["rlap"])
    row.append(good[best_row]["type"])
    row.append(good[best_row]["rlap"])
    Rlap_and_SNIDClass.add_row(row)

VersionRlapSNIDClass = astropy.table.join(Classification["ZTF_Name", "Version"], Rlap_and_SNIDClass)
SEDM_ML_sample_VersionRlapSNIDClass = astropy.table.join(SEDM_ML_sample, VersionRlapSNIDClass)
SEDM_ML_sample_VersionRlapSNIDClass.to_pandas().to_csv("SEDM_ML_sample_rlap.csv")
SEDM_ML_sample_VersionRlapSNIDClass
AchievingPerfection.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# ## The Data
#
# We will be using data from a Kaggle data set:
#
# https://www.kaggle.com/harlfoxem/housesalesprediction
#
# #### Feature Columns
#
# * id - Unique ID for each home sold
# * date - Date of the home sale
# * price - Price of each home sold
# * bedrooms - Number of bedrooms
# * bathrooms - Number of bathrooms, where .5 accounts for a room with a toilet but no shower
# * sqft_living - Square footage of the apartment's interior living space
# * sqft_lot - Square footage of the land space
# * floors - Number of floors
# * waterfront - A dummy variable for whether the apartment was overlooking the waterfront or not
# * view - An index from 0 to 4 of how good the view of the property was
# * condition - An index from 1 to 5 on the condition of the apartment
# * grade - An index from 1 to 13, where 1-3 falls short of building construction and design, 7 has an average level of construction and design, and 11-13 have a high quality level of construction and design.
# * sqft_above - The square footage of the interior housing space that is above ground level
# * sqft_basement - The square footage of the interior housing space that is below ground level
# * yr_built - The year the house was initially built
# * yr_renovated - The year of the house's last renovation
# * zipcode - What zipcode area the house is in
# * lat - Latitude
# * long - Longitude
# * sqft_living15 - The square footage of interior housing living space for the nearest 15 neighbors
# * sqft_lot15 - The square footage of the land lots of the nearest 15 neighbors

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline

df = pd.read_csv('../data/kc_house_data.csv')

# # EDA

df.head()
df.info()
df.isnull()
sns.heatmap(df.isnull(), cbar=False)
df.isnull().sum()
df.describe().transpose()

plt.figure(figsize=(10, 6))
sns.distplot(df['price'])

plt.figure(figsize=(10, 6))
sns.countplot(x=df['bedrooms'], data=df)

df.corr()['price'].sort_values(ascending=False)

plt.figure(figsize=(10, 6))
sns.scatterplot(x='price', y='sqft_living', data=df)

plt.figure(figsize=(6, 10))
sns.scatterplot(x='long', y='lat', data=df, hue='price')

df.sort_values('price', ascending=False).head(20)

# By eliminating the top 1% of rows (~216), we can have a proper price distribution.
cutoff = len(df) * 0.01
cutoff

# Taking records from index int(cutoff) to the end, i.e. getting rid of the
# top-priced outliers for accurate visualization.
# BUGFIX: the sentence above was previously a bare (non-comment) line, and the
# offset was hard-coded as 216 instead of being derived from `cutoff`.
df_99_perc = df.sort_values('price', ascending=False)[int(cutoff):]
df_99_perc

plt.figure(figsize=(12, 8))
sns.scatterplot(x='long', y='lat', data=df_99_perc, hue='price', alpha=0.2, palette='RdYlGn')

df.columns
sns.boxplot(x='waterfront', y='price', data=df)

# # Feature Engineering

df.head()
# Drop the sale id -- it carries no predictive signal.
df = df.drop('id', axis=1)
df.info()
# Expand the sale date into numeric year/month features.
df['date'] = pd.to_datetime(df['date'])
df['date']
df['year'] = df['date'].apply(lambda date: date.year)
df['month'] = df['date'].apply(lambda date: date.month)
df.head()

plt.figure(figsize=(10, 6))
sns.boxplot(x='month', y='price', data=df)
df.groupby('month').mean()['price'].plot()
df.groupby('year').mean()['price'].plot()

df = df.drop('date', axis=1)
df.info()
df = df.drop('zipcode', axis=1)
df.info()

# # Scaling and Train Test Split

X = df.drop('price', axis=1).values
y = df['price'].values

from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)

from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
# Fit the scaler on the training data only, to avoid test-set leakage.
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

# +
# 4 hidden layers of 19 units (one per input feature), linear output for price.
model = Sequential()
model.add(Dense(19, activation='relu'))
model.add(Dense(19, activation='relu'))
model.add(Dense(19, activation='relu'))
model.add(Dense(19, activation='relu'))
model.add(Dense(1))

model.compile(optimizer='adam', loss='mse')
# -

model.fit(x=X_train, y=y_train,
          validation_data=(X_test, y_test),
          batch_size=128, epochs=400)

losses = pd.DataFrame(model.history.history)
losses.plot()

from sklearn.metrics import mean_squared_error, mean_absolute_error, explained_variance_score
predictions = model.predict(X_test)
np.sqrt(mean_squared_error(y_test, predictions))
mean_absolute_error(y_test, predictions)
df['price'].describe()
explained_variance_score(y_test, predictions)

plt.figure(figsize=(10, 6))
plt.scatter(y_test, predictions)
plt.plot(y_test, y_test, 'r')

# Sanity-check a single prediction on the first house.
single_house = df.drop('price', axis=1).iloc[0]
single_house = scaler.transform(single_house.values.reshape(-1, 19))
model.predict(single_house)
df.head(1)

# # Retrain Model on the dataset without outliers to check the results

# BUGFIX: df_99_perc was sliced from df *before* 'id' and 'date' were dropped,
# so the original retraining fed a raw datetime column (and the meaningless id)
# into the scaler/model. Apply the same feature engineering as above so the
# feature set matches the first model (19 columns).
df_99_perc['date'] = pd.to_datetime(df_99_perc['date'])
df_99_perc['year'] = df_99_perc['date'].apply(lambda date: date.year)
df_99_perc['month'] = df_99_perc['date'].apply(lambda date: date.month)
df_99_perc = df_99_perc.drop(['id', 'date', 'zipcode'], axis=1)
df_99_perc.head()

X = df_99_perc.drop('price', axis=1).values
y = df_99_perc['price'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

model.fit(x=X_train, y=y_train,
          validation_data=(X_test, y_test),
          batch_size=128, epochs=400)

losses_1 = pd.DataFrame(model.history.history)
losses_1.plot()

predictions = model.predict(X_test)
np.sqrt(mean_squared_error(y_test, predictions))
model.predict(single_house)
df.head(1)

plt.figure(figsize=(10, 6))
plt.scatter(y_test, predictions)
plt.plot(y_test, y_test, 'r')
Tensorflow Keras Regression Project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
import os
import sys

# Make the speech2speech package importable from this notebook location.
os.chdir('/home/ubuntu/speech2speech')
sys.path.append(os.path.abspath(os.path.dirname('/home/ubuntu/speech2speech') + '/..'))

import matplotlib.pyplot as plt
import math
import pandas as pd
from torchviz import make_dot, make_dot_from_trace
import random
from torchsummary import summary
from torchvision.utils import make_grid
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F
import torch.nn as nn
import torch
import umap
from six.moves import xrange
from scipy.signal import savgol_filter
import pathlib
import librosa.display
import librosa
import numpy as np
import json
import argparse

from speech2speech.models.training_utils import *
# -


class speech2speech:
    """Thin serving wrapper around a pretrained speech-to-speech model."""

    # BUGFIX: `self` was missing from the signature, so calling
    # speech2speech() raised TypeError.
    def __init__(self):
        # All inference is done on CPU.
        self._device = "cpu"

    def inference(self, path_to_model, path_audio_input, speaker_id='260'):
        """Load a pretrained checkpoint and run voice conversion.

        params:
            path_to_model: path to a checkpoint file whose 'state_dict' entry
                holds the trained weights
            path_audio_input: path to the audio file to convert
            speaker_id: target speaker identity (default '260')
        """
        # map_location keeps GPU-trained checkpoints loadable on this CPU-only box.
        state_dict = torch.load(path_to_model, map_location=self._device)['state_dict']
        # BUGFIX: load_state_dict() returns a key-matching report, not the
        # model, so the old code stored the wrong object in self._model.
        # NOTE(review): `model` is not defined anywhere in this notebook; the
        # architecture must be constructed before its weights can be loaded.
        # TODO: instantiate the model (see speech2speech.models) before this call.
        model.load_state_dict(state_dict)
        self._model = model
notebooks/old/serving_trial.py.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np

sns.axes_style("white")


# +
def show_lineplot(df, hue='model', x='iteration', y='item_rank',
                  xlabel='Iteration', ylabel='Item Rank',
                  name="item_rank_iteration", save=True):
    """Line plot of `y` vs `x`, one line per value of `hue` (with 68% CI).

    If save is True the figure is written as a PDF to the path configured in
    config/global.yml; otherwise it is shown interactively.
    NOTE(review): `load_yaml` is not imported in this notebook, so save=True
    raises NameError here -- confirm where load_yaml should come from.
    """
    fig, ax = plt.subplots(figsize=(6, 3))
    # df = df.sort_values(by=['model'])
    #plt.axhline(y=0.165, color='r', linestyle='-')
    ax = sns.lineplot(x=x, y=y, hue=hue, style=hue, data=df, ci=68)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    # Drop the first legend entry (seaborn's legend title placeholder).
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[1:], labels=labels[1:])
    plt.tight_layout()
    if save:
        fig_path = load_yaml('config/global.yml', key='path')['figs']
        fig.savefig('{0}/{1}.pdf'.format(fig_path, name),
                    bbox_inches="tight", pad_inches=0, format='pdf')
    else:
        plt.show()


def show_barplot(df, hue='Model', x='target_rank', y='Popularity',
                 xlabel='Target Rank', ylabel='Popularity',
                 name="item_pop", save=True):
    """Violin plot of `y` grouped by `x` and split by `hue`.

    Despite the name, the bar/box variants below were tried and left
    commented out; the violin plot is the one in use.
    NOTE(review): same load_yaml caveat as show_lineplot when save=True.
    """
    fig, ax = plt.subplots(figsize=(6, 3))
    df = df.sort_values(by=['Model'])
    #plt.axhline(y=0.165, color='r', linestyle='-')
    # ax = sns.barplot(y=x, x=y, hue=hue, data=df, orient='h', ci=68)
    # ax = sns.barplot(x=x, y=y, hue=hue, data=df, ci=68)
    # ax = sns.boxplot(x=x, y=y, hue=hue, data=df)
    ax = sns.violinplot(x=x, y=y, hue=hue, data=df)
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    handles, labels = ax.get_legend_handles_labels()
    ax.legend(handles=handles[:], labels=labels[0:])
    # ax.legend(handles=handles[1:], labels=labels[1:])
    plt.tight_layout()
    if save:
        fig_path = load_yaml('config/global.yml', key='path')['figs']
        fig.savefig('{0}/{1}.pdf'.format(fig_path, name),
                    bbox_inches="tight", pad_inches=0, format='pdf')
    else:
        plt.show()


# +
def save_dataframe_csv(df, path, name):
    """Write df to path+name as CSV without the index column."""
    df.to_csv(path + name, index=False)


def load_dataframe_csv(path, name, index_col=None):
    """Read the CSV at path+name into a DataFrame."""
    return pd.read_csv(path + name, index_col=index_col)


# -

table_path = '../tables/critiquing/multi_step_critiquing/yelp/avg/'
name = 'test_result_for_plotting1.csv'

df = load_dataframe_csv(table_path, name)

df['result']

df


def avg_successful_rate(df):
    """Fraction of critiquing runs that ended 'successful'.

    A run is identified by its iteration-0 row; success by a row whose
    'result' column equals 'successful'.
    """
    num_runs = len(np.where(df['iteration'] == 0)[0])
    num_success = len(np.where(df['result'] == 'successful')[0])
    return num_success / num_runs


def avg_length(df, include_fail=True):
    """Average number of critiquing iterations per run.

    NOTE(review): the include_fail parameter is currently unused -- failed
    runs are always included. TODO: implement or remove it.
    """
    num_runs = len(np.where(df['iteration'] == 0)[0])
    return len(df) / num_runs


avg_length(df)

avg_successful_rate(df)

df_20 = df[df['target_rank'] == 20]
Critiques Draft/.ipynb_checkpoints/multi-step critiquing plots-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + [markdown] id="36s2EXUbfq9Y"
# # Predict a first alpha matte with U-2-Net
#
# This is the 1st step in our workflow to remove the background from an image:
#
# 1. **Use U-2-Net pre-trained model to generate a first alpha matte** (the current colab notebook)
# 2. Use the U-2-Net alpha matte to generate a trimap**
# 3. Train MODNet model with the original image, the trimap and ground truth image from DUTS dataset
#
# ## Sources:
# * [U-2-Net GitHub](https://github.com/xuebinqin/U-2-Net)
#

# + [markdown] id="rxKC0bw0gXPh"
# # Import

# + id="Kf0-7Hf9fbyd"
# import modules to handle files
import os
import shutil
from google.colab import drive

# + [markdown] id="KpzH1clzjB2Y"
# # Mount Google Drive

# + colab={"base_uri": "https://localhost:8080/"} id="UwB3pLH7jBCC" executionInfo={"status": "ok", "timestamp": 1623137903347, "user_tz": -120, "elapsed": 24021, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPlc6uDDSsjHzaIeMPdr-Jw14nRfyQhGuaw0TDJw=s64", "userId": "04282691266898005200"}} outputId="cc56b206-ac35-4a62-b0c4-e93899ebb05e"
drive.mount('/content/drive/')

# + [markdown] id="UKpVItnihTj0"
# # Clone U-2-Net GitHub repo & download pre-trained model

# + colab={"base_uri": "https://localhost:8080/"} id="gDFiCe-ghWrH" executionInfo={"status": "ok", "timestamp": 1623137945036, "user_tz": -120, "elapsed": 8162, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPlc6uDDSsjHzaIeMPdr-Jw14nRfyQhGuaw0TDJw=s64", "userId": "04282691266898005200"}} outputId="28c64ca8-81d5-4e35-c852-92b8b9876a10"
# # cd to directory
# %cd /content

# clone repository if doesn't already exist
if not os.path.exists('U-2-Net'):
  # !git clone https://github.com/xuebinqin/U-2-Net.git

# # cd to repository
# %cd U-2-Net/

# create directory where to save the pre-trained model
pretrained_model_dir = 'saved_models/u2net/'
shutil.rmtree(pretrained_model_dir, ignore_errors=True) # remove directory if exists
os.makedirs(pretrained_model_dir) # create directory

# set pre-trained model path
pretrained_model_path = os.path.join(pretrained_model_dir,'u2net.pth')

# download pre-trained model
# NOTE(review): the directory was just removed and recreated above, so this
# exists-check is always False and the download always runs -- confirm whether
# the rmtree was meant to be conditional instead.
if not os.path.exists(pretrained_model_path):
  # !gdown --id 1ao1ovG1Qtx4b7EoskHXmi2E9rp5CHLcZ \
  # -O "$pretrained_model_path"

# + [markdown] id="AoR6tXIGsCf2"
# # Predict alpha matte with U-2-Net

# + [markdown] id="PodglO2rLNkE"
# ## Function

# + colab={"base_uri": "https://localhost:8080/"} id="aMd57q7skseo" executionInfo={"status": "ok", "timestamp": 1623147098853, "user_tz": -120, "elapsed": 223, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPlc6uDDSsjHzaIeMPdr-Jw14nRfyQhGuaw0TDJw=s64", "userId": "04282691266898005200"}} outputId="daa9f8c3-236b-41b9-e365-f2895a48388b"
# # cd to repository
# %cd /content

# # copy modified u2net script that predicts alpha matte
current_path = "/content/drive/MyDrive/Faktion/exploration/u2net_test_modified.py"
dst_path = "/content/U-2-Net/u2net_test_modified.py"
shutil.copy(current_path, dst_path)

# # cd to repository
# %cd U-2-Net/

from u2net_test_modified import predict_u2net_alpha_matte

# + colab={"base_uri": "https://localhost:8080/"} id="2DXBZJdksJcc" executionInfo={"status": "ok", "timestamp": 1623147892963, "user_tz": -120, "elapsed": 39336, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiPlc6uDDSsjHzaIeMPdr-Jw14nRfyQhGuaw0TDJw=s64", "userId": "04282691266898005200"}} outputId="1be87ce4-a2e5-4727-ecae-7b965289c642"
# specify original image dir & destination dir where to save alpha matte
src_dir_path = "/content/drive/MyDrive/Faktion/DUTS/DUTS-TR/DUTS-TR-Image"
dst_dir_path = "/content/drive/MyDrive/Faktion/DUTS/DUTS-TR/DUTS-TR-u2net-MASK"

# if destination directory doesn't exist already
if not os.path.exists(dst_dir_path):
    # create directory
    os.makedirs(dst_dir_path)

# predict alpha matte
predict_u2net_alpha_matte(src_dir_path, dst_dir_path)
exploration/1_predict_u2net_alpha_matte.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

from keras import layers
from keras import models
from keras import optimizers
from keras.preprocessing import image
import matplotlib.pyplot as plt
import numpy as np
from numpy import random
import os
from os.path import isfile, join
import pandas as pd
from math import ceil
# %matplotlib inline


class ConvnetSneakers:
    """Train a small binary-classification CNN on images in a directory,
    repeating the training over several shuffled train/val/test splits.
    """

    def __init__(self, base_image_path, master_image_size, master_color_channels):
        # Directory holding all images; size (h, w) and channel count every
        # image is resized/converted to before entering the network.
        self.master_image_size = master_image_size
        self.base_image_path = base_image_path
        self.master_color_channels = master_color_channels

    def get_new_model(self):
        """
        Creates a new CNN model. It also prints a summary of the model.

        It uses the instance attributes master_image_size and
        master_color_channels to set up the input layer.

        Returns
        -------
        keras.models.Sequential
            The model ready to be trained
        """
        model = models.Sequential()
        model.add(layers.Conv2D(64, (3, 3), activation='relu',
                                input_shape=(self.master_image_size[0],
                                             self.master_image_size[1],
                                             self.master_color_channels)))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        model.add(layers.MaxPooling2D((2, 2)))
        model.add(layers.Flatten())
        model.add(layers.Dense(256, activation='relu'))
        model.add(layers.Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=optimizers.RMSprop(lr=0.5e-4),
                      metrics=['acc'])
        model.summary()
        return model

    def train_validation_test(self, shuffle_seed=0):
        """
        Reads images in a directory and splits them up according to class.
        Assumes a binary classification, with EQUAL COUNTS in each class.

        This automatically assigns classes to the images. The class name is
        the first token of the filename as delimited by "." (BUGFIX: the
        docstring previously said "_", contradicting the code below).

        The dataframes returned are to be used by Keras. They have the
        columns "filename" and "class" to point to the images and the
        classes, respectively.

        NOTE(review): with train_fraction = 0.8 the code below actually
        produces a 60/20/20 split (train gets everything after two
        val/test chunks of 20% each) -- confirm which split was intended.

        Parameters
        ----------
        shuffle_seed : int
            The integer to seed the random number generator which shuffles
            the dataframe.

        Returns
        -------
        pd.DataFrame, pd.DataFrame, pd.DataFrame
            The train, validation and test sets, respectively.
        """
        print(f'>>> Shuffle seed {shuffle_seed}')
        train_fraction = 0.8
        # Seed numpy's global RNG, which pandas' sample() uses below.
        random.seed(shuffle_seed)
        all_images_list = []
        for filename in os.listdir(self.base_image_path):
            if isfile(join(self.base_image_path, filename)):
                image_class = filename.split('.')[0]
                all_images_list.append({'class': image_class, 'filename': filename})
        all_images = pd.DataFrame(all_images_list)
        # Shuffle all rows (frac=1) and renumber.
        all_images = all_images.sample(frac=1).reset_index(drop=True)

        all_classes = all_images['class'].unique()
        first_class_name = all_classes[0]
        second_class_name = all_classes[1]
        first_class = all_images.copy().where(all_images['class'] == first_class_name).dropna()
        second_class = all_images.copy().where(all_images['class'] == second_class_name).dropna()

        # val and test each take one chunk of (1 - train_fraction); train
        # takes the remainder, per class, so the classes stay balanced.
        train_row_count = int(len(first_class) * train_fraction)
        test_val_count = len(first_class) - train_row_count
        first_class_train = first_class.iloc[2 * test_val_count:]
        first_class_val = first_class.iloc[test_val_count:2 * test_val_count]
        first_class_test = first_class.iloc[0:test_val_count]
        second_class_train = second_class.iloc[2 * test_val_count:]
        second_class_val = second_class.iloc[test_val_count:2 * test_val_count]
        second_class_test = second_class.iloc[0:test_val_count]

        # NOTE(review): DataFrame.append is deprecated (removed in pandas 2.0);
        # kept here for behavioural parity with the original environment.
        train = first_class_train.append(second_class_train).reset_index().drop('index', axis=1)
        val = first_class_val.append(second_class_val).reset_index().drop('index', axis=1)
        test = first_class_test.append(second_class_test).reset_index().drop('index', axis=1)
        print(first_class_name, second_class_name)
        return train, val, test

    def train_validation_test_generators(self, shuffle_seed=0):
        """
        Creates generators for train, validation and test datasets.
        These can then be used by Keras to train a model.

        Dataframe shuffling is prevented at this step because the dataframe
        is assumed to have been shuffled beforehand with an RNG with a known
        seed. The dataframe is created for you from the images in
        base_image_path. See the train_validation_test function for more
        information.

        Parameters
        ----------
        shuffle_seed : int
            The seed for the RNG used for dataframe shuffling.

        Returns
        -------
        The train, validation and test generators, respectively.
        """
        train, validation, test = self.train_validation_test(shuffle_seed)
        train_datagen = image.ImageDataGenerator(rescale=1.0 / 255)
        test_datagen = image.ImageDataGenerator(rescale=1.0 / 255)
        validation_datagen = image.ImageDataGenerator(rescale=1.0 / 255)
        train_generator = train_datagen.flow_from_dataframe(
            dataframe=train,
            directory=self.base_image_path,
            target_size=self.master_image_size,
            batch_size=20,
            shuffle=False,
            color_mode='grayscale',
            class_mode='binary')
        # BUGFIX: the validation and test generators were built from
        # train_datagen, leaving validation_datagen/test_datagen unused.
        # (Behaviour is unchanged -- all three apply the same rescale.)
        validation_generator = validation_datagen.flow_from_dataframe(
            dataframe=validation,
            directory=self.base_image_path,
            target_size=self.master_image_size,
            batch_size=20,
            shuffle=False,
            color_mode='grayscale',
            class_mode='binary')
        test_generator = test_datagen.flow_from_dataframe(
            dataframe=test,
            directory=self.base_image_path,
            target_size=self.master_image_size,
            batch_size=20,
            shuffle=False,
            color_mode='grayscale',
            class_mode='binary')
        return train_generator, validation_generator, test_generator

    def train_model_and_get_history(self, shuffle_seed=0, epochs=10):
        """
        Trains a model and returns it together with its training history.

        Parameters
        ----------
        shuffle_seed : int
            The seed for the RNG that shuffles the train, validation and
            test datasets.
        epochs : int
            Number of training epochs.

        Returns
        -------
        keras.models.Sequential, keras.callbacks.History
            The trained model and the history that it produced.
        """
        train_generator, validation_generator, test_generator = \
            self.train_validation_test_generators(shuffle_seed)
        model = self.get_new_model()
        train_history = model.fit_generator(train_generator,
                                            steps_per_epoch=100,
                                            epochs=epochs,
                                            validation_data=validation_generator,
                                            validation_steps=50,
                                            verbose=1)
        return model, train_history

    def train_and_run_different_shuffles(self, shuffles=10, epochs_per_shuffle=10):
        """
        This runs a sequence of trainings, each with a different shuffle
        controlled by a different random number generator seed.

        Parameters
        ----------
        shuffles : int
            The number of shuffles and training runs to go.
        epochs_per_shuffle : int
            The number of epochs in each shuffle of the data.

        Returns
        -------
        list, list
            List of histories (see below) of the training runs and a list
            of the maximum validation accuracies for each training run.
        """
        histories = []
        max_val_accs = []
        for shuffle_seed in range(shuffles):
            model, history = self.train_model_and_get_history(shuffle_seed,
                                                              epochs=epochs_per_shuffle)
            train, validation, test = self.train_validation_test(shuffle_seed)
            histories.append({
                'shuffle_seed': shuffle_seed,
                'epochs': epochs_per_shuffle,
                'val_accs': history.history['val_acc'],
                'max_val_acc': max(history.history['val_acc']),
                'model': model,
                'train': train,
                'validation': validation,
                'test': test
            })
            max_val_accs.append(max(history.history['val_acc']))
        return histories, max_val_accs


trainer = ConvnetSneakers(master_image_size=(256, 256),
                          base_image_path='grayscale-256x256',
                          master_color_channels=1)

histories, max_val_accs = trainer.train_and_run_different_shuffles(shuffles=50, epochs_per_shuffle=10)

max_val_accs
work_in_progress/version_11.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python [Root] # language: python # name: Python [Root] # --- import matplotlib.pyplot as plt % matplotlib inline import numpy as np import pandas as pd from cvxpy import * # check if cvxpy is installed and check installed solvers print installed_solvers() # load tranining data set, due to data privacy issues the data for the paper isn't relesed # this data set is a synthetic data set # load edge E= pd.read_csv("data/edges_new.csv",header=None) # community label for each node C = pd.read_csv("data/nodes_C.csv",header=None) # name name V = pd.read_csv("data/nodes_N.csv",header=None) # distance between connedted nodes E_all = pd.read_csv("data/E_all.csv",header=None) # cd ../ # %load_ext autoreload # %autoreload 2 from src.models.ERGM_CVX import ERGM_CVX A = ERGM_CVX(E, C, V, E_all) A.run_CVX() print 'theta1 and theta2',A.W print 'theta3',A.b
notebook/.ipynb_checkpoints/call_ERGM_CVX-checkpoint.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python [conda env:anaconda]
#     language: python
#     name: conda-env-anaconda-py
# ---

# + [markdown] slideshow={"slide_type": "slide"}
# # Chapter 6
#
# <NAME> (2016, 2018)
#
# In Chapter 6 we will cover the following topics
# * Non-parametric Density Estimation, specifically Kernel Density Estimation (KDE)
# * $k$-Nearest Neighbor Density Estimation
# * Parametric Density Estimation, specifically Gaussian Mixture Models (GMM)
# * Clustering algorithms, particularly $K$-means

# + [markdown] slideshow={"slide_type": "slide"}
# ## Kernel Density Estimation (KDE)
#
# Inferring the pdf of a sample of data is known as *density estimation*. Essentially we are smoothing the data.
#
# Density estimation is useful because identifying low probability regions can help uncover rare sources. Similarly, if the data can be divided into subsamples, one can estimate the pdf for each subsample and, in turn, determine classifications for new objects.
#
# *Nonparametric* density estimation is useful when we know nothing about the underlying distribution of the data since we don't have to specify a model. This flexibility allows us to capture the shape of the distribution well, at the expense of more difficulty interpreting the results.
#
# [*Kernel Density Estimation (KDE)*](https://en.wikipedia.org/wiki/Kernel_density_estimation) is the standard here (and, incidentally, is something that we have been doing in my group for about 15 years now).
#
# Let's start by recalling the experiment that we did with 1-D histograms in the first week of class.

# + slideshow={"slide_type": "slide"}
# Modified from Ivezic, Figure 6.1
# Author: <NAME>
# License: BSD
#   The figure produced by this code is published in the textbook
#   "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
#   For more information, see http://astroML.github.com
#   To report a bug or issue, use the following forum:
#    https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy import stats
# %matplotlib inline

#------------------------------------------------------------
# Draw the random data
# Two Gaussian clumps (14 + 7 points) make an intentionally lumpy sample.
np.random.seed(1)
x = np.concatenate([np.random.normal(-0.5, 0.3, size=14),
                    np.random.normal(1, 0.3, size=7)])

#------------------------------------------------------------
# First figure: silly histogram binning
# NOTE(review): the `normed` keyword was removed in Matplotlib 3.1;
# on modern Matplotlib use density=True in the ax.hist calls below.
fig1 = plt.figure(figsize=(8, 4))
fig1.subplots_adjust(left=0.12, right=0.95, wspace=0.05,
                     bottom=0.15, top=0.9, hspace=0.05)

FC = '#6666FF'
XLIM = (-2, 2.9)
YLIM = (-0.09, 1.1)

ax = fig1.add_subplot(121)
bins = np.linspace(-1.8, 2.7, 13)
ax.hist(x, bins=bins, normed=True,
        histtype='stepfilled', fc='k', alpha=0.3)
ax.plot(XLIM, [0, 0], '-k', lw=1)
# '+' markers just below the axis show the raw data positions.
ax.plot(x, 0 * x - 0.05, '+k')
ax.set_xlim(XLIM)
ax.set_ylim(YLIM)
ax.set_xlabel('$x$')
ax.set_ylabel('$p(x)$')

#Shift bin centers by 0.25
ax = fig1.add_subplot(122)
ax.yaxis.set_major_formatter(plt.NullFormatter())
ax.hist(x, bins=bins + 0.25, normed=True,
        histtype='stepfilled', fc='k', alpha=0.3)
ax.plot(XLIM, [0, 0], '-k', lw=1)
ax.plot(x, 0 * x - 0.05, '+k')
ax.set_xlim(XLIM)
ax.set_ylim(YLIM)
ax.set_xlabel('$x$')

plt.show()

# + [markdown] slideshow={"slide_type": "slide"}
# The underlying distribution in both panels is the same, that is the data points that make up the histogram are the same. All we have done is shifted the locations of the bins by 0.25.
# As we saw in Lecture 2, the choice of bin centers can really change the histogram that we make.
#
# The next panels are what happens if we center the bins on each point. This is an example of kernel density estimation using a "top-hat" kernel. It is a good description of the data, but is pretty ugly.

# + slideshow={"slide_type": "slide"}
fig1b = plt.figure(figsize=(8, 4))
fig1b.subplots_adjust(left=0.12, right=0.95, wspace=0.05,
                      bottom=0.1, top=0.95, hspace=0.05)

ax = fig1b.add_subplot(111)
ax.xaxis.set_major_formatter(plt.NullFormatter())
binwidth = bins[1] - bins[0]
x_plot = np.linspace(-4, 4, 1000)
# One top-hat of width `binwidth` centred on every data point.
y_plot = (abs(x_plot - x[:, None]) <= 0.5 * binwidth).astype(float)
y_plot /= (binwidth * len(x))
ax.fill(x_plot, y_plot.sum(0), ec='k', lw=1, fc='k', alpha=0.3)
ax.plot(x_plot, y_plot.T, '-k', lw=1)
ax.plot(x, 0 * x - 0.05, '+k')
ax.set_xlim(XLIM)
ax.set_ylim(YLIM)
ax.set_ylabel('$p(x)$')

# + [markdown] slideshow={"slide_type": "slide"}
# We can make it look nicer by choosing a different kernel. That is by choosing a different bin shape. The next 3 plots show KDEs using Gaussian kernels with different width Gaussians.

# + slideshow={"slide_type": "slide"}
#------------------------------------------------------------
# First figure: transition to KDE
# Three bandwidths (0.1, 0.7, 0.2): too narrow, too wide, "just right".
fig2 = plt.figure(figsize=(8, 8))
fig2.subplots_adjust(left=0.12, right=0.95, wspace=0.05,
                     bottom=0.0, top=1.0, hspace=0.05)

ax = fig2.add_subplot(311)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())
binwidth = bins[1] - bins[0]
x_plot = np.linspace(-4, 4, 1000)
y_plot = binwidth * stats.norm.pdf(x_plot, x[:, None], 0.1)
y_plot /= (binwidth * len(x))
ax.fill(x_plot, y_plot.sum(0), ec='k', lw=1, fc='k', alpha=0.3)
ax.plot(x_plot, y_plot.T, '-k', lw=1)
ax.plot(x, 0 * x - 0.05, '+k')
ax.set_xlim(XLIM)
ax.set_ylim(YLIM)

ax = fig2.add_subplot(312)
ax.xaxis.set_major_formatter(plt.NullFormatter())
binwidth = bins[1] - bins[0]
x_plot = np.linspace(-4, 4, 1000)
y_plot = binwidth * stats.norm.pdf(x_plot, x[:, None], 0.7)
y_plot /= (binwidth * len(x))
ax.fill(x_plot, y_plot.sum(0), ec='k', lw=1, fc='k', alpha=0.3)
# The factor 4 only rescales the individual kernel curves for visibility;
# the filled sum is unscaled.
ax.plot(x_plot, 4 * y_plot.T, '-k', lw=1)
ax.plot(x, 0 * x - 0.05, '+k')
ax.set_xlim(XLIM)
ax.set_ylim(YLIM)
ax.set_ylabel('$p(x)$')
ax.set_xlabel('$x$')

ax = fig2.add_subplot(313)
ax.yaxis.set_major_formatter(plt.NullFormatter())
binwidth = bins[1] - bins[0]
x_plot = np.linspace(-4, 4, 1000)
y_plot = binwidth * stats.norm.pdf(x_plot, x[:, None], 0.2)
y_plot /= (binwidth * len(x))
ax.fill(x_plot, y_plot.sum(0), ec='k', lw=1, fc='k', alpha=0.3)
ax.plot(x_plot, y_plot.T, '-k', lw=1)
ax.plot(x, 0 * x - 0.05, '+k')
ax.set_xlim(XLIM)
ax.set_ylim(YLIM)
ax.set_xlabel('$x$')

# + [markdown] slideshow={"slide_type": "slide"}
# This looks better, but gives us a "Goldilocks" problem. The first plot uses a kernel that is too narrow. The second is too wide. The third is "just right".
#
# We can think of KDE as replacing the points with "clouds". Each cloud is described by the kernel $K(u)$, where $K(u)$ can be any function that is smooth, is positive definite, normalizes to unity, has a mean of 0, and has a positive variance.

# + [markdown] slideshow={"slide_type": "slide"}
# A common kernel is the Gaussian kernel that we just used above:
#
# $$K(u) = \frac{1}{(2\pi)^{D/2}}e^{-u^2/2}$$
#
# Note that the "$D$" is necessary because while histograms are generally 1-D, the kind of Big Data analysis that we are interested in will be $N$-D.

# + [markdown] slideshow={"slide_type": "slide"}
# Once a kernel is chosen the kernel density estimate at a given point, $x$, is given by
# $$ \hat{f}(x) = \frac{1}{Nh^D}\sum_{i=1}^N K\left(\frac{d(x,x_i)}{h}\right),$$
# where $\hat{f}$ is an *estimator* of our distribution.

# + [markdown] slideshow={"slide_type": "slide"}
# Where does this come from? Well if you wanted to know the density of points you could compute
# $\frac{\sum_1^N\delta (x-x_i)}{V},$ where $\delta (x-x_i)$ is the Delta function, $V$ is the volume, and $N$ is the number of points. In $D$-dimensional space a volume element is just $h^D$. Then instead of representing our observation as a delta function, we represent it by our kernel function. To normalize for the number of points, divide by $N$.
#
# The argument of $K$ is just some measure of the distance between $x$ and each $x_i$. Normally $d(x,x_i) = (x-x_i)$. For the gaussian kernel that makes $h=\sigma$. Take a second to convince yourself that that is the case. So, you can see how $h$ represents the "width" or what is usually called the "bandwidth" in this context.
#
# You might wonder why the book uses $\hat{f}$ instead of just $f$ since we already are using $f$ instead of $h$ (the true distribution). I don't know.

# + [markdown] slideshow={"slide_type": "slide"}
# Here is a comparison of some different possible kernels. The one that I use most commonly is actually an Epanechnikov kernel since the Gaussian and Exponential have rather long tails.
# ![Ivezic, Figure 6.2](http://www.astroml.org/_images/fig_kernels_1.png)

# + [markdown] slideshow={"slide_type": "slide"}
# We won't go through the math, but it turns out that the Epanechnikov kernel is "optimal" in the sense of minimizing the variance. That kernel is given by $$K(x) = \frac{3}{4}(1-x^2),$$
# for $|x|\le 1$ and 0 otherwise. Below is the code that produces the plot above. Add the Epanechnikov kernel to it.

# + slideshow={"slide_type": "slide"}
# Complete and Execute
# NOTE(review): the `____` and `__` placeholders below are an intentional
# in-class exercise -- do not "fix" them; fill them in per the prompt above.
# Author: <NAME>
# License: BSD
#   The figure produced by this code is published in the textbook
#   "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
#   For more information, see http://astroML.github.com
#   To report a bug or issue, use the following forum:
#    https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt

#------------------------------------------------------------
# Compute Kernels.
u = np.linspace(-5, 5, 10000)
du = u[1] - u[0]

gauss = (1. / np.sqrt(2 * np.pi)) * np.exp(-0.5 * u ** 2)

exp = 0.5 * np.exp(-abs(u))

tophat = 0.5 * np.ones_like(u)
tophat[abs(u) > 1] = 0    # Range of the tophat kernel

ep = ____ # Add the Epanechnikov kernel function
ep[____]=0 # Set the range of the kernel

#------------------------------------------------------------
# Plot the kernels
fig = plt.figure(figsize=(5, 3.75))
ax = fig.add_subplot(111)

ax.plot(u, gauss, '-', c='black', lw=3, label='Gaussian')
ax.plot(u, exp, '-', c='#666666', lw=2, label='Exponential')
ax.plot(u, tophat, '-', c='#999999', lw=1, label='Top-hat')
ax.plot(__,__,__,__,label='Epanechnikov') # Add the Epanechnikov kernel to the plot

ax.legend(loc=1)
ax.set_xlabel('$u$')
ax.set_ylabel('$K(u)$')

ax.set_xlim(-5, 5)
ax.set_ylim(0, 0.8001)
plt.show()

# + [markdown] slideshow={"slide_type": "slide"}
# The crucial part of KDE is to determine the optimal value for the width of the kernel. When we first discussed histograms and KDE we talked about theoretical computation of optimal bandwidths. Let's now look at how we can empirically determine the optimal bandwidth through [**cross validation**](https://en.wikipedia.org/wiki/Cross-validation_(statistics).

# + [markdown] slideshow={"slide_type": "slide"}
# Cross validation is related to the construction of training and test sets that we talked about last time. There are a number of different ways to do this. For example, you could *randomly sample* to decide which data goes into the training or test sets:
# ![Random Sample Cross Validation Example; Remesan Figure 3.7](http://i.stack.imgur.com/4Lrff.png)
#
# Where we aren't just doing this once, but rather many times so that each data point is treated both as a training point and as a test point.
#
# We could do this in a more ordered way (e.g., to make sure that each point gets sampled as training/test the same number of times) and do a $K$-fold cross validation.
Here $K$ is the number of "experiments" that need to be done so that each data point appears in a test sample once. # # ![K-Fold Cross Validation Example; Remesan Figure 3.8](http://i.stack.imgur.com/fhMza.png) # # We can take that to the extreme by having $K\equiv N$, so that in each experiment we leave out just one object. This is called "Leave-One-Out" cross validation: # # ![Leave-One-Out Cross Validation Example; Remesan Figure 3.9](http://images.slideplayer.com/16/4977882/slides/slide_35.jpg) # + [markdown] slideshow={"slide_type": "slide"} # We can implement this in Scikit-Learn with [`GridSearchCV`](http://scikit-learn.org/0.17/modules/generated/sklearn.grid_search.GridSearchCV.html) and replot our histogram above as follows: # + slideshow={"slide_type": "slide"} # Complete and Execute this cell to determine the bandwidth from sklearn.neighbors import KernelDensity from sklearn.model_selection import GridSearchCV bwrange = np.linspace(0.1, 1.0, 30) # Test 30 bandwidths from 0.1 to 1.0 K = 5 # 5-fold cross validation grid = GridSearchCV(KernelDensity(), {'bandwidth': bwrange}, cv=K) grid.fit(x[:, None]) #Fit the histogram data that we started the lecture with. 
h_opt = grid.best_params_['bandwidth']
# BUG FIX: `print h_opt` was Python 2 syntax; this notebook runs on a Python 3
# kernel (see the kernelspec header), so the statement form is a SyntaxError.
print(h_opt)

# + slideshow={"slide_type": "slide"}
# Execute this cell to see the new "histogram"
fig2 = plt.figure(figsize=(5, 5))
ax = fig2.add_subplot(111)
ax.xaxis.set_major_formatter(plt.NullFormatter())
ax.yaxis.set_major_formatter(plt.NullFormatter())

binwidth = bins[1] - bins[0]
x_plot = np.linspace(-4, 4, 1000)
y_plot = binwidth * stats.norm.pdf(x_plot, x[:, None], h_opt)
y_plot /= (binwidth * len(x))
ax.fill(x_plot, y_plot.sum(0), ec='k', lw=1, fc='k', alpha=0.3)
ax.plot(x_plot, y_plot.T, '-k', lw=1)
ax.plot(x, 0 * x - 0.05, '+k')
ax.set_xlim(XLIM)
ax.set_ylim(YLIM)

# + [markdown] slideshow={"slide_type": "slide"}
# ## 2-D Histograms
#
# Here is some sample code using [`sklearn.neighbors.KernelDensity`](http://scikit-learn.org/stable/modules/generated/sklearn.neighbors.KernelDensity.html). Play around with this and see how it works. Make different variations of the plot. What we are doing here is using KDE to set the plot color to indicate the relative density of the points. This is essentially a 2-D histogram.

# + slideshow={"slide_type": "slide"}
# %matplotlib inline
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neighbors import KernelDensity

# Two 2-D normal distributions with offset centroids
X = np.concatenate([np.random.normal([-1,-1],[0.75,0.75],size=(1000,2)),np.random.normal([1,1],[1,1],size=(500,2))])

kde = KernelDensity(kernel='gaussian', bandwidth=0.1)
kde.fit(X) #fit the model to the data

u = v = np.linspace(-4,5,80)
# BUG FIX: on Python 3, `map` returns an iterator and np.vstack requires a
# sequence of arrays, so the iterator must be materialized with list().
Xgrid = np.vstack(list(map(np.ravel, np.meshgrid(u, v)))).T

dens = np.exp(kde.score_samples(Xgrid)) #evaluate the model on the grid
plt.scatter(Xgrid[:,0],Xgrid[:,1], c=dens, cmap="Purples", edgecolor="None")
plt.colorbar()

# + [markdown] slideshow={"slide_type": "slide"}
# Now copy the example from above to a new cell and splice in the cross validation code to produce a new plot with the "optimal" bandwidth. Try `bandwidth=0.01` to `bandwidth=1.0`.
Basically, splice in the lines of code for `GridSearchCV` between the lines setting `X` and instantiating `kde`.
notebooks/KernelDensityEstimation.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # astGl - Uebung 3 # ## Aufgabe 1 # %matplotlib notebook from sympy import * import matplotlib.pyplot as plt from IPython.display import display, Math, Latex def disp(str): display(Latex(str)) # Impedanzes R1 = 75; G1 = 1/R1 R2 = 75; G2 = 1/R2 R3 = 250; G3 = 1/R3 R4 = 750; G4 = 1/R4 Yp = G1+G2; Zp = 1/Yp Ym = G3+G4; Zm = 1/Ym # Transfer Function Tsig = G1*Zp*(G3+G4)/G4 Tsig # Resistor Noise TG1 = Tsig/G1 TG2 = TG1 TG1 TG3 = -1/G4 TG4 = TG3 TG3 k = 1.38E-23 # Bolzman Constant T = 300 # Kelvin In1 = sqrt(4*k*T*G1) In2 = sqrt(4*k*T*G2) In3 = sqrt(4*k*T*G3) In4 = sqrt(4*k*T*G4) Vout_G1_2 = In1**2 * TG1**2 Vout_G2_2 = In2**2 * TG2**2 Vout_G3_2 = In3**2 * TG3**2 Vout_G4_2 = In4**2 * TG4**2 Vout_G_2 = Vout_G1_2 + Vout_G2_2 + Vout_G3_2 + Vout_G4_2 disp("$V_{OUT_{G1}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vout_G1_2)) disp("$V_{OUT_{G2}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vout_G2_2)) disp("$V_{OUT_{G3}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vout_G3_2)) disp("$V_{OUT_{G4}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vout_G4_2)) disp("$V_{OUT_{G}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vout_G_2)) # OP Noise Vn_op = 3.5E-9 # V/sqrt(Hz) In_p_op = 1.5E-12 # A/sqrt(Hz) In_m_op = 18.0E-12 # A/sqrt(Hz) T_Vn = Ym/G4 T_Inp = Tsig/G1 T_Inm = -1/G4 Vout_Vn_2 = T_Vn**2 * Vn_op**2 Vout_Inp_2 = T_Inp**2 * In_p_op**2 Vout_Inm_2 = T_Inm**2 * In_m_op**2 Vout_op_2 = Vout_Vn_2 + Vout_Inp_2 + Vout_Inm_2 disp("$V_{OUT_{Vn}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vout_Vn_2)) disp("$V_{OUT_{I+}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vout_Inp_2)) disp("$V_{OUT_{I-}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vout_Inm_2)) disp("$V_{OUT_{OP}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vout_op_2)) # Total Noise Vn_out_2 = Vout_op_2 + Vout_G_2 
disp("$V_{n,_{OUT}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %sqrt(Vn_out_2)) # 20.8 nV/sqrt(Hz) # Input Reffered Vn_in = sqrt(Vout_op_2) / abs(Tsig) disp("$V_{n,_{in}}^2$ = (%s $\\frac{V}{\\sqrt{Hz}})^2$" %(Vn_in)) # 10.4 nV/sqrt(Hz)
astGl/astGl_Uebung3_.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Data exploration # !ls -laFh ../data # !head -2 ../data/rates-indices-ccfi-scfi.csv # !echo ... # !tail -2 ../data/rates-indices-ccfi-scfi.csv # # Python libraries import os import pandas as pd import numpy as np from scipy import signal from datetime import datetime import matplotlib.pyplot as plt # # Utilities def plot_timeseries (df_ts, save_pic=False): if save_pic: plt.figure() plt.xlabel('Rate indices every week') ax1 = df_ts['ccfi'].plot(x='week', grid=True, color='red', label='CCFI') ax2 = df_ts['scfi'].plot(x='week', secondary_y=True, grid=True, color='blue', label='SCFI') h1, l1 = ax1.get_legend_handles_labels() h2, l2 = ax2.get_legend_handles_labels() plt.legend(h1+h2, l1+l2, loc=2) filename = 'img/' + df_ts.name + '.png' plt.savefig(filename, dpi=300, bbox_inches='tight') plt.clf() # plt.figure() plt.xlabel('Rate indices every week') ax1 = df_ts['ccfi'].plot(x='week', grid=True, color='red', label='CCFI') ax2 = df_ts['scfi'].plot(x='week', secondary_y=True, grid=True, color='blue', label='SCFI') h1, l1 = ax1.get_legend_handles_labels() h2, l2 = ax2.get_legend_handles_labels() plt.legend(h1+h2, l1+l2, loc=2) plt.show() def plot_image(filename): im = plt.imread(filename) plt.axis('off') plt.imshow(im) import datetime # Example: week #33 of 2018. 
d = "2018-W33"
# The -5 corresponds to the week day, Friday here
r = datetime.datetime.strptime(d + '-5', '%Y-W%W-%w')
print(r)

# # CCFI index

# +
path = '../data/'
filename = 'rates-indices-ccfi-scfi.csv'

# BUG FIX: pd.datetime was deprecated and removed from pandas; use the stdlib
# datetime module (imported above), matching the strptime call in the demo cell.
dateparser = lambda x: datetime.datetime.strptime(x + '-5', '%Y-W%W-%w')

df_rates_ccfi_scfi = pd.read_csv(path + filename, delimiter = ';', header = 0,
                                 parse_dates = ['week'], date_parser = dateparser)

# Extract month and year, for convenience
df_rates_ccfi_scfi['year'] = df_rates_ccfi_scfi['week'].dt.year
df_rates_ccfi_scfi['month'] = df_rates_ccfi_scfi['week'].dt.month

# The data-frame needs a name for the plot_image() function to work
df_rates_ccfi_scfi.name = 'rates-ccfi-scfi'

# Display the data-frame
df_rates_ccfi_scfi
# -

df_rates_ccfi_scfi.dtypes

len(df_rates_ccfi_scfi)

plot_timeseries(df_rates_ccfi_scfi, save_pic=False)

plot_image ('img/rates-ccfi-scfi.png')
use-cases/market-outlook-transport-rates/2018-08-forecasting/rate-indices-forecasting.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="X8hfh7A1_Gu-" colab_type="text" # #**Predicting Solubility of Molecules** # + [markdown] id="yPG3EH8L_fsL" colab_type="text" # **1.Install Rdkit** # + id="yNiL8UvI35qG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="924c235a-d0ee-49dc-9363-a5e130785d2e" # ! wget https://repo.anaconda.com/miniconda/Miniconda3-py37_4.8.2-Linux-x86_64.sh # ! chmod +x Miniconda3-py37_4.8.2-Linux-x86_64.sh # ! bash ./Miniconda3-py37_4.8.2-Linux-x86_64.sh -b -f -p /usr/local # ! conda install -c rdkit rdkit -y import sys sys.path.append('/usr/local/lib/python3.7/site-packages/') # + [markdown] id="8GKG81Zh_a40" colab_type="text" # **2.1 Import dataset** # + id="MDdrltQu4J91" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 212} outputId="6d3163a4-39eb-46a5-9178-79cfff427701" # ! wget https://raw.githubusercontent.com/dataprofessor/data/master/delaney.csv # + [markdown] id="rJUuLCIG45OT" colab_type="text" # **2.2. Read in the dataset** # + id="GxXIUVdD4u59" colab_type="code" colab={} import pandas as pd # + id="Qk5e1RU-5Co9" colab_type="code" colab={} sol = pd.read_csv('/content/drive/My Drive/Colab Notebooks/bio informatics project/delaney.csv') # + id="IJCHL7TM5T9I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="8c5ba3cb-5581-4870-de29-279457bb3cdc" sol.head() # + [markdown] id="sRYzDRbo5bD9" colab_type="text" # **2.3. 
Examining the SMILES data** # + [markdown] id="-oWXKGQN5gCw" colab_type="text" # Chemical structures are encoded by a string of text known as SMILES which is an acronym for ***Simplified Molecular-Input Line-Entry System*** # + id="Zplnid2B5Vju" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 230} outputId="0a6e445c-37d2-4827-a136-8340ca681ea1" sol.SMILES # + [markdown] id="fcTugywQ5rSm" colab_type="text" # **2.4. Convert a molecule from the SMILES string to an rdkit object** # + id="gcv3qm7k5nOR" colab_type="code" colab={} from rdkit import Chem # + id="Jy5-aa0c5wOY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="afe12350-e801-4a66-d132-4f4d1ba144ff" Chem.MolFromSmiles(sol.SMILES[0]) # + [markdown] id="studHt-S522g" colab_type="text" # **2.5. Working with rdkit object** # + id="1-iI6u5Q5y2L" colab_type="code" colab={} m = Chem.MolFromSmiles('ClCC(Cl)(Cl)Cl') # + id="NoL_6Qj957Jg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1671d254-9f58-4e7c-a4be-327fc2da581a" m.GetNumAtoms() # + [markdown] id="TNX_d0Kn6Cw3" colab_type="text" # # #3. Calculate molecular descriptors in rdkit # + [markdown] id="YjMNXly26IPx" colab_type="text" # **3.1. Convert list of molecules to rdkit object** # + id="d4P9QRyk59ID" colab_type="code" colab={} from rdkit import Chem # + id="xs8Cq4zT6PW-" colab_type="code" colab={} mol_list2 = [Chem.MolFromSmiles(element) for element in sol.SMILES] # + id="u6gKrGcL6Yij" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="7c0cac52-31f9-4c01-ec70-3e7a36e2cb2e" len(mol_list2) # + id="vvelJ-3R6bIB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="6ac860ea-3678-4cd4-f22b-0c871747cc4e" mol_list2[:5] # + [markdown] id="4gEZBahI6i2t" colab_type="text" # **3.2. 
Calculate molecular descriptors** # + [markdown] id="UswoH4tw6o-M" colab_type="text" # To predict LogS (log of the aqueous solubility), the study by Delaney makes use of 4 molecular descriptors: # # # # 1. cLogP (Octanol-water partition coefficient) # 2. MW (Molecular weight) # 3. RB (Number of rotatable bonds) # 4. AP (Aromatic proportion = number of aromatic atoms / total number of heavy atoms) # # + [markdown] id="MClAzcmt7hZ7" colab_type="text" # Unfortunately, rdkit readily computes the first 3. As for the AP descriptor, we will calculate this by manually computing the ratio of the number of aromatic atoms to the total number of heavy atoms which rdkit can compute. # + [markdown] id="JQlEVatt7kVC" colab_type="text" # **3.2.1. LogP, MW and RB** # + id="51NZWnW86dkN" colab_type="code" colab={} import numpy as np from rdkit.Chem import Descriptors # + id="I4jvlQ7V7n7S" colab_type="code" colab={} def generate(smiles, verbose=False): moldata= [] for elem in smiles: mol=Chem.MolFromSmiles(elem) moldata.append(mol) baseData= np.arange(1,1) i=0 for mol in moldata: desc_MolLogP = Descriptors.MolLogP(mol) desc_MolWt = Descriptors.MolWt(mol) desc_NumRotatableBonds = Descriptors.NumRotatableBonds(mol) row = np.array([desc_MolLogP, desc_MolWt, desc_NumRotatableBonds]) if(i==0): baseData=row else: baseData=np.vstack([baseData, row]) i=i+1 columnNames=["MolLogP","MolWt","NumRotatableBonds"] descriptors = pd.DataFrame(data=baseData,columns=columnNames) return descriptors # + id="SuezfXSZ7siY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 406} outputId="aa9685e6-3144-4ba4-c420-cba260544166" df = generate(sol.SMILES) df # + [markdown] id="JyqZZWGA7xeA" colab_type="text" # **3.2.2. Aromatic proportion** # + [markdown] id="rAubjWwE78BX" colab_type="text" # **3.2.1.1. Number of aromatic atoms** # # Here, we will create a custom function to calculate the Number of aromatic atoms. 
With this descriptor we can use it to subsequently calculate the AP descriptor. # # Computing for a single molecule. # + id="GBlRBf_m7u58" colab_type="code" colab={} m = Chem.MolFromSmiles('COc1cccc2cc(C(=O)NCCCCN3CCN(c4cccc5nccnc54)CC3)oc21') # + id="XoIE-ZI78GwU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 621} outputId="0d5ea214-1c5d-4709-bfb1-a4060240bd6c" aromatic_atoms = [m.GetAtomWithIdx(i).GetIsAromatic() for i in range(m.GetNumAtoms())] aromatic_atoms # + id="4Q7f7M9h8KIC" colab_type="code" colab={} def AromaticAtoms(m): aromatic_atoms = [m.GetAtomWithIdx(i).GetIsAromatic() for i in range(m.GetNumAtoms())] aa_count = [] for i in aromatic_atoms: if i==True: aa_count.append(1) sum_aa_count = sum(aa_count) return sum_aa_count # + id="OC5mh-7r8QWk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3e8496bd-b582-4328-c96c-0073bc60ce46" AromaticAtoms(m) # + [markdown] id="C7XRd3Si8W2h" colab_type="text" # Computing for molecules in the entire dataset # + id="j4bcwYqF8TOK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="df3ba271-ad0e-443d-d3e9-7d6faad5c43a" desc_AromaticAtoms = [AromaticAtoms(element) for element in mol_list2] desc_AromaticAtoms # + [markdown] id="p9FGI3lG8yIx" colab_type="text" # **3.2.1.2. Number of heavy atoms** # + id="sW6QSRfN8Z81" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="5c6fff9f-6325-4c4d-b18a-a9759d3661ba" desc_HeavyAtomCount = [Descriptors.HeavyAtomCount(element) for element in mol_list2] desc_HeavyAtomCount # + [markdown] id="r0SH4xtV9FAw" colab_type="text" # **3.2.1.3. 
Computing the Aromatic Proportion (AP) descriptor** # + id="OTZwSdqA83fp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a3a699ce-6c32-49f5-c578-d58cf27a0f80" desc_AromaticProportion = [AromaticAtoms(element)/Descriptors.HeavyAtomCount(element) for element in mol_list2] desc_AromaticProportion # + id="vHbD_Ly69vxX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 406} outputId="72c5871b-b495-446a-dda1-1f25210bed18" df_desc_AromaticProportion = pd.DataFrame(desc_AromaticProportion, columns=['AromaticProportion']) df_desc_AromaticProportion # + [markdown] id="03rmwr6D9WYz" colab_type="text" # **3.3. X matrix (Combining all computed descriptors into 1 dataframe)** # + id="WLge4HW-9JdV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 406} outputId="e708c057-03a4-4911-fdfd-4d10259e114c" X = pd.concat([df,df_desc_AromaticProportion], axis=1) X # + [markdown] id="m4YHQ6g397Y8" colab_type="text" # **3.4. 
Y matrix** # + id="eTFdZTKl9an4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} outputId="3799435f-dc47-42ec-fa4c-f54598781ee1" sol.head() # + id="vkx0BHuM-A-z" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 230} outputId="9bc0ae1b-8945-4364-862c-a3ad38c1712d" Y = sol.iloc[:,1] Y # + [markdown] id="iWo31grU-F2g" colab_type="text" # #Model building # + [markdown] id="bsvF9OW5-Irn" colab_type="text" # **Data split** # + id="D_8fPqOW-Diz" colab_type="code" colab={} from sklearn.model_selection import train_test_split # + id="-ubEgHxz-MDC" colab_type="code" colab={} X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2) # + [markdown] id="VME71inY-Q1f" colab_type="text" # **Linear Regression Model¶** # + id="L3xN5auy-OCH" colab_type="code" colab={} from sklearn import linear_model from sklearn.metrics import mean_squared_error, r2_score # + id="Zvdt_h3Z-XG5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="46135797-b0e1-484b-ffec-ead97a8f94fc" model = linear_model.LinearRegression() model.fit(X_train, Y_train) # + id="-hF5WhKr-Zjx" colab_type="code" colab={} Y_pred_train = model.predict(X_train) # + id="5v4CHhbG-cfi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="6b08f5a1-8424-4f9c-b732-9afb8e93dd12" print('Coefficients:', model.coef_) print('Intercept:', model.intercept_) print('Mean squared error (MSE): %.2f' % mean_squared_error(Y_train, Y_pred_train)) print('Coefficient of determination (R^2): %.2f' % r2_score(Y_train, Y_pred_train)) # + [markdown] id="riFc6L1C-hx4" colab_type="text" # **Predict the X_test** # + id="IQUTsWnj-fnN" colab_type="code" colab={} Y_pred_test = model.predict(X_test) # + id="isNT0zuq-kqt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 87} outputId="2b0b3999-38e3-44e5-9014-98c7e9772912" print('Coefficients:', model.coef_) print('Intercept:', model.intercept_) 
print('Mean squared error (MSE): %.2f' % mean_squared_error(Y_test, Y_pred_test)) print('Coefficient of determination (R^2): %.2f' % r2_score(Y_test, Y_pred_test)) # + id="4yjPEHRt-xt4" colab_type="code" colab={} import matplotlib.pyplot as plt # %matplotlib inline # + id="gwRVjD_T-nIB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="6e20972d-2a88-4430-8650-dc2e146aca9f" plt.figure(figsize=(5,11)) # 2 row, 1 column, plot 1 plt.subplot(2, 1, 1) plt.scatter(x=Y_train, y=Y_pred_train, c="#7CAE00", alpha=0.3) # Add trendline # https://stackoverflow.com/questions/26447191/how-to-add-trendline-in-python-matplotlib-dot-scatter-graphs z = np.polyfit(Y_train, Y_pred_train, 1) p = np.poly1d(z) plt.plot(Y_test,p(Y_test),"#F8766D") plt.ylabel('Predicted LogS') # 2 row, 1 column, plot 2 plt.subplot(2, 1, 2) plt.scatter(x=Y_test, y=Y_pred_test, c="#619CFF", alpha=0.3) z = np.polyfit(Y_test, Y_pred_test, 1) p = np.poly1d(z) plt.plot(Y_test,p(Y_test),"#F8766D") plt.ylabel('Predicted LogS') plt.xlabel('Experimental LogS') # + id="vDMXpaXp-2dw" colab_type="code" colab={} from sklearn.ensemble import RandomForestRegressor # + id="T6UY1zePAADg" colab_type="code" colab={} model = RandomForestRegressor() # + id="V1wA8KEpADZw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="630b4057-bfb7-4e40-a085-63960b596aa7" model.fit(X_train,Y_train) # + id="B0b8tqThAJa9" colab_type="code" colab={} y_pred_train = model.predict(X_train) # + id="Agq6Vu15AQkU" colab_type="code" colab={} y_pred_test = model.predict(X_test) # + id="9oLxNZH9AVww" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 677} outputId="393e718c-8468-4b39-ec42-3eb422310e06" plt.figure(figsize=(5,11)) # 2 row, 1 column, plot 1 plt.subplot(2, 1, 1) plt.scatter(x=Y_train, y=y_pred_train, c="#7CAE00", alpha=0.3) # Add trendline # 
https://stackoverflow.com/questions/26447191/how-to-add-trendline-in-python-matplotlib-dot-scatter-graphs z = np.polyfit(Y_train, y_pred_train, 1) p = np.poly1d(z) plt.plot(Y_test,p(Y_test),"#F8766D") plt.ylabel('Predicted LogS') # 2 row, 1 column, plot 2 plt.subplot(2, 1, 2) plt.scatter(x=Y_test, y=y_pred_test, c="#619CFF", alpha=0.3) z = np.polyfit(Y_test, y_pred_test, 1) p = np.poly1d(z) plt.plot(Y_test,p(Y_test),"#F8766D") plt.ylabel('Predicted LogS') plt.xlabel('Experimental LogS') # + id="XElI2h1VAtXK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5fe5974e-8200-49e0-9a35-1d7f2f37eb4a" print(model.score(X_train,Y_train)) # + id="LHqyh-QFA4ml" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="1545dc18-73ac-4714-e202-3686a6095c1a" print(model.score(X_test,Y_test)) # + id="35WJcWcZBlnx" colab_type="code" colab={}
solubility.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # A Data-Filled Day with the Mouse # ## Introduction and Motivation # # Disney is a gold mine of data. Between the Parks, Movies, Television Networks, and other Entertainment subsidiaries, there is an unlimited amount of data sources for aspiring Data Scientists to explore. With so many opportunities, I wanted to take the time to explore one avenue and show how even a simple project could prove beneficial to such a large corporation. # # # Disney Parks Blog (http://disneyparks.disney.go.com/blog/) is Disney's primary outlet for announcements and news about Disneyland Resort, Walt Disney World, and Disney Cruise Lines. The Disney Parks Blog, like most blogs on the internet, allows authors (bloggers) to write about a topic of interest - in this case, Disney related subjects. Once a Blog Post is submitted, anyone with access to the World Wide Web can view and add comments to the Blog Post. # # # Why would anyone be interested in blog data? Imagine a Disney marketing team looking to capitalize on park audiences. Which parks should the team focus on to maximize return? One option is to compare park popularity by ticket sales. Another would be to look at the parks with the most facebook likes or commenters on the blog. What about a Disney advertising group trying to determine when to schedule Disney ads to an online audience? Just by exploring some blog trends, we can predict the most likely time of the day Disney enthusiasts will be online. These are just a few of the ways that data analysis can be useful to Disney Corporation. # # # Ultimately, I'm doing this mini-project to improve my skillset and explore a data set that I've never seen previously. However, like many others, Disney was a large part of my childhood. 
The mere fact that this small project can be useful to Disney only bolsters my enthusiasm for data. # # # Before I begin, it is important to mention that this is an exploration project - that is to say, there is no clearly defined question that needs to be answered. As I often find with data-driven projects, the amount of guidance is minimal. It is therefore up to the Data Scientist to explore and do his diligence to find questions worthwhile of answering. # ## Layout # # 1. Introduction and Motivation # - An introduction to the project and a motivation for running the analysis. # 2. Layout # - "You are here" # 3. Prerequisites # - High level material, such as importing frequently used python packages and defining commonly used variables (I'm originally a Computer Scientist, so I do my best to avoid globals). # 4. The Data # - A brief introduction to the data. # 5. Exploration # - Graphs and summary data to help describe the data. # 6. Cruising on the Rise # - I take an in-depth look at Disney Cruise Lines. # 7. Weekday vs. Weekend Posts # - An analysis to see if posting on a weekday is advantageous. # 8. Negative Comment Detection (Sentiment Analysis) # - An attempt at sentiment analysis on blog post comments. # 9. Appendix # - Methods that aren't directly related to the analysis. Most code to clean / cache data. 
#
#
# ## Prerequisites

# +
import collections
import itertools
from datetime import datetime
import json
import os
import os.path
import pickle
from pprint import pprint
from random import shuffle
import re
import warnings

# Ignore future matplotlib warnings
warnings.filterwarnings("ignore", module="matplotlib")

import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import pandas as pd
import seaborn as sns
import tensorflow as tf

# BUG FIX: sklearn.cross_validation was removed in scikit-learn 0.20; the
# replacement module is sklearn.model_selection (same train_test_split API,
# and the import style the KDE notebook earlier in this file already uses).
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score

from nltk.tokenize import RegexpTokenizer
from stop_words import get_stop_words
from nltk.stem.porter import PorterStemmer
from gensim import corpora, models
from gensim.models.ldamodel import LdaModel

# Matplotlib settings.
# %matplotlib inline
plt.rcParams.update({'font.size': 18})

# Seaborn settings.
sns.set(rc={"figure.figsize": (14, 7)})
sns.set_palette(sns.color_palette("Blues_d"))
# -

META = {
    # File paths for the data.
    'data': {
        'blog': {
            'path': 'data/blog'
        },
        'sentiments': {
            'filename': "data/sentiment/sentiments.json"
        },
        'comments_positive': {
            'filename': "data/sentiment/comments_positive.json"
        },
        'comments_negative': {
            'filename': "data/sentiment/comments-negative.json"
        },
        'cache': 'data/cache'
    },
    # Models.
    'model': {
        'sentiment': 'models/sentiment/sentiment-model.ckpt'
    },
    # Plotting info for matplotlib / seaborn
    'plots': {
        'palette': 'Blues_d',
        'width': 16,
        'height': 8,
    }
}

# ## The Data

# ### Acquisition
#
# After browsing the web for a while, I determined that the blog was ideal because it is full of content and is freely available to the public.
#
# As I do not have direct access to Disney's Data, I developed a spider (code that autonomously navigates the website) to slowly scrape all of the blog posts. Once all of the blog posts were available locally, the data was wrangled into a format that could be easily consumed.
# # **(Please note that as of early 2016, the disney blog has updated. Features, such as Facebook Likes, maybe be inconsistent with the data I obtained late 2015).** # ### Wrangling # # For the sake of brevity, it is not necessary to discuss how the data was obtained and cleaned. In the end, ~10,000 JSON files were available with content similar to the following: # + def demo_json_file(): """Demo the json file contents.""" path = "2009/09/chefs-offer-taste-of-epcot-international-food-wine-festival.json" filepath = "{0}/{1}".format(META['data']['blog']['path'], path) with open(filepath, 'r') as file: pprint(json.load(file)) # Execute. demo_json_file() # - # For each post, information such as the author, date of creation, number of Facebook likes, and post "body" (content) was collected. In addition, comments related to each post were collected. # ### Storage # # If I had an exorbitant amount of data, I would likely build a Mesos cluster with an HDFS and use Spark/Pandas for the majority of the work. # # However, due to the small nature of the data set, all JSON files were stored locally and converted to Panda's DataFrames (see appendix for code). posts, tags, filings, comments = post_dataframes() posts.head(3) tags.head(3) filings.head(3) comments.head(3) print("Total Blog Posts: {0}".format(len(posts))) # # ## Explore! # # There are about ~10,000 json files that contain the body of a blog post, anonymous comments, and some other meta data... Now what? How do we determine what is important? # # Well, that's the cool part about being a Data Scientist - we get to define what is important! So let's begin! # ### Distribution of Features # # One of the most useful steps of exploring data is plotting distributions. Visualizing distributions helps depict patterns that would otherwise be hard to understand by looking at raw numbers. # #### Words per Post # + def explore_words_per_post(posts): """ Explore and plot the number of words per post. 
Key arguments: posts -- The posts. """ word_counts = np.zeros(len(posts.index)) for i, (url, post) in enumerate(posts.iterrows()): word_counts[i] = len(post['body'].split(" ")) # Print some general stats. print_general_stats(word_counts) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height'] * 3), ncols=1, nrows=3) # Draw boxplot. ax = sns.boxplot(x=word_counts, ax=axs[0]) ax.set_title("Words per Post Boxplot") # Draw full distribution. ax = sns.distplot(word_counts, kde=0, ax=axs[1]) ax.set_title("Number of Words per Post") ax.set_xlabel('Number of Words') ax.set_ylabel('Count') # Lets look at the number of posts with 500 or less words. word_counts_zoomed_idxs = np.where(word_counts <= 500) bins = range(0, 501) # Draw zoomed in distribution. ax = sns.distplot(word_counts[word_counts_zoomed_idxs], bins=bins, kde=0, ax=axs[2]) ax.set_title("Number of Words per Post (Zoomed)") ax.set_xlabel("Number of Words") ax.set_ylabel("Count") # Execute. explore_words_per_post(posts.copy()) # - # Looking at the distribution, it looks like the bulk of the posts are sub 600 words with a mean of 200. This seems reasonable since I would expect each post to be a few paragraphs. # #### Comments per Post # + def explore_comments_per_post(posts, comments): """ Explore and plot the number of comments per post. Key arguments: posts -- The posts. comments -- The post comments. """ comment_counts = np.zeros(len(posts.index)) for i, (url, post) in enumerate(posts.iterrows()): comment_counts[i] = len(comments.loc[url]) if url in comments.index else 0 # Print some general stats. print_general_stats(comment_counts) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height'] * 3), ncols=1, nrows=3) # Draw boxplot. ax = sns.boxplot(x=comment_counts, ax=axs[0]) ax.set_title("Comments per Post Boxplot") # Draw full distribution. 
ax = sns.distplot(comment_counts, kde=0, ax=axs[1]) ax.set_title("Number of Comments per Post") ax.set_xlabel('Number of Comments') ax.set_ylabel('Count') # Lets look at the number of posts with 20 or less comments. comment_counts_zoomed_idxs = np.where(comment_counts <= 20) bins = range(0, 21) # Draw zoomed in distribution. ax = sns.distplot(comment_counts[comment_counts_zoomed_idxs], bins=bins, kde=0, ax=axs[2]) ax.set_title("Number of Comments per Post Distribution (Zoomed)") ax.set_xlabel("Number of Comments") ax.set_ylabel("Count") # Execute. explore_comments_per_post(posts.copy(), comments.copy()) # - # Each post has a median of ~6 comments, and quite a few had 40+. One thing that stands out is the complete lack of posts with one comment (see below)... I'm still pondering why this would be, other than a random phenomenon. # + def explore_comments_with_one_post(posts, comments): """ Explore the number of posts with one comment. Key arguments: posts -- The posts. comments -- The post comments. """ comment_counts = np.zeros(len(posts.index)) for i, (url, post) in enumerate(posts.iterrows()): comment_counts[i] = len(comments.loc[url]) if url in comments.index else 0 one_comment_count = len(np.where(comment_counts == 1)[0]) print("Number of posts with one comment: {0}".format(one_comment_count)) # Execute. explore_comments_with_one_post(posts.copy(), comments.copy()) # - # #### Words per Comment # + def explore_words_per_comment_per_post(comments): """ Explore and plot the number of words per comment. Key arguments: comments -- The post comments. """ word_counts = np.zeros(len(comments.index)) for i, (url, comment) in enumerate(comments.iterrows()): word_counts[i] = len(comment['body'].split(" ")) # Print some general stats. print_general_stats(word_counts) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height'] * 3), ncols=1, nrows=3) # Draw boxplot. 
ax = sns.boxplot(x=word_counts, ax=axs[0]) ax.set_title("Words per Comment Boxplot") # Draw full distribution. ax = sns.distplot(word_counts, kde=0, ax=axs[1]) ax.set_title("Number of Words per Comment") ax.set_xlabel('Number of Words') ax.set_ylabel('Count') # Lets look at the number of comments with 100 or less words. word_counts_zoomed_idxs = np.where(word_counts <= 100) bins = range(0, 101) # Draw zoomed in distribution. ax = sns.distplot(word_counts[word_counts_zoomed_idxs], bins=bins, kde=0, ax=axs[2]) ax.set_title("Number of Words per Comment (Zoomed)") ax.set_xlabel("Number of Words") ax.set_ylabel("Count") # Execute. explore_words_per_comment_per_post(comments) # - # The median number of words per comment is ~28 - which is more than I expected (I assumed there would be a lot of "I love Disney!!" type posts). There is one extreme outlier where someone wrote a 1200 word comment. Out of curiosity, I wanted to take a look at this post. I assumed the comment was either a) someone who really likes to tell strangers what's on their mind or b) someone who was ranting about their time at Disney. # # # I was wrong on both counts. # # # In short, the comment is from a father thanking Disney for the absolutely wonderful time he and his daughter had at Disneyland. One of the wonderful aspects about exploration is that for curious people like myself, you never know what to expect. Sometimes, what you find might be quite inspiring! # + def explore_long_comment(comments): """ Explore the long comment. Key arguments: comments -- The post comments. """ word_counts = np.zeros(len(comments.index)) for i, (url, comment) in enumerate(comments.iterrows()): word_counts[i] = len(comment['body'].split(" ")) idx = np.where(word_counts == 1247)[0] long_comment = comments.iloc[idx] print("Long comment: {0}".format(long_comment['body'].values)) # Execute. 
explore_long_comment(comments) # - # #### Likes per Post # + def explore_likes_per_post(posts): """ Explore and plot the number of facebook likes per post. Key arguments: posts -- The posts. """ like_counts = np.zeros(len(posts.index)) for i, (url, post) in enumerate(posts.iterrows()): like_counts[i] = post['facebook_likes'] if post['facebook_likes'] != None else np.nan # Don't include any posts where we could not determine the number of likes in pre-processing. like_counts = like_counts[~np.isnan(like_counts)] # Print some general stats. print_general_stats(like_counts) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height'] * 3), ncols=1, nrows=3) # Draw boxplot. ax = sns.boxplot(x=like_counts, ax=axs[0]) ax.set_title("Likes per Post Boxplot") # Draw full distribution. ax = sns.distplot(like_counts, kde=0, bins=30, ax=axs[1]) ax.set_title("Likes per Post") ax.set_xlabel('Number of Likes') ax.set_ylabel('Count') # Lets look at the number of posts with 200 or less likes. like_counts_zoomed_idxs = np.where(like_counts < 200) bins = range(0, 201) # Draw zoomed in distribution. ax = sns.distplot(like_counts[like_counts_zoomed_idxs], bins=bins, kde=0, ax=axs[2]) ax.set_title("Number of Likes per Post (Zoomed)") ax.set_xlabel("Number of Likes") ax.set_ylabel("Count") # Execute. explore_likes_per_post(posts.copy()) # - # The median number of likes per post was about 258, with a rather large deviation. Some posts had over 100,000 likes (more on these later)! # ### Varied Exploration # # I've plotted some generic distributions... Lets take a look at some other potentially interesting features. # #### Most Popular Posts (by Number of Comments) # + def explore_popular_posts_by_comments(posts, comments, filings, top): """ Explore the most popular posts ranked by the number of comments. Key arguments: posts -- The posts. comments: -- The post comments. filings: -- The post filings. top: -- The max number of posts to show. 
""" # Init the comment count to 0 for every row. posts['comment_count'] = 0 # Simple function that counts the comments for every post def count_comments(post): post['comment_count'] = len(comments.loc[post['url']]) if post['url'] in comments.index else 0 return post # Apply the count comments function to every row. We have to reset the index # because pandas doesn't pass it during apply(). posts = posts.reset_index().apply(count_comments, axis=1) posts.set_index('url', inplace=True) posts.sort_values('comment_count', ascending=False, inplace=True) posts_it = posts.iterrows() i = 0 while i < top: url, post = next(posts_it) print("({0}) {1}".format(post['comment_count'], post['title'])) print("\t{0}".format(datetime.fromtimestamp(int(post['timestamp'])).strftime("%Y-%m-%d"))) filings_display = [] if url in filings.index: filings_display = filings.loc[url]['filing'] filings_display = filings_display.tolist() if not isinstance(filings_display, str) else [filings_display] print("\t{0}".format(filings_display)) print("") i += 1 # Execute. explore_popular_posts_by_comments(posts.copy(), comments.copy(), filings.copy(), 15) # - # When sorting by the number of comments, no apparent trend is present. The topics seem to range from MagicBands, to Frozen, to park merchandise. # #### Most Popular Posts (by Facebook Likes) # + def explore_popular_posts_by_likes(posts, comments, filings, top): """ Explore the most popular posts ranked by the number of likes. Key arguments: posts -- The posts. comments -- The post comments. filings -- The post filings. top -- The top number of posts to show. 
""" posts.sort_values('facebook_likes', ascending=False, inplace=True) posts_it = posts.iterrows() i = 0 while i < top: url, post = next(posts_it) print("({0}) {1}".format(post['facebook_likes'], post['title'])) print("\t{0}".format(datetime.fromtimestamp(int(post['timestamp'])).strftime("%Y-%m-%d"))) filings_display = [] if url in filings.index: filings_display = filings.loc[url]['filing'] filings_display = filings_display.tolist() if not isinstance(filings_display, str) else [filings_display] print("\t{0}".format(filings_display)) print("") i += 1 # Execute. Send a copy of posts since we will be manipulating it. explore_popular_posts_by_likes(posts.copy(), comments.copy(), filings.copy(), 15) # - # Unlike sorting by comments, sorting by Facebook Likes shows a few prominent topics. Not surprisingly, Star Wars and Frozen top the list. One topic that surprised me was the number of popular cruise-related topics. I was intrigued enough to bookmark it for further analysis (more on this later). # #### Most Popular Topics (by Number of Comments) # + def explore_popular_topics_by_comments(posts, filings): """ Explore the most popular posts ranked by the number of comments. Key arguments: posts -- The posts. filings -- The post filings. """ # Init the count to 0 for every row. filings['comment_count'] = 0 # Simple function that counts the number of comments for every topic. def count_comments(filing): filing['comment_count'] = len(comments.loc[filing['url']]) if filing['url'] in comments.index else 0 return filing # Apply the count comments function to every row. We have to reset the index # because pandas doesn't pass it during apply(). 
filings = filings.reset_index().apply(count_comments, axis=1) filings.set_index('url', inplace=True) grouped = filings.groupby('filing', as_index=False).sum() grouped.sort_values('comment_count', ascending=False, inplace=True) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height']), ncols=1, nrows=1) # Draw bar plot. ax = sns.barplot(x='filing', y='comment_count', palette=META['plots']['palette'], data=grouped, ax=axs) ax.set_title("Comments per Topic") ax.set_xlabel("Topic") ax.set_ylabel("Number of Comments") # Make the labels vertical. for item in ax.get_xticklabels(): item.set_rotation(90) # Execute. explore_popular_topics_by_comments(posts.copy(), filings.copy()) # - # At a quick glance, it is apparent that the most popular topics are related to the parks with "Disney World" acting as a catch-all. Magic Kingdom seemed to be the most popular topic, followed by Epcot. # #### Most Popular Topics (by Facebook Likes) # + def explore_popular_topics_by_likes(posts, filings): """ Explore the most popular posts ranked by the number of likes. Key arguments: posts -- The posts. filings -- The post filings. """ # Init the count to 0 for every row. filings['facebook_likes'] = 0 # Simple function that counts the likes for every post. def count_likes(filing): filing['facebook_likes'] = posts.loc[filing['url']]['facebook_likes'] return filing # Apply the count likes function to every row. We have to reset the index # because pandas doesn't pass it during apply(). filings = filings.reset_index().apply(count_likes, axis=1) filings.set_index('url', inplace=True) grouped = filings.groupby('filing', as_index=False).sum() grouped.sort_values('facebook_likes', ascending=False, inplace=True) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height']), ncols=1, nrows=1) # Draw bar plot. 
ax = sns.barplot(x='filing', y='facebook_likes', palette=META['plots']['palette'], data=grouped, ax=axs) ax.set_title("Facebook Likes per Topics") ax.set_xlabel("Topic") ax.set_ylabel("Number of Facebook Likes") # Make the labels vertical. for item in ax.get_xticklabels(): item.set_rotation(90) # Execute. explore_popular_topics_by_likes(posts.copy(), filings.copy()) # - # Aggregating by Likes was very similar to aggregating by comments. # #### Most Common Commenter Locations # + def explore_commenter_locations(comments): """ Explore the locations of all the commenters. Key arguments: comments -- The comments. """ # We only want to look at comments that are not the blog poster. comments_without_author = comments[comments['is_post_author'] == False] # Find all the unique locations and their counts. counts = comments_without_author['location'].value_counts() # Lets only look at locations that have more than 100 commentors. count_idx = np.where(counts.values > 100) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height']), ncols=1, nrows=1) # Draw bar plot. ax = sns.barplot(x=counts.index[count_idx], y=counts.values[count_idx], palette=META['plots']['palette'], ax=axs) ax.set_title("Comments by Location") ax.set_xlabel("Location") ax.set_ylabel("Number of Comments") # Execute. explore_commenter_locations(comments.copy()) # - # Not surprisingly, the majority of the comments came from CA, FL, NY, and TX (which also happen to be the biggest states by population). # #### Comments per Post Year # + def explore_comments_per_post_year(posts, comments): """ Explore the comments per year. Key arguments: posts -- The posts. comments -- The comments. """ # We only want to look at comments that are not the blog poster. comments = comments[comments['is_post_author'] == False].copy() # Merge the post timestamp into the comments. 
comments = pd.merge(comments, posts[['timestamp']], left_index=True, right_index=True, suffixes=('_comment', '_post')) # Determine the datetime using the post's timestamp. comments['datetime'] = pd.to_datetime(comments['timestamp_post'], unit='s') comments['year'] = comments['datetime'].map(lambda x: int(x.strftime('%Y'))) # Remove 2009, 2015 since we have an incomplete data set for those years. comments = comments[(comments['year'] > 2009) & (comments['year'] < 2015)].copy() counts = comments['year'].value_counts() counts.sort_index(inplace=True) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height']), ncols=1, nrows=1) # Draw bar plot. ax = sns.barplot(x=counts.index, y=counts.values, ax=axs) ax.set_title("Comments by Year") ax.set_xlabel("Year") ax.set_ylabel("Number of Comments") # Execute. explore_comments_per_post_year(posts.copy(), comments.copy()) # - # Surprisingly, the number of comments seemed to decrease year after year. This could mean that either the blog was becoming less popular or an alternative means of feedback was being used... # #### Likes by Post Year # + def explore_likes_by_post_year(posts): """ Explore number of likes by year. Key arguments: posts -- The posts. """ # Determine the year for each post. posts['datetime'] = pd.to_datetime(posts['timestamp'], unit='s') posts['year'] = posts['datetime'].map(lambda x: int(x.strftime('%Y'))) # Remove 2009 since we have an incomplete data set for that year. posts = posts[(posts['year'] > 2009) & (posts['year'] < 2015)].copy() # Keep only facebook likes and year. posts = posts[['year', 'facebook_likes']].copy() grouped = posts.groupby('year', as_index=False).sum() grouped.sort_values('year', inplace=True) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height']), ncols=1, nrows=1) # Draw bar plot. 
ax = sns.barplot(x='year', y='facebook_likes', data=grouped, ax=axs) ax.set_title("Likes by Year") ax.set_xlabel("Year") ax.set_ylabel("Number of Likes") # Execute. explore_likes_by_post_year(posts.copy()) # - # Unlike the number of comments, the number of Facebook Likes have exploded year after year. The likely justification is two-fold: # # - Facebook has been steadily rising in popularity # - It is easier for readers to press the "like button" than to write a comment. # # Due to increased usage, I determined that Facebook Likes would be a better measure of popularity for future analysis. # #### Likes by Post Month # + def explore_likes_by_post_month(posts): """ Explore number of post likes by month. Key arguments: posts -- The posts. """ # Determine the month for each post. posts['datetime'] = pd.to_datetime(posts['timestamp'], unit='s') posts['year'] = posts['datetime'].map(lambda x: int(x.strftime('%Y'))) posts['month'] = posts['datetime'].map(lambda x: x.strftime('%m')) # Keep only recent years and drop 2015 since we have an incomplete data set. posts = posts[(posts['year'] > 2010) & (posts['year'] < 2015)].copy() # Keep only facebook likes and month. posts = posts[['month', 'facebook_likes']].dropna().copy() posts['facebook_likes'] = pd.Series(posts['facebook_likes'], dtype=np.float) grouped = posts.groupby('month', as_index=False).mean() grouped.sort_values('month', inplace=True) fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height']), ncols=1, nrows=1) # Draw bar plot. ax = sns.barplot(x='month', y='facebook_likes', data=grouped, palette=META['plots']['palette'], ax=axs) ax.set_title("Likes by Month") ax.set_xlabel("Month") ax.set_ylabel("Mean Number of Likes") # Execute. explore_likes_by_post_month(posts.copy()) # - # When aggregating the number of likes by month, we see a fairly consistent pattern. The exceptions is February, which had a slightly smaller average number of Likes. 
# #### Likes by Post Day of Week

# +
def explore_likes_by_post_dow(posts):
    """
    Explore number of post likes by day of week.

    Key arguments:
    posts -- The posts.
    """
    # Determine the day of week for each post ('%w': 0 = Sunday ... 6 = Saturday).
    posts['datetime'] = pd.to_datetime(posts['timestamp'], unit='s')
    posts['dow'] = posts['datetime'].map(lambda x: x.strftime('%w'))

    # Keep only facebook likes and day of week.
    posts = posts[['dow', 'facebook_likes']].dropna().copy()
    posts['facebook_likes'] = pd.Series(posts['facebook_likes'], dtype=np.float)

    grouped = posts.groupby('dow', as_index=False).mean()
    grouped.sort_values('dow', inplace=True)

    fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height']),
                            ncols=1, nrows=1)

    # Draw bar plot.
    ax = sns.barplot(x='dow', y='facebook_likes', data=grouped,
                     palette=META['plots']['palette'], ax=axs)
    ax.set_title("Likes by Day of Week")
    ax.set_xlabel("Day of Week of Post")
    ax.set_ylabel("Mean Number of Likes")

# Execute.
explore_likes_by_post_dow(posts.copy())
# -

# When aggregating the number of likes by day of week, we see an obvious pattern. Both
# Saturday and Sunday (the weekend) have a significantly lower average number of Likes.

# #### Comments by Time of the Day

# +
def explore_comments_by_time_of_day(comments):
    """
    Explore the most common time of day for comments.

    Key arguments:
    comments -- The comments.
    """
    # We only want to look at comments that are not the blog poster.
    comments = pd.DataFrame(comments[comments['is_post_author'] == False])

    comments['datetime'] = pd.to_datetime(comments['timestamp'], unit='s')
    comments['tod'] = comments['datetime'].map(lambda x: x.strftime('%H'))

    counts = comments['tod'].value_counts()
    counts.sort_index(inplace=True)

    fig, axs = plt.subplots(figsize=(META['plots']['width'], META['plots']['height']),
                            ncols=1, nrows=1)

    # Draw bar plot.
    ax = sns.barplot(x=counts.index, y=counts.values,
                     palette=META['plots']['palette'], ax=axs)
    ax.set_title("Comments by Time of Day")
    ax.set_xlabel("Time of Day")
    ax.set_ylabel("Number of Comments")

# Execute.
explore_comments_by_time_of_day(comments.copy())
# -

# Based on the number of comments by time of day, it is evident that people prefer posting
# comments in the afternoon or evening (Eastern Standard Time). This is unsurprising as
# 0800-1000 EST most people are either getting ready for work or are not even up yet!

# ### Exploration Analysis and Questions
#
# At this point, some basic exploration of the data set is complete. Some things to note:
#
# - Most of the counts follow a Poisson-like distribution.
# - Facebook Likes are increasing year after year and are likely a better estimate of "popularity".
# - Popular Disney topics are the movies (Star Wars and Frozen) and the parks (Magic Kingdom and Epcot).
# - The most active States in regards to the blog are CA, FL, TX, and NY.
# - There appears to be distinct days/times when the blog is more "active".
#
# Deeper analysis can be performed based on the findings from the basic exploration
# (e.g. What are some common topics amongst the most popular posts?). Ultimately, analysis
# that will be beneficial for the company is important - not just interesting facts.
# Topics that come to mind are:
#
# - Why are cruise line posts so popular?
# - Is there an advantage to posting on a weekday vs. weekend or morning vs. evening?
# - Can we identify "negative" comments? (Comments where the user seems angry)

# ## Cruising on the Rise

# +
def analyze_cruise_lines(posts, filings):
    """
    Explore the cruise lines.

    Key arguments:
    posts -- The posts.
    filings -- The post filings.
    """
    cruise_filings = filings[filings['filing'] == "Disney Cruise Line"]
    assert(np.all(cruise_filings['filing'].values == "Disney Cruise Line"))

    # Merge the filings and the posts
    cruise_posts = pd.merge(posts, cruise_filings, how='right',
                            left_index=True, right_index=True)

    # Use the post's timestamp to determine the datetime.
    cruise_posts['datetime'] = pd.to_datetime(cruise_posts['timestamp'], unit='s')
    cruise_posts['year'] = cruise_posts['datetime'].map(lambda x: int(x.strftime('%Y')))
    cruise_posts['month'] = cruise_posts['datetime'].map(lambda x: x.strftime('%m'))

    # We're going to be using facebook likes to measure popularity.
    # only look at recent years.
    cruise_posts = cruise_posts[cruise_posts['year'] >= 2013].dropna().copy()

    # Keep only the year, month, and facebook likes.
    cruise_posts = cruise_posts[['year', 'month', 'facebook_likes']]
    cruise_posts['facebook_likes'] = cruise_posts['facebook_likes'].map(lambda x: int(x))

    # Group our items by year and month and find the total number of facebook likes.
    grouped = cruise_posts.groupby(['year', 'month'], as_index=False).sum()
    grouped['id'] = grouped[['year', 'month']].apply(lambda x: "{0}-{1}".format(x[0], x[1]), axis=1)

    ax = sns.barplot(x="id", y="facebook_likes", data=grouped,
                     palette=META['plots']['palette'])
    ax.set_title("Facebook Likes over Time")
    ax.set_xlabel("Year/Month")
    ax.set_ylabel("Facebook Likes")

    # Make the labels vertical.
    for item in ax.get_xticklabels():
        item.set_rotation(90)

analyze_cruise_lines(posts.copy(), filings.copy())
# -

# ### Summary
#
# Deeper analysis conflicts with what was observed when the top posts by Facebook Likes
# were analyzed. Although there were a few Cruise Line posts that were popular, Cruise
# Lines Likes is consistently low. The exceptions were a brief spike during the winter of
# 2015 and the large spike in August of 2015 which was a single post publicizing the
# availability of Cruise Lines in New York.
#
# Based on Facebook Likes, there seems to be no evidence that Disney Cruise Lines are
# consistently growing in popularity.
#
# How could this information be beneficial? According to Cruise Market Watch, the
# predicted amount of cruise line passengers is increasing.

from IPython.display import Image
Image(filename='img/cruises.png')

# If cruising in general is growing in popularity, but Disney Cruise Lines is not, Disney
# Corporation could be missing out on potential growth.

# ## Weekday vs. Weekend Posts
#
# Earlier it was observed that there was a distinct difference in the number of Facebook
# Likes between posts that were on the weekdays and posts that were on the weekend. A
# Hypothesis Test can be used to show if the apparent difference is caused by chance.
#
# *Null Hypothesis*: There is no difference in the number of Facebook Likes between
# weekday and weekend.

# +
def weekday_vs_weekend_popularity(posts):
    """
    Compare Facebook Likes of weekday posts vs. weekend posts using Welch's t-test.

    Key arguments:
    posts -- The posts.
    """
    # Determine the day of the week (dow) for each post ('%w': 0 = Sunday, 6 = Saturday).
    posts['datetime'] = pd.to_datetime(posts['timestamp'], unit='s')
    posts['year'] = posts['datetime'].map(lambda x: int(x.strftime('%Y')))
    posts['dow'] = posts['datetime'].map(lambda x: int(x.strftime('%w')))

    # Take a look at only the recent years.
    posts = posts[posts['year'] >= 2011].dropna().copy()

    # Keep only facebook likes and dow.
    posts = posts[['dow', 'facebook_likes']].copy().dropna()

    posts_weekday = posts[(posts['dow'] != 0) & (posts['dow'] != 6)]
    posts_weekend = posts[(posts['dow'] == 0) | (posts['dow'] == 6)]

    # Sanity check.
    assert(len(posts) == len(posts_weekday) + len(posts_weekend))

    print("Weekday Sample Size:\t{0}".format(len(posts_weekday)))
    print("Weekday Variance:\t{0}".format(np.var(posts_weekday['facebook_likes'].values)))
    print("Weekend Sample Size:\t{0}".format(len(posts_weekend)))
    print("Weekend Variance:\t{0}".format(np.var(posts_weekend['facebook_likes'].values)))
    print()

    # Run a ttest for 2 independent samples. The weekday sample size is significantly
    # larger and has a different variance than the weekend sample. Use Welch's t-test.
    test = sp.stats.ttest_ind(
        posts_weekday['facebook_likes'].values,
        posts_weekend['facebook_likes'].values,
        equal_var=False)

    print("P-Value: {0}".format(test.pvalue))

weekday_vs_weekend_popularity(posts.copy())
# -

# ### Summary
#
# The p-value is less than the 5%, so the null hypothesis can be safely rejected. To be
# explicitly clear, this only proves that the chances of seeing the observed difference is
# very unlikely to have happened by chance. It only provides *some* evidence that the
# reason for seeing the observed difference is due to the day of the week.
#
# How could this information be beneficial? The blog is used as a method to publicize
# events at Disney. Based on the simple Hypothesis Test, it would *likely* be advantageous
# for a Blogger to post on the weekend in order to maximize popularity.
#
# *Side note: At the time of writing this, the statistics community is having a revolt
# against the misuse of p-values... I'll update this analysis should I discover that a
# different method is more appropriate.*

# ## Negative Comment Detection (Sentiment Analysis)
#
# While sentiment is largely subjective, sentiment quantification can be very useful to
# businesses looking to understand consumer reactions.
#
# In the case of Disney Corporation, finding negative sentiments would be extremely
# advantageous. If Disney could determine what areas customers were most dissatisfied,
# proper action could be taken to improve the situation.
#
# In order to detect negative comments, a sentiment model must be constructed by training
# on positive and negative examples (supervised learning). The following methods are the
# step-by-step process in developing the sentiment model.
#
# *(For the tech-savvy, I provided decent comments in the code for each phase. However, a
# deep understanding of Neural Networks and TensorFlow is required).*

# ### Training Data
#
# The training data used to build the sentiment model came from a variety of sources. The
# largest source was Cornell's Movie Review data set which contained reviews of movies
# that were classified as "positive" (good review) or "negative" (bad review). The model
# was **not** trained on any of the comments from the Disney Blog as I did not have time
# to "hand" label thousands of comments. Once all the sources were obtained, they were
# merged into a single DataFrame.

# +
def sentiment_display_details():
    """Display the sentiment dataframe."""
    df = sentiment_dataframe()
    print("Total: {:d}".format(len(df)))
    print("Positive Sentiments: {:d}".format(len(df[df['sentiment'] == 1])))
    print("Negative Sentiments: {:d}".format(len(df[df['sentiment'] == 0])))
    print("")
    print(df.head())

sentiment_display_details()
# -

# ### Cleaning Strings

def sentiment_clean(s):
    """
    Cleans the provided string for sentiment analysis.

    Key arguments:
    s -- The string to clean.

    Returns:
    A cleansed string.
    """
    # Add a space between punc. We don't want "great." and "great!" to show as different
    # words, but punc is important!
    s = re.sub("\s*\.+\s*", " . ", s)
    s = re.sub("\s*\*+\s*", "", s)

    # Comments tend to have patterns of !???!! or !!??!. Remove them for a simple question.
    s = re.sub("\s*(\!\?)+\s*", " ? ", s)
    s = re.sub("\s*(\?\!)+\s*", " ? ", s)

    # Remove HTML tags that people might have put in.
    s = re.sub("\<.+?\>", "", s)

    s = re.sub("\s*\,+\s*", " , ", s)
    s = re.sub("\s*\?+\s*", " ? ", s)
    s = re.sub("\s*\!+\s*", " ! ", s)
    s = re.sub("\(", "", s)
    s = re.sub("\)", "", s)
    s = re.sub("\<", "", s)
    s = re.sub("\>", "", s)
    s = re.sub("\\n", "", s)
    s = re.sub("\s*\"\s*", " \" ", s)

    # Comments tend to have &lt;3 for love. lets formally change it to "love"
    s = re.sub("\s*&lt;3\s*", " love ", s)
    s = re.sub("\s*&\s*", " and ", s)

    # Get rid of Twitter style comments.
    s = re.sub("@", "", s)
    s = re.sub("#", "", s)

    s = s.lower().strip()
    return s

# +
def test_sentiment_clean():
    """Test the sentiment_clean method."""
    assert(sentiment_clean("THAT WAS AWESOME!!!") == "that was awesome !")
    assert(sentiment_clean("@Apple has snazy products.") == "apple has snazy products .")
    assert(sentiment_clean("Don't show the rest. <b>bad html content!!</b>") == "don't show the rest . bad html content !")
    assert(sentiment_clean("Do you like this statement!?!?") == "do you like this statement ?")
    assert(sentiment_clean("that was lame...)") == "that was lame .")
    assert(sentiment_clean("\"Quote me!!\"") == "\" quote me ! \"")
    print("pass")

test_sentiment_clean()
# -

# ### Building a Vocabulary

def sentiment_build_vocab(sentences, max_size=50000):
    """
    Builds a vocabulary and a inverse vocabulary dictionary based on provided sentences.
    We reserve the first index for "unknown" words (future words that haven't been seen
    or padding during training)

    Key arguments:
    sentences -- A list of tokenized sentences (lists of words) to build a dictionary from.
    max_size -- The maximum size of the dictionary.

    Returns:
    vocab -- The vocabulary dictionary.
    vocab_inv -- The inverse vocabulary dictionary.
    """
    count = [['UNK', 0]]
    count.extend(
        collections.Counter(
            itertools.chain.from_iterable(sentences)
        ).most_common(max_size - 1)
    )

    # Assign each word the next available index ("UNK" gets 0).
    vocab = dict()
    for word, _ in count:
        vocab[word] = len(vocab)
    vocab_inv = dict(zip(vocab.values(), vocab.keys()))

    return vocab, vocab_inv

# +
def test_sentiment_build_vocab():
    """Test the sentiment_build_vocab method."""
    sentences = ["word1 word2 word3 word4".split(" "),
                 "word3 word4 word5".split(" "),
                 "word3".split(" ")]

    vocab, vocab_inv = sentiment_build_vocab(sentences)
    assert(vocab["UNK"] == 0)
    assert(vocab_inv[0] == "UNK")
    assert(vocab["word3"] == 1)
    assert(vocab_inv[1] == "word3")

    # Try keeping only the top 3 words.
    vocab, vocab_inv = sentiment_build_vocab(sentences, max_size=3)
    assert(vocab["UNK"] == 0)
    assert(vocab_inv[0] == "UNK")

    # "word5" should have been evicted, so the lookup must raise KeyError.
    try:
        vocab["word5"]
        raise AssertionError("word5 should not be in the vocab")
    except KeyError:
        pass

    print("pass")

test_sentiment_build_vocab()
# -

# ### Vocabulary Lookup

def sentiment_vocab_lookup(vocab, word):
    """
    Looks up a word in the vocab dictionary. If the word does not exist, it returns the
    "unknown" index.

    Key arguments:
    vocab -- The vocabulary dictionary.
    word -- The word to lookup.
    """
    return vocab[word] if word in vocab else 0

# +
def test_sentiment_vocab_lookup():
    """Tests the sentiment_vocab_lookup method."""
    sentences = ["word1 word2 word3 word4".split(" "),
                 "word3 word4 word5".split(" "),
                 "word3".split(" ")]
    vocab, vocab_inv = sentiment_build_vocab(sentences, max_size=3)

    assert(sentiment_vocab_lookup(vocab, "UNK") == 0)
    assert(sentiment_vocab_lookup(vocab, "word3") == 1)

    # Try words that should not exist since they were not in the vocab.
    assert(sentiment_vocab_lookup(vocab, "word5") == 0)
    assert(sentiment_vocab_lookup(vocab, "blablabla") == 0)

    print("pass")

test_sentiment_vocab_lookup()
# -

# ### Labels

def sentiment_label(sentiment):
    """
    Given a JSON sentiment object, return a label that TensorFlow can understand.

    Key arguments:
    sentiment -- A JSON sentiment object.

    Returns:
    [1, 0] if the sentiment is positive and [0, 1] if the sentiment is negative.
    """
    return [1, 0] if sentiment == 1 else [0, 1]

def sentiment_label_inv(sentiment_label):
    """
    Given a sentiment_label, return a positive/negative (int) result.

    Key arguments:
    sentiment_label -- A one-hot sentiment label ([1, 0] or [0, 1]).

    Returns:
    1 if the sentiment is positive and 0 if the sentiment is negative.
    """
    return 1 if np.all(sentiment_label == [1, 0]) else 0

# +
def test_sentiment_label():
    """Test the sentiment_label method."""
    assert(sentiment_label(1) == [1, 0])
    assert(sentiment_label(0) == [0, 1])
    assert(sentiment_label_inv([1, 0]) == 1)
    assert(sentiment_label_inv([0, 1]) == 0)
    print("pass")

test_sentiment_label()
# -

# ### Vectorize Sentences

def sentiment_vectorize_sentence(vocab, sentence, min_size):
    """
    Vectorizes a sentence. If a sentence is smaller that min_size, it will be padded using
    vocab["UNK"]. Padding is necessary so that all sentences are the same length during
    mapping to sentence embeddings.

    Key arguments:
    vocab -- The vocabulary.
    sentence -- The sentence to vectorize.
    min_size -- The minimum size of the sentence. Padding using vocab["UNK"] will be used
                to fill remaining space.
    """
    vec = [sentiment_vocab_lookup(vocab, word) for word in sentence]
    num_padding = min_size - len(vec)
    return np.array(vec + [vocab["UNK"]] * num_padding, dtype=np.int)

# +
def test_sentiment_vectorize_sentence():
    """Tests the sentiment_vectorize_sentence method."""
    sentences = ["the cat is small .".split(" "),
                 "the cat was large !".split(" ")]
    vocab, vocab_inv = sentiment_build_vocab(sentences)

    vec = sentiment_vectorize_sentence(vocab, "the cat".split(" "), 5)
    assert(len(vec) == 5)
    assert(vec[0] == 1)
    assert(vec[1] == 2)
    assert(vec[-1] == 0)
    assert([vocab_inv[idx] for idx in vec] == ['the', 'cat', 'UNK', 'UNK', 'UNK'])

    print("pass")

test_sentiment_vectorize_sentence()
# -

# ### Vectorize Sentiments

def sentiment_vectorize_sentiments(sentiments, max_size):
    """
    Vectorizes a list of sentiment (JSON) objects.

    Key arguments:
    sentiments -- A DataFrame of sentiment rows with 'text' and 'sentiment' columns.
    max_size -- The maximum size of a sentiment sentence.
    """
    # Go through each sentiment, and only evaluate ones that are less than the max_size.
    sentences = []
    labels = []
    for idx, sentiment in sentiments.iterrows():
        sentence = sentiment['text'].split(" ")
        if (len(sentence) <= max_size):
            sentences.append(sentence)
            labels.append(sentiment_label(sentiment['sentiment']))

    # Build the vocabulary using the sentences.
    vocab, vocab_inv = sentiment_build_vocab(sentences)

    # Build a list of vectors using the sentences.
    vecs = []
    for sentence in sentences:
        vecs.append(sentiment_vectorize_sentence(vocab, sentence, max_size))

    return np.array(vecs, dtype=np.int), np.array(labels, dtype=np.int), vocab, vocab_inv

# +
def test_sentiment_vectorize_sentiments():
    """Tests the sentiment_vectorize_sentiments method."""
    sentiments = sentiment_dataframe().iloc[:10]
    vecs, labels, vocab, vocab_inv = sentiment_vectorize_sentiments(sentiments, 100)

    test_vec = np.array(
        [sentiment_vocab_lookup(vocab, word) for word in sentiments.iloc[0]['text'].split(" ")],
        dtype=np.int)
    assert(np.all(test_vec == vecs[0][:len(test_vec)]))

    print("pass")

test_sentiment_vectorize_sentiments()
# -

# ### Model

class SentimentModel:
    """
    TensorFlow sentiment model using Convolutional Neural Networks.
    """

    def __init__(self, sentence_size, vocab_size, alpha=0.01):
        """
        Initializes the most important model variables.

        Key arguments:
        sentence_size -- The maximum size of a sentence.
        vocab_size -- The size of the vocab.
        alpha -- L2 normalization parameter.
        """
        self.state = {
            'sentence_size': sentence_size,
            'vocab_size': vocab_size,
            'alpha': alpha,
            # The number of classes (Positive and Negative sentiment)
            'num_classes': 2,
            # The embedding size of each sentence.
            'embedding_size': 300,
            # The filter sizes to use during convolution.
            'filter_sizes': [3, 4, 5],
            # The number of times to use each filter.
            'num_filters': 100,
        }
        # Total number of pooled features after concatenating all filter outputs.
        # NOTE: init_graph() reads state['num_features'], so this must be set here.
        self.state['num_features'] = self.state['num_filters'] * len(self.state['filter_sizes'])

    def init_graph(self, graph):
        """
        Initializes the TensorFlow graph.

        Key arguments:
        graph -- The TensorFlow graph.
        """
        state = self.state
        with graph.as_default():
            # Set the feature and output label placeholders.
            self.x = tf.placeholder(tf.int32, [None, state['sentence_size']])
            self.y = tf.placeholder(tf.float32, [None, state['num_classes']])
            self.keep_prob = tf.placeholder(tf.float32)

            # Map each sentence vector (size of the vocabulary) to an embedding vector.
            embedded_weights = tf.Variable(
                tf.random_uniform([state['vocab_size'], state['embedding_size']], -1.0, 1.0))
            embedded_chars = tf.nn.embedding_lookup(embedded_weights, self.x)
            embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)

            # Each filter has a different convolution. Iterate through all and combine
            # them all at the end.
            pools = []
            for i, filter_size in enumerate(state['filter_sizes']):
                filter_shape = [filter_size, state['embedding_size'], 1, state['num_filters']]
                weights = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1))
                biases = tf.Variable(tf.constant(0.1, shape=[state['num_filters']]))
                conv = tf.nn.conv2d(embedded_chars_expanded, weights,
                                    strides=[1, 1, 1, 1], padding="VALID")
                hidden = tf.nn.relu(tf.nn.bias_add(conv, biases))
                pool = tf.nn.max_pool(hidden,
                                      ksize=[1, state['sentence_size'] - filter_size + 1, 1, 1],
                                      strides=[1, 1, 1, 1], padding='VALID')
                pools.append(pool)

            # Merge the pooled layers.
            hidden_pool = tf.concat(3, pools)
            hidden_pool_flat = tf.reshape(hidden_pool, [-1, state['num_features']])

            # Dropout
            hidden_dropout = tf.nn.dropout(hidden_pool_flat, self.keep_prob)

            # Map the pooled features to the output classes.
            weights = tf.Variable(
                tf.truncated_normal([state['num_features'], state['num_classes']], stddev=0.1))
            biases = tf.Variable(tf.constant(0.1, shape=[state['num_classes']]))

            # Determine scores and then find the max score to determine the best prediction.
            scores = tf.nn.xw_plus_b(hidden_dropout, weights, biases)
            self.predictions = tf.argmax(scores, 1)

            # Calculate loss.
            losses = tf.nn.softmax_cross_entropy_with_logits(scores, self.y)
            self.loss = tf.reduce_mean(losses) + state['alpha'] * tf.nn.l2_loss(weights)

            # Calculate accuracy.
            self.accuracy = tf.reduce_mean(
                tf.cast(tf.equal(self.predictions, tf.argmax(self.y, 1)), "float"))

            # Run the optimizer.
            optimizer = tf.train.AdamOptimizer(1e-4)
            grads_and_vars = optimizer.compute_gradients(self.loss)
            self.optimize = optimizer.apply_gradients(grads_and_vars)

# ### Training

def sentiment_train(x, y, vocab, filepath, batch_size=250, epochs=1000):
    """
    Trains a SentimentModel with the provided training set.

    Key arguments:
    x -- The feature set (vector of sentences).
    y -- The labels.
    vocab -- The vocabulary dictionary.
    filepath -- The file path to save the training model.
    batch_size -- The size of each iterative training batch.
    epochs -- The number of epochs (random assortment of training vectors)
    """
    graph = tf.Graph()
    with graph.as_default():
        with tf.Session(graph=graph) as session:
            # Initialize a sentiment model.
            model = SentimentModel(len(x[0]), len(vocab))
            model.init_graph(graph)

            # Continue with a saved model if one exists.
            session.run(tf.initialize_all_variables())
            saver = tf.train.Saver(tf.all_variables())
            if os.path.isfile(filepath):
                print("Loading saved session fom {0}.".format(filepath))
                saver.restore(session, filepath)
            print("Initialized.")

            x_size = len(x)
            batches = int(x_size / batch_size) + 1

            # For each epoch, do a random assortment of the training sentences.
            for epoch in range(epochs):
                idx = np.random.permutation(np.arange(x_size))
                x_shuffled = x[idx]
                y_shuffled = y[idx]

                for batch in range(batches):
                    start_idx = batch * batch_size
                    end_idx = min((batch + 1) * batch_size, x_size)
                    x_batch = x_shuffled[start_idx:end_idx]
                    y_batch = y_shuffled[start_idx:end_idx]

                    # Feed our model with a dropout rate of 50%.
feed = {model.x: x_batch, model.y: y_batch, model.keep_prob: 0.5} _, loss, accuracy = session.run([model.optimize, model.loss, model.accuracy], feed_dict=feed) # Print status and save model. if (batch % 10 == 0): print("Epoch {0} Batch {1} (L: {2}, A: {3})...".format(epoch, batch, loss, accuracy)) saver.save(session, filepath) # ### Prediction def sentiment_predict(x, vocab, filepath): """ Predicts the provided feature (sentence) vectors using the model at filepath. """ graph = tf.Graph() with graph.as_default(): with tf.Session(graph=graph) as session: model = SentimentModel(len(x[0]), len(vocab), 0.0) model.init_graph(graph) session.run(tf.initialize_all_variables()) saver = tf.train.Saver(tf.all_variables()) saver.restore(session, filepath) # Use a fake label set. y = np.array([[0, 1]] * len(x)) # Feed the model, but make sure to keep all of the values! feed = {model.x: x, model.y: y, model.keep_prob: 1.0} predictions = session.run([model.predictions], feed_dict=feed) return np.array(predictions[0] == 0, dtype=np.int) # ### Execution sentiment_vecs, sentiment_labels, sentiment_vocab, sentiment_vocab_inv = \ sentiment_vectorize_sentiments(sentiment_dataframe(), 200) # #### Split into Train/Test sentiment_x_train, sentiment_x_test, sentiment_y_train, sentiment_y_test = \ train_test_split(sentiment_vecs, sentiment_labels, test_size=0.10, random_state=42) print("Training Size:\t{0}".format(len(sentiment_y_train))) print("Test Size:\t{0}".format(len(sentiment_y_test))) # #### Verify the Positive/Negative Distribution print("Positive Training Examples:\t{0}".format( np.sum([np.array_equal(y, np.array([1, 0])) for y in sentiment_y_train]))) print("Negative Training Examples:\t{0}".format( np.sum([np.array_equal(y, np.array([0, 1])) for y in sentiment_y_train]))) print("Positive Test Examples:\t\t{0}".format( np.sum([np.array_equal(y, np.array([1, 0])) for y in sentiment_y_test]))) print("Negative Test Examples:\t\t{0}".format( np.sum([np.array_equal(y, np.array([0, 
1])) for y in sentiment_y_test]))) # #### Train the Model sentiment_train(sentiment_x_train, sentiment_y_train, sentiment_vocab, META['model']['sentiment']) # **Train Summary** # # After many hours of training, the model was able to predict random positive/negative training samples with ~85% accuracy. # # ... # Epoch 1 Batch 70 (L: 0.3615756034851074, A: 0.8519999980926514)... # Epoch 1 Batch 80 (L: 0.38188397884368896, A: 0.8519999980926514)... # Epoch 1 Batch 90 (L: 0.36195218563079834, A: 0.8640000224113464)... # ... # # *(Note: The training model accuracy is not a significant estimate because it is biased. Evaluation should always occur on data the model has never seen before!)* # #### Test the Model # + def sentiment_test_model(): """ Analysis of the sentiment model using the test set. """ y = np.array([sentiment_label_inv(yi) for yi in sentiment_y_test[:100]], dtype=np.int) predictions = sentiment_predict(sentiment_x_test[:100], sentiment_vocab, META['model']['sentiment']) print("Accuracy") print(accuracy_score(y, predictions)) print() print("Precision") print(precision_score(y, predictions)) print() print("Recall") print(recall_score(y, predictions)) print() tp = len(np.where(y[np.where(y == predictions)] == 1)[0]) tn = len(np.where(y[np.where(y == predictions)] == 0)[0]) fp = len(np.where(predictions[np.where(y != predictions)] == 1)[0]) fn = len(np.where(predictions[np.where(y != predictions)] == 0)[0]) print("Negative Detection Accuracy") print(tn / (tn + fn)) print() print("Confusion Matrix") print(confusion_matrix(y, predictions)) print() sentiment_test_model() # - # **Test Summary** # # Accuracy for the test set was ~74% - about a 11% difference from the training accuracy. However, in this scenario, the more important statistic is the rate at which the model could accuractely determine negative sentiments. The negative detection accuracy for the model is ~71% which is by no means production ready, but is significantly better than flipping a coin (50%). 
# It was also high enough to continue forward and evaluate using comments from Disney's Blog.

# #### Test Blog Comments
#
# To test blog comments, I selected 10 positive and 10 negative comments. The model was then applied to predict the sentiment of each sentence.
#
# *(Note: I "randomly selected" comments - but I'm human and am therefore biased. I looked for comments that were intuitively positive or negative.)*

# +
def sentiment_test_comments(title, filepath, max_size):
    """
    Reads in pre-determined comments and predicts their sentiments.

    Key arguments:
    title -- The title to print.
    filepath -- The file location of the comments (a JSON list of strings).
    max_size -- The max sentence size; longer comments are skipped.
    """
    with open(filepath) as file:
        comments = json.load(file)

    # Convert the comments to vocab vectors. Track the comments that were
    # actually kept (short enough) alongside their vectors so that the
    # predictions stay aligned with the text printed below.
    # (Bug fix: previously the print loop indexed the original `comments`
    # list, which misaligns whenever a comment exceeds max_size and is
    # filtered out of `vecs`.)
    kept_comments = []
    vecs = []
    for comment in comments:
        words = sentiment_clean(comment).split(" ")
        if (len(words) <= max_size):
            kept_comments.append(comment)
            vecs.append(sentiment_vectorize_sentence(sentiment_vocab, words, max_size))

    # `np.int` was removed in NumPy 1.24; plain `int` is the same dtype.
    vecs = np.array(vecs, dtype=int)

    predictions = sentiment_predict(vecs, sentiment_vocab, META['model']['sentiment'])

    print(title)
    for i in range(len(predictions)):
        print("({0}) {1}".format(predictions[i], kept_comments[i]))
    print()

sentiment_test_comments("Positives", "data/sentiment/comments-positive.json", 200)
sentiment_test_comments("Negatives", "data/sentiment/comments-negative.json", 200)
# -

# **Test Blog Comments Summary**
#
# The model was able to predict 8/10 positive sentences and 8/10 negative sentences correctly. I was troubled by the first negative comment which the model predicted as positive ("It bothers me sooo much when people refer..."). The comment is clearly negative, which leads me to believe that the training set is missing vocabulary (i.e. the model never learned how to classify "it bothers me") or is not particularly stable at predicting longer comments.
#
# The second negative comment contains no obvious negative words and therefore would be very difficult for the model to predict accurately.
# This raises a good point that the model will have a difficult time predicting "weak negative" or "neutral" comments.

# ### Detect Negative Sentiments
#
# Now for the real test... Can we scan through the blog and determine negative comments?

# +
def sentiment_detect_negative_comments(comments, max_size, year, num_comments):
    """
    Scans through the blog and looks for negative comments.

    Prints the body of every comment the model classifies as negative
    (prediction == 0), followed by "done".

    Key arguments:
    comments -- The blog comments (DataFrame with 'timestamp' and 'body' columns).
    max_size -- The max sentence size; longer comments are skipped.
    year -- The year to search.
    num_comments -- The number of comments to look at.
    """
    # Derive a year column from the UNIX timestamp so we can filter by year.
    comments['datetime'] = pd.to_datetime(comments['timestamp'], unit='s')
    comments['year'] = comments['datetime'].map(lambda x: int(x.strftime('%Y')))
    # Keep only the requested year, capped at num_comments rows.
    comments = comments[comments['year'] == year].copy().iloc[:num_comments]

    for idx, comment in comments.iterrows():
        body = sentiment_clean(comment['body']).split(" ")
        if (len(body) <= max_size):
            # NOTE(review): sentiment_predict restores the TF session once per
            # comment here; batching all vectors into one call would be faster.
            vecs = np.array([sentiment_vectorize_sentence(sentiment_vocab, body, max_size)], dtype=np.int)
            prediction = sentiment_predict(vecs, sentiment_vocab, META['model']['sentiment'])[0]
            # Prediction 0 == negative sentiment.
            if (prediction == 0):
                print(comment['body'])
                print()

    print("done")

# Scan the first 100 comments from 2015.
sentiment_detect_negative_comments(comments.copy(), 200, 2015, 100)
# -

# ### Summary
#
# After hours of constructing and training a Neural Network, the model was able to detect quite a few negative comments. Some subjects that caught my eye were:
#
# - Confusion on how to use Disney Maps.
# - Disappointment that characters do not appear at every show.
# - Disappointment over Disney no longer offering inaugural medals for the half marathon.
#
# One interesting outcome was the number of questions the model detected. I was surprised to see that the majority of the negative predictions were questions. But logically, questions are about publicly raising concerns and concerns are typically negative so it makes sense.
#
# Overall, I thought the results were satisfactory. There is a lot of room for improvement.
# I only spent a few hours fine tuning the hyper-parameters and optimizing the process. Some potential improvements are:
#
# - Obtaining a much better training set.
# - Removal of stopwords.
# - Stemming.
# - Exploring different values for the hyper-parameters.
# - Removal of questions to determine if the model can detect rants.
#
# How could this information be beneficial? Disney is a brand. And like all brands, success is achieved by improving and maintaining quality. Being able to detect negative sentiment is a crucial step for maintaining a quality brand, such as Disney. With some improvements, this model could be successfully applied to a variety of sources (Twitter Feeds, Facebook comments, etc.) as an early problem detection system.

# ## Conclusion and Future Work
#
# - Explored some high level information.
# - Determined popular Disney topics, and popular times for blog readers.
# - Developed a prototype negative sentiment detection model.
#
# The work here only scratches the surface of the possibilities. As with most projects, there is always room for improvement.
#
# - Deeper exploration.
# - Improved negative sentiment detection.
# - Topic prediction using LDA.
#
#
# ## Appendix

# Some accessory methods used during the process.

def enumerate_post_filepaths():
    """
    Generator that returns the filepath for each blog post (.json).

    Returns:
    The filepath for each blog post.
    """
    path = META['data']['blog']['path']
    for year_dir in os.listdir(path):
        # Make sure we are actually looking at a year folder. .* files/dirs might exist.
        if (year_dir.isdigit()):
            for month_dir in os.listdir(os.path.join(path, year_dir)):
                if (month_dir.isdigit()):
                    for filename in os.listdir(os.path.join(path, year_dir, month_dir)):
                        yield os.path.join(path, year_dir, month_dir, filename)

# +
def test_enumerate_post_filepaths():
    """Tests the enumerate_post_filepaths method."""
    for filepath in enumerate_post_filepaths():
        assert(filepath == "data/blog/2009/09/chefs-offer-taste-of-epcot-international-food-wine-festival.json")
        break
    print("pass")

test_enumerate_post_filepaths()
# -

def enumerate_posts():
    """
    Generator that returns the filepath and contents for each blog post (.json).

    Returns:
    filepath -- The filepath to the blog post json file.
    contents -- The json object.
    """
    for filepath in enumerate_post_filepaths():
        contents = None
        with open(filepath, 'r') as file:
            contents = json.load(file)
        yield filepath, contents

# +
def test_enumerate_posts():
    """Tests the enumerate_posts method."""
    for (filepath, data) in enumerate_posts():
        assert(filepath == "data/blog/2009/09/chefs-offer-taste-of-epcot-international-food-wine-festival.json")
        assert(data['title'] == "Chefs Offer Taste of Epcot International Food & Wine Festival")
        break
    print("pass")

test_enumerate_posts()
# -

def post_dataframes():
    """
    Returns the blog post data as pandas DataFrames.

    Results are cached as pickle files under META['data']['cache'] so repeat
    calls are fast.

    Returns:
    posts -- A DataFrame containing generic post data indexed by URL.
    tags -- A DataFrame containing all the tags indexed by URL.
    filings -- A DataFrame containing all the filings indexed by URL.
    comments -- A DataFrame containing all the comments indexed by URL.
    """
    # Define cache filenames.
    posts_filename = META['data']['cache'] + "/posts.pickle"
    tags_filename = META['data']['cache'] + "/tags.pickle"
    filings_filename = META['data']['cache'] + "/filings.pickle"
    comments_filename = META['data']['cache'] + "/comments.pickle"

    # Check if we have a cached pickle file already.
    if (os.path.isfile(posts_filename)):
        posts = pd.read_pickle(posts_filename)
        tags = pd.read_pickle(tags_filename)
        filings = pd.read_pickle(filings_filename)
        comments = pd.read_pickle(comments_filename)
        return posts, tags, filings, comments

    # Accumulate plain dicts and build each DataFrame once at the end.
    # (DataFrame.append inside a loop is O(n^2) and was removed in pandas 2.0.)
    post_rows = []
    tag_rows = []
    filing_rows = []
    comment_rows = []

    for i, (filepath, data) in enumerate(enumerate_posts()):
        post_rows.append({
            'url': data['url'],
            'author_name': data['author']['name'],
            'author_role': data['author']['role'],
            'author_division': data['author']['division'],
            'title': data['title'],
            'timestamp': data['timestamp'],
            'body': data['body'],
            'facebook_likes': data['facebook']['likes'],
            'facebook_url': data['facebook']['url'],
        })

        for tag in data['tags']:
            tag_rows.append({'url': data['url'], 'tag': tag})

        for filing in data['files']:
            filing_rows.append({'url': data['url'], 'filing': filing})

        for comment in data['comments']:
            comment_rows.append({
                'url': data['url'],
                'author': comment['author'],
                'timestamp': comment['timestamp'],
                'body': comment['body'],
                'is_post_author': comment['is_post_author'],
                'location': comment['location']
            })

        # Progress indicator; parsing every post takes a while.
        if (i % 100 == 0):
            print("{0}...".format(i))

    posts = pd.DataFrame(post_rows)
    tags = pd.DataFrame(tag_rows)
    filings = pd.DataFrame(filing_rows)
    comments = pd.DataFrame(comment_rows)

    # Set the url as the index.
    posts.set_index('url', inplace=True)
    tags.set_index('url', inplace=True)
    filings.set_index('url', inplace=True)
    comments.set_index('url', inplace=True)

    # Cache all of the dataframes.
    posts.to_pickle(posts_filename)
    tags.to_pickle(tags_filename)
    filings.to_pickle(filings_filename)
    comments.to_pickle(comments_filename)

    return posts, tags, filings, comments

# +
def test_post_dataframes():
    """Tests the post_dataframes method."""
    first_post_title = "Chefs Offer Taste of Epcot International Food & Wine Festival"
    first_tag = "FWF"
    first_filings = "Epcot"
    first_comment_author = "Robert"

    posts, tags, filings, comments = post_dataframes()
    assert(posts.iloc[0].title == first_post_title)
    assert(tags.iloc[0].tag == first_tag)
    assert(filings.iloc[0].filing == first_filings)
    assert(comments.iloc[0].author == first_comment_author)
    print("pass")

# Execute.
test_post_dataframes()
# -

def print_general_stats(features, top=15):
    """
    Display general stats for the features provided.

    Key arguments:
    features -- The features to display (a 1-D numpy array).
    top -- The number of smallest/largest values to display.
    """
    print("General Stats:")
    print("--------------")
    print("Mean:\t\t{:f}".format(np.mean(features)))
    print("Median:\t\t{:f}".format(np.median(features)))
    print("Std. Dev.:\t{:f}".format(np.std(features)))
    print("")

    sorted_ixds = np.argsort(features)
    print("Top {0} Smallest Values:\t{1}".format(top, features[sorted_ixds][:top]))
    print("Top {0} Largest Values:\t{1}".format(top, features[sorted_ixds][-top:]))
    print("")

# +
def test_print_general_stats():
    """Tests the print_general_stats method."""
    # `np.int` was removed in NumPy 1.24; plain `int` is the same dtype.
    features = np.arange(11, dtype=int)
    print_general_stats(features, 5)

test_print_general_stats()
# -

def sentiment_dataframe():
    """
    Returns the sentiment dataframe.

    Neutral entries (sentiment == 2) are skipped. The result is cached as a
    pickle file for faster reloads.

    Returns:
    df -- A dataframe containing the sentiments ('sentiment', 'text' columns).
    """
    cache_filename = META['data']['cache'] + "/sentiments.pickle"
    if (os.path.isfile(cache_filename)):
        return pd.read_pickle(cache_filename)

    with open(META['data']['sentiments']['filename'], 'r') as file:
        entries = json.load(file)

    # Build the rows in one pass and construct the DataFrame once.
    # (Appending a pd.Series per row is O(n^2) and removed in pandas 2.0.)
    rows = [
        {'sentiment': entry['sentiment'], 'text': entry['text']['cleansed']}
        for entry in entries
        if entry['sentiment'] != 2  # Skip neutral sentiments.
    ]
    df = pd.DataFrame(rows)

    # Cache the dataframe for faster reloads.
    df.to_pickle(cache_filename)

    return df

# +
def test_sentiment_dataframe():
    """Tests the sentiment_dataframe method."""
    df = sentiment_dataframe()
    print(df.head())

test_sentiment_dataframe()
# -

# ## Citings and Links

# Citing the dataset
#
# When using this dataset please cite our ACL 2011 paper which
# introduces it. This paper also contains classification results which
# you may want to compare against.
#
#
# @InProceedings{maas-EtAl:2011:ACL-HLT2011,
#   author    = {<NAME>. and Daly, <NAME>. and Pham, <NAME>. and Huang, Dan and Ng, <NAME>. and Potts, Christopher},
#   title     = {Learning Word Vectors for Sentiment Analysis},
#   booktitle = {Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies},
#   month     = {June},
#   year      = {2011},
#   address   = {Portland, Oregon, USA},
#   publisher = {Association for Computational Linguistics},
#   pages     = {142--150},
#   url       = {http://www.aclweb.org/anthology/P11-1015}
# }
#
# http://fivethirtyeight.com/features/statisticians-found-one-thing-they-can-agree-on-its-time-to-stop-misusing-p-values/
#
# https://www.cs.cornell.edu/people/pabo/movie-review-data/
#
# https://www.tensorflow.org/
disney.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- import findspark findspark.init('/home/pushya/spark-2.1.0-bin-hadoop2.7') import pyspark from pyspark.sql import SparkSession spark = SparkSession.builder.appName('cluster').getOrCreate() data=spark.read.format('libsvm').load('sample_libsvm_data.txt') data.show() final_data = data.select('features') # + from pyspark.ml.clustering import KMeans # Loads data. dataset = spark.read.format("libsvm").load("sample_kmeans_data.txt") # Trains a k-means model. kmeans = KMeans().setK(2).setSeed(1) model = kmeans.fit(dataset) # Evaluate clustering by computing Within Set Sum of Squared Errors. wssse = model.computeCost(dataset) print("Within Set Sum of Squared Errors = " + str(wssse)) # Shows the result. centers = model.clusterCenters() print("Cluster Centers: ") for center in centers: print(center) # -
Untitled9.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Rich Output # In Python, objects can declare their textual representation using the `__repr__` method. IPython expands on this idea and allows objects to declare other, rich representations including: # # * HTML # * JSON # * PNG # * JPEG # * SVG # * LaTeX # # A single object can declare some or all of these representations; all are handled by IPython's *display system*. This Notebook shows how you can use this display system to incorporate a broad range of content into your Notebooks. # ## Basic display imports # The `display` function is a general purpose tool for displaying different representations of objects. Think of it as `print` for these rich representations. from IPython.display import display # A few points: # # * Calling `display` on an object will send **all** possible representations to the Notebook. # * These representations are stored in the Notebook document. # * In general the Notebook will use the richest available representation. # # If you want to display a particular representation, there are specific functions for that: from IPython.display import ( display_pretty, display_html, display_jpeg, display_png, display_json, display_latex, display_svg ) # ## Images # To work with images (JPEG, PNG) use the `Image` class. from IPython.display import Image i = Image(filename='../images/ipython_logo.png') # Returning an `Image` object from an expression will automatically display it: i # Or you can pass an object with a rich representation to `display`: display(i) # An image can also be displayed from raw data or a URL. Image(url='http://python.org/images/python-logo.gif') # SVG images are also supported out of the box. 
from IPython.display import SVG SVG(filename='../images/python_logo.svg') # ### Embedded vs non-embedded Images # By default, image data is embedded in the notebook document so that the images can be viewed offline. However it is also possible to tell the `Image` class to only store a *link* to the image. Let's see how this works using a webcam at Berkeley. # + from IPython.display import Image img_url = 'http://www.lawrencehallofscience.org/static/scienceview/scienceview.berkeley.edu/html/view/view_assets/images/newview.jpg' # by default Image data are embedded Embed = Image(img_url) # if kwarg `url` is given, the embedding is assumed to be false SoftLinked = Image(url=img_url) # In each case, embed can be specified explicitly with the `embed` kwarg # ForceEmbed = Image(url=img_url, embed=True) # - # Here is the embedded version. Note that this image was pulled from the webcam when this code cell was originally run and stored in the Notebook. Unless we rerun this cell, this is not todays image. Embed # Here is today's image from same webcam at Berkeley, (refreshed every minutes, if you reload the notebook), visible only with an active internet connection, that should be different from the previous one. Notebooks saved with this kind of image will be smaller and always reflect the current version of the source, but the image won't display offline. SoftLinked # Of course, if you re-run this Notebook, the two images will be the same again. # ## HTML # Python objects can declare HTML representations that will be displayed in the Notebook. If you have some HTML you want to display, simply use the `HTML` class. from IPython.display import HTML s = """<table> <tr> <th>Header 1</th> <th>Header 2</th> </tr> <tr> <td>row 1, cell 1</td> <td>row 1, cell 2</td> </tr> <tr> <td>row 2, cell 1</td> <td>row 2, cell 2</td> </tr> </table>""" h = HTML(s) display(h) # You can also use the `%%html` cell magic to accomplish the same thing. 
# + language="html" # <table> # <tr> # <th>Header 1</th> # <th>Header 2</th> # </tr> # <tr> # <td>row 1, cell 1</td> # <td>row 1, cell 2</td> # </tr> # <tr> # <td>row 2, cell 1</td> # <td>row 2, cell 2</td> # </tr> # </table> # - # The display machinery can also be used by libraries. [Tabipy](https://pypi.python.org/pypi/Tabipy) is a library for constructing tables: # !pip install Tabipy # + from tabipy import Table, TableHeaderRow, TableCell t = Table(TableHeaderRow('divisions', 'result')) num = 55 for x in range(7): if num < 1: resultcell = TableCell(num, bg_colour='DarkBlue', text_colour='white') else: resultcell = TableCell(num) t.append_row((x, resultcell)) num /= 3 t # - # ## JavaScript # The Notebook also enables objects to declare a JavaScript representation. At first, this may seem odd as output is inherently visual and JavaScript is a programming language. However, this opens the door for rich output that leverages the full power of JavaScript and associated libraries such as [d3.js](http://d3js.org) for output. from IPython.display import Javascript # Pass a string of JavaScript source code to the `JavaScript` object and then display it. js = Javascript('alert("hi")'); display(js) # The same thing can be accomplished using the `%%javascript` cell magic: # + language="javascript" # # alert("hi"); # - # Here is a more complicated example that loads `d3.js` from a CDN, uses the `%%html` magic to load CSS styles onto the page and then runs ones of the `d3.js` examples. 
Javascript( """$.getScript('//cdnjs.cloudflare.com/ajax/libs/d3/3.2.2/d3.v3.min.js')""" ) # + language="html" # <style type="text/css"> # # circle { # fill: rgb(31, 119, 180); # fill-opacity: .25; # stroke: rgb(31, 119, 180); # stroke-width: 1px; # } # # .leaf circle { # fill: #ff7f0e; # fill-opacity: 1; # } # # text { # font: 10px sans-serif; # } # # </style> # + language="javascript" # # // element is the jQuery element we will append to # var e = element.get(0); # # var diameter = 600, # format = d3.format(",d"); # # var pack = d3.layout.pack() # .size([diameter - 4, diameter - 4]) # .value(function(d) { return d.size; }); # # var svg = d3.select(e).append("svg") # .attr("width", diameter) # .attr("height", diameter) # .append("g") # .attr("transform", "translate(2,2)"); # # d3.json("data/flare.json", function(error, root) { # var node = svg.datum(root).selectAll(".node") # .data(pack.nodes) # .enter().append("g") # .attr("class", function(d) { return d.children ? "node" : "leaf node"; }) # .attr("transform", function(d) { return "translate(" + d.x + "," + d.y + ")"; }); # # node.append("title") # .text(function(d) { return d.name + (d.children ? "" : ": " + format(d.size)); }); # # node.append("circle") # .attr("r", function(d) { return d.r; }); # # node.filter(function(d) { return !d.children; }).append("text") # .attr("dy", ".3em") # .style("text-anchor", "middle") # .text(function(d) { return d.name.substring(0, d.r / 3); }); # }); # # d3.select(self.frameElement).style("height", diameter + "px"); # - # ## LaTeX # The IPython display system also has builtin support for the display of mathematical expressions typeset in LaTeX, which is rendered in the browser using [MathJax](http://mathjax.org). # You can pass raw LaTeX test as a string to the `Math` object: from IPython.display import Math Math(r'F(k) = \int_{-\infty}^{\infty} f(x) e^{2\pi i k} dx') # With the `Latex` class, you have to include the delimiters yourself. 
This allows you to use other LaTeX modes such as `eqnarray`: from IPython.display import Latex Latex(r"""\begin{eqnarray} \nabla \times \vec{\mathbf{B}} -\, \frac1c\, \frac{\partial\vec{\mathbf{E}}}{\partial t} & = \frac{4\pi}{c}\vec{\mathbf{j}} \\ \nabla \cdot \vec{\mathbf{E}} & = 4 \pi \rho \\ \nabla \times \vec{\mathbf{E}}\, +\, \frac1c\, \frac{\partial\vec{\mathbf{B}}}{\partial t} & = \vec{\mathbf{0}} \\ \nabla \cdot \vec{\mathbf{B}} & = 0 \end{eqnarray}""") # Or you can enter LaTeX directly with the `%%latex` cell magic: # + language="latex" # \begin{align} # \nabla \times \vec{\mathbf{B}} -\, \frac1c\, \frac{\partial\vec{\mathbf{E}}}{\partial t} & = \frac{4\pi}{c}\vec{\mathbf{j}} \\ # \nabla \cdot \vec{\mathbf{E}} & = 4 \pi \rho \\ # \nabla \times \vec{\mathbf{E}}\, +\, \frac1c\, \frac{\partial\vec{\mathbf{B}}}{\partial t} & = \vec{\mathbf{0}} \\ # \nabla \cdot \vec{\mathbf{B}} & = 0 # \end{align} # - # ## Audio # IPython makes it easy to work with sounds interactively. The `Audio` display class allows you to create an audio control that is embedded in the Notebook. The interface is analogous to the interface of the `Image` display class. All audio formats supported by the browser can be used. Note that no single format is presently supported in all browsers. from IPython.display import Audio Audio(url="http://www.nch.com.au/acm/8k16bitpcm.wav") # A NumPy array can be auralized automatically. The `Audio` class normalizes and encodes the data and embeds the resulting audio in the Notebook. # # For instance, when two sine waves with almost the same frequency are superimposed a phenomena known as [beats](https://en.wikipedia.org/wiki/Beat_%28acoustics%29) occur. 
This can be auralised as follows: # + import numpy as np max_time = 3 f1 = 220.0 f2 = 224.0 rate = 8000.0 L = 3 times = np.linspace(0,L,rate*L) signal = np.sin(2*np.pi*f1*times) + np.sin(2*np.pi*f2*times) Audio(data=signal, rate=rate) # - # ## Video # More exotic objects can also be displayed, as long as their representation supports the IPython display protocol. For example, videos hosted externally on YouTube are easy to load: from IPython.display import YouTubeVideo YouTubeVideo('sjfsUzECqK0') # Using the nascent video capabilities of modern browsers, you may also be able to display local # videos. At the moment this doesn't work very well in all browsers, so it may or may not work for you; # we will continue testing this and looking for ways to make it more robust. # # The following cell loads a local file called `animation.m4v`, encodes the raw video as base64 for http # transport, and uses the HTML5 video tag to load it. On Chrome 15 it works correctly, displaying a control bar at the bottom with a play/pause button and a location slider. from IPython.display import HTML from base64 import b64encode video = open("../images/animation.m4v", "rb").read() video_encoded = b64encode(video).decode('ascii') video_tag = '<video controls alt="test" src="data:video/x-m4v;base64,{0}">'.format(video_encoded) HTML(data=video_tag) # ## External sites # You can even embed an entire page from another site in an iframe; for example this is today's Wikipedia # page for mobile users: from IPython.display import IFrame IFrame('http://ipython.org', width='100%', height=350) # ## Links to local files # IPython provides builtin display classes for generating links to local files. 
# Create a link to a single file using the `FileLink` object:

from IPython.display import FileLink, FileLinks

FileLink('Cell Magics.ipynb')

# Alternatively, to generate links to all of the files in a directory, use the `FileLinks` object, passing `'.'` to indicate that we want links generated for the current working directory. It will optionally recurse into subdirectories as well.

FileLinks('.', recursive=False)

# ## Rich output and security

# The IPython Notebook allows arbitrary code execution in both the IPython kernel and in the browser, through HTML and JavaScript output. More importantly, because IPython has a JavaScript API for running code in the browser, HTML and JavaScript output can actually trigger code to be run in the kernel. This poses a significant security risk as it would allow IPython Notebooks to execute arbitrary code on your computers.
#
# To protect against these risks, the IPython Notebook has a security model that specifies how dangerous output is handled. Here is a short summary:
#
# * When you run code in the Notebook, all rich output is displayed.
# * When you open a notebook, rich output is only displayed if it doesn't contain security vulnerabilities,...
# * ...or if you have trusted a notebook, all rich output will run upon opening it.
#
# A full description of the IPython security model can be found on [this page](http://ipython.org/ipython-doc/dev/notebook/security.html).

# ## Rich output and nbviewer

# Much of the power of the Notebook is that it enables users to share notebooks with each other using http://nbviewer.ipython.org, without installing IPython locally. As of IPython 2.0, notebooks rendered on nbviewer will display all output, including HTML and JavaScript.
Furthermore, to provide a consistent JavaScript environment on the live Notebook and nbviewer, the following JavaScript libraries are loaded onto the nbviewer page, *before* the notebook and its output is displayed: # # * [jQuery](http://jquery.com/) # * [RequireJS](http://requirejs.org/) # # Libraries such as [mpld3](http://mpld3.github.io/) use these capabilities to generate interactive visualizations that work on nbviewer.
examples/IPython Kernel/Rich Output.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: 'Python 3.6.10 64-bit (''3.6.10'': pyenv)' # name: python3 # --- # # Software Development 1 # # - Improving Problem Solving # - Revisiting Binary Search Trees # - Dynamically Adding Elements # - Finding Elements # - Advanced Java Concepts # - Abstraction # - Inheritance # - Polymorphism # - Encapsulation # - Managing Java Classes # - Constructor # - Getters and Setters # - Searching Algorithms # - Bubble # - Selection Sort # - Insertion # ## Improving Problem Solving [(Example for class)](https://www.hackerrank.com/challenges/list-comprehensions/problem) # --- # This section is something that typically isn't taught in a CS course and is more of a personal thought to include. That being said this isn't what has to be done and may not work in all scenarios. This is more a note to the piece of CS that isn't typically talked about but has a place in every conversation regarding computing, tech, and the world in general. We need a method to solve problems. We all don't attack problems the same but the following is how I typically approach issues. # # ### I have a problem and I've read through it # --- # At this point I either understand the question fully, I have some idea of what we're doing, I'm clueless, or I'm reading the question again because I undestood nothing the first time and it's because of my inability to read and not because I can't solve the problem. # # # ### When we get a problem we need to understand what we're doing. # So I have a conundrum, I don't understand the problem. Do i understand parts of the problem? Am I close to understanding it and am just missing a couple pieces? Have I done something like this before that I can equate this too? How can I get myself to the point where I understand what this is trying to tell me? # # ### Break the problem down. 
# Let's start by finding the seams in the problem. That way we can break the problem down into smaller problems. Often we'll find that complex things are made up of a bunch of smaller complex things. All this to say it's hard to see an elephant for what it is standing 3 inches away from the elephant. We need to step back and see the elephant or in the case of this bad metaphor our problem for what it is. A bunch of smaller problems. # # ### We get more manageable pieces, then figure out what each part needs to do. # Now we have more manageable pieces. We can start figuring out how to tend to all of the smaller pieces. The cycle may restart here if one of the pieces is still complex but you're now closer to solving the issue that you're having. You have this smaller hurdle to jump over than you did before. # # We're gonna continue as if our pieces are all small enough though. We've got manageable pieces. We know what they're each responsible for. Now it's time to start building these things out. # # ### Get it into terms we can understand # Ok so we have our "manageable" piece. Sometimes you're doing something that you're weak in or just can't seem to understand a topic. Sometimes you just need to get it related to something that you do understand. # # Both an artist and a mathematician can draw a square. They care about different things but generally they both know what a square is. Most of the time, to start, you just need to generally understand what you need to do. So if you're the mathematician or the artist trying to understand something in the other realm the fine details can come later as long as you can get a general understanding. # # ### Scale down # This part is probably my favorite and most comforting step. My problem seems insurmountable. I don't know how I could ever do it. # # Let's scale it down. I need to find out if 47,055,833,459 is a prime number. *cough cough* ~This is your assignment (sorta)~ That number is ridiculous.
Let's start with a number I know is prime like 2 or 5 or 7. I'm going to find out if these smaller numbers are prime and ignore the big number for now. # # With this we can learn a valuable lesson. We're looking inbetween the lines. We need something that discovers prime numbers. Not large prime numbers. Just prime numbers. I say this to demonstrate that we need to look at the simple case for this. The big number is there to scare you. You need to focus on the process and the solution. If your process is sound the size of the problem becomes less of an issue. # # ![Car example](https://hackernoon.com/hn-images/1*1L6_1aPP-_kFDAfbt9DkUQ.jpeg) # # ### Find a solution # Ok I've scaled my issue down. I'm going to find out if numbers from 1-10 are prime or composite. Now I can start implementing a solution. This part is super important to think about. How is this done now. Has anyone done this or something like it? # # Now we don't want to take answers but we know how to determine if a number is prime or not. Now we need to abstract that solution so that I can give it any number. Meaning that my solution has to be dynamic. I can't have hard coded numbers in there. We also need to handle edge cases. Is 1 a prime number? (No.) # # ### Scale up # Now that we have our solution and we've tested it on the small scale level, it's time to attack our big scary number. # # 47,055,833,459 # # We may be successful here. We may have to scale back down and tweak our solution with a bigger test range. What's important is that we've made a lot of progress. Time to tinker till we get it right. # # ### Fix edge cases (Sometimes) # Even when our code is right and working sometimes we have edge cases that we didn't account for. # # Did you test negative numbers? # # Did you test 0? # # Edge/Fringe cases will always exist so don't feel too bad if you don't get them all on the first pass. 
# ## Revisiting Binary Tree # Trees are the data type that we're going to be focused on in our project so we'll take these little refreshers here and there. # # ### Dynamically Adding Elements # By now you've probably realized that you'll need to dynamically add objects. # # Now what does entail? We need to first see if our BST has a root node. If it doesn't, our inital element goes there. Otherwise we need to look at the value of the root node and the element that we're trying to add. If it's less than our number then it gets stored in our left child, if it's a duplicate we've decided to use the left child, and if it's greater than our number then right child. # # We can't make all of these decisions manually. You never know who will come along and decide to use your BST class. You may have to store some information for use later. Your code will need to be able to decide where to put these new values. # # To do this dynamically we need to think about what the tree needs to keep track of and program in that logic. While this is happening be sure to think not only about the requirements but possible people trying to break you. # # ### Finding Elements # Finding elements in our tree is fairly simple. # ## Advanced Java Concepts # Now that we've been using Java for a little while we should learn some more important terminology. # # ### Abstraction # Here we're more so focused on the blueprints of what we're going to be working on rather then the physical implementation of the blueprints. # # We're looking to hide the actual implementation of what will be available to the end user and only give them the functionality of the object. So in terms of where we've seen this are Abstract Classes and Inheritance. # # #### Abstract Classes # Abstract classes have the `abstract` keyword in the declaration of the class. # # These are capable of having `abstract` methods along side our regular methods. These are methods with no body. Meaning they're just empty. 
They are empty because we can extend them later and define the methods then. If we're using `abstract` methods then the class must be `abstract`. They also don't have curly braces `{}` we just cap it off with a semicolon. # # When classes are `abstract` they can't be instantiated meaning we can't make an instance of our abstract class with our normal `Object variableName = new Object();` # # To use these classes we need to `extend` the abstract class when making a new class. When we `extend` our abstract class we need to make sure that we provide actual implementations of the methods that we've given in the `abstract` class because remember we didn't define them before. # # #### Interfaces # Interfaces are similar to abstract classes but taken a step further in levels of abstraction. So first we're implicitly abstract and don't need to declare it. That being said being abstract in totality, we can ONLY use abstract methods in an interface. It'll have no body. # # - Same as the abstract class we can't instantiate from our interface. # - Unlike regular classes we don't have a constructor. # - It's irregular to have variables here but we can do so if they're `static` or `final` # - `static` these can be accessed without making an instance of the class # - `final` these cannot be changed after initialized # - Instead of being extended interfaces are `implemented` # - Interfaces can `extend` multiple interfaces. # # ### Inheritance # We've seen inheritance for our classes already and now it time to learn a little more about it. So you've seen me use `extends` when we did the `Mammal` example. This is when we subclass things, extend things, implement things. We inherit things from the class that were pulling from. We inherit the methods and variable of said class. # # ### Polymorphism # Polymorphism is implicit in Java. We have something called an IS-A relationship. This is basically the same as our `Mammal` example for instance we have a `Human` class. 
That `extends` `Mammal` we can also say that `Human` IS-A `Mammal`. # # Polymorphism is the ability to take on multiple forms through `implements` and `extends`. # # All Java objects are polymorphic because they will pass the IS-A test with themselves and the `Object` class. # # ### Encapsulation # Finally we have encapsulation. This is the ability to wrap all of the data and methods to manipulate that data belonging to a class together as a single unit. # # We're already partially there with the classes that we've been making. To have full encapsulation we need to # - Declare our variables `private` # - Provide public setter and getter methods that will access and allow you to view those private variable values # ## Managing Java Classes # When it comes to managing Java classes there are somethings that we can do to make our lives a little easier when it comes to being formulaic about things. For now we're going to talk about constructor and getters and setters. # # ### Constructor # We've already seen constructors in action. They're responsible for setting up our classes when we make a new instance of it. This is where we handle our logic as it pertains to things being defined with our initial objects. If things are subclassed then we need to access the `super` method by doing this we can pass along the parameters that belong to the class that we're extending. # # ### Getters and Setters # Having getters and setters is a simple concept in my opinion. # # Getters are a way to access the data inside of our object through a method. # Setters are a way to set the data inside our object. # # These are important because they allow us access values when we make things private. # ## Searching Algorithms # Something that we'll be doing for the next couple of weeks is looking at a few sorting algorithms. We're going to do this to improve our ability to interpret and understand logic. 
# # ### Bubble Sort # Bubble sort is the easiest implementation of sorting that we could probably implement. # # Logically the way it works is by swapping elements if they're not in the right order. # # The basic way to know that you're done is by passing over the list and there being no swaps that happen. # # [Example](https://www.geeksforgeeks.org/bubble-sort/) # # ### Selection Sort # Selection sort is also fairly easy to implement. # # This works by finding the smallest element in the array on each pass. Once we do that we then look at the subarray minus the element that we sorted. # # We know that we're done when our subarray gets down to 0. # # [Example](https://www.geeksforgeeks.org/selection-sort/) # # ### Insertion Sort # Insertion sort is one of the more natural ones of the sorting methods. # # This works similarly to people sorting things into piles. Think cards! If you've been dealt cards usually you have a sorted portion and an unsorted portion and you'll insert cards into the proper place. For this we traverse the list from left to right and look at the current element; if the current element is less than the one before it we move it to the left until it's in the proper place. # # We know we're done when we reach the end of the list. # # [Example](https://www.geeksforgeeks.org/insertion-sort/)
JupyterNotebooks/Lessons/Lesson 9.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# + [markdown] pycharm={"name": "#%% md\n"}
# Unzip datasets and prepare data:

# + pycharm={"name": "#%%\n"}
from collections import Counter

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.decomposition import PCA

from multi_imbalance.datasets import load_datasets
from multi_imbalance.resampling.spider import SPIDER3
from multi_imbalance.utils.data import construct_flat_2pc_df

# %matplotlib inline
sns.set_style('darkgrid')

# Load the example imbalanced dataset and peek at the first few rows/labels.
dataset = load_datasets()['new_ecoli']
X, y = dataset.data, dataset.target
print(X[:5])
print(y[:5])
# -

# Class frequencies before resampling.
Counter(dataset.target)

# Resample data using SPIDER3 algorithm

# + pycharm={"name": "#%%\n"}
# Misclassification cost matrix for the 8 classes; the diagonal (correct
# classification) costs nothing.  NOTE(review): np.random.rand is unseeded,
# so the costs — and therefore the resampling result — differ between runs.
cost = np.random.rand(64).reshape((8, 8))  # np.ones((8, 8))
for i in range(8):
    cost[i][i] = 0

# Which labels count as majority / intermediate / minority classes.
maj_int_min = {
    'maj': [0, 1],
    'int': [4],
    'min': [2, 3]
}

clf = SPIDER3(k=1, maj_int_min=maj_int_min, cost=cost)
resampled_X, resampled_y = clf.fit_resample(X, y)
# -

# Compare results by plotting data in 2 dimensions

# + pycharm={"name": "#%%\n"}
n = len(Counter(y).keys())
p = sns.color_palette("husl", n)

# Project to 2 components for visualisation only; PCA is fitted on the
# original data and reused for the resampled set so both scatters share axes.
pca = PCA(n_components=2)
pca.fit(X)

fig, axs = plt.subplots(ncols=2, nrows=2)
fig.set_size_inches(16, 10)
axs = axs.flatten()

# Left column: class counts; right column: 2-D scatter of the samples.
axs[1].set_title("Base")
sns.countplot(y, ax=axs[0], palette=p)
X = pca.transform(X)
df = construct_flat_2pc_df(X, y)
sns.scatterplot(x='x1', y='x2', hue='y', style='y', data=df, alpha=0.7, ax=axs[1], legend='full', palette=p)

axs[3].set_title("Spider")
sns.countplot(resampled_y, ax=axs[2], palette=p)
resampled_X = pca.transform(resampled_X)
df = construct_flat_2pc_df(resampled_X, resampled_y)
sns.scatterplot(x='x1', y='x2', hue='y', style='y', data=df, alpha=0.7, ax=axs[3], legend='full', palette=p)
# -

# ### Pipeline example

# +
from imblearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.preprocessing import StandardScaler

# Reload the dataset and hold out a stratified test split so every class is
# represented on both sides.
dataset = load_datasets()['new_ecoli']
X, y = dataset.data, dataset.target
X_train, X_test, y_train, y_test = train_test_split(X, y, stratify=y, random_state=42)

# Scale, resample with SPIDER3, then classify with k-nearest neighbours.
steps = [
    ('scaler', StandardScaler()),
    ('spider', SPIDER3(k=1)),
    ('knn', KNN()),
]
pipeline = Pipeline(steps)
pipeline.fit(X_train, y_train)

y_hat = pipeline.predict(X_test)
print(classification_report(y_test, y_hat))
examples/resampling/spider3.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# Ordinary least squares on a synthetic linear model: samples are generated
# from the reference parameters R, and the closed-form solution S (analytical
# inversion of the 2x2 normal equations) is computed and compared against R.
# -

import numpy as np
from matplotlib import pyplot as plt

# Problem set-up.
R = np.array([[1], [2]])   # reference model: expected solution of A * R = Y
sigmaY = 0.2               # standard deviation of the observation noise
N = 100                    # number of samples
seedinit = 0               # RNG seed, fixed for reproducibility

np.random.seed(seedinit)

# Inputs on [0, 1) and the design matrix [1 | X] (intercept + slope columns).
X = np.arange(0, 1, 1.0 / N, dtype=float).reshape(N, 1)
A = np.concatenate((np.ones((N, 1), dtype=float), X), axis=1)

# Noisy observations of the reference model.
Y = np.matmul(A, R) + sigmaY * np.random.randn(N, 1)

# Closed-form OLS estimate from the textbook 2x2 normal-equation formulas.
sum_x = np.sum(X)
sum_y = np.sum(Y)
sum_xx = np.sum(np.square(X))
sum_xy = np.sum(X * Y)
denominator = N * sum_xx - sum_x ** 2
numerator = np.array([[(sum_xx * sum_y - sum_x * sum_xy)],
                      [(-sum_x * sum_y + N * sum_xy)]])
S = numerator / denominator

print("Reference parameters", R.reshape(2))
print("Computed parameters", S.reshape(2))

# Fitted values under the estimated model.
Z = np.matmul(A, S)

# Plot the noisy samples, the reference line and the fitted line.
plt.figure(figsize=(12, 9))
plt.title('Result of ordinary least squares for a linear model',
          fontdict={'fontsize': 14, 'fontweight': 'bold'})
plt.axis([0, 1, 0, (R[0][0] + R[1][0] + sigmaY)])
plt.plot(X, Y, 'b.', label='Samples generated by reference model + noise')
plt.plot(X, np.matmul(A, R), 'g', label='Reference model')
plt.plot(X, Z, 'r', label='Estimation with computed model')
plt.legend(loc='lower right')
jupyter/notebooks/ordinary_least_squares_linear_regression_3simplified.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Milestone 2: Putting together our EDAs into one # **Our analysis as a team answered the questions we set out to investigate in Milestone 1:** # ### Simple Questions: # 1) Is there a correlation between age and medical insurance costs? # * yes, insurance costs seem to go up as people age # # 2) Does an individual's BMI influence their medical insurance costs? # * Yes, higher BMI does correlate with higher costs but this database is also very skewed with obese individuals so we don't have that much data on non-obese individuals. And also we found out that 9 of the 10 individuals in the bottom 10 of the medical charges are actually obese individuals and not non-obese ones, which is odd. # # 3) How does the number of children someone has change their medical insurance costs? # * While our graphs and analysis seem to show that those with 5 children have lower medical costs than those with 0-4, the distribution of charges seem somewhat even between those with 0-4 children. # # 4) Do smokers have higher medical insurance costs? # * Yes. Absolutely. # # ### Complex Questions: # 1) Is there a gender bias present within the costs of medical insurance? If there is, how is this bias emphasized by other demographic factors? # * In this dataframe, men on average pay more than women in every category but the bottom 10 of the medical charges are also only filled with men while the top 10 are relatively even between the sexes. Overall, there doesn't seem to be a gender bias in this dataframe but that doesn't mean gender bias doesn't exist in medicine over all as this is just one set of data. # # 2) How does the cost of medical insurance change for each year an individual ages? # * There is a clear increase in medical charges as people age. 
However, we did not investigate further in terms of rate of increase, as there are too many variables at play in this dataframe to concisely state the linear effect age has on medical insurance charges. # # 3) Which combination of factors produces the lowest medical costs? Which combination produces the highest? # * After our analysis, we predicted that the lowest combination would be non-smoking females less than 25 years old with more than 2 children, a BMI of less than 25, who live in the southwest. We were somewhat right, as the real combination in the lowest 10 medical costs is 18-year-old non-smoking males, mostly with BMI over 30, who live in the southeast. # * Our prediction for the highest combination was 55-year-or-older males that smoke with a BMI over 30 that live in the southeast with less than or equal to 2 children. The real combination in the highest 10 medical costs is less definitive than the lowest 10. Sex does not seem to play a factor. All of them smoke, have less than or equal to 2 kids, the mean age is 47, and half of them reside in the southeast. # # 4) How do medical insurance costs change throughout the quadrants presented in the dataset? # * Please see below and our individual EDAs for detailed in-depth analysis of this dataset.
#
# NOTE(review): this analysis depends on the local `scripts` package and a CSV
# under ../data/raw, so it only runs inside the project checkout.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from scripts import project_functions

# #### This is our Dataframe
df = project_functions.load("../data/raw/Medical Expenses.csv")
df

# **With `.info()` and `.describe()`, we can see that there are no nulls and get an overview of the numerical data in the dataset**
df.info()
df.describe().T

# **We use `.pairplot` to show the correlation between pairs of numerical variables**
sns.pairplot(df)

# ##### a few quick observations
# - charges seem to go up with age
# - higher number of children surprisingly has lower medical charges
# - the BMI in this dataset is on the higher end (>25 BMI is overweight and >30 is obese)

# **Use `.displot` to take a closer look at the distribution of medical insurance charges**
sns.displot(data=df, x="Medical Insurance Charges", kde=True).set(title="Medical Insurance Charges Distribution")

# Bin the charges into $10K buckets so we can count observations per range.
charges_ranges = [0,10000,20000,30000,40000,50000,60000,70000]
charges_groups = ['0-10K','10K-20K','20K-30K','30K-40K','40K-50K','50K-60K','60K-70K']
df['Charges Groups'] = pd.cut(df['Medical Insurance Charges'].values,charges_ranges,labels=charges_groups,ordered=True)
df = df.sort_values(by=['Medical Insurance Charges'])
df
sns.countplot(data=df,x='Charges Groups').set_title("Medical Charges Groups")

# This provides us with the knowledge that most of the datapoints in this dataframe are of medical charges in the *below $10,000* category.
#
# Next, we'll use bins to categorize the ages to take a closer look at the age distribution in this dataframe.
age_ranges = [15,25,35,45,55,65]
age_groups = ['15-25','25-35','35-45','45-55','55-65']
df['Age Groups'] = pd.cut(df.Age,age_ranges,labels=age_groups,ordered=True)
# NOTE(review): the result below is not assigned, so this sort has no effect.
df.sort_values(by=['Age'])
df
sns.countplot(data=df,x='Age Groups').set_title("Age Group Distribution")

# It is good to see that the age groups are roughly evenly distributed.
# This will help us get a clearer image of the medical insurance charges across the population.

# #### Use `.scatterplot` to take a closer look at the correlation between age and medical insurance charges. One of our research questions.
sns.scatterplot(data=df,x="Age",y="Medical Insurance Charges")

# **Use `.groupby` to calculate the mean medical insurance charge based on age group**
round(df.groupby('Age Groups')['Medical Insurance Charges'].mean(),2)

# **It appears that older individuals are charged more than younger individuals based on the scatterplots and the means.** This could be because older individuals are more prone to illnesses and have more medical conditions than younger individuals.

# #### Next, we'll take a closer look at BMI vs Charges

# use bins to categorize BMI (standard WHO-style cut points)
BMI_ranges = [float("-inf"),18.5,25,30,float("inf")]
BMI_groups = ['Underweight','Normal Weight','Overweight','Obese']
df['BMI Category'] = pd.cut(df.BMI,BMI_ranges,labels=BMI_groups,ordered=True)
df = df.sort_values(by=['BMI'])
df
sns.countplot(data=df,x='BMI Category').set_title("BMI Distribution")

# There is a surprisingly high amount of obese individuals in the study. This is no surprise since America has a very high obesity rate. It would have been better if this dataset had a more even distribution.

# #### Let's take a closer look at the correlation between BMI and medical insurance charges, one of our research questions.
#
# Use `.scatterplot` to take a closer look at the correlation between BMI and medical insurance charges.
sns.scatterplot(data=df,x="BMI",y="Medical Insurance Charges").set_title("BMI vs Medical Insurance Charges")

# **Use `.groupby` to calculate the mean medical insurance charge based on BMI Category**
round(df.groupby('BMI Category')['Medical Insurance Charges'].mean(),2)

# The scatterplot isn't as clear but it is clear from the means table that higher BMI leads to higher insurance charges.
# Again, this makes sense as higher BMI leads to a multitude of other health conditions.

# #### Use `.scatterplot` to take a closer look at the correlation between number of children and medical insurance charges, one of our research questions.
sns.scatterplot(data=df,x="Number of Children",y="Medical Insurance Charges").set_title("Number of Children vs Medical Insurance Charges")

# The distribution of charges seems somewhat even until the individuals with 5 children. Individuals with 5 children have considerably lower costs on average. This could be because with 5 children, parents are more reluctant to take their children to the hospital for less serious illnesses.

# #### Now let's take a look at medical insurance charges for smokers vs non smokers.
sns.displot(df[df.Smoker=='yes'],x='Medical Insurance Charges',color='c',kde=True).set(title="Medical Insurance Charges Distribution for Smokers")
sns.displot(df[df.Smoker=='no'],x='Medical Insurance Charges',color='b',kde=True).set(title="Medical Insurance Charges Distribution for Non Smokers")

# **Use `.groupby` to calculate the mean medical insurance charge based on Smoking Habits**
round(df.groupby('Smoker')['Medical Insurance Charges'].mean(),2)

# Smokers are charged considerably more on average than non-smokers. This makes sense as it is well known that smoking is bad for one's health.

# #### Let's take a look at the distribution of sex in the data
sns.countplot(data=df,x='Sex')

# The number of males and females is almost equal which will help yield more reliable results for the gender bias study.

# **Use `.displot` to show the distribution of medical insurance charges for males**
sns.displot(df[df.Sex=='male'],x='Medical Insurance Charges',color='c',kde=True).set(title= "Medical Insurance Charges Distribution for Males")

# **Use `.displot` to show the distribution of medical insurance charges for females**
sns.displot(df[df.Sex=='female'],x='Medical Insurance Charges',color='green',kde=True).set(title="Medical Insurance Charges for Females")

# ***Use `.groupby()` to calculate the mean medical insurance charge based on sex***
round(df.groupby('Sex')['Medical Insurance Charges'].mean(),2)

# It seems that men are charged slightly more for medical insurance on average.

# #### Let's take a look at the trend in medical insurance costs as age increases
sns.lineplot(data=df,x='Age',y='Medical Insurance Charges').set(title="Age vs Medical Insurance Charges")

# There is a clear increase in medical insurance charges as individuals age.

# #### Now, let's investigate the last factor in the dataframe, the place of residence
sns.boxplot(data=df,x='Place of Residence',y='Medical Insurance Charges')

# Use `.groupby` to calculate the mean medical insurance charge based on place of residence
round(df.groupby('Place of Residence')['Medical Insurance Charges'].mean(),2)

# Individuals who live southeast are charged more for medical insurance on average.

# ### Next, let's take a closer look to see if there is a gender bias in medical insurance charges.
plot1=sns.displot(df, x="Medical Insurance Charges", hue = "Sex", bins = 30, kde=True).set(title = "Medical Insurance Charges Distribution Between Male and Female")

# from this plot, we can see that there's nothing especially definitive about whether men or women are being charged more based on sex. We can see that while there are more men in the lowest bin, there are also more men in the higher bins. But again, it's not too big of a difference.
# Earlier, we found the mean of the medical insurance charges of just men and just women:
round(df.groupby('Sex')['Medical Insurance Charges'].mean(),2)

# NOTE(review): these means are hard-coded from the groupby output above — they
# will silently go stale if the underlying data changes.
difference = 13956.75-12569.58
percent_difference = round(difference/12569.58*100, 2) #round to 2 decimal places
print("Men's overall average medical insurance charge is", percent_difference, "% higher than women.")

#
# ## Now let's compare other factors along with sex
# Using the male only and female only dfs, see if there's anything interesting within
#
# #### Medical Costs against Age between Men and Women

# +
dfmale = project_functions.male_only("../data/raw/Medical Expenses.csv")
dffemale = project_functions.female_only("../data/raw/Medical Expenses.csv")
plot4=sns.relplot(data=df, y = "Medical Insurance Charges", x="Age", hue="Sex").set(title = "Medical Insurance Charges as Person Ages")
# -

plot10=sns.lineplot(data=dfmale, x= 'Age', y="Medical Insurance Charges").set(title="Males Ages against Medical Insurance Charges")

plot11=sns.lineplot(data=dffemale, x= 'Age', y="Medical Insurance Charges").set(title="Females Ages against Medical Insurance Charges")

# NOTE(review): this rebinds plot11, shadowing the female-only plot above.
plot11=sns.lineplot(data=df, x= 'Age', y="Medical Insurance Charges", hue="Sex").set(title="Ages against Medical Insurance Charges")

# ##### Separate into Age Groups and Gender
# ##### Male:
age_ranges = [15,25,35,45,55,65]
age_groups = ['15-25','25-35','35-45','45-55','55-65']
# NOTE(review): pd.cut is applied to df.Age (the full frame), not dfmale.Age —
# this relies on the row indexes lining up; confirm this is intended.
dfmale['Age Groups'] = pd.cut(df.Age,age_ranges,labels=age_groups,ordered=True)
# NOTE(review): result not assigned; this sort has no effect.
dfmale.sort_values(by=['Age'])
round(dfmale.groupby('Age Groups')['Medical Insurance Charges'].mean(),2)

# ##### Female:
dffemale['Age Groups'] = pd.cut(df.Age,age_ranges,labels=age_groups,ordered=True)
dffemale.sort_values(by=['Age'])
round(dffemale.groupby('Age Groups')['Medical Insurance Charges'].mean(),2)

# Looking at the line graphs, there is no significant difference between the 2 genders; both show a distinct upward trend in medical insurance costs as the person ages.
# However, looking at the averages, it looks like men pay more than women in all age groups except in the *45-55* years age group.

# #### The Average Cost of Medical Insurance Charges with Children:
plot15=sns.relplot(data=df, y = "Medical Insurance Charges", x="Number of Children", hue="Sex").set(title = "Medical Insurance Charges against Number of Children")
round(df.groupby(['Sex', 'Number of Children'])['Medical Insurance Charges'].mean(),2)

# Looking at how the number of children affects the medical insurance charges between men and women, it looks like males have higher insurance charges and that insurance costs seem to drop at 5 children for women and 4 and 5 children for men--- unsure if that's accurate to real life, maybe there's some correlation of people seeking medical treatment less with more kids to avoid additional medical charges or that people with less kids are more paranoid with their kids' health.
#
# **Regarding gender bias, it looks like men with children pay more on average than women with children except for at 5 kids.**

# #### The Average Cost of Medical Insurance Charges with Smokers vs Non-Smokers:

# +
dfsmoke = project_functions.smokers_only("../data/raw/Medical Expenses.csv")
dfsmoke[('Sex')].value_counts()
# -

# There are more men than women in the data that smoke but there's enough women that the data should be reliable.

# +
plot5=sns.displot(dfsmoke, x= "Medical Insurance Charges", hue = "Sex", kde=True).set(title = "Medical Insurance Charges Distribution Between Male and Female Smokers")
# -

# Male smokers appear to have higher medical charges overall - note the especially higher distribution around 40,000.
round(dfsmoke.groupby(['Sex'])['Medical Insurance Charges'].mean(),2)

# The average medical insurance charge of men is about 7.8% higher than women in smokers.
#
# Earlier, we saw that a disproportionate amount of people in this dataframe have a high BMI, so let's investigate it when gender is separated out.
# # #### The Average Cost of Medical Insurance Charges with BMI: # + #the details of the averages are in Angel's EDA #create new dataframe with above information bmid= {'Sex': ['female', 'male'], '20': [9089.36, 8548.16], '30':[10853.9, 10845.1], '40':[14234.69, 16431.42], '50': [15370.10, 18010.28], '60':[0, 16034.31]} dfbmi= pd.DataFrame(data=bmid) dfbmi dfbmi = pd.melt(dfbmi, id_vars=['Sex'], var_name='BMI', value_name="Average Medical Insurance Charges") #melt to turn df from wide to long dfbmi_wide=dfbmi.pivot("BMI", "Sex", "Average Medical Insurance Charges") #dfbmi['BMI'] = pd.to_numeric(dfbmi['BMI']) dfbmi_wide # - plot9 = sns.lineplot(data=dfbmi_wide).set(ylabel='Average Medical Insurance Charges', title='BMI vs Medical Costs between Men and Women') # The end of this graph is misleading as there are no datapoints for 50+ BMI in women, but overall men overtake women in medical costs, starting at 30 BMI. # So far, looking at each factor alone against medical insurance charges, it appears that `being elderly, being male, having a high BMI, having few children, being a smoker, and living in the SouthEast` # would yield the highest medical insurance costs in theory. # On the opposite end, `being young, female, having low BMI, more children, and being a non-smoker in the SouthWest` would yield the lowest medical insurance costs in theory. # # ### So let's create a dataframe based on the theoretical highest and lowest conditions: # # # # Highest: male, Age>55 years old, BMI>=30, smoker, southeast, <=2 children dfTH = project_functions.theory_highest("../data/raw/Medical Expenses.csv") round(dfTH['Medical Insurance Charges'].mean(),2) # Lowest: female, Age<25 years old, BMI<=25, nonsmoker, >= children, southwest dfTL = project_functions.theory_lowest("../data/raw/Medical Expenses.csv") round(dfTL['Medical Insurance Charges'].mean(),2) # These 2 theoretical conditions did produce realtively high and low medical charges. Let's take a look at the real conditions. 
*More details are in Angel's EDA* but let's just take a look at the top 10 and bottom 10 medical charges entries. # # #### TOP 10 dftop10 = project_functions.top10("../data/raw/Medical Expenses.csv") dftop10 dftop10.describe().T dftop10['Place of Residence'].value_counts() dftop10['Number of Children'].value_counts() # They are all `smokers`. 6 males vs 4 females. # Therefore, `sex doesn't seem to play a factor` in the highest charges. All the entries have a `BMI >30` so they are obese. While `half of them do reside in the southeast`, not all of them do. Most have `no children or only 1`. And there is a `mean age of 47`, but there is a 27 year old in the sample. # Therefore, our theory of : # Highest: # # - [ ] male # - [ ] Age>55 years old # - [x] BMI>=30 # - [x] smoker # - [x] southeast # - [x] <=2 children # # is partly right. (A x mark is given if more than half the sample has it) # #### BOTTOM 10 dfbot10 = project_functions.bot10("../data/raw/Medical Expenses.csv") dfbot10 dfbot10.describe().T dfbot10['Place of Residence'].value_counts() dfbot10['Number of Children'].value_counts() # They are all `non-smokers` and all `18 year old males`. Therefore, sex and age does seem to play a factor in the lowest charges. All the entries except 1 has a `BMI >30` so they are still obese, but we do have to remember that this dataframe has a disproportionate number of obese people. Still, it's interesting that more non obese people are not in the bottom 10 of the medical charges. All of them do reside in the `southeast` and they all have `no children`. # Therefore, our theory of : # Lowest: # # - [ ] female # - [x] Age<25 years old # - [ ] BMI<=25 # - [x] Non-smoker # - [ ] southwest # - [ ] >2 children # # is partly right. (A x mark is given if more than half the sample has it)
analysis/Milestone2.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.impute import SimpleImputer
import seaborn as sns

# Placeholder strings that should be treated as missing values when reading the CSV.
unknown = ["", "??", "????", "three", "four", "five"]

# Import dataset, mapping the placeholder strings above to NaN.
datasets = pd.read_csv('CarData.csv', na_values=unknown)

# Dataset extra details and rows.
datasets.head()
datasets.describe()

# Drop only the rows where *every* column is NaN.
datasets.dropna(how='all', inplace=True)
print("\nData :\n", datasets)

# +
new_X = datasets.iloc[:, :].values

# SimpleImputer analyses the data (fit) and later replaces NaN values with the
# column mean (transform).
imputer = SimpleImputer(missing_values=np.nan, strategy="mean")

# Columns 0-3: fit, then fill NaNs with the learned means.
imputer = imputer.fit(new_X[:, 0:4])
new_X[:, 0:4] = imputer.transform(new_X[:, 0:4])

# Columns 5-10: same treatment. Column 4 is skipped -- presumably the
# non-numeric FuelType column; TODO confirm against CarData.csv.
imputer = imputer.fit(new_X[:, 5:11])
new_X[:, 5:11] = imputer.transform(new_X[:, 5:11])

print("\n\nMean Value for NaN : \n\n", new_X)
# -

# Commit: - Taken as reference code

# LabelEncoder converts categorical values into numeric codes.
le = LabelEncoder()
X = datasets.iloc[:, :].values
# NOTE(review): this encodes column 0 but stores the codes in column 4 and
# prints them as "Fueltype". If FuelType is column 4, this was probably meant
# to be le.fit_transform(X[:, 4]) -- verify against the CSV schema before
# changing behaviour.
X[:, 4] = le.fit_transform(X[:, 0])
print("\n\nFueltype : \n", X[:, 4])

# One-hot encode FuelType: get_dummies creates one 0/1 column per category.
dummy = pd.get_dummies(datasets['FuelType'])
# Drop the original FuelType column and prepend the dummy columns.
datasets1 = datasets.drop(['FuelType'], axis=1)
datasets1 = pd.concat([dummy, datasets1], axis=1)
print("\n\nFinal Data :\n", datasets1)

# Min-max scaling of both numeric column blocks to the [0, 1] range.
scaler = MinMaxScaler()
scaler1 = scaler.fit_transform(new_X[:, 0:4])
scaler2 = scaler.fit_transform(new_X[:, 5:11])
print("\nScaler1 :\n", scaler1)
print("\nScaler2 :\n", scaler2)

# Standard scaling (zero mean, unit variance) of the same column blocks.
std = StandardScaler()
std1 = std.fit_transform(new_X[:, 0:4])
# Bug fix: "Standard2" previously reused the MinMaxScaler (`scaler`) instead
# of the StandardScaler (`std`), so the printed values were min-max scaled,
# not standardised.
std2 = std.fit_transform(new_X[:, 5:11])
print("\nStandard1 :\n", std1)
print("\nStandard2 :\n", std2)

# Correlation matrix of the dataset, visualised as a heatmap.
# NOTE(review): recent pandas versions raise on non-numeric columns here
# unless numeric_only=True is passed -- confirm the pandas version in use.
new_dataset = datasets.iloc[:, :]
corr = new_dataset.corr()
sns.heatmap(corr)

# +
# Feature selection: for every pair of features with correlation >= 0.9, drop
# the second one. `columns` is a boolean keep-mask over the corr-matrix columns.
columns = np.full((corr.shape[0],), True, dtype=bool)
for i in range(corr.shape[0]):
    for j in range(i + 1, corr.shape[0]):
        if corr.iloc[i, j] >= 0.9:
            if columns[j]:
                columns[j] = False

# Bug fix: the mask must be used directly as a boolean index. The original
# `columns - 1` subtracted an int from a boolean array, which raises
# "numpy boolean subtract ... is not supported" on modern NumPy, and would
# mis-select columns even where it ran.
select_col = new_dataset.columns[columns]
select_col.shape
new_dataset = new_dataset[select_col]
print(new_dataset)
# -
CE019_Lab2/019_lab2_data_preprocessing.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Part IV: Installing lookyloo # # ## Tools used in the backend # # Lookyloo is glue and tooling to put together a bunch of existing projects. You can find a detailled description of the implementation [on the website](https://www.lookyloo.eu/docs/main/implementation-details.html), but to keep it simple, the main libraries and tools used are the following: # # * Redis: key/value database, used for caching # * Splash and Scrapy: instrumentation and capture of the websites # * ETE Toolkit: pure python library to handle phylogenetic trees # * D3JS: a JavaScript library to vizualize things # * Bootstrap: to make a website pretty # * Flask & Jinja: pure python webserver, and the templating module # # And we also have connector to 3rd party modules such as VirusTotal. # # ## Install guide # # It is extremely strongly to install lookyloo on a recent Ubuntu. I personaly use the latest release (right now, 20.10), but Lookyloo is also compatible with the most recent LTS (right now, 20.04), feel free to use either. # If you use an other operating system, you're on your own, and I won't be able to help you. # # Please follow the [official install guide](https://www.lookyloo.eu/docs/main/install-lookyloo.html), and let me know if you have questions. # # You can run lookyloo directly on your system, but if you prefer using a virtual machine, go for it. The examples I showed you until now were running on my main system. # # **Task**: Install lookyloo (on your host, or in a VM) # # ## Configuration # # Running `poetry run update --yes` will automatically generate `config/generic.json`, and the system should be ready to go, but you probably still want to have a look at the configuration and edit `config/generic.json` accordingly. 
The config file works out of the box, but you may want to configure `cache_clean_user` in order to access the hidden captures, and other development related features from the UI. # # Every feature is documented in the `_notes` key in `config/generic.json.sample`. # # ## Launch # # Assuming the installation went well, you can now [launch lookyloo](https://www.lookyloo.eu/docs/main/install-lookyloo.html#_run_splash). # # Make sure you have splash **and** lookyloo itself running, otherwise the captures won't work. # # **Note**: # * If you installed it directly on your host, ou can check if splash is up on runnig by opening http://127.0.0.1:8050/ in your browser, and lookyloo on http://127.0.0.1:5100/. # * If you installed in a virtual machine, use the IP address of the machine instead of 127.0.0.1 # # ## Basic usage guide # # Go to http://127.0.0.1:5100/, click on capture, and run it on the website of your choice. It can be anything but avoid websites requiring a login: the result may not be as interesing. # # After one or two minutes, you should see a tree. If you don't, there is an issue with your installation, you should see something in the logs (either lookyloo or splash). # # ### Diasgnostic and debug # # 1. Go to splash, paste the URL, run the capture. # * If it works, splash is working as expected. # * If it doesn't splash may not be able to access the internet? Look at the logs. # 2. *Assuming splash works* look in the terminal you started lookyloo in and there should be an error. Paste it in the search browser of your choice. # # ### Looking as simpler websites # # Most websites you use on a daily basis will probably be way to complicated to infestigate at first, so we will start with more simple ones. # # **Tasks**: Capture a simple website, and look at the examples on the herokuapp. # # 1. CIRCL website: http://circl.lu # 2. Copy and paste the URLs on [this page](https://lookyloo-testing.herokuapp.com/), and discuss what happen. 
# # ### Investigate your own website # # **Task**: Go back to the first website you tried, and describe what is going on.
04.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- # The starter code can be found in the final_project directory of the codebase that you downloaded for use with the mini-projects. Some relevant files: # # **poi_id.py** : Starter code for the POI identifier, you will write your analysis here. You will also submit a version of this file for your evaluator to verify your algorithm and results. # # **final_project_dataset.pkl** : The dataset for the project, more details below. # # **tester.py** : When you turn in your analysis for evaluation by Udacity, you will submit the algorithm, dataset and list of features that you use (these are created automatically in **poi_id.py**). The evaluator will then use this code to test your result, to make sure we see performance that’s similar to what you report. You don’t need to do anything with this code, but we provide it for transparency and for your reference. # # **emails_by_address** : this directory contains many text files, each of which contains all the messages to or from a particular email address. It is for your reference, if you want to create more advanced features based on the details of the emails dataset. You do not need to process the e-mail corpus in order to complete the project. # # # Steps to Success # We will provide you with starter code that reads in the data, takes your features of choice, then puts them into a numpy array, which is the input form that most sklearn functions assume. Your job is to engineer the features, pick and tune an algorithm, and to test and evaluate your identifier. Several of the mini-projects were designed with this final project in mind, so be on the lookout for ways to use the work you’ve already done. 
# # As preprocessing to this project, we've combined the Enron email and financial data into a dictionary, where each key-value pair in the dictionary corresponds to one person. The dictionary key is the person's name, and the value is another dictionary, which contains the names of all the features and their values for that person. The features in the data fall into three major types, namely financial features, email features and POI labels. # # **financial features**: ['salary', 'deferral_payments', 'total_payments', 'loan_advances', 'bonus', 'restricted_stock_deferred', 'deferred_income', 'total_stock_value', 'expenses', 'exercised_stock_options', 'other', 'long_term_incentive', 'restricted_stock', 'director_fees'] (all units are in US dollars) # # **email features**: ['to_messages', 'email_address', 'from_poi_to_this_person', 'from_messages', 'from_this_person_to_poi', 'shared_receipt_with_poi'] (units are generally number of emails messages; notable exception is ‘email_address’, which is a text string) # # **POI label**: [‘poi’] (boolean, represented as integer) # # You are encouraged to make, transform or rescale new features from the starter features. If you do this, you should store the new feature to my_dataset, and if you use the new feature in the final algorithm, you should also add the feature name to my_feature_list, so your evaluator can access it during testing. For a concrete example of a new feature that you could add to the dataset, refer to the lesson on Feature Selection. # # In addition, we advise that you keep notes as you work through the project. As part of your project submission, you will compose answers to a series of questions (given on the next page) to understand your approach towards different aspects of the analysis. Your thought process is, in many ways, more important than your final project and we will by trying to probe your thought process in these questions. 
# Free form questions # https://docs.google.com/document/d/1NDgi1PrNJP7WTbfSUuRUnz8yzs5nGVTSzpO7oeNTEWA/pub?embedded=true # # Rubric # https://review.udacity.com/#!/rubrics/27/view # # A list of Web sites, books, forums, blog posts, github repositories etc. that you referred to or used in this submission (add N/A if you did not use such resources). Please carefully read the following statement and include it in your document “I hereby confirm that this submission is my work. I have cited above the origins of any parts of the submission that were taken from Websites, books, forums, blog posts, github repositories, etc. # # Findings # - deferred_income,director_fees,restricted_stock_deferred give precison and recall > 0.3 # + # %matplotlib inline from sklearn.cross_validation import StratifiedShuffleSplit import time def print_time(name, start_time): print "{} time: {}s".format(name, round(time.time() - start_time, 3)) DISPLAY_STRING = "{:>0.{display_precision}f}\t\t" PERF_FORMAT_STRING = "\ Accuracy\tPrecision\tRecall\t\tF1\t\tF2\n\ {0}{0}{0}{0}{0}".format( DISPLAY_STRING ) RESULTS_FORMAT_STRING = "\ Total predictions, \tTrue positives, \tFalse positives, \tFalse negatives, \tTrue negatives \n\ {:4d}\t\t\t{:4d}\t\t\t{:4d}\t\t\t{:4d}\t\t\t{:4d}" def ratio(numerator, denominator): if numerator == 0 or denominator == 0: return 0 return float(numerator) / denominator def get_quadrant(true_negatives, false_negatives, true_positives, false_positives, predictions, labels_test): for prediction, truth in zip(predictions, labels_test): if prediction == 0 and truth == 0: true_negatives += 1 elif prediction == 0 and truth == 1: false_negatives += 1 elif prediction == 1 and truth == 0: false_positives += 1 elif prediction == 1 and truth == 1: true_positives += 1 else: print "Warning: Found a predicted label not == 0 or 1." print "All predictions should take value 0 or 1." 
print "Evaluating performance for processed predictions:" break return true_negatives, false_negatives, true_positives, false_positives def get_stats(true_negatives, false_negatives, true_positives, false_positives): total_predictions = true_negatives + false_negatives + false_positives + true_positives accuracy = ratio(true_positives + true_negatives, total_predictions) precision = ratio(true_positives, true_positives + false_positives) recall = ratio(true_positives, true_positives + false_negatives) f1 = ratio(2.0 * true_positives, 2 * true_positives + false_positives + false_negatives) f2 = ratio(5 * precision * recall, (4 * precision) + recall) return { 'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1, 'f2': f2, 'total_predictions': total_predictions, 'true_positives': true_positives, 'false_positives': false_positives, 'false_negatives': false_negatives, 'true_negatives': true_negatives } def get_labels_and_features(dataset, feature_list): data = featureFormat(dataset, feature_list, sort_keys = True) return targetFeatureSplit(data) def test_classifier(clf, dataset, feature_list, folds = 1000): labels, features = get_labels_and_features(dataset, feature_list) cv = StratifiedShuffleSplit(labels, folds, random_state = 42) true_negatives = 0 false_negatives = 0 true_positives = 0 false_positives = 0 for train_idx, test_idx in cv: #print "counting {}, {}, {}, {}".format(true_negatives, false_negatives, true_positives, false_positives) features_train = [] features_test = [] labels_train = [] labels_test = [] for ii in train_idx: features_train.append( features[ii] ) labels_train.append( labels[ii] ) for jj in test_idx: features_test.append( features[jj] ) labels_test.append( labels[jj] ) clf.fit(features_train, labels_train) predictions = clf.predict(features_test) true_negatives, false_negatives, true_positives, false_positives = get_quadrant( true_negatives, false_negatives, true_positives, false_positives, predictions, labels_test ) return 
get_stats(true_negatives, false_negatives, true_positives, false_positives) def test_classifier_fast(clf, dataset, feature_list): labels, features = get_labels_and_features(dataset, feature_list) t0 = time.time() clf.fit(features_train, labels_train) print_time("training", t0) t0 = time.time() predictions = clf.predict(features_test) print_time("predictions", t0) true_negatives, false_negatives, true_positives, false_positives = get_quadrant( 0, 0, 0, 0, predictions, labels_test ) return get_stats(true_negatives, false_negatives, true_positives, false_positives) CLF_PICKLE_FILENAME = "my_classifier.pkl" DATASET_PICKLE_FILENAME = "my_dataset.pkl" FEATURE_LIST_FILENAME = "my_feature_list.pkl" def load_classifier_and_data(): with open(CLF_PICKLE_FILENAME, "r") as clf_infile: clf = pickle.load(clf_infile) with open(DATASET_PICKLE_FILENAME, "r") as dataset_infile: dataset = pickle.load(dataset_infile) with open(FEATURE_LIST_FILENAME, "r") as featurelist_infile: feature_list = pickle.load(featurelist_infile) return clf, dataset, feature_list #test_classifier(clf, my_dataset, features_list) #print(PERF_FORMAT_STRING) # + from multiprocessing import Pool from sklearn.naive_bayes import GaussianNB import time import itertools import pandas as pd import pickle import time import numpy as np STORE_LOCATION = "intermediate_results/" def store_dictionary(dictionary, file_name): with open(STORE_LOCATION + file_name, 'w') as f: pickle.dump(dictionary, f) def dict_to_df(dictionary): return pd.DataFrame.from_dict(dictionary, orient = 'index') def read_dictionary(dict_file_name): with open(STORE_LOCATION + dict_file_name, 'r') as f: tmp_dict = pickle.load(f) return tmp_dict, dict_to_df(tmp_dict) def result_for_features(dictionary, features): return dictionary[feature_list_to_key(features)] def get_rows_above_threshold(df, threshold_precision = 0.3, threshold_recall = 0.3, threshold_accuracy = 0.8, metrics = ['precision', 'recall', 'accuracy']): condition = ((df.precision >= 
threshold_precision) & (df.recall >= threshold_recall) & (df.accuracy > threshold_accuracy)) return df[condition].loc[:, metrics] # return df[condition].loc[:, :] def get_combination_features(n): for combination_length in xrange(1, n + 1): for comb in itertools.combinations(all_features, combination_length): yield ['poi'] + list(comb) def get_number_combinations(n): return len(list(get_combination_features(n))) - 1 def feature_list_to_key(_features_list): copy = _features_list[:] copy.remove('poi') return ",".join(sorted(copy)) def estimate_time(current, total, time_taken): if current == 0: return 0 return (time_taken * total) / float(current) def report_time(current, total, begin_time, estimates, how_often = 1): estimates[:-10] = [] time_taken = time.time() - begin_time start_estimate = estimate_time(current, total, time_taken) if current == 0: estimate = start_estimate else: estimate = np.mean([start_estimate, np.mean(estimates)]) estimates.append(estimate) if (current > 0) and (current % how_often == 0): print("reached {:5d} / {:5d} || done {:6.2f} % || Time taken {:10.2f} / {:10.2f}".format( current, total, ratio(time_taken * 100, estimate), time_taken, estimate )) def at_most_features(df, n): return df[df.index.map(lambda x: len(x.split(',')) <= n)] def above_threshold_at_most(df, p, r, a, n): return at_most_features(get_rows_above_threshold(df, p, r, a, ['precision', 'recall', 'accuracy', 'f1', 'f2']), n) # - import sklearn sklearn.__version__ # # --- # # Let me start by copying the code from the main file and breaking it up here so that I can arrange it and play around with that # + import sys projects_home = '/home/aseem/projects/ud120-projects' final_project_home = projects_home + '/final_project/' sys.path.append(final_project_home) sys.path.append(projects_home + '/tools/') # + import sys import pickle from feature_format import featureFormat, targetFeatureSplit from tester import dump_classifier_and_data#, test_classifier with open(final_project_home + 
"final_project_dataset.pkl", "r") as data_file: data_dict = pickle.load(data_file) #remove outliers del data_dict['TOTAL'] #Replace NaN with 0 for key, value in data_dict.iteritems(): for k, v in value.iteritems(): if v == 'NaN': value[k] = 0 #remove features with not enough value columns_to_remove = ['email_address', 'loan_advances', 'restricted_stock_deferred', 'director_fees'] for key, value in data_dict.iteritems(): for column in columns_to_remove: del value[column] #Create new features for key, value in data_dict.iteritems(): value['fraction_poi_to_this_person'] = ratio(value['from_poi_to_this_person'], value['to_messages']) value['fraction_from_this_person_to_poi'] = ratio(value['from_this_person_to_poi'], value['from_messages']) value['total_income'] = value['salary'] + value['bonus'] + value['long_term_incentive'] + \ value['other'] + value['expenses'] #Store to my Dataset for easy export my_dataset = data_dict #Make final classifier for export from sklearn.neighbors import KNeighborsClassifier from sklearn.preprocessing import MinMaxScaler steps = [ ('scaler', MinMaxScaler()), ('classifier', KNeighborsClassifier(n_neighbors = 1)) ] from sklearn.pipeline import Pipeline clf = Pipeline(steps) features_list = [ 'poi', 'exercised_stock_options', 'fraction_from_this_person_to_poi', 'from_messages', 'from_poi_to_this_person' ] #dump_classifier_and_data(clf, my_dataset, features_list) #clf, my_dataset, feature_list = load_classifier_and_data() test_classifier(clf, my_dataset, features_list) # + ### Task 1: Select what features you'll use. ### features_list is a list of strings, each of which is a feature name. ### The first feature must be "poi". 
all_features = [ # 'poi', 'salary', 'to_messages', 'deferral_payments', 'total_payments', 'exercised_stock_options', 'bonus', 'restricted_stock', 'shared_receipt_with_poi', # 'restricted_stock_deferred', 'total_stock_value', 'expenses', # 'loan_advances', 'from_messages', 'other', 'from_this_person_to_poi', # 'director_fees', 'deferred_income', 'long_term_incentive', 'from_poi_to_this_person', 'fraction_poi_to_this_person', 'fraction_from_this_person_to_poi', 'total_income' ] features_list = [ 'poi', 'exercised_stock_options', 'fraction_from_this_person_to_poi', 'from_messages', 'from_poi_to_this_person' ] feature_list_to_key(features_list) # + ### Task 3: Create new feature(s) ### Store to my_dataset for easy export below. my_dataset = data_dict ### Extract features and labels from dataset for local testing data = featureFormat(my_dataset, features_list, sort_keys = True) print len(data) labels, features = targetFeatureSplit(data) # + ### Task 4: Try a varity of classifiers ### Please name your classifier clf for easy export below. ### Note that if you want to do PCA or other multi-stage operations, ### you'll need to use Pipelines. For more info: ### http://scikit-learn.org/stable/modules/pipeline.html # Provided to give you a starting point. Try a variety of classifiers. from sklearn.naive_bayes import GaussianNB #clf = GaussianNB() from sklearn import svm #clf = svm.SVC(kernel="linear") from sklearn.neighbors import KNeighborsClassifier #clf = KNeighborsClassifier(n_neighbors = 1) # + ### Task 5: Tune your classifier to achieve better than .3 precision and recall ### using our testing script. Check the tester.py script in the final project ### folder for details on the evaluation method, especially the test_classifier ### function. Because of the small size of the dataset, the script uses ### stratified shuffle split cross validation. 
For more info: ### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html # Example starting point. Try investigating other evaluation techniques! from sklearn.cross_validation import train_test_split features_train, features_test, labels_train, labels_test = \ train_test_split(features, labels, test_size=0.3, random_state=42) ### Task 6: Dump your classifier, dataset, and features_list so anyone can ### check your results. You do not need to change anything below, but make sure ### that the version of poi_id.py that you submit can be run on its own and ### generates the necessary .pkl files for validating your results. dump_classifier_and_data(clf, my_dataset, features_list) # + def do_neighbors1(number_features = 1): k_neigh_result_simple_test = {} estimates = [] errors = [] total_iterations = get_number_combinations(number_features) t0 = time.time() for i, tmp_list in enumerate(get_combination_features(number_features)): n_neighbors = 1 # for n_neighbors in xrange(1, 3): clf = KNeighborsClassifier(n_neighbors = n_neighbors) #print "features_list is {}".format(features_list) start = time.time() #result = test_classifier_fast(clf, my_dataset, features_list) result = test_classifier(clf, my_dataset, tmp_list) #result['n_neighbors'] = n_neighbors result['time_taken'] = time.time() - start key = "{}".format(feature_list_to_key(tmp_list)) # key = "{},{}".format(feature_list_to_key(tmp_list), n_neighbors) k_neigh_result_simple_test[key] = result if i > 0 and ((i <= 100 and i % 5 == 0) or i % 25 == 0): report_time(i, total_iterations, t0, estimates) else: print("*" * 30) print("total {:4d} at {:10.4f}".format(i, time.time() - t0)) return k_neigh_result_simple_test #k_neigh_result_simple_test = do_neighbors1(5) # - k_neigh_results, k_neigh_df = read_dictionary('features_5_raw_features_k_neigh.pkl') # + def print_required1(df): print above_threshold_at_most(df, 0.3, 0.3, 0.8, 3).describe() print '**** naive bayes' print "" 
#print_required1(naive_bayes_df) print "" print "**** k neigh neighbors" print "" #print_required1(k_neigh_df) # + #Use features found using Naive Bayes brute force list_of_features_list = [] for index in get_rows_above_threshold(naive_bayes_df, 0.4).index: list_of_features_list.append(['poi'] + index.split(',')) len(list_of_features_list) # + from sklearn import svm from sklearn.grid_search import GridSearchCV from sklearn.cross_validation import train_test_split from sklearn.metrics import make_scorer, f1_score, precision_score def do_grid_search1(): feature_list = ['poi', 'salary'] t0 = time.time() param_grid = { 'C': [1e-1, 1e0, 1e1] } clf = GridSearchCV( svm.SVC(kernel='linear'), scoring = make_scorer(precision_score), param_grid=param_grid, n_jobs=3, verbose = 4 ) print_time("creation", t0) t0 = time.time() labels, features = get_labels_and_features(my_dataset, feature_list) features_train, features_test, labels_train, labels_test = \ train_test_split(features, labels, test_size=0.3, random_state=42) print_time("split", t0) t0 = time.time() clf.fit(features_train, labels_train) print_time("training", t0) print "best estimator {}".format(clf.best_estimator_) t0 = time.time() predictions = clf.predict(features_test) print_time("predictions", t0) return get_stats(*get_quadrant( 0, 0, 0, 0, predictions, labels_test )) #do_grid_search1() # - def do_svc_simple_test(): svc_result_simple_test = {} for features_list in list_of_features_list: print "features_list is {}".format(features_list) start = time.time() result = test_classifier_fast(clf, my_dataset, features_list) result['time_taken'] = time.time() - start print "time_taken was {}".format(result['time_taken']) svc_result_simple_test[feature_list_to_key(features_list)] = result dict_to_df(svc_result_simple_test) store_dictionary(svc_result_simple_test, "svc_on_naive_bayes_top_features.pkl") svc_results = {} for features_list in list_of_features_list: folds = 10 start = time.time() clf = SVC(kernel='linear') 
result = test_classifier(clf, my_dataset, features_list, folds=folds, verbose=True, verbose_at=1) result['folds'] = folds result['time_taken'] = time.time() - start print "time taken was {}".format(result['time_taken']) print "\n" svc_results[feature_list_to_key(features_list)] = result dict_to_df(svc_results) # + # def add_results_to_map(clf, dictionary, features_list, my_dataset = data_dict): # current_key = feature_list_to_key(features_list) # dictionary[current_key] = test_classifier(clf, my_dataset, features_list) def get_dict(clf, number_features = 2): dictionary = {} estimates = [] start = time.time() #Tried Using threads to make it faster. It was faster. # Just due to some odd reason the resulting dictionary # did not have anything when accessed from outside the function # Maybe TODO later #number_threads = 3 #pool = Pool(number_threads) #pool.map(add_results_to_map, get_combination_features(number_features)) total_iterations = get_number_combinations(number_features) for i, tmp_list in enumerate(get_combination_features(number_features)): current_key = feature_list_to_key(tmp_list) dictionary[current_key] = test_classifier(clf, my_dataset, tmp_list) report_time(i, total_iterations, start, estimates, 2) else: print("*" * 30) print("total {:4d} at {:10.4f}".format(i, time.time() - start)) return dictionary def main(): main_start = time.time() clf = GaussianNB() from sklearn.svm import SVC #clf = SVC(kernel="linear") return get_dict(clf, number_features = 1) #temp_results_all = main() # - above_threshold_at_most(dict_to_df(temp_results_all), 0.3, 0.3, 0.7, 4) naive_bayes_results, naive_bayes_df = read_dictionary('features_4_raw_features_naive_bayes.pkl') above_threshold_at_most(naive_bayes_df, 0.3, 0.3, 0.7, 1) # + def get_dict(clf, number_features = 2, report_on = 2): dictionary = {} estimates = [] start = time.time() total_iterations = get_number_combinations(number_features) for i, tmp_list in enumerate(get_combination_features(number_features)): 
current_key = feature_list_to_key(tmp_list) dictionary[current_key] = test_classifier(clf, my_dataset, tmp_list) report_time(i, total_iterations, start, estimates, report_on) else: print("*" * 30) print("total {:4d} at {:10.4f}".format(i, time.time() - start)) return dictionary def main(): main_start = time.time() from sklearn.neighbors import KNeighborsClassifier algorithm = KNeighborsClassifier(n_neighbors = 1) from sklearn.preprocessing import MinMaxScaler # from sklearn.decomposition import PCA steps = [ ('scaler', MinMaxScaler()), ('classifier', algorithm) ] from sklearn.pipeline import Pipeline clf = Pipeline(steps) return get_dict(clf, number_features = 4, report_on = 5) #temp_results_all = main() # - above_threshold_at_most(dict_to_df(temp_results_all), 0.55, 0.55, 0.85, 4) # + #store_dictionary(temp_results_all, "min_max_kneighbors_4_features_with_3_new_features.pkl")
udacity_data_science_notes/intro_machine_learning/project/project.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: 'Python 3.7.9 64-bit (''myenv'': conda)'
#     name: python3
# ---

# NOTE(review): this notebook's original line breaks were flattened; they are
# reconstructed here. All code tokens are unchanged.

# <h1><center> INTRODUCTION TO PYTHON BASICS

# Taking input from the user
num = int(input("Enter a number:"))
print("The number entered by the user: ", num)

type(num)

a = 5

type(a)

type(float(a))

b = 5.5

type(b)

type(int(b))

# <h2> Arithmetic Operators

print(a+b)
print(a-b)
print(a*b)
print(a/b)
print(a**b)  # a to the power of b
print(a%b)  # remainder of a/b

# <h2> Comparison Operators

print(a<b)
print(a>b)
print(a<=b)
print(a>=b)
print(a == b)  # double equality operator
print(a!=b)  # not equal to operator
print(a is not b)  # identity test, not a value comparison (see note below)

# <h2> Logical and Identity Operators

# +
# and operation
print(True and True)
print(True and False)
print(False and False)
print("\n")

# or operation
print(True or True)
print(True or False)
print(False or False)
print("\n")

# not operation
print(not True)
print(not False)
# -

# <i><u>Note:-</i></u> Preference of Logical Operators:
# * not
# * and
# * or

# is, is not are identity operators (object identity, not equality)
print(a is b)  # similar to == only for interned/small objects — not a general substitute
print(a is not b)  # similar to != with the same caveat

# in, not in are membership operators
print(5 in [1,2,3,4,5])
print(10 not in [1,2,3,4,5])

# <h2> Basic Conversions

x = 97
print(chr(x))  # converts integers to character
#print(ord(x)) # converts single characters to integers(ASCII CODE)
# NOTE(review): ord() is commented out because x is an int here; ord() needs a 1-char string.
print(oct(x))  # converts integer to octal
print(bin(x))  # converts integer to binary

# <h2> Decision Making
# * if <br>
# * elif <br>
# * nested if

if a<b:
    print("A is less than b")

a = 20
if a<b:
    print("A is less than b")
else:
    print("A is greater than b")

if a<b and a<num:
    print("A is smallest")
elif b<a and b<num:
    print("B is smallest")
else:
    print("num is smallest")

# <h2> Loops/ Iterations
# * for
#
# * while
#
# * nested loop

count = 1
while(count <= 10):
    print(count)
    count = count + 1

for i in range(1,11,1):
    print(i)

import math

print("e to the power of 100.12: ",math.exp(100.12))
print("log of 3 to the base 2: ",math.log(3,2))
print("10 power 10: ",math.pow(10,10))
print("Square root:", math.sqrt(100))

# <h2><center> STRINGS

var1 = "<NAME>"
var2 = "Python Programming"
print(var1)
print(var2)
print("\n")
print(var1[0])  # accessing first character of var1
print(var1[0:5])  # accessing first word of var1 using slicing operator(:)
print(var2[7:])  # accessing second word of var2 using slicing operator(:)
print(var2[-1])  # accessing last character of var2

print(var1 + " to" + var2)  # + -> concatenation
print(var1 * 2)  # * -> repetition
print(var1[0])  #[] -> slice
print(var1[1:5])  #[:] -> slice range

# NOTE(review): 'str' shadows the builtin str type for the rest of this script.
str = "name"
print(str.capitalize())  # capitalizes the 1st letter of string
print(str.endswith("me"))
print(str.isalpha())
print("12345".isalpha())
print(str.isdigit())
print("12345".isnumeric())
print("+".isalnum())
print(str.upper())  # capitalizes entire string

# <h2><center> LISTS

list1 = ['physics', 'chemistry', 'mathematics', 'computer']
print(list1)

type(list1)

# +
print(list1[0])  # accessing the first element
print(list1[-1])  # accessing the last element
print(list1[:2])  # accessing the first two elements from the list

list1[1] = 'Chemistry'
print("Updated list: ", list1)

del list1[1]  # to delete a list element from the list
print("After deleting: ",list1)

a = ['p','c',"m",'c']
# -

print("Length of the list", len(list1))
print("Maximum element from the list: ", max([1,2.5, 50,10]))
print("Minimum element from the list: ", min([1,2.5, 50,10]))

a.reverse()  # in-place reversal; returns None, so the list itself is printed next
print(a)

# <h2><center> DICTIONARY

dict1 = {"Name": ["Mithun", "Myil", "Sunaina", "Varshini"], "Gender": ["M","M","F","F"]}

print(dict1['Name'])
print(dict1['Gender'])

print(dict1['Name'][0])  # accessing the first element of Name key
print(len(dict1['Name']))

print(dict1.keys())
print(dict1.values())

# <h2>Date and Time

import time

print(time.time())  # returns the number of ticks/seconds since Jan 1,1970 00:00, which is called as epoch
print(time.localtime())  # returns the structure format of epoch
print(time.asctime(time.localtime()))  # returns the local time from the struct format
print(time.process_time())  # Returns the current processor time used in benchmarking tools

print(time.asctime())
time.sleep(5)  # suspends execution for 5 seconds
print(time.asctime())

import calendar

print(calendar.calendar(2021))

print(calendar.isleap(2021))
print(calendar.isleap(2020))
print("\n")
print(calendar.month(2021,7))  # prints calendar of a specific month

# <h2> Functions

def fib(num):  # Returns (not prints) the Fibonacci numbers strictly below num
    result = []
    a, b = 0, 1
    while b<num:
        result.append(b)
        a,b = b, a+b
    return result

fib(20)

# <h3>The Anonymous Functions</h3>
# * They are not declared in the standard manner using <i>def</i> keyword <br>
# * <i>lambda</i> keyword is used to create small anonymous function <br>
# * They can take any number of arguments but returns only one value <br>
# * They cannot contain suites/ multiple expressions

# NOTE(review): 'sum' shadows the builtin sum() from here on.
sum = lambda arg1, arg2 :(arg1+arg2)

sum(10,20)

import math
content = dir(math)  # returns the sorted list of names present in the math module
print(content)
Basic Python/intro.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # XND # # **XND** is a library for typed memory blocks. # # libxnd implements support for typed memory blocks using the libndtypes type library. # # The xnd module implements a container type that maps most Python values relevant for scientific computing directly to typed memory. # # XND has a superset of features for typed memory than similar libraries like [numpy record arrays](https://docs.scipy.org/doc/numpy/user/basics.rec.html) and [apache arrow](https://arrow.apache.org/). # # This is a quick tour of what XND can do. from xnd import xnd import numpy as np # numpy is only to easily initialize # ## Construct from python data types. # # XND is infering the type of each element xnd([[1, 2], [3, 4]]) xnd([{'b': [10.2, 232.3]}, {'b': [0.2, 0.23]}]) # ## Construct from simple numpy arrays and record arrays xnd.from_buffer(np.arange(6).reshape(3, 2)) xnd.from_buffer(np.random.random((2, 3))) x = np.array([(1000, 400.25, 'abc'), (-23, -1e10, 'cba')], dtype=[('x', '<i4'), ('y', '>f4'), ('z', 'S3')]) xnd.from_buffer(x) # ## Constructing with predefined types # # Constructing from with predefined types has significant performance advantages for large arrays. This is becuase xnd does not have to infer the type for each element. # %%timeit N = 100000 xnd(N * [1]) # %%timeit N = 100000 types = f"{N} * float64" xnd(N * [1], type=types)
notebooks/xnd-python-tutorial.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 2 # language: python # name: python2 # --- from pyspark.sql import SparkSession spark = SparkSession.builder.appName('abc').getOrCreate() # ## Formatting text file to_output, build = [], "" with open("data/fasta/seq.fasta", 'r') as file: with open("data/fasta/seq_clean.fasta", "w") as output: for idx, line in enumerate(file.readlines()): line = line.replace("\n", "") if line[0] == '>': if idx > 0: to_output.append(build) to_output.append(line) build = "" else: build += line to_output.append(build) output.write("\n".join(to_output)) df = spark.read.text("data/bacteria2.fasta") df.show() sequences = df.where(~df.value.contains('>')) sequences.show() sequences.count() # + def decimal(y): print(y) if y == 'A': return 0b00 if y == 'C': return 0b01 if y == 'G': return 0b10 return 0b11 def binary(x): return map(lambda y: decimal(y), x) # - array_s = sequences.rdd.map(list).map(lambda x: (x[0].encode('ascii'))).map(list) array_s.take(2) tuple(map(lambda x: [x], array_s.take(2))) # + from pyspark.sql.types import * cSchema = StructType([StructField("Sequence", ArrayType(StringType()))]) df = spark.createDataFrame(tuple(map(lambda x: [x], array_s.take(2))), schema=cSchema) df.show() # + from pyspark.sql.types import StructType, StructField, ArrayType, StringType, IntegerType cSchema = StructType([StructField("id", IntegerType()), StructField("Sequence", ArrayType(StringType()))]) df = spark.createDataFrame((tuple([_id, data[0]]) for _id, data in enumerate(map(lambda x: [x], array_s.take(2)))), schema=cSchema) df.show() # + from pyspark.ml.feature import NGram w = 15 ngram = NGram(n=w, inputCol="Sequence", outputCol="ngrams") ngramDataFrame = ngram.transform(df) df_clean = ngramDataFrame.select(["id", "ngrams"]) df_clean.show(truncate=False) # - ngramDataFrame.show() from pyspark.sql.functions import 
split, explode df_explode = df_clean.withColumn('ngrams', explode('ngrams')) df_explode.show() df_w0 = df_explode.where(df_clean.id == 0) df_w0.show(truncate=False) df_w1 = df_explode.where(df_clean.id == 1) df_w1.show(truncate=False) # + from pyspark.sql import functions as F df_w1 = df_w1.withColumn("id1", F.monotonically_increasing_id()+1).withColumnRenamed('ngrams', 'w1').select('id1', 'w1') df_w1.show(truncate=False) # + def reducer(l): r = 0 for x in l: r = (r<<2) + x return r df_w1.select("w1").rdd.map(lambda x: list(x)[0].split( " ")).map(lambda x: map(lambda y: decimal(y), x)).map(lambda x: reducer(x)) # - import pyspark.sql.functions as F df_w0 = df_w0.withColumn("id0", F.monotonically_increasing_id()+1).withColumnRenamed('ngrams', 'w0').select('id0', 'w0') df_w0.show(truncate=False) df_w0.count() from pyspark.sql.types import ArrayType, FloatType from pyspark.sql.functions import udf # + def concatenation(l): r = "" for x in l.split(" "): r += x return r udf_func = udf(lambda x: concatenation(x), StringType()) df_final= df_w0.select('w0', udf_func('w0')).alias("result") df_final.show() # + def reducer_concat(l): r = 0 for x in l.split(" "): r = (r<<2) + decimal(x) return r # Create your UDF object (which accepts your python function called "my_udf") udf_object = udf(lambda y: reducer_concat(y), IntegerType()) # Apply the UDF to your Dataframe (called "df") df_final_2 = df_w0.select("w0", udf_object("w0")) df_final_2.show() # - df0 = df_w0.withColumn("word0", udf_object(df_w0.w0)).select("id0", "word0") df0.show() df1 = df_w1.withColumn("word1", udf_object(df_w1.w1)).select("id1", "word1") df1.show() df1.count() # Doing the cross product of the dataframes df_cross = df0.crossJoin(df1) df_cross.show() print((df_cross.count(), len(df_cross.columns))) # ## Creating the spaced words from pyspark.sql.functions import udf, col from pyspark.sql.types import IntegerType, ArrayType # Creating the spaced words and filtering by threshold with output array. 
# # See page 975 from paper to check scores. # + # Creating Data Frame and filtering by threshold old_pattern = 0b1100001111 pattern, k, threshold = 0b110000000000000011110000000000, 15, 0 Chiaromonte = [ [91, -114, -31, -123], [-114, 100, -125, -31], [-31, -125, 100, -114], [-123, -31, -114, 91] ] # + idx = lambda p, s: ((~(p ^ (0b11 << (2*s))) & (0b11 << (2*s))) >> (2*s)) def spaced_words(w0, w1, m, k, S): score = 0 if ~(w0 ^ w1) & m == m: # We got a hit, this is a spaced word spaced_word = w0 & m # We need to calculate the score dont_care_mask = (m ^ ((0b1<<(2*k))-1)) for i in range(k): if idx(dont_care_mask, i) == 0b11: score += (S[idx(w0, i)][idx(w1, i)]) return [spaced_word, score] else: return None def udf_spaced_words(mask, k, Score): return udf(lambda x, y: spaced_words(x, y, mask, k, Score), ArrayType(IntegerType())) # Create your UDF object (which accepts your python function called "my_udf") df_cross_spaced = df_cross.withColumn("spaced_word_score", udf_spaced_words(0b1100001111, 5, Chiaromonte)(col("word0"), col("word1"))) # df_cross_spaced.show() threshold = 0 df_cross_spaced.where(col("spaced_word_score").isNotNull()).where(col("spaced_word_score")[1]>threshold).show() # - # Implementing with two functions and two columns. # + idx = lambda p, s: ((~(p ^ (0b11 << (2*s))) & (0b11 << (2*s))) >> (2*s)) def spaced_words(w0, w1, m): if ~(w0 ^ w1) & m == m: return w0 & m else: return None def udf_spaced_words(mask): return udf(lambda x, y: spaced_words(x, y, mask), IntegerType()) def calculate_score(w0, w1, m, k, S): score = 0 # Calculate the don't care mask. 
dont_care_mask = (m ^ ((0b1<<(2*k))-1)) for i in range(k): if idx(dont_care_mask, i) == 0b11: score += (S[idx(w0, i)][idx(w1, i)]) return score def udf_score(mask, k, Score): return udf(lambda x, y: calculate_score(x, y, mask, k, Score), IntegerType()) # + df_cross_spaced = df0.crossJoin(df1) \ .withColumn("spaced_word", udf_spaced_words(pattern)(col("word0"), col("word1"))) \ .where(col("spaced_word").isNotNull()) \ .withColumn("score", udf_score(pattern, k, Chiaromonte)(col("word0"), col("word1"))) \ .where(col("score") > threshold) \ .orderBy([col("spaced_word"), col("score")], ascending=False) df_cross_spaced.show() # - # ## Eliminating duplications # ### With Windows from pyspark.sql.functions import collect_set, array_contains, expr, countDistinct import pyspark.sql.functions as F from pyspark.sql.window import Window windowSpec = Window \ .partitionBy(df_cross_spaced["spaced_word"])\ .orderBy(df_cross_spaced["score"].desc()) \ .rowsBetween(Window.unboundedPreceding, Window.currentRow) df_cross_spaced.show() df_cross_spaced\ .withColumn("min", F.least(df_cross_spaced.id0, df_cross_spaced.id1))\ .withColumn("max", F.greatest(df_cross_spaced.id0, df_cross_spaced.id1))\ .drop_duplicates(subset=["spaced_word", "min"]) \ .drop_duplicates(subset=["spaced_word", "max"]) \ .show() df_cross_spaced.drop_duplicates(subset=["spaced_word", "id1"]).show() df_cross_spaced.withColumn("elements", F.array(df_cross_spaced.id0, df_cross_spaced.id1))\ .drop_duplicates(subset=["spaced_word", "elements"]).show() contains = (F.max(df_cross_spaced['score']).over(windowSpec) - df_cross_spaced["score"]) contains df_cross_spaced.select( df_cross_spaced['spaced_word'], df_cross_spaced['id0'], df_cross_spaced['id1'], df_cross_spaced['score'], contains.alias("contains")).show() df_cross_spaced.groupBy(df_cross_spaced.spaced_word).agg(countDistinct(df_cross_spaced.id0, df_cross_spaced.id1).alias('c')).show() # ## Jukes Cantor Correction # + def jukes_cantor(w0, w1, m, k): score = 0 # 
Calculate the don't care mask. dont_care_mask = (m ^ ((0b1<<(2*k))-1)) for i in range(k): if idx(dont_care_mask, i) == 0b11: if not idx(w0, i) == idx(w1, i): score += 1 return score def udf_jukes_cantor(mask, k): return udf(lambda x, y: jukes_cantor(x, y, mask, k), IntegerType()) # - df_cross_spaced\ .withColumn("min", F.least(df_cross_spaced.id0, df_cross_spaced.id1))\ .withColumn("max", F.greatest(df_cross_spaced.id0, df_cross_spaced.id1))\ .drop_duplicates(subset=["spaced_word", "min"]) \ .drop_duplicates(subset=["spaced_word", "max"]) \ .withColumn("JukesCantor", udf_jukes_cantor(pattern, k)(col("word0"), col("word1"))) \ .show() from math import log JukesCantor(1.0/2.0) # # Wraping up everything # + df_result = df0.crossJoin(df1) \ .withColumn("spaced_word", udf_spaced_words(pattern)(col("word0"), col("word1"))) \ .where(col("spaced_word").isNotNull()) \ .withColumn("score", udf_score(pattern, k, Chiaromonte)(col("word0"), col("word1"))) \ .where(col("score") > threshold)\ .orderBy(["spaced_word", "score"], ascending=False) \ .withColumn("min", F.least(col("id0"), col("id1"))) \ .withColumn("max", F.greatest(col("id0"), col("id1"))) \ .drop_duplicates(subset=["spaced_word", "min"]) \ .drop_duplicates(subset=["spaced_word", "max"]) \ .withColumn("JukesCantor", udf_jukes_cantor(pattern, k)(col("word0"), col("word1"))) df_result.show() # - p = df_result.agg(F.sum("JukesCantor")).collect()[0][0]*1.0/((k-bin(pattern).count("1")/2)*df_result.count()) p from math import log JukesCantor = lambda p: -0.75*log(1-1.25*p) JukesCantor(p)
notebooks/FSWM.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Advanced simulation # In essence, a Model object is able to change the state of the system given a sample and evaluate certain metrics. # ![Simple Model](Model_Simple_UML.png "Model Simple UML") # Model objects are able to drastically cut simulation time by sorting the samples to minimize perturbations to the system between simulations. This decreases the number of iterations required to solve recycle systems. The following examples show how Model objects can be used. # # ### Create a model object # **Model objects are used to evaluate metrics around multiple parameters of a system.** # Create a Model object of the lipidcane biorefinery with internal rate of return as a metric: from biosteam.biorefineries import lipidcane as lc import biosteam as bst solve_IRR = lc.lipidcane_tea.solve_IRR metrics = bst.Metric('IRR', solve_IRR), model = bst.Model(lc.lipidcane_sys, metrics) # The Model object begins with no paramters: model # Note: Here we defined only one metric, but more metrics are possible. 
# ### Add design parameters # **A design parameter is a Unit attribute that changes design requirements but does not affect mass and energy balances.** # Add number of fermentation reactors as a "design" parameter: R301 = bst.find.unit.R301 # The Fermentation Unit @model.parameter(element=R301, kind='design', name='Number of reactors') def set_N_reactors(N): R301.N = N # The decorator returns a Parameter object and adds it to the model: set_N_reactors # Calling a Parameter object will update the parameter and results: set_N_reactors(5) print('Puchase cost at 5 reactors: ' + str(R301.purchase_cost)) set_N_reactors(8) print('Puchase cost at 8 reactors: ' + str(R301.purchase_cost)) # ### Add cost parameters # **A cost parameter is a Unit attribute that affects cost but does not change design requirements.** # Add the fermentation unit base cost as a "cost" parameter: @model.parameter(element=R301, kind='cost') # Note: name argument not given this time def set_base_cost(cost): R301.cost_items['Reactors'].cost = cost original = R301.cost_items['Reactors'].cost set_base_cost(10e6) print('Purchase cost at 10 million USD: ' + str(R301.purchase_cost)) set_base_cost(844e3) print('Purchase cost at 844,000 USD: ' + str(R301.purchase_cost)) # If the name was not defined, it defaults to the setter's signature: set_base_cost # ### Add isolated parameters # **An isolated parameter should not affect Unit objects in any way.** # Add feedstock price as a "isolated" parameter: lipid_cane = lc.lipid_cane # The feedstock stream @model.parameter(element=lipid_cane, kind='isolated') def set_feed_price(feedstock_price): lipid_cane.price = feedstock_price # ### Add coupled parameters # **A coupled parameter affects mass and energy balances of the system.** # Add lipid fraction as a "coupled" parameter: set_lipid_fraction = model.parameter(lc.set_lipid_fraction, element=lipid_cane, kind='coupled') set_lipid_fraction(0.10) print('IRR at 10% lipid: ' + str(solve_IRR())) 
set_lipid_fraction(0.05) print('IRR at 5% lipid: ' + str(solve_IRR())) # Add fermentation efficiency as a "coupled" parameter: @model.parameter(element=R301, kind='coupled') def set_fermentation_efficiency(efficiency): R301.efficiency = efficiency # ### Evaluate metric given a sample # **The model can be called to evaluate a sample of parameters.** # All parameters are stored in the model with highly coupled parameters first: model # Get all parameters as ordered in the model: model.get_parameters() # Evaluate sample: model([0.05, 0.85, 8, 100000, 0.040]) # ### Evaluate metric across samples # Evaluate at give parameter values: import numpy as np samples = np.array([(0.05, 0.85, 8, 100000, 0.040), (0.05, 0.90, 7, 100000, 0.040), (0.09, 0.95, 8, 100000, 0.042)]) model.load_samples(samples) model.evaluate() model.table # All evaluations are stored as a pandas DataFrame # Note that coupled parameters are on the left most columns, and are ordered from upstream to downstream (e.g. <Stream: Lipid cane> is upstream from <Fermentation: R301>) # ### Evaluate multiple metrics # Reset the metrics to include total utility cost: # + def total_utility_cost(): """Return utility costs in 10^6 USD/yr""" return lc.lipidcane_tea.utility_cost / 10**6 # This time use detailed names and units for appearance model.metrics = (bst.Metric('Internal rate of return', lc.lipidcane_tea.solve_IRR, '%'), bst.Metric('Utility cost', total_utility_cost, 'USD/yr')) model # - model.evaluate() model.table # ### Behind the scenes # ![Model UML Diagram](Model_UML.png "Model UML") # Model objects work with the help of Block and Parameter objects that are able to tell the relative importance of parameters through the `element` it affects and the `kind` (how it affects the system). Before a new parameter is made, if its `kind` is "coupled", then the Model object creates a Block object that simulates only the objects affected by the parameter. 
The Block object, in turn, helps to create a Parameter object by passing its simulation method.
docs/tutorial/Advanced simulation.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#       jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# NOTE(review): original line breaks were flattened; reconstructed here with
# all code tokens unchanged.

# +
import torch

# The autograd package provides automatic differentiation for all operations on Tensors

# requires_grad = True -> tracks all operations on the tensor.
x = torch.tensor([3.0], requires_grad=True)
print(x)

y = x*x + 2
# -

# y was created as a result of an operation, so it has a grad_fn attribute.
# grad_fn: references a Function that has created the Tensor

print(x)  # created by the user -> grad_fn is None
print(y)
print(y.grad_fn)

y.backward()
print(x.grad)

# ### Example 2

x = torch.tensor([1.0,2.0,3.0], requires_grad=True)
y = x + 2
print(x)
print(y)

# more operation on y
z = y * y * 3
print(z)
z = z.mean()
print(z)

# Gradients are calculated by tracing the graph from the root to the leaf and multiplying every gradient in the way using the chain rule.

# +
# Let's compute the gradients with backpropagation
# When we finish our computation we can call .backward() and have all the gradients computed automatically.
# The gradient for this tensor will be accumulated into .grad attribute.
# It is the partial derivative of the function w.r.t. the tensor
z.backward()
print(x.grad)  # dz/dx
# -

# ### Example

import torch

x=torch.tensor(2.0, requires_grad=True)
z=torch.tensor(4.0, requires_grad=True)
print(x)
print(z)

y=x**2+z**3
y.backward()
print(x.grad)
print(z.grad)

# ### Example

a = torch.tensor([[1,2,3],[4,5,6]], dtype=torch.float, requires_grad=True)
print(a)

# Scalar backward per element; gradients accumulate into a.grad.
for i in range(2):
    for j in range(3):
        out = a[i,j] * a[i,j]
        out.backward()
print(a.grad)

a = torch.tensor([[1,2,3],[4,5,6]], dtype=torch.float,requires_grad=True)
print(a)
print(a.shape)

out = a * a
out.backward(a)  # non-scalar output: backward needs an explicit gradient argument
print(a.grad)

# ### Example

# +
x = torch.randn(3, requires_grad=True)
print(x)
# +
y = x * 2
for _ in range(10):
    y = y * 2

print(y)
print(y.shape)
# -

v = torch.tensor([0.1, 1.0, 0.0001], dtype=torch.float32)
y.backward(v)
print(x.grad)

# ## Stop a tensor from tracking history:
#
# ### For example during our training loop when we want to update our weights
# ### then this update operation should not be part of the gradient computation
# ### - x.requires_grad_(False)
# ### - x.detach()
# ### - wrap in 'with torch.no_grad():'
#
# .requires_grad_(...) changes an existing flag in-place.

a = torch.randn(2, 2)
print(a.requires_grad)
b = ((a * 3) / (a - 1))
print(b.grad_fn)

a.requires_grad_(True)
print(a.requires_grad)
b = (a * a).sum()
print(b.grad_fn)

# .detach(): get a new Tensor with the same content but no gradient computation:

a = torch.randn(2, 2, requires_grad=True)
print(a.requires_grad)
b = a.detach()
print(b.requires_grad)

# wrap in 'with torch.no_grad():

a = torch.randn(2, 2, requires_grad=True)
print(a.requires_grad)
with torch.no_grad():
    # NOTE(review): this squares x (defined earlier), not the freshly created a —
    # presumably a was intended; confirm against the lesson material.
    print((x ** 2).requires_grad)

# ### Example

# -------------
# backward() accumulates the gradient for this tensor into .grad attribute.
# !!! We need to be careful during optimization !!!
# Use .zero_() to empty the gradients before a new optimization step!

weights = torch.ones(4, requires_grad=True)

# +
for epoch in range(3):
    # just a dummy example
    model_output = (weights*3).sum()
    model_output.backward()

    print(weights.grad)

    # optimize model, i.e. adjust weights...
    with torch.no_grad():
        weights -= 0.1 * weights.grad

    # this is important! It affects the final weights & output
    weights.grad.zero_()

print(weights)
print(model_output)

# Optimizer has zero_grad() method
# optimizer = torch.optim.SGD([weights], lr=0.1)
# During training:
# optimizer.step()
# optimizer.zero_grad()
# -

# ### Example : backprop

# +
import torch

x = torch.tensor(1.0)
y = torch.tensor(2.0)

# This is the parameter we want to optimize -> requires_grad=True
w = torch.tensor(1.0, requires_grad=True)
print(x)
print(y)
print(w)
# -

# forward pass to compute loss
y_predicted = w * x
print(y_predicted)

loss = (y_predicted - y)**2
print(loss)

# backward pass to compute gradient dLoss/dw
loss.backward()
print(w.grad)

# +
# update weights
# next forward and backward pass...

# continue optimizing:
# update weights, this operation should not be part of the computational graph
with torch.no_grad():
    w -= 0.01 * w.grad

# don't forget to zero the gradients
w.grad.zero_()
# -

# ### Example : Gradient descent

# +
import numpy as np

# Compute every step manually

# Linear regression
# f = w * x

# here : f = 2 * x
X = np.array([1, 2, 3, 4], dtype=np.float32)
Y = np.array([2, 4, 6, 8], dtype=np.float32)

w = 0.0
print(X)
print(Y)
print(w)

# +
# model output
def forward(x):
    return w * x

# loss = MSE
def loss(y, y_pred):
    return ((y_pred - y)**2).mean()

# J = MSE = 1/N * (w*x - y)**2
# dJ/dw = 1/N * 2x(w*x - y)
def gradient(x, y, y_pred):
    return np.dot(2*x, y_pred - y).mean()

print(f'Prediction before training: f(5) = {forward(5):.3f}')

# +
# Training
learning_rate = 0.01
n_iters = 20

for epoch in range(n_iters):
    # predict = forward pass
    y_pred = forward(X)

    # loss
    l = loss(Y, y_pred)

    # calculate gradients
    dw = gradient(X, Y, y_pred)

    # update weights
    w -= learning_rate * dw

    if epoch % 2 == 0:
        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')

print(f'Prediction after training: f(5) = {forward(5):.3f}')
# -

# ### Example: Gradient descent with autograd

# +
import torch

# Here we replace the manually computed gradient with autograd

# Linear regression
# f = w * x

# here : f = 2 * x
X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)

w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)
print(X)
print(Y)
print(w)

# +
# model output
def forward(x):
    return w * x

# loss = MSE
def loss(y, y_pred):
    return ((y_pred - y)**2).mean()

print(f'Prediction before training: f(5) = {forward(5).item():.3f}')

# +
# Training
learning_rate = 0.01
n_iters = 100

for epoch in range(n_iters):
    # predict = forward pass
    y_pred = forward(X)

    # loss
    l = loss(Y, y_pred)

    # calculate gradients = backward pass
    l.backward()

    # update weights
    #w.data = w.data - learning_rate * w.grad
    with torch.no_grad():
        w -= learning_rate * w.grad

    # zero the gradients after updating
    w.grad.zero_()

    if epoch % 10 == 0:
        print(f'epoch {epoch+1}: w = {w.item():.3f}, loss = {l.item():.8f}')

print(f'Prediction after training: f(5) = {forward(5).item():.3f}')
# -
pytorch/py_engg/auto_grad and backprop and gradient descent.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ### Check if results are binary correct import numpy as np import pandas as pd # First we read in two reference files, one for the intel results, one for the gnu results df_intel = pd.read_csv('reference_intel.csv', delimiter=' ') df_gnu = pd.read_csv('benchmark_i5.csv', delimiter=' ') # take the rows containing all the various matrix and vector sizes values_intel = df_intel[['n','Nx','Ny','exblas_i']] values_gnu = df_gnu[['n','Nx','Ny','exblas_i']].iloc[0:32] values_intel.set_index(['n','Nx'], inplace=True) values_gnu.set_index(['n','Nx'], inplace=True) # Make a dictionary of files and compilertypes that we want to check files = {'knl_mpi1':'intel', 'knl_mpi2':'intel', 'knl_mpi4':'intel', 'skl_mpi1':'intel', 'skl_mpi2':'intel', 'skl_mpi4':'intel', 'p100_mpi1':'gnu', 'p100_mpi2':'gnu', 'p100_mpi4':'gnu', 'v100_mpi1':'gnu', 'v100_mpi2':'gnu', 'v100_mpi4':'gnu', 'i5':'gnu','gtx1060':'gnu'} # Now, go through all the files and all rows and compare the result (the exblas_i column) to the # corresponding reference value for f,k in files.items(): df=pd.read_csv('benchmark_'+f+'.csv', delimiter=' ') Passed = True; Err = False print( "Checking", f , k) ref = values_gnu if k == 'intel' : ref = values_intel for i in df.index: try: if df.loc[i,'exblas_i'] != ref.loc[(df.loc[i,'n'],df.loc[i,'Nx']),'exblas_i'] and not pd.isnull(df.loc[i,'exblas_i']): Passed = False print( "Wrong result at n = ",df.loc[i,'n']," N = ",df.loc[i,'Nx'], " Difference is ", df.loc[i,'exblas_i']-ref.loc[(df.loc[i,'n'],df.loc[i,'Nx']),'exblas_i']) except KeyError: Err = True continue if Passed : print( "PASSED") else: print( "FAILED") if Err: print( " There was a Key error")
1-exact.ipynb
def allBatsmen(batting):
    """Build a per-batsman innings table, most recent innings first.

    Each input entry is [date, stats]; the output keeps the date followed
    by stat fields 1 through 5 (field 0 is dropped — presumably an id or
    match field; confirm against the upstream scorecard schema).
    """
    performance = {}
    for name, innings in batting.items():
        for entry in innings:
            date, stats = entry[0], entry[1]
            performance.setdefault(name, []).append(
                [date, stats[1], stats[2], stats[3], stats[4], stats[5]])
    # Sort each player's rows descending so the latest date comes first.
    for name in performance:
        performance[name].sort(reverse=True)
    return performance
def getAvgBowl(team, index):
    """Exponentially weighted average of stat column `index` per player.

    For each player, takes entry[index] from every recorded performance,
    treats -1 as missing (NaN), smooths with pandas ewm(com=0.5), and
    returns the mean of the smoothed series rounded to 3 decimals,
    one value per player in dict order.
    """
    averages = []
    for player in team:
        raw = [entry[index] for entry in team[player]]
        cleaned = pd.Series([np.nan if v == -1 else v for v in raw])
        smoothed = cleaned.ewm(com=0.5).mean()
        averages.append(round(smoothed.mean(), 3))
    return averages
def adjust_length(l):
    """Pad `l` in place with -1 up to length 10 and return it.

    Lists that already have 10 or more elements are returned unchanged;
    the same list object is always returned (callers may rely on the
    in-place mutation).
    """
    shortfall = 10 - len(l)
    if shortfall > 0:
        l.extend([-1] * shortfall)
    return l
analysis/OnlyAverages.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # ___ # # <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a> # ___ # # NumPy Exercises # # Now that we've learned about NumPy let's test your knowledge. We'll start off with a few simple tasks, and then you'll be asked some more complicated questions. # #### Import NumPy as np import numpy as np # #### Create an array of 10 zeros np.zeros(10) # #### Create an array of 10 ones np.ones(10) # #### Create an array of 10 fives (NEED TO TAKE NOTE) np.linspace(5, 5, 10) # #### Create an array of the integers from 10 to 50 np.arange(10, 51) # #### Create an array of all the even integers from 10 to 50 np.arange(10, 51, 2) # #### Create a 3x3 matrix with values ranging from 0 to 8 np.arange(0,9).reshape(3,3) # #### Create a 3x3 identity matrix np.eye(3) # #### Use NumPy to generate a random number between 0 and 1 np.random.rand(1) # #### Use NumPy to generate an array of 25 random numbers sampled from a standard normal distribution np.random.randn(25) # #### Create the following matrix: # # array([[ 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1 ], # [ 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.2 ], # [ 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3 ], # [ 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38, 0.39, 0.4 ], # [ 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5 ], # [ 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.6 ], # [ 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.69, 0.7 ], # [ 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8 ], # [ 0.81, 0.82, 0.83, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9 ], # [ 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 1. 
]]) np.arange(1, 101).reshape((10,10))/100 # #### Create an array of 20 linearly spaced points between 0 and 1: np.linspace(0, 1, 20) # ## Numpy Indexing and Selection # # Now you will be given a few matrices, and be asked to replicate the resulting matrix outputs: mat = np.arange(1,26).reshape(5,5) mat # + # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # - mat[2: , 1:] # + # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # - mat[3, 4] # + # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # - mat[:3, 1:2] # + # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # - mat mat[-1:, :] # + # WRITE CODE HERE THAT REPRODUCES THE OUTPUT OF THE CELL BELOW # BE CAREFUL NOT TO RUN THE CELL BELOW, OTHERWISE YOU WON'T # BE ABLE TO SEE THE OUTPUT ANY MORE # - mat[-2:, :] # ### Now do the following # #### Get the sum of all the values in mat mat.sum() # #### Get the standard deviation of the values in mat mat.std() # #### Get the sum of all the columns in mat mat.sum(axis = 0) # # Great Job!
Data Science and Machine Learning Bootcamp - JP/02.Python for Data Analysis - NumPy/04-Numpy Exercises-MySolutions.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Load unique tweet tokens from file # # Remove mentions and hashtags from tweets # # ### Save in another file the number of mentions for that tweet and the mentions list (same for hashtags) import time from TokenizerWrapper import TokenizerWrapper from TokenizerWrapper import special_tokens import numpy as np # ### Constants N_ROWS = 1000 # ### Paths # + TWEET_ID = "tweet_features_tweet_id" TWEET_TOKENS = "tweet_features_text_tokens" TWEET_TOKENS_FILE = "tweet_tokens/text_tokens_all_no_escaped_chars.csv" #"tweet_tokens/tweet_text_longer_than_280_no_escaped_chars.csv" RESULT_PATH = "tweet_tokens/text_tokens_clean_2.csv" MENTIONS_PATH = "tweet_tokens/mentions/mentions.csv" HASHTAGS_PATH = "tweet_tokens/hashtags/hashtags.csv" # - # ### Functions to extract mentions and hashtags from the tweet # return text_tokens, mentions_list, mentions_count # in case the tweet is a retweet def get_RT_mentions(tokens, mentions): length = len(tokens)-1 i = 2 # exclude CLS and the 56898 ('RT') token while tokens[i] != special_tokens[':'] and i < length: i += 1 #print('i: ' + str(i)) mentions.append(tokens[2:i]) #mentions.append('102\n') # append SEP \n tokens = tokens[i+1:] tokens.insert(0, '101') # insert CLS at beginning return tokens, mentions def get_mentions(tokens, mentions): found_initial = False initial_index = 0 final_index = 0 for i in range(len(tokens)): t = tokens[i] if t == special_tokens['@'] and not found_initial: initial_index = i found_initial = True elif found_initial and i==initial_index+1: pass elif found_initial and i > initial_index+1: decoded_t = tok.convert_tokens_to_strings([t])[0] if '##' in decoded_t: pass elif '_' == decoded_t: pass elif tok.convert_tokens_to_strings([tokens[i-1]])[0] == '_': pass else: final_index = i 
mentions.append(tokens[initial_index:final_index]) found_initial = False return tokens, mentions def get_remove_mentions(tokens, mentions): found_initial = False mask = [] initial_index = 0 final_index = 0 for i in range(len(tokens)): t = tokens[i] if found_initial and i==initial_index+1: mask.append(False) elif found_initial and i > initial_index+1: decoded_t = tok.convert_tokens_to_strings([t])[0] if '##' in decoded_t: mask.append(False) elif '_' == decoded_t: mask.append(False) elif tok.convert_tokens_to_strings([tokens[i-1]])[0] == '_': mask.append(False) else: final_index = i mentions.append(tokens[initial_index:final_index]) found_initial = False # mask.append(True) if not found_initial and t == special_tokens['@']: initial_index = i found_initial = True mask.append(False) elif not found_initial: mask.append(True) #print(decoded_t) tokens_arr = np.array(tokens) tokens_arr = tokens_arr[mask] tokens = tokens_arr.tolist() return tokens, mentions def get_remove_mentions_hashtags(tokens, mentions, hashtags): found_initial = False mask = [] initial_index = 0 final_index = 0 is_mention = False for i in range(len(tokens)): t = tokens[i] if found_initial and i==initial_index+1: mask.append(False) elif found_initial and i > initial_index+1: decoded_t = tok.convert_tokens_to_strings([t])[0] if '##' in decoded_t: mask.append(False) elif '_' == decoded_t: mask.append(False) elif tok.convert_tokens_to_strings([tokens[i-1]])[0] == '_': mask.append(False) else: final_index = i if is_mention: mentions.append(tokens[initial_index:final_index]) else: hashtags.append(tokens[initial_index:final_index]) found_initial = False # mask.append(True) if not found_initial and (t == special_tokens['@'] or t == special_tokens['#']): if t == special_tokens['@']: is_mention = True elif t == special_tokens['#']: is_mention = False initial_index = i found_initial = True mask.append(False) elif not found_initial: mask.append(True) #print(decoded_t) tokens_arr = np.array(tokens) tokens_arr = 
def split_line(l):
    """Split a CSV row "tweet_id,tok\ttok\t..." into (tweet_id, token list).

    Only the second comma-separated field is tokenized; the tokens inside
    it are tab-separated.
    """
    fields = l.split(',')
    tweet_id = fields[0]
    token_list = fields[1].split('\t')
    return tweet_id, token_list
mentions_file.write("mentions_count,mentions_tokens,mentions_text,mentions_mapped\n") hashtags_file.write("hashtags_count,hashtags_tokens,hashtags_text,hashtags_mapped\n") # ### Open files to be read tokens_file = open(TWEET_TOKENS_FILE, "r") # ### Execute # + # %%time # ~2h 30m EXECUTION # ignore header line = tokens_file.readline() start = time.time() finished = False row = 0 while not finished: # and row < N_ROWS: mentions_tokens = [] hashtags_tokens = [] if row % 1000000 == 0: elapsed_time = time.time() - start print('Row: ', row, ' - Elapsed time: ', elapsed_time) line = str(tokens_file.readline()) #print(line) if line != '': tweet_id, tokens_list = split_line(line) #if tweet_id == '130' or tweet_id == '154' or tweet_id == '161': #print('\ntweet_id: ', tweet_id) #print(tokens_list) #decoded_tweet = tok.decode(tokens_list) #print('\n', decoded_tweet, '\n') # retweets contain the word RT (right after CLS, in position 1) followed # by mentions and then a ':', before starting with the actual tweet text if tokens_list[1] == special_tokens['RT'] and tokens_list[2] == special_tokens['@']: tokens_list, mentions_tokens = get_RT_mentions(tokens_list, mentions_tokens) # remove remaining mentions tokens_list, mentions_tokens, hashtags_tokens = get_remove_mentions_hashtags(tokens_list, mentions_tokens, hashtags_tokens) mentions_count = len(mentions_tokens) mentions_strings = convert_tokens_to_strings(mentions_tokens) mapped_mentions = map_mentions(mentions_strings) hashtags_count = len(hashtags_tokens) hashtags_strings = convert_tokens_to_strings(hashtags_tokens) mapped_hashtags = map_hashtags(hashtags_strings) #print('tweet tokens: ', tokens_list) #print('mentions tokens: ', mentions_tokens) #print('mentions text: ', mentions_strings) #print('mapped_mentions: ', mapped_mentions) #print('mentions count: ', mentions_count) #print('decoded tweet: ', tok.decode(f_int(tokens_list))) #print('hashtag text: ', hashtags_strings) #print('mapped_hashtags: ', mapped_hashtags) 
save_tweet(tweet_id, tokens_list) save_mentions_or_hashtags(mentions_tokens, mentions_strings, mapped_mentions, mentions_count, is_mentions=True) save_mentions_or_hashtags(hashtags_tokens, hashtags_strings, mapped_hashtags, hashtags_count, is_mentions=False) else: finished = True row += 1 # + tokens_file.close() result_file.close() mentions_file.close() hashtags_file.close() # - # ### Save mapping dictionaries import json len(mentions_dict) json_mentions_mapping = json.dumps(mentions_dict) with open('tweet_tokens/mentions/mentions_mapping.json', 'w+') as f: f.write(json_mentions_mapping) len(hashtags_dict) json_hashtags_mapping = json.dumps(hashtags_dict) with open('tweet_tokens/hashtags/hashtags_mapping.json', 'w+') as f: f.write(json_hashtags_mapping) # ### Check the dataset # + # %%time import pandas as pd df = pd.read_csv(RESULT_PATH, #names=[TWEET_ID], nrows=1000, header=0, index_col=0) # - df # + # %%time df = pd.read_csv(MENTIONS_PATH, #names=[TWEET_ID], nrows=1000, header=0) # - df # + # %%time df = pd.read_csv(HASHTAGS_PATH, #names=[TWEET_ID], nrows=1000, header=0) # - df
BERT/Notebooks/remove_mentions_hashtags.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # 12 Fundamentals of Connectionists' methods # *This notebook illustrates methods to implement tasks using connectionists methods* # *** # <NAME>, Ph.D. 2021 # ### Import Packages # + #import packages import pandas as pd import matplotlib.pyplot as plt import matplotlib.cm as cm import sys; sys.path.insert(0, '..') #add the above level with the package #import data from sklearn.datasets import load_breast_cancer #Load the dataset data = load_breast_cancer() # define X dataframe X = data.data X = pd.DataFrame(X) # define y dataframe y = data.target y = pd.DataFrame(y,columns=['target']) ''' useful material https://medium.com/@thomascountz/19-line-line-by-line-python-perceptron-b6f113b161f3 ''' # - # ### Hyperparameters gridsearch and fit linear models # + from analogistics.learning.connectionists_grids import GridSearchConnectionist grid_search = GridSearchConnectionist() D_res_regr = grid_search.train_models_classification(X, y) D_res_regr # - # ### Evaluate the best model # + #Split into training and testing set from sklearn.model_selection import train_test_split x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0) #retrieve the best model from the grid search best_model = D_res_regr.iloc[0]['MODEL'] best_model y_pred = best_model.predict(x_test) # - # ### Accuracy score = best_model.score(x_test, y_test) print(f"Accuracy: {score}") # ### Confusion matrix # + import matplotlib.pyplot as plt import seaborn as sns from sklearn import metrics #define the confusion matrix cm = metrics.confusion_matrix(y_test, y_pred) #plot the confusion matrix plt.figure(figsize=(9,9)) sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Blues_r'); plt.ylabel('Actual label'); plt.xlabel('Predicted 
label'); all_sample_title = 'Accuracy Score: {0}'.format(score) plt.title(all_sample_title, size = 15); # - # ### Area under the ROC curve # + fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred) plt.plot(fpr, tpr) plt.xlim([0.0, 1.0]) plt.ylim([0.0, 1.0]) plt.rcParams['font.size'] = 12 plt.title('ROC curve') plt.xlabel('False Positive Rate (1 - Specificity)') plt.ylabel('True Positive Rate (Sensitivity)') plt.grid(True) # calculate the AUC print(f"AUC: {metrics.roc_auc_score(y_test, y_pred)}")
examples/12 Fundamentals - Connectionists' methods.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # Introduction # Welcome to the course! # This notebook will outline our structure for the course, and introduce you to the notebook if you haven't used it before. # ## Background Expectations # # - Hopefully you've used Python before # - Experience with NumPy will be helpful, but not required # - Pandas will be the primary focus # - We'll see bits of scikit-learn and statsmodels # ## Course Format # # - I'll have slides # - We'll work through notebooks (execute each cell) # - The **slide title** will match the **notebook section** # - You'll do exercises # - During exercises, I'll follow-up on submitted questions # - I'll demonstrate the solutions # ## Jupyter Notebook # # > The Jupyter Notebook is a web application that allows you to create and share documents that contain live code, equations, visualizations and explanatory text. # # - Two Modes: Edit and Command # - Command -> Edit: `Enter` # - Edit -> Command: `Esc` # - Execute a Cell: `Shift+Enter` # - Down: `j/Down Arrow` # - Up: `k/Up Arrow` # ## Tab Completion # # IPython will tab complete method names and function arguments # # Use `shift+tab` to inside a function call to show the signature # type str.<TAB> # type str.split(<shift+TAB>) # ## Exercises # # - Lots of small exercises to check understanding # - Each exercise includes # + A prompt / question to be answered # + An empty cell for code # + A "magic" cell that loads a solution # - Execute the magic cell twice # <div class="alert alert-success" data-title="Print 'Hello, world!'"> # <h1><i class="fa fa-tasks" aria-hidden="true"></i> Exercise: Print 'Hello, world!'</h1> # </div> # # <p>Print the text "Hello, world!"</p> # Your code here # make sure to run this twice! 
# %load solutions/readme_00.py # Make sure to run the solution cell twice. I'd encourage you to always # run the solution cell, as later steps in the notebooks will depend on earlier # steps being correct. # ## Pandas Cheat Sheet # # https://github.com/pandas-dev/pandas/blob/master/doc/cheatsheet/Pandas_Cheat_Sheet.pdf # # ![cheat sheet](figures/cheat-sheet-preview.png) # # ## Notebooks # # 1. [Indexing](01-Indexing.ipynb) # 2. [Alignment](02-Alignment.ipynb) # 3. [Iterators & Groupby](03-Iterators-Groupby.ipynb) # 4. [Visualization](04-Visualization.ipynb) # 5. [Tidy Data](05-Tidy-Data.ipynb) # 6. [Performance](06-Performance.ipynb) # 7. [Timeseries](07-Timeseries.ipynb) # 8. [Ecosystem](08-Pandas-NumPy-ScikitLearn.ipynb)
notebooks/00-README.ipynb
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # + import numpy as np import pandas as pd import os from glob import glob # %matplotlib inline import matplotlib.pyplot as plt import tensorflow as tf from skimage import io from keras.preprocessing.image import ImageDataGenerator from keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten, Conv2D, MaxPooling2D from keras.models import Sequential, Model from keras.applications.vgg16 import VGG16 from keras.applications.resnet import ResNet50 from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau # - train_df = pd.read_csv('train.csv') valid_df = pd.read_csv('test.csv') train_df.head() valid_df.head() # ## Image Augmentation: # (no code changes needed) IMG_SIZE = (224, 224) # + train_idg = ImageDataGenerator(rescale=1. / 255.0, horizontal_flip = True, vertical_flip = False, height_shift_range= 0.1, width_shift_range=0.1, rotation_range=20, shear_range = 0.1, zoom_range=0.1) train_gen = train_idg.flow_from_dataframe(dataframe=train_df, directory=None, x_col = 'img_path', y_col = 'class', class_mode = 'binary', target_size = IMG_SIZE, batch_size = 9 ) # Note that the validation data should not be augmented! We only want to do some basic intensity rescaling here val_idg = ImageDataGenerator(rescale=1. 
/ 255.0 ) val_gen = val_idg.flow_from_dataframe(dataframe=valid_df, directory=None, x_col = 'img_path', y_col = 'class', class_mode = 'binary', target_size = IMG_SIZE, batch_size = 6) ## We've only been provided with 6 validation images # - ## Pull a single large batch of random validation data for testing after each epoch testX, testY = val_gen.next() # ## Load in VGG16 with pre-trained ImageNet weights: # (No code changes needed): model = VGG16(include_top=True, weights='imagenet') model.summary() for indx, layer in enumerate(model.layers): print(indx, layer.name, layer.output_shape) transfer_layer = model.get_layer('block5_pool') vgg_model = Model(inputs=model.input, outputs=transfer_layer.output) ## Now, choose which layers of VGG16 we actually want to fine-tune (if any) ## Here, we'll freeze all but the last convolutional layer for layer in vgg_model.layers[0:17]: layer.trainable = False # + new_model = Sequential() # Add the convolutional part of the VGG16 model from above. new_model.add(vgg_model) # Flatten the output of the VGG16 model because it is from a # convolutional layer. new_model.add(Flatten()) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. new_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. new_model.add(Dense(1024, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. new_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. # This is for combining features that the VGG16 model has # recognized in the image. new_model.add(Dense(512, activation='relu')) # Add a dropout-layer which may prevent overfitting and # improve generalization ability to unseen data e.g. the test-set. new_model.add(Dropout(0.5)) # Add a dense (aka. fully-connected) layer. 
# Define a function here that will plot loss, val_loss, binary_accuracy, and val_binary_accuracy over all of
# your epochs:
def plot_history(history):
    """Plot training/validation loss and binary accuracy across all epochs.

    `history` is the object returned by model.fit/fit_generator; its
    .history dict holds one list per tracked metric, one value per epoch.
    All four curves share one axis, so loss and accuracy are plotted on
    the same scale.
    """
    epochs = np.arange(0, len(history.history['loss']))
    plt.style.use('ggplot')
    plt.figure()
    plt.plot(epochs, history.history['loss'], label='Training loss')
    # Fixed label typo: 'Validtion Loss' -> 'Validation Loss'.
    plt.plot(epochs, history.history['val_loss'], label='Validation Loss')
    plt.plot(epochs, history.history['binary_accuracy'], label='Training Accuracy')
    plt.plot(epochs, history.history['val_binary_accuracy'], label='Validation Accuracy')
    plt.xlabel('Epoch Number')
    plt.ylabel('Loss/Accuracy')
    plt.legend(loc='lower left')
    plt.show()
new_model.add(Dropout(0.20)) new_model.add(Dense(128, activation='relu')) new_model.add(Dropout(0.15)) new_model.add(Dense(32, activation='relu')) new_model.add(Dense(1, activation='sigmoid')) # - new_model.compile(optimizer=optimizer, loss=loss, metrics=metrics) history = new_model.fit_generator(train_gen, validation_data = (testX, testY), epochs = 10) plot_history(history) # ## Finally, try a model with the same amount of dropout as you initiall had, but a slower learning rate: # + ## COPY AND PASTE THE ARCHITECTURE FROM THE FIRST EXAMPLE new_model = Sequential() new_model.add(vgg_model) new_model.add(Flatten()) new_model.add(Dense(1024, activation='relu')) new_model.add(Dropout(0.25)) new_model.add(Dense(512, activation='relu')) new_model.add(Dropout(0.20)) new_model.add(Dense(128, activation='relu')) new_model.add(Dropout(0.15)) new_model.add(Dense(32, activation='relu')) new_model.add(Dense(1, activation='sigmoid')) # + ## CHANGE THE LEARNING RATE DEFINED IN Adam(): optimizer = Adam(lr=1e-6) loss = 'binary_crossentropy' metrics = ['binary_accuracy'] # - new_model.compile(optimizer=optimizer, loss=loss, metrics=metrics) history = new_model.fit_generator(train_gen, validation_data = (testX, testY), epochs = 10) plot_history(history)
Applying AI to 2D Medical Imaging Data/Models for Classification of 2D Medical Images/Exercise - Evaluating Your Model/Exercise.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     name: python3
# ---

# + id="XX1TGj4abRmI"
from keras.callbacks import ModelCheckpoint
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from matplotlib import pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sb
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import warnings
import tensorflow as tf
warnings.filterwarnings('ignore')
warnings.filterwarnings('ignore', category=DeprecationWarning)
from xgboost import XGBRegressor
import seaborn as sns
sns.set()
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib.dates import DateFormatter
import sklearn.linear_model as lm
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import log_loss
from sklearn.metrics import accuracy_score
import sklearn.impute as im
from sklearn.preprocessing import PolynomialFeatures
from datetime import timedelta
from datetime import datetime

# + [markdown] id="opeor60uC8ov"
# # todo
# feature importance,
# rerun nn on new data

# + [markdown] id="LQ4K6XFrPfVr"
# # Data import

# + colab={"base_uri": "https://localhost:8080/"} id="KxhoYj-FCqDz" executionInfo={"status": "ok", "timestamp": 1650258061733, "user_tz": 240, "elapsed": 652, "user": {"displayName": "<NAME>", "userId": "13329608347120801874"}} outputId="9cb0c3e8-b3cb-470d-906e-5a202971a12b"
# !gdown --id 1dWfWzaISPR1Cv4tR0EtNhD1jQODV0Kp9 # train test key
# !gdown --id 1ji2idP3o37gQmyR-x-n849LbhmxilxPG # full dataset

# + id="xNpWhJFxCqFi"


# + id="WwEoUxNoCqHC"


# + [markdown] id="1co7xQB2DBgX"
#

# + id="KiJeiWFybVGX"
df = pd.read_csv('https://raw.githubusercontent.com/hohohohia/misc/main/full_ds.csv')

# + id="qn43UbNTbcU8"
rdf = pd.read_csv('https://raw.githubusercontent.com/hohohohia/misc/main/reduced_ds.csv')

# + id="djHMV0DS6P2I"
df_full = pd.read_csv('https://raw.githubusercontent.com/hohohohia/misc/main/USDM_SMAP_SET_NOAA_full.csv')
df_full[df_full == -9999] = np.nan  # -9999 is the dataset's missing-value sentinel
FIPS = df_full.FIPS.unique()
df_full['MapDate'] = pd.to_datetime(df_full['MapDate'])
# Binary target: 1 whenever any drought category is present (the 'None'
# share is below 100), else 0.
df_full['y_cat'] = np.where(df_full['None'].to_numpy() == 100, 0, 1)

# + id="5D7SgDIg_9ow"
def build_model(df, X_list, y_list, start_pred, pred_time, lag=1, remove_na=True):
    """Train the NN classifier on history before `start_pred` and score it on
    the following `pred_time` weeks, with the features lagged `lag` weeks
    relative to the target.

    Returns (binary accuracy on the test window, predictions, y_test).

    BUGFIX: every strptime call here originally used '%Y-%M-%d' -- %M is
    *minutes*, not month. '2021-01-01' only parsed correctly by accident
    (month defaulted to 1); any non-January start date would have been
    silently mis-parsed. Corrected to '%m'.
    """
    # data preparation pipeline created by Yujie
    if remove_na:
        df = df.dropna()
    start = datetime.strptime(start_pred, '%Y-%m-%d')
    X_train = df[df['MapDate'] < start - timedelta(weeks=lag)][X_list]
    # NOTE(review): X_train and y_train are aligned purely by row position
    # after this lag shift -- assumes one row per week per location; verify
    # against the upstream data layout.
    y_train = df[df['MapDate'] >= (min(df['MapDate']) + timedelta(weeks=lag))]
    y_train = y_train[y_train['MapDate'] <= start_pred][y_list]
    df_test = df[df['MapDate'] >= start - timedelta(weeks=lag)]
    X_test = df_test[df_test['MapDate'] < start - timedelta(weeks=lag) + timedelta(weeks=pred_time)][X_list]
    y_test = df_test[df_test['MapDate'] < start + timedelta(weeks=pred_time)]
    y_test = y_test[y_test['MapDate'] >= start][y_list]
    X_test = X_test.drop('MapDate', axis=1)
    X_train = X_train.drop('MapDate', axis=1)
    NN_model = modelmaker(X_train.shape[1])
    NN_model.fit(X_train, y_train, epochs=5, batch_size=32, validation_split=0.2, verbose=1)
    scores = NN_model.evaluate(X_test, y_test)  # renamed from `eval`, which shadowed the builtin
    pred = NN_model.predict(X_test)
    NN_model = None  # drop the model so repeated grid-search calls don't accumulate graphs
    return scores[1], pred, y_test


def impute(df, imp):
    """Fit the given sklearn imputer on `df` and return the filled ndarray."""
    return imp.fit_transform(df)

# + id="4cO_vBzmAGmt"
df_filled = df_full.copy()
# Replace the datetime with days since the first map date (numeric, so the
# imputer can process the column).
df_filled['MapDate'] = (df_full['MapDate'] - df_full['MapDate'].min()) / np.timedelta64(1, 'D')

# Models
model = LogisticRegression()
imputer = im.SimpleImputer()

# + id="3zCwGiQR6P4C"
# 1,3,4,8,16,26,52

# + id="rPQPtEQM6P9p"


# + id="Ax_VtyZq6P_x"


# + id="OCn-IezCQfN6"
# rdf_numpy=rdf.to_numpy()
# X =np.array(rdf_numpy[:,[4, 5,6]], dtype=np.float)
# Y0 =np.array(rdf_numpy[:,[7]], dtype=np.float)
# Y0 =np.where(Y0==100,1,0)
# X=np.hstack([X,Y0])
# Y1 =np.array(rdf_numpy[:,[8]], dtype=np.float)
# Y1 =np.where(Y1==100,1,0)
# Y2 =np.array(rdf_numpy[:,[9]], dtype=np.float)
# Y2 =np.where(Y2==100,1,0)
# Y3 =np.array(rdf_numpy[:,[10]], dtype=np.float)
# Y3 =np.where(Y3==100,1,0)
# Y4 =np.array(rdf_numpy[:,[11]], dtype=np.float)
# Y4 =np.where(Y4==100,1,0)

# def trte_split(X,Y):
#     X_train, X_test, y_train, y_test = train_test_split(X,Y, test_size=0.33, random_state=438)
#     return X_train, X_test, y_train, y_test

# X_train,_,_,_=trte_split(X,Y1)

# + [markdown] id="IFLLfGD5Pi3I"
# # Neural Net Regression

# + [markdown] id="CI9aQcvHeBOC"
# ## Pseudo cross sectional, mimicking OLS

# + id="QUoB6VBf-9Jw"
def modelmaker(shape):
    """Build the fully-connected binary classifier for `shape` input features.

    The single output unit is linear; the loss is built with from_logits=True
    so the sigmoid is applied inside the loss, and BinaryAccuracy thresholds
    the raw model output at 0.2.
    """
    NN_model = Sequential()
    # input layer
    NN_model.add(Dense(128, kernel_initializer='normal', input_dim=shape, activation='relu'))
    # hidden layers
    NN_model.add(Dense(256, kernel_initializer='normal', activation='relu'))
    NN_model.add(Dense(256, kernel_initializer='normal', activation='relu'))
    NN_model.add(Dense(256, kernel_initializer='normal', activation='relu'))
    NN_model.add(Dense(128, kernel_initializer='normal', activation='relu'))
    NN_model.add(Dense(64, kernel_initializer='normal', activation='relu'))
    NN_model.add(Dense(64, kernel_initializer='normal', activation='relu'))
    NN_model.add(Dense(32, kernel_initializer='normal', activation='relu'))
    # output layer
    NN_model.add(Dense(1))
    NN_model.compile(loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
                     optimizer='adam',
                     metrics=tf.metrics.BinaryAccuracy(threshold=0.2))
    return NN_model
    # NN_model.summary()

# + id="DzfdWj6DlvlF"
# def model_eval(X,Y):
#     X_train, X_test, y_train, y_test = trte_split(X,Y)
#     NN_model=model()
#     NN_model.fit(X_train,y_train, epochs=5, batch_size=32, validation_split = 0.2,verbose=1)
#     eval=NN_model.evaluate(X_test,y_test)
#     pred=NN_model.predict(X_test)
#     NN_model=None
#     return eval,pred,y_test

# mse_y1,pred_y1,test_y1=model_eval(X,Y1)
# mse_y2,pred_y2,test_y2=model_eval(X,Y2)
# mse_y3,pred_y3,test_y3=model_eval(X,Y3)
# mse_y4,pred_y4,test_y4=model_eval(X,Y4)
# mses=np.round([mse_y1[1],mse_y2[1],mse_y3[1],mse_y4[1]],4)

# + colab={"base_uri": "https://localhost:8080/", "height": 423} id="DnffYe6AG5mb" executionInfo={"status": "ok", "timestamp": 1649810193737, "user_tz": 240, "elapsed": 5, "user": {"displayName": "<NAME>", "userId": "13329608347120801874"}} outputId="f924dea4-bb7d-4357-f7a0-5aced7cb7e90"
# Parameters
X_list = ['MapDate', 'roff', 'evap', 'smap', 'PRCP']
y_list = ['y_cat']
start_pred = '2021-01-01'
pred_time = 4

# Feature columns plus the target (renamed from `vars`, which shadowed the
# builtin; the side-effecting list comprehension is replaced by plain +).
cols = X_list + y_list
df_filled = impute(df_filled[cols], imputer)
df_filled = pd.DataFrame(df_filled, columns=cols)
# Keep the numeric day counter as 'week' and restore the true datetime
# column so build_model can do its date arithmetic.
df_filled = df_filled.rename(columns={'MapDate': 'week'})
df_filled['MapDate'] = df_full['MapDate']
X_list.append('week')
df_filled

# + [markdown] id="6f26PkwGC7Tm"
#

# + id="oPksuq-bHB5y"
Xs = ['MapDate', 'roff', 'evap', 'smap', 'PRCP', 'y_cat']

# + colab={"base_uri": "https://localhost:8080/"} id="_NdstkNlBAZS" executionInfo={"status": "ok", "timestamp": 1649816221597, "user_tz": 240, "elapsed": 6027351, "user": {"displayName": "<NAME>", "userId": "13329608347120801874"}} outputId="6ba6456e-c3d3-4369-db2d-03a225cf2f5a"
# Grid-search prediction horizon (weeks) x feature lag (weeks), storing the
# test-window binary accuracy for each combination.
weeks = [1, 3, 4, 8, 16, 26, 52]
lags = [1, 2, 3, 4, 5]
results = pd.DataFrame(index=lags, columns=weeks)
df_test = df_filled
for week in weeks:
    for lag in lags:
        res = build_model(df_test, Xs, y_list[0], start_pred, week, lag=lag)
        results.loc[lag, week] = res[0]

# + colab={"base_uri": "https://localhost:8080/", "height": 314} id="Iaqm91Vkufga" executionInfo={"status": "ok", "timestamp": 1649816502842, "user_tz": 240, "elapsed": 975, "user": {"displayName": "<NAME>", "userId": "13329608347120801874"}} outputId="0d7a29dd-994e-4109-d542-3c9fe64bb83b"
results = results.astype(float)
ax = sns.heatmap(results, annot=True, cmap='Greens_r', fmt='.3f')
ax.set_xlabel('Prediction Weeks')
ax.set_ylabel('Lag Training Weeks')
ax.invert_yaxis()
plt.suptitle('Prediction accuracy')
plt.show()

# + id="Eg-n1-yflvo7"


# + id="aehmVLqTbEZE"
# # %%capture
# # !pip install prettytable

# + id="Hx--et6Qlvsr"
# from prettytable import PrettyTable
# x = PrettyTable()
# x.add_column("For n weeks into the future", ["1 week","2 weeks","3 weeks","4 weeks"])
# x.add_column("Accuracy", mses)
# print(x)

# + id="_pBf4i0UGP_O"
# def binarize(x):
#     return np.where(x>0.2,1,0)

# + id="1OqiNg-cHfZr"
# def util1(pred_y1):
#     all_P=np.sum(binarize(pred_y1))
#     all_N=pred_y1.shape[0]-all_P
#     dot=np.array([[1/all_N,1/all_P], [1/all_N,1/all_P]])
#     return dot

# + id="OE3nW7kZeXCb"
#np.multiply(confusion_matrix(binarize(pred_y1),test_y1),util1(pred_y1))
#true negative, false positive
#false negative, true positive

# + id="NiF6Mx6ueXEd"
#np.multiply(confusion_matrix(binarize(pred_y2),test_y2),util1(pred_y2))

# + id="RiP7-rVfJFsi"
#np.multiply(confusion_matrix(binarize(pred_y3),test_y3),util1(pred_y3))

# + id="jbirX3zKJFw6"
#np.multiply(confusion_matrix(binarize(pred_y4),test_y4),util1(pred_y4))
Codes/Modeling/Archived/Baseline_Neural_net_models.ipynb
# ---
# jupyter:
#   jupytext:
#     text_representation:
#       extension: .py
#       format_name: light
#       format_version: '1.5'
#     jupytext_version: 1.14.4
#   kernelspec:
#     display_name: Python 3
#     language: python
#     name: python3
# ---

# +
# This file imports country data from the International Monetary Fund and International Country Risk Guide to
# create predictor variables. It also imports the country bond rating from Fitch as the target prediction.

# +
import pandas as pd
import numpy as np

WEO = pd.read_excel("E:\Downloads\IMF\WEOApr2018all.xlsx")
# -

PoliticalFile = pd.ExcelFile("E:\Downloads\IMF\Political2014.xlsx")
FinanicalFile = pd.ExcelFile("E:\Downloads\IMF\Finanical2014.xlsx")
EconomicFile = pd.ExcelFile("E:\Downloads\IMF\Economic2014.xlsx")
PNames = PoliticalFile.sheet_names
FNames = FinanicalFile.sheet_names
ENames = EconomicFile.sheet_names
Fitch = pd.read_excel("E:\Downloads\IMF\FitchRatings.xlsx")

# +
# Stack every sheet of the three ICRG workbooks (tagging each sheet's rows
# with the sheet name as 'Indicator') together with the WEO table.
# Collecting the pieces in a list and concatenating once avoids the quadratic
# cost of the original pd.concat-inside-the-loop; the resulting rows are the
# same.
pieces = []
for workbook, sheet_names in ((PoliticalFile, PNames),
                              (FinanicalFile, FNames),
                              (EconomicFile, ENames)):
    for sheet in sheet_names:
        part = pd.read_excel(workbook, sheet_name=sheet)
        part['Indicator'] = sheet
        pieces.append(part)
pieces.append(WEO)
Df = pd.concat(pieces)

# +
# Fitch's 25-notch long-term scale; flipped so index 0 is the worst (D) and
# index 24 the best (AAA).
RatingScale = np.array(['AAA', 'AA+', 'AA', 'AA-', 'A+', 'A', 'A-', 'BBB+', 'BBB', 'BBB-',
                        'BB+', 'BB', 'BB-', 'B+', 'B', 'B-', 'CCC+', 'CCC', 'CCC-', 'CC',
                        'C', 'RD', 'DDD', 'DD', 'D'])
RatingScale = np.flip(RatingScale, axis=0)

FitchCountry = Fitch['Country']
FitchYear = Fitch.loc[:, 'Date'].apply(lambda x: x.year)
col = ['Country', 'Year', 'RatingS']


def _year_mean_rating(ratings):
    """Average the scale positions of one country-year's rating actions and
    compress to a coarse bucket (25 notches / 5, rounded).

    An unknown rating symbol stops the scan (remaining slots stay 0),
    matching the original loop's `break` behaviour; the original seed block
    lacked this guard and would have crashed on an unknown symbol.
    """
    positions = np.zeros(len(ratings))
    for j in range(len(ratings)):
        symbol = ratings.iloc[j]
        if symbol not in RatingScale:
            break
        positions[j] = np.where(RatingScale == symbol)[0]
    return np.round(np.mean(positions) / 5)


# One row per (country, year): that year's mean coarse rating.
# BUGFIX: the original seeded FitchAArr with np.array(newrow) and then
# immediately vstack-ed the very same row again, so the first country-year
# appeared twice in the final dataset. The deduplicated loop below emits
# each (country, year) exactly once.
rows = []
seen = set()
for i in range(len(Fitch)):
    Country1 = FitchCountry[i]
    Year1 = FitchYear[i]
    if (Country1, Year1) in seen:
        continue
    seen.add((Country1, Year1))
    Rating1 = Fitch.loc[(FitchCountry == Country1) & (FitchYear == Year1),
                        'Foreign currency long-term']
    rows.append([Country1, Year1, _year_mean_rating(Rating1)])

# np.array coerces the mixed rows to strings exactly as the original vstack
# did, so the downstream int()/float conversions behave identically.
FitchA = pd.DataFrame(np.array(rows), columns=col)

# +
# Codes defining X's 21 predictor columns, in order: six WEO series looked up
# via 'WEO Subject Code', then fifteen ICRG sheets looked up via 'Indicator'.
WEO_CODES = ['NGDPD', 'NID_NGDP', 'NGSD_NGDP', 'GGXWDG_NGDP', 'BCA_NGDPD', 'LUR']
ICRG_CODES = ['Government Stability', 'Socioeconomic Conditions', 'Internal Conflict',
              'External Conflict', 'Bureaucracy Quality', 'ForDebt', 'XRStab', 'DebtServ',
              'CAXGS', 'IntLiq', 'Inflation', 'GDPHead', 'GDPGrowth', 'BudBal',
              # BUGFIX: the final feature (CACC) originally looked up 'BudBal'
              # again -- a copy/paste slip that made it an exact duplicate of
              # the BudBal column. TODO(review): confirm 'CACC' is the actual
              # sheet name in the ICRG workbooks.
              'CACC']


def _lookup(df_cy, filter_col, code, year):
    """Return the `year` column value of the row tagged `code`, or NaN if the
    country has no row for that code."""
    series = df_cy[df_cy[filter_col] == code][year]
    return series.iat[0] if series.size > 0 else np.nan


X = np.zeros((len(FitchA), 21))
Y = np.zeros(len(FitchA))
keep = np.full(len(FitchA), True)  # set False where every predictor is NaN
for i in range(len(FitchA)):
    Country1 = FitchA.iloc[i, 0]
    Year1 = int(FitchA.iloc[i, 1])  # int() replaces np.int, removed in NumPy 1.24
    DfCY = Df[Df['Country'] == Country1]
    X[i, :] = np.array(
        [_lookup(DfCY, 'WEO Subject Code', code, Year1) for code in WEO_CODES]
        + [_lookup(DfCY, 'Indicator', code, Year1) for code in ICRG_CODES])
    Y[i] = FitchA.iloc[i, 2]
    if all(np.isnan(X[i, :])):
        keep[i] = False

# Drop country-years with no predictors at all.
X = X[keep, :]
Y = Y[keep]

# +
import pickle

print(np.shape(X))
# `with` guarantees the files are closed even if pickling raises.
with open('X.pckl', 'wb') as f1:
    pickle.dump(X, f1)

print(np.shape(Y))
with open('Y.pckl', 'wb') as f2:
    pickle.dump(Y, f2)
CreditRatingData.ipynb