code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import json
import os

# Responses from the GitHub issue-events API, keyed by the running issue
# counter (`count` below): issue-index -> list of commit SHAs that referenced it.
dictResponses={}
# Every .json event dump under ../Datasets.
fileList = ['../Datasets/' + f for f in os.listdir('../Datasets') if f.endswith('.json')]
fileList
# NOTE(review): hard-coded to the 7th file; raises IndexError if fewer exist.
fileList=[fileList[6]]
issueEventList = []
prEventList = []
# Mixed list of closed events: [0, issue_payload] or [1, pull_request_payload].
combinedList=[]
for f in fileList:
    fileObj = open(f,)
    data = json.load(fileObj)
    for event in data:
        # Keep only "closed" issue and PR events; tag them 0 / 1 respectively.
        try:
            if(event["type"] == "IssuesEvent" and event["payload"]["action"] == "closed"):
                combinedList.append([0,event["payload"]["issue"]])
            if(event["type"] == "PullRequestEvent" and event["payload"]["action"] == "closed"):
                combinedList.append([1,event["payload"]["pull_request"]])
        # NOTE(review): bare except hides malformed events AND real bugs
        # (e.g. typos in the keys above) -- consider catching KeyError only.
        except:
            pass
    fileObj.close()
len(combinedList)

import requests
import numpy as np
# Rows of [issue title, issue body, labels, PR title, PR body] for
# issue/PR pairs linked by a referencing commit.
issuePrArr=[]
count=0
# GitHub personal access token stored as "user,token" in ~/Documents/patt.txt.
pattfile=os.path.expanduser('~/Documents/patt.txt')
with open(pattfile,'r') as f:
    pat_u,pat_t = f.read().strip().split(",")
# print(pat_u + " " + pat_t)

# +
count=0
for i in combinedList:
    # Skip PR entries (tag 1); only closed issues are queried.
    if i[0]==1:
        continue
    ev=i[1]
    count+=1
    # Resume support: skip issues already fetched in a previous run.
    if count in dictResponses:
        continue
    r = requests.get(ev["events_url"],auth=(pat_u,pat_t))
    print(str(count) + " " + str(r.status_code))
    if r.status_code == 404:
        dictResponses[count]=[]
    elif(r.status_code==200):
        dictResponses[count]=[]
        for event in json.loads(r.content):
            # "referenced" timeline events carry the SHA of the commit that
            # mentioned this issue; match it against nearby PRs' merge commits.
            if("event" in event and event["event"] == "referenced"):
                if(event["commit_id"] is not None):
                    dictResponses[count].append(event["commit_id"])
                    # Scan a +/-10 window around this issue's position for a
                    # merged PR whose merge commit is the referencing commit.
                    j=max(0,count-10)
                    # NOTE(review): j can run past len(combinedList)-1 near the
                    # end of the list and raise IndexError -- confirm intent.
                    while j<count+10:
                        if combinedList[j][0]==1 and combinedList[j][1]["merge_commit_sha"] == event["commit_id"]:
                            issuePrArr.append([ev["title"],ev["body"],''.join([label["name"] for label in ev["labels"]]),combinedList[j][1]["title"],combinedList[j][1]["body"]])
                        j+=1
# if(count>1):
# break
# -
issuePrArr
dictResponses
len(dictResponses)

import csv
# NOTE(review): fileList[0] already starts with '../Datasets/', so this writes
# to '../Datasets/../Datasets/<name>.jsoncommMap.csv' -- almost certainly not
# the intended path. The handle is also never closed; 'wb' is the Python 2
# csv idiom (this notebook declares a python2 kernel).
w = csv.writer(open("../Datasets/"+fileList[0]+"commMap.csv", "wb"))
for key, val in dictResponses.items():
    w.writerow([key, [x.encode('UTF-8') for x in val]])
| Notebooks/ViewData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
import torch
from PIL.Image import Image
from torch import Tensor
from src.OCR.architecture import CharacterRecognizer
from src.base import IO
def load_and_transform(img_path, model):
    """Load an image, resize it to the model's input size, and return
    both the torch-formatted tensor and the resized PIL image."""
    width, height = model.input_size
    # NOTE(review): PIL's Image.resize expects (width, height); passing the
    # swapped pair mirrors the original code -- confirm that model.input_size
    # is ordered (w, h) and that the swap here is intentional.
    target_size = (height, width)
    rgb_img = IO.load_image(img_path).convert('RGB')
    resized = rgb_img.resize(target_size)
    return to_torch_format(resized, model.dtype), resized
def to_torch_format(img: Image, dtype: torch.dtype) -> Tensor:
    """Convert a PIL image into a tensor scaled to [0, 1]: the flat pixel
    data is reshaped to (*img.size, channels), transposed, and divided by 255."""
    flat = torch.tensor(img.getdata(), dtype=dtype)
    shaped = flat.reshape(*img.size, -1)
    return shaped.T / 255
# NOTE(review): the '~' in this path is handed to load_from_checkpoint
# unexpanded -- confirm the loader expands it (os.path.expanduser would).
model = CharacterRecognizer.load_from_checkpoint('~/models/LPR-OCR/golden-lr/lightning_logs/version_2/checkpoints/last.ckpt')
# + pycharm={"name": "#%%\n"}
# Gather every PNG/JPG example, batch the tensors, and run OCR prediction.
images = IO.get_image_paths('../rl-examples', '*.png') + IO.get_image_paths('../rl-examples', '*.jpg')
image_tensors = [load_and_transform(img, model)[0] for img in images]
stacked_images = torch.stack(image_tensors)
predictions = model.predict(stacked_images)
# NOTE(review): every image is loaded and resized a second time here just to
# keep the PIL previews -- load_and_transform already returns both values.
resized_images = [load_and_transform(img, model)[1] for img in images]
# + pycharm={"name": "#%%\n"}
# Show each prediction next to its (resized) source image.
print(predictions[0])
resized_images[0]
# + pycharm={"name": "#%%\n"}
print(predictions[1])
resized_images[1]
# + pycharm={"name": "#%%\n"}
print(predictions[2])
resized_images[2]
# + pycharm={"name": "#%%\n"}
print(predictions[3])
resized_images[3]
| bin/OCR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: ir
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Displaying vegalite with R
#
# Much like IPython, the R kernel allows you to publish custom data types. When the frontend (here, nteract) receives these, it will render them if it knows how!
#
# We'll use [vegalite](https://github.com/hrbrmstr/vegalite) for a nice declarative way to compose vegalite graphs using the `%>%` operator and `IRkernel`'s `IRdisplay` library for the displaying.
# + outputHidden=false inputHidden=false
library(IRdisplay)
#'
#' Publish a vegalite chart through the Jupyter messaging protocol so that
#' frontends which understand the vegalite v1 MIME type (nteract, jupyterlab)
#' render it inline.
#'
#' @param vl Vega-Lite object
#'
to_irkernel <- function(vl) {
  spec <- vl$x
  bundle <- list("application/vnd.vegalite.v1+json" = spec)
  IRdisplay::publish_mimebundle(bundle)
}
# + outputHidden=false inputHidden=false
library(vegalite)

# Cars scatterplot: horsepower vs. miles-per-gallon, coloured by country of
# origin, built declaratively and handed to the custom display helper above.
vegalite() %>%
  cell_size(400, 400) %>%
  add_data("https://vega.github.io/vega-editor/app/data/cars.json") %>%
  encode_x("Horsepower") %>%
  encode_y("Miles_per_Gallon") %>%
  encode_color("Origin", "nominal") %>%
  mark_point() %>%
  to_irkernel
# + outputHidden=false inputHidden=false
| applications/desktop/example-notebooks/vegalite-for-r.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep Learning for Natural Language Processing with Pytorch
# This tutorial will walk you through the key ideas of deep learning programming using Pytorch.
# Many of the concepts (such as the computation graph abstraction and autograd) are not unique to Pytorch and are relevant to any deep learning tool kit out there.
#
# I am writing this tutorial to focus specifically on NLP for people who have never written code in any deep learning framework (e.g, TensorFlow, Theano, Keras, Dynet). It assumes working knowledge of core NLP problems: part-of-speech tagging, language modeling, etc. It also assumes familiarity with neural networks at the level of an intro AI class (such as one from the Russel and Norvig book). Usually, these courses cover the basic backpropagation algorithm on feed-forward neural networks, and make the point that they are chains of compositions of linearities and non-linearities. This tutorial aims to get you started writing deep learning code, given you have this prerequisite knowledge.
#
# Note this is about *models*, not data. For all of the models, I just create a few test examples with small dimensionality so you can see how the weights change as it trains. If you have some real data you want to try, you should be able to rip out any of the models from this notebook and use them on it.
# +
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
torch.manual_seed(1)
# -
# # 1. Introduction to Torch's tensor library
# All of deep learning is computations on tensors, which are generalizations of a matrix that can be indexed in more than 2 dimensions. We will see exactly what this means in-depth later. First, lets look what we can do with tensors.
# ### Creating Tensors
# Tensors can be created from Python lists with the torch.Tensor() function.
# +
# Create a torch.Tensor object with the given data. It is a 1D vector
V_data = [1., 2., 3.]
V = torch.Tensor(V_data)
print (V)

# Creates a matrix (2 rows x 3 columns)
M_data = [[1., 2., 3.], [4., 5., 6]]
M = torch.Tensor(M_data)
print (M)

# Create a 3D tensor of size 2x2x2: two stacked 2x2 matrices.
T_data = [[[1.,2.], [3.,4.]],
          [[5.,6.], [7.,8.]]]
T = torch.Tensor(T_data)
print (T)
# -
# What is a 3D tensor anyway?
# Think about it like this.
# If you have a vector, indexing into the vector gives you a scalar. If you have a matrix, indexing into the matrix gives you a vector. If you have a 3D tensor, then indexing into the tensor gives you a matrix!
#
# A note on terminology: when I say "tensor" in this tutorial, it refers to any torch.Tensor object. Vectors and matrices are special cases of torch.Tensors, where their dimension is 1 and 2 respectively. When I am talking about 3D tensors, I will explicitly use the term "3D tensor".
# +
# Index into V and get a scalar
print (V[0])

# Index into M and get a vector (the first row)
print (M[0])

# Index into T and get a matrix (the first 2x2 slice)
print (T[0])
# -
# You can also create tensors of other datatypes. The default, as you can see, is Float.
# To create a tensor of integer types, try torch.LongTensor(). Check the documentation for more data types, but Float and Long will be the most common.
# You can create a tensor with random data and the supplied dimensionality with torch.randn()
# Sample a 3x4x5 tensor from the standard normal distribution.
x = torch.randn((3, 4, 5))
print (x)
# ### Operations with Tensors
# You can operate on tensors in the ways you would expect.
# Elementwise addition of two equally-shaped tensors.
x = torch.Tensor([ 1., 2., 3. ])
y = torch.Tensor([ 4., 5., 6. ])
z = x + y
print (z)
# See [the documentation](http://pytorch.org/docs/torch.html) for a complete list of the massive number of operations available to you. They expand beyond just mathematical operations.
#
# One helpful operation that we will make use of later is concatenation.
# +
# By default, it concatenates along the first axis (concatenates rows)
x_1 = torch.randn(2, 5)
y_1 = torch.randn(3, 5)
z_1 =torch.cat([x_1, y_1])  # result is 5x5
print (z_1)

# Concatenate columns:
x_2 = torch.randn(2, 3)
y_2 = torch.randn(2, 5)
z_2 = torch.cat([x_2, y_2], 1) # second arg specifies which axis to concat along; result is 2x8
print (z_2)

# If your tensors are not compatible, torch will complain. Uncomment to see the error
# torch.cat([x_1, x_2])
# -
# ### Reshaping Tensors
# Use the .view() method to reshape a tensor.
# This method receives heavy use, because many neural network components expect their inputs to have a certain shape.
# Often you will need to reshape before passing your data to the component.
# .view() reshapes without copying data; the element count must match.
x = torch.randn(2, 3, 4)
print (x)
print (x.view(2, 12)) # Reshape to 2 rows, 12 columns
print (x.view(2, -1)) # Same as above. If one of the dimensions is -1, its size can be inferred
#
# # 2. Computation Graphs and Automatic Differentiation
# The concept of a computation graph is essential to efficient deep learning programming, because it allows you to not have to write the back propagation gradients yourself. A computation graph is simply a specification of how your data is combined to give you the output. Since the graph totally specifies what parameters were involved with which operations, it contains enough information to compute derivatives. This probably sounds vague, so lets see what is going on using the fundamental class of Pytorch: autograd.Variable.
#
# First, think from a programmers perspective. What is stored in the torch.Tensor objects we were creating above?
# Obviously the data and the shape, and maybe a few other things. But when we added two tensors together, we got an output tensor. All this output tensor knows is its data and shape. It has no idea that it was the sum of two other tensors (it could have been read in from a file, it could be the result of some other operation, etc.)
#
# The Variable class keeps track of how it was created. Lets see it in action.
# +
# Variables wrap tensor objects
x = autograd.Variable( torch.Tensor([1., 2., 3]), requires_grad=True )
# You can access the data with the .data attribute
print (x.data)

# You can also do all the same operations you did with tensors with Variables.
y = autograd.Variable( torch.Tensor([4., 5., 6]), requires_grad=True )
z = x + y
print (z.data)

# BUT z knows something extra: the function that created it, which is what
# autograd follows backwards when computing gradients.
print(z.grad_fn)
# -
# So Variables know what created them. z knows that it wasn't read in from a file, it wasn't the result of a multiplication or exponential or whatever. And if you keep following z.grad_fn, you will find yourself at x and y.
#
# But how does that help us compute a gradient?
# Lets sum up all the entries in z
# s is a scalar Variable: the sum of all entries of z.
s = z.sum()
print (s)
print (s.grad_fn)
# So now, what is the derivative of this sum with respect to the first component of x? In math, we want
# $$ \frac{\partial s}{\partial x_0} $$
# Well, s knows that it was created as a sum of the tensor z. z knows that it was the sum x + y.
# So
# $$ s = \overbrace{x_0 + y_0}^\text{$z_0$} + \overbrace{x_1 + y_1}^\text{$z_1$} + \overbrace{x_2 + y_2}^\text{$z_2$} $$
# And so s contains enough information to determine that the derivative we want is 1!
#
# Of course this glosses over the challenge of how to actually compute that derivative. The point here is that s is carrying along enough information that it is possible to compute it. In reality, the developers of Pytorch program the sum() and + operations to know how to compute their gradients, and run the back propagation algorithm. An in-depth discussion of that algorithm is beyond the scope of this tutorial.
# Lets have Pytorch compute the gradient, and see that we were right: (note if you run this block multiple times, the gradient will increment. That is because Pytorch *accumulates* the gradient into the .grad property, since for many models this is very convenient.)
s.backward() # calling .backward() on any variable will run backprop, starting from it.
print (x.grad) # each entry is ds/dx_i = 1 (re-running this cell accumulates)
# Understanding what is going on in the block below is crucial for being a successful programmer in deep learning.
# +
x = torch.randn((2,2))
y = torch.randn((2,2))
z = x + y # These are Tensor types, and backprop would not be possible

var_x = autograd.Variable( x )
var_y = autograd.Variable( y )
var_z = var_x + var_y # var_z contains enough information to compute gradients, as we saw above
print (var_z.grad_fn)

# Pulling out .data and re-wrapping deliberately severs the autograd history.
var_z_data = var_z.data # Get the wrapped Tensor object out of var_z...
new_var_z = autograd.Variable( var_z_data ) # Re-wrap the tensor in a new variable

# ... does new_var_z have information to backprop to x and y?
# NO!
print (new_var_z.grad_fn)
# And how could it? We yanked the tensor out of var_z (that is what var_z.data is). This tensor
# doesn't know anything about how it was computed. We pass it into new_var_z, and this is all the information
# new_var_z gets. If var_z_data doesn't know how it was computed, theres no way new_var_z will.
# In essence, we have broken the variable away from its past history
# -
# Here is the basic, extremely important rule for computing with autograd.Variables (note this is more general than Pytorch. There is an equivalent object in every major deep learning toolkit):
#
# ** If you want the error from your loss function to backpropogate to a component of your network, you MUST NOT break the Variable chain from that component to your loss Variable. If you do, the loss will have no idea your component exists, and its parameters can't be updated. **
#
# I say this in bold, because this error can creep up on you in very subtle ways (I will show some such ways below), and it will not cause your code to crash or complain, so you must be careful.
# # 3. Deep Learning Building Blocks: Affine maps, non-linearities and objectives
# Deep learning consists of composing linearities with non-linearities in clever ways. The introduction of non-linearities allows for powerful models. In this section, we will play with these core components, make up an objective function, and see how the model is trained.
# ### Affine Maps
# One of the core workhorses of deep learning is the affine map, which is a function $f(x)$ where
# $$ f(x) = Ax + b $$ for a matrix $A$ and vectors $x, b$. The parameters to be learned here are $A$ and $b$. Often, $b$ is referred to as the *bias* term.
# Pytorch and most other deep learning frameworks do things a little differently than traditional linear algebra. It maps the rows of the input instead of the columns. That is, the $i$'th row of the output below is the mapping of the $i$'th row of the input under $A$, plus the bias term. Look at the example below.
lin = nn.Linear(5, 3) # maps from R^5 to R^3, parameters A, b
data = autograd.Variable( torch.randn(2, 5) ) # data is 2x5. A maps from 5 to 3... can we map "data" under A?
print lin(data) # yes -- each row is mapped independently (Python 2 print statement)
# ### Non-Linearities
# First, note the following fact, which will explain why we need non-linearities in the first place.
# Suppose we have two affine maps $f(x) = Ax + b$ and $g(x) = Cx + d$. What is $f(g(x))$?
# $$ f(g(x)) = A(Cx + d) + b = ACx + (Ad + b) $$
# $AC$ is a matrix and $Ad + b$ is a vector, so we see that composing affine maps gives you an affine map.
#
# From this, you can see that if you wanted your neural network to be long chains of affine compositions, that this adds no new power to your model than just doing a single affine map.
#
# If we introduce non-linearities in between the affine layers, this is no longer the case, and we can build much more powerful models.
#
# There are a few core non-linearities. $\tanh(x), \sigma(x), \text{ReLU}(x)$ are the most common.
# You are probably wondering: "why these functions? I can think of plenty of other non-linearities."
# The reason for this is that they have gradients that are easy to compute, and computing gradients is essential for learning. For example
# $$ \frac{d\sigma}{dx} = \sigma(x)(1 - \sigma(x)) $$
#
# A quick note: although you may have learned some neural networks in your intro to AI class where $\sigma(x)$ was the default non-linearity, typically people shy away from it in practice. This is because the gradient *vanishes* very quickly as the absolute value of the argument grows. Small gradients means it is hard to learn. Most people default to tanh or ReLU.
# In pytorch, most non-linearities are in torch.functional (we have it imported as F)
# Note that non-linearites typically don't have parameters like affine maps do.
# That is, they don't have weights that are updated during training.
# relu zeroes negative entries and passes positives through unchanged.
data = autograd.Variable( torch.randn(2, 2) )
print data
print F.relu(data)
# ### Softmax and Probabilities
# The function $\text{Softmax}(x)$ is also just a non-linearity, but it is special in that it usually is the last operation done in a network. This is because it takes in a vector of real numbers and returns a probability distribution. Its definition is as follows. Let $x$ be a vector of real numbers (positive, negative, whatever, there are no constraints). Then the i'th component of $\text{Softmax}(x)$ is
# $$ \frac{\exp(x_i)}{\sum_j \exp(x_j)} $$
# It should be clear that the output is a probability distribution: each element is non-negative and the sum over all components is 1.
#
# You could also think of it as just applying an element-wise exponentiation operator to the input to make everything non-negative and then dividing by the normalization constant.
# Softmax is also in torch.functional
# NOTE(review): these calls rely on the implicit-dim form, which warns and is
# deprecated in modern PyTorch -- fine for the old version this targets.
data = autograd.Variable( torch.randn(5) )
print data
print F.softmax(data)
print F.softmax(data).sum() # Sums to 1 because it is a distribution!
print F.log_softmax(data) # theres also log_softmax
# ### Objective Functions
# The objective function is the function that your network is being trained to minimize (in which case it is often called a *loss function* or *cost function*).
# This proceeds by first choosing a training instance, running it through your neural network, and then computing the loss of the output. The parameters of the model are then updated by taking the derivative of the loss function. Intuitively, if your model is completely confident in its answer, and its answer is wrong, your loss will be high. If it is very confident in its answer, and its answer is correct, the loss will be low.
#
# The idea behind minimizing the loss function on your training examples is that your network will hopefully generalize well and have small loss on unseen examples in your dev set, test set, or in production.
# An example loss function is the *negative log likelihood loss*, which is a very common objective for multi-class classification. For supervised multi-class classification, this means training the network to minimize the negative log probability of the correct output (or equivalently, maximize the log probability of the correct output).
# # 4. Optimization and Training
# So now that we can compute a loss function for an instance, what do we do with it?
# We saw earlier that autograd.Variable's know how to compute gradients with respect to the things that were used to compute it. Well, since our loss is an autograd.Variable, we can compute gradients with respect to all of the parameters used to compute it! Then we can perform standard gradient updates. Let $\theta$ be our parameters, $L(\theta)$ the loss function, and $\eta$ a positive learning rate. Then:
#
# $$ \theta^{(t+1)} = \theta^{(t)} - \eta \nabla_\theta L(\theta) $$
#
# There are a huge collection of algorithms and active research in attempting to do something more than just this vanilla gradient update. Many attempt to vary the learning rate based on what is happening at train time. You don't need to worry about what specifically these algorithms are doing unless you are really interested. Torch provides many in the torch.optim package, and they are all completely transparent. Using the simplest gradient update is the same as the more complicated algorithms. Trying different update algorithms and different parameters for the update algorithms (like different initial learning rates) is important in optimizing your network's performance. Often, just replacing vanilla SGD with an optimizer like Adam or RMSProp will boost performance noticeably.
# # 5. Creating Network Components in Pytorch
# Before we move on to our focus on NLP, lets do an annotated example of building a network in Pytorch using only affine maps and non-linearities. We will also see how to compute a loss function, using Pytorch's built in negative log likelihood, and update parameters by backpropagation.
#
# All network components should inherit from nn.Module and override the forward() method. That is about it, as far as the boilerplate is concerned. Inheriting from nn.Module provides functionality to your component. For example, it makes it keep track of its trainable parameters, you can swap it between CPU and GPU with the .cuda() or .cpu() functions, etc.
#
# Let's write an annotated example of a network that takes in a sparse bag-of-words representation and outputs a probability distribution over two labels: "English" and "Spanish". This model is just logistic regression.
# ### Example: Logistic Regression Bag-of-Words classifier
# Our model will map a sparse BOW representation to log probabilities over labels. We assign each word in the vocab an index. For example, say our entire vocab is two words "hello" and "world", with indices 0 and 1 respectively.
# The BoW vector for the sentence "hello hello hello hello" is
# $$ \left[ 4, 0 \right] $$
# For "hello world world hello", it is
# $$ \left[ 2, 2 \right] $$
# etc.
# In general, it is
# $$ \left[ \text{Count}(\text{hello}), \text{Count}(\text{world}) \right] $$
#
# Denote this BOW vector as $x$.
# The output of our network is:
# $$ \log \text{Softmax}(Ax + b) $$
# That is, we pass the input through an affine map and then do log softmax.
# +
# Training set: (tokenized sentence, language label) pairs.
data = [ ("me gusta comer en la cafeteria".split(), "SPANISH"),
         ("Give it to me".split(), "ENGLISH"),
         ("No creo que sea una buena idea".split(), "SPANISH"),
         ("No it is not a good idea to get lost at sea".split(), "ENGLISH") ]

test_data = [ ("Yo creo que si".split(), "SPANISH"),
              ("it is lost on me".split(), "ENGLISH")]

# word_to_ix maps each word in the vocab to a unique integer, which will be its
# index into the Bag of words vector. Note the vocabulary is built over BOTH
# train and test data so no word is out-of-vocabulary at test time.
word_to_ix = {}
for sent, _ in data + test_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print word_to_ix

VOCAB_SIZE = len(word_to_ix)
NUM_LABELS = 2
# -
class BoWClassifier(nn.Module): # inheriting from nn.Module!

    def __init__(self, num_labels, vocab_size):
        """Logistic-regression classifier over bag-of-words vectors.

        :param num_labels: number of output classes (rows of A, size of b).
        :param vocab_size: dimensionality of the input BoW vector (columns of A).
        """
        # calls the init function of nn.Module. Dont get confused by syntax,
        # just always do it in an nn.Module
        super(BoWClassifier, self).__init__()

        # Define the parameters that you will need. In this case, we need A and b,
        # the parameters of the affine mapping.
        # Torch defines nn.Linear(), which provides the affine map.
        # Make sure you understand why the input dimension is vocab_size
        # and the output is num_labels!
        self.linear = nn.Linear(vocab_size, num_labels)

        # NOTE! The non-linearity log softmax does not have parameters! So we don't need
        # to worry about that here

    def forward(self, bow_vec):
        """Map a (batch x vocab_size) BoW vector to (batch x num_labels) log probabilities."""
        # Pass the input through the linear layer, then through log_softmax.
        # dim=1 normalizes over the label axis. The original used the implicit-dim
        # form, which is deprecated (and warns) in modern PyTorch; for 2D input
        # the implicit rule resolves to dim=1, so behavior is unchanged.
        return F.log_softmax(self.linear(bow_vec), dim=1)
# +
def make_bow_vector(sentence, word_to_ix):
    # Build the bag-of-words count vector for `sentence` as a 1 x |V| tensor,
    # where position word_to_ix[w] holds the number of times w occurs.
    counts = torch.zeros(len(word_to_ix))
    for token in sentence:
        counts[word_to_ix[token]] = counts[word_to_ix[token]] + 1
    return counts.view(1, -1)
def make_target(label, label_to_ix):
    # Wrap the integer class index for `label` in a 1-element LongTensor,
    # the format NLLLoss expects for a single-instance target.
    idx = label_to_ix[label]
    return torch.LongTensor([idx])
# +
model = BoWClassifier(NUM_LABELS, VOCAB_SIZE)

# the model knows its parameters. The first output below is A, the second is b.
# Whenever you assign a component to a class variable in the __init__ function of a module,
# which was done with the line
# self.linear = nn.Linear(...)
# Then through some Python magic from the Pytorch devs, your module (in this case, BoWClassifier)
# will store knowledge of the nn.Linear's parameters
for param in model.parameters():
    print param
# -
# To run the model, pass in a BoW vector, but wrapped in an autograd.Variable
sample = data[0]
bow_vector = make_bow_vector(sample[0], word_to_ix)
log_probs = model(bow_vec=autograd.Variable(bow_vector)) if False else model(autograd.Variable(bow_vector))  # NOTE(review): remove -- see below
print log_probs
# Which of the above values corresponds to the log probability of ENGLISH, and which to SPANISH? We never defined it, but we need to if we want to train the thing.
# Class-index mapping used to build training targets (see make_target).
label_to_ix = { "SPANISH": 0, "ENGLISH": 1 }
# So lets train! To do this, we pass instances through to get log probabilities, compute a loss function, compute the gradient of the loss function, and then update the parameters with a gradient step. Loss functions are provided by Torch in the nn package. nn.NLLLoss() is the negative log likelihood loss we want. It also defines optimization functions in torch.optim. Here, we will just use SGD.
#
# Note that the *input* to NLLLoss is a vector of log probabilities, and a target label. It doesn't compute the log probabilities for us. This is why the last layer of our network is log softmax.
# The loss function nn.CrossEntropyLoss() is the same as NLLLoss(), except it does the log softmax for you.
# Run on test data before we train, just to see a before-and-after
for instance, label in test_data:
    bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
    log_probs = model(bow_vec)
    print log_probs
print next(model.parameters())[:,word_to_ix["creo"]] # Print the matrix column corresponding to "creo"
# +
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)

# Usually you want to pass over the training data several times.
# 100 is much bigger than on a real data set, but real datasets have more than
# two instances. Usually, somewhere between 5 and 30 epochs is reasonable.
for epoch in xrange(100):  # xrange: this notebook targets Python 2
    for instance, label in data:
        # Step 1. Remember that Pytorch accumulates gradients. We need to clear them out
        # before each instance
        model.zero_grad()

        # Step 2. Make our BOW vector and also we must wrap the target in a Variable
        # as an integer. For example, if the target is SPANISH, then we wrap the integer
        # 0. The loss function then knows that the 0th element of the log probabilities is
        # the log probability corresponding to SPANISH
        bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
        target = autograd.Variable(make_target(label, label_to_ix))

        # Step 3. Run our forward pass.
        log_probs = model(bow_vec)

        # Step 4. Compute the loss, gradients, and update the parameters by calling
        # optimizer.step()
        loss = loss_function(log_probs, target)
        loss.backward()
        optimizer.step()
# -
# After training: the model should now assign higher log probability to the
# correct language for each held-out sentence.
for instance, label in test_data:
    bow_vec = autograd.Variable(make_bow_vector(instance, word_to_ix))
    log_probs = model(bow_vec)
    print log_probs
print next(model.parameters())[:,word_to_ix["creo"]] # Index corresponding to Spanish goes up, English goes down!
# We got the right answer! You can see that the log probability for Spanish is much higher in the first example, and the log probability for English is much higher in the second for the test data, as it should be.
#
# Now you see how to make a Pytorch component, pass some data through it and do gradient updates.
# We are ready to dig deeper into what deep NLP has to offer.
# # 6. Word Embeddings: Encoding Lexical Semantics
# Word embeddings are dense vectors of real numbers, one per word in your vocabulary.
# In NLP, it is almost always the case that your features are words! But how should you represent a word in a computer?
# You could store its ascii character representation, but that only tells you what the word *is*, it doesn't say much about what it *means* (you might be able to derive its part of speech from its affixes, or properties from its capitalization, but not much). Even more, in what sense could you combine these representations?
# We often want dense outputs from our neural networks, where the inputs are $|V|$ dimensional, where $V$ is our vocabulary, but often the outputs are only a few dimensional (if we are only predicting a handful of labels, for instance). How do we get from a massive dimensional space to a smaller dimensional space?
#
# How about instead of ascii representations, we use a one-hot encoding? That is, we represent the word $w$ by
# $$ \overbrace{\left[ 0, 0, \dots, 1, \dots, 0, 0 \right]}^\text{|V| elements} $$
# where the 1 is in a location unique to $w$. Any other word will have a 1 in some other location, and a 0 everywhere else.
#
# There is an enormous drawback to this representation, besides just how huge it is. It basically treats all words as independent entities with no relation to each other. What we really want is some notion of *similarity* between words. Why? Let's see an example.
# Suppose we are building a language model. Suppose we have seen the sentences
# * The mathematician ran to the store.
# * The physicist ran to the store.
# * The mathematician solved the open problem.
#
# in our training data.
# Now suppose we get a new sentence never before seen in our training data:
# * The physicist solved the open problem.
#
# Our language model might do OK on this sentence, but wouldn't it be much better if we could use the following two facts:
# * We have seen mathematician and physicist in the same role in a sentence. Somehow they have a semantic relation.
# * We have seen mathematician in the same role in this new unseen sentence as we are now seeing physicist.
#
# and then infer that physicist is actually a good fit in the new unseen sentence? This is what we mean by a notion of similarity: we mean *semantic similarity*, not simply having similar orthographic representations. It is a technique to combat the sparsity of linguistic data, by connecting the dots between what we have seen and what we haven't. This example of course relies on a fundamental linguistic assumption: that words appearing in similar contexts are related to each other semantically. This is called the [distributional hypothesis](https://en.wikipedia.org/wiki/Distributional_semantics).
# ### Getting Dense Word Embeddings
#
# How can we solve this problem? That is, how could we actually encode semantic similarity in words?
# Maybe we think up some semantic attributes. For example, we see that both mathematicians and physicists can run, so maybe we give these words a high score for the "is able to run" semantic attribute. Think of some other attributes, and imagine what you might score some common words on those attributes.
#
# If each attribute is a dimension, then we might give each word a vector, like this:
# $$ q_\text{mathematician} = \left[ \overbrace{2.3}^\text{can run},
# \overbrace{9.4}^\text{likes coffee}, \overbrace{-5.5}^\text{majored in Physics}, \dots \right] $$
# $$ q_\text{physicist} = \left[ \overbrace{2.5}^\text{can run},
# \overbrace{9.1}^\text{likes coffee}, \overbrace{6.4}^\text{majored in Physics}, \dots \right] $$
#
# Then we can get a measure of similarity between these words by doing:
# $$ \text{Similarity}(\text{physicist}, \text{mathematician}) = q_\text{physicist} \cdot q_\text{mathematician} $$
#
# Although it is more common to normalize by the lengths:
# $$ \text{Similarity}(\text{physicist}, \text{mathematician}) = \frac{q_\text{physicist} \cdot q_\text{mathematician}}
# {\| q_\text{physicist} \| \| q_\text{mathematician} \|} = \cos (\phi) $$
# Where $\phi$ is the angle between the two vectors. That way, extremely similar words (words whose embeddings point in the same direction) will have similarity 1. Extremely dissimilar words should have similarity -1.
# You can think of the sparse one-hot vectors from the beginning of this section as a special case of these new vectors we have defined, where each word basically has similarity 0, and we gave each word some unique semantic attribute. These new vectors are *dense*, which is to say their entries are (typically) non-zero.
#
# But these new vectors are a big pain: you could think of thousands of different semantic attributes that might be relevant to determining similarity, and how on earth would you set the values of the different attributes? Central to the idea of deep learning is that the neural network learns representations of the features, rather than requiring the programmer to design them herself. So why not just let the word embeddings be parameters in our model, and then be updated during training? This is exactly what we will do. We will have some *latent semantic attributes* that the network can, in principle, learn. Note that the word embeddings will probably not be interpretable. That is, although with our hand-crafted vectors above we can see that mathematicians and physicists are similar in that they both like coffee, if we allow a neural network to learn the embeddings and see that both mathematicians and physicisits have a large value in the second dimension, it is not clear what that means. They are similar in some latent semantic dimension, but this probably has no interpretation to us.
# In summary, **word embeddings are a representation of the *semantics* of a word, efficiently encoding semantic information that might be relevant to the task at hand**. You can embed other things too: part of speech tags, parse trees, anything! The idea of feature embeddings is central to the field.
# ### Word Embeddings in Pytorch
# Before we get to a worked example and an exercise, a few quick notes about how to use embeddings in Pytorch and in deep learning programming in general.
# Similar to how we defined a unique index for each word when making one-hot vectors, we also need to define an index for each word when using embeddings. These will be keys into a lookup table. That is, embeddings are stored as a $|V| \times D$ matrix, where $D$ is the dimensionality of the embeddings, such that the word assigned index $i$ has its embedding stored in the $i$'th row of the matrix. In all of my code, the mapping from words to indices is a dictionary named word_to_ix.
#
# The module that allows you to use embeddings is torch.nn.Embedding, which takes two arguments: the vocabulary size, and the dimensionality of the embeddings.
#
# To index into this table, you must use torch.LongTensor (since the indices are integers, not floats).
# Minimal embedding lookup demo: map a word to its index, then to its vector.
word_to_ix = { "hello": 0, "world": 1 }
embeds = nn.Embedding(2, 5) # 2 words in vocab, 5 dimensional embeddings
# Embedding tables are indexed with integer (Long) tensors, not floats.
lookup_tensor = torch.LongTensor([word_to_ix["hello"]])
hello_embed = embeds( autograd.Variable(lookup_tensor) )
# Single-argument print() prints identically under Python 2 and 3.
print(hello_embed)
# ### An Example: N-Gram Language Modeling
# Recall that in an n-gram language model, given a sequence of words $w$, we want to compute
# $$ P(w_i | w_{i-1}, w_{i-2}, \dots, w_{i-n+1} ) $$
# Where $w_i$ is the ith word of the sequence.
#
# In this example, we will compute the loss function on some training examples and update the parameters with backpropagation.
CONTEXT_SIZE = 2
EMBEDDING_DIM = 10
# We will use Shakespeare Sonnet 2
test_sentence = """When forty winters shall besiege thy brow,
And dig deep trenches in thy beauty's field,
Thy youth's proud livery so gazed on now,
Will be a totter'd weed of small worth held:
Then being asked, where all thy beauty lies,
Where all the treasure of thy lusty days;
To say, within thine own deep sunken eyes,
Were an all-eating shame, and thriftless praise.
How much more praise deserv'd thy beauty's use,
If thou couldst answer 'This fair child of mine
Shall sum my count, and make my old excuse,'
Proving his beauty by succession thine!
This were to be new made when thou art old,
And see thy blood warm when thou feel'st it cold.""".split()
# we should tokenize the input, but we will ignore that for now
# build a list of tuples. Each tuple is ([ word_i-2, word_i-1 ], target word)
# zip against shifted copies of the sentence; works the same in Python 2 and 3,
# unlike the original xrange-based indexing loop.
trigrams = [ ([w1, w2], w3) for w1, w2, w3 in zip(test_sentence, test_sentence[1:], test_sentence[2:]) ]
print(trigrams[:3]) # print the first 3, just so you can see what they look like
# NOTE: set() ordering is not deterministic across runs, so word indices vary;
# only the word -> unique-index property is guaranteed.
vocab = set(test_sentence)
word_to_ix = { word: i for i, word in enumerate(vocab) }
class NGramLanguageModeler(nn.Module):
    """N-gram language model: predict a word from the context_size preceding words.

    forward() takes a LongTensor of context_size word indices and returns a
    (1, vocab_size) tensor of log-probabilities over the next word.
    """
    def __init__(self, vocab_size, embedding_dim, context_size):
        super(NGramLanguageModeler, self).__init__()
        # One embedding row per vocabulary word.
        self.embeddings = nn.Embedding(vocab_size, embedding_dim)
        # Context embeddings are concatenated, hence context_size * embedding_dim inputs.
        self.linear1 = nn.Linear(context_size * embedding_dim, 128)
        self.linear2 = nn.Linear(128, vocab_size)
    def forward(self, inputs):
        # (context_size,) indices -> (1, context_size * embedding_dim)
        embeds = self.embeddings(inputs).view((1, -1))
        out = F.relu(self.linear1(embeds))
        out = self.linear2(out)
        # Explicit dim=1 normalizes over the vocabulary axis; the implicit-dim
        # form of F.log_softmax is deprecated.
        log_probs = F.log_softmax(out, dim=1)
        return log_probs
# +
losses = []
loss_function = nn.NLLLoss()
model = NGramLanguageModeler(len(vocab), EMBEDDING_DIM, CONTEXT_SIZE)
optimizer = optim.SGD(model.parameters(), lr=0.001)
for epoch in range(10):
    total_loss = torch.Tensor([0])
    for context, target in trigrams:
        # Step 1. Prepare the inputs to be passed to the model: turn the words
        # into integer indices and wrap them in a Variable. A list comprehension
        # (rather than py2-only map()) guarantees LongTensor gets a real list.
        context_idxs = [word_to_ix[w] for w in context]
        context_var = autograd.Variable( torch.LongTensor(context_idxs) )
        # Step 2. Recall that torch *accumulates* gradients. Before passing in a new instance,
        # you need to zero out the gradients from the old instance
        model.zero_grad()
        # Step 3. Run the forward pass, getting log probabilities over next words
        log_probs = model(context_var)
        # Step 4. Compute your loss function. (Again, Torch wants the target word wrapped in a variable)
        loss = loss_function(log_probs, autograd.Variable(torch.LongTensor([word_to_ix[target]])))
        # Step 5. Do the backward pass and update the gradient
        loss.backward()
        optimizer.step()
        total_loss += loss.data
    losses.append(total_loss)
print(losses) # The loss decreased every iteration over the training data!
# -
# ### Exercise: Computing Word Embeddings: Continuous Bag-of-Words
# The Continuous Bag-of-Words model (CBOW) is frequently used in NLP deep learning. It is a model that tries to predict words given the context of a few words before and a few words after the target word. This is distinct from language modeling, since CBOW is not sequential and does not have to be probabilistic. Typcially, CBOW is used to quickly train word embeddings, and these embeddings are used to initialize the embeddings of some more complicated model. Usually, this is referred to as *pretraining embeddings*. It almost always helps performance a couple of percent.
#
# The CBOW model is as follows. Given a target word $w_i$ and an $N$ context window on each side, $w_{i-1}, \dots, w_{i-N}$ and $w_{i+1}, \dots, w_{i+N}$, referring to all context words collectively as $C$, CBOW tries to minimize
# $$ -\log p(w_i | C) = -\log \text{Softmax}(A(\sum_{w \in C} q_w) + b)_{w_i} $$
# where $q_w$ is the embedding for word $w$.
#
# Implement this model in Pytorch by filling in the class below. Some tips:
# * Think about which parameters you need to define.
# * Make sure you know what shape each operation expects. Use .view() if you need to reshape.
CONTEXT_SIZE = 2 # 2 words to the left, 2 to the right
raw_text = """We are about to study the idea of a computational process. Computational processes are abstract
beings that inhabit computers. As they evolve, processes manipulate other abstract
things called data. The evolution of a process is directed by a pattern of rules
called a program. People create programs to direct processes. In effect,
we conjure the spirits of the computer with our spells.""".split()
# NOTE: set() ordering is not deterministic across runs; only uniqueness of
# the indices is guaranteed.
word_to_ix = { word: i for i, word in enumerate(set(raw_text)) }
# Each example: the four surrounding words as context, the middle word as target.
data = []
for i in range(2, len(raw_text) - 2):
    context = [ raw_text[i-2], raw_text[i-1], raw_text[i+1], raw_text[i+2] ]
    target = raw_text[i]
    data.append( (context, target) )
print(data[:5])
class CBOW(nn.Module):
    """Exercise stub: implement the CBOW architecture described above."""
    def __init__(self):
        # TODO(exercise): define the embedding table and the affine map
        # (embedding layer + linear layer, per the equation above).
        pass
    def forward(self, inputs):
        # TODO(exercise): sum the context word embeddings, apply the affine
        # map, and return log-probabilities over the vocabulary.
        pass
# +
# create your model and train. here are some functions to help you make the data ready for use by your module
def make_context_vector(context, word_to_ix):
    """Map a list of words to a Variable-wrapped LongTensor of their indices.

    Uses a list comprehension instead of map(): identical in Python 2, and
    still a real list under Python 3 (torch.LongTensor rejects lazy iterators).
    """
    idxs = [word_to_ix[w] for w in context]
    tensor = torch.LongTensor(idxs)
    return autograd.Variable(tensor)
make_context_vector(data[0][0], word_to_ix) # example: index vector for the first training context
# -
# # 7. Sequence Models and Long-Short Term Memory Networks
# At this point, we have seen various feed-forward networks.
# That is, there is no state maintained by the network at all.
# This might not be the behavior we want.
# Sequence models are central to NLP: they are models where there is some sort of dependence through time between your inputs.
# The classical example of a sequence model is the Hidden Markov Model for part-of-speech tagging. Another example is the conditional random field.
#
# A recurrent neural network is a network that maintains some kind of state.
# For example, its output could be used as part of the next input, so that information can propagate along as the network passes over the sequence.
# In the case of an LSTM, for each element in the sequence, there is a corresponding *hidden state* $h_t$, which in principle can contain information from arbitrary points earlier in the sequence.
# We can use the hidden state to predict words in a language model, part-of-speech tags, and a myriad of other things.
# ### LSTM's in Pytorch
#
# Before getting to the example, note a few things.
# Pytorch's LSTM expects all of its inputs to be 3D tensors.
# The semantics of the axes of these tensors is important.
# The first axis is the sequence itself, the second indexes instances in the mini-batch, and the third indexes elements of the input.
# We haven't discussed mini-batching, so lets just ignore that and assume we will always have just 1 dimension on the second axis.
# If we want to run the sequence model over the sentence "The cow jumped", our input should look like
# $$
# \begin{bmatrix}
# \overbrace{q_\text{The}}^\text{row vector} \\
# q_\text{cow} \\
# q_\text{jumped}
# \end{bmatrix}
# $$
# Except remember there is an additional 2nd dimension with size 1.
#
# In addition, you could go through the sequence one at a time, in which case the 1st axis will have size 1 also.
#
# Let's see a quick example.
# +
lstm = nn.LSTM(3, 3) # Input dim is 3, output dim is 3
inputs = [ autograd.Variable(torch.randn((1,3))) for _ in range(5) ] # make a sequence of length 5
# initialize the hidden state: a (h_0, c_0) pair, each (num_layers, batch, hidden).
hidden = (autograd.Variable(torch.randn(1,1,3)), autograd.Variable(torch.randn((1,1,3))))
for i in inputs:
    # Step through the sequence one element at a time.
    # after each step, hidden contains the hidden state.
    out, hidden = lstm(i.view(1,1,-1), hidden)
# alternatively, we can do the entire sequence all at once.
# the first value returned by LSTM is all of the hidden states throughout the sequence.
# the second is just the most recent hidden state (compare the last slice of "out" with "hidden" below,
# they are the same)
# The reason for this is that:
# "out" will give you access to all hidden states in the sequence
# "hidden" will allow you to continue the sequence and backpropagate, by passing it as an argument
# to the lstm at a later time
inputs = torch.cat(inputs).view(len(inputs), 1, -1) # Add the extra 2nd dimension
hidden = (autograd.Variable(torch.randn(1,1,3)), autograd.Variable(torch.randn((1,1,3)))) # clean out hidden state
out, hidden = lstm(inputs, hidden)
print(out)
print(hidden)
# -
# ### Example: An LSTM for Part-of-Speech Tagging
# In this section, we will use an LSTM to get part of speech tags.
# We will not use Viterbi or Forward-Backward or anything like that, but as a (challenging) exercise to the reader, think about how Viterbi could be used after you have seen what is going on.
#
# The model is as follows: let our input sentence be $w_1, \dots, w_M$, where $w_i \in V$, our vocab.
# Also, let $T$ be our tag set, and $y_i$ the tag of word $w_i$. Denote our prediction of the tag of word $w_i$ by $\hat{y}_i$.
#
# This is a structured prediction model, where our output is a sequence $\hat{y}_1, \dots, \hat{y}_M$, where $\hat{y}_i \in T$.
#
# To do the prediction, pass an LSTM over the sentence. Denote the hidden state at timestep $i$ as $h_i$. Also, assign each tag a unique index (like how we had word_to_ix in the word embeddings section).
# Then our prediction rule for $\hat{y}_i$ is
# $$ \hat{y}_i = \text{argmax}_j \ (\log \text{Softmax}(Ah_i + b))_j $$
# That is, take the log softmax of the affine map of the hidden state, and the predicted tag is the tag that has the maximum value in this vector. Note this implies immediately that the dimensionality of the target space of $A$ is $|T|$.
def prepare_sequence(seq, to_ix):
    """Map a sequence of tokens to a Variable-wrapped LongTensor of indices.

    Uses a list comprehension instead of map(): identical in Python 2, and
    still a real list under Python 3 (torch.LongTensor rejects lazy iterators).
    """
    idxs = [to_ix[w] for w in seq]
    tensor = torch.LongTensor(idxs)
    return autograd.Variable(tensor)
# +
# Toy POS-tagging corpus: (token list, tag list) pairs.
training_data = [
    ("The dog ate the apple".split(), ["DET", "NN", "V", "DET", "NN"]),
    ("Everybody read that book".split(), ["NN", "V", "DET", "NN"])
]
# Assign each word the next free index the first time it is seen.
word_to_ix = {}
for sent, tags in training_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print(word_to_ix)
tag_to_ix = {"DET": 0, "NN": 1, "V": 2}
# These will usually be more like 32 or 64 dimensional.
# We will keep them small, so we can see how the weights change as we train.
EMBEDDING_DIM = 6
HIDDEN_DIM = 6
# -
class LSTMTagger(nn.Module):
    """LSTM sequence tagger: word indices -> per-word log-probabilities over tags."""
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
        # The LSTM takes word embeddings as inputs, and outputs hidden states
        # with dimensionality hidden_dim.
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)
        # The linear layer that maps from hidden state space to tag space
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
        self.hidden = self.init_hidden()
    def init_hidden(self):
        # Before we've done anything, we dont have any hidden state.
        # The axes semantics are (num_layers, minibatch_size, hidden_dim)
        return (autograd.Variable(torch.zeros(1, 1, self.hidden_dim)),
                autograd.Variable(torch.zeros(1, 1, self.hidden_dim)))
    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        # LSTM input shape: (seq_len, batch=1, embedding_dim).
        lstm_out, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1), self.hidden)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        # Explicit dim=1 normalizes over tags for each word; the implicit-dim
        # form of F.log_softmax is deprecated.
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
# See what the scores are before training
# Note that element i,j of the output is the score for tag j for word i.
inputs = prepare_sequence(training_data[0][0], word_to_ix)
tag_scores = model(inputs)
print(tag_scores)
for epoch in range(300): # again, normally you would NOT do 300 epochs, it is toy data
    for sentence, tags in training_data:
        # Step 1. Remember that Pytorch accumulates gradients. We need to clear them out
        # before each instance
        model.zero_grad()
        # Also, we need to clear out the hidden state of the LSTM, detaching it from its
        # history on the last instance.
        model.hidden = model.init_hidden()
        # Step 2. Get our inputs ready for the network, that is, turn them into Variables
        # of word indices.
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = prepare_sequence(tags, tag_to_ix)
        # Step 3. Run our forward pass.
        tag_scores = model(sentence_in)
        # Step 4. Compute the loss, gradients, and update the parameters by calling
        # optimizer.step()
        loss = loss_function(tag_scores, targets)
        loss.backward()
        optimizer.step()
# See what the scores are after training
inputs = prepare_sequence(training_data[0][0], word_to_ix)
tag_scores = model(inputs)
# The sentence is "the dog ate the apple". i,j corresponds to score for tag j for word i.
# The predicted tag is the maximum scoring tag.
# Here, we can see the predicted sequence below is 0 1 2 0 1
# since 0 is index of the maximum value of row 1,
# 1 is the index of maximum value of row 2, etc.
# Which is DET NOUN VERB DET NOUN, the correct sequence!
print(tag_scores)
# ### Exercise: Augmenting the LSTM part-of-speech tagger with character-level features
# In the example above, each word had an embedding, which served as the inputs to our sequence model.
# Let's augment the word embeddings with a representation derived from the characters of the word.
# We expect that this should help significantly, since character-level information like affixes have
# a large bearing on part-of-speech. For example, words with the affix *-ly* are almost always tagged as adverbs in English.
#
# To do this, let $c_w$ be the character-level representation of word $w$. Let $x_w$ be the word embedding as before.
# Then the input to our sequence model is the concatenation of $x_w$ and $c_w$. So if $x_w$ has dimension 5, and $c_w$ dimension 3, then our LSTM should accept an input of dimension 8.
#
# To get the character level representation, do an LSTM over the characters of a word, and let $c_w$ be the final hidden state of this LSTM.
# Hints:
# * There are going to be two LSTM's in your new model. The original one that outputs POS tag scores, and the new one that outputs a character-level representation of each word.
# * To do a sequence model over characters, you will have to embed characters. The character embeddings will be the input to the character LSTM.
# # 8. Advanced: Dynamic Toolkits, Dynamic Programming, and the BiLSTM-CRF
# ### Dynamic versus Static Deep Learning Toolkits
#
# Pytorch is a *dynamic* neural network kit. Another example of a dynamic kit is [Dynet](https://github.com/clab/dynet) (I mention this because working with Pytorch and Dynet is similar. If you see an example in Dynet, it will probably help you implement it in Pytorch). The opposite is the *static* tool kit, which includes Theano, Keras, TensorFlow, etc.
# The core difference is the following:
# * In a static toolkit, you define a computation graph once, compile it, and then stream instances to it.
# * In a dynamic toolkit, you define a computation graph *for each instance*. It is never compiled and is executed on-the-fly
#
# Without a lot of experience, it is difficult to appreciate the difference.
# One example is to suppose we want to build a deep constituent parser.
# Suppose our model involves roughly the following steps:
# * We build the tree bottom up
# * Tag the root nodes (the words of the sentence)
# * From there, use a neural network and the embeddings of the words
# to find combinations that form constituents. Whenever you form a new constituent,
# use some sort of technique to get an embedding of the constituent.
# In this case, our network architecture will depend completely on the input sentence.
# In the sentence "The green cat scratched the wall", at some point in the model, we will want to combine
# the span $(i,j,r) = (1, 3, \text{NP})$ (that is, an NP constituent spans word 1 to word 3, in this case "The green cat").
#
# However, another sentence might be "Somewhere, the big fat cat scratched the wall". In this sentence, we will want to form the constituent $(2, 4, NP)$ at some point.
# The constituents we will want to form will depend on the instance. If we just compile the computation graph once, as in a static toolkit, it will be exceptionally difficult or impossible to program this logic. In a dynamic toolkit though, there isn't just 1 pre-defined computation graph. There can be a new computation graph for each instance, so this problem goes away.
#
# Dynamic toolkits also have the advantage of being easier to debug and the code more closely resembling the host language (by that I mean that Pytorch and Dynet look more like actual Python code than Keras or Theano).
#
# I mention this distinction here, because the exercise in this section is to implement a model which closely resembles structure perceptron, and I believe this model would be difficult to implement in a static toolkit. I think that the advantage of dynamic toolkits for linguistic structure prediction cannot be overstated.
# ### Bi-LSTM Conditional Random Field Discussion
#
# For this section, we will see a full, complicated example of a Bi-LSTM Conditional Random Field for named-entity recognition. The LSTM tagger above is typically sufficient for part-of-speech tagging, but a sequence model like the CRF is really essential for strong performance on NER. Familiarity with CRF's is assumed. Although this name sounds scary, all the model is is a CRF but where an LSTM provides the features. This is an advanced model though, far more complicated than any earlier model in this tutorial. If you want to skip it, that is fine. To see if you're ready, see if you can:
#
# * Write the recurrence for the viterbi variable at step i for tag k.
# * Modify the above recurrence to compute the forward variables instead.
# * Modify again the above recurrence to compute the forward variables in log-space (hint: log-sum-exp)
#
# If you can do those three things, you should be able to understand the code below.
# Recall that the CRF computes a conditional probability. Let $y$ be a tag sequence and $x$ an input sequence of words. Then we compute
# $$ P(y|x) = \frac{\exp{(\text{Score}(x, y)})}{\sum_{y'} \exp{(\text{Score}(x, y')})} $$
#
# Where the score is determined by defining some log potentials $\log \psi_i(x,y)$ such that
# $$ \text{Score}(x,y) = \sum_i \log \psi_i(x,y) $$
# To make the partition function tractable, the potentials must look only at local features.
#
# In the Bi-LSTM CRF, we define two kinds of potentials: emission and transition. The emission potential for the word at index $i$ comes from the hidden state of the Bi-LSTM at timestep $i$. The transition scores are stored in a $|T| \times |T|$ matrix $\textbf{P}$, where $T$ is the tag set. In my implementation, $\textbf{P}_{j,k}$ is the score of transitioning to tag $j$ from tag $k$. So:
#
# $$ \text{Score}(x,y) = \sum_i \log \psi_\text{EMIT}(y_i \rightarrow x_i) + \log \psi_\text{TRANS}(y_{i-1} \rightarrow y_i) $$
# $$ = \sum_i h_i[y_i] + \textbf{P}_{y_i, y_{i-1}} $$
# where in this second expression, we think of the tags as being assigned unique non-negative indices.
#
# If the above discussion was too brief, you can check out [this](http://www.cs.columbia.edu/%7Emcollins/crf.pdf) write up from Michael Collins on CRFs.
#
# ### The Forward Algorithm in Log-Space and the Log-Sum-Exp Trick
#
# As hinted at above, computing the forward variables requires using a log-sum-exp. I want to explain why, since it was a little confusing to me at first, and many resources just present the forward algorithm in potential space. The recurrence for the forward variable at the $i$'th word for the tag $j$, $\alpha_i(j)$, is
# $$ \alpha_i(j) = \sum_{j' \in T} \psi_\text{EMIT}(j \rightarrow i) \times \psi_\text{TRANS}(j' \rightarrow j) \times \alpha_{i-1}(j') $$
#
# This is numerically unstable, and underflow is likely. It is also inconvenient to work with proper non-negative potentials in our model. We instead want to compute $\log \alpha_i(j)$. What we need to do is to multiply the potentials, which corresponds to adding log potentials. Then, we have to sum over tags, but what is the corresponding operation to summing over tags in log space? It is not clear. Instead, we need to transform out of log-space, take the product of potentials, do the sum over tags, and then transform back to log space. This is broken down in the revised recurrence below:
#
# $$ \log \alpha_i(j) = \log \overbrace{\sum_{j' \in T} \exp{(\log \psi_\text{EMIT}(j \rightarrow i) + \log \psi_\text{TRANS}(j' \rightarrow j) + \log \alpha_{i-1}(j'))}}^\text{transform out of log-space and compute forward variable} $$
#
# If you carry out elementary exponential / logarithm identities in the stuff under the overbrace above, you will see that it computes the same thing as the first recurrence, then just takes the logarithm. Log-sum-exp appears a fair bit in machine learning, and there is a [well-known trick](https://en.wikipedia.org/wiki/LogSumExp) to computing it in a numerically stable way. I use this trick in my log_sum_exp function below (I don't think Pytorch provides this function in its library).
#
# ### Implementation Notes
#
# The example below implements the forward algorithm in log space to compute the partition function, and the viterbi algorithm to decode. Backpropagation will compute the gradients automatically for us. We don't have to do anything by hand.
#
# The implementation is not optimized. If you understand what is going on, you'll probably quickly see that iterating over the next tag in the forward algorithm could probably be done in one big operation. I wanted the code to be more readable. If you want to make the relevant change, you could probably use this tagger for real tasks.
# ### Example: Bidirectional LSTM Conditional Random Field for Named-Entity Recognition
# +
# Helper functions to make the code more readable.
def to_scalar(var):
    # Unwrap a single-element tensor/Variable into a plain Python number.
    flat = var.view(-1)
    return flat.data.tolist()[0]
def argmax(vec):
    # Index of the largest entry along dim 1, as a Python int.
    best_idx = torch.max(vec, 1)[1]
    return to_scalar(best_idx)
# Compute log sum exp in a numerically stable way for the forward algorithm
def log_sum_exp(vec):
    # Shift by the row maximum so exp() cannot overflow, then shift back.
    max_score = vec[0, argmax(vec)]
    shifted = vec - max_score.view(1, -1).expand(1, vec.size()[1])
    return max_score + torch.log(torch.sum(torch.exp(shifted)))
class BiLSTM_CRF(nn.Module):
def __init__(self, vocab_size, tag_to_ix, embedding_dim, hidden_dim):
super(BiLSTM_CRF, self).__init__()
self.embedding_dim = embedding_dim
self.hidden_dim = hidden_dim
self.vocab_size = vocab_size
self.tag_to_ix = tag_to_ix
self.tagset_size = len(tag_to_ix)
self.word_embeds = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim/2, num_layers=1, bidirectional=True)
# Maps the output of the LSTM into tag space.
self.hidden2tag = nn.Linear(hidden_dim, self.tagset_size)
# Matrix of transition parameters. Entry i,j is the score of transitioning *to* i *from* j.
self.transitions = nn.Parameter(torch.randn(self.tagset_size, self.tagset_size))
# These two statements enforce the constraint that we never transfer *to* the start tag,
# and we never transfer *from* the stop tag (the model would probably learn this anyway,
# so this enforcement is likely unimportant)
self.transitions.data[tag_to_ix[START_TAG], :] = -10000
self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000
self.hidden = self.init_hidden()
def init_hidden(self):
return ( autograd.Variable( torch.randn(2, 1, self.hidden_dim)),
autograd.Variable( torch.randn(2, 1, self.hidden_dim)) )
def _forward_alg(self, feats):
# Do the forward algorithm to compute the partition function
init_alphas = torch.Tensor(1, self.tagset_size).fill_(-10000.)
# START_TAG has all of the score.
init_alphas[0][self.tag_to_ix[START_TAG]] = 0.
# Wrap in a variable so that we will get automatic backprop
forward_var = autograd.Variable(init_alphas)
# Iterate through the sentence
for feat in feats:
alphas_t = [] # The forward variables at this timestep
for next_tag in xrange(self.tagset_size):
# broadcast the emission score: it is the same regardless of the previous tag
emit_score = feat[next_tag].view(1, -1).expand(1, self.tagset_size)
# the ith entry of trans_score is the score of transitioning to next_tag from i
trans_score = self.transitions[next_tag].view(1, -1)
# The ith entry of next_tag_var is the value for the edge (i -> next_tag)
# before we do log-sum-exp
next_tag_var = forward_var + trans_score + emit_score
# The forward variable for this tag is log-sum-exp of all the scores.
alphas_t.append(log_sum_exp(next_tag_var))
forward_var = torch.cat(alphas_t).view(1, -1)
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
alpha = log_sum_exp(terminal_var)
return alpha
def _get_lstm_features(self, sentence):
self.hidden = self.init_hidden()
embeds = self.word_embeds(sentence).view(len(sentence), 1, -1)
lstm_out, self.hidden = self.lstm(embeds)
lstm_out = lstm_out.view(len(sentence), self.hidden_dim)
lstm_feats = self.hidden2tag(lstm_out)
return lstm_feats
def _score_sentence(self, feats, tags):
# Gives the score of a provided tag sequence
score = autograd.Variable( torch.Tensor([0]) )
tags = torch.cat( [torch.LongTensor([self.tag_to_ix[START_TAG]]), tags] )
for i, feat in enumerate(feats):
score = score + self.transitions[tags[i+1], tags[i]] + feat[tags[i+1]]
score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]]
return score
def _viterbi_decode(self, feats):
backpointers = []
# Initialize the viterbi variables in log space
init_vvars = torch.Tensor(1, self.tagset_size).fill_(-10000.)
init_vvars[0][self.tag_to_ix[START_TAG]] = 0
# forward_var at step i holds the viterbi variables for step i-1
forward_var = autograd.Variable(init_vvars)
for feat in feats:
bptrs_t = [] # holds the backpointers for this step
viterbivars_t = [] # holds the viterbi variables for this step
for next_tag in range(self.tagset_size):
# next_tag_var[i] holds the viterbi variable for tag i at the previous step,
# plus the score of transitioning from tag i to next_tag.
# We don't include the emission scores here because the max
# does not depend on them (we add them in below)
next_tag_var = forward_var + self.transitions[next_tag]
best_tag_id = argmax(next_tag_var)
bptrs_t.append(best_tag_id)
viterbivars_t.append(next_tag_var[0][best_tag_id])
# Now add in the emission scores, and assign forward_var to the set
# of viterbi variables we just computed
forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1)
backpointers.append(bptrs_t)
# Transition to STOP_TAG
terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]]
best_tag_id = argmax(terminal_var)
path_score = terminal_var[0][best_tag_id]
# Follow the back pointers to decode the best path.
best_path = [best_tag_id]
for bptrs_t in reversed(backpointers):
best_tag_id = bptrs_t[best_tag_id]
best_path.append(best_tag_id)
# Pop off the start tag (we dont want to return that to the caller)
start = best_path.pop()
assert start == self.tag_to_ix[START_TAG] # Sanity check
best_path.reverse()
return path_score, best_path
def neg_log_likelihood(self, sentence, tags):
self.hidden = self.init_hidden()
feats = self._get_lstm_features(sentence)
forward_score = self._forward_alg(feats)
gold_score = self._score_sentence(feats, tags)
return forward_score - gold_score
def forward(self, sentence):  # not to be confused with _forward_alg above
    """Predict the best tag sequence for a sentence.

    Returns (score, tag_seq): the Viterbi path score and the decoded
    tag-index list.
    """
    self.hidden = self.init_hidden()
    # Emission scores from the BiLSTM feed the Viterbi decoder, which
    # finds the best path through the CRF.
    best_score, best_sequence = self._viterbi_decode(
        self._get_lstm_features(sentence))
    return best_score, best_sequence
# +
# Special CRF boundary tags and (deliberately tiny) toy dimensions.
START_TAG = "<START>"
STOP_TAG = "<STOP>"
EMBEDDING_DIM = 5
HIDDEN_DIM = 4
# Make up some training data
training_data = [ (
    "the wall street journal reported today that apple corporation made money".split(),
    "B I I I O O O B I O O".split()
), (
    "georgia tech is a university in georgia".split(),
    "B I O O O O B".split()
) ]
# Build the vocabulary: each distinct word gets the next free index.
word_to_ix = {}
for sentence, tags in training_data:
    for word in sentence:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
tag_to_ix = { "B": 0, "I": 1, "O": 2, START_TAG: 3, STOP_TAG: 4 }
# -
model = BiLSTM_CRF(len(word_to_ix), tag_to_ix, EMBEDDING_DIM, HIDDEN_DIM)
optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4)
# Check predictions before training
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
precheck_tags = torch.LongTensor([ tag_to_ix[t] for t in training_data[0][1] ])
# Python 2 print statement (this notebook's kernel is python2).
print model(precheck_sent)
# Make sure prepare_sequence from earlier in the LSTM section is loaded
# Python 2 loop (xrange); trains the BiLSTM-CRF on the toy data.
for epoch in xrange(300): # again, normally you would NOT do 300 epochs, it is toy data
    for sentence, tags in training_data:
        # Step 1. Remember that Pytorch accumulates gradients. We need to clear them out
        # before each instance
        model.zero_grad()
        # Step 2. Get our inputs ready for the network, that is, turn them into Variables
        # of word indices.
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = torch.LongTensor([ tag_to_ix[t] for t in tags ])
        # Step 3. Run our forward pass.
        neg_log_likelihood = model.neg_log_likelihood(sentence_in, targets)
        # Step 4. Compute the loss, gradients, and update the parameters by calling
        # optimizer.step()
        neg_log_likelihood.backward()
        optimizer.step()
# Check predictions after training
precheck_sent = prepare_sequence(training_data[0][0], word_to_ix)
print model(precheck_sent)
# We got it!
# ### Exercise: A new loss function for discriminative tagging
# It wasn't really necessary for us to create a computation graph when doing decoding, since we do not backpropagate from the viterbi path score. Since we have it anyway, try training the tagger where the loss function is the difference between the Viterbi path score and the score of the gold-standard path. It should be clear that this function is non-negative and 0 when the predicted tag sequence is the correct tag sequence. This is essentially *structured perceptron*.
#
# This modification should be short, since Viterbi and score_sentence are already implemented. This is an example of the shape of the computation graph *depending on the training instance*. Although I haven't tried implementing this in a static toolkit, I imagine that it is possible but much less straightforward.
#
# Pick up some real data and do a comparison!
| 1-3_DeepLearningForNaturalLanguageProcessingWithPytorch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Install nightly version of TensorFlow.
#
# You need pip 19.1 or higher and TensorFlow 2.0 post Aug 2019. If not, run the necessary update scripts and check the versions again.
# %pip --version
# Print the installed TensorFlow version so we can confirm whether the
# nightly upgrade below is needed.
import tensorflow as tf
print(tf.version.VERSION)
# %pip install --user --upgrade --quiet pip
# %pip install --user --upgrade --quiet tf_nightly-2.0-preview
# If you get a version that is from July 31, please make sure to update pip. You need pip version 19.1 or higher.
# ## Check Keras
#
# This is a simple repro that ensures that the Keras feature columns used in this tutorial will work properly.
# +
# Minimal repro: a two-input Keras model built from feature columns,
# trained on a three-row DataFrame, then queried with tensors.
import pandas as pd
import datetime
print(tf.version.VERSION)
import shutil
import os
# serving inputs to model
inputs = {
    'a' : tf.keras.layers.Input(name='a', shape=(), dtype='int32'),
    'b' : tf.keras.layers.Input(name='b', shape=(), dtype='int32')
}
# feature columns based on training inputs
infc = {
    'a' : tf.feature_column.categorical_column_with_identity('a', num_buckets = 7),
    'b' : tf.feature_column.categorical_column_with_identity('b', num_buckets = 24)
}
#infc['ab'] = tf.feature_column.crossed_column([infc['a'], infc['b']], 2 )
# transformed: one-hot encode both identity columns
outfc = {
    'a' : tf.feature_column.indicator_column(infc['a']),
    'b' : tf.feature_column.indicator_column(infc['b']),
    #'ab': tf.feature_column.embedding_column(infc['ab'], 2)
}
# model architecture: DenseFeatures fans the columns into one dense layer
ds = tf.keras.layers.DenseFeatures(outfc.values())(inputs)
output = tf.keras.layers.Dense(1, name='pred')(ds)
model = tf.keras.models.Model(inputs=inputs, outputs=output)
model.compile(optimizer='adam', loss='mse', metrics=['mse'])
# Tiny in-memory training set.
df = pd.DataFrame.from_dict({
    'a': [3, 5, 2],
    'b': [13, 15, 18]
})
y = [3.0, 2.0, 2.0]
#shutil.rmtree('04_keras/checkpoints/', ignore_errors=True)
#checkpoint_path = "04_keras/checkpoints/nightly.cpt"
#cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
#                                                 save_weights_only=True,
#                                                 verbose=1)
model.fit([df['a'], df['b']], y, steps_per_epoch=1) #, callbacks=[cp_callback])
model.evaluate([df['a'], df['b']], y)
model.predict({
    'a': tf.convert_to_tensor([3]),
    'b': tf.convert_to_tensor([13])
}, steps=1)
# -
# Write the model graph diagram to disk (requires pydot/graphviz).
tf.keras.utils.plot_model(model, 'repro_model.png', show_shapes=True)
# +
# demonstrates how to write a custom serving function
class ExportModel(tf.keras.Model):
    """Wraps a trained Keras model with an explicit serving signature.

    serving_fn accepts the raw feature tensors 'a' and 'b' and returns the
    wrapped model's prediction under the key 'pred'; save() exports a
    SavedModel whose 'serving_default' signature is that function.
    """
    def __init__(self, model):
        # BUG FIX: the original called super().__init__(self), which passes
        # the instance itself as a positional argument to
        # tf.keras.Model.__init__; the base initializer takes no such
        # argument for subclassed models.
        super().__init__()
        self.model = model

    @tf.function(input_signature=[
        tf.TensorSpec([None,], dtype='int32', name='a'),
        tf.TensorSpec([None,], dtype='int32', name='b')
    ])
    def serving_fn(self, a, b):
        # Route the named serving inputs through the wrapped model.
        return {
            'pred' : self.model({'a': a, 'b': b}) #, steps=1)
        }

    def save(self, export_path):
        """Export a SavedModel with serving_fn as the default signature."""
        sigs = {
            'serving_default' : self.serving_fn
        }
        tf.keras.backend.set_learning_phase(0) # inference only
        tf.saved_model.save(self, export_path, signatures=sigs)
import shutil, os, datetime
# Fresh, timestamped export directory for each run.
OUTPUT_DIR = '04_keras/export/savedmodel'
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(OUTPUT_DIR, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
# sm = ExportModel(model); sm.save(EXPORT_PATH) # custom serving function
tf.saved_model.save(model, EXPORT_PATH) # with default serving function
# -
# !saved_model_cli show --tag_set serve --signature_def serving_default --dir {EXPORT_PATH}
# !find {EXPORT_PATH}
# Expose the path to the bash cell below via the environment.
os.environ['EXPORT_PATH'] = EXPORT_PATH
# The next cell writes a one-row JSON request body for online prediction.
# (Do not insert anything between the magic and the JSON line: every line
# after %%writefile becomes part of repro.json.)
# %%writefile repro.json
{"a": 3, "b": 13}
# + language="bash"
# PROJECT=cloud-training-demos
# BUCKET=${PROJECT}-ml
# REGION=us-east1
# MODEL_NAME=taxifare
# VERSION_NAME=v1
#
# if [[ $(gcloud ai-platform models list --format='value(name)' | grep $MODEL_NAME) ]]; then
# echo "$MODEL_NAME already exists"
# else
# # create model
# echo "Creating $MODEL_NAME"
# gcloud ai-platform models create --regions=$REGION $MODEL_NAME
# fi
#
# if [[ $(gcloud ai-platform versions list --model $MODEL_NAME --format='value(name)' | grep $VERSION_NAME) ]]; then
# echo "Deleting already existing $MODEL_NAME:$VERSION_NAME ... "
# gcloud ai-platform versions delete --model=$MODEL_NAME $VERSION_NAME
# echo "Please run this cell again if you don't see a Creating message ... "
# sleep 10
# fi
#
# # create model
# echo "Creating $MODEL_NAME:$VERSION_NAME"
# gcloud ai-platform versions create --model=$MODEL_NAME $VERSION_NAME --async \
# --framework=tensorflow --python-version=3.5 --runtime-version=1.14 \
# --origin=$EXPORT_PATH --staging-bucket=gs://$BUCKET
# -
# Monitor the model creation at [GCP Console > AI Platform](https://console.cloud.google.com/mlengine/models/taxifare/) and once the model version `v1` is created, proceed to the next cell.
# !gcloud ai-platform predict --model taxifare --json-instances repro.json --version v1
# +
## REPRO for @tf.function
# Demonstrates that a py_function-based dayofweek feature breaks model
# export; the failing branch is disabled with `if False`.
import tensorflow as tf
import pandas as pd
import datetime
print(tf.version.VERSION)
import shutil
import os
# serving inputs to model
inputs = {
    'pickup_datetime' : tf.keras.layers.Input(name='a', shape=(), dtype='string')
}
transformed = {}
feature_columns = {}
# hourofday can be done through a tf.strings.substr and works okay
transformed['hourofday'] = tf.keras.layers.Lambda(
    lambda x: tf.strings.to_number(tf.strings.substr(x, 11, 2), out_type=tf.dtypes.int32),
    name='hourofday'
)(inputs['pickup_datetime'])
feature_columns['hourofday'] = tf.feature_column.indicator_column(
    tf.feature_column.categorical_column_with_identity('hourofday', num_buckets=24))
# ADDING THIS CODE CAUSES ERROR
if False:
    DAYS = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat']
    # Maps a timestamp string (or string Tensor) to a weekday name.
    def get_dayofweek(s):
        if type(s) is not str:
            s = s.numpy().decode('utf-8') # if it is a Tensor
        ts = datetime.datetime.strptime(s, "%Y-%m-%d %H:%M:%S %Z")
        return DAYS[ts.weekday()]
    print(get_dayofweek('2012-07-05 14:18:00 UTC'))
    print(get_dayofweek(tf.constant('2012-07-05 14:18:00 UTC')))
    # Element-wise wrapper: py_function escapes graph mode per element.
    def dayofweek(ts_in):
        return tf.map_fn(
            lambda s: tf.py_function(get_dayofweek, inp=[s], Tout=tf.string),
            ts_in
        )
    transformed['dayofweek'] = tf.keras.layers.Lambda(
        lambda x: dayofweek(x),
        name='dayofweek_pyfun'
    )(inputs['pickup_datetime'])
    transformed['dayofweek'] = tf.keras.layers.Reshape((), name='dayofweek')(transformed['dayofweek'])
    feature_columns['dayofweek'] = tf.feature_column.indicator_column(
        tf.feature_column.categorical_column_with_vocabulary_list('dayofweek', vocabulary_list = DAYS))
## END ADD
# model architecture
ds = tf.keras.layers.DenseFeatures(feature_columns.values())(transformed)
output = tf.keras.layers.Dense(1, name='pred')(ds)
model = tf.keras.models.Model(inputs=inputs, outputs=output)
model.compile(optimizer='adam', loss='mse', metrics=['mse'])
df = pd.DataFrame.from_dict({
    'pickup_datetime': ['2012-07-05 14:18:00 UTC', '2012-07-05 15:18:00 UTC'],
})
y = [3.0, 2.0]
# need clean checkpoint path: otherwise get cache error
OUTDIR='/tmp'
shutil.rmtree('{}/checkpoints/'.format(OUTDIR), ignore_errors=True)
checkpoint_path = '{}/checkpoints/taxi'.format(OUTDIR)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1)
history = model.fit([df['pickup_datetime']], y, steps_per_epoch=1, callbacks=[cp_callback])
import shutil, os, datetime
# Timestamped SavedModel export, mirroring the cell above.
OUTPUT_DIR = '04_keras/export/savedmodel'
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(OUTPUT_DIR, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
tf.saved_model.save(model, EXPORT_PATH)
# -
# !saved_model_cli show --tag_set serve --signature_def serving_default --dir {EXPORT_PATH}
# +
## repro for "unhashable type list"
# Demonstrates that reusing the Input tensors directly as the transformed
# dict breaks SavedModel serialization; passing each input through an
# identity Lambda avoids the error.
import tensorflow as tf
import pandas as pd
import datetime
print(tf.version.VERSION)
import shutil
import os
# serving inputs to model
inputs = {
    'a' : tf.keras.layers.Input(name='a', shape=(), dtype='float32'),
    'b' : tf.keras.layers.Input(name='b', shape=(), dtype='float32'),
}
if False:
    # Skipping full serialization of object <tensorflow.python.keras.engine.training.Model object at 0x7fe60dbabc50>, because an error occurred while tracing layer functions. Error message: unhashable type: 'list'
    transformed = inputs.copy()
else:
    # no problem if I do this
    transformed = {
        colname : tf.keras.layers.Lambda(lambda x : x, name='passthrough_{}'.format(colname))(inputs[colname])
        for colname in inputs.keys()
    }
# Derived feature: c = b squared.
transformed['c'] = tf.keras.layers.Lambda(lambda x: x*x, name='c_sq_b')(inputs['b'])
feature_columns = {
    colname: tf.feature_column.numeric_column(colname)
    for colname in ['a', 'b', 'c']
}
# model architecture
ds = tf.keras.layers.DenseFeatures(feature_columns.values())(transformed)
output = tf.keras.layers.Dense(1, name='pred')(ds)
model = tf.keras.models.Model(inputs=inputs, outputs=output)
model.compile(optimizer='adam', loss='mse', metrics=['mse'])
df = pd.DataFrame.from_dict({
    'a' : [3.0, 4.0],
    'b' : [1.0, 2.0]
})
y = [3.0, 2.0]
# need clean checkpoint path: otherwise get cache error
OUTDIR='/tmp'
shutil.rmtree('{}/checkpoints/'.format(OUTDIR), ignore_errors=True)
checkpoint_path = '{}/checkpoints/taxi'.format(OUTDIR)
cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                 save_weights_only=True,
                                                 verbose=1)
history = model.fit([df['a'], df['b']], y, batch_size=1, callbacks=[cp_callback])
import shutil, os, datetime
# Timestamped SavedModel export, mirroring the earlier cells.
OUTPUT_DIR = '04_keras/export/savedmodel'
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
EXPORT_PATH = os.path.join(OUTPUT_DIR, datetime.datetime.now().strftime('%Y%m%d%H%M%S'))
tf.saved_model.save(model, EXPORT_PATH)
# !saved_model_cli show --tag_set serve --signature_def serving_default --dir {EXPORT_PATH}
# -
| quests/serverlessml/install_nightly.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import requests
import pandas as pd
import sys
sys.path.append('..')
import folium
# NOTE(review): star imports from folium.element/features/plugins supply
# Crossfilter, Div, PieFilter, etc. used below; folium.element suggests a
# legacy folium version — confirm before upgrading.
from folium.features import *
from folium.element import *
from folium.plugins import *
# -
# Fetch the sample Untappd check-in dataset and flatten it into a frame.
data = requests.get('https://raw.githubusercontent.com/austinlyons/dcjs-leaflet-untappd/master/untappd.json')\
    .json()['response']['beers']['items']
data = pd.DataFrame(data)
# +
# Flatten the nested 'beer' and 'brewery' dicts into scalar columns.
data['beer_rating_score']=data['beer'].apply(lambda x: x['rating_score'])
data['beer_beer_abv'] = data['beer'].apply(lambda x: x['beer_abv'])
data['beer_beer_ibu'] = data['beer'].apply(lambda x: x['beer_ibu'])
data['beer_beer_style'] = data['beer'].apply(lambda x: x['beer_style'])
data['beer_beer_name'] = data['beer'].apply(lambda x: x['beer_name'])
data['brewery_brewery_name'] = data['brewery'].apply(lambda x: x['brewery_name'])
# Derive year / month-name / weekday-name from the first-had timestamp.
data['dt'] = data.first_had.apply(pd.Timestamp)
data['first_had_year'] = data.dt.apply(lambda x: x.strftime('%Y'))#str(x.year))
data['first_had_month'] = data.dt.apply(lambda x: x.strftime('%b'))
data['first_had_day'] = data.dt.apply(lambda x: x.strftime('%a'))
# HTML popup shown on the map markers.
data['popup'] = data.apply(lambda x: "<p>" + x['brewery_brewery_name'] + " "\
    + x['brewery']['location']['brewery_city'] + " "\
    + x['brewery']['location']['brewery_state'] + "</p>", axis=1)
data['lat'] = data['brewery'].apply(lambda x: x['location']['lat'])
data['lng'] = data['brewery'].apply(lambda x: x['location']['lng'])
# Drop the nested/raw columns now that they are flattened.
data.drop(['beer','brewery', 'first_checkin_id', 'first_had', 'recent_checkin_id'],axis=1, inplace=True)
# -
data.head()
# Dashboard configuration: chart names, source columns, target div ids,
# sizes, and binning for the dc.js crossfilter widgets built below.
param = {
    # Running-total widgets.
    "dataCount" : [
        {
            "name" : "DC1",
            "divId" : "#data-count"
        },
        {
            "name" : "DC2",
            "divId" : "#data-count2"
        }
    ],
    # Donut charts: year / weekday / month of first check-in.
    "pieChart" : [
        {
            "name" : "Year",
            "column" : "first_had_year",
            "divId" : "#chart-ring-year",
            "width" : 150,
            "height": 150,
            "innerRadius" : 20,
            "order" : [2013, 2014, 2015, 2016]
        },
        {
            "name" : "Weekday",
            "column" : "first_had_day",
            "divId" : "#chart-ring-day",
            "width" : 150,
            "height": 150,
            "innerRadius" : 20,
            "order" : ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
        },
        {
            "name" : "Month",
            "column" : "first_had_month",
            "divId" : "#chart-ring-month",
            "width" : 150,
            "height": 150,
            "innerRadius" : 20,
            "order" : [
                'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
                'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        }
    ],
    # Histograms; "domain" is the x-range, "groupby" the bin width
    # (milliseconds for the datetime charts).
    "barChart" : [
        {
            "name" : "Rating",
            "column" : "rating_score",
            "divId" : "#chart-rating-count",
            "width" : 300,
            "height" : 180,
            "domain" : [0.,5.2],
            "groupby" : 0.25,
            "xAxisLabel" : "My rating",
            "yAxisLabel" : "Count",
            "margins" : {'top': 10, 'right': 20, 'bottom': 50, 'left': 50},
            "xAxisTickValues" : [0, 1, 2, 3, 4, 5],
            "timeFormat" : None,
            "barPadding" : 0.1
        },
        {
            "name" : "Community rating",
            "column" : "beer_rating_score",
            "divId" : "#chart-community-rating-count",
            "width" : 300,
            "height" : 180,
            "domain" : [0.3,5.3],
            "groupby" : 1,
            "xAxisLabel" : "Community rating",
            "yAxisLabel" : "Count",
            "margins" : {'top': 10, 'right': 20, 'bottom': 50, 'left': 50},
            "xAxisTickValues" : [0, 1, 2, 3, 4, 5],
            "timeFormat" : None,
            "barPadding" : 0.01
        },
        {
            "name" : "Alcohol By Volume (%)",
            "column" : "beer_beer_abv",
            "divId" : "#chart-abv-count",
            "width" : 300,
            "height" : 180,
            "domain" : [0.,13],
            "groupby" : 0.5,
            "xAxisLabel" : "Alcohol By Volume (%)",
            "yAxisLabel" : "Count",
            "margins" : {'top': 10, 'right': 20, 'bottom': 50, 'left': 50},
            "xAxisTickValues" : [0, 2, 4, 6, 8, 10, 12],
            "barPadding" : 0.1
        },
        {
            # ~30-day bins over 2013-2016 (epoch milliseconds).
            "name" : "Datetime",
            "column" : "dt",
            "divId" : "#chart-dt-count",
            "width" : 600,
            "height" : 180,
            "domain" : [1356998400000,1451606400000],
            "groupby" : 86400000*30,
            "xAxisLabel" : "Datetime",
            "yAxisLabel" : "Count",
            "margins" : {'top': 10, 'right': 20, 'bottom': 50, 'left': 50},
            "xAxisTickValues" : [1356998400000, 1388534400000, 1420070400000, 1451606400000],
            "timeFormat" : "%b%Y",
            "barPadding" : 0.1
        },
        {
            # Same data, ~yearly bins.
            "name" : "Datetime",
            "column" : "dt",
            "divId" : "#chart-dt2-count",
            "width" : 600,
            "height" : 180,
            "domain" : [1356998400000,1451606400000],
            "groupby" : 86400000*375,
            "xAxisLabel" : "Datetime",
            "yAxisLabel" : "Count",
            "margins" : {'top': 10, 'right': 20, 'bottom': 50, 'left': 50},
            "xAxisTickValues" : [1356998400000, 1420070400000],
            "timeFormat" : "%b%Y",
            "barPadding" : 0.1
        },
        {
            "name" : "International Bitterness Units",
            "column" : "beer_beer_ibu",
            "divId" : "#chart-ibu-count",
            "width" : 300,
            "height" : 180,
            "domain" : [0.,100],
            "groupby" : 5,
            "xAxisLabel" : "International Bitterness Units",
            "yAxisLabel" : "Count",
            "margins" : {'top': 10, 'right': 20, 'bottom': 50, 'left': 50},
            "xAxisTickValues" : [0, 20, 40, 60, 80, 100],
            "timeFormat" : None,
            "barPadding" : 0.1
        }
    ]
}
# +
#html = jinja2.Template(open('./dcjs-leaflet-untappd/index.html.tmpl').read())
#_repr_html_(html.render(data=pd.json.dumps(data.to_dict(orient='records')),
#                        param=pd.json.dumps(param)))
# +
# Assemble the crossfilter dashboard: a map row plus chart/table rows.
# NOTE(review): pd.json.dumps and add_children are legacy pandas/folium
# APIs — confirm the pinned versions before modernizing.
f = folium.element.Figure(height=1500)
c = Crossfilter(pd.json.dumps(data.to_dict(orient='records')),
                width="98%", left="2%",
                position='absolute')
f.add_children(c)
# Bootstrap-style rows that host the map, pie charts, bar charts, table.
row0 = Div(class_="row", height=300, width="100%")
row1 = Div(class_="row", height=200, width="100%")
row2 = Div(class_="row", height=200, width="100%")
row3 = Div(class_="row", height=200, width="100%")
row4 = Div(class_="row", height=500, width="100%")
c.add_children(row0)
c.add_children(row1)
c.add_children(row2)
c.add_children(row3)
c.add_children(row4)
# Row 0: the map, filtered by the crossfilter selection.
m = folium.Map()
m.add_children(FeatureGroupFilter(c))
row0.add_children(m)
# Row 1: pie charts from the config above.
for i, x in enumerate(param['pieChart']):
    row1.add_children(PieFilter(c, x['column'], name=x['name'],
                                width=150, height=150, inner_radius=20,
                                order=x['order'], class_="col-xs-4 pie-chart"))
# Rows 2-3: bar charts (first three on row 2, the rest on row 3).
for i, x in enumerate(param['barChart']):
    row = row2 if i<3 else row3
    row.add_children(BarFilter(
        c,
        x['column'],
        width=250,
        height=150,
        bar_padding=0.1,
        domain=x['domain'],
        groupby=x['groupby'],
        xlabel=x['xAxisLabel'],
        ylabel=x["yAxisLabel"],
        margins=x['margins'],
        xticks=x['xAxisTickValues'],
        time_format=x.get('timeFormat',None),
        class_="col-xs-4 dc-chart"))
# Row 4: the raw-data table, also linked to the crossfilter.
row4.add_children(TableFilter(c,
    ['brewery_brewery_name', 'beer_beer_name', 'beer_beer_style', 'rating_score',
     'beer_rating_score', 'beer_beer_abv', 'beer_beer_ibu'
    ],
    class_="table table-bordered table-striped"))
f
# -
| examples/Crossfilter example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" id="-dgPrK3P73-m" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664272269, "user_tz": -330, "elapsed": 2044, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# + id="R7Wn72Hn8guw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} executionInfo={"status": "ok", "timestamp": 1597664272271, "user_tz": -330, "elapsed": 2025, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="0d73bf0d-5bcb-4ffb-b431-13712d7b2343"
from google.colab import drive
drive.mount('/content/drive')
# + id="7bbDr0bX9GRD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 117} executionInfo={"status": "ok", "timestamp": 1597664351021, "user_tz": -330, "elapsed": 63756, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="545c8e33-ae5e-4436-c84d-e588cd06622a"
# !unzip "/content/drive/My Drive/Analytics_Vidya_cv/av_independence_day/train1.zip"
# !unzip "/content/drive/My Drive/Analytics_Vidya_cv/av_independence_day/test1.zip"
# + [markdown] id="Ch-rboKS73-r" colab_type="text"
# # Loading the data
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" id="x6yggMGx73-s" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664351694, "user_tz": -330, "elapsed": 63195, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# Load the hackathon train/test splits unzipped by the cell above.
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
# + [markdown] id="Yj8jAI1-QARN" colab_type="text"
#
# + id="fnrfz7gn73-v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 297} executionInfo={"status": "ok", "timestamp": 1597664351695, "user_tz": -330, "elapsed": 61178, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="fb160b4b-6f9d-4a0d-ab44-ea427cf246ba"
train.head()
# + id="ejVr3YTx73-y" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664351696, "user_tz": -330, "elapsed": 60206, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# Convenience handles on the two text columns of the training set.
abstracts = train['ABSTRACT']
titles = train['TITLE']
# + id="Ajy8lv0k73-1" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664354045, "user_tz": -330, "elapsed": 61904, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
from numpy import array
from keras.preprocessing.text import one_hot
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten, LSTM
from keras.layers import GlobalMaxPooling1D
from keras.models import Model
from keras.layers.embeddings import Embedding
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.layers import Input
from keras.layers.merge import Concatenate
import re
import matplotlib.pyplot as plt
from nltk.corpus import stopwords
# + id="FAvQyGJd73-4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} executionInfo={"status": "ok", "timestamp": 1597664354049, "user_tz": -330, "elapsed": 61075, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="73d980fa-ea7e-4d55-ffe6-78d83a851b1d"
train.shape
# + [markdown] id="576nfO2673-7" colab_type="text"
# Removing any empty string
# + id="iKWUZWg773-8" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664354050, "user_tz": -330, "elapsed": 59398, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# Drop rows whose abstract is an empty string, then any remaining NaNs.
filt = train['ABSTRACT'] != ""
train = train[filt]
train = train.dropna()
# + id="OyS-TJV173_A" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 135} executionInfo={"status": "ok", "timestamp": 1597664354051, "user_tz": -330, "elapsed": 58748, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="bf57fb89-6efa-44dc-bbd0-9564ccd1836b"
train['ABSTRACT'][123]
# + id="TD4xzCE973_D" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 117} executionInfo={"status": "ok", "timestamp": 1597664354052, "user_tz": -330, "elapsed": 58000, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="c938b039-eb1f-452a-ab02-835970527109"
# Show the six binary topic labels for the row inspected above (index 123).
print("Computer Science:" + str(train["Computer Science"][123]))
print("Physics:" + str(train["Physics"][123]))
print("Mathematics:" + str(train["Mathematics"][123]))
print("Statistics:" + str(train["Statistics"][123]))
print("Quantitative Biology:" + str(train["Quantitative Biology"][123]))
print("Quantitative Finance:" + str(train["Quantitative Finance"][123]))
# + [markdown] id="YCX_Iwcg73_F" colab_type="text"
# Plotting the count of each topic
# + id="vL2lHSfU73_G" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 197} executionInfo={"status": "ok", "timestamp": 1597664354053, "user_tz": -330, "elapsed": 56549, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="f42b5632-771f-494b-83f7-7492d2f475e1"
# Multi-label target matrix: one 0/1 column per topic.
labels = train[["Computer Science", "Physics", "Mathematics", "Statistics", "Quantitative Biology", "Quantitative Finance"]]
labels.head()
# + id="ZY5--gk573_J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 485} executionInfo={"status": "ok", "timestamp": 1597664354055, "user_tz": -330, "elapsed": 55816, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="14e0e3ce-11c1-4f52-b676-d85b3c89710e"
# Widen the default figure, then bar-plot how often each topic occurs.
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 16
fig_size[1] = 6
plt.rcParams["figure.figsize"] = fig_size
labels.sum(axis = 0).plot.bar()
# + [markdown] id="BSx90nEP73_M" colab_type="text"
# We observe that the columns are in order of the occurrences of the courses
# + [markdown] id="J_dkTP2I73_N" colab_type="text"
# # Creating the Text classification model
# + id="qFkQk9Oo73_O" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597664354056, "user_tz": -330, "elapsed": 52010, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="5a46d47e-6591-4e37-ff6e-ba8ab0e264f1"
import nltk
nltk.download('stopwords')

def preprocess_text(text):
    """Clean an abstract for tokenization.

    Keeps letters only, drops stray single characters, collapses
    whitespace, and removes English stopwords.

    Args:
        text: raw abstract string.

    Returns:
        The kept words, each preceded by a single space (so the result is
        '' for no kept words, otherwise ' w1 w2 ...'), matching the
        original output format exactly.
    """
    # remove punctuations and numbers
    text = re.sub('[^a-zA-Z]', ' ', text)
    # single character removal
    text = re.sub(r"\s+[a-zA-Z]\s+", ' ', text)
    # removing multiple spaces
    text = re.sub(r'\s+', ' ', text)
    # PERF FIX: the original re-built stopwords.words("english") inside the
    # per-word loop, i.e. O(words * stopwords) list scans per document.
    # Compute the set once per call and test membership in O(1).
    stop_words = set(stopwords.words("english"))
    return ''.join(' ' + word for word in text.split() if word not in stop_words)
# + id="OHiDmp2g73_Q" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664742090, "user_tz": -330, "elapsed": 439054, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# Preprocess every training abstract; y is the 0/1 label matrix.
X = []
texts = list(train["ABSTRACT"])
for t in texts:
    X.append(preprocess_text(t))
y = labels.values
# + id="Mo1lwBpD73_T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} executionInfo={"status": "ok", "timestamp": 1597664742094, "user_tz": -330, "elapsed": 438089, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="cc1d9096-e197-4c6b-926f-fd6d8943e8a8"
X[4]
# + id="cihPqK0N73_W" colab_type="code" colab={}
# 90/10 shuffled split with a fixed seed for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.1, random_state = 42, shuffle = True)
# + id="_GRNMh0E73_Y" colab_type="code" colab={}
# Fit the tokenizer on the training split only (avoids test leakage),
# convert both splits to index sequences, and pad/truncate to max_len.
tokenizer = Tokenizer(num_words = 10000)
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
# +1 because Keras word indices start at 1; index 0 is the pad token.
vocab_size = len(tokenizer.word_index)+1
max_len = 200
X_train = pad_sequences(X_train, padding = 'post', maxlen = max_len)
X_test = pad_sequences(X_test, padding = 'post', maxlen = max_len)
# + id="lNZYe7qILwAg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1597613499459, "user_tz": -330, "elapsed": 1027, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="5fd7a74b-796e-4f4e-adbb-18ffa3765ca9"
vocab_size
# + [markdown] id="2CG7pKio73_b" colab_type="text"
# I will be using GLoVe word embeddings to convert text into numerals
# + id="DtO5s1ww73_b" colab_type="code" colab={}
from numpy import array
from numpy import asarray
from numpy import zeros
# + id="1ZNBYSqL73_f" colab_type="code" colab={}
# Parse the 100-d GloVe vectors into word -> vector mapping.
embeddings_dictionary = dict()
glove_file = open('/content/drive/My Drive/done assg/glove.6B.100d.txt', encoding = "utf8")
# + id="8Sb33sOY73_h" colab_type="code" colab={}
# Each line is: word v1 v2 ... v100.
for line in glove_file:
    records = line.split()
    word = records[0]
    vector_dimensions = asarray(records[1:], dtype = 'float32')
    embeddings_dictionary[word] = vector_dimensions
glove_file.close()
# + id="VQM2yZATMJI8" colab_type="code" colab={}
# + id="bfz8T4yJMdDj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 136} executionInfo={"status": "ok", "timestamp": 1597613899355, "user_tz": -330, "elapsed": 994, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="c083bc45-263f-4e77-cca8-ab678c4d62b2"
y_test
# + id="lSAsfG3O73_k" colab_type="code" colab={}
# Row i of the matrix is the GloVe vector of the word with index i;
# words missing from GloVe keep an all-zero row.
embedding_matrix = zeros((vocab_size, 100))
for word, index in tokenizer.word_index.items():
    embedding_vector = embeddings_dictionary.get(word)
    if embedding_vector is not None:
        embedding_matrix[index] = embedding_vector
# + id="aSLEMWHS73_m" colab_type="code" colab={}
# LSTM classifier over frozen GloVe embeddings; six sigmoid outputs with
# binary cross-entropy give independent per-topic (multi-label) scores.
inputs = Input(shape = (max_len,))
embedding_layer = Embedding(vocab_size, 100, weights = [embedding_matrix], trainable = False)(inputs)
LSTM_1 = LSTM(256)(embedding_layer)
dense_1 = Dense(6, activation = 'sigmoid')(LSTM_1)
model = Model(inputs = inputs, outputs = dense_1)
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['acc'])
# + id="Qg4wG-Zn73_o" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 289} executionInfo={"status": "ok", "timestamp": 1597613773496, "user_tz": -330, "elapsed": 2318, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="6866d767-e078-4b72-975d-ea9b33e68aa7"
model.summary()
# + id="cIkWI8PR73_r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 422} executionInfo={"status": "ok", "timestamp": 1597613775535, "user_tz": -330, "elapsed": 1957, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="9b578765-2c36-4c9f-97d4-c54bcae8fa33"
from keras.utils import plot_model
# Save an architecture diagram to disk (also rendered inline in the notebook).
plot_model(model, to_file='model_plot4a.png', show_shapes=True, show_layer_names=True)
# + id="tdg6xjy273_t" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 697} executionInfo={"status": "ok", "timestamp": 1597613873232, "user_tz": -330, "elapsed": 95789, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="812a7f7a-e3d1-4c7b-a416-1261a075c925"
history = model.fit(X_train, y_train, batch_size = 128, epochs = 20, verbose = 1)
# + id="xK02Ue0N73_v" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} executionInfo={"status": "ok", "timestamp": 1597613890138, "user_tz": -330, "elapsed": 2084, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="281297cc-d9e6-4999-9f41-fd73afc799c5"
# Held-out evaluation; score is [loss, accuracy] per the compiled metrics.
score = model.evaluate(X_test, y_test, verbose = 1)
print("Loss : ", score[0])
print("Accuracy : ", score[1])
# + id="u1H6cXDF73_y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 693} executionInfo={"status": "ok", "timestamp": 1597613909551, "user_tz": -330, "elapsed": 1372, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="8a2f1068-3ec7-4838-9d1b-92d124ec5317"
import matplotlib.pyplot as plt
# Training-accuracy curve (training metrics only: fit() was called without
# validation data, so there is no val_acc/val_loss to plot).
plt.plot(history.history['acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.show()
# Training-loss curve.
plt.plot(history.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
# + id="M8nR1rvu73_1" colab_type="code" colab={}
# Clean the raw test abstracts with the same preprocessing used for training.
test_abs = list(test['ABSTRACT'])
X_t = [preprocess_text(abstract) for abstract in test_abs]
# + id="B_xmhEI-73_3" colab_type="code" colab={}
# Encode with the tokenizer fitted on the training data, pad to the same
# sequence length, and score with the trained model.
X_t = tokenizer.texts_to_sequences(X_t)
X_t = pad_sequences(X_t, padding='post', maxlen=max_len)
pred = model.predict(X_t)
# + id="yP5YYOoY73_7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} executionInfo={"status": "ok", "timestamp": 1597614044779, "user_tz": -330, "elapsed": 3982, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="6ddfba9d-4d28-44d2-d918-0f48a1c6eae0"
pred[0]
# + id="d6PobRZl73_9" colab_type="code" colab={}
# Binarize the sigmoid scores in place with a 0.45 decision threshold:
# 1.0 where score >= 0.45, else 0.0. Vectorized boolean-mask assignment
# replaces the original per-element double Python loop (same result,
# without O(rows*cols) interpreter-level iterations).
above = pred >= 0.45
pred[above] = 1
pred[~above] = 0
# + id="1YcseiAz74AA" colab_type="code" colab={}
# Assemble the submission frame: ID column plus one 0/1 column per label,
# taken column-wise from the thresholded prediction matrix.
output = pd.DataFrame()
output['ID'] = test['ID']
output['Computer Science'] = pred[:, 0]
output['Physics'] = pred[:, 1]
output['Mathematics'] = pred[:, 2]
output['Statistics'] = pred[:, 3]
output['Quantitative Biology'] = pred[:, 4]
output['Quantitative Finance'] = pred[:, 5]
# + id="7xIe5bwY74AC" colab_type="code" colab={}
# Fix: the original wrapped the label names in a *nested* list, so the loop
# ran exactly once with the whole list and silently relied on pandas
# multi-column indexing. A flat list makes the per-column cast explicit;
# the resulting dtypes are identical.
courses = ["Computer Science", "Physics", "Mathematics", "Statistics", "Quantitative Biology", "Quantitative Finance"]
for column in courses:
    output[column] = output[column].astype(int)
# + id="voFb_kyA74AE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 221} executionInfo={"status": "ok", "timestamp": 1597614069438, "user_tz": -330, "elapsed": 1106, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="c6ec8be1-d6b6-4328-bcbf-0c55eaa8c801"
output.head()
# + id="By7FHPZX74AG" colab_type="code" colab={}
# Write the submission file for the Keras LSTM model.
output.to_csv("Keras LSTM.csv", index = False)
# + id="W9zRjJyh74AJ" colab_type="code" colab={}
# + [markdown] id="j9euIKQDDlXP" colab_type="text"
# # ROBERTA TRANSFORMER
# + [markdown] id="NT8ZGD68Dnm4" colab_type="text"
# train["ABSTRACT"] = pd.Series(X)
# + id="1Pjv83igyKxE" colab_type="code" colab={}
# + id="8kRiWnj-yLQV" colab_type="code" colab={}
train = pd.read_csv("train.csv")
test = pd.read_csv("test.csv")
# + id="cRPsvKn_yLZR" colab_type="code" colab={}
# Drop rows with an empty abstract, then any remaining rows containing NaNs.
filt = train['ABSTRACT'] != ""
train = train[filt]
train = train.dropna()
# + id="HhXT5REtyLiR" colab_type="code" colab={}
import nltk
nltk.download('stopwords')
# Fix: this notebook section never imported `stopwords` itself (it presumably
# leaked in from an earlier cell's namespace); import it explicitly so the
# section is self-contained. Re-importing is harmless if already loaded.
from nltk.corpus import stopwords
def preprocess_text(text):
    """Normalize an abstract: keep letters only, drop lone single characters,
    collapse whitespace, and remove English stopwords.

    Returns '' when nothing survives filtering; otherwise a string with a
    leading space and single spaces between kept words (matching the
    original accumulator's output exactly).

    NOTE(review): the text is not lowercased, so capitalized stopwords like
    'The' survive the filter -- confirm whether that is intended.
    """
    #remove punctuations and numbers
    text = re.sub('[^a-zA-Z]', ' ', text)
    #single character removal
    text = re.sub(r"\s+[a-zA-Z]\s+", ' ', text)
    #removing multiple spaces
    text = re.sub(r'\s+', ' ', text)
    # Perf fix: the original re-ran stopwords.words("english") for EVERY word
    # (rebuilding a ~180-item list each time). Build a set once per call;
    # membership results are identical.
    stop_words = set(stopwords.words("english"))
    kept = [word for word in text.split() if word not in stop_words]
    return (' ' + ' '.join(kept)) if kept else ""
# + id="4yKCI_JCyLo_" colab_type="code" colab={}
# Clean every training abstract; X becomes a list of preprocessed strings.
X = []
texts = list(train["ABSTRACT"])
for t in texts:
    X.append(preprocess_text(t))
# NOTE(review): `labels` is not defined anywhere in this notebook section --
# it presumably comes from an earlier cell of the full notebook. Verify
# before running this section standalone.
y = labels.values
# + id="HcqmOoMmFjbN" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664742095, "user_tz": -330, "elapsed": 426149, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
traintmp = train.drop(['TITLE'],axis=1)
# + id="KGMfxCVQHlaQ" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664742097, "user_tz": -330, "elapsed": 424723, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# NOTE: this is an alias, not a copy -- mutating df below also mutates traintmp.
df = traintmp
# Collapse the six one-hot label columns into a single tuple-valued 'labels'
# column (the shape simpletransformers' multi-label model expects).
df['labels'] = list(zip(df['Computer Science'].tolist(), df.Physics.tolist(), df.Mathematics.tolist(), df.Statistics.tolist(), df['Quantitative Biology'].tolist(), df['Quantitative Finance'].tolist()))
# traintmp
# + id="oI9rVIt9H9A7" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664742099, "user_tz": -330, "elapsed": 423932, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# Keep only the text column and the packed 'labels' tuple column.
df1 = df.drop(["Computer Science","Physics", "Mathematics", "Statistics","Quantitative Biology",
               "Quantitative Finance", "ID"], axis=1)
# + id="tt_P6dJNIeBQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 407} executionInfo={"status": "ok", "timestamp": 1597664742099, "user_tz": -330, "elapsed": 423510, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="3d0b09f6-f991-48f5-faa4-a50c60465904"
df1
# + id="rwstWB3wFktT" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597664742100, "user_tz": -330, "elapsed": 422142, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
from sklearn.model_selection import train_test_split
# 80/20 split. NOTE(review): no random_state is set, so the split (and the
# downstream eval numbers) differ on every run.
train_df, eval_df = train_test_split(df1, test_size=0.2)
# + id="1eKQz4HeGDqw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1597664747707, "user_tz": -330, "elapsed": 426867, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="a156fb03-745f-4ffc-b493-fbabd670a39f"
# !pip install simpletransformers
# + id="_x9nlbb8GKth" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} executionInfo={"status": "ok", "timestamp": 1597664748882, "user_tz": -330, "elapsed": 427025, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="7b5b35fd-9cf1-4adf-c29f-730692bb16a7"
from simpletransformers.classification import MultiLabelClassificationModel
# + id="jnpH-kYxGsUI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 120} executionInfo={"status": "ok", "timestamp": 1597664757957, "user_tz": -330, "elapsed": 434950, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="bc9e47b8-2559-4314-e618-d6a55f8d8c2b"
model = MultiLabelClassificationModel('roberta', 'roberta-base', num_labels=6, args={'train_batch_size':2, 'gradient_accumulation_steps':16, 'learning_rate': 3e-5, 'num_train_epochs': 3, 'max_seq_length': 200})
# + id="K3NUP3lEG77s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 380, "referenced_widgets": ["ae94ca4b519040b0acad1e18f570d0e9", "dbb11b6258234e16b0b6360920fa92a5", "926562fc6c45462b9faf93689e11bd80", "d9c2a36c052a423ab8fe3cb2ca059a31", "<KEY>", "<KEY>", "3d9f04faca784ef394f54a1ed81fce95", "<KEY>", "<KEY>", "7d975128ee9b4a8b8ca65d75ca81dfc9", "<KEY>", "a467fa39600b4e57b7dd0e242fe76c65", "5c0d9e0943ca4c0088fe0d282d215b91", "<KEY>", "da72155c442e4eb682a0ba00d1fbbbd4", "<KEY>", "9d774165356a403fba5d72884fc82813", "14b01676caf643ca80f045a450767ac9", "<KEY>", "<KEY>", "6569f7ab6bd545daa8d74fb5a6016c47", "5741d74b877e4e77a313676bdd6ff19c", "<KEY>", "b98d7179d47f44518fa3c2b0a5bad40a", "<KEY>", "<KEY>", "<KEY>", "ee0d8fe509514092ae1a9241503439ac", "<KEY>", "<KEY>", "<KEY>", "843dcb7e40d24e5f8d7aed79782f6649", "a4677b18ffac4de79d83dcabb699ac60", "b5a1dd2dd2b141a5abd68dee1c827e77", "<KEY>", "<KEY>", "88d1cc37e44c476889eda3563a8795d9", "<KEY>", "3ee2c7d3dd474ed4ac633da74d09edfa", "c051fc54a85741ce98ecf026e9c7f7d7"]} executionInfo={"status": "ok", "timestamp": 1597672709005, "user_tz": -330, "elapsed": 8385037, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="bda7c251-46e8-4b75-93ed-14ebf177ff5c"
model.train_model(train_df)
# + id="vXA4_RKrHB-d" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 167, "referenced_widgets": ["bf45b30e4d3d487b890d15fbe7ff348f", "df59b2bc6618430dafc7d9a329f9ab81", "9ed094699a71445baf9f976edd4cbf73", "0a31ef23534b4cc7ae71093552620af3", "6b85dcbaef8f4b63bf9271757aceb195", "68bf0d6af82e49479163f2a0de855481", "2200a90ebf494bd4ab3a75f856c879ec", "9842b3d162084492b86ce115f8b91e2e", "3e2c92dddc5d417b8027dd5c378df96e", "2c74e7c0272a47239b55c0bee01c759a", "09eccf92944d43849287d880909342f2", "dc1177d12f45473d8baa18273e7eb090", "0c9ab0368a134ab49313e80217ab7a93", "<KEY>", "0da9b27311fb404eaee8ce35f32c08a8", "a17d75564de54f61810a26c34159cd00"]} executionInfo={"status": "ok", "timestamp": 1597672837789, "user_tz": -330, "elapsed": 122140, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="a2ffd1e7-2f73-4f26-bac0-be0b0592d2b0"
result, model_outputs, wrong_predictions = model.eval_model(eval_df)
# + id="f4kE_3_dhj-D" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597673110165, "user_tz": -330, "elapsed": 169797, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# Clean the test abstracts with the same preprocessing used for training.
test_abs = list(test['ABSTRACT'])
X_t = [preprocess_text(abstract) for abstract in test_abs]
# + id="Pw1TKbgTi--S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 114, "referenced_widgets": ["4b5ee4d83ef34d30a49392dd4cb02115", "58efc6595f124b059fa029db767b1985", "abba49a7e7cc41b0907a8f3597d49c05", "02ff9bbb674a41589d68166fb5dfcb8c", "bd0223438fb348f4bec62bb0d4485728", "04480424900247ceab9108ec708f034a", "6d1fee9e8fb6449396931c200de3070c", "1ce9e07bbd854df0b37d2934a6f2beb5", "0526537e436c444da183e6e5012a5ca4", "e3f3746753944379b7955d000c2f0aca", "ab393edbb6be42debe4f0a82694f7fb3", "f4e898013127485b9a42291d50056ecf", "548b0d1b92f64432ba71ca9a021dae82", "959653e1bb9e41509fe4b7864e6added", "2c7f54e7d1e342eda9f3bec6e68d2820", "4771e15a5902457882cef91530dfcf00"]} executionInfo={"status": "ok", "timestamp": 1597673362799, "user_tz": -330, "elapsed": 419078, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="7ad1ebbc-5573-4271-bad9-cb1e7eff255e"
preds, outputs = model.predict(X_t)
# + id="GIKN4_88js6j" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} executionInfo={"status": "ok", "timestamp": 1597674003718, "user_tz": -330, "elapsed": 1539, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="8bdaaf4e-2923-48c1-c0bd-092dc8754ae4"
outputs[6]
# + id="K6JriGy_kz3x" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 33} executionInfo={"status": "ok", "timestamp": 1597673997598, "user_tz": -330, "elapsed": 1245, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}} outputId="592cd090-ab45-4935-e106-f7df4ad870cf"
preds[6]
# df1 = df.drop(["Computer Science","Physics", "Mathematics", "Statistics","Quantitative Biology",
# "Quantitative Finance", "ID"], axis=1)
# + id="GqTiKqPMk8RS" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597673584384, "user_tz": -330, "elapsed": 1335, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# Wrap the predicted 0/1 matrix in a DataFrame with one column per label.
sub_df = pd.DataFrame(preds,columns=["Computer Science","Physics", "Mathematics", "Statistics","Quantitative Biology",
                                     "Quantitative Finance"])
# + id="GRWrCvDmlPZq" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597673587140, "user_tz": -330, "elapsed": 1420, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
sub_df['ID'] = test['ID']
# + id="Uk94_oTqlQSo" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597673587756, "user_tz": -330, "elapsed": 1617, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
# Reorder columns so ID comes first, matching the submission format.
sub_df = sub_df[["ID","Computer Science","Physics", "Mathematics", "Statistics","Quantitative Biology",
                 "Quantitative Finance"]]
# + id="s9JelzV0ldY8" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597673593633, "user_tz": -330, "elapsed": 2585, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
sub_df.to_csv('transformer(roberta3).csv', index=False)
# + id="chOgwOZGleZb" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1597673851952, "user_tz": -330, "elapsed": 1476, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiCkbw3JU_dnYFIpfjihWpa8mst4QBm2Ub0VrYvcQ=s64", "userId": "06643994408893532204"}}
| av-independence-day-hackathon.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sudoku
#
# This tutorial includes everything you need to set up decision optimization engines, build constraint programming models.
#
#
# When you finish this tutorial, you'll have a foundational knowledge of _Prescriptive Analytics_.
#
# >This notebook is part of **[Prescriptive Analytics for Python](http://ibmdecisionoptimization.github.io/docplex-doc/)**
# >
# >It requires either an [installation of CPLEX Optimizers](http://ibmdecisionoptimization.github.io/docplex-doc/getting_started.html) or it can be run on [IBM Watson Studio Cloud](https://www.ibm.com/cloud/watson-studio/>) (Sign up for a [free IBM Cloud account](https://dataplatform.cloud.ibm.com/registration/stepone?context=wdp&apps=all>)
# and you can start using Watson Studio Cloud right away).
#
# Table of contents:
#
# - [Describe the business problem](#Describe-the-business-problem)
# * [How decision optimization (prescriptive analytics) can help](#How--decision-optimization-can-help)
# * [Use decision optimization](#Use-decision-optimization)
# * [Step 1: Download the library](#Step-1:-Download-the-library)
# * [Step 2: Model the Data](#Step-2:-Model-the-data)
# * [Step 3: Set up the prescriptive model](#Step-3:-Set-up-the-prescriptive-model)
# * [Define the decision variables](#Define-the-decision-variables)
# * [Express the business constraints](#Express-the-business-constraints)
# * [Express the objective](#Express-the-objective)
# * [Solve with Decision Optimization solve service](#Solve-with-Decision-Optimization-solve-service)
# * [Step 4: Investigate the solution and run an example analysis](#Step-4:-Investigate-the-solution-and-then-run-an-example-analysis)
# * [Summary](#Summary)
# ****
# ### Describe the business problem
#
# * Sudoku is a logic-based, combinatorial number-placement puzzle.
# * The objective is to fill a 9x9 grid with digits so that each column, each row,
# and each of the nine 3x3 sub-grids that compose the grid contains all of the digits from 1 to 9.
# * The puzzle setter provides a partially completed grid, which for a well-posed puzzle has a unique solution.
# #### References
# * See https://en.wikipedia.org/wiki/Sudoku for details
# *****
# ## How decision optimization can help
# * Prescriptive analytics technology recommends actions based on desired outcomes, taking into account specific scenarios, resources, and knowledge of past and current events. This insight can help your organization make better decisions and have greater control of business outcomes.
#
# * Prescriptive analytics is the next step on the path to insight-based actions. It creates value through synergy with predictive analytics, which analyzes data to predict future outcomes.
#
# * Prescriptive analytics takes that insight to the next level by suggesting the optimal way to handle that future situation. Organizations that can act fast in dynamic conditions and make superior decisions in uncertain environments gain a strong competitive advantage.
# <br/>
#
# + For example:
# + Automate complex decisions and trade-offs to better manage limited resources.
# + Take advantage of a future opportunity or mitigate a future risk.
# + Proactively update recommendations based on changing events.
# + Meet operational goals, increase customer loyalty, prevent threats and fraud, and optimize business processes.
#
# ## Use decision optimization
# ### Step 1: Download the library
#
# Run the following code to install Decision Optimization CPLEX Modeling library. The *DOcplex* library contains the two modeling packages, Mathematical Programming and Constraint Programming, referred to earlier.
import sys
try:
import docplex.cp
except:
if hasattr(sys, 'real_prefix'):
#we are in a virtual env.
# !pip install docplex
else:
# !pip install --user docplex
# Note that the more global package <i>docplex</i> contains another subpackage <i>docplex.mp</i> that is dedicated to Mathematical Programming, another branch of optimization.
from docplex.cp.model import *
from sys import stdout
# ### Step 2: Model the data
# #### Grid range
# Shared 0..8 index range used for both rows and columns of the 9x9 grid.
GRNG = range(9)
# #### Different problems
# _zero means cell to be filled with appropriate value_
# +
SUDOKU_PROBLEM_1 = ( (0, 0, 0, 0, 9, 0, 1, 0, 0),
(2, 8, 0, 0, 0, 5, 0, 0, 0),
(7, 0, 0, 0, 0, 6, 4, 0, 0),
(8, 0, 5, 0, 0, 3, 0, 0, 6),
(0, 0, 1, 0, 0, 4, 0, 0, 0),
(0, 7, 0, 2, 0, 0, 0, 0, 0),
(3, 0, 0, 0, 0, 1, 0, 8, 0),
(0, 0, 0, 0, 0, 0, 0, 5, 0),
(0, 9, 0, 0, 0, 0, 0, 7, 0),
)
SUDOKU_PROBLEM_2 = ( (0, 7, 0, 0, 0, 0, 0, 4, 9),
(0, 0, 0, 4, 0, 0, 0, 0, 0),
(4, 0, 3, 5, 0, 7, 0, 0, 8),
(0, 0, 7, 2, 5, 0, 4, 0, 0),
(0, 0, 0, 0, 0, 0, 8, 0, 0),
(0, 0, 4, 0, 3, 0, 5, 9, 2),
(6, 1, 8, 0, 0, 0, 0, 0, 5),
(0, 9, 0, 1, 0, 0, 0, 3, 0),
(0, 0, 5, 0, 0, 0, 0, 0, 7),
)
SUDOKU_PROBLEM_3 = ( (0, 0, 0, 0, 0, 6, 0, 0, 0),
(0, 5, 9, 0, 0, 0, 0, 0, 8),
(2, 0, 0, 0, 0, 8, 0, 0, 0),
(0, 4, 5, 0, 0, 0, 0, 0, 0),
(0, 0, 3, 0, 0, 0, 0, 0, 0),
(0, 0, 6, 0, 0, 3, 0, 5, 4),
(0, 0, 0, 3, 2, 5, 0, 0, 6),
(0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0)
)
# -
# Optional visualization dependencies: fall back to the text renderer when
# numpy or matplotlib is unavailable.
try:
    import numpy as np
    import matplotlib.pyplot as plt
    VISU_ENABLED = True
except ImportError:
    VISU_ENABLED = False
def print_grid(grid):
    """ Print Sudoku grid """
    # Text fallback renderer: 0 cells print as '.', and a blank line
    # separates each horizontal band of three rows.
    for l in GRNG:
        if (l > 0) and (l % 3 == 0):
            stdout.write('\n')
        for c in GRNG:
            v = grid[l][c]
            # NOTE(review): both branches of this ternary emit the same single
            # space, so the condition is a no-op -- the upstream sample likely
            # intended a wider separator at each 3-column boundary. TODO
            # confirm the intended spacing before changing output.
            stdout.write(' ' if (c % 3 == 0) else ' ')
            stdout.write(str(v) if v > 0 else '.')
        stdout.write('\n')
def draw_grid(values):
    """Render a 9x9 Sudoku grid with matplotlib; cells holding 0 are blank."""
    # %matplotlib inline
    fig, ax = plt.subplots(figsize =(4,4))
    lo, hi = 0, 9
    for x in range(0, 9):          # x = screen column
        for y in range(0, 9):      # y = row index into `values`
            cell = values[y][x]
            label = str(cell) if cell > 0 else " "
            # Center each digit in its unit cell; the y axis is flipped so
            # row 0 of `values` appears at the top of the figure.
            ax.text(x + 0.5, 8.5 - y, label, va='center', ha='center')
    ax.set_xlim(lo, hi)
    ax.set_ylim(lo, hi)
    ax.set_xticks(np.arange(hi))
    ax.set_yticks(np.arange(hi))
    ax.grid()
    plt.show()
def display_grid(grid, name):
    """Write the grid's name to stdout, then render the grid: graphically
    when matplotlib/numpy imported successfully, otherwise as plain text."""
    stdout.write(name)
    stdout.write(":\n")
    renderer = draw_grid if VISU_ENABLED else print_grid
    renderer(grid)
# Show all three puzzles so the user can pick one.
display_grid(SUDOKU_PROBLEM_1, "PROBLEM 1")
display_grid(SUDOKU_PROBLEM_2, "PROBLEM 2")
display_grid(SUDOKU_PROBLEM_3, "PROBLEM 3")
# #### Choose your preferred problem (SUDOKU_PROBLEM_1 or SUDOKU_PROBLEM_2 or SUDOKU_PROBLEM_3)
# If you change the problem, ensure to re-run all cells below this one.
problem = SUDOKU_PROBLEM_3
# ### Step 3: Set up the prescriptive model
# Build the constraint-programming model: one integer variable per cell,
# each with domain 1..9.
mdl = CpoModel(name="Sudoku")
# #### Define the decision variables
# NOTE(review): the outer comprehension iterates c and the inner iterates l,
# so this literally builds grid[c][l], while the constraints below index
# grid[l][c]. Because rows and columns receive symmetric all_diff
# constraints on a square grid, the model is still correct.
grid = [[integer_var(min=1, max=9, name="C" + str(l) + str(c)) for l in GRNG] for c in GRNG]
# #### Express the business constraints
# Add alldiff constraints for lines
for l in GRNG:
    mdl.add(all_diff([grid[l][c] for c in GRNG]))
# Add alldiff constraints for columns
for c in GRNG:
    mdl.add(all_diff([grid[l][c] for l in GRNG]))
# Add alldiff constraints for sub-squares
# ssrng yields the top-left corner of each 3x3 sub-square: 0, 3, 6.
ssrng = range(0, 9, 3)
for sl in ssrng:
    for sc in ssrng:
        mdl.add(all_diff([grid[l][c] for l in range(sl, sl + 3) for c in range(sc, sc + 3)]))
# Initialize known cells
# Pin every pre-filled puzzle cell by collapsing its domain to one value.
for l in GRNG:
    for c in GRNG:
        v = problem[l][c]
        if v > 0:
            grid[l][c].set_domain((v, v))
# #### Solve with Decision Optimization solve service
print("\nSolving model....")
# Cap the search at 10 seconds; a well-posed Sudoku solves far faster.
msol = mdl.solve(TimeLimit=10)
# ### Step 4: Investigate the solution and then run an example analysis
display_grid(problem, "Initial problem")
if msol:
    # Extract the solved value of every cell into a plain 9x9 nested list.
    sol = [[msol[grid[l][c]] for c in GRNG] for l in GRNG]
    stdout.write("Solve time: " + str(msol.get_solve_time()) + "\n")
    display_grid(sol, "Solution")
else:
    stdout.write("No solution found\n")
# ## Summary
#
# You learned how to set up and use the IBM Decision Optimization CPLEX Modeling for Python to formulate and solve a Constraint Programming model.
# #### References
# * [CPLEX Modeling for Python documentation](https://rawgit.com/IBMDecisionOptimization/docplex-doc/master/docs/index.html)
# * [Decision Optimization on Cloud](https://developer.ibm.com/docloud/)
# * Need help with DOcplex or to report a bug? Please go [here](https://stackoverflow.com/questions/tagged/docplex)
# * Contact us at <EMAIL>
# Copyright © 2017, 2018 IBM. IPLA licensed Sample Materials.
| examples/cp/jupyter/sudoku.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Create a file with hands and Classes
from deuces.deuces.evaluatorMJM import Evaluator
from deuces.deuces.cardMJM import Card
from deuces.deuces.deckMJM import Deck
import time
import random
# create an evaluator
evaluator = Evaluator()
import numpy as np
import zipfile
import os
# +
# Generate `nhands` random poker hands of 5-7 cards, encode each as a
# 53-column row via evaluator.hand_line, then keep only the unique rows.
nhands = 200000
handarray = np.empty((nhands,53))
deck = Deck()
start = time.time()
for i in range(nhands):
    deck.shuffle()
    # Draw between 5 and 7 cards (randrange's upper bound is exclusive).
    hand = deck.draw(random.randrange(5,8))
    # NOTE(review): the rank-class result is discarded here; this looks like
    # dead work unless evaluate/get_rank_class has internal side effects
    # that hand_line depends on -- confirm before removing.
    evaluator.get_rank_class(evaluator.evaluate(hand))
    handarray[i] = evaluator.hand_line(hand)
# De-duplicate whole rows: view each 53-float row as one opaque void scalar
# so np.unique can compare rows as single values.
b = np.ascontiguousarray(handarray).view(np.dtype((np.void, handarray.dtype.itemsize * handarray.shape[1])))
_, idx = np.unique(b, return_index=True)
unique_hands = handarray[idx]
print(len(unique_hands))
print(time.time()-start)
# -
# Persist the unique hands as a digit-per-column text file, compress it into
# a zip archive, then delete the uncompressed original.
filename = 'npdata_200k'
txt_path = '../data/' + filename + '.txt'
print('Saving data...')
np.savetxt(txt_path,unique_hands,fmt='%d',delimiter='')
print('Creating archive...')
# The context manager closes the archive even if write() raises -- the same
# guarantee the original try/finally provided.
with zipfile.ZipFile('../data/' + filename + '.zip','w', zipfile.ZIP_DEFLATED) as zf:
    zf.write(txt_path)
print('Deleting data file...')
os.remove(txt_path)
| src/Generator_poker_hands.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import pickle
from matplotlib.backends.backend_pdf import PdfPages
# Per-pixel dataset mean (presumably saved by the training pipeline); added
# back to mean-centered images before display in show(). TODO confirm shape.
mean_img = np.load('mean.npy')
# +
def rescaler(img):
    """Linearly stretch array values so the minimum maps to 0 and the
    maximum maps to 255.

    Args:
        img: numeric numpy array (any shape).

    Returns:
        Float array of the same shape with values in [0, 255]. A constant
        input maps to all zeros (the original divided 0/0 and produced NaNs).
    """
    low, high = np.min(img), np.max(img)
    if high == low:
        # Guard against division by zero for constant images.
        return np.zeros_like(img, dtype=float)
    return 255*(img - low) / (high - low)
def show(image, rescale=False, add_mean=True, clip=False):
    """Prepare a channel-first image array for display.

    Args:
        image: array in CHW layout -- assumed (3, 64, 64) given the
            mean reshape below; TODO confirm against callers.
        rescale: if True, linearly stretch values to [0, 255].
        add_mean: if True, add the dataset mean image back (undoes the
            mean-centering applied upstream).
        clip: if True, clamp values into [0, 255].

    Returns:
        HWC array ready for imshow (callers typically cast to uint8).
    """
    img = image.copy()            # never mutate the caller's array
    img = img.transpose(1,2,0)    # CHW -> HWC
    if add_mean:
        img += mean_img.reshape(64,64,3)
    if rescale:
        img = rescaler(img)
    if clip:  # fix: was `clip is True`, which rejected truthy non-True values
        img = np.clip(img, 0, 255)
    return img
# +
# Load precomputed adversarial-example dicts. encoding='latin1' is the
# standard way to read Python-2-era pickles under Python 3 -- presumably
# these files were written by a py2 pipeline.
# NOTE(review): the open() handles are never closed; acceptable in a
# notebook, but `with` blocks would be tidier. Only unpickle trusted files.
fast_grad_images = pickle.load(open('./data/tiny_foolers_fast/tiny_foolers_fast.p', 'rb'), encoding='latin1')
iter_grad_images = pickle.load(open('./data/tiny_foolers/tiny_foolers.p', 'rb'), encoding='latin1')
fast_grad_images_keys = fast_grad_images.keys()
iter_grad_images_keys = iter_grad_images.keys()
# -
# Render a 4x8 comparison sheet into clipping.pdf: for each sampled image,
# four panels -- clean, raw adversarial, rescaled adversarial, clipped
# adversarial. Rows 1-2 use fast-gradient-sign examples, rows 3-4 iterative.
with PdfPages('clipping.pdf') as pdf:
    fig = plt.figure(1, (12, 24))
    rows, cols = 4, 8
    grid = ImageGrid(fig, 111, # similar to subplot(111)
                     nrows_ncols=(rows, cols),
                     axes_pad=0.2, # pad between axes in inch.
                     )
    grid[0].axes.set_ylabel('Fast Gradient\nSign')
    grid[cols].axes.set_ylabel('Fast Gradient\nSign')
    # Each iteration fills one 4-panel group; step 4 across the first two rows.
    for i in range(0,2*cols,4):
        k = np.random.choice(list(fast_grad_images_keys))
        image = fast_grad_images[k][4][0]
        image_fool = fast_grad_images[k][3][0]
        # Strip ticks/labels from all four panels of this group.
        for j in range(i,i+4):
            grid[j].axes.xaxis.set_ticklabels([])
            grid[j].axes.yaxis.set_ticklabels([])
            grid[j].axes.xaxis.set_ticks([])
            grid[j].axes.yaxis.set_ticks([])
        # Column titles only on the top row's two groups (i == 0 and i == 4).
        if i == 0 or i == 4:
            grid[i].axes.set_title('Clean')
            grid[i+1].axes.set_title('Adversarial')
            grid[i+2].axes.set_title('Adv. Rescaled')
            grid[i+3].axes.set_title('Adv. Clipped')
        grid[i].imshow(show(image).astype('uint8'))
        grid[i+1].imshow(show(image_fool).astype('uint8'))
        grid[i+2].imshow(show(image_fool, rescale=True).astype('uint8'))
        grid[i+3].imshow(show(image_fool, clip=True).astype('uint8'))
    grid[2*cols].axes.set_ylabel('Iterative')
    grid[3*cols].axes.set_ylabel('Iterative')
    # Same layout for the iterative-attack examples (rows 3-4, no titles).
    for i in range(2*cols,4*cols,4):
        k = np.random.choice(list(iter_grad_images_keys))
        image = iter_grad_images[k][4][0]
        image_fool = iter_grad_images[k][3][0]
        for j in range(i,i+4):
            grid[j].axes.xaxis.set_ticklabels([])
            grid[j].axes.yaxis.set_ticklabels([])
            grid[j].axes.xaxis.set_ticks([])
            grid[j].axes.yaxis.set_ticks([])
        grid[i].imshow(show(image).astype('uint8'))
        grid[i+1].imshow(show(image_fool).astype('uint8'))
        grid[i+2].imshow(show(image_fool, rescale=True).astype('uint8'))
        grid[i+3].imshow(show(image_fool, clip=True).astype('uint8'))
    pdf.savefig(fig, bbox_inches='tight', pad_inches=0)
| tiny-imagenet/clip_and_rescale.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %config IPCompleter.greedy=True
import pandas as pd
from IPython.display import display, HTML
import os
import numpy as np
import json
# +
with open('HeroesOfPymoli/purchase_data.json') as file:
data = json.load(file)
df = pd.DataFrame(data)
df
# -
# +
#this checks how many rows the DF is
total = df.shape[0]
#this counts all of the uniq items and how many of each were purchased
uniq_items = pd.value_counts(df['Item Name'])
Purchases_made = df['SN']
#this counts how many unique player names there are
uniq_players = Purchases_made.shape[0]
total_spent = round(df['Price'].sum(), 2)
avg_transaction = total_spent/total
#this gets each unique item
how_many_uniq_items = uniq_items.shape[0]
avg_transaction = round(avg_transaction, 2)
display(HTML(' <span style="color:Black"><h1>Players</h1>total number of player </span> '))
print()
uniq_players
# -
display(HTML(' <span style="color:Black"><h1>Purchase Analysis</h1></span> '))
how_many_uniq_items
purchase_analysis = pd.DataFrame({"Number of unique items": [uniq_items.shape[0]],
"Average Purchase Price in dollars": ['${:,.2f}'.format(avg_transaction)],
"Total number of Purchases": [total],
"Total Revenue in dollars": ['${:,.2f}'.format(total_spent)]},index=[''])
purchase_analysis
purchase_analysis
# +
display(HTML(' <span style="color:Black"><h1>Gender Demographic Analysis</h1></span> '))
Gender_demo = pd.value_counts(df['Gender'])
num_men = Gender_demo[0]
num_wom = Gender_demo[1]
num_non = Gender_demo[2]
gender_group = df.groupby('Gender')
percentage_series = gender_group["Age"].count()/uniq_players
#this is the DF
genderDF = pd.DataFrame({"Number of players": gender_group["Age"].count(),
"Percentage of total": percentage_series.map("{0:.0%}".format)})
genderDF
# +
# Purchase analysis by gender: split the purchase DataFrame `df` (defined
# earlier in the notebook) into one sub-frame per reported gender value.
display(HTML(' <span style="color:Black"><h1>Purchase Analysis by Gender Demographic</h1></span> '))
mens_purchases_df = df.loc[df["Gender"] == "Male"]
womens_purchases_df = df.loc[df["Gender"] == "Female"]
non_binary_purchases_df = df.loc[df["Gender"] == "Other / Non-Disclosed"]
#create the rows for the gender_purchase_df data frame * Purchase Count * Average Purchase Price * Total Purchase Value * Normalized Totals
# NOTE(review): "Normalized Totals" is computed as mean(Price)/sum(Price); the
# usual definition is total value per unique purchaser — TODO confirm intent.
mens_col = [mens_purchases_df.shape[0], mens_purchases_df["Price"].mean(), mens_purchases_df["Price"].sum(), mens_purchases_df["Price"].mean()/mens_purchases_df["Price"].sum()]
womens_col = [womens_purchases_df.shape[0], womens_purchases_df["Price"].mean(), womens_purchases_df["Price"].sum(), womens_purchases_df["Price"].mean()/womens_purchases_df["Price"].sum()]
non_binary_col = [non_binary_purchases_df.shape[0], non_binary_purchases_df["Price"].mean(), non_binary_purchases_df["Price"].sum(),non_binary_purchases_df["Price"].mean()/non_binary_purchases_df["Price"].sum()]
#this takes each of my rows and puts them into the new DF
# NOTE(review): the key "Women" is inconsistent with the source value "Female"
# and with the "Male" label style.
gender_purchase_df = pd.DataFrame({"Male": mens_col,
    "Women": womens_col,
    "Other / Non-Disclosed": non_binary_col},index=['Purchase Count', 'Average Purchase Price', 'Total Purchase Value', 'Normalized Totals'])
#formating the data in our DF
gender_purchase_df = gender_purchase_df.transpose()
#gender_purchase_df[] = gender_purchase_df['Purchase Count'].
gender_purchase_df['Average Purchase Price'] = gender_purchase_df['Average Purchase Price'].map("${:,.2f}".format)
gender_purchase_df['Total Purchase Value'] = gender_purchase_df['Total Purchase Value'].map("${:,.2f}".format)
gender_purchase_df['Purchase Count'] = gender_purchase_df['Purchase Count'].map("{:,.0f}".format)
gender_purchase_df
# +
display(HTML(' <span style="color:Black"><h1>Age Demographic</h1></span> '))
minimum_age = df['Age'].min()
maximum_age = df['Age'].max()
age_group = []
age_demographic = pd.DataFrame()
# Bin each row's age into 4-year buckets. Assumes the age is the FIRST column
# of `df` (df.iloc[rows[0]][0]) — TODO confirm column order against the CSV.
for rows in df.iterrows():
    age_group.append((df.iloc[rows[0]][0])/4)
    age_group[rows[0]] = int(age_group[rows[0]])
#I never finished this last part
#for rows in df.iterrows():
# age_demographic[0][]
#now i have a list consisting of every
# NOTE(review): DataFrame.append returns a NEW frame; this result is discarded,
# so the next concat only sees the empty frame (cell admittedly unfinished).
age_demographic.append(age_group)
age_demographic = pd.concat([age_demographic, df])
#this is where I got stuck trying to concatinate the data frame
# -
| Heros1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="zjZww9cMGFhH" colab_type="code" outputId="ef8763e2-06b6-4dc3-8901-cea507c9a9ae" executionInfo={"status": "ok", "timestamp": 1585708895980, "user_tz": -480, "elapsed": 1569, "user": {"displayName": "\u5982\u5b50", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi3ItGjzEGzUOlXTUHjOgeuVA5TICdNcY-Q1TGicA=s64", "userId": "01997730851420384589"}} colab={"base_uri": "https://localhost:8080/", "height": 36}
# Mount Google Drive and work from the project folder (Colab-only setup).
from google.colab import drive
drive.mount('/content/gdrive')
import os
os.chdir('/content/gdrive/My Drive/finch/tensorflow2/text_matching/chinese/main')
# + id="BAFZj-l_L8At" colab_type="code" colab={}
# %tensorflow_version 2.x
# + id="5a3xywkzJCfe" colab_type="code" outputId="ec18cf2e-2f7d-4a7a-f4a0-35cb313b307a" executionInfo={"status": "ok", "timestamp": 1585708898203, "user_tz": -480, "elapsed": 3753, "user": {"displayName": "\u5982\u5b50", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi3ItGjzEGzUOlXTUHjOgeuVA5TICdNcY-Q1TGicA=s64", "userId": "01997730851420384589"}} colab={"base_uri": "https://localhost:8080/", "height": 112}
import csv
import tensorflow as tf
import numpy as np
import pprint
import logging
import time
import math
print("TensorFlow Version", tf.__version__)
# NOTE(review): tf.test.is_gpu_available() is deprecated in TF 2.x in favour of
# tf.config.list_physical_devices('GPU').
print('GPU Enabled:', tf.test.is_gpu_available())
# + id="5mawYgLcG-4x" colab_type="code" colab={}
def get_vocab(f_path):
    """Load a vocab file, mapping each (rstripped) line to its 0-based index."""
    token_to_id = {}
    with open(f_path) as handle:
        for idx, raw_line in enumerate(handle):
            token_to_id[raw_line.rstrip()] = idx
    return token_to_id
# + id="Zs0_iv9tHCmG" colab_type="code" colab={}
# stream data from text files
def data_generator(f_path, char2idx):
    """Stream ((char_ids1, char_ids2), label) tuples from a 3-column CSV.

    The first row is treated as a header and skipped. Characters missing from
    `char2idx` map to len(char2idx), the out-of-vocabulary index.
    """
    with open(f_path) as handle:
        print('Reading', f_path)
        for row_idx, row in enumerate(csv.reader(handle, delimiter=',')):
            if row_idx == 0:
                continue  # skip header
            first_text, second_text, label = row
            ids1 = [char2idx.get(ch, len(char2idx)) for ch in first_text]
            ids2 = [char2idx.get(ch, len(char2idx)) for ch in second_text]
            yield ((ids1, ids2), int(label))
def dataset(is_training, params):
    """Build a padded-batch tf.data pipeline over the train or test CSV.

    Training data is shuffled with params['buffer_size']; both splits are
    padded-batched (texts padded with 0, labels with -1).
    """
    _shapes = (([None], [None]), ())
    _types = ((tf.int32, tf.int32), tf.int32)
    _pads = ((0, 0), -1)
    path_key = 'train_path' if is_training else 'test_path'
    ds = tf.data.Dataset.from_generator(
        lambda: data_generator(params[path_key], params['char2idx']),
        output_shapes = _shapes,
        output_types = _types,)
    if is_training:
        ds = ds.shuffle(params['buffer_size'])
    ds = ds.padded_batch(params['batch_size'], _shapes, _pads)
    return ds
# + id="jhkmFgnBxM8e" colab_type="code" colab={}
class FFNBlock(tf.keras.Model):
    """Two ELU dense layers, each preceded by dropout (the output head's MLP)."""

    def __init__(self, params, name):
        super().__init__(name = name)
        units = params['hidden_units']
        rate = params['dropout_rate']
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.fc1 = tf.keras.layers.Dense(units, tf.nn.elu)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.fc2 = tf.keras.layers.Dense(units, tf.nn.elu)

    def call(self, inputs, training=False):
        """Apply dropout → dense → dropout → dense."""
        hidden = self.dropout1(inputs, training=training)
        hidden = self.fc1(hidden)
        hidden = self.dropout2(hidden, training=training)
        return self.fc2(hidden)
# + id="MgeqVOY0K-rA" colab_type="code" colab={}
class Pyramid(tf.keras.Model):
    """MatchPyramid-style text matcher.

    Both texts are embedded with pretrained char embeddings, encoded by a
    shared BiLSTM, combined into four word-by-word interaction maps (bilinear,
    additive, minus and dot attention), then reduced by a 3-stage CNN +
    max-pool "pyramid" and an FFN head to a single matching logit per pair.
    """

    def __init__(self, params: dict):
        super().__init__()
        # FIX: call() previously read the module-level `params` dict for the
        # fixed lengths, silently coupling the model to notebook state.
        # Capture them at construction time instead (interface unchanged).
        self.fixed_len1 = params['fixed_len1']
        self.fixed_len2 = params['fixed_len2']
        self.embedding = tf.Variable(np.load(params['embedding_path']), name='pretrained_embedding', dtype=tf.float32)
        self.inp_dropout = tf.keras.layers.Dropout(params['dropout_rate'])
        self.encoder = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(
            params['hidden_units'], return_sequences=True), name='encoder')
        self.conv_1 = tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation=tf.nn.elu, padding='same')
        self.conv_2 = tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation=tf.nn.elu, padding='same')
        self.conv_3 = tf.keras.layers.Conv2D(filters=128, kernel_size=3, activation=tf.nn.elu, padding='same')
        self.W_0 = tf.keras.layers.Dense(2*params['hidden_units'], use_bias=False)
        self.W_1_1 = tf.keras.layers.Dense(params['hidden_units'], use_bias=False)
        self.W_1_2 = tf.keras.layers.Dense(params['hidden_units'], use_bias=False)
        self.v_1 = tf.keras.layers.Dense(1, use_bias=False)
        self.W_2 = tf.keras.layers.Dense(params['hidden_units'], use_bias=False)
        self.v_2 = tf.keras.layers.Dense(1, use_bias=False)
        self.W_3 = tf.keras.layers.Dense(params['hidden_units'], use_bias=False)
        self.v_3 = tf.keras.layers.Dense(1, use_bias=False)
        self.flatten = tf.keras.layers.Flatten()
        self.out_hidden = FFNBlock(params, name='out_hidden')
        self.out_linear = tf.keras.layers.Dense(1, name='out_linear')

    def call(self, inputs, training=False):
        """Return a [batch] tensor of matching logits for (text1, text2)."""
        x1, x2 = inputs
        if x1.dtype != tf.int32:
            x1 = tf.cast(x1, tf.int32)
        if x2.dtype != tf.int32:
            x2 = tf.cast(x2, tf.int32)
        batch_sz = tf.shape(x1)[0]
        # Zero-pad each text so its length is an exact multiple of the fixed
        # length; the first max-pool then reduces the interaction map to a
        # fixed (fixed_len1 x fixed_len2) spatial size.
        len1, len2 = x1.shape[1], x2.shape[1]
        stride1, stride2 = len1 // self.fixed_len1, len2 // self.fixed_len2
        if len1 // stride1 != self.fixed_len1:
            remin = (stride1 + 1) * self.fixed_len1 - len1
            zeros = tf.zeros([batch_sz, remin], tf.int32)
            x1 = tf.concat([x1, zeros], 1)
            len1 = x1.shape[1]
            stride1 = len1 // self.fixed_len1
        if len2 // stride2 != self.fixed_len2:
            remin = (stride2 + 1) * self.fixed_len2 - len2
            zeros = tf.zeros([batch_sz, remin], tf.int32)
            x2 = tf.concat([x2, zeros], 1)
            len2 = x2.shape[1]
            stride2 = len2 // self.fixed_len2
        # (Removed dead code from the original: a duplicated pair of int32
        # casts, a recomputed batch_sz, and unused sign masks.)
        x1 = tf.nn.embedding_lookup(self.embedding, x1)
        x2 = tf.nn.embedding_lookup(self.embedding, x2)
        x1 = self.inp_dropout(x1, training=training)
        x2 = self.inp_dropout(x2, training=training)
        x1, x2 = self.encoder(x1), self.encoder(x2)
        x = []
        # attention 1 (bilinear)
        a = tf.matmul(x1, self.W_0(x2), transpose_b=True)
        x.append(tf.expand_dims(a, -1))
        # attention 2 (add)
        a1 = tf.expand_dims(self.W_1_1(x1), 2)
        a2 = tf.expand_dims(self.W_1_2(x2), 1)
        x.append(self.v_1(tf.tanh(a1 + a2)))
        # attention 3 (minus)
        a1 = tf.expand_dims(x1, 2)
        a2 = tf.expand_dims(x2, 1)
        x.append(self.v_2(tf.tanh(self.W_2(tf.abs(a1 - a2)))))
        # attention 4 (dot)
        a1 = tf.expand_dims(x1, 2)
        a2 = tf.expand_dims(x2, 1)
        x.append(self.v_3(tf.tanh(self.W_3(a1 * a2))))
        x = tf.concat(x, -1)
        # CNN pyramid: pool to fixed_len1 x fixed_len2, then halve twice.
        x = self.conv_1(x)
        x = tf.nn.max_pool(x, [1, stride1, stride2, 1], [1, stride1, stride2, 1], 'VALID')
        x = self.conv_2(x)
        x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        x = self.conv_3(x)
        x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
        x = self.flatten(x)
        x = self.out_hidden(x, training=training)
        x = self.out_linear(x)
        x = tf.squeeze(x, 1)
        return x
# + id="GtOWHpEAIMvu" colab_type="code" colab={}
# Hyper-parameters and data locations for the pyramid matcher.
params = {
    'train_path': '../data/train.csv',          # CSV: text1,text2,label
    'test_path': '../data/test.csv',
    'vocab_path': '../vocab/char.txt',          # one char per line
    'embedding_path': '../vocab/char.npy',      # pretrained char embeddings
    'batch_size': 32,
    'buffer_size': 100000,                      # shuffle buffer
    'num_blocks': 2,
    'dropout_rate': 0.2,
    'hidden_units': 300,
    'fixed_len1': 12,                           # pooled map height
    'fixed_len2': 12,                           # pooled map width
    'lr': 4e-4,
    'clip_norm': 5.,                            # global gradient-norm clip
    'num_patience': 5,                          # early-stopping window
}
# + id="gK47NT01MsJn" colab_type="code" colab={}
def is_descending(history, num_patience=None):
    """Return True if the tail of `history` is strictly decreasing.

    Used for early stopping: a strictly falling accuracy over the last
    `num_patience` steps means no improvement.

    Parameters
    ----------
    history : sequence of comparable values (e.g. accuracies).
    num_patience : int, optional
        Window size; only the last (num_patience + 1) entries are examined.
        Defaults to the module-level params['num_patience'] so existing
        call sites keep their behaviour.
    """
    if num_patience is None:
        num_patience = params['num_patience']
    window = history[-(num_patience + 1):]
    # Strictly decreasing: every adjacent pair must fall.
    return all(window[i - 1] > window[i] for i in range(1, len(window)))
# + id="PVi6QK_TIKGR" colab_type="code" outputId="fa0d551a-5cda-410c-cb68-060c9e59db1b" executionInfo={"status": "error", "timestamp": 1585716789018, "user_tz": -480, "elapsed": 6083306, "user": {"displayName": "\u5982\u5b50", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi3ItGjzEGzUOlXTUHjOgeuVA5TICdNcY-Q1TGicA=s64", "userId": "01997730851420384589"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Build vocab + model + optimizer, then train with accuracy-based early stop.
params['char2idx'] = get_vocab(params['vocab_path'])
params['vocab_size'] = len(params['char2idx']) + 1
model = Pyramid(params)
model.build([[None, 24], [None, 24]])
pprint.pprint([(v.name, v.shape) for v in model.trainable_variables])
# NOTE(review): Adam is created with the constant initial lr; the exponential
# decay schedule is applied manually via optim.lr.assign each step below.
decay_lr = tf.optimizers.schedules.ExponentialDecay(params['lr'], 1000, 0.99)
optim = tf.optimizers.Adam(params['lr'])
global_step = 0
history_acc = []
best_acc = .0
t0 = time.time()
logger = logging.getLogger('tensorflow')
logger.setLevel(logging.INFO)
while True:
    # TRAINING
    for ((text1, text2), labels) in dataset(is_training=True, params=params):
        with tf.GradientTape() as tape:
            logits = model((text1, text2), training=True)
            loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.cast(labels, tf.float32), logits=logits)
            loss = tf.reduce_mean(loss)
        # Step the decay schedule, then clip-and-apply gradients.
        optim.lr.assign(decay_lr(global_step))
        grads = tape.gradient(loss, model.trainable_variables)
        grads, _ = tf.clip_by_global_norm(grads, params['clip_norm'])
        optim.apply_gradients(zip(grads, model.trainable_variables))
        if global_step % 100 == 0:
            logger.info("Step {} | Loss: {:.4f} | Spent: {:.1f} secs | LR: {:.6f}".format(
                global_step, loss.numpy().item(), time.time()-t0, optim.lr.numpy().item()))
            t0 = time.time()
        global_step += 1
    # EVALUATION
    m = tf.keras.metrics.Accuracy()
    for ((text1, text2), labels) in dataset(is_training=False, params=params):
        logits = tf.sigmoid(model((text1, text2), training=False))
        y_pred = tf.cast(tf.math.greater_equal(logits, .5), tf.int32)
        m.update_state(y_true=labels, y_pred=y_pred)
    acc = m.result().numpy()
    logger.info("Evaluation: Testing Accuracy: {:.3f}".format(acc))
    history_acc.append(acc)
    if acc > best_acc:
        best_acc = acc
        # you can save model here
    logger.info("Best Accuracy: {:.3f}".format(best_acc))
    # Early stop once accuracy fell for num_patience consecutive evaluations.
    if len(history_acc) > params['num_patience'] and is_descending(history_acc):
        logger.info("Testing Accuracy not improved over {} epochs, Early Stop".format(params['num_patience']))
        break
| finch/tensorflow2/text_matching/chinese/main/pyramid.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# NOTE(review): the cells below are exploratory and rely on notebook session
# state — several names (`events`, `diphotons`, `uproot`, `np`) are used before
# the cell that imports/defines them in file order.
from workflows import DYStudiesProcessor
import json
from coffea.processor import (
    run_uproot_job,
    iterative_executor,
    futures_executor,
    NanoAODSchema
)
with open("dummy_samples.json") as f:
    samples = json.load(f)
import json
import os.path as osp
metaconditions_file = "metaconditions/Era2017_legacy_v1.json"
with open(metaconditions_file) as f:
    metaconditions = json.load(f)
dystudies = DYStudiesProcessor(metaconditions, False, True, './outputs/')
# -
# Run the processor over all samples with a 4-worker futures executor.
histos = run_uproot_job(
    samples,
    'Events',
    dystudies,
    executor=futures_executor,
    executor_args={"schema": NanoAODSchema,"workers": 4},
)
# NOTE(review): `ak` and `events` are only defined in later cells / prior
# session state — this line fails if executed top-to-bottom.
x = ak.ones_like(ak.flatten(events.Photon.pt))
# +
import pandas as pd
import pyarrow as pa
import awkward as ak
x = pd.read_parquet('./outputs/')
#y = ak.from_arrow(pa.Table.from_pandas(x))
# -
x
# +
# di-photon MVA
import xgboost as xg
min_diphoton_mass = 100
max_diphoton_mass = 180
model = xg.Booster()
model.load_model('aux-data/altDiphoModel_coffea.model')
# get the number of diphotons per row
# and save for re-wrapping xgb outputs
counts = ak.num(diphotons, axis=1)
# extract diphoton vars into flat lists
dipho_leadIDMVA = ak.flatten(diphotons["0"].mvaID)
dipho_subleadIDMVA = ak.flatten(diphotons["1"].mvaID)
dipho_lead_ptoM = ak.flatten(diphotons["0"].pt / diphotons.mass)
dipho_sublead_ptoM = ak.flatten(diphotons["1"].pt / diphotons.mass)
dipho_lead_eta = ak.flatten(diphotons["0"].eta)
dipho_sublead_eta = ak.flatten(diphotons["1"].eta)
diphoVars = ['dipho_leadIDMVA', 'dipho_subleadIDMVA', 'dipho_lead_ptoM',
    'dipho_sublead_ptoM', 'dipho_leadEta', 'dipho_subleadEta',
    'CosPhi', 'vtxprob', 'sigmarv', 'sigmawv']
allVars = diphoVars + ["dipho_mass"]
f = uproot.open('data/ggH_powheg_UL_2017.root')
tree = f['vbfTagDumper/trees/ggh_125_13TeV_GeneralDipho']
arrays = tree.arrays(allVars, how=dict)
mask = ( (arrays["dipho_mass"]> min_diphoton_mass) & (arrays["dipho_mass"]< max_diphoton_mass)
    & (arrays["dipho_leadIDMVA"]>-0.9) & (arrays["dipho_subleadIDMVA"]>-0.9)
    & (arrays["dipho_lead_ptoM"]>0.333) & (arrays["dipho_sublead_ptoM"]>0.25))
# NOTE(review): np.column_stack with a generator relies on deprecated NumPy
# behaviour — pass a list instead on newer NumPy versions.
x = np.column_stack((ak.to_numpy(arrays[var][mask]) for var in diphoVars))[:100]
print(x.shape)
diphoMatrix = xg.DMatrix(x, feature_names=diphoVars)
y = model.predict(diphoMatrix)
# +
import xgboost
import warnings
from functools import partial
from pprint import pprint
import json
import os.path as osp
metaconditions_file = "metaconditions/Era2017_legacy_xgb_v1.json"
with open(metaconditions_file) as f:
    metaconditions = json.load(f)
print(metaconditions["PhoIdInputCorrections"])
from hgg_coffea.tools.chained_quantile import ChainedQuantileRegression
test = ChainedQuantileRegression(**metaconditions["PhoIdInputCorrections"])
# +
from coffea import nanoevents
import time
# get some events to play with
events = nanoevents.NanoEventsFactory.from_root("../data/step2_HggNano_test.root").events()
# Time the chained-quantile correction over the test events.
tic = time.monotonic()
photons = test.apply(events)
toc = time.monotonic()
print(toc - tic)
# -
test.transforms["isolations"]["phoIso"]["peak2tail"]
# +
# Measure pickled size of the regression object, raw and lz4-compressed.
import pickle
import gzip
x = pickle.dumps(test)
# +
import lz4.frame
print(len(x))
print(len(lz4.frame.compress(x, compression_level=9)))
# +
import time
import xgboost
from functools import partial
from pprint import pprint
import json
import os.path as osp
import awkward as ak
import numpy as np
import vector
from coffea import nanoevents
vector.register_awkward()
metaconditions_file = "metaconditions/Era2017_legacy_xgb_v1.json"
with open(metaconditions_file) as f:
    metaconditions = json.load(f)
print(metaconditions["flashggDiPhotonMVA"])
var_order = metaconditions["flashggDiPhotonMVA"]["inputs"]
# get some events to play with
events = nanoevents.NanoEventsFactory.from_root("../data/step2_HggNano_test.root").events()
# Build all photon pairs and attach the combined four-vector kinematics.
diphotons = ak.combinations(events.Photon, 2)
p4s = diphotons.slot0 + diphotons.slot1
diphotons["pt"] = p4s.pt
diphotons["eta"] = p4s.eta
diphotons["phi"] = p4s.phi
diphotons["mass"] = p4s.mass
diphotons["charge"] = p4s.charge
diphotons = ak.with_name(diphotons, "PtEtaPhiMCandidate")
# Collect the BDT input variables (slot0 = lead, slot1 = sublead photon).
bdt_vars = {}
bdt_vars["dipho_leadIDMVA"] = diphotons.slot0.mvaID
bdt_vars["dipho_subleadIDMVA"] = diphotons.slot1.mvaID
bdt_vars["dipho_leadEta"] = diphotons.slot0.eta
bdt_vars["dipho_subleadEta"] = diphotons.slot1.eta
bdt_vars["dipho_lead_ptoM"] = diphotons.slot0.pt / diphotons.mass
bdt_vars["dipho_sublead_ptoM"] = diphotons.slot1.pt / diphotons.mass
# calculate sigma_wv
def calc_displacement(photons, events):
    """Displacement vector from the event primary vertex to each photon's calo position."""
    deltas = {
        "x": photons.x_calo - events.PV.x,
        "y": photons.y_calo - events.PV.y,
        "z": photons.z_calo - events.PV.z,
    }
    return ak.zip(deltas, with_name="Vector3D")
# Mass-resolution inputs for the diphoton BDT: sigma_rv (right vertex) from
# the photon energy errors, sigma_wv (wrong vertex) adding the beam-spot term.
tic = time.monotonic()
v_lead = calc_displacement(diphotons.slot0, events)
v_sublead = calc_displacement(diphotons.slot1, events)
p_lead = v_lead.unit() * diphotons.slot0.energyRaw
p_lead["energy"] = diphotons.slot0.energyRaw
p_lead = ak.with_name(p_lead, "Momentum4D")
p_sublead = v_sublead.unit() * diphotons.slot1.energyRaw
p_sublead["energy"] = diphotons.slot1.energyRaw
p_sublead = ak.with_name(p_sublead, "Momentum4D")
sech_lead = 1.0 / np.cosh(p_lead.eta)
sech_sublead = 1.0/ np.cosh(p_sublead.eta)
# NOTE(review): variables named tanh_* are computed with np.cos(theta);
# cos(theta) == tanh(eta) for massless directions — confirm this identity is
# the intent rather than a typo.
tanh_lead = np.cos(p_lead.theta)
tanh_sublead = np.cos(p_sublead.theta)
cos_dphi = np.cos(p_lead.deltaphi(p_sublead))
numerator_lead = sech_lead * (sech_lead * tanh_sublead - tanh_lead * sech_sublead * cos_dphi)
numerator_sublead = sech_sublead * (sech_sublead * tanh_lead - tanh_sublead * sech_lead * cos_dphi)
denominator = 1.0 - tanh_lead * tanh_sublead - sech_lead * sech_sublead * cos_dphi
add_reso = 0.5 * (-np.sqrt(2.0) * events.BeamSpot.sigmaZ / denominator) * (numerator_lead/p_lead.mag + numerator_sublead/p_sublead.mag)
dEnorm_lead = diphotons.slot0.energyErr / diphotons.slot0.energy
dEnorm_sublead = diphotons.slot1.energyErr / diphotons.slot1.energy
sigma_m = 0.5 * np.sqrt(dEnorm_lead**2 + dEnorm_sublead**2)
sigma_wv = np.sqrt(add_reso**2 + sigma_m**2)
toc = time.monotonic()
print("var calc", toc - tic)
vtx_prob = ak.full_like(sigma_m, 0.999) # !!!! placeholder !!!!
bdt_vars["CosPhi"] = cos_dphi
bdt_vars["vtxprob"] = vtx_prob
bdt_vars["sigmarv"] = sigma_m
bdt_vars["sigmawv"] = sigma_wv
# create the diphoton BDT
diphotonMVA = xgboost.Booster()
diphotonMVA.load_model(metaconditions["flashggDiPhotonMVA"]["weightFile"])
# Flatten per-event lists to score with xgboost, then re-wrap with counts.
counts = ak.num(diphotons, axis=-1)
bdt_inputs = np.column_stack([ak.to_numpy(ak.flatten(bdt_vars[name])) for name in var_order])
tic = time.monotonic()
tempmatrix = xgboost.DMatrix(bdt_inputs, feature_names=var_order)
toc = time.monotonic()
print("dmatrix", toc - tic)
tic = time.monotonic()
scores = diphotonMVA.predict(tempmatrix)
toc = time.monotonic()
print("predict", toc - tic)
diphotons["bdt_score"] = ak.unflatten(scores, counts)
# -
events.Photon.pfChargedIsoWorstVtx
scores.shape[0]
# +
# Sanity-check the flashgg dumper tree against the converted coffea model.
import uproot
import xgboost
import numpy as np
import awkward as ak
diphoVars = ['dipho_leadIDMVA', 'dipho_subleadIDMVA', 'dipho_lead_ptoM',
    'dipho_sublead_ptoM', 'dipho_leadEta', 'dipho_subleadEta',
    'CosPhi', 'vtxprob', 'sigmarv', 'sigmawv']
allVars = diphoVars + ["dipho_mass"]
f = uproot.open('../data/ggH_powheg_UL_2017.root')
tree = f['vbfTagDumper/trees/ggh_125_13TeV_GeneralDipho']
arrays = tree.arrays(allVars, how=dict)
mask = ( (arrays["dipho_mass"]>100.) & (arrays["dipho_mass"]<180.)
    & (arrays["dipho_leadIDMVA"]>-0.9) & (arrays["dipho_subleadIDMVA"]>-0.9)
    & (arrays["dipho_lead_ptoM"]>0.333) & (arrays["dipho_sublead_ptoM"]>0.25))
# NOTE(review): generator passed to np.column_stack — deprecated; use a list.
x = np.column_stack((ak.to_numpy(arrays[var][mask]) for var in diphoVars))[:100]
print(x.shape)
diphoMatrix = xgboost.DMatrix(x, feature_names=diphoVars)
model = xgboost.Booster()
model.load_model('aux-data/altDiphoModel_coffea.model')
y = model.predict(diphoMatrix)
print(type(x))
print(type(y))
# -
# Misc scratch cells: output-naming, awkward/numpy behaviour experiments.
events.behavior["__events_factory__"]._partition_key.replace('/', '_') + '.parquet'
events.metadata
metaconditions
from workflows import taggers
dir(taggers)
# +
import awkward as ak
x = ak.Array([[1.0], [2.0, 3.0], [4.0, 5.0, 6.0]])
# -
counts = ak.num(x, axis=1)
ak.unflatten([0, 0] * ak.flatten(x)[:, None], counts)
# +
import numpy as np
x = np.array([0,1,2])
y = np.array([3,4,5])
np.stack((x,y), axis=1)
# -
ak.Array([1,2,3,4])[ak.min(ak.Array([[1,-1], []]), axis=1)]
str(['abc', 1, 5.0])
eval("['abc', 1, 5.0]")
import sys
sys.maxsize
(None or 1.0)
| processor_dev.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Predicting-Mutation-Effects
# language: python
# name: predicting-mutation-effects
# ---
# # TP53
# +
import os
# NOTE(review): chdir side effect — all relative paths below depend on the
# notebook's starting directory; re-running this cell moves up again.
os.chdir('../')
# +
from IPython.display import display
from PredatorAnalysis import PredatorAnalysis
# Paths
## SNVs
# NOTE(review): machine-specific absolute Windows path — not portable.
SNV_COMMON_PATH = "C:/Users/ibrah/Desktop/TUSEB_Study/Data_Collection_and_Filtering/SNV/"
SNV_BRCA_PATH = SNV_COMMON_PATH + "SNV_BRCA_hg38.csv"
PREDICTION_ID = "c6b0c1a8/"
PREDICTIONS_COMMON_PATH = "../data/predictions_datasets/brca_prediction_2021-11-06/" + PREDICTION_ID
PREDICTION_BRCA_REDUCED_PATH = PREDICTIONS_COMMON_PATH + "predictions_soft_2021-11-06.csv"
ELASPIC_RESULTS_COMMON_PATH = "../../My-ELASPIC-Web-API/Elaspic_Results/Merged_Results/" # elaspic_results_datasets
BRCA_CORE_PATH = ELASPIC_RESULTS_COMMON_PATH + "BRCA_Core_2021-09-28.txt"
BRCA_INTERFACE_PATH = ELASPIC_RESULTS_COMMON_PATH + "BRCA_Interface_2021-09-28.txt"
# CANCER MINE GENES
CANCER_MINE_ALL_PATH = "../data/cancer_mine_genes/all_genes_2021-10-26.txt"
CANCER_MINE_BREAST_PATH = "../data/cancer_mine_genes/breast_genes_2021-10-26.txt"
# Reflect changes in the modules immediately.
# %load_ext autoreload
# %autoreload 2
# + tags=[]
# Build the BRCA analysis object from the SNV, prediction, ELASPIC and
# CancerMine reference files configured above.
predator_analysis = PredatorAnalysis(
    tcga="brca",
    snv_path=SNV_BRCA_PATH,
    prediction_data_path=PREDICTION_BRCA_REDUCED_PATH,
    prediction_id=PREDICTION_ID,
    elaspic_core_path=BRCA_CORE_PATH,
    elaspic_interface_path=BRCA_INTERFACE_PATH,
    reference_data_name="cancermine",
    reference_data_spec_cohort_path=CANCER_MINE_BREAST_PATH,
    reference_data_path=CANCER_MINE_ALL_PATH
)
# -
# Simplified SNV Dataset
print(predator_analysis.data_materials["brca_snv_data_simplified"].shape)
predator_analysis.data_materials["brca_snv_data_simplified"].head(3)
# + tags=[]
predator_analysis.prepare_analysis()
# -
print('Number of proteins in ELASPIC BRCA:', len(predator_analysis.data_materials["brca_elaspic_proteins"]))
print('Number of patients in BRCA:', len(predator_analysis.data_materials["brca_patients"]))
# Compare the raw SNV patient barcodes (truncated to 12 chars) with the
# prepared patient list; the symmetric difference shows dropped/added IDs.
patients_original = [p[:12] for p in predator_analysis.data_materials["brca_snv_data"]["Tumor_Sample_Barcode"].unique()]
patients_original = sorted(patients_original)
print(len(patients_original))
patients_after = sorted(predator_analysis.data_materials["brca_patients"])
print(len(patients_after))
set(patients_original) ^ set(patients_after)
BRCA_CORE_PATH
BRCA_INTERFACE_PATH
import pandas as pd
# Read each ELASPIC results file once (the original issued four separate
# read_csv calls over the same file).
core_df = pd.read_csv(BRCA_CORE_PATH, sep='\t', low_memory=False)
core_df.head()
# BUG FIX: pairs_interface was previously built from BRCA_CORE_PATH (copy-paste
# error), making it identical to pairs_core, so every core pair looked
# "overlapping". It must come from the interface results file.
interface_df = pd.read_csv(BRCA_INTERFACE_PATH, sep='\t', low_memory=False)
# Unique "UniProtID.Mutation" pair strings per dataset, sorted.
pairs_core = sorted((core_df["UniProt_ID"] + "." + core_df["Mutation"]).unique())
pairs_interface = sorted((interface_df["UniProt_ID"] + "." + interface_df["Mutation"]).unique())
# + tags=[]
# (protein, mutation) pairs present in BOTH the core and interface results.
overlapping_pairs = sorted(set(pairs_interface) & set(pairs_core))
overlapping_pairs[:10]
# -
"P04049.E607Q" in overlapping_pairs
predator_analysis.data_materials["brca_elaspic_core_data_simplified"][
    (predator_analysis.data_materials["brca_elaspic_core_data_simplified"]["UniProt_ID"] == "A0AV96") &
    (predator_analysis.data_materials["brca_elaspic_core_data_simplified"]["Mutation"] == "F285L")
]
from helpers.helpers_analysis.get_patient_protein_to_mutations_dict import get_patient_protein_to_mutations_dict
from helpers.helpers_analysis.is_in_elaspic import is_in_elaspic
from helpers.helpers_analysis.is_core import is_core
from tqdm.notebook import tqdm
# + jupyter={"outputs_hidden": true} tags=[]
# Collect patients that (a) have at least one mutation covered by ELASPIC and
# (b) carry a P04637 (TP53) mutation.
# NOTE(review): the in-ELASPIC check runs over ALL of the patient's proteins,
# not just TP53 — confirm that is the intended selection.
pats = set()
for patient in tqdm(predator_analysis.data_materials["brca_patients"]):
    patient_snv = predator_analysis.data_materials["brca_snv_data_simplified"][
        predator_analysis.data_materials["brca_snv_data_simplified"]["Tumor_Sample_Barcode"] == patient
    ]
    patient_protein_to_mutations = get_patient_protein_to_mutations_dict(patient_snv)
    for protein, mutations in patient_protein_to_mutations.items():
        for mutation in mutations:
            in_elaspic = is_in_elaspic(
                protein,
                mutation,
                elaspic_core_data=predator_analysis.data_materials["brca_elaspic_core_data_simplified"],
                elaspic_interface_data=predator_analysis.data_materials["brca_elaspic_interface_processed_data"]
            )
            if in_elaspic:
                if "P04637" in patient_protein_to_mutations:
                    pats.add(patient)
# + tags=[]
pats = sorted(pats)
print(len(pats))
pats
# -
def is_any_mutations_core(protein, mutations):
    """Return True if at least one mutation of `protein` is an ELASPIC *core* entry.

    A mutation counts only when it is both present in ELASPIC and classified
    as core; every mutation is examined (no early exit, as in the original).
    """
    assert isinstance(protein, str)
    assert isinstance(mutations, list)
    core_data = predator_analysis.data_materials["brca_elaspic_core_data_simplified"]
    interface_data = predator_analysis.data_materials["brca_elaspic_interface_processed_data"]
    found_core = False
    for mut in mutations:
        covered = is_in_elaspic(
            protein,
            mut,
            elaspic_core_data=core_data,
            elaspic_interface_data=interface_data
        )
        if covered and is_core(protein, mut, core_data):
            found_core = True
    return found_core
# Spot checks: one known pair, then a random interface protein and the set of
# its interface mutations.
is_any_mutations_core("P15923", ["R561W"])
predator_analysis.data_materials["brca_elaspic_interface_processed_data"]["UniProt_ID"].sample()
set(predator_analysis.data_materials["brca_elaspic_interface_processed_data"][
    predator_analysis.data_materials["brca_elaspic_interface_processed_data"]["UniProt_ID"] == "P15923"
]["Mutation"])
# +
# Sample one protein from the interface data and test whether any of its
# mutations is a core entry.
print("interface data:")
sampled_protein = predator_analysis.data_materials["brca_elaspic_interface_processed_data"]["UniProt_ID"].sample()
[sampled_protein] = sampled_protein
# display(sampled_protein)
muts = sorted(set(predator_analysis.data_materials["brca_elaspic_interface_processed_data"][
    predator_analysis.data_materials["brca_elaspic_interface_processed_data"]["UniProt_ID"] == sampled_protein
]["Mutation"]))
print(sampled_protein, muts)
# BUG FIX: the original passed the stale loop variable `protein` (left over
# from an earlier cell) instead of the protein actually sampled here.
print("is_any_mutations_core: {}".format(is_any_mutations_core(sampled_protein, muts)))
# +
# Look up a specific TP53 (P04637) mutation in the core data.
p = "P04637"
m = "R280S"
display(
    predator_analysis.data_materials["brca_elaspic_core_data_simplified"][
        (predator_analysis.data_materials["brca_elaspic_core_data_simplified"]["UniProt_ID"] == p) &
        (predator_analysis.data_materials["brca_elaspic_core_data_simplified"]["Mutation"] == m)
    ]
)
# +
# f"{p}.{m}" in overlapping_pairs
# -
# Re-bind `p` to a random protein sampled from the interface data.
[p] = predator_analysis.data_materials["brca_elaspic_interface_processed_data"].sample()["UniProt_ID"]
p
# BUG FIX: the second filter originally compared UniProt_ID against "N287T",
# which is a mutation string, so the query always returned an empty frame.
# It must filter the Mutation column.
predator_analysis.data_materials["brca_elaspic_core_data_simplified"][
    (predator_analysis.data_materials["brca_elaspic_core_data_simplified"]["UniProt_ID"] == "Q9H172") &
    (predator_analysis.data_materials["brca_elaspic_core_data_simplified"]["Mutation"] == "N287T")
]
# + jupyter={"outputs_hidden": true} tags=[]
# Scan again, restricted to TP53 (P04637) mutations only.
# NOTE(review): patient_interface_only_flag is computed but never used, and
# patients are added whenever any TP53 mutation is in ELASPIC — confirm the
# flag was meant to gate pats.add().
pats = set()
for patient in tqdm(predator_analysis.data_materials["brca_patients"]):
    patient_snv = predator_analysis.data_materials["brca_snv_data_simplified"][
        predator_analysis.data_materials["brca_snv_data_simplified"]["Tumor_Sample_Barcode"] == patient
    ]
    patient_protein_to_mutations = get_patient_protein_to_mutations_dict(patient_snv)
    patient_interface_only_flag = True
    # I am only looking for P04637
    for protein, mutations in patient_protein_to_mutations.items():
        # Skip proteins other than P04637
        if protein != "P04637":
            continue
        for mutation in mutations:
            in_elaspic = is_in_elaspic(
                protein,
                mutation,
                elaspic_core_data=predator_analysis.data_materials["brca_elaspic_core_data_simplified"],
                elaspic_interface_data=predator_analysis.data_materials["brca_elaspic_interface_processed_data"]
            )
            if in_elaspic:
                if is_core(protein, mutation, predator_analysis.data_materials["brca_elaspic_core_data_simplified"]):
                    patient_interface_only_flag = False
                if "P04637" in patient_protein_to_mutations:
                    pats.add(patient)
# + tags=[]
pats = sorted(pats)
print(len(pats))
pats
# -
# Reference set of patient barcodes (for eyeballing against `pats`).
{'TCGA-AN-A0FF',
 'TCGA-AO-A03O',
 'TCGA-AR-A0TX',
 'TCGA-AR-A0TY',
 'TCGA-BH-A1F8',
 'TCGA-C8-A26Y',
 'TCGA-E9-A1ND',
 'TCGA-E9-A22E',
 'TCGA-EW-A1IZ'}
# + tags=[]
predator_analysis.construct_analysis_table()
# -
preliminary_data = predator_analysis.data_materials["brca_preliminary_data"]
def add_patient_interface_count(preliminary_data, snv_data, elaspic_core_data, elaspic_interface_data):
    """
    Add, per protein, the number of patients in which ONLY interface mutations
    (no core mutation) of that protein occurred, as a new column.

    NOTE(review): this debugging variant (a) only processes protein "P04637"
    (TP53) — every other protein's count stays 0, (b) writes the result to
    column 'PATIENT_INTERFACE_COUNT2', and (c) prints progress for one
    hard-coded patient. Remove the filter/prints before general use.

    Parameters
    ----------
    preliminary_data : <DataFrame>
        Preliminary data which `PATIENT_INTERFACE_COUNT` column will be to be added on.
    snv_data : <DataFrame>
        An SNV dataframe, we use the processed version of SNV.
    elaspic_core_data : <DataFrame>
        The ELASPIC results file that contains only the `core` type entries.
    elaspic_interface_data : <DataFrame>
        The ELASPIC results file that contains only the `interface` type entries. It will be used to
        check if a specific (protein, mutation) pair is an interface via `is_interface` function.
    Returns
    -------
    None. Modifies the dataframe.
    """
    # Initialize the `proteins_to_patient_interface_counts_dict` dictionary with proteins in preliminary_data in
    # correct order, and the default values of 0.
    proteins_to_patient_interface_counts_dict = dict.fromkeys(list(preliminary_data['PROTEIN']), 0)
    # Get the patient IDs.
    patients = list(snv_data['Tumor_Sample_Barcode'].unique())
    for patient in tqdm(patients):
        if patient == "TCGA-A2-A04T":
            print(F"PROCESSING PATIENT {patient}")
        # Patient filtered dataframe: Filter SNV file for current patient.
        patient_snv_data = snv_data[snv_data["Tumor_Sample_Barcode"] == patient]
        for protein, mutations in get_patient_protein_to_mutations_dict(patient_snv_data).items():
            if protein != "P04637":
                continue
            # core_flag: 1 = a core mutation found (stop), 0 = interface only
            # so far, 'N/A' = nothing in ELASPIC yet.
            core_flag = 'N/A'
            # print(protein, mutations)
            for mutation in mutations:
                # Check if (protein.mutation) is in ELASPIC.
                if is_in_elaspic(protein, mutation, elaspic_core_data, elaspic_interface_data):
                    # print(f'{protein}.{mutation} IS IN ELASPIC.')
                    if is_core(protein, mutation, elaspic_core_data):
                        # print(' → core found!')
                        core_flag = 1
                        break
                    else:
                        # print(' → interface found!')
                        core_flag = 0
                else:
                    # print(f'{protein}.{mutation} IS NOT IN ELASPIC.')
                    # print(f'CORE_FLAG = {core_flag}')
                    continue
            if core_flag == 0:
                # Adding the corresponding gene counter
                print(f"adding patient: {patient} -- protein: {protein}")
                proteins_to_patient_interface_counts_dict[protein] += 1
    # Add the column
    preliminary_data['PATIENT_INTERFACE_COUNT2'] = list(proteins_to_patient_interface_counts_dict.values())
def add_patient_core_count(preliminary_data, snv_data, elaspic_core_data, elaspic_interface_data):
    """
    Count, per protein, the patients in which at least one CORE mutation of
    that protein occurred, and return the counts as a list ordered like
    preliminary_data['PROTEIN'].

    NOTE(review): despite the "add_" name this variant does NOT modify
    `preliminary_data`; it returns the list instead. It is also restricted to
    protein "P04637" (debugging), so all other counts are 0.
    """
    # Initialize the `proteins_to_patient_interface_counts_dict` dictionary with proteins in preliminary_data in
    # correct order, and the default values of 0.
    proteins_to_patient_interface_counts_dict = dict.fromkeys(list(preliminary_data['PROTEIN']), 0)
    # Get the patient IDs.
    patients = list(snv_data['Tumor_Sample_Barcode'].unique())
    for patient in tqdm(patients):
        # Patient filtered dataframe: Filter SNV file for current patient.
        patient_snv_data = snv_data[snv_data["Tumor_Sample_Barcode"] == patient]
        for protein, mutations in get_patient_protein_to_mutations_dict(patient_snv_data).items():
            if protein != "P04637":
                continue
            core_flag = 'N/A'
            # print(protein, mutations)
            for mutation in mutations:
                # Check if (protein.mutation) is in ELASPIC.
                if is_in_elaspic(protein, mutation, elaspic_core_data, elaspic_interface_data):
                    # print(f'{protein}.{mutation} IS IN ELASPIC.')
                    if is_core(protein, mutation, elaspic_core_data):
                        # print(' → core found!')
                        core_flag = 1
                        break
                    else:
                        # print(' → interface found!')
                        core_flag = 0
                else:
                    # print(f'{protein}.{mutation} IS NOT IN ELASPIC.')
                    # print(f'CORE_FLAG = {core_flag}')
                    continue
            if core_flag == 1:
                # Adding the corresponding gene counter
                print(f"adding patient: {patient} -- protein: {protein}")
                proteins_to_patient_interface_counts_dict[protein] += 1
    # Add the column
    return list(proteins_to_patient_interface_counts_dict.values())
# + tags=[]
def add_patient_core_count2(preliminary_data, snv_data, elaspic_core_data, elaspic_interface_data):
    """
    Variant of add_patient_core_count that processes ALL proteins (the P04637
    filter is commented out) and skips the interface-only bookkeeping.
    Returns the per-protein patient counts as a list ordered like
    preliminary_data['PROTEIN']; does not modify the dataframe.
    """
    # Initialize the `proteins_to_patient_interface_counts_dict` dictionary with proteins in preliminary_data in
    # correct order, and the default values of 0.
    proteins_to_patient_interface_counts_dict = dict.fromkeys(list(preliminary_data['PROTEIN']), 0)
    # Get the patient IDs.
    patients = list(snv_data['Tumor_Sample_Barcode'].unique())
    for patient in tqdm(patients):
        # Patient filtered dataframe: Filter SNV file for current patient.
        patient_snv_data = snv_data[snv_data["Tumor_Sample_Barcode"] == patient]
        for protein, mutations in get_patient_protein_to_mutations_dict(patient_snv_data).items():
            # if protein != "A0AVT1":
            #     continue
            core_flag = 'N/A'
            # print(protein, mutations)
            for mutation in mutations:
                # Check if (protein.mutation) is in ELASPIC.
                if is_in_elaspic(protein, mutation, elaspic_core_data, elaspic_interface_data):
                    # print(f'{protein}.{mutation} IS IN ELASPIC.')
                    if is_core(protein, mutation, elaspic_core_data):
                        # print(' → core found!')
                        core_flag = 1
                        break
                    # else:
                    #     # print(' → interface found!')
                    #     core_flag = 0
                else:
                    # print(f'{protein}.{mutation} IS NOT IN ELASPIC.')
                    # print(f'CORE_FLAG = {core_flag}')
                    continue
            if core_flag == 1:
                # Adding the corresponding gene counter
                # print(f"adding patient: {patient} -- protein: {protein}")
                proteins_to_patient_interface_counts_dict[protein] += 1
    # Add the column
    return list(proteins_to_patient_interface_counts_dict.values())
# + tags=[]
def add_patient_elaspic_count(preliminary_data, snv_data, elaspic_core_data, elaspic_interface_data):
    """Collect the patients carrying at least one TP53 (P04637) mutation that
    is known to ELASPIC (core or interface).

    Prints each patient as it is added plus the final set size, and returns
    the set of patient barcodes.
    """
    # Kept for parity with the sibling counters; not used below.
    proteins_to_patient_interface_counts_dict = dict.fromkeys(list(preliminary_data['PROTEIN']), 0)
    patients_TP53 = set()
    for patient in tqdm(list(snv_data['Tumor_Sample_Barcode'].unique())):
        # SNV rows belonging to the current patient only.
        patient_rows = snv_data[snv_data["Tumor_Sample_Barcode"] == patient]
        for protein, mutation_list in get_patient_protein_to_mutations_dict(patient_rows).items():
            if protein != "P04637":  # restrict the scan to TP53
                continue
            found = 'N/A'  # sentinel: no ELASPIC hit for this patient yet
            for single_mutation in mutation_list:
                if is_in_elaspic(protein, single_mutation, elaspic_core_data, elaspic_interface_data):
                    found = 1
                    break  # one ELASPIC hit is enough
            if found == 1:
                print(f"adding patient: {patient} -- protein: {protein}")
                patients_TP53.add(patient)
    print(f"NUMBER OF PATIENTS TP53 {len(patients_TP53)}")
    return patients_TP53
# -
# +
# Cross-check: run both implementations of the per-protein "core mutation"
# patient counter on the BRCA data; they should produce identical vectors.
a = add_patient_core_count(
    preliminary_data,
    predator_analysis.data_materials["brca_snv_data_simplified"],
    elaspic_core_data=predator_analysis.data_materials["brca_elaspic_core_data_simplified"],
    elaspic_interface_data=predator_analysis.data_materials["brca_elaspic_interface_processed_data"]
)
b = add_patient_core_count2(
    preliminary_data,
    predator_analysis.data_materials["brca_snv_data_simplified"],
    elaspic_core_data=predator_analysis.data_materials["brca_elaspic_core_data_simplified"],
    elaspic_interface_data=predator_analysis.data_materials["brca_elaspic_interface_processed_data"]
)
# -
a == b  # element-wise list comparison; True when both implementations agree
P68036:UBE2L3,Q9H832:UBE2Z,P51668:UBE2D1,P62837:UBE2D2,P61077:UBE2D3,P60604:UBE2G2,Q16763:UBE2S
P68036:UBE2L3, P62837:UBE2D2, P60604:UBE2G2,Q16763:UBE2S
P61077:UBE2D3
# Per-protein patient counts with the interface criterion — presumably the
# interface-mutation analogue of the core counter; defined elsewhere.
add_patient_interface_count(
    preliminary_data,
    predator_analysis.data_materials["brca_snv_data_simplified"],
    elaspic_core_data=predator_analysis.data_materials["brca_elaspic_core_data_simplified"],
    elaspic_interface_data=predator_analysis.data_materials["brca_elaspic_interface_processed_data"]
)
# + tags=[]
# Collect TP53 (P04637) patients with at least one ELASPIC-known mutation;
# prints each patient added and the final set size.
add_patient_elaspic_count(
    preliminary_data,
    predator_analysis.data_materials["brca_snv_data_simplified"],
    elaspic_core_data=predator_analysis.data_materials["brca_elaspic_core_data_simplified"],
    elaspic_interface_data=predator_analysis.data_materials["brca_elaspic_interface_processed_data"]
)
# -
# Hand-pasted TCGA patient barcodes from an earlier counter run; compared
# against `patients_56` below to locate the barcode that differs.
patients_55 = [
    "TCGA-LD-A74U",
    "TCGA-A7-A26F",
    "TCGA-AR-A0U2",
    "TCGA-A7-A6VW",
    "TCGA-A7-A6VX",
    "TCGA-B6-A0I1",
    "TCGA-AR-A0TP",
    "TCGA-C8-A134",
    "TCGA-BH-A203",
    "TCGA-A2-A0CL",
    "TCGA-C8-A26W",
    "TCGA-AR-A24T",
    "TCGA-D8-A147",
    "TCGA-AR-A1AJ",
    "TCGA-A2-A0T1",
    "TCGA-E2-A574",
    "TCGA-E2-A1B1",
    "TCGA-BH-A5IZ",
    "TCGA-C8-A12P",
    "TCGA-OL-A5RY",
    "TCGA-LL-A8F5",
    "TCGA-AR-A2LH",
    "TCGA-A8-A09X",
    "TCGA-D8-A1JF",
    "TCGA-BH-A0WA",
    "TCGA-AN-A0FX",
    "TCGA-A2-A4S1",
    "TCGA-UU-A93S",
    "TCGA-BH-A1FE",
    "TCGA-AR-A1AW",
    "TCGA-D8-A1JM",
    "TCGA-EW-A1P4",
    "TCGA-LL-A6FR",
    "TCGA-B6-A0RS",
    "TCGA-A8-A08X",
    "TCGA-AN-A04C",
    "TCGA-PL-A8LV",
    "TCGA-C8-A1HF",
    "TCGA-C8-A278",
    "TCGA-EW-A1P1",
    "TCGA-A2-A0SW",
    "TCGA-D8-A1XL",
    "TCGA-LL-A5YP",
    "TCGA-BH-A1FN",
    "TCGA-EW-A6S9",
    "TCGA-C8-A12O",
    "TCGA-BH-A0C3",
    "TCGA-BH-A0EB",
    "TCGA-E9-A1NC",
    "TCGA-B6-A0WX",
    "TCGA-BH-A0BC",
    "TCGA-A8-A07W",
    "TCGA-BH-A0AV",
    "TCGA-OL-A5D6",
    "TCGA-D8-A1XT",
]
# +
# Barcodes from a later run; contains one extra patient ("TCGA-A2-A04T")
# relative to `patients_55` above.
patients_56 = [
    "TCGA-LD-A74U",
    "TCGA-A7-A26F",
    "TCGA-AR-A0U2",
    "TCGA-A7-A6VW",
    "TCGA-A7-A6VX",
    "TCGA-B6-A0I1",
    "TCGA-AR-A0TP",
    "TCGA-C8-A134",
    "TCGA-BH-A203",
    "TCGA-A2-A0CL",
    "TCGA-C8-A26W",
    "TCGA-AR-A24T",
    "TCGA-D8-A147",
    "TCGA-AR-A1AJ",
    "TCGA-A2-A0T1",
    "TCGA-E2-A574",
    "TCGA-E2-A1B1",
    "TCGA-BH-A5IZ",
    "TCGA-C8-A12P",
    "TCGA-OL-A5RY",
    "TCGA-A2-A04T",
    "TCGA-LL-A8F5",
    "TCGA-AR-A2LH",
    "TCGA-A8-A09X",
    "TCGA-D8-A1JF",
    "TCGA-BH-A0WA",
    "TCGA-AN-A0FX",
    "TCGA-A2-A4S1",
    "TCGA-UU-A93S",
    "TCGA-BH-A1FE",
    "TCGA-AR-A1AW",
    "TCGA-D8-A1JM",
    "TCGA-EW-A1P4",
    "TCGA-LL-A6FR",
    "TCGA-B6-A0RS",
    "TCGA-A8-A08X",
    "TCGA-AN-A04C",
    "TCGA-PL-A8LV",
    "TCGA-C8-A1HF",
    "TCGA-C8-A278",
    "TCGA-EW-A1P1",
    "TCGA-A2-A0SW",
    "TCGA-D8-A1XL",
    "TCGA-LL-A5YP",
    "TCGA-BH-A1FN",
    "TCGA-EW-A6S9",
    "TCGA-C8-A12O",
    "TCGA-BH-A0C3",
    "TCGA-BH-A0EB",
    "TCGA-E9-A1NC",
    "TCGA-B6-A0WX",
    "TCGA-BH-A0BC",
    "TCGA-A8-A07W",
    "TCGA-BH-A0AV",
    "TCGA-OL-A5D6",
    "TCGA-D8-A1XT",
]
print(len(patients_56))       # total entries
print(len(set(patients_56)))  # unique entries; equal to the above => no duplicates
# -
set(patients_55) ^ set(patients_56)
"TCGA-A2-A04T" in patients_original
"TCGA-A2-A04T" in patients_after
# SNV rows for the single patient TCGA-E9-A22E.
snv_TCGA_E9_A22E = predator_analysis.data_materials["brca_snv_data_simplified"][
    predator_analysis.data_materials["brca_snv_data_simplified"]["Tumor_Sample_Barcode"] == "TCGA-E9-A22E"
]
# Re-run both per-protein patient counters (defined earlier in this notebook).
add_patient_interface_count(
    preliminary_data,
    predator_analysis.data_materials["brca_snv_data_simplified"],
    elaspic_core_data=predator_analysis.data_materials["brca_elaspic_core_data_simplified"],
    elaspic_interface_data=predator_analysis.data_materials["brca_elaspic_interface_processed_data"]
)
add_patient_core_count(
    preliminary_data,
    predator_analysis.data_materials["brca_snv_data_simplified"],
    elaspic_core_data=predator_analysis.data_materials["brca_elaspic_core_data_simplified"],
    elaspic_interface_data=predator_analysis.data_materials["brca_elaspic_interface_processed_data"]
)
# Mutations recorded for protein A0AVT1 in this patient.
get_patient_protein_to_mutations_dict(snv_TCGA_E9_A22E)["A0AVT1"]
# Protein-level HGVS notation across the full (unsimplified) SNV table.
predator_analysis.data_materials["brca_snv_data"].HGVSp_Short
# HGVSp_Short for genes whose symbol contains "BRCA".
predator_analysis.data_materials["brca_snv_data_simplified"][
    predator_analysis.data_materials["brca_snv_data_simplified"]["Hugo_Symbol"].str.contains("BRCA")
]["HGVSp_Short"]
'TCGA-AN-A0FF',
'TCGA-AO-A03O',
'TCGA-AR-A0TX',
'TCGA-AR-A0TY',
'TCGA-BH-A1F8',
'TCGA-C8-A26Y',
'TCGA-E9-A1ND',
'TCGA-E9-A22E',
'TCGA-EW-A1IZ'
"A0AVT1" in patient_protein_to_mutations
TCGA-AN-A0FF
TCGA-AR-A0TX
TCGA-E9-A22E
TCGA-E9-A1ND
TCGA-C8-A26Y
TCGA-AR-A0TY
TCGA-AO-A03O
TCGA-BH-A1F8
TCGA-EW-A1IZ
# +
import os.path as op
import pandas as pd
# Windows-local path to the raw BRCA SNV export (hg38, 2021-09-22 snapshot).
SNV_COMMON_PATH = "C:/Users/ibrah/Desktop/SNV_data/SNV_datasets/"
BRCA_SNV_PATH = op.join(SNV_COMMON_PATH, "SNV_BRCA_hg38_2021-09-22.csv")
# -
# low_memory=False: read in one pass so mixed-type columns get consistent dtypes.
brca_snv = pd.read_csv(BRCA_SNV_PATH, low_memory=False)
brca_snv.head(2)
# Locate the column(s) holding the patient barcode.
[e for e in brca_snv.columns if "Sample" in e]
# Patients carrying any UBA6 mutation (no variant-class filter applied).
uba6_patients = list(brca_snv[brca_snv["Hugo_Symbol"] == "UBA6"]["Tumor_Sample_Barcode"].unique())
print(len(uba6_patients))
uba6_patients
[('P04745', ['AMY1A', 'AMY1C']), ('P04908', ['HIST1H2AB', 'HIST1H2AE']), ('P0C0S8', ['HIST1H2AL', 'HIST1H2AG', 'HIST1H2AK', 'HIST1H2AM', 'HIST1H2AI']), ('P12532', ['CKMT1B', 'CKMT1A']), ('P62805', ['HIST1H4F', 'HIST1H4H', 'HIST1H4E', 'HIST1H4L', 'HIST1H4J', 'HIST1H4K', 'HIST1H4D', 'HIST4H4', 'HIST1H4B']), ('P62807', ['HIST1H2BC', 'HIST1H2BG', 'HIST1H2BE', 'HIST1H2BF', 'HIST1H2BI']), ('P68431', ['HIST1H3B', 'HIST1H3D', 'HIST1H3F', 'HIST1H3E', 'HIST1H3J', 'HIST1H3A', 'HIST1H3I', 'HIST1H3G', 'HIST1H3C']), ('Q13748', ['TUBA3C', 'TUBA3D']), ('Q5VU13', ['VSIG8', 'C1orf204'])]
# +
# Count the distinct comma-separated identifiers pasted into dev/text.txt.
with open("dev/text.txt", "r") as fin:
    lines = fin.readlines()

lines = [line.strip() for line in lines]

unique_elements = set()
for line in lines:
    for item in line.split(','):
        # BUG FIX: pasted entries have inconsistent spacing after commas
        # (e.g. "P68036:UBE2L3, P62837:UBE2D2"), so " P62837:UBE2D2" and
        # "P62837:UBE2D2" were previously counted as two distinct elements.
        item = item.strip()
        if item:  # ignore empties from blank lines / trailing commas
            unique_elements.add(item)

print(len(unique_elements))
unique_elements
# -
# Peek at the protein-level change notation column.
brca_snv["HGVSp_Short"]
# Missense mutations on UniProt accession P04626 for patient TCGA-A2-A0T6.
brca_snv[
    (brca_snv["Tumor_Sample_Barcode"].str.contains("TCGA-A2-A0T6")) &
    (brca_snv["Variant_Classification"] == "Missense_Mutation") &
    (brca_snv["SWISSPROT"] == "P04626")
][["Tumor_Sample_Barcode", "SWISSPROT", "HGVSp_Short"]]
# Barcode list pasted inline to double-check length and uniqueness; appears to
# match `patients_56` (includes "TCGA-A2-A04T").
L = ["TCGA-LD-A74U",
     "TCGA-A7-A26F",
     "TCGA-AR-A0U2",
     "TCGA-A7-A6VW",
     "TCGA-A7-A6VX",
     "TCGA-B6-A0I1",
     "TCGA-AR-A0TP",
     "TCGA-C8-A134",
     "TCGA-BH-A203",
     "TCGA-A2-A0CL",
     "TCGA-C8-A26W",
     "TCGA-AR-A24T",
     "TCGA-D8-A147",
     "TCGA-AR-A1AJ",
     "TCGA-A2-A0T1",
     "TCGA-E2-A574",
     "TCGA-E2-A1B1",
     "TCGA-BH-A5IZ",
     "TCGA-C8-A12P",
     "TCGA-OL-A5RY",
     "TCGA-A2-A04T",
     "TCGA-LL-A8F5",
     "TCGA-AR-A2LH",
     "TCGA-A8-A09X",
     "TCGA-D8-A1JF",
     "TCGA-BH-A0WA",
     "TCGA-AN-A0FX",
     "TCGA-A2-A4S1",
     "TCGA-UU-A93S",
     "TCGA-BH-A1FE",
     "TCGA-AR-A1AW",
     "TCGA-D8-A1JM",
     "TCGA-EW-A1P4",
     "TCGA-LL-A6FR",
     "TCGA-B6-A0RS",
     "TCGA-A8-A08X",
     "TCGA-AN-A04C",
     "TCGA-PL-A8LV",
     "TCGA-C8-A1HF",
     "TCGA-C8-A278",
     "TCGA-EW-A1P1",
     "TCGA-A2-A0SW",
     "TCGA-D8-A1XL",
     "TCGA-LL-A5YP",
     "TCGA-BH-A1FN",
     "TCGA-EW-A6S9",
     "TCGA-C8-A12O",
     "TCGA-BH-A0C3",
     "TCGA-BH-A0EB",
     "TCGA-E9-A1NC",
     "TCGA-B6-A0WX",
     "TCGA-BH-A0BC",
     "TCGA-A8-A07W",
     "TCGA-BH-A0AV",
     "TCGA-OL-A5D6",
     "TCGA-D8-A1XT"]
len(L)       # total entries
len(set(L))  # unique entries; equal to the above => no duplicates
| src/checks/TP53.ipynb |
# %matplotlib inline
from fenics import *
parameters["plotting_backend"] = 'matplotlib'
import pylab
# +
__author__ = "<NAME> <<EMAIL>>"
__date__ = "2012-01-19"
__copyright__ = "Copyright (C) 2012 <NAME>"
__license__ = "GNU LGPL version 3 or any later version"
# Modified by <NAME>
# Last changed: 2016-04-10
# Solve the Poisson problem -Laplace(u) = f on the unit square with
# homogeneous Dirichlet conditions, using P1 Lagrange elements.
# Create mesh and define function space
mesh = UnitSquareMesh(32, 32)
V = FunctionSpace(mesh, "Lagrange", 1)
# Define Dirichlet boundary (x = 0 or x = 1)
def boundary(x, on_boundary):
    # u = 0 is imposed on the entire boundary of the unit square.
    return on_boundary
# Define boundary condition
u0 = Constant(0.0)
bc = DirichletBC(V, u0, boundary)
# Define rhs using Expression or symbolic expression
x = SpatialCoordinate(mesh)
# f chosen so the exact solution is u = sin(pi*x)*sin(pi*y).
f = (2.0*pi**2) * sin(pi*x[0]) * sin(pi*x[1])
#f = Expression("2.0*DOLFIN_PI*DOLFIN_PI*sin(DOLFIN_PI*x[0])*sin(DOLFIN_PI*x[1])", degree=4)
# Define variational problem: a(u, v) = L(v)
u = TrialFunction(V)
v = TestFunction(V)
a = inner(grad(u), grad(v))*dx
L = f*v*dx(degree=4) # Note specification of quadrature degree for integration
# Compute solution
u = Function(V)
solve(a == L, u, bc)
# Save solution in VTK format
file = File("poisson.pvd")
file << u
# Plot solution
plot(u)
pylab.show()
| notebooks/02_static_linear_pdes/kul/poisson2.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .java
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Java
// language: java
// name: java
// ---
// ## Interfaces
//
// Uma interface é um tipo abstrato que é usado para especificar comportamento das classes, agindo como um protocolo.
//
// As interfaces podem declarar apenas assinaturas de métodos que serão implementados.
//
// Uma classe **implementa** uma ou mais interfaces. Caso uma classe implemente uma interface, todos os métodos declarados na interface deverão ser implementados pela classe.
//
// Abaixo o exemplo da [calculadora polimórfica](https://github.com/Marcosddf/programacaojava/blob/master/Calculadora.ipynb) desenvolvido com interfaces.
//
// Neste exemplo, a classe *Soma* também implementa a interface *imprime*, então esta deve obrigatoriamente implementar este método.
//
// Na última linha da execução, há uma chamada sobre a variavel *soma*, do tipo *IOperacao*, porém com um *cast* para *IImprime*. Isto só é possível pois o objeto instanciado é do tipo Soma, que implementa as 2 interfaces.
//
//
// +
// Strategy-style contract: a binary operation over two ints.
interface IOperacao {
    public int executa (int a, int b);
}
// Contract for objects that can print their own state.
interface IImprime {
    void imprime();
}
// Addition operation; also implements IImprime so the most recent
// result can be printed on demand.
class Soma implements IOperacao, IImprime {
    int valor; // most recently computed sum, read back by imprime()

    public int executa(int a, int b) {
        this.valor = a + b;
        return this.valor;
    }

    public void imprime() {
        System.out.println("impressão do valor calculado previamente: " + valor);
    }
}
// Subtraction operation (stateless).
class Subtracao implements IOperacao {
    public int executa(int a, int b) {
        int diferenca = a - b;
        return diferenca;
    }
}
// Thin dispatcher: delegates the computation to whichever IOperacao
// implementation it receives (polymorphic call).
class Calculadora {
    int calcula(IOperacao operacao, int a, int b) {
        return operacao.executa(a, b);
    }
}
// Demo driver: exercises both operations through the Calculadora, then
// downcasts `soma` to IImprime (safe: its runtime type is Soma).
class Programa {
    public static void main() {
        Calculadora calculadora = new Calculadora();
        IOperacao soma = new Soma();
        IOperacao subtracao = new Subtracao();

        System.out.println(calculadora.calcula(soma, 2, 3));
        System.out.println(calculadora.calcula(subtracao, 2, 3));

        ((IImprime) soma).imprime();
    }
}
Programa.main();
// -
| Interfaces.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Evaluation metrics in NLP
# -
__author__ = "<NAME>"
__version__ = "CS224u, Stanford, Spring 2020"
# + [markdown] slideshow={"slide_type": "slide"}
# ## Contents
#
# 1. [Overview](#Overview)
# 1. [Set-up](#Set-up)
# 1. [Classifier metrics](#Classifier-metrics)
# 1. [Confusion matrix](#Confusion-matrix)
# 1. [Accuracy](#Accuracy)
# 1. [Precision](#Precision)
# 1. [Recall](#Recall)
# 1. [F scores](#F-scores)
# 1. [Macro-averaged F scores](#Macro-averaged-F-scores)
# 1. [Weighted F scores](#Weighted-F-scores)
# 1. [Micro-averaged F scores](#Micro-averaged-F-scores)
# 1. [Precision–recall curves](#Precision–recall-curves)
# 1. [Average precision](#Average-precision)
# 1. [Receiver Operating Characteristic (ROC) curve](#Receiver-Operating-Characteristic-(ROC)-curve)
# 1. [Regression metrics](#Regression-metrics)
# 1. [Mean squared error](#Mean-squared-error)
# 1. [R-squared scores](#R-squared-scores)
# 1. [Pearson correlation](#Pearson-correlation)
# 1. [Spearman rank correlation](#Spearman-rank-correlation)
# 1. [Sequence prediction](#Sequence-prediction)
# 1. [Word error rate](#Word-error-rate)
# 1. [BLEU scores](#BLEU-scores)
# 1. [Perplexity](#Perplexity)
# 1. [Other resources](#Other-resources)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Overview
#
# 1. Different evaluation metrics __encode different values__ and have __different biases and other weaknesses__. Thus, you should choose your metrics carefully, and motivate those choices when writing up and presenting your work.
#
# 1. This notebook reviews some of the most prominent evaluation metrics in NLP, seeking not only to define them, but also to articulate what values they encode and what their weaknesses are.
#
# 1. In your own work, __you shouldn't feel confined to these metrics__. Per item 1 above, you should feel that you have the freedom to motivate new metrics and specific uses of existing metrics, depending on what your goals are.
#
# 1. If you're working on an established problem, then you'll feel pressure from readers (and referees) to use the metrics that have already been used for the problem. This might be a compelling pressure. However, you should always feel free to argue against those cultural norms and motivate new ones. Areas can stagnate due to poor metrics, so we must be vigilant!
# + [markdown] slideshow={"slide_type": "slide"}
# This notebook discusses prominent metrics in NLP evaluations. I've had to be selective to keep the notebook from growing too long and complex. I think the measures and considerations here are fairly representative of the issues that arise in NLP evaluation.
#
# The scikit-learn [model evaluation usage guide](http://scikit-learn.org/stable/modules/model_evaluation.html) is excellent as a source of implementations, definitions, and references for a wide range of metrics for classification, regression, ranking, and clustering.
#
# This notebook is the first in a two-part series on evaluation. Part 2 is on [evaluation methods](evaluation_methods.ipynb).
# + [markdown] slideshow={"slide_type": "slide"}
# ## Set-up
# -
# %matplotlib inline
from nltk.metrics.distance import edit_distance
from nltk.translate import bleu_score
import numpy as np
import pandas as pd
import scipy.stats
from sklearn import metrics
# + [markdown] slideshow={"slide_type": "slide"}
# ## Classifier metrics
# + [markdown] slideshow={"slide_type": "slide"}
# ### Confusion matrix
#
# A confusion matrix gives a complete comparison of how the observed/gold labels compare to the labels predicted by a classifier.
#
# `ex1 = `
# <table>
# <tr>
# <th></th>
# <th></th>
# <th colspan=3 style="text-align:center">predicted</th>
# </tr>
# <tr>
# <th></th>
# <th></th>
# <th>pos</th>
# <th>neg</th>
# <th>neutral</th>
# </tr>
# <tr>
# <th rowspan=3>gold</th>
# <th>pos</th>
# <td>15</td>
# <td>10</td>
# <td>100</td>
# </tr>
# <tr>
# <th>neg</th>
# <td>10</td>
# <td>15</td>
# <td>10</td>
# </tr>
# <tr>
# <th>neutral</th>
# <td>10</td>
# <td>100</td>
# <td>1000</td>
# </tr>
# </table>
# + [markdown] slideshow={"slide_type": "slide"}
# For classifiers that predict real values (scores, probabilities), it is important to remember that __a threshold was imposed to create these categorical predictions__.
#
# The position of this threshold can have a large impact on the overall assessment that uses the confusion matrix as an input. The default is to choose the class with the highest probability. This is so deeply ingrained that it is often not even mentioned. However, it might be inappropriate:
#
# 1. We might care about the full distribution.
# 1. Where the important class is very small relative to the others, any significant amount of positive probability for it might be important.
#
# Metrics like [average precision](#Average-precision) explore this threshold as part of their evaluation procedure.
# + [markdown] slideshow={"slide_type": "skip"}
# This function creates the toy confusion matrices that we will use for illustrative examples:
# + slideshow={"slide_type": "skip"}
def illustrative_confusion_matrix(data):
    """Wrap a 3x3 list of counts in a labelled confusion-matrix DataFrame.

    Rows are the observed (gold) classes and columns the predicted classes,
    both ordered pos / neg / neutral; the row axis is named "observed".
    """
    labels = ['pos', 'neg', 'neutral']
    cm = pd.DataFrame(data, columns=labels, index=labels)
    cm.index.name = "observed"
    return cm
# + slideshow={"slide_type": "skip"}
# Toy running example: decent pos/neg performance, neutral dominates in size.
ex1 = illustrative_confusion_matrix([
    [15, 10, 100],
    [10, 15, 10],
    [10, 100, 1000]])
# + [markdown] slideshow={"slide_type": "slide"}
# ### Accuracy
#
# [Accuracy](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html#sklearn.metrics.accuracy_score) is the sum of the correct predictions divided by the sum of all predictions:
# -
def accuracy(cm):
    """Overall accuracy: correct predictions (the diagonal) over all predictions."""
    correct = cm.values.diagonal().sum()
    total = cm.values.sum()
    return correct / total
# Here's an illustrative confusion matrix:
# `ex1 = `
# <table>
# <tr>
# <th></th>
# <th></th>
# <th colspan=3 style="text-align:center">predicted</th>
# </tr>
# <tr>
# <th></th>
# <th></th>
# <th>pos</th>
# <th>neg</th>
# <th>neutral</th>
# </tr>
# <tr>
# <th rowspan=3>gold</th>
# <th>pos</th>
# <td style="background-color: green">15</td>
# <td>10</td>
# <td>100</td>
# </tr>
# <tr>
# <th>neg</th>
# <td>10</td>
# <td style="background-color: green">15</td>
# <td>10</td>
# </tr>
# <tr>
# <th>neutral</th>
# <td>10</td>
# <td>100</td>
# <td style="background-color: green">1000</td>
# </tr>
# </table>
accuracy(ex1)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Accuracy bounds
#
# [0, 1], with 0 the worst and 1 the best.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by accuracy
#
# Accuracy seems to directly encode a core value we have for classifiers – how often they are correct. In addition, the accuracy of a classifier on a test set will be negatively correlated with the [negative log (logistic, cross-entropy) loss](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html#sklearn.metrics.log_loss), which is a common loss for classifiers. In this sense, these classifiers are optimizing for accuracy.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of accuracy
#
# * Accuracy does not give per-class metrics for multi-class problems.
#
# * Accuracy fails to control for size imbalances in the classes. For instance, consider the variant of the above in which the classifier guessed only __neutral__:
# + slideshow={"slide_type": "skip"}
# Degenerate classifier that predicts only "neutral" for every example.
ex2 = illustrative_confusion_matrix([
    [0, 0, 125],
    [0, 0, 35],
    [0, 0, 1110]])
# -
ex2
# Intuitively, this is a worse classifier than the one that produced `ex1`. Whereas `ex1` does well at __pos__ and __neg__ despite their small size, this classifier doesn't even try to get them right – it always predicts __neutral__. However, its accuracy is higher!
print(accuracy(ex1))
print(accuracy(ex2))
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to accuracy
#
# * Accuracy is inversely proportional to the [negative log (logistic, cross-entropy) loss](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.log_loss.html#sklearn.metrics.log_loss) that many classifiers optimize:
#
# $$
# -\frac{1}{N} \sum_{i=1}^{N} \sum_{k=1}^{K} y_{i,k} \log(p_{i,k})
# $$
#
# * Accuracy can be related in a similar way to [KL divergence](https://en.wikipedia.org/wiki/Kullback–Leibler_divergence):
# $$
# D_{\text{KL}}(y \parallel p) =
# \sum _{k=1}^{K} y_{k} \log\left(\frac {y_{k}}{p_{k}}\right)
# $$
# Where $y$ is a "one-hot vector" (a classification label) with $1$ at position $k$, this reduces to
# $$
# \log\left(\frac{1}{p_{k}}\right) = -\log(p_{k})
# $$
# Thus, KL-divergence is an analogue of accuracy for soft labels.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Precision
#
# [Precision](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html#sklearn.metrics.precision_score) is the sum of the correct predictions divided by the sum of all guesses. This is a per-class notion; in our confusion matrices, it's the diagonal values divided by the column sums:
# -
def precision(cm):
    """Per-class precision: diagonal counts over column (predicted) totals.

    Returns a Series indexed by class; a class never predicted yields NaN
    (0/0 division).
    """
    correct_per_class = cm.values.diagonal()
    predicted_totals = cm.sum(axis=0)
    return correct_per_class / predicted_totals
# `ex1 =`
# <table>
# <tr>
# <th></th>
# <th></th>
# <th colspan=3 style="text-align:center">predicted</th>
# </tr>
# <tr>
# <th></th>
# <th></th>
# <th>pos</th>
# <th>neg</th>
# <th>neutral</th>
# </tr>
# <tr>
# <th rowspan=3>gold</th>
# <th>pos</th>
# <td style="background-color: #ADD8E6; font-weight: bold">15</td>
# <td style="background-color: #00FFAA">10</td>
# <td style="background-color: #FFC686">100</td>
# </tr>
# <tr>
# <th>neg</th>
# <td style="background-color: #ADD8E6">10</td>
# <td style="background-color: #00FFAA; font-weight: bold">15</td>
# <td style="background-color: #FFC686">10</td>
# </tr>
# <tr>
# <th>neutral</th>
# <td style="background-color: #ADD8E6">10</td>
# <td style="background-color: #00FFAA">100</td>
# <td style="background-color: #FFC686; font-weight: bold">1000</td>
# </tr>
# <tr>
# <th></th>
# <th>precision</th>
# <td>0.43</td>
# <td>0.12</td>
# <td>0.90</td>
# </tr>
# </table>
precision(ex1)
# + [markdown] slideshow={"slide_type": "slide"}
# For our problematic __all neutral__ classifier above, precision is strictly speaking undefined for __pos__ and __neg__:
# -
ex2
precision(ex2)
# It's common to see these `NaN` values mapped to 0.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Precision bounds
#
# [0, 1], with 0 the worst and 1 the best. (Caveat: undefined values resulting from dividing by 0 need to be mapped to 0.)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by precision
#
# Precision encodes a _conservative_ value in penalizing incorrect guesses.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of precision
#
# Precision's dangerous edge case is that one can achieve very high precision for a category by rarely guessing it. Consider, for example, the following classifier's flawless predictions for __pos__ and __neg__. These predictions are at the expense of __neutral__, but that is such a big class that it hardly matters to the precision for that class either.
# + slideshow={"slide_type": "skip"}
# Ultra-conservative classifier: one correct pos and one correct neg guess,
# everything else predicted "neutral".
ex3 = illustrative_confusion_matrix([
    [1, 0, 124],
    [0, 1, 24],
    [0, 0, 1110]])
# -
ex3
precision(ex3)
# These numbers mask the fact that this is a very poor classifier!
# + [markdown] slideshow={"slide_type": "slide"}
# Compare with our less imbalanced `ex1`; for "perfect" precision on `pos` and `neg`, we incurred only a small drop in `neutral` here:
# -
ex1
precision(ex1)
# + [markdown] slideshow={"slide_type": "slide"}
# ### Recall
#
# [Recall](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html#sklearn.metrics.recall_score) is the sum of the correct predictions divided by the sum of all true instances. This is a per-class notion; in our confusion matrices, it's the diagonal values divided by the row sums. Recall is sometimes called the "true positive rate".
# -
def recall(cm):
    """Per-class recall (true positive rate): diagonal counts over row
    (observed/gold) totals. Returns a Series indexed by class."""
    correct_per_class = cm.values.diagonal()
    observed_totals = cm.sum(axis=1)
    return correct_per_class / observed_totals
# `ex1 =`
# <table>
# <tr>
# <th></th>
# <th></th>
# <th colspan=3 style="text-align:center">predicted</th>
# <th></th>
# </tr>
# <tr>
# <th></th>
# <th></th>
# <th>pos</th>
# <th>neg</th>
# <th>neutral</th>
# <th>recall</th>
# </tr>
# <tr>
# <th rowspan=3>gold</th>
# <th>pos</th>
# <td style="background-color: #ADD8E6; font-weight: bold">15</td>
# <td style="background-color: #ADD8E6">10</td>
# <td style="background-color: #ADD8E6">100</td>
# <td>0.12</td>
# </tr>
# <tr>
# <th>neg</th>
# <td style="background-color: #00FFAA">10</td>
# <td style="background-color: #00FFAA; font-weight: bold">15</td>
# <td style="background-color: #00FFAA">10</td>
# <td>0.43</td>
# </tr>
# <tr>
# <th>neutral</th>
# <td style="background-color: #FFC686">10</td>
# <td style="background-color: #FFC686">100</td>
# <td style="background-color: #FFC686; font-weight: bold">1000</td>
# <td>0.90</td>
# </tr>
# </table>
recall(ex1)
# + [markdown] slideshow={"slide_type": "slide"}
# Recall trades off against precision. For instance, consider again `ex3`, in which the classifier was very conservative with __pos__ and __neg__:
# -
# `ex3 =`
# <table>
# <tr>
# <th></th>
# <th></th>
# <th colspan=3 style="text-align:center">predicted</th>
# <th></th>
# </tr>
# <tr>
# <th></th>
# <th></th>
# <th>pos</th>
# <th>neg</th>
# <th>neutral</th>
# <th>recall</th>
# </tr>
# <tr>
# <th rowspan=3>gold</th>
# <th>pos</th>
# <td style="background-color: #CCCCCC; font-weight: bold">1</td>
# <td style="background-color: #CCCCCC">0</td>
# <td style="background-color: #CCCCCC">124</td>
# <td>0.008</td>
# </tr>
# <tr>
# <th>neg</th>
# <td style="background-color: #CCCCCC">0</td>
# <td style="background-color: #CCCCCC; font-weight: bold">1</td>
# <td style="background-color: #CCCCCC">24</td>
# <td>0.040</td>
# </tr>
# <tr>
# <th>neutral</th>
# <td style="background-color: #CCCCCC">0</td>
# <td style="background-color: #CCCCCC">0</td>
# <td style="background-color: #CCCCCC; font-weight: bold">1110</td>
# <td>1.000</td>
# </tr>
# <tr>
# <th></th>
# <th>precision</th>
# <td>1.00</td>
# <td>1.00</td>
# <td>0.88</td>
# </tr>
# </table>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Recall bounds
#
# [0, 1], with 0 the worst and 1 the best.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by recall
#
# Recall encodes a _permissive_ value in penalizing only missed true cases.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of recall
#
# Recall's dangerous edge case is that one can achieve very high recall for a category by always guessing it. This could mean a lot of incorrect guesses, but recall sees only the correct ones. You can see this in `ex3` above. The model did make some incorrect __neutral__ predictions, but it missed none, so it achieved perfect recall for that category.
#
# `ex3 =`
# <table>
# <tr>
# <th></th>
# <th></th>
# <th colspan=3 style="text-align:center">predicted</th>
# <th></th>
# </tr>
# <tr>
# <th></th>
# <th></th>
# <th>pos</th>
# <th>neg</th>
# <th>neutral</th>
# <th>recall</th>
# </tr>
# <tr>
# <th rowspan=3>gold</th>
# <th>pos</th>
# <td style="background-color: #CCCCCC; font-weight: bold">1</td>
# <td style="background-color: #CCCCCC">0</td>
# <td style="background-color: #CCCCCC">124</td>
# <td>0.008</td>
# </tr>
# <tr>
# <th>neg</th>
# <td style="background-color: #CCCCCC">0</td>
# <td style="background-color: #CCCCCC; font-weight: bold">1</td>
# <td style="background-color: #CCCCCC">24</td>
# <td>0.040</td>
# </tr>
# <tr>
# <th>neutral</th>
# <td style="background-color: #CCCCCC">0</td>
# <td style="background-color: #CCCCCC">0</td>
# <td style="background-color: #CCCCCC; font-weight: bold">1110</td>
# <td>1.000</td>
# </tr>
# <tr>
# <th></th>
# <th>precision</th>
# <td>1.00</td>
# <td>1.00</td>
# <td>0.88</td>
# </tr>
# </table>
# + [markdown] slideshow={"slide_type": "slide"}
# ### F scores
#
# [F scores](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.fbeta_score.html#sklearn.metrics.fbeta_score) combine precision and recall via their harmonic mean, with a value $\beta$ that can be used to emphasize one or the other. Like precision and recall, this is a per-category notion.
#
# $$
# (\beta^{2}+1) \cdot \frac{\textbf{precision} \cdot
# \textbf{recall}}{(\beta^{2} \cdot \textbf{precision}) +
# \textbf{recall}}
# $$
#
# Where $\beta=1$, we have F1:
#
# $$
# 2 \cdot \frac{\textbf{precision} \cdot \textbf{recall}}{\textbf{precision} + \textbf{recall}}
# $$
# -
def f_score(cm, beta):
    """Per-class F_beta: (beta^2 + 1) * P * R / (beta^2 * P + R).

    beta < 1 emphasises precision, beta > 1 emphasises recall. Precision and
    recall are computed inline from the confusion matrix (diagonal over
    column totals and row totals, respectively).
    """
    diag = cm.values.diagonal()
    p = diag / cm.sum(axis=0)  # per-class precision
    r = diag / cm.sum(axis=1)  # per-class recall
    return (beta**2 + 1) * ((p * r) / ((beta**2 * p) + r))
def f1_score(cm):
    """Per-class F1 score: F_beta with beta=1, i.e. the harmonic mean of
    precision and recall for each class."""
    return f_score(cm, beta=1.0)
# + slideshow={"slide_type": "slide"}
ex1
# -
f1_score(ex1)
# + slideshow={"slide_type": "slide"}
ex2
# -
f1_score(ex2)
# + slideshow={"slide_type": "slide"}
ex3
# -
f1_score(ex3)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of F scores
#
# [0, 1], with 0 the worst and 1 the best, and guaranteed to be between precision and recall.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by F scores
#
# The F$_{\beta}$ score for a class $K$ is an attempt to summarize how well the classifier's $K$ predictions align with the true instances of $K$. Alignment brings in both missed cases and incorrect predictions. Intuitively, precision and recall keep each other in check in the calculation. This idea runs through almost all robust classification metrics.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of F scores
#
# * There is no normalization for the size of the dataset within $K$ or outside of it.
#
# * For a given category $K$, the F$_{\beta}$ score for $K$ ignores all the values that are off the row and column for $K$, which might be the majority of the data. This means that the individual scores for a category can be very misleading about the overall performance of the system.
# -
# `ex1 = `
# <table display="inline">
# <tr>
# <th></th>
# <th></th>
# <th colspan=3 style="text-align:center">predicted</th>
# <th></th>
# </tr>
# <tr>
# <th></th>
# <th></th>
# <th>pos</th>
# <th>neg</th>
# <th>neutral</th>
# <th>F1</th>
# </tr>
# <tr>
# <th rowspan=3>gold</th>
# <th>pos</th>
# <td>15</td>
# <td>10</td>
# <td>100</td>
# <td>0.187</td>
# </tr>
# <tr>
# <th>neg</th>
# <td>10</td>
# <td>15</td>
# <td>10</td>
# <td>0.187</td>
# </tr>
# <tr>
# <th>neutral</th>
# <td>10</td>
# <td>100</td>
# <td style="background-color: #D050D0">1,000</td>
# <td>0.90</td>
# </tr>
# </table>
#
#
# `ex4 =`
# <table display="inline">
# <tr>
# <th></th>
# <th></th>
# <th colspan=3 style="text-align:center">predicted</th>
# <th></th>
# </tr>
# <tr>
# <th></th>
# <th></th>
# <th>pos</th>
# <th>neg</th>
# <th>neutral</th>
# <th>F1</th>
# </tr>
# <tr>
# <th rowspan=3>gold</th>
# <th>pos</th>
# <td>15</td>
# <td>10</td>
# <td>100</td>
# <td>0.187</td>
# </tr>
# <tr>
# <th>neg</th>
# <td>10</td>
# <td>15</td>
# <td>10</td>
# <td>0.187</td>
# </tr>
# <tr>
# <th>neutral</th>
# <td>10</td>
# <td>100</td>
# <td style="background-color: #D050D0">100,000</td>
# <td>0.999</td>
# </tr>
# </table>
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to F scores
#
# * Dice similarity for binary vectors is sometimes used to assess how well a model has learned to identify a set of items. In this setting, [it is equivalent to the per-token F1 score](https://brenocon.com/blog/2012/04/f-scores-dice-and-jaccard-set-similarity/).
#
# * The intuition behind F scores (balancing precision and recall) runs through many of the metrics discussed below.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Macro-averaged F scores
#
# The [macro-averaged F$_{\beta}$ score](http://scikit-learn.org/stable/modules/model_evaluation.html#multiclass-and-multilabel-classification) (macro F$_{\beta}$) is the mean of the F$_{\beta}$ score for each category:
# -
def macro_f_score(cm, beta):
    """Macro-averaged F_beta for confusion matrix `cm`: the unweighted
    mean of the per-category F_beta scores.  `skipna=False` means the
    result is NaN if any category's score is undefined."""
    per_category = f_score(cm, beta)
    return per_category.mean(skipna=False)
# + slideshow={"slide_type": "slide"}
ex1
# -
f1_score(ex1)
macro_f_score(ex1, beta=1)
# + slideshow={"slide_type": "slide"}
ex2
# -
f1_score(ex2)
macro_f_score(ex2, beta=1)
# + slideshow={"slide_type": "slide"}
ex3
# -
f1_score(ex3)
macro_f_score(ex3, beta=1)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of macro-averaged F scores
#
# [0, 1], with 0 the worst and 1 the best, and guaranteed to be between precision and recall.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by macro-averaged F scores
#
# Macro F$_{\beta}$ scores inherit the values of F$_{\beta}$ scores, and they additionally say that we care about all the classes equally regardless of their size.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of macro-averaged F scores
#
# In NLP, we typically care about modeling all of the classes well, so macro-F$_{\beta}$ scores often seem appropriate. However, this is also the source of their primary weaknesses:
#
# * If a model is doing really well on a small class $K$, its high macro F$_{\beta}$ score might mask the fact that it mostly makes incorrect predictions outside of $K$. So F$_{\beta}$ scoring will make this kind of classifier look better than it is.
#
# * Conversely, if a model does well on a very large class, its overall performance might be high even if it stumbles on some small classes. So F$_{\beta}$ scoring will make this kind of classifier look worse than it is, as measured by sheer number of good predictions.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Weighted F scores
#
# [Weighted F$_{\beta}$ scores](http://scikit-learn.org/stable/modules/model_evaluation.html#multiclass-and-multilabel-classification) average the per-category F$_{\beta}$ scores, but it's a weighted average based on the size of the classes in the observed/gold data:
# -
def weighted_f_score(cm, beta):
    """F_beta averaged over categories, with each category weighted by
    its number of gold examples (its row sum in confusion matrix `cm`)."""
    per_category = f_score(cm, beta=beta).values
    class_sizes = cm.sum(axis=1)
    return np.average(per_category, weights=class_sizes)
weighted_f_score(ex3, beta=1.0)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of weighted F scores
#
# [0, 1], with 0 the worst and 1 the best, but without a guarantee that it will be between precision and recall.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by weighted F scores
#
# Weighted F$_{\beta}$ scores inherit the values of F$_{\beta}$ scores, and they additionally say that we want to weight the summary by the number of actual and predicted examples in each class. This will probably correspond well with how the classifier will perform, on a per example basis, on data with the same class distribution as the training data.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of weighted F scores
#
# Large classes will dominate these calculations. Just like macro-averaging, this can make a classifier look artificially good or bad, depending on where its errors tend to occur.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Micro-averaged F scores
#
# [Micro-averaged F$_{\beta}$ scores](http://scikit-learn.org/stable/modules/model_evaluation.html#multiclass-and-multilabel-classification) (micro F$_{\beta}$ scores) add up the 2 $\times$ 2 confusion matrices for each category versus the rest, and then they calculate the F$_{\beta}$ scores, with the convention being that the positive class's F$_{\beta}$ score is reported.
# + [markdown] slideshow={"slide_type": "skip"}
# This function creates the 2 $\times$ 2 matrix for a category `cat` in a confusion matrix `cm`:
# + slideshow={"slide_type": "skip"}
def cat_versus_rest(cm, cat):
    """Collapse confusion matrix `cm` into the 2 x 2 matrix for category
    `cat` versus all other categories combined.  Rows are gold, columns
    are predicted; 'yes' is `cat`, 'no' is everything else."""
    tp = cm.loc[cat, cat]
    fn = cm.loc[cat].sum() - tp          # gold `cat`, predicted other
    fp = cm[cat].sum() - tp              # predicted `cat`, gold other
    tn = cm.values.sum() - tp - fn - fp  # neither gold nor predicted `cat`
    return pd.DataFrame(
        [[tp, fn],
         [fp, tn]],
        columns=['yes', 'no'],
        index=['yes', 'no'])
# + slideshow={"slide_type": "slide"}
display(ex1)
display(cat_versus_rest(ex1, 'pos'))
display(cat_versus_rest(ex1, 'neg'))
display(cat_versus_rest(ex1, 'neutral'))
# -
sum([cat_versus_rest(ex1, cat) for cat in ex1.index])
# + [markdown] slideshow={"slide_type": "slide"}
# For the micro F$_{\beta}$ score, we just add up these per-category confusion matrices and calculate the F$_{\beta}$ score:
# -
def micro_f_score(cm, beta):
    """Micro-averaged F_beta: pool the per-category one-vs-rest 2 x 2
    matrices by summation, compute F_beta on the pooled matrix, and
    report the positive ('yes') class's score."""
    pooled = sum(cat_versus_rest(cm, cat) for cat in cm.index)
    return f_score(pooled, beta=beta).loc['yes']
micro_f_score(ex1, beta=1.0)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of micro-averaged F scores
#
# [0, 1], with 0 the worst and 1 the best, and guaranteed to be between precision and recall.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by micro-averaged F scores
#
# * Micro F$_{\beta}$ scores inherit the values of weighted F$_{\beta}$ scores. (The resulting scores tend to be very similar.)
#
# * For two-class problems, this has an intuitive interpretation in which precision and recall are defined in terms of correct and incorrect guesses ignoring the class.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of micro-averaged F scores
#
# The weaknesses too are the same as those of weighted F$_{\beta}$ scores, with the additional drawback that we actually get two potentially very different values, for the positive and negative classes, and we have to choose one to meet our goal of having a single summary number. (See the `'yes'` in the final line of `micro_f_score`.)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to micro-averaged F scores
#
# * Micro-averaging is equivalent to accuracy.
#
# * F1 is identical to both precision and recall on the 2 $\times$ 2 matrix that is the basis for the calculation.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Precision–recall curves
#
# I noted above that confusion matrices hide a threshold for turning probabilities/scores into predicted labels. With precision–recall curves, we finally address this.
#
# A precision–recall curve is a method for summarizing the relationship between precision and recall for a binary classifier.
#
# The basis for this calculation is not the confusion matrix, but rather the raw scores or probabilities returned by the classifier. Normally, we use 0.5 as the threshold for saying that a prediction is positive. However, each distinct real value in the set of predictions is a potential threshold. The precision–recall curve explores this space.
# + [markdown] slideshow={"slide_type": "slide"}
# Here's a basic implementation; [the sklearn version](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_recall_curve.html) is more flexible and so recommended for real experimental frameworks.
# -
def precision_recall_curve(y, probs):
    """`y` is a list of gold labels; `probs` is a list of predicted
    probabilities or scores -- likely a column of the output of
    `predict_proba` using an `sklearn` classifier.  Every distinct
    value in `probs` is tried as a decision threshold, recording the
    positive-class precision and recall at each one."""
    rows = []
    for thresh in sorted(set(probs)):
        # Binarize the scores at this threshold:
        pred = [1 if p >= thresh else 0 for p in probs]
        # Precision/recall analysis as usual, focused on
        # the positive class:
        cm = pd.DataFrame(metrics.confusion_matrix(y, pred))
        rows.append((thresh, precision(cm)[1], recall(cm)[1]))
    # Conventional end-point so the curve reaches (recall=0, precision=1):
    rows.append((None, 1, 0))
    return pd.DataFrame(
        rows, columns=['threshold', 'precision', 'recall'])
# + [markdown] slideshow={"slide_type": "skip"}
# I'll illustrate with a hypothetical binary classification problem involving balanced classes:
# + slideshow={"slide_type": "slide"}
y = np.random.choice((0, 1), size=1000, p=(0.5, 0.5))
# + [markdown] slideshow={"slide_type": "skip"}
# Suppose our classifier is generally able to distinguish the two classes, but it never predicts a value above 0.4, so our usual methods of thresholding at 0.5 would make the classifier look very bad:
# -
y_pred = [np.random.uniform(0.0, 0.3) if x == 0 else np.random.uniform(0.1, 0.4)
for x in y]
# + [markdown] slideshow={"slide_type": "skip"}
# The precision–recall curve can help us identify the optimal threshold given whatever our real-world goals happen to be:
# -
prc = precision_recall_curve(y, y_pred)
# + slideshow={"slide_type": "skip"}
def plot_precision_recall_curve(prc):
    """Scatter-plot a `precision_recall_curve` data frame: recall on the
    x-axis, precision on the y-axis, plus a secondary x-axis labeling a
    subsample (every 100th) of the thresholds."""
    main_ax = prc.plot.scatter(x='recall', y='precision', legend=False)
    main_ax.set_xlim([0, 1])
    main_ax.set_ylim([0, 1.1])
    main_ax.set_ylabel("precision")
    thresh_ax = main_ax.twiny()
    thresh_ax.set_xticklabels(prc['threshold'].values[::100].round(3))
    _ = thresh_ax.set_xlabel("threshold")
# -
plot_precision_recall_curve(prc)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by precision–recall curves
#
# With precision–recall curves, we get a generalized perspective on F1 scores (and we could weight precision and recall differently to achieve the effects of `beta` for F scores more generally). These curves can be used, not only to assess a system, but also to identify an optimal decision boundary given external goals.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of precision–recall curves
#
# * Most implementations are limited to binary problems. The basic concepts are defined for multi-class problems, but it's very difficult to understand the resulting hyperplanes.
#
# * There is no single statistic that does justice to the full curve, so this metric isn't useful on its own for guiding development and optimization. Indeed, opening up the decision threshold in this way really creates another hyperparameter that one has to worry about!
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to precision–recall curves
#
# * The [Receiver Operating Characteristic (ROC) curve](#Receiver-Operating-Characteristic-(ROC)-curve) is superficially similar to the precision–recall, but it compares recall with the false positive rate.
#
# * [Average precision](#Average-precision), covered next, is a way of summarizing these curves with a single number.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Average precision
#
# Average precision is a method for summarizing the precision–recall curve. It does this by calculating the average precision weighted by the change in recall from step to step along the curve.
#
# Here is the calculation in terms of the data structures returned by `precision_recall_curve` above, in which (as in sklearn) the largest recall value is first:
#
# $$\textbf{average-precision}(r, p) = \sum_{i=1}^{n} (r_{i} - r_{i+1})p_{i}$$
#
# where $n$ is the increasing sequence of thresholds and the precision and recall vectors $p$ and $r$ are of length $n+1$. (We insert a final pair of values $p=1$ and $r=0$ in the precision–recall curve calculation, with no threshold for that point.)
# -
def average_precision(p, r):
    """Average precision for precision vector `p` and recall vector `r`,
    ordered as returned by `precision_recall_curve` (largest recall
    first): precision at each step, weighted by the drop in recall."""
    total = 0.0
    for prec, rec, next_rec in zip(p, r, r[1:]):
        total += (rec - next_rec) * prec
    return total
# + slideshow={"slide_type": "slide"}
plot_precision_recall_curve(prc)
# -
average_precision(prc['precision'].values, prc['recall'].values)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of average precision
#
# [0, 1], with 0 the worst and 1 the best.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by average precision
#
# This measure is very similar to the F1 score, in that it is seeking to balance precision and recall. Whereas the F1 score does this with the harmonic mean, average precision does it by making precision a function of recall.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of average precision
#
# * An important weakness of this metric is cultural: it is often hard to tell whether a paper is reporting average precision or some interpolated variant thereof. The interpolated versions are meaningfully different and will tend to inflate scores. In any case, they are not comparable to the calculation defined above and implemented in `sklearn` as `sklearn.metrics.average_precision_score`.
#
# * Unlike for precision–recall curves, we aren't strictly speaking limited to binary classification here. Since we aren't trying to visualize anything, we can do these calculations for multi-class problems. However, then we have to decide on how the precision and recall values will be combined for each step: macro-averaged, weighted, or micro-averaged, just as with F$_{\beta}$ scores. This introduces another meaningful design choice.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related
#
# * There are interpolated versions of this score, and some tasks/communities have even settled on specific versions as their standard metrics. All such measures should be approached with skepticism, since all of them can inflate scores artificially in specific cases.
#
# * [This blog post](https://roamanalytics.com/2016/09/07/stepping-away-from-linear-interpolation/) is an excellent discussion of the issues with linear interpolation. It proposes a step-wise interpolation procedure that is much less problematic. I believe the blog post and subsequent PR to `sklearn` led the `sklearn` developers to drop support for all interpolation mechanisms for this metric!
#
# * Average precision as defined above is a discrete approximation of the [area under the precision–recall curve](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.auc.html#sklearn.metrics.auc). This is a separate measure often referred to as "AUC". In calculating AUC for a precision–recall curve, some kind of interpolation will be done, and this will generally produce exaggerated scores for the same reasons that interpolated average precison does.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Receiver Operating Characteristic (ROC) curve
#
# The Receiver Operating Characteristic (ROC) curve for a class $k$ depicts the __false positive rate__ (FPR) for $k$ as a function of the __recall__ for $k$. For instance, suppose we focus on $k$ as the positive class $A$:
#
# $$
# \begin{array}{r r r}
# \hline
# & A & B \\
# \hline
# A & \text{TP}_{A} & \text{FN}_{A}\\
# B & \text{FP}_{A} & \text{TN}_{A}\\
# \hline
# \end{array}
# $$
#
# The false positive rate is
#
# $$
# \textbf{fpr}(A) = \frac{\text{FP}_{A}}{\text{FP}_{A} + \text{TN}_{A}}
# $$
#
# which is equivalent to 1 minus the recall for $B$ class.
#
# ROC curves are implemented in [sklearn.metrics.roc_curve](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_curve.html).
#
# The area under the ROC curve is often used as a summary statistic: see [sklearn.metrics.roc_auc_curve](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.roc_auc_score.html#sklearn.metrics.roc_auc_score).
#
# ROC is limited to binary problems.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of ROC
#
# * For individual ROC calculations of recall divided by fpr: [0, $\infty$), with larger better.
# * For ROC AUC: [0, 1], with 1 the best.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of ROC
#
# Recall that, for two classes $A$ and $B$,
#
# $$
# \begin{array}{r r r}
# \hline
# & A & B \\
# \hline
# A & \text{TP}_{A} & \text{FN}_{A}\\
# B & \text{FP}_{A} & \text{TN}_{A}\\
# \hline
# \end{array}
# $$
#
# we can express ROC as comparing $\textbf{recall}(A)$ with $1.0 - \textbf{recall}(B)$.
#
# This reveals a point of contrast with scores based in precision and recall: the entire table is used, whereas precision and recall for a class $k$ ignore the $\text{TN}_{k}$ values. Thus, whereas precision and recall for a class $k$ will be insensitive to changes in $\text{TN}_{k}$, ROC will be affected by such changes. The following individual ROC calculations help to bring this out:
#
# $$
# \begin{array}{r r r r r}
# \hline
# & A & B & \textbf{F1} & \textbf{ROC}\\
# \hline
# A & 15 & 10 & 0.21 & 0.90 \\
# B & 100 & {\color{blue}{50}} & 0.48 & 0.83 \\
# \hline
# \end{array}
# \qquad
# \begin{array}{r r r r r}
# \hline
# & A & B & \textbf{F1} & \textbf{ROC} \\
# \hline
# A & 15 & 10 & 0.21 & 3.6 \\
# B & 100 & {\color{blue}{500}} & 0.90 & 2.08 \\
# \hline
# \end{array}
# $$
#
# One might worry that the model on the right isn't better at identifying class $A$, even though its ROC value for $A$ is larger.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to ROC
#
# ROC-based analysis is superficially similar to precision–recall curves and average precision, but we should have no expectation that the results will align, particularly in the presence of class imbalances like the one sketched above.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Regression metrics
# + [markdown] slideshow={"slide_type": "slide"}
# ### Mean squared error
#
# The [mean squared error](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html#sklearn.metrics.mean_squared_error) is a summary of the distance between predicted and actual values:
#
# $$
# \textbf{mse}(y, \widehat{y}) = \frac{1}{N}\sum_{i=1}^{N} (y_{i} - \hat{y_{i}})^{2}
# $$
# -
def mean_squared_error(y_true, y_pred):
    """Mean of the squared residuals between gold values `y_true` and
    predictions `y_pred` (array-likes of the same length)."""
    residuals = y_true - y_pred
    return np.mean(residuals ** 2)
# The raw distances `y_true - y_pred` are often called the __residuals__.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of mean-squared error
#
# [0, $\infty$), with 0 the best.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by mean-squared error
#
# This measure seeks to summarize the errors made by a regression classifier. The smaller it is, the closer the model's predictions are to the truth. In this sense, it is intuitively like a counterpart to [accuracy](#Accuracy) for classifiers.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of mean-squared error
#
# These values are highly dependent on scale of the output variables, making them very hard to interpret in isolation. One really needs a clear baseline, and scale-independent ways of comparing scores are also needed.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to mean-squared error
#
# Scikit-learn implements a variety of closely related measures: [mean absolute error](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html#sklearn.metrics.mean_absolute_error), [mean squared logarithmic error](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_log_error.html#sklearn.metrics.mean_squared_log_error), and [median absolute error](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.median_absolute_error.html#sklearn.metrics.median_absolute_error). I'd say that one should choose among these metrics based on how the output values are scaled and distributed. For instance:
#
# * The median absolute error will be less sensitive to outliers than the others.
# * Mean squared logarithmic error might be more appropriate where the outputs are not strictly speaking linearly increasing.
# + [markdown] slideshow={"slide_type": "slide"}
# ### R-squared scores
#
# The R$^{2}$ score is probably the most prominent method for summarizing regression model performance, in statistics, social sciences, and ML/NLP. This is the value that `sklearn`'s regression models deliver with their `score` functions.
#
# $$
# \textbf{r2}(y, \widehat{y}) =
# 1.0 - \frac{
# \sum_{i}^{N} (y_{i} - \hat{y_{i}})^{2}
# }{
# \sum_{i}^{N} (y_{i} - \mu)^{2}
# }
# $$
# where $\mu$ is the mean of the gold values $y$.
# -
def r2(y_true, y_pred):
    """R^2 score: 1 minus the ratio of the sum of squared errors to the
    total sum of squares around the mean of the gold values `y_true`."""
    mu = y_true.mean()
    # Total sum of squares (variation of the gold data around its mean):
    total_ss = ((y_true - mu) ** 2).sum()
    # Sum of squared residuals:
    error_ss = ((y_true - y_pred) ** 2).sum()
    return 1.0 - (error_ss / total_ss)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of R-squared scores
#
# $(-\infty, 1]$, with 1 the best. A model that always predicts the mean of the gold values scores 0, and a model that fits worse than that baseline scores below 0 (so, in practice, useful models fall in [0, 1]).
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by R-squared scores
#
# The numerator in the R$^{2}$ calculation is the sum of errors:
#
# $$
# \textbf{r2}(y, \widehat{y}) =
# 1.0 - \frac{
# \sum_{i}^{N} (y_{i} - \hat{y_{i}})^{2}
# }{
# \sum_{i}^{N} (y_{i} - \mu)^{2}
# }
# $$
#
# In the context of regular linear regression, the model's objective is to minimize the total sum of squares, which is the denominator in the calculation. Thus, R$^{2}$ is based in the ratio between what the model achieved and what its objective was, which is a measure of the goodness of fit of the model.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of R-squared scores
#
# For comparative purposes, it's nice that R$^{2}$ is scaled between [0, 1]; as noted above, this lack of scaling makes mean squared error hard to interpret. But this also represents a trade-off: R$^{2}$ doesn't tell us about the magnitude of the errors.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to R-squared scores
#
# * R$^{2}$ is [closely related to the squared Pearson correlation coefficient](https://en.wikipedia.org/wiki/Coefficient_of_determination#As_squared_correlation_coefficient).
#
# * R$^{2}$ is closely related to the [explained variance](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.explained_variance_score.html#sklearn.metrics.explained_variance_score), which is also defined in terms of a ratio of the residuals and the variation in the gold data. For explained variance, the numerator is the variance of the residuals and the denominator is the variance of the gold values.
#
# * [Adjusted R$^{2}$](https://en.wikipedia.org/wiki/Coefficient_of_determination#Adjusted_R2) seeks to take into account the number of predictors in the model, to reduce the incentive to simply add more features in the hope of lucking into a better score. In ML/NLP, relatively little attention is paid to model complexity in this sense. The attitude is like: if you can improve your model by adding features, you might as well do that!
# + [markdown] slideshow={"slide_type": "slide"}
# ### Pearson correlation
#
# The [Pearson correlation coefficient $\rho$](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) between two vectors $y$ and $\widehat{y}$ of dimension $N$ is:
#
# $$
# \textbf{pearsonr}(y, \widehat{y}) =
# \frac{
# \sum_{i}^{N} (y_{i} - \mu_{y}) \cdot (\widehat{y}_{i} - \mu_{\widehat{y}})
# }{
# \sqrt{\sum_{i}^{N} (y_{i} - \mu_{y})^{2}} \cdot \sqrt{\sum_{i}^{N} (\widehat{y}_{i} - \mu_{\widehat{y}})^{2}}
# }
# $$
# where $\mu_{y}$ is the mean of $y$ and $\mu_{\widehat{y}}$ is the mean of $\widehat{y}$.
#
# This is implemented as `scipy.stats.pearsonr`, which returns the coefficient and a p-value.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of Pearson correlations
#
# $[-1, 1]$, where $-1$ is a complete negative linear correlation, $+1$ is a complete positive linear correlation, and $0$ is no linear correlation at all.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of Pearson correlation
#
# Pearson correlations are highly sensitive to the magnitude of the differences between the gold and predicted values. As a result, they are also very sensitive to outliers.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to Pearson correlation
#
# * For comparing gold values $y$ and predicted values $\widehat{y}$, Pearson correlation is equivalent to a linear regression using $\widehat{y}$ and a bias term to predict $y$. [See this great blog post for details.](https://lindeloev.github.io/tests-as-linear/)
#
# * [As noted above](#Related-to-R-squared-scores), there is also a close relationship to R-squared values.
# + [markdown] slideshow={"slide_type": "slide"}
# ### Spearman rank correlation
#
# The Spearman rank correlation coefficient between two vectors $y$ and $\widehat{y}$ of dimension $N$ is the Pearson coefficient with all of the data mapped to their ranks.
#
# It is implemented as `scipy.stats.spearmanr`, which returns the coefficient and a p-value.
# -
corr_df = pd.DataFrame({
'y1': np.random.uniform(-10, 10, size=1000),
'y2': np.random.uniform(-10, 10, size=1000)})
scipy.stats.spearmanr(corr_df['y1'], corr_df['y2'])
scipy.stats.pearsonr(corr_df['y1'].rank(), corr_df['y2'].rank())
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of Spearman rank correlations
#
# $[-1, 1]$, where $-1$ is a complete negative linear correlation, $+1$ is a complete positive linear correlation, and $0$ is no linear correlation at all.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of Spearman rank correlation
#
# Unlike Pearson, Spearman is not sensitive to the magnitude of the differences. In fact, it's invariant under all monotonic rescaling, since the values are converted to ranks. This also makes it less sensitive to outliers than Pearson.
#
# Of course, these strengths become weaknesses in domains where the raw differences do matter. That said, in most NLU contexts, Spearman will be a good conservative choice for system assessment.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to Spearman rank correlation
#
# For comparing gold values $y$ and predicted values $\widehat{y}$, Pearson correlation is equivalent to a linear regression using $\textbf{rank}(\widehat{y})$ and a bias term to predict $\textbf{rank}(y)$. [See this great blog post for details.](https://lindeloev.github.io/tests-as-linear/)
# + [markdown] slideshow={"slide_type": "slide"}
# ## Sequence prediction
#
# Sequence prediction metrics all seek to summarize and quantify the extent to which a model has managed to reproduce, or accurately match, some gold standard sequences. Such problems arise throughout NLP. Examples:
#
# 1. Mapping speech signals to their desired transcriptions.
# 1. Mapping texts in a language $L_{1}$ to their translations in a distinct language or dialect $L_{2}$.
# 1. Mapping input dialogue acts to their desired responses.
# 1. Mapping a sentence to one of its paraphrases.
# 1. Mapping real-world scenes or contexts (non-linguistic) to descriptions of them (linguistic).
# + [markdown] slideshow={"slide_type": "slide"}
# Evaluation is very challenging because the relationships tend to be __many-to-one__: a given sentence might have multiple suitable translations; a given dialogue act will always have numerous felicitous responses; any scene can be described in multiple ways; and so forth. The most constrained of these problems is the speech-to-text case in 1, but even that one has indeterminacy in real-world contexts (humans often disagree about how to transcribe spoken language).
# + [markdown] slideshow={"slide_type": "slide"}
# ### Word error rate
#
# The [word error rate](https://en.wikipedia.org/wiki/Word_error_rate) (WER) metric is a word-level, length-normalized measure of [Levenshtein string-edit distance](https://en.wikipedia.org/wiki/Levenshtein_distance):
# -
def wer(seq_true, seq_pred):
    """Word error rate: Levenshtein edit distance between the two token
    sequences, normalized by the length of the gold sequence `seq_true`."""
    return edit_distance(seq_true, seq_pred) / len(seq_true)
wer(['A', 'B', 'C'], ['A', 'A', 'C'])
wer(['A', 'B', 'C', 'D'], ['A', 'A', 'C', 'D'])
# + [markdown] slideshow={"slide_type": "slide"}
# To calculate this over the entire test-set, one gets the edit-distances for each gold–predicted pair and normalizes these by the length of all the gold examples, rather than normalizing each case:
# -
def corpus_wer(y_true, y_pred):
    """Corpus-level word error rate.

    Sums the edit distances over all gold/predicted pairs and normalizes by
    the total length of the gold sequences (rather than averaging per-example
    rates), so longer references carry proportionally more weight.

    NOTE(review): ``zip`` silently truncates to the shorter input; callers
    should ensure ``y_true`` and ``y_pred`` have the same length.
    """
    # Generator expressions avoid materializing the intermediate lists
    # that the previous version built.
    total_dist = sum(edit_distance(seq_true, seq_pred)
                     for seq_true, seq_pred in zip(y_true, y_pred))
    total_len = sum(len(seq) for seq in y_true)
    return total_dist / total_len
# This gives a single summary value for the entire set of errors.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of word error rate
#
# $[0, \infty)$, where 0 is best. (The lack of a finite upper bound derives from the fact that the normalizing constant is given by the true sequences, and the predicted sequences can differ from them in any conceivable way in principle.)
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by word error rate
#
# This method says that our desired notion of closeness or accuracy can be operationalized in terms of the low-level operations of insertion, deletion, and substitution. The guiding intuition is very much like that of F scores.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of word error rate
#
# The value encoded reveals a potential weakness in certain domains. Roughly, the more __semantic__ the task, the less appropriate WER is likely to be.
#
# For example, adding a negation to a sentence will radically change its meaning but incur only a small WER penalty, whereas passivizing a sentence (_Kim won the race_ → _The race was won by Kim_) will hardly change its meaning at all but incur a large WER penalty.
#
# See also [Liu et al. 2016](https://www.aclweb.org/anthology/D16-1230) for similar arguments in the context of dialogue generation.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to word error rate
#
# * WER can be thought of as a family of different metrics varying in the notion of edit distance that they employ.
#
# * The Word Accuracy Rate is 1.0 minus the WER, which, despite its name, is intuitively more like [recall](#Recall) than [accuracy](#Accuracy).
# + [markdown] slideshow={"slide_type": "slide"}
# ### BLEU scores
#
# BLEU (Bilingual Evaluation Understudy) scores were originally developed in the context of machine translation, but they are applied in other generation tasks as well. For BLEU scoring, we require a set of gold outputs. The metric has two main components:
#
# * __Modified n-gram precision__: A direct application of precision would divide the number of correct n-grams in the predicted output (n-grams that appear in any translation) by the number of n-grams in the predicted output. This has a degenerate solution in which the predicted output contains only one word. BLEU's modified version substitutes the actual count for each n-gram by the maximum number of times it appears in any translation.
#
# * __Brevity penalty (BP)__: to avoid favoring outputs that are too short, a penalty is applied. Let $Y$ be the set of gold outputs, $\widehat{y}$ the predicted output, $c$ the length of the predicted output, and $r$ the length of the gold output in $Y$ whose length is closest to $c$. Then:
#
# $$\textbf{BP}(Y, \widehat{y}) =
# \begin{cases}
# 1 & \textrm{ if } c > r \\
# \exp(1 - \frac{r}{c}) & \textrm{otherwise}
# \end{cases}$$
# + [markdown] slideshow={"slide_type": "slide"}
# The BLEU score itself is typically a combination of modified n-gram precision for various $n$ (usually up to 4):
#
# $$\textbf{BLEU}(Y, \widehat{y}) = \textbf{BP}(Y, \widehat{y}) \cdot
# \exp\left(\sum_{n=1}^{N} w_{n} \cdot \log\left(\textbf{modified-precision}(Y, \widehat{y}, n)\right)\right)$$
#
# where $Y$ is the set of gold outputs, $\widehat{y}$ is the predicted output, and $w_{n}$ is a weight for each $n$-gram level (usually set to $1/N$).
#
# NLTK has [implementations of Bleu scoring](http://www.nltk.org/_modules/nltk/translate/bleu_score.html) for the sentence-level, as defined above, and for the corpus level (`nltk.translate.bleu_score.corpus_bleu`). At the corpus level, it is typical to do a kind of [micro-averaging](#Micro-averaged-F-scores) of the modified precision scores and use a cumulative version of the brevity penalty.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of BLEU scores
#
# [0, 1], with 1 being the best, though with no expectation that any system will achieve 1, since even sets of human-created translations do not reach this level.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Value encoded by BLEU scores
#
# BLEU scores attempt to achieve the same balance between precision and recall that runs through the majority of the metrics discussed here. It has many affinities with [word error rate](#Word-error-rate), but seeks to accommodate the fact that there are typically multiple suitable outputs for a given input.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of BLEU scores
#
# * [Callison-Burch et al. (2006)](http://www.aclweb.org/anthology/E06-1032) criticize BLEU as a machine translation metric on the grounds that it fails to correlate with human scoring of translations. They highlight its insensitivity to n-gram order and its insensitivity to n-gram types (e.g., function vs. content words) as causes of this lack of correlation.
#
# * [Liu et al. (2016)](https://www.aclweb.org/anthology/D16-1230) specifically argue against BLEU as a metric for assessing dialogue systems, based on a lack of correlation with human judgments about dialogue coherence.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to BLEU scores
#
# There are many competitors/alternatives to BLEU, most proposed in the context of machine translation. Examples: [ROUGE](https://en.wikipedia.org/wiki/ROUGE_(metric)), [METEOR](https://en.wikipedia.org/wiki/METEOR), [HyTER](http://www.aclweb.org/anthology/N12-1017), [Orange (smoothed Bleu)](http://www.aclweb.org/anthology/C04-1072).
# + [markdown] slideshow={"slide_type": "slide"}
# ### Perplexity
#
# [Perplexity](https://en.wikipedia.org/wiki/Perplexity) is a common metric for directly assessing generation models by calculating the probability that they assign to sequences in the test data. It is based in a measure of average surprisal:
#
# $$H(P, x) = -\frac{1}{m}\log_{2} P(x)$$
#
# where $P$ is a model assigning probabilities to sequences and $x$ is a sequence.
#
# Perplexity is then the exponent of this:
#
# $$\textbf{perplexity}(P, x) = 2^{H(P, x)}$$
#
# Using any base $n$ both in defining $H$ and as the base in $\textbf{perplexity}$ will lead to identical results.
#
# Minimizing perplexity is equivalent to maximizing probability.
# + [markdown] slideshow={"slide_type": "slide"}
# It is common to report per-token perplexity; here the averaging should be done in log-space to deliver a [geometric mean](https://en.wikipedia.org/wiki/Geometric_mean):
#
# $$\textbf{token-perplexity}(P, x) = \exp\left(\frac{\log\textbf{perplexity}(P, x)}{\textbf{length}(x)}\right)$$
#
# When averaging perplexity values obtained from all the sequences in a text corpus, one should again use the geometric mean:
#
# $$\textbf{mean-perplexity}(P, X) =
# \exp\left(\frac{1}{m}\sum_{x\in X}\log(\textbf{token-perplexity}(P, x))\right)$$
#
# for a set of $m$ examples $X$.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Bounds of perplexity
#
# $[1, \infty)$, where 1 is best.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Values encoded by perplexity
#
# The guiding idea behind perplexity is that a good model will assign high probability to the sequences in the test data. This is an intuitive, expedient intrinsic evaluation, and it matches well with the objective for models trained with a cross-entropy or logistic objective.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Weaknesses of perplexity
#
# * Perplexity is heavily dependent on the nature of the underlying vocabulary in the following sense: one can artificially lower one's perplexity by having a lot of `UNK` tokens in the training and test sets. Consider the extreme case in which _everything_ is mapped to `UNK` and perplexity is thus perfect on any test set. The more worrisome thing is that any amount of `UNK` usage side-steps the pervasive challenge of dealing with infrequent words.
#
# * [As Hal Daumé III discusses in this post](https://nlpers.blogspot.com/2014/05/perplexity-versus-error-rate-for.html), the perplexity metric imposes an artificial constraint that one's model outputs are probabilistic.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Related to perplexity
#
# Perplexity is the inverse of probability and, [with some assumptions](http://www.cs.cmu.edu/~roni/11761/PreviousYearsHandouts/gauntlet.pdf), can be seen as an approximation of the cross-entropy between the model's predictions and the true underlying sequence probabilities.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Other resources
#
# The scikit-learn [model evaluation usage guide](http://scikit-learn.org/stable/modules/model_evaluation.html) is a great resource for metrics I didn't cover here. In particular:
#
# * Clustering
#
# * Ranking
#
# * Inter-annotator agreement
| evaluation_metrics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Module 3 Required Coding Activity
# Introduction to Python Unit 1
#
# This is an activity based on code similar to the Jupyter Notebook **`Practice_MOD03_1-4_IntroPy.ipynb`** and **`Practice_MOD03_1-5_IntroPy.ipynb`** which you may have completed as practice.
#
# > **NOTE:** This program requires the use of **`if, elif, else`**, and casting between strings and numbers. The program should use the various code syntax covered in module 3.
# >
# >The program must result in print output using numeric input similar to that shown in the sample below.
#
# ## Program: Cheese Order Function
# - define function with max, min, price, and order_amount parameters
# - set default values for maximum and minimum order parameters
# - set default value for price parameter
# - cast order_amount and other arguments to numbers
# - check order_amount and give message checking against
# - over maximum
# - under minimum
# - else within maximum and minimum give message with calculated price
# - call your function using order weight input from user
#
#
# Sample input and output:
# ```
# Enter cheese order weight (numeric value): 113
# 113.0 is more than currently available stock
# ```
#
# ```
# Enter cheese order weight (numeric value): .15
# 0.15 is below minimum order amount
# ```
#
# ```
# Enter cheese order weight (numeric value): 2
# 2.0 costs $15.98
# ```
# [ ] create, call and test
# Submit this by creating a python file (.py) and submitting it in D2L. Be sure to test that it works.
| Python Absolute Beginner/Module_3.2_Required_Code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:riboraptor]
# language: python
# name: conda-env-riboraptor-py
# ---
# %pylab inline
# %load_ext autoreload
# %autoreload 2
from pyseqlogo.pyseqlogo import draw_logo, setup_axis, draw_protein
from pyseqlogo.format_utils import read_alignment, calc_relative_information, format_matrix
from Bio import motifs
import warnings
warnings.filterwarnings('ignore')
# +
ALL_SCORES1 = [[('C', 0.02247014831444764),
('T', 0.057903843733384308),
('A', 0.10370837683591219),
('G', 0.24803586793255664)],
[('T', 0.046608227674354567),
('G', 0.048827667087419063),
('A', 0.084338697696451109),
('C', 0.92994511407402669)],
[('G', 0.0),
('T', 0.011098351287382456),
('A', 0.022196702574764911),
('C', 1.8164301607015951)],
[('C', 0.020803153636453006),
('T', 0.078011826136698756),
('G', 0.11268374886412044),
('A', 0.65529933954826969)],
[('T', 0.017393530660176126),
('A', 0.030438678655308221),
('G', 0.22611589858228964),
('C', 0.45078233627623127)],
[('G', 0.022364103549245576),
('A', 0.043412671595594352),
('T', 0.097349627214363091),
('C', 0.1657574733649966)],
[('C', 0.03264675899941203),
('T', 0.045203204768416654),
('G', 0.082872542075430544),
('A', 1.0949220710572034)],
[('C', 0.0),
('T', 0.0076232429756614498),
('A', 0.011434864463492175),
('G', 1.8867526364762088)],
[('C', 0.0018955903000026028),
('T', 0.0094779515000130137),
('A', 0.35637097640048931),
('G', 0.58005063180079641)],
[('A', 0.01594690817903021),
('C', 0.017541598996933229),
('T', 0.2774762023151256),
('G', 0.48638069946042134)],
[('A', 0.003770051401807444),
('C', 0.0075401028036148881),
('T', 0.011310154205422331),
('G', 1.8624053924928772)],
[('C', 0.036479877757360731),
('A', 0.041691288865555121),
('T', 0.072959755514721461),
('G', 1.1517218549109602)],
[('G', 0.011831087684038642),
('T', 0.068620308567424126),
('A', 0.10174735408273231),
('C', 1.0009100180696691)],
[('C', 0.015871770937774379),
('T', 0.018757547471915176),
('A', 0.32176408355669878),
('G', 0.36505073156881074)],
[('A', 0.022798100897300954),
('T', 0.024064662058262118),
('G', 0.24571286522646588),
('C', 0.34070495229855319)]]
ALL_SCORES2 = [[('A', 0.01653482213365913),
('G', 0.026710097292833978),
('C', 0.035613463057111966),
('T', 0.057235922770358522)],
[('C', 0.020055669245080433),
('G', 0.023816107228533015),
('A', 0.031336983195438178),
('T', 0.058913528407423782)],
[('T', 0.018666958185377256),
('G', 0.084001311834197651),
('A', 0.093334790926886277),
('C', 0.30333807051238043)],
[('C', 0.0),
('G', 0.0),
('A', 0.32027512306044359),
('T', 0.82203948252180525)],
[('C', 0.012698627658037786),
('A', 0.053334236163758708),
('T', 0.096509570201087178),
('G', 0.10920819785912497)],
[('C', 0.0),
('G', 0.089472611853783468),
('A', 0.1930724782107959),
('T', 0.22132698721725386)],
[('C', 0.020962390607965918),
('A', 0.026202988259957396),
('G', 0.066380903591892068),
('T', 0.07336836712788071)],
[('G', 0.0),
('A', 0.10236420974570831),
('C', 0.15354631461856247),
('T', 0.29173799777526871)],
[('G', 0.027681850851852024),
('C', 0.089966015268519078),
('A', 0.089966015268519078),
('T', 0.53287562889815143)],
[('A', 0.034165612000664765),
('C', 0.06833122400132953),
('G', 0.072601925501412631),
('T', 0.28186629900548432)],
[('G', 0.0),
('A', 0.037325935579058833),
('C', 0.23328709736911771),
('T', 0.72785574379164719)],
[('A', 0.017470244196759552),
('C', 0.062892879108334396),
('G', 0.094339318662501587),
('T', 0.19916078384305891)],
[('G', 0.0),
('A', 0.096447131567581681),
('C', 0.15844885900388422),
('T', 0.48223565783790845)],
[('G', 0.0),
('A', 0.069291952024925829),
('C', 0.20787585607477749),
('T', 0.46425607856700307)],
[('G', 0.0),
('A', 0.0),
('C', 0.21713201856318373),
('T', 1.1495224512168551)],
[('G', 0.0),
('A', 0.048934292002649343),
('T', 0.27263391258618919),
('C', 0.42642740173737281)],
[('A', 0.0),
('G', 0.053607190685875404),
('C', 0.2054942309625224),
('T', 0.69689347891638032)],
[('G', 0.0),
('A', 0.0),
('C', 0.31312908494534769),
('T', 0.84220926295645249)],
[('G', 0.0),
('C', 0.068079835765814778),
('A', 0.068079835765814778),
('T', 1.3207488138568066)],
[('G', 0.020257705570431345),
('A', 0.020257705570431345),
('C', 0.048618493369035232),
('T', 0.055371061892512348)],
[('G', 0.0),
('A', 0.076286510680262556),
('C', 0.20538675952378382),
('T', 0.34622339462580698)]]
# -
# # Default mode is bits
plt.rcParams['figure.dpi'] = 300
fig, axarr = draw_logo(ALL_SCORES1, coordinate_type='data')
fig.tight_layout()
plt.rcParams['figure.dpi'] = 300
fig, axarr = draw_logo(ALL_SCORES1, coordinate_type='ds')
fig.tight_layout()
data = '../data/K562_ATF1_all_m_lcr_100.fa_archs_reclust_wms.tr'
draw_logo(data, data_type='transfac')
# # Amino Acids Fasta
data = '../data/globin_aa.fasta'
draw_logo(data, data_type='fasta',
yaxis='probability',
draw_range = [61, 69],
seq_type='amino_acid',
colorscheme='chemistry',
coordinate_type='ds')
# # Transfac
from Bio import motifs
data = '../data/K562_ATF1_all_m_lcr_100.fa_archs_reclust_wms.tr'
motifs = motifs.parse(open(data, 'r'), 'transfac')
for motif in motifs:
name = list(motif.viewvalues())[0]
total = int(name.split('(')[1].replace('seqs)', ''))
pfm = dict(motif.counts.normalize())
ic = calc_relative_information(pfm, total)
# # Transfac - bits as input
fig, axarr = draw_logo(format_matrix(ic), data_type='bits')
fig.tight_layout()
# # Transfac - counts as input
# +
fig, axarr = draw_logo(motif.counts, data_type='counts', yaxis='probability')
fig.tight_layout()
# -
# # JASPAR
# +
ex = '../data/jaspardb/MA0002.2.jaspar'
fig, axarr = draw_logo(ex, data_type='jaspar', nrow=1, ncol=1)
#from Bio import motifs
#m = motifs.parse(open(ex, 'r'), 'JASPAR')[0]
# -
# # Colorscheme
plt.rcParams['figure.dpi'] = 300
fig, axarr = draw_logo(ALL_SCORES1, data_type='bits', colorscheme='meme')
fig.tight_layout()
# # Ultra loooooooooong motifs
ALL_SCORES = [[('G', 0.1),
('A', 0.1),
('C', 0.31312908494534769),
('T', 0.84220926295645249)]] * 50
ALL_SCORES2 = [[('G', 0.1),
('A', 0.1),
('C', 0.31312908494534769),
('T', 0.84220926295645249)]] * 5
stem_scores = np.random.rand(6)
fig, axarr = draw_logo(ALL_SCORES, data_type='bits', nrow=1, ncol=1)
# # Counts as input
counts = {'A' : [3,4,5,6], 'C': [2,3,1,1], 'T': [2,1,3,1], 'G': [3,2,1,2]}
fig, axarr = draw_logo(counts, data_type='counts', yaxis='probability')
fig.tight_layout()
# # BOSC
counts_bosc = {'B' : [200,0,0,0], 'O': [0, 200,0,0], 'S': [0,0,200,0], 'C': [0,0,200,200], 'G':[0,250,0,0]}
counts_2018 = {'2' : [200,0,0,0], '0': [0, 200,0,0], '1': [0,0,200,0], '8': [0,0,0,200]}
colors_bosc = {'B': '#f7fcb9', 'O': '#addd8e', 'S': '#31a354', 'C': 'royalblue', 'G': 'royalblue'}
fig, axarr = draw_logo(counts_bosc,
data_type='counts',
yaxis='probability',
fontfamily='Monospace',
colorscheme=colors_bosc)
fig.savefig('bosc.png', dpi=300)
# +
fig, axarr = draw_logo(counts_2018,
data_type='counts',
yaxis='probability',
fontfamily='Helvetica',
colorscheme={'2': 'grey',
'0': 'grey',
'1': 'grey',
'8': 'grey'})
#fig.tight_layout()
fig.savefig('2018.png', dpi=300)
# -
# # Different font!
fig, axarr = draw_logo(counts, data_type='counts', yaxis='probability', fontfamily='Comic Sans MS')
fig.tight_layout()
# # Plot conservation scores
# +
from matplotlib import transforms
ALL_SCORES = [[('G', 0.1),
('A', 0.1),
('C', 0.31312908494534769),
('T', 0.84220926295645249)]] * 6
stem_scores = np.random.rand(4)
fig = plt.figure()
ax = plt.subplot(211)
#fig, axarr =
counts = {'A' : [3,4,5,6], 'C': [2,3,1,1], 'T': [2,1,3,1], 'G': [3,2,1,2]}
draw_logo(counts, data_type='counts', ax=ax, draw_axis=True)
#ax = axarr[0,0]
ax = plt.subplot(212)
ax.stem(stem_scores)#, transform=trans_offset)
#setup_axis(axarr[1,0], axis='y', majorticks=1, minorticks=0.1)
fig.tight_layout()
# -
# # Protein with fasta
ALL_SCORES = [[('G', 0.1),
('A', 0.1),
('C', 0.31312908494534769),
('T', 0.84220926295645249)]] * 15
ALL_SCORES2 = [[('G', 0.1),
('A', 0.1),
('C', 0.31312908494534769),
('T', 0.84220926295645249)]] * 5
stem_scores = np.random.rand(6)
fig, axarr = draw_logo(ALL_SCORES, data_type='bits', nrow=2, ncol=1)
#fig.subplots_adjust(left=0,right=0.1,bottom=0,top=1)
ax = axarr[1,0]
draw_protein(ALL_SCORES2, ax, scalex=3)
setup_axis(axarr[1,0], axis='y', majorticks=1, minorticks=0.1)
#fig.tight_layout()#rect=[0, 0.03, 1, 0.95])
| notebooks/Example01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .ps1
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: .NET (PowerShell)
# language: PowerShell
# name: .net-powershell
# ---
# # T1574.002 - Hijack Execution Flow: DLL Side-Loading
# Adversaries may execute their own malicious payloads by hijacking the library manifest used to load DLLs. Adversaries may take advantage of vague references in the library manifest of a program by replacing a legitimate library with a malicious one, causing the operating system to load their malicious library when it is called for by the victim program.
#
# Programs may specify DLLs that are loaded at runtime. Programs that improperly or vaguely specify a required DLL may be open to a vulnerability in which an unintended DLL is loaded. Side-loading vulnerabilities specifically occur when Windows Side-by-Side (WinSxS) manifests (Citation: About Side by Side Assemblies) are not explicit enough about characteristics of the DLL to be loaded. Adversaries may take advantage of a legitimate program that is vulnerable by replacing the legitimate DLL with a malicious one. (Citation: FireEye DLL Side-Loading)
#
# Adversaries likely use this technique as a means of masking actions they perform under a legitimate, trusted system or software process.
# ## Atomic Tests
# Import the module before running the tests.
# Checkout Jupyter Notebook at https://github.com/cyb3rbuff/TheAtomicPlaybook to run PS scripts.
# Fix: "- Force" (with a space) was parsed as two stray positional arguments;
# the switch parameter must be written as "-Force".
Import-Module /Users/0x6c/AtomicRedTeam/atomics/invoke-atomicredteam/Invoke-AtomicRedTeam.psd1 -Force
# ### Atomic Test #1 - DLL Side-Loading using the Notepad++ GUP.exe binary
# GUP is an open source signed binary used by Notepad++ for software updates, and is vulnerable to DLL Side-Loading, thus enabling the libcurl dll to be loaded.
# Upon execution, calc.exe will be opened.
#
# **Supported Platforms:** windows
# #### Dependencies: Run with `powershell`!
# ##### Description: Gup.exe binary must exist on disk at specified location (#{gup_executable})
#
# ##### Check Prereq Commands:
# ```powershell
# if (Test-Path PathToAtomicsFolder\T1574.002\bin\GUP.exe) {exit 0} else {exit 1}
#
# ```
# ##### Get Prereq Commands:
# ```powershell
# New-Item -Type Directory (split-path PathToAtomicsFolder\T1574.002\bin\GUP.exe) -ErrorAction ignore | Out-Null
# Invoke-WebRequest "https://github.com/redcanaryco/atomic-red-team/blob/master/atomics/T1574.002/bin/GUP.exe" -OutFile "PathToAtomicsFolder\T1574.002\bin\GUP.exe"
#
# ```
Invoke-AtomicTest T1574.002 -TestNumbers 1 -GetPreReqs
# #### Attack Commands: Run with `command_prompt`
# ```command_prompt
# PathToAtomicsFolder\T1574.002\bin\GUP.exe
# ```
Invoke-AtomicTest T1574.002 -TestNumbers 1
# ## Detection
# Monitor processes for unusual activity (e.g., a process that does not use the network begins to do so). Track DLL metadata, such as a hash, and compare DLLs that are loaded at process execution time against previous executions to detect differences that do not correlate with patching or updates.
| playbook/tactics/privilege-escalation/T1574.002.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
# +
#tf.keras.datasets have a lot of preloaded datasets that you can directly use
# More details can be found here: https://www.tensorflow.org/api_docs/python/tf/keras/datasets
# Image classification dataset:
# a. CIFAR-10: https://www.cs.toronto.edu/~kriz/cifar.html
# b. CIFAR-100: https://www.cs.toronto.edu/~kriz/cifar.html
# c. MNIST: http://yann.lecun.com/exdb/mnist/
# d. Fashion-MNIST: https://github.com/zalandoresearch/fashion-mnist
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
# -
# Reshape input data from (28, 28) to (28, 28, 1)
# You can also make use of the same shapes for the MNIST dataset.
img_width, img_height, channels = 32, 32, 3
print("x_train shape:", x_train.shape, "y_train shape:", y_train.shape)
cifar_labels = ["airplane", # index 0
"automobile", # index 1
"bird", # index 2
"cat", # index 3
"deer", # index 4
"dog", # index 5
"frog", # index 6
"horse", # index 7
"ship", # index 8
"truck"] # index 9
# Image index, you can pick any number between 0 and 59,999
index = 1000
labelIndex =int(y_train[index])
print ("y = " + str(index) + " " +(cifar_labels[labelIndex]))
plt.imshow(x_train[index])
plt.set_cmap('Greys')
figure = plt.figure(figsize=(20, 8))
for i, index in enumerate(np.random.choice(x_test.shape[0], size=15, replace=False)):
ax = figure.add_subplot(3, 5, i + 1, xticks=[], yticks=[])
# Display each image
ax.imshow(x_train[index])
labelIndex = int(y_train[index])
# Set the title for each image
ax.set_title("{}".format(cifar_labels[labelIndex]))
# ## Data Normalization
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# +
# Further break training data into train / validation sets (# put 5000 into validation set and keep remaining 55,000 for train)
(x_train, x_valid) = x_train[5000:], x_train[:5000]
(y_train, y_valid) = y_train[5000:], y_train[:5000]
# x_train = x_train.reshape(x_train.shape[0], img_width, img_height, channels)
# x_valid = x_valid.reshape(x_valid.shape[0], img_width, img_height, channels)
# x_test = x_test.reshape(x_test.shape[0], img_width, img_height, channels)
# One-hot encode the labels
y_train = tf.keras.utils.to_categorical(y_train, 10)
y_valid = tf.keras.utils.to_categorical(y_valid, 10)
y_test = tf.keras.utils.to_categorical(y_test, 10)
# Print training set shape
print("x_train shape:", x_train.shape, "y_train shape:", y_train.shape)
# Print the number of training, validation, and test datasets
print(x_train.shape[0], 'train set')
print(x_valid.shape[0], 'validation set')
print(x_test.shape[0], 'test set')
# -
# ## Create the Model Architecture
# We will make use of the Keras Sequential API (https://keras.io/getting-started/sequential-model-guide/). The sequential model is a linear stack of layers.
model = tf.keras.Sequential()
# We will try to build a simple shallow neural network calle LeNet-5 (http://vision.stanford.edu/cs598_spring07/papers/Lecun98.pdf)
# +
# Must define the input shape in the first layer of the neural network
#layer 1: Convolution
model.add(tf.keras.layers.Conv2D(filters = 64,
kernel_size = 2,
strides = 1,
activation = 'relu',
input_shape = (img_width,img_height, channels)))
#Pooling layer 1
model.add(tf.keras.layers.MaxPooling2D(pool_size = 2, strides = 2))
model.add(tf.keras.layers.Dropout(0.3))
#Layer 2
#Conv Layer 2
model.add(tf.keras.layers.Conv2D(filters = 32,
kernel_size = 2,
strides = 1,
activation = 'relu'))
#Pooling Layer 2
model.add(tf.keras.layers.MaxPooling2D(pool_size = 2, strides = 2))
model.add(tf.keras.layers.Dropout(0.3))
#Flatten
model.add(tf.keras.layers.Flatten())
#Layer 3
#Fully connected layer 1
model.add(tf.keras.layers.Dense(units = 256, activation = 'relu'))
#Layer 4
#Fully connected layer 2
model.add(tf.keras.layers.Dense(units = 128, activation = 'relu'))
#Layer 5
#Output Layer
model.add(tf.keras.layers.Dense(units = 10, activation = 'softmax'))
# -
# # Must define the input shape in the first layer of the neural network
# bmodel.add(tf.keras.layers.MaxPooling2D(pool_size=2))
# model.add(tf.keras.layers.Dropout(0.3))
#
# model.add(tf.keras.layers.Conv2D(filters=32, kernel_size=2, padding='same', activation='relu'))
# model.add(tf.keras.layers.MaxPooling2D(pool_size=2))
# model.add(tf.keras.layers.Dropout(0.3))
#
# model.add(tf.keras.layers.Flatten())
# model.add(tf.keras.layers.Dense(256, activation='relu'))
# model.add(tf.keras.layers.Dropout(0.5))
# model.add(tf.keras.layers.Dense(10, activation='softmax'))
# Take a look at the model summary
#total_params =(filter_height * filter_width * input_image_channels + 1) * number_of_filters
#For layer 1:
# when our filter size is 2 and the input has 3 channels: (2*2*3+1)*64 = 832 parameters
# For LeNet-5: filters = 6, kernel_size = 5, 1 input channel: (5*5*1+1)*6 = 156
model.summary()
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# +
from keras.callbacks import ModelCheckpoint
checkpointer = ModelCheckpoint(filepath='model.weights.best.hdf5', verbose = 1, save_best_only=True)
model.fit(x_train,
y_train,
batch_size=64,
epochs=2,
validation_data=(x_valid, y_valid),
callbacks=[checkpointer])
# +
# Evaluate the model on test set
score = model.evaluate(x_test, y_test, verbose=0)
# Print test accuracy
print('\n', 'Test accuracy:', score[1])
# +
y_hat = model.predict(x_test)
# Plot a random sample of 10 test images, their predicted labels and ground truth
figure = plt.figure(figsize=(20, 8))
for i, index in enumerate(np.random.choice(x_test.shape[0], size=15, replace=False)):
ax = figure.add_subplot(3, 5, i + 1, xticks=[], yticks=[])
# Display each image
ax.imshow(np.squeeze(x_test[index]))
predictedIndex = int(np.argmax(y_hat[index]))
actualIndex = int(np.argmax(y_test[index]))
# Set the title for each image
ax.set_title("{} ({})".format(cifar_labels[predictedIndex],
cifar_labels[actualIndex]),
color=("green" if predictedIndex == actualIndex else "red"))
# -
# ## Visualize the layers
def deprocess_image(x):
    """Convert a raw activation-maximization array into a displayable
    uint8 RGB image.

    Steps: standardize to mean 0 / std ~0.1, shift into [0, 1], clip,
    scale to 0-255, move channels last if the backend is channels-first,
    and cast to uint8.

    Fix: operates on a copy so the caller's array is no longer mutated by
    the in-place augmented assignments (the previous version modified its
    argument as a side effect).
    """
    x = np.array(x, dtype='float64')  # copy: avoid mutating the input
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + tf.keras.backend.epsilon())
    x *= 0.1
    # shift to [0, 1] and clip
    x += 0.5
    x = np.clip(x, 0, 1)
    # scale to the displayable 0-255 range
    x *= 255
    if tf.keras.backend.image_data_format() == 'channels_first':
        # channels-first -> channels-last, as expected by plt.imshow
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x
def normalize(x):
    """Scale a tensor by its root-mean-square value (an L2-style norm),
    adding the backend epsilon to guard against division by zero."""
    backend = tf.keras.backend
    rms = backend.sqrt(backend.mean(backend.square(x)))
    return x / (rms + backend.epsilon())
input_img = model.input
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])
layer_dict
layer_name = 'conv2d_2'
kept_filters = []
for filter_index in range(16):
    # Visualize only the first 16 filters of the chosen conv layer
    # (the layer itself may contain more).
    print('Processing filter %d' % filter_index)
    start_time = time.time()
    # we build a loss function that maximizes the activation
    # of the nth filter of the layer considered
    layer_output = layer_dict[layer_name].output
    if tf.keras.backend.image_data_format() == 'channels_first':
        loss = tf.keras.backend.mean(layer_output[:, filter_index, :, :])
    else:
        loss = tf.keras.backend.mean(layer_output[:, :, :, filter_index])
    # we compute the gradient of the input picture wrt this loss
    # NOTE(review): K.gradients is TF1 graph-mode API — confirm eager
    # execution is disabled if this runs under TF2.
    grads = tf.keras.backend.gradients(loss, input_img)[0]
    # normalization trick: we normalize the gradient so the ascent step
    # size is comparable across filters
    grads = normalize(grads)
    # this function returns the loss and grads given the input picture
    iterate = tf.keras.backend.function([input_img], [loss, grads])
    # step size for gradient ascent
    step = 1.
    # we start from a gray image with some random noise
    if tf.keras.backend.image_data_format() == 'channels_first':
        input_img_data = np.random.random((1, 3, img_width, img_height))
    else:
        input_img_data = np.random.random((1, img_width, img_height, channels))
    input_img_data = (input_img_data - 0.5) * 20 + 128
    # we run gradient ascent for 20 steps
    for i in range(20):
        loss_value, grads_value = iterate([input_img_data])
        input_img_data += grads_value * step
        print('Current loss value:', loss_value)
        if loss_value <= 0.:
            # some filters get stuck to 0, we can skip them
            break
    # decode the resulting input image (only keep filters that activated)
    if loss_value > 0:
        img = deprocess_image(input_img_data[0])
        kept_filters.append((img, loss_value))
    end_time = time.time()
    print('Filter %d processed in %ds' % (filter_index, end_time - start_time))
# +
n = 3
# the filters that have the highest loss are assumed to be better-looking.
# we will only keep the top n^2 filters.
kept_filters.sort(key=lambda x: x[1], reverse=True)
#kept_filters = kept_filters[:n * n]
# build a black picture with enough space for
# our 8 x 8 filters of size 128 x 128, with a 5px margin in between
margin = 5
width = n * img_width + (n - 1) * margin
height = n * img_height + (n - 1) * margin
stitched_filters = np.zeros((width, height, 3))
# fill the picture with our saved filters
for i in range(n):
for j in range(n):
print(i)
img, loss = kept_filters[i * n + j]
width_margin = (img_width + margin) * i
height_margin = (img_height + margin) * j
stitched_filters[
width_margin: width_margin + img_width,
height_margin: height_margin + img_height, :] = img
plt.imshow(stitched_filters)
# -
| 17-Sept-2018/CIFAR-10 - Worked Out Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Description
# In this notebook we explore the attitude update alone, assuming we know the the astrometric parameters of the sources.
# +
# Module import
from IPython.display import Image
import sys
# append to path the folder that contains the analytic scanner
sys.path.append('../GaiaLab/scan/analytic_scanner')
import copy
from tqdm import trange
import quaternion
# local imports
import frame_transformations as ft
from scanner import Scanner
from satellite import Satellite
from source import Source
import constants as const
from agis import Agis
from agis import Calc_source
from agis_functions import *
from analytic_plots import *
# Ipython magics
# %load_ext autoreload
# %autoreload 2
# %reload_ext autoreload
# -
# # For scaling:
# Table summarizing the scaling that has to be done (not yet implemented). However, it can be kept in mind while playing with the parameters.
Image('./figures/scaling_factors.png')
# # 1.- **Initializing objects:**
# ### 1.1 satellite
# +
# Time window of the simulation: one day, sampled every 15 minutes.
t_init = 0
t_end = t_init + 1
my_dt = 1/24/4 # [days] == 15 minutes
# objects: the satellite carries the attitude splines (s_w, s_x, s_y, s_z)
spline_degree = 3
gaia = Satellite(ti=t_init, tf=t_end, dt= my_dt, k=spline_degree)
print('#knots:', len(gaia.s_w.get_knots()))
# -
# ### 1.2 scanner
# Scanner with two fields of view; zeta_limit bounds the across-scan angle.
double_telescope = True
my_scanner = Scanner(zeta_limit=np.radians(50) ,double_telescope=double_telescope)
# # End scanner
# +
num_times_for_sources=100 # presumably several sources per sampled time -- TODO confirm against generate_angles_of_sources
times_for_source = np.linspace(t_init, t_end, num=num_times_for_sources, endpoint=True)
# Generate sky positions (radians) that the satellite will actually scan.
alphas, deltas = generate_angles_of_sources(times_for_source, sat=gaia, noise_factor=0)
# Build Source objects; Source takes degrees, hence np.degrees. The trailing
# zeros are presumably the remaining astrometric parameters (parallax,
# proper motions, radial velocity) -- TODO confirm against Source's signature.
sources = []
for n in range(len(alphas)):
    sources.append(Source(str(n), np.degrees(alphas[n]), np.degrees(deltas[n]), 0, 0, 0, 0))
# -
# -
# # 2- Scan and results
# ### 2.1 Scan
# +
obs_times = []
calc_sources = []
real_sources = []
# scan the sources and keep as real sources only the ones that are observed
for i in trange(len(sources)):
    s = sources[i]
    my_scanner.scan(gaia, s, ti=t_init, tf=t_end)
    my_scanner.compute_angles_eta_zeta(gaia, s)
    # keep the source only if the scanner observed it at least once
    if len(my_scanner.obs_times)>0:
        obs_times += my_scanner.obs_times
        real_sources.append(s)
        # .copy() so the Calc_source owns its list, not the scanner's buffer
        calc_sources.append(Calc_source(obs_times=my_scanner.obs_times.copy(), source=s))
sources = real_sources
obs_times = list(np.sort(obs_times))
# -
# check for consistency: no observation may fall after the end of the run
if list(np.array(obs_times)[np.where(np.array(obs_times)>t_end)]):
    raise ValueError('observed time greater than end time')
# ### 2.2 Visualise results of scan
# +
# Plot the distribution of sources and observations along the time axis
plt.figure()
plt.title('Created_sources:'+str(len(alphas))+' || scanner-found sources:'+str(len(calc_sources)))
plt.plot(obs_times,np.zeros(len(obs_times)), 'r+', label='obs_times')
plt.plot(times_for_source, np.zeros(len(times_for_source)), 'b.', label='times of sources', alpha=0.5)
sampled_times = np.linspace(t_init, t_end, num=500)
plt.plot(sampled_times, np.zeros(len(sampled_times)), 'k,', label='original times')
plt.xlabel('time [days]'), plt.ylabel('unit value') # , plt.ylim((-1e-6, 1e-6))
plt.grid(), plt.legend(), plt.show();
# -
# plot the distribution of observations as a histogram with bin-size
# approximately equal to the attitude knot interval
plt.figure()
plt.title('Observation per (approximative) knot interval')
knots = gaia.s_x.get_knots()
bins = int(len(knots))  # one bin per knot, so bin width ~ knot spacing
print('#bins:', bins)
plt.hist(obs_times, bins=bins, align='mid');
plt.xlabel('time [days]'), plt.ylabel('number of observations')
plt.grid(), plt.show();
# # 3.- *Update Attitude*
# ### 3.1 Create Solver
spline_degree = 3
# Solver updating only the attitude; source parameters are held fixed.
Solver = Agis(sat=gaia, calc_sources=calc_sources, real_sources=sources, updating='attitude',
              double_telescope=double_telescope,
              attitude_splines=[gaia.s_w, gaia.s_x, gaia.s_y, gaia.s_z],
              spline_degree=spline_degree,
              attitude_regularisation_factor=5e-1)
# ##### 3.1.1 add noise to attitude
# To ignore if we want to start directly with the correct attitude
# Ignore this cell if you don't want to modify the initial attitude
# Can be used to check that when recreating the splines in the solver we (almost) do not create additional errors
my_times = np.array(obs_times)
print('Error before Noise: ', Solver.error_function())
# Multiplicative noise on the attitude spline coefficients (~1e-4 relative).
c_noise = Solver.att_coeffs * np.random.rand(Solver.att_coeffs.shape[0], Solver.att_coeffs.shape[1]) * 1e-4
last_coef = 4
# perturb the first four coefficient rows -- presumably the four attitude
# quaternion components (w, x, y, z) -- TODO confirm against att_coeffs layout
Solver.att_coeffs[:last_coef] = Solver.att_coeffs[:last_coef] + c_noise[:last_coef]
# rebuild the spline objects from the perturbed coefficients
Solver.actualise_splines()
print('Error after Noise: ', Solver.error_function())
# ### 3.2 Visualize Solver data
# ##### 3.2.1 Visualize attitude
# compare initial difference between satellite and solver attitudes
fig = compare_attitudes(gaia, Solver, obs_times)
# ##### 3.2.2 Visualize Matrices
# Compute Normal matrix (LHS of the attitude normal equations)
N_aa = Solver.compute_attitude_LHS()
A=N_aa.copy()
plt.figure(figsize=(8,8))
threshold = 0
# replace exact zeros with the max value so the sparsity pattern stands out
A[np.where(A==threshold)] = A.max()
plt.imshow(A, vmin=None, vmax=None)
plt.colorbar()
plt.show();
# ##### 3.2.3 Visualize eigen properties of Normal matrix
# +
# eigh is appropriate here since the normal matrix is symmetric
eig_vals, eig_vecs = np.linalg.eigh(N_aa)
plt.figure()
plt.title('Ordered Eigenvalues.')
plt.semilogy(eig_vals, label='eigenvalues')
plt.legend()
plt.grid(), plt.show();
# -
# ### 3.3 **Iterate**
errors = [] # list to store the errors
errors.append(Solver.error_function())
Solver.iterate(1, verbosity=1)
errors.append(Solver.error_function())
# ##### 3.3.1 Visualized residuals
fig3 = multi_compare_attitudes_errors(gaia, Solver, obs_times)
# ##### 3.3.2 Iterate more
for i in range(5):
Solver.iterate(1, verbosity=1)
errors.append(Solver.error_function())
# ##### 3.3.3 Visualize convergence
plt.figure()
plt.plot(errors[:], label='error')
plt.grid(), plt.legend(), plt.xlabel('iterations'), plt.ylabel('error')
plt.show()
fig3 = multi_compare_attitudes_errors(gaia, Solver, obs_times);
# # 4.- Conclusion
# We hope you enjoyed this notebook! For any question, bugs, doubts, do not hesitate to contact us.
| notebooks/03-Attitude_update.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # process_fgmax
#
# Read in fgmax results and produce plots.
# + tags=["hide-py"]
# %matplotlib inline
# -
from pylab import *
# +
import os,sys
import glob
from scipy.interpolate import RegularGridInterpolator
import matplotlib as mpl
from matplotlib import colors
from clawpack.geoclaw import topotools, dtopotools
from clawpack.visclaw import colormaps
from clawpack.visclaw.plottools import pcolorcells
from clawpack.geoclaw import fgmax_tools
# -
save_figs = True  # global toggle read by savefigp()

fgmax_plotdir = '_plots/fgmax_plots'
# os.makedirs is portable and raises on real failures, unlike the original
# os.system('mkdir -p ...') which silently ignored errors and is Unix-only.
os.makedirs(fgmax_plotdir, exist_ok=True)

def savefigp(fname):
    """Save the current figure under fgmax_plotdir when save_figs is True.

    fname: file name (no directory) for the saved figure.
    """
    # save_figs is only read, so no `global` declaration is needed
    if save_figs:
        fullname = os.path.join(fgmax_plotdir, fname)
        savefig(fullname)
        print('Created ', fullname)
    else:
        print('save_figs = False')
outdir = '_output'

# Collect the output times recorded in the fort.t0* files written by the run.
t_files = glob.glob(outdir + '/fort.t0*')
times = []
for f in t_files:
    # context manager so every file handle is closed promptly
    # (the original open(f).readlines() never closed the files)
    with open(f, 'r') as tfile:
        for line in tfile:
            if 'time' in line:
                t = float(line.split()[0])  # the value precedes the 'time' label
                times.append(t)
times.sort()
print('Output times found: ',times)
if len(times) > 0:
    t_hours = times[-1] / 3600.
    print('\nfgmax results are presumably from final time: %.1f seconds = %.2f hours'\
          % (times[-1], t_hours))
else:
    # no output found; NaN makes later plot titles read 'nan hours'
    t_hours = nan
# +
# Read fgmax data for grid 1:
fgno = 1
fg = fgmax_tools.FGmaxGrid()
fg.read_fgmax_grids_data(fgno)
fg.read_output(outdir=outdir)
# +
# Topography colormap: greens/browns for land, blues for sea, split at z=0.
zmin = -60.
zmax = 20.
land_cmap = colormaps.make_colormap({ 0.0:[0.1,0.4,0.0],
                                     0.25:[0.0,1.0,0.0],
                                      0.5:[0.8,1.0,0.5],
                                      1.0:[0.8,0.5,0.2]})
sea_cmap = colormaps.make_colormap({ 0.0:[0,0,1], 1.:[.8,.8,1]})
cmap, norm = colormaps.add_colormaps((land_cmap, sea_cmap),
                                     data_limits=(zmin,zmax),
                                     data_break=0.)
figure(figsize=(8,8))
pc = pcolorcells(fg.X, fg.Y, fg.B, cmap=cmap, norm=norm)
cb = colorbar(pc,shrink=0.5,extend='both')
cb.set_label('meters')
cb.set_ticks(hstack((linspace(zmin,0,5), linspace(0,zmax,5))))
# roughly distance-true aspect ratio at latitude 48N
gca().set_aspect(1./cos(48*pi/180.))
ticklabel_format(useOffset=False)
xticks(rotation=20);
title('GeoClaw B topography on fg1 grid');
# -
fg.B0 = fg.B # no seafloor deformation in this problem
# onshore flow depth: mask out points whose initial topography is below sea level
fg.h_onshore = ma.masked_where(fg.B0 < 0., fg.h)
# +
# Flow-depth colormap: blues for shallow, reds for deep.
bounds_depth = array([1e-6,0.5,1.0,1.5,2,2.5,3.0])
cmap_depth = colors.ListedColormap([[.7,.7,1],[.5,.5,1],[0,0,1],\
                 [1,.7,.7], [1,.4,.4], [1,0,0]])
# Set color for value exceeding top of range to purple:
cmap_depth.set_over(color=[1,0,1])
# Set color for land points without inundation to light green:
cmap_depth.set_under(color=[.7,1,.7])
norm_depth = colors.BoundaryNorm(bounds_depth, cmap_depth.N)

figure(figsize=(8,8))
pc = pcolorcells(fg.X, fg.Y, fg.h_onshore, cmap=cmap_depth, norm=norm_depth)
cb = colorbar(pc, extend='max', shrink=0.7)
cb.set_label('meters')
contour(fg.X, fg.Y, fg.B, [0], colors='g')  # green shoreline contour
gca().set_aspect(1./cos(48*pi/180.))
ticklabel_format(useOffset=False)
xticks(rotation=20)
title('Maximum Onshore flow depth over %.2f hours\nfgmax grid %s' % (t_hours,fgno))
savefigp('fgmax%s_h_onshore.png' % str(fgno).zfill(4))
# +
# Speed colormap. NOTE(review): the original cell defined bounds_speed and
# cmap_speed twice in a row; the first (9-edge / 8-color) pair was dead code,
# immediately overwritten by this 8-edge / 7-color version, so it was removed.
bounds_speed = np.array([1e-6,0.5,1.0,1.5,2,2.5,3,4.5])
cmap_speed = mpl.colors.ListedColormap([[.9,.9,1],[.6,.6,1],\
                [.3,.3,1],[0,0,1], [1,.8,.8],\
                [1,.6,.6], [1,0,0]])
# Set color for value exceeding top of range to purple:
cmap_speed.set_over(color=[1,0,1])
# Set color for land points without inundation to light green:
cmap_speed.set_under(color=[.7,1,.7])
norm_speed = colors.BoundaryNorm(bounds_speed, cmap_speed.N)

figure(figsize=(8,8))
pc = pcolorcells(fg.X, fg.Y, fg.s, cmap=cmap_speed, norm=norm_speed)
cb = colorbar(pc, extend='max', shrink=0.7)
cb.set_label('m/s')
contour(fg.X, fg.Y, fg.B0, [0], colors='g')
gca().set_aspect(1./cos(48*pi/180.))
ticklabel_format(useOffset=False)
xticks(rotation=20)
title('Maximum speed over %.2f hours\nfgmax grid %s' % (t_hours,fgno))
savefigp('fgmax%s_speed.png' % str(fgno).zfill(4))
# -
# -
# Save this so we can plot the topo below...
import copy
fg1 = copy.copy(fg)
# ## Read fgmax values specified on a Transect
# Read fgmax data:
fgno = 2
fg = fgmax_tools.FGmaxGrid()
fg.read_fgmax_grids_data(fgno)
fg.read_output(outdir=outdir)
xx = fg.X
yy = fg.Y
# +
figure(figsize=(8,8))
pc = pcolorcells(fg1.X, fg1.Y, fg1.B, cmap=cmap, norm=norm)
cb = colorbar(pc,shrink=0.5,extend='both')
cb.set_label('meters')
cb.set_ticks(hstack((linspace(zmin,0,5), linspace(0,zmax,5))))
gca().set_aspect(1./cos(48*pi/180.))
ticklabel_format(useOffset=False)
xticks(rotation=20);
plot(xx,yy,'r')
title('GeoClaw B topography values on fg1 grid\n with transect from fg2');
# -
figure(figsize=(12,4))
fill_between(xx, fg.B, fg.B+fg.h, color=[.5,.5,1])
plot(xx,fg.B+fg.h,'b')
plot(xx,fg.B,'g')
plot(xx, ma.masked_where(fg.B>0, 0*xx), 'k')
grid(True)
ylim(-10,20);
title('Maximum elevation over %.2f hours\nfgmax grid %s' % (t_hours,fgno))
savefigp('fgmax%s_surface.png' % str(fgno).zfill(4));
# ## Read fgmax points as specified on a masked grid
# +
# Read fgmax data for the masked-grid region (fgno = 3):
fgno = 3
fg = fgmax_tools.FGmaxGrid()
fg.read_fgmax_grids_data(fgno)
fg.read_output(outdir=outdir)
# -
fg.B0 = fg.B # no seafloor deformation in this problem
fg.h_onshore = ma.masked_where(fg.B0 < 0., fg.h)
# +
# Topography restricted to the selected fgmax points.
figure(figsize=(8,8))
pc = pcolorcells(fg.X, fg.Y, fg.B, cmap=cmap, norm=norm)
cb = colorbar(pc, extend='both', shrink=0.7)
cb.set_label('meters')
cb.set_ticks(hstack((linspace(zmin,0,5), linspace(0,zmax,5))))
gca().set_aspect(1./cos(48*pi/180.))
ticklabel_format(useOffset=False)
xticks(rotation=20)
title('GeoClaw B at points selected as fgmax grid\nfgmax grid %s' % fgno);
# +
# Maximum onshore flow depth on the masked grid.
figure(figsize=(8,8))
pc = pcolorcells(fg.X, fg.Y, fg.h_onshore, cmap=cmap_depth, norm=norm_depth)
cb = colorbar(pc, extend='max', shrink=0.7)
cb.set_label('meters')
contour(fg.X, fg.Y, fg.B0, [0], colors='g')
gca().set_aspect(1./cos(48*pi/180.))
ticklabel_format(useOffset=False)
xticks(rotation=20)
title('Maximum Onshore flow depth over %.2f hours' % t_hours);
savefigp('fgmax%s_h_onshore.png' % str(fgno).zfill(4))
# +
# Maximum speed on the masked grid.
figure(figsize=(8,8))
pc = pcolorcells(fg.X, fg.Y, fg.s, cmap=cmap_speed, norm=norm_speed)
cb = colorbar(pc, extend='max', shrink=0.7)
cb.set_label('m/s')
contour(fg.X, fg.Y, fg.B0, [0], colors='g')
gca().set_aspect(1./cos(48*pi/180.))
ticklabel_format(useOffset=False)
xticks(rotation=20)
title('Maximum speed over %.2f hours\nfgmax grid %s' % (t_hours,fgno))
savefigp('fgmax%s_speed.png' % str(fgno).zfill(4))
# -
# ### View fgmax points selected
#
# This isn't generally needed, but if you want to inspect the file that specified fgmax points originally:
# +
# Load the topo-style file that originally selected the fgmax points.
fg3input = topotools.Topography(path=fg.xy_fname, topo_type=3)
fg3input.X.shape
figure(figsize=(8,8))
pc = pcolorcells(fg3input.X, fg3input.Y, fg3input.Z)
cb = colorbar(pc, shrink=0.7)
gca().set_aspect(1./cos(48*pi/180.))
ticklabel_format(useOffset=False)
xticks(rotation=20);
# -
# ## Read points with `point_style == 0`
# +
# Read fgmax data for a handful of isolated points (grid 4); tabulate max depth.
fg = fgmax_tools.FGmaxGrid()
fg.read_fgmax_grids_data(4)
fg.read_output(outdir=outdir)
print('\n x y max depth')
for j in range(fg.npts):
    print('%10.3f %10.3f %10.3f' % (fg.X[j], fg.Y[j], fg.h[j]))
# +
# Same for grid 5, tabulating maximum speed instead.
fg = fgmax_tools.FGmaxGrid()
fg.read_fgmax_grids_data(5)
fg.read_output(outdir=outdir)
print('\n x y max speed')
for j in range(fg.npts):
    print('%10.3f %10.3f %10.3f' % (fg.X[j], fg.Y[j], fg.s[j]))
# -
| examples/tsunami/radial-ocean-island-fgmax/process_fgmax.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Syntaxe des listes en compréhension
# Lorsque l'on désigne *explicitement* chaque élément d'un tableau comme dans `t = [4, 3, 2, 1]`, on parle de **notation en extension** du tableau.
#
# Python connaît une autre notation pour *construire des listes* appelée **notation en compréhension**.
#
# En voici quelques exemples; **expérimenter** pour vous familiariser avec cette *syntaxe*.
[None for _ in range(10)] # there is a shorter way to get this ...
## shorter version of the previous expression
[None] * 10
[i for i in range(6)] # same result as list(range(6))
# The loop variable can be **used** to compute each element
[x + 1 for x in [0, 1, 4, 9, 16, 25]]
[x * x for x in range(6)]
# The two previous examples can be combined by **nesting** ...
[x + 1 for x in [i ** 2 for i in range(6)]] # the inner list is built first!
# L'intérêt? Supposez que vous ayez besoin des précédents (et non suivants) des carrés de 1 à 100... (pas de 0 à 99...)
# #### À faire toi-même
#
# Réalise la liste indiquée précédemment en utilisant la **notation en compréhension**.
# Solution 1: build the squares first, then shift every value down by one.
sol1 = [square - 1 for square in (i ** 2 for i in range(1, 101))]
sol1
# Solution 2: do both operations inside a single comprehension.
sol2 = [n * n - 1 for n in range(1, 101)]
assert sol1 == sol2  # must pass without raising
print(sol2)  # with print(...) the list is displayed differently...
# ___
# On peut utiliser une **fonction** avec la variable de boucle
# ma_fonc: expects a number and returns the tuple (number, its square).
def ma_fonc(x):
    return (x, x**2)
# the function is called once for each value taken by nb
[ma_fonc(nb) for nb in range(5)]
# **selections** can be made with an `if` clause...
N = 10
t = [i for i in range(N) if i != 3 and i != 7]
t
# Mind the order
# raises an error! (deliberate demonstration: this line is invalid syntax)
[i if i not in [3, 7] for i in range(N)] # the if selection must always come **after** its associated loop
a, b = 10, 20
[i for i in range(a, b) if i % 2 != 0] # i % 2 is zero only when i is even!
# #### À faire toi-même
# Je voudrais une petite valse à trois temps de 100 mesures `[1,2,3,1,2,3,...]`
#
# *aide*: utiliser l'opérateur modulo `%` et un peu de sélection ... au fait, le reste d'une division est toujours strictement inférieur au diviseur ...
# correction: the remainder of a division can only be 0, 1, 2, ..., divisor-1
# (always strictly smaller than the divisor)
solution = [(i % 3) + 1 for i in range(300)]
print(solution[:10]) # only look at the first 10 values
# to test your solution.
from random import randint
# randint's upper bound is INCLUSIVE, so the limit must be 99: with the
# original randint(0, 100), i could be 300 and solution[300:303] would be
# the empty list, making the assertion below fail spuriously.
i = 3 * randint(0, 99)
assert len(solution) == 300
assert solution[i:i+3] == [1,2,3]
# ___
# #### À faire toi-même
# Construire la liste de tous les entiers entre 50 et 100 qui ne sont ni des multiples de 2 ni des multiples de 5
# correction: keep the integers that are multiples of neither 2 nor 5
# (a nonzero remainder is truthy, so `n % 2 and n % 5` means both are nonzero)
sol = [n for n in range(50, 101) if n % 2 and n % 5]
print(sol)
# ___
# On peut utiliser plusieurs `for`
[(i, j) for i in range(3) for j in range(6)]
# observe that on each iteration of the first loop, the second loop runs to completion.
#
# And when the order of the two loops is swapped:
[(i, j) for j in range(6) for i in range(3)]
# #### À faire toi-même
# Construire la liste de tous les mots possibles de trois lettres qu'on peut former à partir de 'a', 'b' et 'c'. Combien y en a-t-il?
#
# *aide*:
# - dans la boucle `for l in "abc": ...` -> la variable `l` prend successivement les valeurs `'a'`, `'b'`, `'c'`.
# - Penser à la **concaténation** des `str` avec `+`... Utiliser plusieurs boucles... (combien?).
# trying out the hint
for letter in "abc":
    print(letter)
# **Reminder**: adding character strings puts them end to end (**concatenation**),
# e.g. `"123"+"4"+"cinq"` gives `"1234cinq"`
# partial solution for the two letters "ab"
sol = [first + second for first in "ab" for second in "ab"]
print(sol)
# full solution: one loop per letter position
sol = [a + b + c for a in "abc" for b in "abc" for c in "abc"]
print(sol)
# Inutile de compter à la main ou avec `len`: il y en a $3\times 3\times 3 = 9\times 3={\bf 27}$.
#
# _____
# #### À faire toi-même
# Pareil que le précédent mais les mots ne doivent pas contenir deux fois la même lettre (*aide*: utiliser un if...)
# two-letter solution: drop the words that repeat a letter
sol = [a + b for a in "ab" for b in "ab" if a != b]
print(sol)
# complete solution: a word is kept only when its three letters are all
# distinct, i.e. when they form a set of size 3
[a + b + c
 for a in "abc"
 for b in "abc"
 for c in "abc"
 if len({a, b, c}) == 3]
# ____
# # Extra material: harder!
# Comprehensions can be nested at one end ...
[x + 1 for x in [x**2 for x in range(6)]] # seen before!
# or ... at the other! That is, the **expression** can itself be a list comprehension!
[[i for i in range(j, j+4)] for j in [1, 5, 9]]
# or at both ends ...
# a 10x10 grid: one row per start value, each row holding start..start+9
N = 10
matrice = [
    list(range(start, start + N))              # expression evaluated on each ...
    for start in [1 + row * N for row in range(N)]  # ... change of start
]
matrice
# regular subdivision of the interval [bg, bd] into nb_subdivisions steps
bg, bd = 3, 18  # left bound, right bound
largeur = bd - bg  # here 15
nb_subdivisions = 5  # number of "steps" from the left bound to the right bound
pas = largeur / nb_subdivisions
sub_intervalle = [bg + k * pas for k in range(nb_subdivisions + 1)]
sub_intervalle
# arithmetic progression: nb_pas values starting at depart, spaced taille_pas apart
depart = 2
taille_pas = 5
nb_pas = 12
pas = [depart + k * taille_pas for k in range(nb_pas)]
pas
| 01_donnees_en_tables/correction/02_syntaxe_listes_en_comprehension_correction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from scipy.io import loadmat
import numpy as np
# import numpy.linalg as la
import matplotlib.pyplot as plt
#import matplotlib.cm as cm
# #%matplotlib notebook
# -
######## importing data #########
# ex8data1.mat: 2-feature server metrics (X) plus a labeled cross-validation
# set (Xval, yval) used later to choose the anomaly threshold.
data = loadmat('ex8data1.mat')
X = data['X']
Xval = data['Xval']
yval = data['yval']
######## visualizing data #########
plt.figure(1)
plt.scatter(X[:,0], X[:,1])
plt.axis([0, 30, 0, 30])
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s)')
# +
def estimateGaussian(inp):
    """Return the per-feature mean and (biased) variance of the data.

    inp: (m, n) array of m examples with n features.
    Returns a tuple (mu, var), each of shape (n,).
    """
    mu = np.mean(inp, axis=0)
    var = np.var(inp, axis=0)
    return mu, var
def multivariateGaussian(x, mean, cov):
    """Evaluate the multivariate Gaussian density N(mean, cov) at each row of x.

    x:    (m, d) array of points.
    mean: (d,) mean vector.
    cov:  either a (d,) vector of variances (interpreted as a diagonal
          covariance) or a full (d, d) covariance matrix.
    Returns an (m,) array of density values.
    """
    d = len(mean)
    # drop redundant singleton dimensions, then promote a variance vector
    # to a diagonal covariance matrix
    cov = cov.squeeze()
    if cov.ndim == 1:
        cov = np.diag(cov)
    centered = x - mean
    # quadratic form (x-mu)^T Sigma^{-1} (x-mu), one value per row of x
    quad = np.sum((centered @ np.linalg.pinv(cov)) * centered, 1)
    norm_const = ((2 * np.pi) ** (d / 2)) * np.sqrt(np.linalg.det(cov))
    return np.e ** (-0.5 * quad) / norm_const
def findThreshold(yval, pval):
    """Pick the epsilon maximizing F1 when flagging pval < epsilon as anomalous.

    yval: ground-truth labels (1 = anomaly), shape (m,) or (m, 1).
    pval: density values for the validation examples, shape (m,).
    Returns (bestEpsilon, bestF1); bestF1 keeps the array shape produced by
    the dot product with yval.
    """
    bestEpsilon = 0
    bestF1 = 0
    # sweep 1000 evenly spaced candidate thresholds over the range of pval
    lo, hi = np.min(pval), np.max(pval)
    stepsize = (hi - lo) / 1000
    for epsilon in np.arange(lo, hi + stepsize, stepsize):
        pred = pval < epsilon
        # F1 = 2*TP / (predicted positives + actual positives);
        # pred.T @ yval counts the true positives
        F1 = (2 * (pred.T @ yval)) / (np.sum(pred) + np.sum(yval))
        if F1 > bestF1:
            bestF1 = F1
            bestEpsilon = epsilon
    return bestEpsilon, bestF1
# +
######## visualizing data and gaussian contours #########
plt.figure(1)
plt.scatter(X[:,0], X[:,1])
# Evaluate the fitted Gaussian on a 30x30 grid; the y coordinates run
# top-down because of arange(30, 0, -1).
x1d, x2d = np.meshgrid(np.arange(30), np.arange(30,0,-1))
x12d = np.hstack((x1d.reshape(-1,1), x2d.reshape(-1,1)))
z12d = multivariateGaussian(x12d, *estimateGaussian(X)).reshape(30,30)
# contour levels spaced logarithmically: 1e-20, 1e-17, ..., 1e-2
plt.contour(x1d, x2d, z12d, 10.0**np.arange(-20,0,3))
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s)')
# +
# testing
# findThreshold(yval, multivariateGaussian(Xval, *estimateGaussian(X)))
# (8.990852779269496e-05, array([0.875]))
# -
# Density of every training point, and the threshold chosen on the validation set.
ypred = multivariateGaussian(X, *estimateGaussian(X))
epsilon, J = findThreshold(yval, multivariateGaussian(Xval, *estimateGaussian(X)))
######## visualizing outliers #########
plt.figure(1)
plt.scatter(X[:,0], X[:,1], color = 'blue', label = 'data', zorder = 0)
plt.axis([0, 30, 0, 30])
plt.xlabel('Latency (ms)')
plt.ylabel('Throughput (mb/s)')
# points whose density falls below epsilon are flagged as outliers (red)
plt.plot(X[(ypred < epsilon), 0], X[(ypred < epsilon), 1], 'ro', label = 'outliers', zorder = 1)
plt.legend()
# ### Multi dimensional outlier handling
# Higher-dimensional dataset: same pipeline, no visualization.
data = loadmat('ex8data2.mat')
X = data['X']
Xval = data['Xval']
yval = data['yval']
ypred = multivariateGaussian(X, *estimateGaussian(X))
epsilon, J = findThreshold(yval, multivariateGaussian(Xval, *estimateGaussian(X)))
print('epsilon=', epsilon, ' J=',J)
print('No. of outliers = ', np.sum(ypred < epsilon))
# expected output: epsilon= 1.3772288907613581e-18 J= [0.61538462]
# expected output: No. of outliers = 117
| ex8/anamoly_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mandatory assignments
#
# ## Mandatory assignment (i)
#
# Verify the analytical solutions (3-47), (3-49) and (3-52) in {% cite white06 %}. Experiment with higher order "CG" elements (the solution is then higher order continuous piecewise polynomials) and compute the *errornorm*. Report tables of the error vs mesh size (`mesh.hmin()`) and comment on the order of accuracy. See [Analysing the error](../chapter3/poiseuille.ipynb#Analysing-the-error).
#
# ## Mandatory assignment (ii)
#
# Solve the normalized equations for [plane stagnation flow](../chapter3/similarity.ipynb#mjx-eqn-eq:StagnationNonNorm) and [axisymmetric stagnation flow](../chapter3/similarity.ipynb#mjx-eqn-eq:AxiStagnationNonNorm) using both Picard and Newton iterations. Hint: Define a new variable $H = F'$ and solve a coupled system of equations for $H$ and $F$. Start for example like this:
#
# ```python
# from dolfin import *
# L = 10
# x = IntervalMesh(50, 0, L)
# Ve = FiniteElement('CG', x.ufl_cell(), 1)
# V = FunctionSpace(x, Ve)
# VV = FunctionSpace(x, Ve * Ve)
# hf = TrialFunction(VV)
# h, f = split(hf)
# vh, vf = TestFunctions(VV)
# ```
#
# ## Mandatory assignment (iii)
#
# Assume low Reynolds number and use FEniCS to compute a numerical solution of Stokes flow for a driven cavity. The domain of the cavity is $[0, 1]\times[0, 1]$ and the dynamic viscosity $\mu=100$. The domain consists of 4 solid walls, where the top lid ($y=1$ and $ 0 < x < 1$) is moving at speed $\boldsymbol{u}=(1, 0)$. The remaining 3 walls are not moving. Compute the streamfunction and find the center of the vortex, i.e., the point where the streamfunction goes through a minimum. For the driven cavity the value of the streamfunction can be set to zero on the entire exterior domain.
#
# Note that the boundary condition on $\psi$ follows from the very [definition of the streamfunction](../chapter3/stokes.ipynb#eq:streamfunction2D), where it should be understood that the variable $\psi$ will only need to be known up to an arbitrary constant. If $\psi$ is a solution, then $\psi+C$ gives exactly the same velocity field since the partial derivative of a constant (here $C$) is zero. As such we can put an "anchor" on the solution by specifying that $\psi=0$ at the corner where $x=0, y=0$. It then follows that $\psi$ will be zero on the entire domain. Along the left hand border, where $x=0$ and $0 \leq y \leq 1$, we have the boundary condition on velocity stating that $u=\partial \psi / \partial y = 0$. Since the gradient of $\psi$ is 0 along the border it follows that $\psi=0$ along this border. Similarly, for the top lid $v=-\partial \psi / \partial x=0$, and thus $\psi$ must be equal to $0$ for the entire top lid. The same procedure applies to the last two borders and consequently $\psi=0$ for the entire border. Note that the value does not have to be zero, any constant value may be chosen.
#
# ## Mandatory assignment (iv)
#
# Assume low Reynolds number and use FEniCS to compute a numerical solution of Stokes flow creeping past a step (see Fig. 3-37 in {% cite white06 %}). A mesh for the geometry is shown below
#
# <img src="../images/Backstep_mesh.png" width="400" height="300" angle=-90 />
#
# The height of the step is $0.1L$. The height and width of the rectangular geometry is $0.5L$ and $L$ respectively. Use $\mathrm{Re}=UL/\mu = 0.01$ (setting density to unity) as in Fig. 3-37 in {% cite white06 %}. Set the velocity of the top (at $y=0.5L$) to constant $\boldsymbol{u}=(1, 0)$. Use pseudo-traction for the inlet located at $x=0$ and the outlet located at $x=L$. No-slip for the bottom wall.
#
# Create a suitable mesh using for example [Gmsh](http://geuz.org/gmsh). It is also possible to create this mesh directly using [mshr](https://bitbucket.org/fenics-project/mshr).
#
| homepage/content/.ipynb_checkpoints/mandatory-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
# ### Why you need a good init
# To understand why initialization is important in a neural net, we'll focus on the basic operation you have there: matrix multiplications. So let's just take a vector `x`, and a matrix `a` initialized randomly, then multiply them 100 times (as if we had 100 layers).
# [Jump_to lesson 9 video](https://course.fast.ai/videos/?lesson=9&t=1132)
# NOTE(review): results depend on the global RNG stream, so the exact
# printed numbers vary from run to run.
x = torch.randn(512)
a = torch.randn(512,512)
x.mean(), x.std(), a.mean(), a.std()
# repeated matmul stands in for a 100-layer net with no nonlinearity
for i in range(100):
    x = x @ a
x.mean(), x.std()
# The problem you'll get with that is activation explosion: very soon, your activations will go to nan. We can even ask the loop to break when that first happens:
x = torch.randn(512)
a = torch.randn(512,512)
for i in range(100):
    x = a @ x
    # NaN is the only value not equal to itself, so this catches the first
    # step at which the activations blow up to nan
    if x.std() != x.std():
        break
i
# It only takes 27 multiplications! On the other hand, if you initialize your activations with a scale that is too low, then you'll get another problem:
x = torch.randn(512)
a = torch.randn(512,512) * 0.01  # deliberately too-small weight scale
# +
for i in range(100):
    x = a @ x
# after 100 layers the activations have vanished toward 0
x.mean(), x.std()
# -
# Here, every activation vanished to 0. So to avoid that problem, people have come up with several strategies to initialize their weight matrices, such as:
# - use a standard deviation that will make sure x and Ax have exactly the same scale
# - use an orthogonal matrix to initialize the weight (orthogonal matrices have the special property that they preserve the L2 norm, so x and Ax would have the same sum of squares in that case)
# - use [spectral normalization](https://arxiv.org/pdf/1802.05957.pdf) on the matrix A (the spectral norm of A is the least possible number M such that `torch.norm(A@x) <= M*torch.norm(x)` so dividing A by this M insures you don't overflow. You can still vanish with this)
# ### The magic number for scaling
# Here we will focus on the first one, which is the Xavier initialization. It tells us that we should use a scale equal to `1/math.sqrt(n_in)` where `n_in` is the number of inputs of our matrix.
# [Jump_to lesson 9 video](https://course.fast.ai/videos/?lesson=9&t=1273)
import math
x = torch.randn(512)
# Xavier scaling: divide by sqrt(n_in) so x and a @ x keep the same scale
a = torch.randn(512,512) / math.sqrt(512)
for i in range(100): x = a @ x
x.mean(),x.std()
# And indeed it works. Note that this magic number isn't very far from the 0.01 we had earlier.
1/ math.sqrt(512)
# But where does it come from? It's not that mysterious if you remember the definition of the matrix multiplication. When we do `y = a @ x`, the coefficients of `y` are defined by
#
# $$y_{i} = a_{i,0} x_{0} + a_{i,1} x_{1} + \cdots + a_{i,n-1} x_{n-1} = \sum_{k=0}^{n-1} a_{i,k} x_{k}$$
#
# or in code:
# ```
# y[i] = sum([c*d for c,d in zip(a[i], x)])
# ```
#
# Now at the very beginning, our `x` vector has a mean of roughly 0. and a standard deviation of roughly 1. (since we picked it that way).
# NB: This is why it's extremely important to normalize your inputs in Deep Learning, the initialization rules have been designed with inputs that have a mean 0. and a standard deviation of 1.
#
# If you need a refresher from your statistics course, the mean is the sum of all the elements divided by the number of elements (a basic average). The standard deviation represents if the data stays close to the mean or on the contrary gets values that are far away. It's computed by the following formula:
#
# $$\sigma = \sqrt{\frac{1}{n}\left[(x_{0}-m)^{2} + (x_{1}-m)^{2} + \cdots + (x_{n-1}-m)^{2}\right]}$$
#
# where m is the mean and $\sigma$ (the greek letter sigma) is the standard deviation. Here we have a mean of 0, so it's just the square root of the mean of x squared.
#
# If we go back to `y = a @ x` and assume that we chose weights for `a` that also have a mean of 0, we can compute the standard deviation of `y` quite easily. Since it's random, and we may fall on bad numbers, we repeat the operation 100 times.
# Now that looks very close to the dimension of our matrix 512. And that's no coincidence! When you compute y, you sum 512 product of one element of a by one element of x. So what's the mean and the standard deviation of such a product? We can show mathematically that as long as the elements in `a` and the elements in `x` are independent, the mean is 0 and the std is 1. This can also be seen experimentally:
# Then we sum 512 of those things that have a mean of zero, and a mean of squares of 1, so we get something that has a mean of 0, and mean of square of 512, hence `math.sqrt(512)` being our magic number. If we scale the weights of the matrix `a` and divide them by this `math.sqrt(512)`, it will give us a `y` of scale 1, and repeating the product as many times as we want won't overflow or vanish.
# ### Adding ReLU in the mix
# We can reproduce the previous experiment with a ReLU, to see that this time, the mean shifts and the standard deviation becomes 0.5. This time the magic number will be `math.sqrt(2/512)` to properly scale the weights of the matrix.
# We can double check by running the experiment on the whole matrix product.
# Or that scaling the coefficient with the magic number gives us a scale of 1.
# The math behind is a tiny bit more complex, and you can find everything in the [Kaiming](https://arxiv.org/abs/1502.01852) and the [Xavier](http://proceedings.mlr.press/v9/glorot10a.html) paper but this gives the intuition behind those results.
| nbs/dl2/02b_initializing-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.4 64-bit (''base'': conda)'
# metadata:
# interpreter:
# hash: dca0ade3e726a953b501b15e8e990130d2b7799f14cfd9f4271676035ebe5511
# name: python3
# ---
# # Distributed data parallel MaskRCNN training with PyTorch and SageMaker distributed
#
# [Amazon SageMaker's distributed library](https://docs.aws.amazon.com/sagemaker/latest/dg/distributed-training.html) can be used to train deep learning models faster and cheaper. The [data parallel](https://docs.aws.amazon.com/sagemaker/latest/dg/data-parallel.html) feature in this library (`smdistributed.dataparallel`) is a distributed data parallel training framework for PyTorch, TensorFlow, and MXNet.
#
# This notebook demonstrates how to use `smdistributed.dataparallel` with PyTorch(version 1.8.1) on [Amazon SageMaker](https://aws.amazon.com/sagemaker/) to train a MaskRCNN model on [COCO 2017 dataset](https://cocodataset.org/#home) using [Amazon FSx for Lustre file-system](https://aws.amazon.com/fsx/lustre/) as data source.
#
# The outline of steps is as follows:
#
# 1. Stage COCO 2017 dataset in [Amazon S3](https://aws.amazon.com/s3/)
# 2. Create Amazon FSx Lustre file-system and import data into the file-system from S3
# 3. Build Docker training image and push it to [Amazon ECR](https://aws.amazon.com/ecr/)
# 4. Configure data input channels for SageMaker
# 5. Configure hyper-parameters
# 6. Define training metrics
# 7. Define training job, set distribution strategy to SMDataParallel and start training
#
# **NOTE:** With large traning dataset, we recommend using [Amazon FSx](https://aws.amazon.com/fsx/) as the input filesystem for the SageMaker training job. FSx file input to SageMaker significantly cuts down training start up time on SageMaker because it avoids downloading the training data each time you start the training job (as done with S3 input for SageMaker training job) and provides good data read throughput.
#
#
# **NOTE:** This example requires SageMaker Python SDK v2.X.
# ## Amazon SageMaker Initialization
#
# Initialize the notebook instance. Get the AWS Region and a SageMaker execution role.
#
# ### SageMaker role
#
# The following code cell defines `role` which is the IAM role ARN used to create and run SageMaker training and hosting jobs. This is the same IAM role used to create this SageMaker Notebook instance.
#
# `role` must have permission to create a SageMaker training job and host a model. For granular policies you can use to grant these permissions, see [Amazon SageMaker Roles](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html). If you do not require fine-tuned permissions for this demo, you can used the IAM managed policy AmazonSageMakerFullAccess to complete this demo.
#
# As described above, since we will be using FSx, please make sure to attach `FSx Access` permission to this IAM role.
# +
# %%time
# ! python3 -m pip install --upgrade sagemaker
import sagemaker
from sagemaker import get_execution_role
from sagemaker.estimator import Estimator
import boto3

# Create a SageMaker session; job artifacts default to this account's bucket.
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()

# IAM role ARN used to create and run SageMaker training/hosting jobs.
# You may substitute a pre-existing role ARN instead of deriving it from
# the notebook instance.
role = get_execution_role()
# The role name is the last '/'-separated component of the ARN
# (arn:aws:iam::<account>:role/<name> -> <name>).
# NOTE: the original `role.split(['/'][-1])` only worked because
# `['/'][-1]` happens to evaluate to '/'; this is the same split, written plainly.
role_name = role.split('/')
print(f'SageMaker Execution Role:{role}')
print(f'The name of the Execution role: {role_name[-1]}')

# Resolve the AWS account ID and region of the active session; both are
# used later to construct the ECR image URI for the training container.
client = boto3.client('sts')
account = client.get_caller_identity()['Account']
print(f'AWS account:{account}')
session = boto3.session.Session()
region = session.region_name
print(f'AWS region:{region}')
# -
# To verify that the role above has required permissions:
#
# 1. Go to the IAM console: https://console.aws.amazon.com/iam/home.
# 2. Select **Roles**.
# 3. Enter the role name in the search box to search for that role.
# 4. Select the role.
# 5. Use the **Permissions** tab to verify this role has required permissions attached.
# ## Prepare SageMaker Training Images
#
# 1. SageMaker by default use the latest [Amazon Deep Learning Container Images (DLC)](https://github.com/aws/deep-learning-containers/blob/master/available_images.md) PyTorch training image. In this step, we use it as a base image and install additional dependencies required for training MaskRCNN model.
# 2. In the Github repository https://github.com/HerringForks/DeepLearningExamples.git we have made a `smdistributed.dataparallel` PyTorch MaskRCNN training script available for your use. We will be installing the same on the training image.
#
# ### Build and Push Docker Image to ECR
#
# Run the command below to build the Docker image and push it to ECR.
# Name and tag of the ECR repository/image that will hold the training container.
image = "<ADD NAME OF REPO>"  # Example: mask-rcnn-smdataparallel-sagemaker
tag = "<ADD TAG FOR IMAGE>"  # Example: pt1.8

# Show the Dockerfile and the build helper script before running them.
# !pygmentize ./Dockerfile
# !pygmentize ./build_and_push.sh

# Build the image and push it to ECR, using the region resolved earlier
# and the image/tag defined above.
# %%time
# ! chmod +x build_and_push.sh; bash build_and_push.sh {region} {image} {tag}
# ## Preparing FSx Input for SageMaker
#
# 1. Download and prepare your training dataset on S3.
# 2. Follow the steps listed here to create a FSx linked with your S3 bucket with training data - https://docs.aws.amazon.com/fsx/latest/LustreGuide/create-fs-linked-data-repo.html. Make sure to add an endpoint to your VPC allowing S3 access.
# 3. Follow the steps listed here to configure your SageMaker training job to use FSx https://aws.amazon.com/blogs/machine-learning/speed-up-training-on-amazon-sagemaker-using-amazon-efs-or-amazon-fsx-for-lustre-file-systems/
#
# ### Important Caveats
#
# 1. You need to use the same `subnet` and `vpc` and `security group` used with FSx when launching the SageMaker notebook instance. The same configurations will be used by your SageMaker training job.
# 2. Make sure you set appropriate inbound/outbound rules in the `security group`. Specifically, opening up these ports is necessary for SageMaker to access the FSx filesystem in the training job. https://docs.aws.amazon.com/fsx/latest/LustreGuide/limit-access-security-groups.html
# 3. Make sure `SageMaker IAM Role` used to launch this SageMaker training job has access to `AmazonFSx`.
#
# ## SageMaker PyTorch Estimator function options
#
# In the following code block, you can update the estimator function to use a different instance type, instance count, and distribution strategy. You're also passing in the training script you reviewed in the previous cell.
#
# **Instance types**
#
# SMDataParallel supports model training on SageMaker with the following instance types only.
#
# 1. ml.p3.16xlarge
# 1. ml.p3dn.24xlarge [Recommended]
# 1. ml.p4d.24xlarge [Recommended]
#
# **Instance count**
#
# To get the best performance and the most out of SMDataParallel, you should use at least 2 instances, but you can also use 1 for testing this example.
#
# **Distribution strategy**
#
# Note that to use DDP mode, you update the `distribution` strategy, and set it to use `smdistributed dataparallel`.
import os
from sagemaker.pytorch import PyTorch

# --- Training cluster configuration --------------------------------------
instance_type = "ml.p3dn.24xlarge"  # Other supported instance types: ml.p3.16xlarge, ml.p4d.24xlarge
instance_count = 2  # You can use 2, 4, 8, etc.

# NOTE: `account` and `region` were already resolved from the active AWS
# session in the initialization cell, so they must NOT be overwritten here.
# (The original code reassigned `region = '<REGION>'` *after* building the
# image URI, which left `region` holding a bogus placeholder string for any
# later use — that reassignment has been removed.)
docker_image = f"{account}.dkr.ecr.{region}.amazonaws.com/{image}:{tag}"  # ECR image built with the Dockerfile above
username = 'AWS'

# --- Networking (must match the FSx file-system's VPC settings) ----------
subnets = ['<SUBNET_ID>']  # Should be the same subnet used for FSx. Example: subnet-0f9XXXX
security_group_ids = ['<SECURITY_GROUP_ID>']  # Should be the same security group used for FSx. Example: sg-03ZZZZZZ

# Prefix for the SageMaker training job name; makes the job easy to find in
# the SageMaker Training console.
job_name = 'pytorch-smdataparallel-mrcnn-fsx'
file_system_id = '<FSX_ID>'  # FSx file system ID with your training dataset. Example: 'fs-0bYYYYYY'

# --- Hyperparameters forwarded to the training entry-point script --------
config_file = 'e2e_mask_rcnn_R_50_FPN_1x_16GPU_4bs.yaml'
hyperparameters = {
    "config-file": config_file,
    "skip-test": "",
    "seed": 987,
    "dtype": "float16",
}
# Construct the SageMaker PyTorch estimator that runs the distributed
# MaskRCNN training job. The entry-point script lives in `source_dir`.
estimator = PyTorch(entry_point='train_pytorch_smdataparallel_maskrcnn.py',
                    role=role,
                    image_uri=docker_image,
                    source_dir='.',
                    instance_count=instance_count,
                    instance_type=instance_type,
                    framework_version='1.8.1',
                    py_version='py36',
                    sagemaker_session=sagemaker_session,
                    hyperparameters=hyperparameters,
                    # Networking must match the FSx file system's VPC settings.
                    subnets=subnets,
                    security_group_ids=security_group_ids,
                    # Debugger hooks disabled (avoids training overhead).
                    debugger_hook_config=False,
                    # Training using SMDataParallel Distributed Training Framework
                    distribution={'smdistributed': {
                        'dataparallel': {
                            'enabled': True
                        }
                    }
                    }
                    )
# +
# Configure FSx Input for your SageMaker Training job
from sagemaker.inputs import FileSystemInput

# Path inside the FSx file system where the training data lives.
file_system_directory_path = 'YOUR_MOUNT_PATH_FOR_TRAINING_DATA'  # NOTE: '/fsx/' will be the root mount path. Example: '/fsx/mask_rcnn/PyTorch'
file_system_access_mode = 'ro'  # read-only: training only reads the dataset
file_system_type = 'FSxLustre'
train_fs = FileSystemInput(file_system_id=file_system_id,
                           file_system_type=file_system_type,
                           directory_path=file_system_directory_path,
                           file_system_access_mode=file_system_access_mode)
# Channel name 'train' is what the training script sees as its data channel.
data_channels = {'train': train_fs}
# -
# Submit SageMaker training job
estimator.fit(inputs=data_channels, job_name=job_name)
| training/distributed_training/pytorch/data_parallel/maskrcnn/pytorch_smdataparallel_maskrcnn_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbpresent={"id": "d6c34227-ffbc-4a95-b0be-917320458616"} slideshow={"slide_type": "slide"}
# <img src="imagenes/logoLCC.jpg" width="200">
# <img src="imagenes/letragrama-rgb-150.jpg" width="200">
#
# ## Introducción a Jupyter
#
# ### Taller de herramientas para *Big Data*
#
# #### del 4 al 6 de marzo de 2019
#
# [<NAME>]() y [<NAME>](http://mat.uson.mx/~juliowaissman/)
# + [markdown] nbpresent={"id": "52f4aa72-684c-4b39-adb8-09a8ccc3a19d"} slideshow={"slide_type": "slide"}
# <img src="imagenes/jupyter.png" width="200">
#
# # ¿Que es Jupyter?
#
# - Un entorno de *computación interactiva* atractivo
#
# - Un entorno de [*literate programming*](https://en.wikipedia.org/wiki/Literate_programming) en python
#
# - Un entorno para [*investigación reproducible*](https://reproducibleresearch.net)
#
# - Una forma de generar tutoriales y material de autoaprendizaje amigables
#
# + [markdown] nbpresent={"id": "a99de61b-c6a3-4a34-ba95-eaa2c35f8a2b"} slideshow={"slide_type": "subslide"}
# # Historia
#
# - Primera versión en diciembre 2011 como un método de libretas para *Ipython*.
#
# - Inició como una versión para python de las libretas de *Mathematica*
#
# - La arquitectura desarrollada permitió su uso más allá de *python*
#
# - En 2014 se separan como proyecto independiente bajo el nombre de **Jupyter**
# + [markdown] nbpresent={"id": "821e7efe-97c6-4fbd-a09f-7eb5ff372ced"} slideshow={"slide_type": "subslide"}
# # Estructura de una libreta
#
#
# - Se basa en el concepto de *celdas*
#
# - Las celdas pueden ser de *código* o *texto*
#
# - Las celdas de texto se realiza en *Markdown*
#
# - *Markdown* con sabor de *Github*
#
# - Incluye *MathJax*
#
# - Las celdas de código se ejecutan en el orden solicitado en forma asíncrona
#
# - El sistema guarda memoria de lo ejecutado
# + [markdown] nbpresent={"id": "55d7a4d4-3c50-4794-9dbc-353a5787f14e"} slideshow={"slide_type": "subslide"}
# # Arquitectura del sistema
#
#
# - Aplicación cliente-servidor
#
# - El cliente interactúa a través de un navegador
#
# - El servidor se compone de *Kernel* y *dashboard*
#
# - El *Kernel* ejecuta el codigo de las celdas
#
# - El *dashboard* se encarga de la visualización y las celdas de texto.
# + [markdown] nbpresent={"id": "e26a2a06-262e-4114-bd74-baac53965764"} slideshow={"slide_type": "slide"}
# # ¿Que se requiere para poder utilizar Jupyter?
#
#
# - Python instalado (de preferencia *Anaconda*)
#
# - Se requieren varias bibliotecas especializadas
# - Se basa en *Ipython* y *tornado* principalmente
#
# - Al menos un kernel instalado (i.e. *Ipython*)
#
# - Python, R y/o Julia como nativos y con soporte oficial
#
# - [Kernel para muchos lenguajes desarrollados por la comunidad](https://github.com/jupyter/jupyter/wiki/Jupyter-kernels)
#
# + [markdown] nbpresent={"id": "e25eeb1d-8290-464a-9cf5-ec4659ccbcec"} slideshow={"slide_type": "slide"}
# # Comenzamos con Python
#
#
# Empezamos por cargar algunas librerías básicas
# + nbpresent={"id": "bd2da93c-212e-46a8-8087-a41c45f01990"} slideshow={"slide_type": "-"}
# Render matplotlib figures inline in the notebook.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt

# Default to wide figures and the 'ggplot' style for all plots below.
plt.rcParams['figure.figsize'] = (20,8)
plt.style.use('ggplot')
# + [markdown] nbpresent={"id": "1869922a-a074-4d60-bb5c-2ebb0c81d199"} slideshow={"slide_type": "slide"}
# # Probemos con un código sencillo
#
# + nbpresent={"id": "801c6142-fc4b-48c5-ab64-67386b059838"}
# Sample 100 evenly spaced points over one full period [-pi, pi]
# and evaluate the two example trigonometric functions on them.
x = np.linspace(-np.pi, np.pi, num=100)
y1, y2 = np.sin(x), np.cos(x)
# + [markdown] nbpresent={"id": "5a3f3830-7873-481f-bb1c-4a746d495771"} slideshow={"slide_type": "subslide"}
# # Todo se queda en memoria
#
# mientras no se reinicie el kernel
#
# + nbpresent={"id": "3a281208-0fc5-4e37-9d30-ab29516ab6a1"} slideshow={"slide_type": "-"}
# Plot both curves computed in the earlier cell (x, y1, y2 persist in the
# kernel's memory), with labels, axis names, and a legend.
plt.plot(x, y1, label='sen(x)')
plt.plot(x, y2, label='cos(x)')
plt.title('Funciones de ejemplo')
plt.xlabel('x')
plt.ylabel('f(x)')
plt.legend()
plt.show()
# + [markdown] nbpresent={"id": "b62ccfc5-797c-4760-80d2-fb68b9dd357e"} slideshow={"slide_type": "slide"}
# # Vamos por partes
#
#
# - ¿Qué puedo agregar en las celdas de texto?
#
# - ¿Qué puedo agregar en la celdas de código (aparte de código, por supuesto)?
#
# - ¿Cómo puedo cambiar el Kernel para usar otro lenguaje de programación?
# + [markdown] nbpresent={"id": "4951015f-694d-4dd4-acf1-5d85b29bbb76"} slideshow={"slide_type": "slide"}
# # Celdas de texto
#
#
# - Todo es [Markdown](https://en.wikipedia.org/wiki/Markdown)
#
# - Una buena referencia rápida la puedes consultar [aquí](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet)
# + [markdown] nbpresent={"id": "b160551a-9571-4225-bc46-5621ecc7a85f"} slideshow={"slide_type": "subslide"}
# # Insertar imágenes
#
#
# - Lo más fácil es:
# ```md
# 
# ```
#
# - Pero muchas veces es más conveniente
# ```html
# <img src="imagenes/jupyter.png" width="70">
# ```
#
# + [markdown] nbpresent={"id": "61e601ba-78de-4285-a57e-16dc17e0511e"} slideshow={"slide_type": "fragment"}
# 
# <img src="imagenes/jupyter.png" width="70">
# + [markdown] nbpresent={"id": "41d4a4a9-2d81-465f-9424-ebcd64951a3c"} slideshow={"slide_type": "subslide"}
# # Cuadros
#
# - Debe haber al menos trés lineas para marcar una separación
# - Los delimitadores externos son opcionales
#
# ```md
# | Col 1 | Col 2 | Col 3 |
# | ------------- |:-------------:| ------:|
# | Esta | Esta | \$1600 |
# | columna | se encuentra | \$12 |
# | a la derecha | centrada | \$1 |
#
#
# Markdown | bien | feo
# ---|---|---
# *No se ve muy bien* | ~el código~ | **pero genera un cuadro decente**
# 1 | 2 | 3
# ```
# + [markdown] nbpresent={"id": "eb6dd74c-8e5f-4127-b428-1eca223879ed"} slideshow={"slide_type": "fragment"}
# | Col 1 | Col 2 | Col 3 |
# | ------------- |:-------------:| ------:|
# | Esta | Esta | \$1600 |
# | columna | se encuentra | \$12 |
# | a la derecha | centrada | \$1 |
#
#
#
# Markdown | bien | feo
# ---|---|---
# *No se ve muy bien* | ~el código~ | **pero genera un cuadro decente**
# 1 | 2 | 3
# + [markdown] nbpresent={"id": "17cefc2f-5c7a-4064-b96a-890513058725"} slideshow={"slide_type": "subslide"}
# # Notación matemática
#
#
# 1. Jupyter usa por default [mathjax](https://www.mathjax.org)
#
# 2. Las ecuaciones *en linea* van entre signos \$ ecuación \$
#
# 3. Las ecuaciones en propia linea van entre \$\$ ecuación \$\$
#
# 4. Utiliza la notación estandard de $\LaTeX$
#
# 5. Un buen lugar de consulta está [aquí](https://en.wikibooks.org/wiki/LaTeX/Mathematics)
# + [markdown] nbpresent={"id": "6d09bcdc-fff8-45d7-b1ae-eb5350bbe58e"} slideshow={"slide_type": "subslide"}
# # Algunas ecuaciones
#
#
# ```tex
# Podemos escribr cosas tanto en linea como $\forall x \in X, \quad \exists y \leq \epsilon$ que es muy práctico para calculo 1. Igualmente, se pueden escribir cosas como $\cos (2\theta) = \cos^2 \theta - \sin^2 \theta$ que es útil en trigonometría. O tambien podemos escribir
# $$
# \frac{n!}{k!(n-k)!} = \binom{n}{k},
# $$
#
# $$
# P\left(A=2\middle|\frac{A^2}{B}>4\right) = \int_0^\infty \mathrm{e}^{-x}\,\mathrm{d}x
# $$
#
# ```
# + [markdown] nbpresent={"id": "05c7a8e3-fb92-4fd3-bed9-6c96e346a33f"} slideshow={"slide_type": "fragment"}
# Podemos escribr cosas tanto en linea como $\forall x \in X, \quad \exists y \leq \epsilon$
# que es muy práctico para calculo 1. Igualmente se pueden escribir cosas como $\cos (2\theta) = \cos^2 \theta - \sin^2 \theta$ que
# es útil en trigonometría. O tambien podemos escribir
# $$
# \frac{n!}{k!(n-k)!} = \binom{n}{k},
# $$
#
# $$
# P\left(A=2\middle|\frac{A^2}{B}>4\right) = \int_0^\infty \mathrm{e}^{-x}\,\mathrm{d}x
# $$
# + [markdown] nbpresent={"id": "23045984-4ba2-4119-b008-2b77b546925d"} slideshow={"slide_type": "subslide"}
# # Ingresando código de ejemplo en el texto
#
#
# ```
# ```python
# def fibo(n):
# def fibo_r(x, y, acc):
# return y if acc < 2 else fibo_r(y, x + y, acc - 1)
# return fibo_r(1, 1, n)
# `` `
# ```
#
# lo que queda como
#
# ```python
# def fibo(n):
# def fibo_r(x, y, acc):
# return y if acc < 2 else fibo_r(y, x + y, acc - 1)
# return fibo_r(1, 1, n)
# ```
#
#
# + [markdown] nbpresent={"id": "35c0b4f8-a87f-421e-9482-cff621cc592a"} slideshow={"slide_type": "slide"}
# # Celdas de código
#
# - Comandos [*mágicos* de Jupyter](http://ipython.readthedocs.io/en/stable/interactive/magics.html)
#
# - Código ejecutado por el *Kernel*
# + [markdown] nbpresent={"id": "d31f4065-e86e-44c8-ae1e-7488029366d8"} slideshow={"slide_type": "subslide"}
# # Comandos mágicos
#
# - Jupyter tiene una serie de *comandos mágicos*,
#
# - Todos empiezan con '%'.
#
# - Todos son específicos a Python
#
# Para probar, ejecuta en una casilla lo siguiente
# ```
# # # %quickref
# # # %lsmagic
# ```
# + nbpresent={"id": "8a35d671-7d1b-4014-ba9f-ab56f660850a"} slideshow={"slide_type": "subslide"}
# %lsmagic
# + nbpresent={"id": "8cbf18b0-2474-4840-b9b0-6955add80699"} slideshow={"slide_type": "subslide"}
def fibo(n):
    """Return the (n+1)-th Fibonacci number; fibo(0) == fibo(1) == 1."""
    prev, curr = 1, 1
    remaining = n
    # Advance the pair (prev, curr) one Fibonacci step per iteration,
    # mirroring the accumulator of the original tail-recursive helper.
    while remaining >= 2:
        prev, curr = curr, prev + curr
        remaining -= 1
    return curr
# Show two sample values using IPython's rich-output `display`.
display(fibo(4))
display(fibo(20))
# Time the call with the %timeit magic.
# %timeit fibo(20)
# What happens if you run %prun fibo(200)?
# + nbpresent={"id": "328a1bf3-709d-4303-b052-be3462ca27ca"} slideshow={"slide_type": "subslide"}
# Comandos mágicos que mandan llamar al shell
# !which python
# %ls
# %pwd
# + [markdown] nbpresent={"id": "e87e1440-8d9f-4a45-bd5f-4d87d75b2882"} slideshow={"slide_type": "subslide"}
# # Tambien existe la biblioteca de Ipython
# + nbpresent={"id": "412a294b-dc46-4398-b746-3abc79996108"}
from IPython.display import YouTubeVideo
# a talk about IPython at Sage Days at U. Washington, Seattle.
# Video credit: <NAME>.
YouTubeVideo('1j_HxD4iLn8')
# + [markdown] nbpresent={"id": "27f448db-e525-43b8-b909-5f4c6b99408b"} slideshow={"slide_type": "subslide"}
# # ¿Y el código?
#
# - Depende de lo que quieras hacer.
#
# - Se puede hacer uso intensivo del autocompletado
#
# - Igualmente, se tiene la documentación de todas las funciones
#
# - Prácticamente todo lo que se hace en python funciona en Jupyter,
# sin embargo, se usa principalmente con:
#
# - [`numpy` y `matplotlib`](https://nbviewer.jupyter.org/github/IA-UNISON/IA-UNISON.github.io/blob/master/assets/docs/intro_numpy.ipynb)
#
# - [`pandas`](https://nbviewer.jupyter.org/github/IA-UNISON/IA-UNISON.github.io/blob/master/assets/docs/intro_pandas.ipynb)
# + [markdown] nbpresent={"id": "06ada5a0-09c7-4b18-9a3f-5494ce6c6547"} slideshow={"slide_type": "subslide"}
# # ¿Y si quiero ejecutar y guardar mis libretas en linea?
#
#
# Dos opciones:
#
#
# - [Colab](https://colab.research.google.com). Libre, pero de *Google* (se usa desde *drive*)
#
# - Ejemplo de [introducción a pandas](https://colab.research.google.com/notebooks/mlcc/intro_to_pandas.ipynb?hl=es)
#
# - [Binder](https://mybinder.org). Libre pero en etapa *Beta* y algo tardado (se usa desde *github*). No siepre funciona con las bibliotecas que se consideran de *base*.
#
# + [markdown] nbpresent={"id": "f5d991e7-88d1-40c7-8adf-d0b5025a7369"} slideshow={"slide_type": "slide"}
# # Agregando otros Kernels a Jupyter
#
#
# - El nombre viene de **Ju**lia, **pyt**hon y **R**.
#
# - Instalar *R*, y **desde R en la terminal**:
# ```r
# install.packages(c('repr', 'IRdisplay', 'evaluate', 'crayon',
# 'pbdZMQ', 'devtools', 'uuid', 'digest'))
# devtools::install_github('IRkernel/IRkernel')
# IRkernel::installspec()
# ```
#
# - Instalar *Julia*, y luego ejecutar dentro del *repl* de *Julia*
# ```julia
# Pkg.add("IJulia")
# ```
#
# + [markdown] nbpresent={"id": "c6c5a328-26ef-4f21-ac54-11bdeff553ca"} slideshow={"slide_type": "subslide"}
# # ¿Y si quiero agregar un interprete de C++?
#
#
# - Necesito tener instalado Jupyter a través de la distribución [Anaconda](https://www.anaconda.com/distribution/)
#
# - Usar el *interprete* (basado en python) [Xeus-cling](https://github.com/QuantStack/xeus-cling)
#
# + [markdown] nbpresent={"id": "3a0e50b3-45dc-4e71-b464-4699e835ba2d"} slideshow={"slide_type": "fragment"}
# Para instalar, desde la consola:
#
# ```bash
# $ conda create -n cling
# $ source activate cling
# $ conda install xeus-cling notebook -c QuantStack -c conda-forge
# $ source deactivate
# ```
# + [markdown] nbpresent={"id": "73e45caa-c467-4f93-bf99-188b88947e5f"} slideshow={"slide_type": "fragment"}
# Para probar
#
# ```bash
# $ source activate cling
# $ jupyter notebook
# ```
# + [markdown] nbpresent={"id": "789bd196-180c-4391-80e8-ba12cb34babb"} slideshow={"slide_type": "slide"}
# # ¿Y si quiero hacer una presentación en Jupyter?
#
#
# - Habilitar la edición de diapositivas en el menú (View -> Cell Toolbar -> Slideshow)
#
# - Seleccionar el tipo de diapositiva:
#
# - `Slide` Transparencia a la derecha
#
# - `Subslide` Transparencia hacia abajo
#
# - `Fragment` Para completar la transparencia paso a paso
#
# - `-` Para continuar con la transparencia
#
# + [markdown] nbpresent={"id": "d22335f4-4bf4-4093-ab54-d1a5bd8875da"} slideshow={"slide_type": "skip"}
# # ¿Y para que parezca presentación?
#
# Utilizar [reveal.js](https://github.com/hakimel/reveal.js) ya sea descargandolo físicamente o utilizandolo en linea
#
#
# - Opción 1: Descargando una versión local de `reveal.js`:
#
# ```bash
# $ jupyter-nbconvert --to slides libreta.ipynb --reveal-prefix=reveal.js
# ```
#
# - Opción 2: Usando la localización original de `reveal.js` (preferida):
#
# ```bash
# $ jupyter-nbconvert --to slides libreta.ipynb --reveal-prefix="https://cdnjs.cloudflare.com/ajax/libs/reveal.js/3.1.0"
# ```
#
#
# - Abrir `libreta.slides.html` en el navegador.
# + [markdown] slideshow={"slide_type": "slide"}
# # Y esto es todo por el momento...
#
# 
#
# ### *Muchas gracias por su atención*
#
#
| material/jupyter/intro-jupyter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Getting deeper with Keras
# * Tensorflow is a powerful and flexible tool, but coding large neural architectures with it is tedious.
# * There are plenty of deep learning toolkits that work on top of it like Slim, TFLearn, Sonnet, Keras.
# * Choice is matter of taste and particular task
# * We'll be using Keras
# Make the course's grading helper (one directory up) importable.
import sys
sys.path.append("..")
import grading

# use preloaded keras datasets and models
# ! mkdir -p ~/.keras/datasets
# ! mkdir -p ~/.keras/models
# ! ln -s $(realpath ../readonly/keras/datasets/*) ~/.keras/datasets/
# ! ln -s $(realpath ../readonly/keras/models/*) ~/.keras/models/

import numpy as np
from preprocessed_mnist import load_dataset
import keras

# Load the MNIST splits and one-hot encode the labels, as required by
# the categorical_crossentropy loss used further below.
X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()
y_train,y_val,y_test = map(keras.utils.np_utils.to_categorical,[y_train,y_val,y_test])

# Sanity-check: show the first training image.
import matplotlib.pyplot as plt
# %matplotlib inline
plt.imshow(X_train[0]);
# ## The pretty keras
# Keras (pre-2.0 TF era) runs on top of a TensorFlow session.
import tensorflow as tf
s = tf.InteractiveSession()
# +
import keras
from keras.models import Sequential
import keras.layers as ll

# Sequential MLP over flattened 28x28 MNIST images.
model = Sequential(name="mlp")
model.add(ll.InputLayer([28, 28]))
model.add(ll.Flatten())

# network body
# NOTE(review): both hidden layers use 'linear' activations, so the stack
# collapses to a single affine map followed by softmax — mathematically
# equivalent to multinomial logistic regression. This looks like the
# deliberate flaw this task ("Whoops!" section) asks you to find; replace
# 'linear' with a nonlinearity (e.g. 'relu') to clear the 0.975 accuracy bar.
model.add(ll.Dense(25))
model.add(ll.Activation('linear'))
model.add(ll.Dense(25))
model.add(ll.Activation('linear'))

# output layer: 10 neurons for each class with softmax
model.add(ll.Dense(10, activation='softmax'))

# categorical_crossentropy is your good old crossentropy
# but applied for one-hot-encoded vectors
model.compile("adam", "categorical_crossentropy", metrics=["accuracy"])
# Print the layer-by-layer architecture and parameter counts.
model.summary()
# ### Model interface
#
# Keras models follow __Scikit-learn__'s interface of fit/predict with some notable extensions. Let's take a tour.

# fit(X,y) ships with a neat automatic logging.
# Highly customizable under the hood.
model.fit(X_train, y_train,
          validation_data=(X_val, y_val), epochs=5);

# estimate probabilities P(y|x)
model.predict_proba(X_val[:2])

# Save trained weights
model.save("weights.h5")

# Evaluate loss and accuracy on the held-out test set.
print("\nLoss, Accuracy = ", model.evaluate(X_test, y_test))
# ### Whoops!
# So far our model is staggeringly inefficient. There is something wrong with it. Guess what?
# +
# Test score...
test_predictions = model.predict_proba(X_test).argmax(axis=-1)
test_answers = y_test.argmax(axis=-1)

# Fraction of test digits classified correctly.
test_accuracy = np.mean(test_predictions==test_answers)
print("\nTest accuracy: {} %".format(test_accuracy*100))

# These thresholds encode the assignment: a plain logistic regression
# reaches ~0.92; your (fixed) network is expected to clear 0.975.
assert test_accuracy>=0.92,"Logistic regression can do better!"
assert test_accuracy>=0.975,"Your network can do better!"
print("Great job!")
# -

# Submit the result to the grader. Replace the angle-bracket placeholders
# with your e-mail and assignment token — as written, the last line is a
# template and is not valid Python until the placeholders are filled in.
answer_submitter = grading.Grader("0ybD9ZxxEeea8A6GzH-6CA")
answer_submitter.set_answer("N56DR", test_accuracy)
answer_submitter.submit(<your-email>, <your-assignment-token>)
# ## Keras + tensorboard
#
# Remember the interactive graphs from Tensorboard one notebook ago?
#
# Thing is, Keras can use tensorboard to show you a lot of useful information about the learning progress. Just take a look!
# Clear any TensorBoard logs left over from earlier runs.
# ! rm -r /tmp/tboard/**
from keras.callbacks import TensorBoard

# Re-train while logging metrics to /tmp/tboard for TensorBoard to plot.
model.fit(X_train, y_train, validation_data=(X_val, y_val),
          epochs=10,
          callbacks=[TensorBoard("/tmp/tboard")])
# # Tips & tricks
#
# Here are some tips on what you could do. Don't worry, to reach the passing threshold you don't need to try all the ideas listed here, feel free to stop once you reach the 0.975 accuracy mark.
#
# * __Network size__
# * More neurons,
# * More layers, ([docs](https://keras.io/))
#
# * Nonlinearities in the hidden layers
# * tanh, relu, leaky relu, etc
# * Larger networks may take more epochs to train, so don't discard your net just because it didn't beat the baseline in 5 epochs.
#
#
# * __Early Stopping__
# * Training for 100 epochs regardless of anything is probably a bad idea.
# * Some networks converge over 5 epochs, others - over 500.
# * Way to go: stop when validation score is 10 iterations past maximum
#
#
# * __Faster optimization__
# * rmsprop, nesterov_momentum, adam, adagrad and so on.
# * Converge faster and sometimes reach better optima
# * It might make sense to tweak learning rate/momentum, other learning parameters, batch size and number of epochs
#
#
# * __Regularize__ to prevent overfitting
# * Add some L2 weight norm to the loss function, theano will do the rest
# * Can be done manually or via - https://keras.io/regularizers/
#
#
# * __Data augmentation__ - getting 5x as large dataset for free is a great deal
# * https://keras.io/preprocessing/image/
# * Zoom-in+slice = move
# * Rotate+zoom(to remove black stripes)
# * any other perturbations
# * Simple way to do that (if you have PIL/Image):
# * ```from scipy.misc import imrotate,imresize```
# * and a few slicing
# * Stay realistic. There's usually no point in flipping dogs upside down as that is not the way you usually see them.
| 1 Introduction to Deep Learning/week2/Keras-task.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Importing libraries for data preparation.
import pandas as pd
import numpy as np
from fastai.imports import *
from fastai.structured import *

train = pd.read_csv("train_black_friday.csv")
test = pd.read_csv("test_black_friday.csv")

# Convert string columns to pandas categoricals, then split off the target
# column 'Purchase' and numericalize/fill missing values via fastai's proc_df.
train_cats(train)
df, y, nas = proc_df(train, 'Purchase')

# Create a validation set (70/30 split, fixed seed for reproducibility).
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(df, y, test_size=0.30, random_state=42)
# +
# Import necessary libraries to build the baseline model.
import random
from sklearn.ensemble import RandomForestRegressor

random.seed(42)
# Seed the forest itself: RandomForestRegressor draws its randomness from
# numpy / its own `random_state`, so seeding Python's `random` module alone
# (as the original did) does NOT make the fit reproducible.
rf = RandomForestRegressor(n_estimators=10, random_state=42)
rf.fit(X_train, y_train)
# +
from keras.models import Sequential
from keras.layers import Dense, Activation

# Define a small fully-connected regression network.
model = Sequential()
# Derive the input width from the training data instead of hard-coding 13,
# so the model stays in sync with whatever columns proc_df produced.
model.add(Dense(100, input_dim=X_train.shape[1], activation="relu"))
model.add(Dense(50, activation="relu"))
model.add(Dense(1))  # single linear output for the regression target
model.summary()  # Print model summary
# -
# Compile model with MSE loss (regression) and the Adam optimizer.
model.compile(loss= "mean_squared_error" , optimizer="adam", metrics=["mean_squared_error"])

# Fit Model
model.fit(X_train, y_train, epochs=10)

# Fit again while tracking validation MSE each epoch.
model.fit(X_train, y_train, epochs=10, validation_data=(X_valid, y_valid))

from sklearn.metrics import mean_squared_error

# Prediction using Random Forest; report RMSE on the validation set.
y_valid_rf = rf.predict(X_valid)
score = np.sqrt(mean_squared_error(y_valid,y_valid_rf))
print (score)

# Prediction using Neural Network; report RMSE on the validation set.
y_valid_nn = model.predict(X_valid)
score = np.sqrt(mean_squared_error(y_valid,y_valid_nn))
print (score)
| PXL_DIGITAL_JAAR_2/AI & Robotics/Week 11/docker_notebooks/keras/jupyter/NeuralNetworks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small><small><i>
# All the IPython Notebooks in this lecture series by Dr. <NAME> are available @ **[GitHub](https://github.com/milaan9/04_Python_Functions/tree/main/002_Python_Functions_Built_in)**
# </i></small></small>
# # Python `compile()`
#
# The **`compile()`** method returns a Python code object from the source (normal string, a byte string, or an AST object).
#
# **Syntax**:
#
# ```python
# compile(source, filename, mode, flags=0, dont_inherit=False, optimize=-1)
# ```
#
# **`compile()`** method is used if the Python code is in string form or is an AST object, and you want to change it to a code object.
#
# The code object returned by **`compile()`** method can later be called using methods like: **[exec()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/027_Python_exec%28%29.ipynb)** and **[eval()](https://github.com/milaan9/04_Python_Functions/blob/main/002_Python_Functions_Built_in/021_Python_eval%28%29.ipynb)** which will execute dynamically generated Python code.
# ## `compile()` Parameters
#
# * **`source`** - a normal string, a byte string, or an AST object
# * **`filename`** - file from which the code was read. If it wasn't read from a file, you can give a name yourself
# * **`mode`** - Either **`eval`** or **`exec`** or **`single`**.
# * **`eval`** - accepts only a single expression.
# * **`exec`** - It can take a code block that has Python statements, class and functions, and so on.
# * **`single`** - if it consists of a single interactive statement
# * **`flags`** (optional) and **`dont_inherit`** (optional) - controls which future statements affect the * compilation of the source. Default Value: 0
# * **`optimize`** (optional) - optimization level of the compiler. Default value -1.
# ## Return Value from `compile()`
#
# **`compile()`** method returns a Python code object.
# +
# Example 1: How compile() works?
codeInString = 'a = 5\nb=6\nsum=a+b\nprint("sum =",sum)'

# Compile the source string into a code object ('exec' mode accepts a full
# block of statements), then run it in the current namespace.
code_object = compile(codeInString, 'sumstring', 'exec')
exec(code_object)
# -
# **Explanation**:
#
# Here, **`source`** is in normal string form. The **`filename`** is sumstring. And, the **`exec`** mode later allows the use of **`exec()`** method.
#
# **`compile()`** method converts the string to Python code object. The code object is then executed using **`exec()`** method.
| 002_Python_Functions_Built_in/011_Python_compile().ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text"
# # Introduction to Keras for Engineers
#
# **Author:** [fchollet](https://twitter.com/fchollet)<br>
# **Date created:** 2020/04/01<br>
# **Last modified:** 2020/04/28<br>
# **Description:** Everything you need to know to use Keras to build real-world machine learning solutions.
# + [markdown] colab_type="text"
# ## Setup
#
# + colab_type="code"
import numpy as np
import tensorflow as tf
from tensorflow import keras
# + [markdown] colab_type="text"
# ## Introduction
#
# Are you a machine learning engineer looking to use Keras
# to ship deep-learning powered features in real products? This guide will serve
# as your first introduction to core Keras API concepts.
#
# In this guide, you will learn about:
#
# - How to prepare your data before training a model (by turning it into either NumPy
# arrays or `tf.data.Dataset` objects).
# - How to do data preprocessing, for instance feature normalization or vocabulary
# indexing.
# - How to build a model that turns your data into useful predictions,
# using the Keras Functional API.
# - How to train your model with the built-in Keras `fit()` method, while being
# mindful of checkpointing, metrics monitoring, and fault tolerance.
# - How to evaluate your model on a test data and how to use it for inference on new data.
# - How to customize what `fit()` does, for instance to build a GAN.
# - How to speed up training by leveraging multiple GPUs.
# - How to refine your model through hyperparameter tuning.
#
# At the end of this guide, you will get pointers to end-to-end examples to solidify
# these concepts:
#
# - Image classification
# - Text classification
# - Credit card fraud detection
#
#
# + [markdown] colab_type="text"
# ## Data loading & preprocessing
#
# Neural networks don't process raw data, like text files, encoded JPEG image files, or
# CSV files. They process **vectorized** & **standardized** representations.
#
# - Text files need to be read into string tensors, then split into words. Finally, the
# words need to be indexed & turned into integer tensors.
# - Images need to be read and decoded into integer tensors, then converted to floating
# point and normalized to small values (usually between 0 and 1).
# - CSV data needs to be parsed, with numerical features converted to floating point
# tensors and categorical features indexed and converted to integer tensors.
# Then each feature typically needs to be normalized to zero-mean and unit-variance.
# - Etc.
#
# Let's start with data loading.
#
# ## Data loading
#
# Keras models accept three types of inputs:
#
# - **NumPy arrays**, just like Scikit-Learn and many other Python-based libraries. This
# is a good option if your data fits in memory.
# - **[TensorFlow `Dataset` objects](https://www.tensorflow.org/guide/data)**. This is a
# high-performance option that is more suitable for datasets that do not fit in memory
# and that are streamed from disk or from a distributed filesystem.
# - **Python generators** that yield batches of data (such as custom subclasses of
# the `keras.utils.Sequence` class).
#
# Before you start training a model, you will need to make your data available as one of
# these formats. If you have a large dataset and you are training on GPU(s), consider
# using `Dataset` objects, since they will take care of performance-critical details,
# such as:
#
# - Asynchronously preprocessing your data on CPU while your GPU is busy, and buffering
# it into a queue.
# - Prefetching data on GPU memory so it's immediately available when the GPU has
# finished processing the previous batch, so you can reach full GPU utilization.
#
# Keras features a range of utilities to help you turn raw data on disk into a `Dataset`:
#
# - `tf.keras.preprocessing.image_dataset_from_directory` turns image files sorted into
# class-specific folders into a labeled dataset of image tensors.
# - `tf.keras.preprocessing.text_dataset_from_directory` does the same for text files.
#
# In addition, the TensorFlow `tf.data` includes other similar utilities, such as
# `tf.data.experimental.make_csv_dataset` to load structured data from CSV files.
#
# **Example: obtaining a labeled dataset from image files on disk**
#
# Suppose you have image files sorted by class in different folders, like this:
#
# ```
# main_directory/
# ...class_a/
# ......a_image_1.jpg
# ......a_image_2.jpg
# ...class_b/
# ......b_image_1.jpg
# ......b_image_2.jpg
# ```
#
# Then you can do:
#
# ```python
# # Create a dataset.
# dataset = keras.preprocessing.image_dataset_from_directory(
# 'path/to/main_directory', batch_size=64, image_size=(200, 200))
#
# # For demonstration, iterate over the batches yielded by the dataset.
# for data, labels in dataset:
# print(data.shape) # (64, 200, 200, 3)
# print(data.dtype) # float32
# print(labels.shape) # (64,)
# print(labels.dtype) # int32
# ```
#
# The label of a sample is the rank of its folder in alphanumeric order. Naturally, this
# can also be configured explicitly by passing, e.g.
# `class_names=['class_a', 'class_b']`, in which cases label `0` will be `class_a` and
# `1` will be `class_b`.
#
# **Example: obtaining a labeled dataset from text files on disk**
#
# Likewise for text: if you have `.txt` documents sorted by class in different folders,
# you can do:
#
# ```python
# dataset = keras.preprocessing.text_dataset_from_directory(
# 'path/to/main_directory', batch_size=64)
#
# # For demonstration, iterate over the batches yielded by the dataset.
# for data, labels in dataset:
# print(data.shape) # (64,)
# print(data.dtype) # string
# print(labels.shape) # (64,)
# print(labels.dtype) # int32
# ```
#
#
#
# + [markdown] colab_type="text"
# ## Data preprocessing with Keras
#
# Once your data is in the form of string/int/float NumPy arrays, or a `Dataset` object
# (or Python generator) that yields batches of string/int/float tensors,
# it is time to **preprocess** the data. This can mean:
#
# - Tokenization of string data, followed by token indexing.
# - Feature normalization.
# - Rescaling the data to small values (in general, input values to a neural network
# should be close to zero -- typically we expect either data with zero-mean and
# unit-variance, or data in the `[0, 1]` range).
#
# ### The ideal machine learning model is end-to-end
#
# In general, you should seek to do data preprocessing **as part of your model** as much
# as possible, not via an external data preprocessing pipeline. That's because external
# data preprocessing makes your models less portable when it's time to use them in
# production. Consider a model that processes text: it uses a specific tokenization
# algorithm and a specific vocabulary index. When you want to ship your model to a
# mobile app or a JavaScript app, you will need to recreate the exact same preprocessing
# setup in the target language. This can get very tricky: any small discrepancy between
# the original pipeline and the one you recreate has the potential to completely
# invalidate your model, or at least severely degrade its performance.
#
# It would be much easier to be able to simply export an end-to-end model that already
# includes preprocessing. **The ideal model should expect as input something as close as
# possible to raw data: an image model should expect RGB pixel values in the `[0, 255]`
# range, and a text model should accept strings of `utf-8` characters.** That way, the
# consumer of the exported model doesn't have
# to know about the preprocessing pipeline.
#
# ### Using Keras preprocessing layers
#
# In Keras, you do in-model data preprocessing via **preprocessing layers**. This
# includes:
#
# - Vectorizing raw strings of text via the `TextVectorization` layer
# - Feature normalization via the `Normalization` layer
# - Image rescaling, cropping, or image data augmentation
#
# The key advantage of using Keras preprocessing layers is that **they can be included
# directly into your model**, either during training or after training,
# which makes your models portable.
#
# Some preprocessing layers have a state:
#
# - `TextVectorization` holds an index mapping words or tokens to integer indices
# - `Normalization` holds the mean and variance of your features
#
# The state of a preprocessing layer is obtained by calling `layer.adapt(data)` on a
# sample of the training data (or all of it).
#
#
# **Example: turning strings into sequences of integer word indices**
#
#
# + colab_type="code"
# Demo: map raw strings to sequences of integer word indices.
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# Example training data, of dtype `string` (each sample is a 1-element list,
# i.e. the array has shape (2, 1)).
training_data = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])
# Create a TextVectorization layer instance. It can be configured to either
# return integer token indices, or a dense token representation (e.g. multi-hot
# or TF-IDF). The text standardization and text splitting algorithms are fully
# configurable. Here `output_mode="int"` requests integer token indices.
vectorizer = TextVectorization(output_mode="int")
# Calling `adapt` on an array or dataset makes the layer generate a vocabulary
# index for the data, which can then be reused when seeing new data.
vectorizer.adapt(training_data)
# After calling adapt, the layer is able to encode any n-gram it has seen before
# in the `adapt()` data. Unknown n-grams are encoded via an "out-of-vocabulary"
# token.
integer_data = vectorizer(training_data)
print(integer_data)
# + [markdown] colab_type="text"
# **Example: turning strings into sequences of one-hot encoded bigrams**
#
# + colab_type="code"
# Demo: map raw strings to multi-hot encoded bigram vectors.
from tensorflow.keras.layers.experimental.preprocessing import TextVectorization
# Example training data, of dtype `string` (shape (2, 1)).
training_data = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])
# Create a TextVectorization layer instance. It can be configured to either
# return integer token indices, or a dense token representation (e.g. multi-hot
# or TF-IDF). The text standardization and text splitting algorithms are fully
# configurable. `output_mode="binary"` yields multi-hot vectors; `ngrams=2`
# indexes bigrams in addition to single words.
vectorizer = TextVectorization(output_mode="binary", ngrams=2)
# Calling `adapt` on an array or dataset makes the layer generate a vocabulary
# index for the data, which can then be reused when seeing new data.
vectorizer.adapt(training_data)
# After calling adapt, the layer is able to encode any n-gram it has seen before
# in the `adapt()` data. Unknown n-grams are encoded via an "out-of-vocabulary"
# token.
integer_data = vectorizer(training_data)
print(integer_data)
# + [markdown] colab_type="text"
# **Example: normalizing features**
#
# + colab_type="code"
# Demo: feature normalization with a stateful preprocessing layer.
from tensorflow.keras.layers.experimental.preprocessing import Normalization
# Example image data, with values in the [0, 255] range
training_data = np.random.randint(0, 256, size=(64, 200, 200, 3)).astype("float32")
# axis=-1: statistics are computed per feature along the last (channel) axis.
normalizer = Normalization(axis=-1)
# adapt() computes the mean and variance of the sample data and stores them
# as the layer's state.
normalizer.adapt(training_data)
normalized_data = normalizer(training_data)
# After normalization the data should be close to zero mean and unit variance.
print("var: %.4f" % np.var(normalized_data))
print("mean: %.4f" % np.mean(normalized_data))
# + [markdown] colab_type="text"
# **Example: rescaling & center-cropping images**
#
# Both the `Rescaling` layer and the `CenterCrop` layer are stateless, so it isn't
# necessary to call `adapt()` in this case.
#
# + colab_type="code"
# Demo: rescaling & center-cropping images. Both layers are stateless,
# so no adapt() call is needed.
from tensorflow.keras.layers.experimental.preprocessing import CenterCrop
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
# Example image data, with values in the [0, 255] range
training_data = np.random.randint(0, 256, size=(64, 200, 200, 3)).astype("float32")
# Crop each image to its central 150x150 region.
cropper = CenterCrop(height=150, width=150)
# Map pixel values from [0, 255] into [0, 1].
scaler = Rescaling(scale=1.0 / 255)
output_data = scaler(cropper(training_data))
print("shape:", output_data.shape)
print("min:", np.min(output_data))
print("max:", np.max(output_data))
# + [markdown] colab_type="text"
# ## Building models with the Keras Functional API
#
# A "layer" is a simple input-output transformation (such as the scaling &
# center-cropping transformations above). For instance, here's a linear projection layer
# that maps its inputs to a 16-dimensional feature space:
#
# ```python
# dense = keras.layers.Dense(units=16)
# ```
#
# A "model" is a directed acyclic graph of layers. You can think of a model as a
# "bigger layer" that encompasses multiple sublayers and that can be trained via exposure
# to data.
#
# The most common and most powerful way to build Keras models is the Functional API. To
# build models with the Functional API, you start by specifying the shape (and
# optionally the dtype) of your inputs. If any dimension of your input can vary, you can
# specify it as `None`. For instance, an input for 200x200 RGB image would have shape
# `(200, 200, 3)`, but an input for RGB images of any size would have shape `(None,
# None, 3)`.
#
# + colab_type="code"
# Let's say we expect our inputs to be RGB images of arbitrary size.
# `None` marks a dimension of variable size; the batch dimension is implicit
# and not part of `shape`.
inputs = keras.Input(shape=(None, None, 3))
# + [markdown] colab_type="text"
# After defining your input(s), you chain layer transformations on top of your inputs,
# until your final output:
#
# + colab_type="code"
# Chain layer transformations on top of `inputs` (defined in the previous
# cell) to build the graph of a small image classifier.
from tensorflow.keras import layers
# Center-crop images to 150x150
x = CenterCrop(height=150, width=150)(inputs)
# Rescale images to [0, 1]
x = Rescaling(scale=1.0 / 255)(x)
# Apply some convolution and pooling layers
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
# Apply global average pooling to get flat feature vectors
# (this collapses the spatial dimensions, so variable-size inputs still work)
x = layers.GlobalAveragePooling2D()(x)
# Add a dense classifier on top
num_classes = 10
outputs = layers.Dense(num_classes, activation="softmax")(x)
# + [markdown] colab_type="text"
# Once you have defined the directed acyclic graph of layers that turns your input(s) into
# your outputs, instantiate a `Model` object:
#
# + colab_type="code"
model = keras.Model(inputs=inputs, outputs=outputs)
# + [markdown] colab_type="text"
# This model behaves basically like a bigger layer. You can call it on batches of data, like
# this:
#
# + colab_type="code"
# A Model behaves like a bigger layer: call it directly on a batch of data.
data = np.random.randint(0, 256, size=(64, 200, 200, 3)).astype("float32")
processed_data = model(data)
# Expect (64, 10): one softmax vector of `num_classes` scores per sample.
print(processed_data.shape)
# + [markdown] colab_type="text"
# You can print a summary of how your data gets transformed at each stage of the model.
# This is useful for debugging.
#
# Note that the output shape displayed for each layers includes the **batch size**. Here
# the batch size is None, which indicates our model can process batches of any size.
#
# + colab_type="code"
model.summary()
# + [markdown] colab_type="text"
# The Functional API also makes it easy to build models that have multiple inputs (for
# instance, an image *and* its metadata) or multiple outputs (for instance, predicting
# the class of the image *and* the likelihood that a user will click on it). For a
# deeper dive into what you can do, see our
# [guide to the Functional API](/guides/functional_api/).
#
# + [markdown] colab_type="text"
# ## Training models with `fit()`
#
# At this point, you know:
#
# - How to prepare your data (e.g. as a NumPy array or a `tf.data.Dataset` object)
# - How to build a model that will process your data
#
# The next step is to train your model on your data. The `Model` class features a
# built-in training loop, the `fit()` method. It accepts `Dataset` objects, Python
# generators that yield batches of data, or NumPy arrays.
#
# Before you can call `fit()`, you need to specify an optimizer and a loss function (we
# assume you are already familiar with these concepts). This is the `compile()` step:
#
# ```python
# model.compile(optimizer=keras.optimizers.RMSprop(learning_rate=1e-3),
# loss=keras.losses.CategoricalCrossentropy())
# ```
#
# Loss and optimizer can be specified via their string identifiers (in this case
# their default constructor argument values are used):
#
#
# ```python
# model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
# ```
#
# Once your model is compiled, you can start "fitting" the model to the data.
# Here's what fitting a model looks like with NumPy data:
#
# ```python
# model.fit(numpy_array_of_samples, numpy_array_of_labels,
# batch_size=32, epochs=10)
# ```
#
# Besides the data, you have to specify two key parameters: the `batch_size` and
# the number of epochs (iterations on the data). Here our data will get sliced on batches
# of 32 samples, and the model will iterate 10 times over the data during training.
#
# Here's what fitting a model looks like with a dataset:
#
# ```python
# model.fit(dataset_of_samples_and_labels, epochs=10)
# ```
#
# Since the data yielded by a dataset is expected to be already batched, you don't need to
# specify the batch size here.
#
# Let's look at it in practice with a toy example model that learns to classify MNIST
# digits:
#
# + colab_type="code"
# Toy example: train a small MNIST classifier with fit(), first from NumPy
# arrays, then from a tf.data.Dataset.
# Get the data as Numpy arrays
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
# Build a simple model
inputs = keras.Input(shape=(28, 28))
# Pixel values arrive in [0, 255]; rescale into [0, 1].
x = layers.experimental.preprocessing.Rescaling(1.0 / 255)(inputs)
x = layers.Flatten()(x)
x = layers.Dense(128, activation="relu")(x)
x = layers.Dense(128, activation="relu")(x)
outputs = layers.Dense(10, activation="softmax")(x)
model = keras.Model(inputs, outputs)
model.summary()
# Compile the model. `sparse_categorical_crossentropy` matches integer
# class labels (as opposed to one-hot encoded targets).
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy")
# Train the model for 1 epoch from Numpy data; fit() slices it into batches.
batch_size = 64
print("Fit on NumPy data")
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=1)
# Train the model for 1 epoch using a dataset. The dataset is already
# batched, so no batch_size argument is passed to fit().
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(batch_size)
print("Fit on Dataset")
history = model.fit(dataset, epochs=1)
# + [markdown] colab_type="text"
# The `fit()` call returns a "history" object which records what happened over the course
# of training. The `history.history` dict contains per-epoch timeseries of metrics
# values (here we have only one metric, the loss, and one epoch, so we only get a single
# scalar):
#
# + colab_type="code"
print(history.history)
# + [markdown] colab_type="text"
# For a detailed overview of how to use `fit()`, see the
# [guide to training & evaluation with the built-in Keras methods](
# /guides/training_with_built_in_methods/).
#
# + [markdown] colab_type="text"
# ### Keeping track of performance metrics
#
# As you're training a model, you want to keep track of metrics such as classification
# accuracy, precision, recall, AUC, etc. Besides, you want to monitor these metrics not
# only on the training data, but also on a validation set.
#
# **Monitoring metrics**
#
# You can pass a list of metric objects to `compile()`, like this:
#
#
# + colab_type="code"
# Recompile with a metric so fit() reports accuracy alongside the loss.
model.compile(
    optimizer="adam",
    loss="sparse_categorical_crossentropy",
    # name="acc" controls the key under which the metric appears in logs.
    metrics=[keras.metrics.SparseCategoricalAccuracy(name="acc")],
)
history = model.fit(dataset, epochs=1)
# + [markdown] colab_type="text"
# **Passing validation data to `fit()`**
#
# You can pass validation data to `fit()` to monitor your validation loss & validation
# metrics. Validation metrics get reported at the end of each epoch.
#
# + colab_type="code"
# Build a validation dataset from the held-out test split; validation loss
# and metrics are reported at the end of each epoch.
val_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
history = model.fit(dataset, epochs=1, validation_data=val_dataset)
# + [markdown] colab_type="text"
# ### Using callbacks for checkpointing (and more)
#
# If training goes on for more than a few minutes, it's important to save your model at
# regular intervals during training. You can then use your saved models
# to restart training in case your training process crashes (this is important for
# multi-worker distributed training, since with many workers at least one of them is
# bound to fail at some point).
#
# An important feature of Keras is **callbacks**, configured in `fit()`. Callbacks are
# objects that get called by the model at different points during training, in particular:
#
# - At the beginning and end of each batch
# - At the beginning and end of each epoch
#
# Callbacks are a way to make model training entirely scriptable.
#
# You can use callbacks to periodically save your model. Here's a simple example: a
# `ModelCheckpoint` callback
# configured to save the model at the end of every epoch. The filename will include the
# current epoch.
#
# ```python
# callbacks = [
# keras.callbacks.ModelCheckpoint(
# filepath='path/to/my/model_{epoch}',
# save_freq='epoch')
# ]
# model.fit(dataset, epochs=2, callbacks=callbacks)
# ```
#
# + [markdown] colab_type="text"
# You can also use callbacks to do things like periodically changing the learning rate of your
# optimizer, streaming metrics to a Slack bot, sending yourself an email notification
# when training is complete, etc.
#
# For detailed overview of what callbacks are available and how to write your own, see
# the [callbacks API documentation](/api/callbacks/) and the
# [guide to writing custom callbacks](/guides/writing_your_own_callbacks/).
#
# + [markdown] colab_type="text"
# ### Monitoring training progress with TensorBoard
#
# Staring at the Keras progress bar isn't the most ergonomic way to monitor how your loss
# and metrics are evolving over time. There's a better solution:
# [TensorBoard](https://www.tensorflow.org/tensorboard),
# a web application that can display real-time graphs of your metrics (and more).
#
# To use TensorBoard with `fit()`, simply pass a `keras.callbacks.TensorBoard` callback
# specifying the directory where to store TensorBoard logs:
#
#
# ```python
# callbacks = [
# keras.callbacks.TensorBoard(log_dir='./logs')
# ]
# model.fit(dataset, epochs=2, callbacks=callbacks)
# ```
#
# You can then launch a TensorBoard instance that you can open in your browser to monitor
# the logs getting written to this location:
#
# ```
# tensorboard --logdir=./logs
# ```
#
# What's more, you can launch an in-line TensorBoard tab when training models in Jupyter
# / Colab notebooks.
# [Here's more information](https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks).
#
# + [markdown] colab_type="text"
# ### After `fit()`: evaluating test performance & generating predictions on new data
#
# Once you have a trained model, you can evaluate its loss and metrics on new data via
# `evaluate()`:
#
# + colab_type="code"
# evaluate() returns the loss followed by each compiled metric (here: acc).
loss, acc = model.evaluate(val_dataset)  # returns loss and metrics
print("loss: %.2f" % loss)
print("acc: %.2f" % acc)
# + [markdown] colab_type="text"
# You can also generate NumPy arrays of predictions (the activations of the output
# layer(s) in the model) via `predict()`:
#
# + colab_type="code"
# predict() returns a NumPy array of output-layer activations,
# one row per sample in the dataset.
predictions = model.predict(val_dataset)
print(predictions.shape)
# + [markdown] colab_type="text"
# ## Using `fit()` with a custom training step
#
# By default, `fit()` is configured for **supervised learning**. If you need a different
# kind of training loop (for instance, a GAN training loop), you
# can provide your own implementation of the `Model.train_step()` method. This is the
# method that is repeatedly called during `fit()`.
#
# Metrics, callbacks, etc. will work as usual.
#
# Here's a simple example that reimplements what `fit()` normally does:
#
# ```python
# class CustomModel(keras.Model):
# def train_step(self, data):
# # Unpack the data. Its structure depends on your model and
# # on what you pass to `fit()`.
# x, y = data
# with tf.GradientTape() as tape:
# y_pred = self(x, training=True) # Forward pass
# # Compute the loss value
# # (the loss function is configured in `compile()`)
# loss = self.compiled_loss(y, y_pred,
# regularization_losses=self.losses)
# # Compute gradients
# trainable_vars = self.trainable_variables
# gradients = tape.gradient(loss, trainable_vars)
# # Update weights
# self.optimizer.apply_gradients(zip(gradients, trainable_vars))
# # Update metrics (includes the metric that tracks the loss)
# self.compiled_metrics.update_state(y, y_pred)
# # Return a dict mapping metric names to current value
# return {m.name: m.result() for m in self.metrics}
#
# # Construct and compile an instance of CustomModel
# inputs = keras.Input(shape=(32,))
# outputs = keras.layers.Dense(1)(inputs)
# model = CustomModel(inputs, outputs)
# model.compile(optimizer='adam', loss='mse', metrics=[...])
#
# # Just use `fit` as usual
# model.fit(dataset, epochs=3, callbacks=...)
# ```
#
# For a detailed overview of how you customize the built-in training & evaluation loops,
# see the guide:
# ["Customizing what happens in `fit()`"](/guides/customizing_what_happens_in_fit/).
#
# + [markdown] colab_type="text"
# ## Debugging your model with eager execution
#
# If you write custom training steps or custom layers, you will need to debug them. The
# debugging experience is an integral part of a framework: with Keras, the debugging
# workflow is designed with the user in mind.
#
# By default, your Keras models are compiled to highly-optimized computation graphs that
# deliver fast execution times. That means that the Python code you write (e.g. in a
# custom `train_step`) is not the code you are actually executing. This introduces a
# layer of indirection that can make debugging hard.
#
# Debugging is best done step by step. You want to be able to sprinkle your code with
# `print()` statement to see what your data looks like after every operation, you want
# to be able to use `pdb`. You can achieve this by **running your model eagerly**. With
# eager execution, the Python code you write is the code that gets executed.
#
# Simply pass `run_eagerly=True` to `compile()`:
#
# ```python
# model.compile(optimizer='adam', loss='mse', run_eagerly=True)
# ```
#
# Of course, the downside is that it makes your model significantly slower. Make sure to
# switch it back off to get the benefits of compiled computation graphs once you are
# done debugging!
#
# In general, you will use `run_eagerly=True` every time you need to debug what's
# happening inside your `fit()` call.
#
# + [markdown] colab_type="text"
# ## Speeding up training with multiple GPUs
#
# Keras has built-in industry-strength support for multi-GPU training and distributed
# multi-worker training, via the `tf.distribute` API.
#
# If you have multiple GPUs on your machine, you can train your model on all of them by:
#
# - Creating a `tf.distribute.MirroredStrategy` object
# - Building & compiling your model inside the strategy's scope
# - Calling `fit()` and `evaluate()` on a dataset as usual
#
# ```python
# # Create a MirroredStrategy.
# strategy = tf.distribute.MirroredStrategy()
#
# # Open a strategy scope.
# with strategy.scope():
# # Everything that creates variables should be under the strategy scope.
# # In general this is only model construction & `compile()`.
# model = Model(...)
# model.compile(...)
#
# # Train the model on all available devices.
# train_dataset, val_dataset, test_dataset = get_dataset()
# model.fit(train_dataset, epochs=2, validation_data=val_dataset)
#
# # Test the model on all available devices.
# model.evaluate(test_dataset)
# ```
#
# For a detailed introduction to multi-GPU & distributed training, see
# [this guide](/guides/distributed_training/).
#
# + [markdown] colab_type="text"
# ## Doing preprocessing synchronously on-device vs. asynchronously on host CPU
#
# You've learned about preprocessing, and you've seen example where we put image
# preprocessing layers (`CenterCrop` and `Rescaling`) directly inside our model.
#
# Having preprocessing happen as part of the model during training
# is great if you want to do on-device preprocessing, for instance, GPU-accelerated
# feature normalization or image augmentation. But there are kinds of preprocessing that
# are not suited to this setup: in particular, text preprocessing with the
# `TextVectorization` layer. Due to its sequential nature and due to the fact that it
# can only run on CPU, it's often a good idea to do **asynchronous preprocessing**.
#
# With asynchronous preprocessing, your preprocessing operations will run on CPU, and the
# preprocessed samples will be buffered into a queue while your GPU is busy with
# previous batch of data. The next batch of preprocessed samples will then be fetched
# from the queue to the GPU memory right before the GPU becomes available again
# (prefetching). This ensures that preprocessing will not be blocking and that your GPU
# can run at full utilization.
#
# To do asynchronous preprocessing, simply use `dataset.map` to inject a preprocessing
# operation into your data pipeline:
#
# + colab_type="code"
# Demo: asynchronous preprocessing — text vectorization runs inside the
# tf.data pipeline (on CPU), not inside the model.
# Example training data, of dtype `string`.
samples = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])
labels = [[0], [1]]
# Prepare a TextVectorization layer and fit its vocabulary on the samples.
vectorizer = TextVectorization(output_mode="int")
vectorizer.adapt(samples)
# Asynchronous preprocessing: the text vectorization is part of the tf.data pipeline.
# First, create a dataset
dataset = tf.data.Dataset.from_tensor_slices((samples, labels)).batch(2)
# Apply text vectorization to the samples (labels pass through unchanged)
dataset = dataset.map(lambda x, y: (vectorizer(x), y))
# Prefetch with a buffer size of 2 batches, so preprocessing overlaps training
dataset = dataset.prefetch(2)
# Our model should expect sequences of integers as inputs
inputs = keras.Input(shape=(None,), dtype="int64")
x = layers.Embedding(input_dim=10, output_dim=32)(inputs)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mse", run_eagerly=True)
model.fit(dataset)
# + [markdown] colab_type="text"
# Compare this to doing text vectorization as part of the model:
#
# + colab_type="code"
# Contrast: the same vectorizer placed inside the model, so the model
# accepts raw strings directly (synchronous, on-device preprocessing).
# Our dataset will yield samples that are strings
dataset = tf.data.Dataset.from_tensor_slices((samples, labels)).batch(2)
# Our model should expect strings as inputs
inputs = keras.Input(shape=(1,), dtype="string")
# Reuse the vectorizer adapted in the previous cell as the first layer.
x = vectorizer(inputs)
x = layers.Embedding(input_dim=10, output_dim=32)(x)
outputs = layers.Dense(1)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="adam", loss="mse", run_eagerly=True)
model.fit(dataset)
# + [markdown] colab_type="text"
# When training text models on CPU, you will generally not see any performance difference
# between the two setups. When training on GPU, however, doing asynchronous buffered
# preprocessing on the host CPU while the GPU is running the model itself can result in
# a significant speedup.
#
# After training, if you want to export an end-to-end model that includes the preprocessing
# layer(s), this is easy to do, since `TextVectorization` is a layer:
#
# ```python
# inputs = keras.Input(shape=(1,), dtype='string')
# x = vectorizer(inputs)
# outputs = trained_model(x)
# end_to_end_model = keras.Model(inputs, outputs)
# ```
#
# + [markdown] colab_type="text"
# ## Finding the best model configuration with hyperparameter tuning
#
# Once you have a working model, you're going to want to optimize its configuration --
# architecture choices, layer sizes, etc. Human intuition can only go so far, so you'll
# want to leverage a systematic approach: hyperparameter search.
#
# You can use
# [Keras Tuner](https://keras-team.github.io/keras-tuner/documentation/tuners/) to find
# the best hyperparameter for your Keras models. It's as easy as calling `fit()`.
#
# Here's how it works.
#
# First, place your model definition in a function, that takes a single `hp` argument.
# Inside this function, replace any value you want to tune with a call to hyperparameter
# sampling methods, e.g. `hp.Int()` or `hp.Choice()`:
#
# ```python
# def build_model(hp):
# inputs = keras.Input(shape=(784,))
# x = layers.Dense(
# units=hp.Int('units', min_value=32, max_value=512, step=32),
#         activation='relu')(inputs)
# outputs = layers.Dense(10, activation='softmax')(x)
# model = keras.Model(inputs, outputs)
# model.compile(
# optimizer=keras.optimizers.Adam(
# hp.Choice('learning_rate',
# values=[1e-2, 1e-3, 1e-4])),
# loss='sparse_categorical_crossentropy',
# metrics=['accuracy'])
# return model
# ```
#
# The function should return a compiled model.
#
# Next, instantiate a tuner object specifying your optimization objective and other search
# parameters:
#
#
# ```python
# import kerastuner
#
# tuner = kerastuner.tuners.Hyperband(
# build_model,
# objective='val_loss',
# max_epochs=100,
# max_trials=200,
# executions_per_trial=2,
# directory='my_dir')
# ```
#
# Finally, start the search with the `search()` method, which takes the same arguments as
# `Model.fit()`:
#
# ```python
# tuner.search(dataset, validation_data=val_dataset)
# ```
#
# When search is over, you can retrieve the best model(s):
#
# ```python
# models = tuner.get_best_models(num_models=2)
# ```
#
# Or print a summary of the results:
#
# ```python
# tuner.results_summary()
# ```
#
# + [markdown] colab_type="text"
# ## End-to-end examples
#
# To familiarize yourself with the concepts in this introduction, see the following
# end-to-end examples:
#
# - [Text classification](/examples/nlp/text_classification_from_scratch/)
# - [Image classification](/examples/vision/image_classification_from_scratch/)
# - [Credit card fraud detection](/examples/structured_data/imbalanced_classification/)
#
# + [markdown] colab_type="text"
# ## What to learn next
#
# - Learn more about the
# [Functional API](/guides/functional_api/).
# - Learn more about the
# [features of `fit()` and `evaluate()`](/guides/training_with_built_in_methods/).
# - Learn more about
# [callbacks](/guides/writing_your_own_callbacks/).
# - Learn more about
# [creating your own custom training steps](/guides/customizing_what_happens_in_fit/).
# - Learn more about
# [multi-GPU and distributed training](/guides/distributed_training/).
# - Learn how to do [transfer learning](/guides/transfer_learning/).
#
| guides/ipynb/intro_to_keras_for_engineers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scipy import stats
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas as pd
import numpy as np
# ### A/B тестирование
# В этом задании вы познакомитесь с A/B тестированием и примените полученные знания по статистике.
#
# Рассмотрим A/B тестирование на примере сайта. У сайта есть два дизайна - старый и новый, и мы хотим оценить, насколько новый дизайн лучше старого. Для этого пользователи сайта случайным образом разделяются на контрольную и тестовую группы. Контрольной группе показывается старая версия сайта, тестовой группе - измененная версия. Оценить изменение можно несколькими способами, самый простой - оценить конверсию. Конверсия - доля пользователей, совершивших заранее определенное действие(например подписка, нажатие на кнопку, заполнение формы).
# ### Описание данных
# Для начала нужно загрузить данные из файла `a_b_testing.csv` при помощи функции `read_csv` из библиотеки `pandas`. В данном случае 1 - была совершена подписка на сайт, 0 - подписки не было. A - контрольная группа, B - тестовая группа.
# Далее нужно выполнить следующие пункты, описание выходного формата содержится внутри каждого задания.
# ### Доверительный интервал
# В видео про доверительный интервал мы рассмотрели, как можно аппроксимировать биномиальное распределение нормальным. В некоторых случаях параметры нормального распределения можно вывести математически и ниже мы рассмотрим как.
# Представим количество пользователей как случайную величину из биномиального распределения с параметрами `n` - количество пользователей и `p` - вероятность конверсии или как сумму `n` независимых бросаний монетки. Определим следующую случайную величину:
#
# $$Y = X_{1} + X_{2} + \dots + X_{n} , \, $$
# где случайная величина $X_{i}$ имеет распределение Бернулли. Для случайной величины $Y$ математическое ожидание и дисперсия равны:
#
# $$\mu = np, \, \sigma^{2} = np\cdot(1 - p)$$
#
# Далее применяя центральную предельную теорему(случайные величины $X_{i}$ распределены независимо и размер выборки большой), получаем что
#
# $$Y \sim \mathcal{N}\left(np, \, np\cdot(1 - p)\right)$$
#
# Мы перешли от биномиального распределения к нормальному. Следующий шаг - стандартизация нормального распределения:
#
# $$Z = \frac{Y - np}{\sqrt{np\cdot(1-p)}} \sim \mathcal{N}(0, \, 1) $$
#
# Преобразуем выражение выше:
#
# $$Z = \frac{Y - np}{\sqrt{np\cdot(1-p)}} = \frac{\frac{Y}{n} - p}{\sqrt{\frac{p(1-p)}{n}}} \sim \mathcal{N}(0, \, 1) $$
# Так как среднее значение по выборке - это наблюдаемый процент конверсии, то доверительный интервал будет выглядеть следующим образом:
# $${P}\left(p - z_{1-\frac{\alpha}{2}} \sqrt{\frac{p(1-p)}{n}} \le \mu \le p + z_{1-\frac{\alpha}{2}}\sqrt{\frac{p(1-p)}{n}}\right) = 1-\alpha$$
# ### ЗАДАНИЕ
# Найдите доверительный интервал для средней конверсии пользователей из контрольной выборки с уровнем значимости 95%. Запишите значения левой и правой границ через запятую, сохраняя приведенный порядок, в переменную `answer1`, которая будет являтся строкой
# #### РЕШЕНИЕ
# Load the A/B testing data: 'converted' is 1 if the user subscribed, 0 otherwise;
# 'group' is 'A' (control) or 'B' (treatment).
df = pd.read_csv("a_b_testing.csv")
# Control group only.
df_A = df[df['group']=='A']
# Control-group sample size.
n = df_A.shape[0]
print(n)
# Observed conversion rate = sample mean of the 0/1 outcomes.
p = df_A['converted'].mean()
p
# Significance level for a 95% confidence interval.
alpha = 1-0.95
alpha
# Two-sided standard-normal quantile z_{1 - alpha/2}.
z_value = stats.norm.ppf(q = 1-alpha/2)
z_value
# Half-width of the normal-approximation interval: z * sqrt(p(1-p)/n).
interval = z_value * np.sqrt(p*(1-p)/n)
interval
# Lower bound.
lb = p - interval
lb
# Upper bound.
ub = p + interval
ub
# Format the answer as "lower,upper" with two decimal places.
answer1 = "{:.2f},{:.2f}".format(lb,ub)
answer1
# ### Задача A/B тестирования
# Рассмотрим независимые выборки $X$ и $Y$ для которых есть $\mu_x$ и $\mu_y$, определяющие среднее значение распределения.
#
# Рассматривается следующая гипотеза:
# $$
# H_0: \mu_x = \mu_y
# $$
# против альтернативы:
#
# $$
# H_1: \mu_x \ne \mu_y.
# $$
# Если гипотеза $H_0$ отвергается, то показатель действительно поменялся.
# Также тест можно записать и другим способом:
# $$
# H_0: \mu_x \le \mu_y
# $$
#
# против альтернативы:
#
# $$
# H_1: \mu_x > \mu_y
# $$
# ### Задание по статистике Стьюдента
# Найдите значение статистики Стьюдента в предположении независимости выборок
# $$
# T(X, Y) = \frac{\bar{X} - \bar{Y}}{\sqrt{\frac{s_x^2}{n} + \frac{s_y^2}{m}}}
# $$
#
# `n` - размер контрольной выборки, `m` - размер тестовой выборки
# Ответ запишите в переменную `answer2` с точностью до 2 знака после запятой
# ### РЕШЕНИЕ
# +
df = pd.read_csv('a_b_testing.csv')
df_A = df[df['group']=='A']
# Размерность контрольной выборки
n = df_A.shape[0]
df_B = df[df['group']=='B']
# Размерность тестовой выборки
m = df_B.shape[0]
print(n, m)
# Средние по выборкам
pA = df_A['converted'].mean()
pB = df_B['converted'].mean()
pA, pB
# -
# Sample standard deviations of the conversion outcome for each group.
# FIX: the original called df_A.std() / df_B.std() on the whole DataFrame,
# which returns a Series over every numeric column (and warns or raises on
# non-numeric columns in modern pandas), forcing the fragile t[0] indexing
# below.  Taking the std of the 'converted' column directly gives the same
# number as a plain scalar.
st_dev_A = df_A['converted'].std()
st_dev_B = df_B['converted'].std()
st_dev_A, st_dev_B
# Welch's t statistic for two independent samples:
# t = (mean_A - mean_B) / sqrt(s_A^2/n + s_B^2/m)
t = (pA - pB) / (st_dev_A * st_dev_A / n + st_dev_B * st_dev_B / m) ** 0.5
# Report with two decimal places.
answer2 = "{:.2f}".format(t)
answer2
# ### Статистика Стьюдента из библиотеки Scipy
# Найдите p-value для статистики Стьюдента, используя функцию `stats.ttest_ind`.
# ### РЕШЕНИЕ
from scipy.stats import ttest_ind
df = pd.read_csv('a_b_testing.csv')
# +
# Размерность контрольной выборки
df_A = df[df['group']=='A']
n = df_A.shape[0]
df_B = df[df['group']=='B']
# Размерность тестовой выборки
m = df_B.shape[0]
print(n, m)
# -
t = stats.ttest_ind(df_A['converted'].sample(n), df_B['converted'].sample(m))
answer3 = "{:.2f}".format(t[1])
answer3
# Дополнительная проверка: значение статистики Стьюдента, посчитанная двумя способами, должны совпадать
# Ответ запишите в переменную `answer3` с точностью до 2 знака после запятой
# ### Ответы
output = """Confidence interval:{0}
T score custom {1:.2f}
p value {2:.2f}"""
print(output.format(answer1, answer2, answer3))
| W2/task-2.2/task-2.2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Car Price Prediction
import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# %matplotlib inline
df = pd.read_csv('car data.csv')
df.head()
df.shape
print(f"Unique Seller Types : {df['Seller_Type'].unique()}")
print(f"Unique Transmissions : {df['Transmission'].unique()}")
print(f"Unique Owners : {df['Owner'].unique()}")
print(f"Unique Fuel Types : {df['Fuel_Type'].unique()}")
# check for missing or null values
df.isnull().any().sum()
final_dataset = df[['Year', 'Selling_Price', 'Present_Price', 'Kms_Driven','Fuel_Type', 'Seller_Type', 'Transmission', 'Owner']]
# +
current_year = datetime.datetime.now().year
final_dataset = (
final_dataset
.assign(num_years = lambda x : current_year - x['Year'])
.drop(['Year'], axis=1)
)
final_dataset.head()
# -
final_dataset = pd.get_dummies(final_dataset, drop_first=True)
final_dataset.head()
final_dataset.corr()
corr_mat = final_dataset.corr()
top_corr_features = corr_mat.index
plt.figure(figsize=(20,20))
# plot heatmap
g=sns.heatmap(final_dataset[top_corr_features].corr(), annot=True, cmap='RdYlGn')
sns.pairplot(final_dataset)
final_dataset.head()
# Independent and dependent features
X = final_dataset.iloc[:, 1:]
y = final_dataset.iloc[:, 0]
X.head()
y.head()
# ### Feature Importance
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
model.fit(X,y)
model.feature_importances_
# plot graph of feature importances for better visualization
feat_importance = pd.Series(model.feature_importances_, index=X.columns)
feat_importance.sort_values().plot(kind='barh')
plt.show()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.2)
X_train.shape
# The features do not require scaling because we are using RandomForestRegressor. RandomForest uses decision trees and usually in decision trees, scaling is not required.
from sklearn.ensemble import RandomForestRegressor
rfr = RandomForestRegressor()
# ### Hyperparameters Tuning
# +
# Number of trees in the random forest regressor
n_estimators = [int(x) for x in np.linspace(start=100, stop=1200, num=12)]
# Number of features to consider at every split
# NOTE(review): 'auto' was deprecated in scikit-learn 1.1 and removed in 1.3;
# on modern versions RandomizedSearchCV will fail whenever it samples 'auto'.
# For a regressor 'auto' meant all features, i.e. max_features=1.0 — verify
# the installed sklearn version before running.
max_features = ['auto','sqrt']
# Maximum number of levels in the decision tree
max_depth = [int(x) for x in np.linspace(start=5, stop=30, num=6)]
#Minimum number of samples required to split a node
min_samples_split = [2, 5, 10, 15, 100]
#Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 5, 10]
# -
from sklearn.model_selection import RandomizedSearchCV
param_grid = {
'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf':min_samples_leaf
}
print(param_grid)
rfr_random = RandomizedSearchCV(
estimator = rfr,
param_distributions = param_grid,
scoring = 'neg_mean_squared_error',
n_iter=10,
cv=5,
verbose =2,
random_state=42,
n_jobs=1
)
rfr_random.fit(X_train, y_train)
predictions = rfr_random.predict(X_test)
sns.distplot(y_test-predictions);
plt.scatter(y_test,predictions);
# +
import pickle
# open a file for storing data
file = open('random_forest_regression_model.pkl', 'wb')
# dump information into the file
pickle.dump(rfr_random, file)
| car_price_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import numpy as np
import tifffile as tiff
import keras.backend as K
from keras.metrics import binary_crossentropy
from math import sqrt
from skimage.transform import resize
import logging
import sys
import tensorflow as tf
import sys; #sys.path.append('../')
from src.models.clr_callback import *
from src.models.unet_dilation import UNet
from src.utils.runtime import gpu_selection
from src.utils.data import random_transforms
from src.utils.model import dice_coef, jaccard_coef
import cv2
import numpy as np
import cv2
import glob
import random
from matplotlib.image import imsave
import mahotas as mh
from scipy import ndimage
from skimage.measure import regionprops
import matplotlib.pyplot as plt
import seaborn as sns
from src.utils.model import dice_coef, jaccard_coef,tru_pos,fls_pos,tru_neg,fls_neg
sns.set_style("whitegrid", {'axes.grid' : False})
# -
import keras
#model = UNet()
model = UNet('unet')
model.config['data_path'] = '.'
model.load_data()
gpu_selection(visible_devices="0")
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 1
session = tf.Session(config=config)
model.compile() # THIS IS USING BEST VALIDATION LOSS WEIGHTS :]
model.net.load_weights('/well/lindgren/craig/isbi-2012/checkpoints/unet_1024_dilation/weights_loss_val.weights')
model.net.summary()
def metric_wrapper(yt, yp, metric):
    """Evaluate a Keras backend metric on two numpy arrays and return its value."""
    true_var = K.variable(yt)
    pred_var = K.variable(yp)
    return K.get_value(metric(true_var, pred_var))
val_imgs = np.load('montage_img_val.npy')
val_masks = np.load('montage_msk_val.npy')
import time
start = time.time()
data_gen = model.batch_gen_trn(imgs=val_imgs, msks=val_masks, batch_size=4)
img_batch, msk_batch = next(data_gen)
prd_batch = model.net.predict(img_batch)
stop = time.time()
print(stop-start)
for img, msk, prd in zip(img_batch, msk_batch, prd_batch):
r = c = int(sqrt(img.size))
d = metric_wrapper(msk, prd, dice_coef)
j = metric_wrapper(msk, prd, jaccard_coef)
fig,_ = plt.subplots(nrows=1, ncols=4, figsize=(20,4))
fig.axes[0].set_title('Adipocyte tile')
fig.axes[0].imshow(img.reshape(r,c), cmap='gray')
fig.axes[1].set_title('Input Mask')
fig.axes[1].imshow(msk.reshape(r,c), cmap='gray')
fig.axes[2].set_title('Predicted Mask')
fig.axes[2].imshow(prd.reshape(r,c), cmap='gray')
fig.axes[3].set_title('True Pixel Classes')
fig.axes[3].hist(msk.reshape(msk.size), label='Input', alpha=0.4, bins=2,color='c')
fig.axes[3].hist(prd.reshape(prd.size) + 1, label='Pred', alpha=0.4, bins=2,color='g')
fig.axes[3].set_xticklabels([0,0,1,0,1],rotation='vertical')
fig.axes[3].legend()
fig.suptitle('dc = %s, j = %s' % (d,j))
plt.show()
def precision(msk_batch, prd_batch):
    """Compute per-sample precision and recall for a batch of masks.

    Parameters
    ----------
    msk_batch : ground-truth mask batch.
    prd_batch : predicted mask batch, same length/shape as msk_batch.

    Returns
    -------
    (prec, recall) : two lists with one entry per sample.
    """
    prec = []
    recall = []
    for i, _ in enumerate(msk_batch):
        TP = metric_wrapper(msk_batch[i], prd_batch[i], tru_pos)
        TN = metric_wrapper(msk_batch[i], prd_batch[i], tru_neg)  # unused, kept for debugging symmetry
        FN = metric_wrapper(msk_batch[i], prd_batch[i], fls_neg)
        FP = metric_wrapper(msk_batch[i], prd_batch[i], fls_pos)
        # FIX: guard the degenerate case of an empty mask/prediction, where
        # TP+FP (or TP+FN) is zero and the ratio would be NaN/inf; report 0.0.
        prec.append(TP / (TP + FP) if (TP + FP) > 0 else 0.0)
        recall.append(TP / (TP + FN) if (TP + FN) > 0 else 0.0)
    return (prec, recall)
prec,recall = precision(msk_batch,prd_batch)
# Number of false positives and false negative pixels is so low ( and true positives so high),
# precision/recall basically 1.
np.mean(prec),np.mean(recall)
# NOTE(review): this lambda closes over the globals _mean/_std that are only
# (re)assigned inside the test-tile loop below, so each call normalizes with
# the most recently computed statistics. A def with explicit mean/std
# parameters would be safer — confirm the intended coupling.
normalize = lambda x: (x - _mean) / (_std + 1e-10)
# Out of sample prediction on an image neither trained nor validated on.
# +
test_tiles = glob.glob('*.jpg')
test_samples=[]
for i in test_tiles:
test_sample=cv2.imread(i,0)
test_sample = np.array(test_sample,np.float32) /255
#test_sample=cv2.resize(test_sample,(1024,1024))
_mean, _std = np.mean(test_sample), np.std(test_sample)
test_sample=normalize(test_sample)
test_samples.append(test_sample)
test_samples=np.array(test_samples)
# -
test_samples.shape
plt.figure(figsize=(10,10))
plt.imshow(test_samples[3],cmap='gray')
plt.show()
prd_batch = model.net.predict(test_samples,batch_size=4)
plt.figure(figsize=(10,10))
plt.imshow(prd_batch[3],cmap='gray')
plt.show()
img = np.array(prd_batch[3] * 255,dtype='uint8')
T = mh.thresholding.otsu(img)
print('Otsu threshold is: ',str(T))
plt.figure(figsize=(10,10))
plt.imshow(img > T ,cmap='gray')
plt.show()
blobs = np.where(img>T,0, 1)
blobs = ndimage.morphology.binary_fill_holes(blobs,structure=np.ones((5,5))).astype(int)
labels, no_objects = ndimage.label(blobs)
props = regionprops(blobs)
plt.figure(figsize=(10,10))
plt.imshow(blobs)
plt.show()
imsave('mask.pred.tif',blobs)
labelled=ndimage.label(blobs)
resh_labelled=labelled[0].reshape((img.shape[0],img.shape[1])) #labelled is a tuple: only the first element matters
props=regionprops(resh_labelled)
size={i:props[i].area for i in range (0, no_objects)}
no_of_cells=(sum(i > 200 and i < 50000 for i in size.values()))
areas=[i for i in size.values() if i >= 200 and i <= 70000]
print('Number of Cells counted: '+str(no_of_cells))
sns.set();
ax = sns.distplot(areas)
ax.set(xlim=(round(np.min(areas)), 100000))
ax.grid(False)
plt.show()
plt.figure(figsize=(10,10))
plt.set_cmap('OrRd')
plt.imshow(labels,origin='upper')
plt.show()
plt.imsave('predicted_mask.png',prd_batch[0],cmap='gray')
| notebooks/U-net prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
from hagelslag.processing import EnhancedWatershed, ObjectMatcher, centroid_distance, shifted_centroid_distance
from hagelslag.processing.tracker import extract_storm_objects, track_storms
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from scipy.ndimage import find_objects
from matplotlib.colors import ListedColormap
g_len = 60
grid = np.zeros((2, g_len, g_len))
labels = np.zeros((2, g_len, g_len), dtype=int)
x, y = np.meshgrid(np.arange(g_len), np.arange(g_len))
coords = np.dstack((x, y))
mvn1 = multivariate_normal(mean=[20, 15], cov=np.array([[50, 30], [30, 30]]))
mvn2 = multivariate_normal(mean=[25, 35], cov=np.array([[40, 20], [20, 20]]))
grid[0] += 10000 * mvn1.pdf(coords) + 10000 * mvn2.pdf(coords)
plt.pcolormesh(x, y, grid[0], cmap="Reds")
plt.colorbar()
ew = EnhancedWatershed(20, 1, 80, 80, 80)
labels[0] = ew.label(grid[0])
cmap = ListedColormap(["white", "red", "blue"])
plt.pcolormesh(x,y, labels[0], cmap=cmap)
plt.colorbar()
mvn1b = multivariate_normal(mean=[20 + 10, 15 + 10], cov=np.array([[50, 30], [30, 30]]))
mvn2b = multivariate_normal(mean=[25 + 10, 35 + 10], cov=np.array([[40, 20], [20, 20]]))
grid[1] = 10000 * mvn1b.pdf(coords) + 10000 * mvn2b.pdf(coords)
plt.pcolormesh(x, y, grid[1], cmap="Reds")
labels[1] = ew.label(grid[1])
plt.pcolormesh(x,y, labels[1])
plt.xlim(0, 80)
plt.ylim(0, 80)
storm_objs = extract_storm_objects(labels, grid, x, y, np.array([0, 1]))
out_storms = track_storms(storm_objs, np.array([0, 1]), [shifted_centroid_distance], np.array([30]), np.array([1]))
print(out_storms)
plt.pcolormesh(storm_objs[0][0].x[0], storm_objs[0][0].y[0], storm_objs[0][0].timesteps[0], snap=False)
# +
plt.pcolormesh(x, y, np.ma.array(grid[1], mask=labels[1] == 0), cmap="Reds")
plt.pcolormesh(x, y, np.ma.array(grid[0], mask=labels[0] == 0), cmap="Blues")
plt.plot(*out_storms[0].trajectory(), 'k--')
plt.plot(*out_storms[1].trajectory(), 'k--')
# +
conts = np.arange(10, 90, 10)
plt.figure(figsize=(4, 6))
plt.subplot(3, 2, 1)
plt.contourf(x, y, grid[0], conts, cmap="Blues")
plt.xlim(0, 60)
plt.ylim(0, 60)
plt.title("Time=0")
plt.ylabel("Storm Grid")
plt.subplot(3, 2, 2)
plt.contourf(x, y, grid[1], conts, cmap="Reds")
plt.xlim(0, 60)
plt.ylim(0, 60)
plt.title("Time=1")
plt.subplot(3, 2, 3)
plt.ylabel("Storm Identification")
plt.contourf(x, y, grid[0], conts, cmap="Blues")
cmap_1 = ListedColormap(["red", "green"])
plt.pcolormesh(x,y, np.ma.array(labels[0], mask=labels[0]==0), cmap=cmap_1)
plt.xlim(0, 60)
plt.ylim(0, 60)
plt.subplot(3,2,4)
cmap_2 = ListedColormap(["blue", "purple"])
plt.contourf(x, y, grid[1], conts, cmap="Reds")
plt.pcolormesh(x,y, np.ma.array(labels[1], mask=labels[1] == 0), cmap=cmap_2)
plt.xlim(0, 60)
plt.ylim(0, 60)
plt.subplot(3,2,5)
plt.ylabel("Tracking")
plt.contourf(x, y, np.ma.array(grid[1], mask=labels[1] == 0), conts, cmap="Reds")
plt.contourf(x, y, np.ma.array(grid[0], mask=labels[0] == 0), conts, cmap="Blues")
storm_1_traj = out_storms[0].trajectory().T
storm_2_traj = out_storms[1].trajectory().T
plt.arrow(*storm_1_traj[0], *(storm_1_traj[1] - storm_1_traj[0]), width=0.1, color='k')
plt.arrow(*storm_2_traj[0], *(storm_2_traj[1] - storm_2_traj[0]), width=0.1, color='k')
plt.xlim(0, 60)
plt.ylim(0, 60)
plt.savefig("storm_tracking_demo.pdf", bbox_inches="tight")
# -
storm_1_traj
| demos/StormTrackingDemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow_p36)
# language: python
# name: conda_tensorflow_p36
# ---
# +
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda
from keras.layers import Conv2D, MaxPooling2D, AveragePooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, CuDNNLSTM, CuDNNGRU, BatchNormalization, LocallyConnected2D, Permute, TimeDistributed, Bidirectional
from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback
from keras import regularizers
from keras import backend as K
from keras.utils.generic_utils import Progbar
from keras.layers.merge import _Merge
import keras.losses
from functools import partial
from collections import defaultdict
import tensorflow as tf
from tensorflow.python.framework import ops
import isolearn.keras as iso
import numpy as np
import tensorflow as tf
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import pandas as pd
import os
import pickle
import numpy as np
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
import isolearn.io as isoio
import isolearn.keras as isol
from sequence_logo_helper_protein import plot_protein_logo
import pandas as pd
from keras.backend.tensorflow_backend import set_session
def contain_tf_gpu_mem_usage() :
    """Make TensorFlow allocate GPU memory on demand instead of all at once.

    Creates a session with gpu_options.allow_growth=True and installs it as
    the Keras backend session, so models built afterwards reuse it.
    """
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    set_session(sess)
class EpochVariableCallback(Callback) :
    """Keras callback that updates a backend variable at the start of each epoch.

    my_func receives (current value, epoch index) and returns the new value,
    which is written back into my_variable via K.set_value (useful for
    annealing schedules).
    """
    def __init__(self, my_variable, my_func) :
        self.my_variable = my_variable
        self.my_func = my_func
    def on_epoch_begin(self, epoch, logs={}) :
        K.set_value(self.my_variable, self.my_func(K.get_value(self.my_variable), epoch))
class IdentityEncoder(iso.SequenceEncoder) :
    """One-hot sequence encoder over an arbitrary character/channel alphabet.

    channel_map maps each character to a channel index; decoding maps the
    argmax channel back to its character.
    """

    def __init__(self, seq_len, channel_map) :
        super(IdentityEncoder, self).__init__('identity', (seq_len, len(channel_map)))

        self.seq_len = seq_len
        self.n_channels = len(channel_map)
        self.encode_map = channel_map
        self.decode_map = {
            val : key for key, val in channel_map.items()
        }

    def encode(self, seq) :
        """Return a (seq_len, n_channels) one-hot matrix for seq; characters
        not in the map leave their row all-zero."""
        encoding = np.zeros((self.seq_len, self.n_channels))
        for i in range(len(seq)) :
            if seq[i] in self.encode_map :
                channel_ix = self.encode_map[seq[i]]
                encoding[i, channel_ix] = 1.
        return encoding

    def encode_inplace(self, seq, encoding) :
        """One-hot encode seq into a preallocated encoding buffer."""
        for i in range(len(seq)) :
            if seq[i] in self.encode_map :
                channel_ix = self.encode_map[seq[i]]
                encoding[i, channel_ix] = 1.

    def encode_inplace_sparse(self, seq, encoding_mat, row_index) :
        # BUGFIX: was `raise NotImplementError()` — a NameError at call time,
        # masking the intended "not implemented" signal.
        raise NotImplementedError()

    def decode(self, encoding) :
        """Decode a (seq_len, n_channels) matrix back into a string."""
        seq = ''
        for pos in range(0, encoding.shape[0]) :
            argmax_nt = np.argmax(encoding[pos, :])
            max_nt = np.max(encoding[pos, :])
            if max_nt == 1 :
                seq += self.decode_map[argmax_nt]
            else :
                # Ambiguous (non-one-hot) position: fall back to the entry
                # keyed by -1. NOTE(review): the residue_map used in this
                # notebook has no -1 key, so this branch would raise KeyError
                # if ever hit — confirm intent.
                seq += self.decode_map[-1]
        return seq

    def decode_sparse(self, encoding_mat, row_index) :
        """Decode one row of a sparse matrix of flattened encodings."""
        # BUGFIX: the reshape was hard-coded to 4 channels (leftover from a
        # nucleotide encoder); use this encoder's own channel count so it
        # works for the 20-channel protein alphabet as well.
        encoding = np.array(encoding_mat[row_index, :].todense()).reshape(-1, self.n_channels)
        return self.decode(encoding)
class NopTransformer(iso.ValueTransformer) :
    """Identity value transformer: passes target values through unchanged."""

    def __init__(self, n_classes) :
        # Register under the name 'nop' with an (n_classes,) output shape.
        super(NopTransformer, self).__init__('nop', (n_classes, ))
        self.n_classes = n_classes

    def transform(self, values) :
        # No-op: hand the values straight back.
        return values

    def transform_inplace(self, values, transform) :
        # Copy the values into the preallocated dense buffer.
        transform[:] = values

    def transform_inplace_sparse(self, values, transform_mat, row_index) :
        # Write the flattened values into one row of the sparse matrix.
        flat_values = np.ravel(values)
        transform_mat[row_index, :] = flat_values
# +
#Re-load cached dataframe (shuffled)
dataset_name = "coiled_coil_binders"
experiment = "baker_big_set_5x_negatives"
pair_df = pd.read_csv("pair_df_" + experiment + "_in_shuffled.csv", sep="\t")
print("len(pair_df) = " + str(len(pair_df)))
print(pair_df.head())
#Generate training and test set indexes
valid_set_size = 0.0005
test_set_size = 0.0995
data_index = np.arange(len(pair_df), dtype=np.int)
train_index = data_index[:-int(len(pair_df) * (valid_set_size + test_set_size))]
valid_index = data_index[train_index.shape[0]:-int(len(pair_df) * test_set_size)]
test_index = data_index[train_index.shape[0] + valid_index.shape[0]:]
print('Training set size = ' + str(train_index.shape[0]))
print('Validation set size = ' + str(valid_index.shape[0]))
print('Test set size = ' + str(test_index.shape[0]))
# +
#Sub-select smaller dataset
n_train_pos = 20000
n_train_neg = 20000
n_test_pos = 2000
n_test_neg = 2000
orig_n_train = train_index.shape[0]
orig_n_valid = valid_index.shape[0]
orig_n_test = test_index.shape[0]
train_index_pos = np.nonzero((pair_df.iloc[train_index]['interacts'] == 1).values)[0][:n_train_pos]
train_index_neg = np.nonzero((pair_df.iloc[train_index]['interacts'] == 0).values)[0][:n_train_neg]
train_index = np.concatenate([train_index_pos, train_index_neg], axis=0)
np.random.shuffle(train_index)
test_index_pos = np.nonzero((pair_df.iloc[test_index]['interacts'] == 1).values)[0][:n_test_pos] + orig_n_train + orig_n_valid
test_index_neg = np.nonzero((pair_df.iloc[test_index]['interacts'] == 0).values)[0][:n_test_neg] + orig_n_train + orig_n_valid
test_index = np.concatenate([test_index_pos, test_index_neg], axis=0)
np.random.shuffle(test_index)
print('Training set size = ' + str(train_index.shape[0]))
print('Test set size = ' + str(test_index.shape[0]))
# +
#Calculate sequence lengths
pair_df['amino_seq_1_len'] = pair_df['amino_seq_1'].str.len()
pair_df['amino_seq_2_len'] = pair_df['amino_seq_2'].str.len()
# -
pair_df.head()
# +
#Initialize sequence encoder
seq_length = 81
residue_map = {'D': 0, 'E': 1, 'V': 2, 'K': 3, 'R': 4, 'L': 5, 'S': 6, 'T': 7, 'N': 8, 'H': 9, 'A': 10, 'I': 11, 'G': 12, 'P': 13, 'Q': 14, 'Y': 15, 'W': 16, 'M': 17, 'F': 18, '#': 19}
encoder = IdentityEncoder(seq_length, residue_map)
# +
#Construct data generators
class CategoricalRandomizer :
    """Draws batches of category values from a fixed discrete distribution.

    The most recent draw is cached on the instance so several data-generator
    extractors can observe the same random outcome for a given row index.
    """

    def __init__(self, case_range, case_probs) :
        self.case_range = case_range    # candidate category values
        self.case_probs = case_probs    # sampling probability per category
        self.cases = 0                  # last drawn batch (0 before any draw)

    def get_random_sample(self, index=None) :
        """Return the whole cached batch, or one cached entry by index."""
        if index is not None :
            return self.cases[index]
        return self.cases

    def generate_random_sample(self, batch_size=1, data_ids=None) :
        """Draw a fresh batch of categories and cache it on the instance."""
        drawn = np.random.choice(self.case_range, size=batch_size, replace=True, p=self.case_probs)
        self.cases = drawn
def get_amino_seq(row, index, flip_randomizer, homodimer_randomizer, max_seq_len=seq_length) :
    """Return the (possibly flipped / homodimerized) sequence pair for one row.

    Depending on the cached draws of flip_randomizer the two partner
    sequences are swapped, and depending on homodimer_randomizer a
    non-interacting pair is replaced by a homodimer (second := first).
    """
    do_flip = flip_randomizer.get_random_sample(index=index) == 1
    do_homodimer = homodimer_randomizer.get_random_sample(index=index) == 1

    seq_a = row['amino_seq_1']
    seq_b = row['amino_seq_2']

    if do_flip :
        seq_a, seq_b = seq_b, seq_a

    # Only negative pairs (interacts == 0) may be turned into homodimers.
    if do_homodimer and row['interacts'] < 0.5 :
        seq_b = seq_a

    return seq_a, seq_b
flip_randomizer = CategoricalRandomizer(np.arange(2), np.array([0.5, 0.5]))
homodimer_randomizer = CategoricalRandomizer(np.arange(2), np.array([0.95, 0.05]))
batch_size = 32
data_gens = {
gen_id : iso.DataGenerator(
idx,
{ 'df' : pair_df },
batch_size=(idx.shape[0] // batch_size) * batch_size,
inputs = [
{
'id' : 'amino_seq_1',
'source_type' : 'dataframe',
'source' : 'df',
#'extractor' : lambda row, index, flip_randomizer=flip_randomizer, homodimer_randomizer=homodimer_randomizer: (get_amino_seq(row, index, flip_randomizer, homodimer_randomizer)[0] + "#" * seq_length)[:seq_length],
'extractor' : lambda row, index, flip_randomizer=flip_randomizer, homodimer_randomizer=homodimer_randomizer: get_amino_seq(row, index, flip_randomizer, homodimer_randomizer)[0],
'encoder' : IdentityEncoder(seq_length, residue_map),
'dim' : (1, seq_length, len(residue_map)),
'sparsify' : False
},
{
'id' : 'amino_seq_2',
'source_type' : 'dataframe',
'source' : 'df',
#'extractor' : lambda row, index, flip_randomizer=flip_randomizer, homodimer_randomizer=homodimer_randomizer: (get_amino_seq(row, index, flip_randomizer, homodimer_randomizer)[1] + "#" * seq_length)[:seq_length],
'extractor' : lambda row, index, flip_randomizer=flip_randomizer, homodimer_randomizer=homodimer_randomizer: get_amino_seq(row, index, flip_randomizer, homodimer_randomizer)[1],
'encoder' : IdentityEncoder(seq_length, residue_map),
'dim' : (1, seq_length, len(residue_map)),
'sparsify' : False
},
{
'id' : 'amino_seq_1_len',
'source_type' : 'dataframe',
'source' : 'df',
'extractor' : lambda row, index, flip_randomizer=flip_randomizer, homodimer_randomizer=homodimer_randomizer: len(get_amino_seq(row, index, flip_randomizer, homodimer_randomizer)[0]),
'encoder' : lambda t: t,
'dim' : (1,),
'sparsify' : False
},
{
'id' : 'amino_seq_2_len',
'source_type' : 'dataframe',
'source' : 'df',
'extractor' : lambda row, index, flip_randomizer=flip_randomizer, homodimer_randomizer=homodimer_randomizer: len(get_amino_seq(row, index, flip_randomizer, homodimer_randomizer)[1]),
'encoder' : lambda t: t,
'dim' : (1,),
'sparsify' : False
}
],
outputs = [
{
'id' : 'interacts',
'source_type' : 'dataframe',
'source' : 'df',
'extractor' : lambda row, index: row['interacts'],
'transformer' : NopTransformer(1),
'dim' : (1,),
'sparsify' : False
}
],
randomizers = [flip_randomizer, homodimer_randomizer],
shuffle = True
) for gen_id, idx in [('train', train_index), ('valid', valid_index), ('test', test_index)]
}
# +
#Load data matrices
[x_1_train, x_2_train, l_1_train, l_2_train], [y_train] = data_gens['train'][0]
[x_1_test, x_2_test, l_1_test, l_2_test], [y_test] = data_gens['test'][0]
print("x_1_train.shape = " + str(x_1_train.shape))
print("x_2_train.shape = " + str(x_2_train.shape))
print("x_1_test.shape = " + str(x_1_test.shape))
print("x_2_test.shape = " + str(x_2_test.shape))
print("l_1_train.shape = " + str(l_1_train.shape))
print("l2_train.shape = " + str(l_2_train.shape))
print("l_1_test.shape = " + str(l_1_test.shape))
print("l2_test.shape = " + str(l_2_test.shape))
print("y_train.shape = " + str(y_train.shape))
print("y_test.shape = " + str(y_test.shape))
# +
#Define sequence templates
sequence_templates = [
'$' * i + '@' * (seq_length - i)
for i in range(seq_length+1)
]
sequence_masks = [
np.array([1 if sequence_templates[i][j] == '$' else 0 for j in range(len(sequence_templates[i]))])
for i in range(seq_length+1)
]
# +
#Calculate background distributions
x_means = []
x_mean_logits = []
for i in range(seq_length + 1) :
x_means.append(np.ones((x_1_train.shape[2], x_1_train.shape[3])) * 0.05)
x_mean_logits.append(np.zeros((x_1_train.shape[2], x_1_train.shape[3])))
# +
#Visualize a few background sequence distributions
visualize_len = 67
plot_protein_logo(residue_map, np.copy(x_means[visualize_len]), sequence_template=sequence_templates[visualize_len], figsize=(12, 1), logo_height=1.0, plot_start=0, plot_end=81)
visualize_len = 72
plot_protein_logo(residue_map, np.copy(x_means[visualize_len]), sequence_template=sequence_templates[visualize_len], figsize=(12, 1), logo_height=1.0, plot_start=0, plot_end=81)
visualize_len = 81
plot_protein_logo(residue_map, np.copy(x_means[visualize_len]), sequence_template=sequence_templates[visualize_len], figsize=(12, 1), logo_height=1.0, plot_start=0, plot_end=81)
# +
#Calculate mean training set kl-divergence against background
mean_kl_divs = []
for i in range(seq_length + 1) :
x_train_len = x_1_train[np.ravel(l_1_train) == i, ...]
if x_train_len.shape[0] > 0 :
x_train_clipped_len = np.clip(np.copy(x_train_len[:, 0, :, :]), 1e-8, 1. - 1e-8)
kl_divs = np.sum(x_train_clipped_len * np.log(x_train_clipped_len / np.tile(np.expand_dims(x_means[i], axis=0), (x_train_clipped_len.shape[0], 1, 1))), axis=-1) / np.log(2.0)
x_mean_kl_divs = np.sum(kl_divs * sequence_masks[i], axis=-1) / np.sum(sequence_masks[i])
x_mean_kl_div = np.mean(x_mean_kl_divs)
mean_kl_divs.append(x_mean_kl_div)
print("[Length = " + str(i) + "] Mean KL Div against background (bits) = " + str(x_mean_kl_div))
else :
mean_kl_divs.append(0)
# +
from tensorflow.python.framework import ops
#Stochastic Binarized Neuron helper functions (Tensorflow)
#ST Estimator code adopted from https://r2rt.com/beyond-binary-ternary-and-one-hot-neurons.html
#See Github https://github.com/spitis/
def st_sampled_softmax(logits):
    """Sample a one-hot row per logit row with a straight-through gradient.

    Forward pass: softmax -> multinomial sample -> one-hot, materialized as
    ceil(one_hot * probs). Backward pass: the Ceil op is overridden to
    Identity and the Mul to the custom STMul gradient below, so gradients
    flow as if through the softmax probabilities.
    """
    with ops.name_scope("STSampledSoftmax") as namescope :
        nt_probs = tf.nn.softmax(logits)
        onehot_dim = logits.get_shape().as_list()[1]
        sampled_onehot = tf.one_hot(tf.squeeze(tf.multinomial(logits, 1), 1), onehot_dim, 1.0, 0.0)
        with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):
            return tf.ceil(sampled_onehot * nt_probs)

def st_hardmax_softmax(logits):
    """Deterministic variant of st_sampled_softmax: argmax instead of sampling."""
    with ops.name_scope("STHardmaxSoftmax") as namescope :
        nt_probs = tf.nn.softmax(logits)
        onehot_dim = logits.get_shape().as_list()[1]
        sampled_onehot = tf.one_hot(tf.argmax(nt_probs, 1), onehot_dim, 1.0, 0.0)
        with tf.get_default_graph().gradient_override_map({'Ceil': 'Identity', 'Mul': 'STMul'}):
            return tf.ceil(sampled_onehot * nt_probs)

@ops.RegisterGradient("STMul")
def st_mul(op, grad):
    # Straight-through gradient: pass the incoming gradient to both operands.
    return [grad, grad]

#Gumbel Distribution Sampler
def gumbel_softmax(logits, temperature=0.5) :
    """Draw a relaxed (continuous) one-hot sample from logits at the given
    Gumbel-softmax temperature."""
    gumbel_dist = tf.contrib.distributions.RelaxedOneHotCategorical(temperature, logits=logits)
    batch_dim = logits.get_shape().as_list()[0]   # unused; kept as in original
    onehot_dim = logits.get_shape().as_list()[1]  # unused; kept as in original
    return gumbel_dist.sample()
# +
#PWM Masking and Sampling helper functions
def mask_pwm(inputs) :
    """Blend a PWM with a fixed template: editable positions (mask == 1) come
    from the PWM, fixed positions from the one-hot template."""
    pwm, onehot_template, onehot_mask = inputs
    editable_part = pwm * onehot_mask
    return editable_part + onehot_template
def sample_pwm_st(pwm_logits) :
    """Sample one-hot sequences from PWM logits using the straight-through
    estimator so gradients flow through the softmax probabilities."""
    n_sequences = K.shape(pwm_logits)[0]
    seq_length = K.shape(pwm_logits)[2]

    # Flatten (batch, 1, length, 20) -> (batch*length, 20) so each position
    # is sampled independently, then restore the original layout.
    flat_pwm = K.reshape(pwm_logits, (n_sequences * seq_length, 20))
    sampled_pwm = st_sampled_softmax(flat_pwm)

    return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 20))

def sample_pwm_gumbel(pwm_logits) :
    """Sample relaxed one-hot sequences from PWM logits via Gumbel-softmax."""
    n_sequences = K.shape(pwm_logits)[0]
    seq_length = K.shape(pwm_logits)[2]

    flat_pwm = K.reshape(pwm_logits, (n_sequences * seq_length, 20))
    sampled_pwm = gumbel_softmax(flat_pwm, temperature=0.5)

    return K.reshape(sampled_pwm, (n_sequences, 1, seq_length, 20))
#Generator helper functions
def initialize_sequence_templates(generator, encoder, sequence_templates, background_matrices) :
    """Load fixed sequence templates and editable-position masks into the
    generator's 'template_dense' / 'mask_dense' embedding layers and freeze them.

    One embedding row per template (class id). Characters other than '$'/'@'
    are hard-coded to their one-hot residue; '$' marks fully editable positions
    (mask = 1). NOTE(review): `embedding_backgrounds` is computed from
    `background_matrices` but never written into any layer here — presumably
    dead code or handled elsewhere; confirm before removing.
    """
    embedding_templates = []
    embedding_masks = []
    embedding_backgrounds = []
    for k in range(len(sequence_templates)) :
        sequence_template = sequence_templates[k]
        # Encode template string into a (1, L, 20) one-hot matrix.
        onehot_template = encoder(sequence_template).reshape((1, len(sequence_template), 20))
        for j in range(len(sequence_template)) :
            if sequence_template[j] not in ['$', '@'] :
                # Fixed position: force a clean one-hot at the encoded residue.
                nt_ix = np.argmax(onehot_template[0, j, :])
                onehot_template[:, j, :] = 0.0
                onehot_template[:, j, nt_ix] = 1.0
        onehot_mask = np.zeros((1, len(sequence_template), 20))
        for j in range(len(sequence_template)) :
            if sequence_template[j] == '$' :
                # '$' = freely optimizable position.
                onehot_mask[:, j, :] = 1.0
        embedding_templates.append(onehot_template.reshape(1, -1))
        embedding_masks.append(onehot_mask.reshape(1, -1))
        embedding_backgrounds.append(background_matrices[k].reshape(1, -1))
    embedding_templates = np.concatenate(embedding_templates, axis=0)
    embedding_masks = np.concatenate(embedding_masks, axis=0)
    embedding_backgrounds = np.concatenate(embedding_backgrounds, axis=0)
    # Install as frozen embedding weights (rows indexed by class id).
    generator.get_layer('template_dense').set_weights([embedding_templates])
    generator.get_layer('template_dense').trainable = False
    generator.get_layer('mask_dense').set_weights([embedding_masks])
    generator.get_layer('mask_dense').trainable = False
#Generator construction function
def build_sampler(batch_size, seq_length, n_classes=1) :
    """Build a PWM sampler closure that applies templates/masks per class id.

    Returns _sampler_func(class_input, scaled_pwm, scale) ->
    (pwm, onehot_mask, scaled_pwm). The template/mask embedding layers created
    here are populated later by initialize_sequence_templates.
    """
    #Initialize Reshape layer
    reshape_layer = Reshape((1, seq_length, 20))
    #Initialize background matrix
    onehot_background_dense = Embedding(n_classes, seq_length * 20, embeddings_initializer='zeros', name='background_dense')
    #Initialize template and mask matrices
    onehot_template_dense = Embedding(n_classes, seq_length * 20, embeddings_initializer='zeros', name='template_dense')
    onehot_mask_dense = Embedding(n_classes, seq_length * 20, embeddings_initializer='ones', name='mask_dense')
    #Initialize Templating and Masking Lambda layer
    masking_layer = Lambda(mask_pwm, output_shape = (1, seq_length, 20), name='masking_layer')
    # Samples a uniform-random PWM (zero logits), independent of x's values.
    sample_layer = Lambda(lambda x: sample_pwm_st(K.zeros_like(x)))
    # Mixes the scaled PWM with the random sample: x0 + x1 * (1 - scale).
    background_layer = Lambda(lambda x: x[0] + x[1] * (1. - x[2]), name='background_layer')
    def _sampler_func(class_input, scaled_pwm, scale) :
        #Get Template and Mask
        onehot_template = reshape_layer(onehot_template_dense(class_input))
        onehot_mask = reshape_layer(onehot_mask_dense(class_input))
        #Add Template and Multiply Mask
        pwm = masking_layer([background_layer([scaled_pwm, sample_layer(scaled_pwm), scale]), onehot_template, onehot_mask])
        return pwm, onehot_mask, scaled_pwm
    return _sampler_func
# +
#Scrambler network definition
def sample_mask_gumbel(pwm_logits) :
    """Gumbel-sample a soft binary keep/drop mask from 2-channel logits.

    Input shape (N, 1, L, 2); returns (N, 1, L, 1) — channel 0 of the
    two-way relaxed one-hot sample at temperature 0.5.
    """
    batch_n = K.shape(pwm_logits)[0]
    length = K.shape(pwm_logits)[2]
    logits_2d = K.reshape(pwm_logits, (batch_n * length, 2))
    sampled_2d = gumbel_softmax(logits_2d, temperature=0.5)
    sampled_4d = K.reshape(sampled_2d, (batch_n, 1, length, 2))
    return sampled_4d[..., :1]
def make_resblock(n_channels=64, window_size=8, dilation_rate=1, group_ix=0, layer_ix=0, drop_rate=0.0) :
    """Create one pre-activation residual block (BN -> ReLU -> Conv, twice, + skip).

    Layers are constructed once here (names encode group/layer indices) and
    reused by the returned _resblock_func, so weights are shared across calls.
    Optional dropout after the second conv when drop_rate > 0.
    """
    #Initialize res block layers
    batch_norm_0 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_batch_norm_0')
    relu_0 = Lambda(lambda x: K.relu(x, alpha=0.0))
    conv_0 = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_0')
    batch_norm_1 = BatchNormalization(name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_batch_norm_1')
    relu_1 = Lambda(lambda x: K.relu(x, alpha=0.0))
    conv_1 = Conv2D(n_channels, (1, window_size), dilation_rate=dilation_rate, strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_conv_1')
    skip_1 = Lambda(lambda x: x[0] + x[1], name='scrambler_resblock_' + str(group_ix) + '_' + str(layer_ix) + '_skip_1')
    drop_1 = None
    if drop_rate > 0.0 :
        drop_1 = Dropout(drop_rate)
    #Execute res block
    def _resblock_func(input_tensor) :
        # Pre-activation ordering: BN and ReLU come before each conv.
        batch_norm_0_out = batch_norm_0(input_tensor)
        relu_0_out = relu_0(batch_norm_0_out)
        conv_0_out = conv_0(relu_0_out)
        batch_norm_1_out = batch_norm_1(conv_0_out)
        relu_1_out = relu_1(batch_norm_1_out)
        if drop_rate > 0.0 :
            conv_1_out = drop_1(conv_1(relu_1_out))
        else :
            conv_1_out = conv_1(relu_1_out)
        # Identity skip connection back to the block input.
        skip_1_out = skip_1([conv_1_out, input_tensor])
        return skip_1_out
    return _resblock_func
def load_scrambler_network(n_groups=1, n_resblocks_per_group=4, n_channels=32, window_size=8, dilation_rates=[1], drop_rate=0.0) :
    """Build the scrambler: grouped dilated res-blocks with per-group skip convs.

    Returns _scrambler_func(sequence_input) ->
    (scaled_inputs, importance_scores_soft, importance_scores_sampled) where the
    final 2-channel conv is reduced to one channel via softmax (soft scores)
    and via Gumbel sampling (hard-ish scores used to scale the input).
    """
    #Discriminator network definition
    conv_0 = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_conv_0')
    skip_convs = []
    resblock_groups = []
    for group_ix in range(n_groups) :
        # One 1x1 skip conv per group; dilation rate varies per group.
        skip_convs.append(Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_skip_conv_' + str(group_ix)))
        resblocks = []
        for layer_ix in range(n_resblocks_per_group) :
            resblocks.append(make_resblock(n_channels=n_channels, window_size=window_size, dilation_rate=dilation_rates[group_ix], group_ix=group_ix, layer_ix=layer_ix, drop_rate=drop_rate))
        resblock_groups.append(resblocks)
    last_block_conv = Conv2D(n_channels, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_last_block_conv')
    skip_add = Lambda(lambda x: x[0] + x[1], name='scrambler_skip_add')
    final_conv_2_channels = Conv2D(2, (1, 1), strides=(1, 1), padding='same', activation='linear', kernel_initializer='glorot_normal', name='scrambler_final_conv')
    # Soft importance score: softmax over the 2 channels, keep channel 0.
    final_conv_sigm = Lambda(lambda x: K.softmax(x, axis=-1)[..., :1])
    # Sampled importance score via the Gumbel relaxation.
    final_conv_gumbel = Lambda(lambda x: sample_mask_gumbel(x))
    # Broadcast the 1-channel score across all 20 residue channels of the input.
    scale_inputs = Lambda(lambda x: x[1] * K.tile(x[0], (1, 1, 1, 20)), name='scrambler_input_scale')
    def _scrambler_func(sequence_input) :
        conv_0_out = conv_0(sequence_input)
        #Connect group of res blocks
        output_tensor = conv_0_out
        #Res block group execution
        skip_conv_outs = []
        for group_ix in range(n_groups) :
            # Tap the group's input through its skip conv before the res blocks.
            skip_conv_out = skip_convs[group_ix](output_tensor)
            skip_conv_outs.append(skip_conv_out)
            for layer_ix in range(n_resblocks_per_group) :
                output_tensor = resblock_groups[group_ix][layer_ix](output_tensor)
        #Last res block extr conv
        last_block_conv_out = last_block_conv(output_tensor)
        skip_add_out = last_block_conv_out
        for group_ix in range(n_groups) :
            skip_add_out = skip_add([skip_add_out, skip_conv_outs[group_ix]])
        #Final conv out
        final_conv_2_channels_out = final_conv_2_channels(skip_add_out)
        final_conv_sigm_out = final_conv_sigm(final_conv_2_channels_out)
        final_conv_gumbel_out = final_conv_gumbel(final_conv_2_channels_out)
        #Scale inputs by importance scores
        scaled_inputs = scale_inputs([final_conv_gumbel_out, sequence_input])
        return scaled_inputs, final_conv_sigm_out, final_conv_gumbel_out
    return _scrambler_func
# +
#Keras loss functions
def get_sigmoid_kl_divergence() :
    """Return a Keras loss computing the Bernoulli KL divergence KL(y_true || y_pred),
    averaged over the last axis. Probabilities are clipped away from 0/1 so the
    logarithms stay finite."""
    def _kl_divergence(y_true, y_pred) :
        p = K.clip(y_true, K.epsilon(), 1.0 - K.epsilon())
        q = K.clip(y_pred, K.epsilon(), 1.0 - K.epsilon())
        positive_term = p * K.log(p / q)
        negative_term = (1.0 - p) * K.log((1.0 - p) / (1.0 - q))
        return K.mean(positive_term + negative_term, axis=-1)
    return _kl_divergence
def get_margin_lum_ame_masked(pwm_start, pwm_end) :
    """Return a hinge loss on the mean unmasked importance score in [pwm_start, pwm_end).

    Penalizes only the amount by which the masked mean score exceeds the
    per-sample ceiling max_lum (zero loss below the ceiling).
    """
    def _margin_lum_ame(importance_scores, pwm_mask, max_lum) :
        # Per-position scores (channel 0) within the window.
        p_ons = importance_scores[:, 0, pwm_start:pwm_end, 0]
        # Position is "unmasked" if any residue channel of the mask is set.
        mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1)
        n_unmasked = K.sum(mask, axis=-1)
        # Guard against divide-by-zero: fall back to max_lum (=> zero margin)
        # when every position is masked.
        mean_p_on = K.switch(n_unmasked > 0, K.sum(p_ons * mask, axis=-1) / n_unmasked, max_lum[:, 0])
        margin_p_on = K.switch(mean_p_on > max_lum[:, 0], mean_p_on - max_lum[:, 0], K.zeros_like(mean_p_on))
        return margin_p_on
    return _margin_lum_ame
def get_target_lum_sme_masked(pwm_start, pwm_end) :
    """Return a squared-error loss pulling the mean unmasked importance score in
    [pwm_start, pwm_end) toward a per-sample target value."""
    def _target_lum_sme(importance_scores, pwm_mask, target_lum) :
        window_scores = importance_scores[:, 0, pwm_start:pwm_end, 0]
        # A position counts as unmasked when any residue channel is set.
        position_mask = K.max(pwm_mask[:, 0, pwm_start:pwm_end, :], axis=-1)
        n_unmasked = K.sum(position_mask, axis=-1)
        # When everything is masked, use the target itself => zero loss.
        mean_score = K.switch(n_unmasked > 0, K.sum(window_scores * position_mask, axis=-1) / n_unmasked, target_lum[:, 0])
        return (mean_score - target_lum[:, 0])**2
    return _target_lum_sme
def get_weighted_loss(loss_coeff=1.) :
    """Return a Keras 'loss' that ignores y_true and scales y_pred by loss_coeff.

    Intended for models whose outputs already ARE loss values; loss_coeff
    weights each output's contribution to the total objective.
    """
    def _min_pred(y_true, y_pred) :
        return y_pred * loss_coeff
    return _min_pred
# +
#Initialize Encoder and Decoder networks
batch_size = 32
seq_length = 81  # fixed protein sequence length used throughout this notebook
#Resnet parameters
resnet_n_groups = 5
resnet_n_resblocks_per_group = 4
resnet_n_channels = 48
resnet_window_size = 3
# Dilation rate per group (expands then contracts the receptive field).
resnet_dilation_rates = [1, 2, 4, 2, 1]
resnet_drop_rate = 0.0
#Load scrambler
scrambler = load_scrambler_network(
    n_groups=resnet_n_groups,
    n_resblocks_per_group=resnet_n_resblocks_per_group,
    n_channels=resnet_n_channels, window_size=resnet_window_size,
    dilation_rates=resnet_dilation_rates,
    drop_rate=resnet_drop_rate
)
#Load sampler
# One class id per possible sequence length (0..seq_length).
sampler = build_sampler(batch_size, seq_length, n_classes=seq_length+1)
# +
#Load predictor
predictor_path = 'saved_models/ppi_rnn_baker_big_set_5x_negatives_classifier_symmetric_drop_25_5x_negatives_balanced_partitioned_data_epoch_10.h5'
predictor = load_model(predictor_path, custom_objects={ 'sigmoid_nll' : get_sigmoid_kl_divergence() })
# The predictor stays frozen; the compile below only satisfies Keras, it is
# never trained directly.
predictor.trainable = False
predictor.compile(loss='mean_squared_error', optimizer=keras.optimizers.SGD(lr=0.1))
# +
#Build scrambler model
# Standalone inference model: input sequence + class id -> (scrambled PWM,
# soft importance scores).
scrambler_class = Input(shape=(1,), name='scrambler_class')
scrambler_input = Input(shape=(1, seq_length, 20), name='scrambler_input')
scrambled_pwm, importance_scores, importance_scores_sampled = scrambler(scrambler_input)
pwm, pwm_mask, pwm_no_bg = sampler(scrambler_class, scrambled_pwm, importance_scores_sampled)
zeropad_layer = Lambda(lambda x: x[0] * x[1], name='zeropad')
sampled_pwm_zeropad = zeropad_layer([pwm, pwm_mask])
scrambler_model = Model([scrambler_input, scrambler_class], [pwm, importance_scores])
#Initialize Sequence Templates and Masks
initialize_sequence_templates(scrambler_model, encoder, sequence_templates, x_means)
# Compile required by Keras for predict(); this model is trained only through
# loss_model below.
scrambler_model.compile(
    optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999),
    loss='mean_squared_error'
)
# +
#Set target lum
# Target mean importance ("luminosity") of 0.1 for every length class id
# (indices 0..seq_length), stored as plain lists for the embedding setup.
conservation_target_lum = np.full(seq_length + 1, 0.1).tolist()
entropy_target_lum = np.full(seq_length + 1, 0.1).tolist()
# +
#Helper function for setting sequence-length-specific parameters
def initialize_sequence_length_params(model, background_matrix_list, conservation_target_lum_list, entropy_target_lum_list) :
    """Load per-length constants into the loss model's embedding layers and freeze them.

    Row k of each embedding corresponds to length-class id k: the background
    (mean one-hot) matrix plus the conservation / entropy target values.
    Skipped entirely when the model has no 'x_mean_dense' layer (i.e. it is not
    the loss model) — NOTE(review): the original guard scope was reconstructed
    from whitespace-stripped source; confirm all three layers belong inside it.
    """
    flat_backgrounds = np.concatenate([bg.reshape(1, -1) for bg in background_matrix_list], axis=0)
    flat_conservation = np.concatenate([np.array([v]).reshape(1, -1) for v in conservation_target_lum_list], axis=0)
    flat_entropy = np.concatenate([np.array([v]).reshape(1, -1) for v in entropy_target_lum_list], axis=0)
    layer_names = [layer.name for layer in model.layers]
    if 'x_mean_dense' in layer_names :
        model.get_layer('x_mean_dense').set_weights([flat_backgrounds])
        model.get_layer('x_mean_dense').trainable = False
        model.get_layer('conservation_target_lum_dense').set_weights([flat_conservation])
        model.get_layer('conservation_target_lum_dense').trainable = False
        model.get_layer('entropy_target_lum_dense').set_weights([flat_entropy])
        model.get_layer('entropy_target_lum_dense').trainable = False
# +
#Build Auto-scrambler pipeline
# Twin-input pipeline: both binder sequences are scrambled, the frozen
# predictor scores the original and scrambled pairs, and three loss tensors
# (NLL / conservation / entropy) become the model outputs.
#Define model inputs
ae_scrambler_class_1 = Input(shape=(1,), name='ae_scrambler_class_1')
ae_scrambler_input_1 = Input(shape=(1, seq_length, 20), name='ae_scrambler_input_1')
ae_scrambler_class_2 = Input(shape=(1,), name='ae_scrambler_class_2')
ae_scrambler_input_2 = Input(shape=(1, seq_length, 20), name='ae_scrambler_input_2')
#ae_label_input = Input(shape=(1,), name='ae_label_input')
# Shared scrambler weights across both binders.
scrambled_in_1, importance_scores_1, importance_scores_1_sampled = scrambler(ae_scrambler_input_1)
scrambled_in_2, importance_scores_2, importance_scores_2_sampled = scrambler(ae_scrambler_input_2)
#Run encoder and decoder
scrambled_pwm_1, pwm_mask_1, scrambled_pwm_1_no_bg = sampler(ae_scrambler_class_1, scrambled_in_1, importance_scores_1_sampled)
scrambled_pwm_2, pwm_mask_2, scrambled_pwm_2_no_bg = sampler(ae_scrambler_class_2, scrambled_in_2, importance_scores_2_sampled)
# Zero out template (non-editable) positions before feeding the predictor.
zeropad_layer_1 = Lambda(lambda x: x[0] * x[1], name='zeropad_1')
zeropad_layer_2 = Lambda(lambda x: x[0] * x[1], name='zeropad_2')
scrambled_pwm_1_zeropad = zeropad_layer_1([scrambled_pwm_1, pwm_mask_1])
scrambled_pwm_2_zeropad = zeropad_layer_2([scrambled_pwm_2, pwm_mask_2])
#Make reference prediction on non-scrambled input sequence
# Drop the singleton channel axis: (N, 1, L, 20) -> (N, L, 20).
collapse_input_layer_non_scrambled = Lambda(lambda x: x[:, 0, :, :], output_shape=(seq_length, 20))
collapsed_in_1_non_scrambled = collapse_input_layer_non_scrambled(ae_scrambler_input_1)
collapsed_in_2_non_scrambled = collapse_input_layer_non_scrambled(ae_scrambler_input_2)
y_pred_non_scrambled = predictor([collapsed_in_1_non_scrambled, collapsed_in_2_non_scrambled])#ae_label_input
#Make prediction on scrambled sequence samples
collapse_input_layer = Lambda(lambda x: x[:, 0, :, :], output_shape=(seq_length, 20))
collapsed_in_1 = collapse_input_layer(scrambled_pwm_1_zeropad)
collapsed_in_2 = collapse_input_layer(scrambled_pwm_2_zeropad)
y_pred_scrambled = predictor([collapsed_in_1, collapsed_in_2])
#Cost function parameters
pwm_start = 0
pwm_end = 81
#Define background matrix embeddings and target bits
seq_reshape_layer = Reshape((1, seq_length, 20))
flatten_bit_layer = Reshape((1,))
# Per-length constants, filled in later by initialize_sequence_length_params.
x_mean_dense = Embedding(seq_length+1, seq_length * 20, embeddings_initializer='zeros', name='x_mean_dense')
conservation_target_lum_dense = Embedding(seq_length+1, 1, embeddings_initializer='zeros', name='conservation_target_lum_dense')
entropy_target_lum_dense = Embedding(seq_length+1, 1, embeddings_initializer='zeros', name='entropy_target_lum_dense')
x_mean_len_1 = seq_reshape_layer(x_mean_dense(ae_scrambler_class_1))
x_mean_len_2 = seq_reshape_layer(x_mean_dense(ae_scrambler_class_2))
conservation_target_lum_len_1 = flatten_bit_layer(conservation_target_lum_dense(ae_scrambler_class_1))
conservation_target_lum_len_2 = flatten_bit_layer(conservation_target_lum_dense(ae_scrambler_class_2))
entropy_target_lum_len_1 = flatten_bit_layer(entropy_target_lum_dense(ae_scrambler_class_1))
entropy_target_lum_len_2 = flatten_bit_layer(entropy_target_lum_dense(ae_scrambler_class_2))
#NLL cost
nll_loss_func = get_sigmoid_kl_divergence()
#Conservation cost
conservation_loss_func = get_target_lum_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end)
#Entropy cost
entropy_loss_func = get_target_lum_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end)
#entropy_loss_func = get_margin_lum_ame_masked(pwm_start=pwm_start, pwm_end=pwm_end)
#Define annealing coefficient
# anneal_coeff crossfades conservation (weight = coeff) vs entropy
# (weight = 1 - coeff); updated per epoch via EpochVariableCallback.
anneal_coeff = K.variable(1.0)
#Execute NLL cost
nll_loss = Lambda(lambda x: nll_loss_func(x[0], x[1]), name='nll')([
    y_pred_non_scrambled,
    y_pred_scrambled
])
#Execute conservation cost
# Averaged over both binders.
conservation_loss = Lambda(lambda x: anneal_coeff * (0.5 * conservation_loss_func(x[0], x[1], x[2]) + 0.5 * conservation_loss_func(x[3], x[4], x[5])), name='conservation')([
    importance_scores_1,
    pwm_mask_1,
    conservation_target_lum_len_1,
    importance_scores_2,
    pwm_mask_2,
    conservation_target_lum_len_2
])
#Execute entropy cost
entropy_loss = Lambda(lambda x: (1. - anneal_coeff) * (0.5 * entropy_loss_func(x[0], x[1], x[2]) + 0.5 * entropy_loss_func(x[3], x[4], x[5])), name='entropy')([
    importance_scores_1,
    pwm_mask_1,
    entropy_target_lum_len_1,
    importance_scores_2,
    pwm_mask_2,
    entropy_target_lum_len_2
])
loss_model = Model(
    [ae_scrambler_class_1, ae_scrambler_input_1, ae_scrambler_class_2, ae_scrambler_input_2], #ae_label_input
    [nll_loss, conservation_loss, entropy_loss]
)
#Initialize Sequence Templates and Masks
initialize_sequence_templates(loss_model, encoder, sequence_templates, x_mean_logits)
#Initialize Sequence Length Parameters
initialize_sequence_length_params(loss_model, x_means, conservation_target_lum, entropy_target_lum)
# Each output IS a loss tensor, so the compiled losses just pass it through
# with a weight (get_weighted_loss); entropy is weighted 10x.
loss_model.compile(
    optimizer=keras.optimizers.Adam(lr=0.0001, beta_1=0.5, beta_2=0.9),
    loss={
        'nll' : get_weighted_loss(loss_coeff=1.0),
        'conservation' : get_weighted_loss(loss_coeff=1.0),
        'entropy' : get_weighted_loss(loss_coeff=10.0)
    }
)
# -
scrambler_model.summary()
loss_model.summary()
# +
#Training configuration
#Define number of training epochs
n_epochs = 20
#Define experiment suffix (optional)
experiment_suffix = "_kl_divergence_zeropad_gumbel_rand_bg_lum"
#Define anneal function
def _anneal_func(val, epoch, n_epochs=n_epochs) :
    # Anneal schedule: conservation loss is active only during epoch 0
    # (coeff 1.0), afterwards the entropy loss takes over (coeff 0.0).
    # `val` (the current variable value) is unused.
    if epoch in [0] :
        return 1.0
    return 0.0
# Encode the architecture hyperparameters into the saved-model name.
architecture_str = "resnet_" + str(resnet_n_groups) + "_" + str(resnet_n_resblocks_per_group) + "_" + str(resnet_n_channels) + "_" + str(resnet_window_size) + "_" + str(resnet_drop_rate).replace(".", "")
model_name = "autoscrambler_dataset_" + dataset_name + "_" + architecture_str + "_n_epochs_" + str(n_epochs) + "_target_lum_" + str(entropy_target_lum[0]).replace(".", "") + experiment_suffix
print("Model save name = " + model_name)
# +
#Execute training procedure
callbacks =[
    #ModelCheckpoint("model_checkpoints/" + model_name + "_epoch_{epoch:02d}.hdf5", monitor='val_loss', mode='min', period=10, save_weights_only=True),
    # Updates anneal_coeff at the start of each epoch per _anneal_func.
    EpochVariableCallback(anneal_coeff, _anneal_func)
]
# Dummy targets: the model outputs are already loss values, so y is ignored
# by get_weighted_loss and only needs the right shape.
s_train = np.zeros((x_1_train.shape[0], 1))
s_test = np.zeros((x_1_test.shape[0], 1))
# train the autoencoder
train_history = loss_model.fit(
    [l_1_train, x_1_train, l_2_train, x_2_train], #y_train
    [s_train, s_train, s_train],
    shuffle=True,
    epochs=n_epochs,
    batch_size=batch_size,
    validation_data=(
        [l_1_test, x_1_test, l_2_test, x_2_test], #y_test
        [s_test, s_test, s_test]
    ),
    callbacks=callbacks
)
# +
# Plot training/validation curves for the three loss components side by side
# (green = train, orange = validation).
f, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(3 * 4, 3))
# Use actual epoch count in case training stopped early.
n_epochs_actual = len(train_history.history['nll_loss'])
ax1.plot(np.arange(1, n_epochs_actual + 1), train_history.history['nll_loss'], linewidth=3, color='green')
ax1.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_nll_loss'], linewidth=3, color='orange')
plt.sca(ax1)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("NLL", fontsize=14)
plt.xlim(1, n_epochs_actual)
plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12)
plt.yticks(fontsize=12)
ax2.plot(np.arange(1, n_epochs_actual + 1), train_history.history['entropy_loss'], linewidth=3, color='green')
ax2.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_entropy_loss'], linewidth=3, color='orange')
plt.sca(ax2)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("Entropy Loss", fontsize=14)
plt.xlim(1, n_epochs_actual)
plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12)
plt.yticks(fontsize=12)
ax3.plot(np.arange(1, n_epochs_actual + 1), train_history.history['conservation_loss'], linewidth=3, color='green')
ax3.plot(np.arange(1, n_epochs_actual + 1), train_history.history['val_conservation_loss'], linewidth=3, color='orange')
plt.sca(ax3)
plt.xlabel("Epochs", fontsize=14)
plt.ylabel("Conservation Loss", fontsize=14)
plt.xlim(1, n_epochs_actual)
plt.xticks([1, n_epochs_actual], [1, n_epochs_actual], fontsize=12)
plt.yticks(fontsize=12)
plt.tight_layout()
plt.show()
# +
# Save model and weights
save_dir = 'saved_models'
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name + '.h5')
scrambler_model.save(model_path)
print('Saved scrambler model at %s ' % (model_path))
# +
#Load models
save_dir = 'saved_models'
#model_name = "autoscrambler_dataset_coiled_coil_binders_inverted_scores_sample_mode_st_n_samples_32_resnet_5_4_48_3_00_n_epochs_20_target_bits_24_kl_divergence_log_prob"
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name + '.h5')
# Custom Lambda functions must be supplied for deserialization.
scrambler_model = load_model(model_path, custom_objects={
    'sample_mask_gumbel' : sample_mask_gumbel,
    'sample_pwm_st' : sample_pwm_st
})
print('Loaded scrambler model %s ' % (model_path))
# +
#Visualize a few reconstructed sequence patterns
# For each test sequence: original logo, scrambled PWM logo, and the original
# weighted by the learned importance scores.
pwm_test, importance_scores = scrambler_model.predict_on_batch(x=[x_1_test[:32], l_1_test[:32]])
subtracted_pwm_test = x_1_test[:32] * importance_scores
for plot_i in range(0, 5) :
    print("Test sequence " + str(plot_i) + ":")
    plot_protein_logo(residue_map, x_1_test[plot_i, 0, :, :], sequence_template=sequence_templates[l_1_test[plot_i, 0]], figsize=(12, 1), plot_start=0, plot_end=96)
    plot_protein_logo(residue_map, pwm_test[plot_i, 0, :, :], sequence_template=sequence_templates[l_1_test[plot_i, 0]], figsize=(12, 1), plot_start=0, plot_end=96)
    plot_protein_logo(residue_map, subtracted_pwm_test[plot_i, 0, :, :], sequence_template=sequence_templates[l_1_test[plot_i, 0]], figsize=(12, 1), plot_start=0, plot_end=96)
# +
#Binder DHD_154
# Hand-picked coiled-coil binder pair; sequences are truncated to seq_length.
#seq_1 = ("TAEELLEVHKKSDRVTKEHLRVSEEILKVVEVLTRGEVSSEVLKRVLRKLEELTDKLRRVTEEQRRVVEKLN" + "#" * seq_length)[:81]
#seq_2 = ("DLEDLLRRLRRLVDEQRRLVEELERVSRRLEKAVRDNEDERELARLSREHSDIQDKHDKLAREILEVLKRLLERTE" + "#" * seq_length)[:81]
seq_1 = "TAEELLEVHKKSDRVTKEHLRVSEEILKVVEVLTRGEVSSEVLKRVLRKLEELTDKLRRVTEEQRRVVEKLN"[:81]
seq_2 = "DLEDLLRRLRRLVDEQRRLVEELERVSRRLEKAVRDNEDERELARLSREHSDIQDKHDKLAREILEVLKRLLERTE"[:81]
print("Seq 1 = " + seq_1)
print("Seq 2 = " + seq_2)
encoder = IdentityEncoder(81, residue_map)
# Tile each one-hot sequence (and its length) across the whole batch.
test_onehot_1 = np.tile(np.expand_dims(np.expand_dims(encoder(seq_1), axis=0), axis=0), (batch_size, 1, 1, 1))
test_onehot_2 = np.tile(np.expand_dims(np.expand_dims(encoder(seq_2), axis=0), axis=0), (batch_size, 1, 1, 1))
test_len_1 = np.tile(np.array([[len(seq_1)]]), (batch_size, 1))
test_len_2 = np.tile(np.array([[len(seq_2)]]), (batch_size, 1))
pred_interacts = predictor.predict(x=[test_onehot_1[:, 0, ...], test_onehot_2[:, 0, ...]])[0, 0]
print("Predicted interaction prob = " + str(round(pred_interacts, 4)))
# +
#Visualize a few reconstructed sequence patterns
save_figs = False
pair_name = "DHD_154"
# Scramble both binders and re-score the scrambled pair with the predictor.
pwm_test_1, importance_scores_1 = scrambler_model.predict_on_batch(x=[test_onehot_1, test_len_1])
pwm_test_2, importance_scores_2 = scrambler_model.predict_on_batch(x=[test_onehot_2, test_len_2])
scrambled_pred_interacts = predictor.predict(x=[pwm_test_1[:, 0, ...], pwm_test_2[:, 0, ...]])[:, 0]
print("Scrambler predictions = " + str(np.round(scrambled_pred_interacts[:10], 2)))
# Original one-hots weighted by importance scores (highlights kept positions).
subtracted_pwm_test_1 = test_onehot_1 * importance_scores_1
subtracted_pwm_test_2 = test_onehot_2 * importance_scores_2
print("Binder 1:")
plot_protein_logo(residue_map, test_onehot_1[0, 0, :, :], sequence_template=sequence_templates[test_len_1[0, 0]], figsize=(12, 1), plot_start=0, plot_end=96, save_figs=save_figs, fig_name=model_name + "_original_example_" + pair_name + "_binder_1")
plot_protein_logo(residue_map, pwm_test_1[0, 0, :, :], sequence_template=sequence_templates[test_len_1[0, 0]], figsize=(12, 1), plot_start=0, plot_end=96, save_figs=save_figs, fig_name=model_name + "_scrambled_example_" + pair_name + "_binder_1")
plot_protein_logo(residue_map, subtracted_pwm_test_1[0, 0, :, :], sequence_template=sequence_templates[test_len_1[0, 0]], figsize=(12, 1), plot_start=0, plot_end=96, save_figs=save_figs, fig_name=model_name + "_subtracted_example_" + pair_name + "_binder_1")
print("Binder 2:")
plot_protein_logo(residue_map, test_onehot_2[0, 0, :, :], sequence_template=sequence_templates[test_len_2[0, 0]], figsize=(12, 1), plot_start=0, plot_end=96, save_figs=save_figs, fig_name=model_name + "_original_example_" + pair_name + "_binder_2")
plot_protein_logo(residue_map, pwm_test_2[0, 0, :, :], sequence_template=sequence_templates[test_len_2[0, 0]], figsize=(12, 1), plot_start=0, plot_end=96, save_figs=save_figs, fig_name=model_name + "_scrambled_example_" + pair_name + "_binder_2")
plot_protein_logo(residue_map, subtracted_pwm_test_2[0, 0, :, :], sequence_template=sequence_templates[test_len_2[0, 0]], figsize=(12, 1), plot_start=0, plot_end=96, save_figs=save_figs, fig_name=model_name + "_subtracted_example_" + pair_name + "_binder_2")
# +
#Re-load cached dataframe (shuffled)
dataset_name = "coiled_coil_binders"
experiment = "coiled_coil_binders_alyssa"
data_df = pd.read_csv(experiment + ".csv", sep="\t")
print("len(data_df) = " + str(len(data_df)))
test_df = data_df.copy().reset_index(drop=True)
batch_size = 32
# Truncate to a whole number of batches.
test_df = test_df.iloc[:(len(test_df) // batch_size) * batch_size].copy().reset_index(drop=True)
print("len(test_df) = " + str(len(test_df)))
print(test_df.head())
# +
#Construct test data
# Build a single-batch isolearn DataGenerator that yields the whole (truncated)
# test set at once: two one-hot-encoded binder sequences, their lengths, and
# the binary 'interacts' label.
batch_size = 32
test_gen = iso.DataGenerator(
    # Fix: `np.int` was a deprecated alias of the builtin `int` and was removed
    # in NumPy 1.24; use `int` directly (identical behavior).
    np.arange(len(test_df), dtype=int),
    { 'df' : test_df },
    # One "batch" spanning every complete batch of rows.
    batch_size=(len(test_df) // batch_size) * batch_size,
    inputs = [
        {
            'id' : 'amino_seq_1',
            'source_type' : 'dataframe',
            'source' : 'df',
            #'extractor' : lambda row, index: (row['amino_seq_1'] + "#" * seq_length)[:seq_length],
            'extractor' : lambda row, index: row['amino_seq_1'],
            'encoder' : IdentityEncoder(seq_length, residue_map),
            'dim' : (1, seq_length, len(residue_map)),
            'sparsify' : False
        },
        {
            'id' : 'amino_seq_2',
            'source_type' : 'dataframe',
            'source' : 'df',
            #'extractor' : lambda row, index: row['amino_seq_2'] + "#" * seq_length)[:seq_length],
            'extractor' : lambda row, index: row['amino_seq_2'],
            'encoder' : IdentityEncoder(seq_length, residue_map),
            'dim' : (1, seq_length, len(residue_map)),
            'sparsify' : False
        },
        {
            # Raw sequence length, passed through unencoded.
            'id' : 'amino_seq_1_len',
            'source_type' : 'dataframe',
            'source' : 'df',
            'extractor' : lambda row, index: len(row['amino_seq_1']),
            'encoder' : lambda t: t,
            'dim' : (1,),
            'sparsify' : False
        },
        {
            'id' : 'amino_seq_2_len',
            'source_type' : 'dataframe',
            'source' : 'df',
            'extractor' : lambda row, index: len(row['amino_seq_2']),
            'encoder' : lambda t: t,
            'dim' : (1,),
            'sparsify' : False
        }
    ],
    outputs = [
        {
            'id' : 'interacts',
            'source_type' : 'dataframe',
            'source' : 'df',
            'extractor' : lambda row, index: row['interacts'],
            'transformer' : NopTransformer(1),
            'dim' : (1,),
            'sparsify' : False
        }
    ],
    randomizers = [],
    shuffle = False
)
#Load data matrices
# Single mega-batch: index 0 returns the entire test set.
[x_1_test, x_2_test, l_1_test, l_2_test], [y_test] = test_gen[0]
print("x_1_test.shape = " + str(x_1_test.shape))
print("x_2_test.shape = " + str(x_2_test.shape))
print("l_1_test.shape = " + str(l_1_test.shape))
print("l_2_test.shape = " + str(l_2_test.shape))
print("y_test.shape = " + str(y_test.shape))
# +
#Predict on test set
pwm_test_1, importance_scores_1 = scrambler_model.predict(x=[x_1_test, l_1_test], batch_size=32, verbose=True)
pwm_test_2, importance_scores_2 = scrambler_model.predict(x=[x_2_test, l_2_test], batch_size=32, verbose=True)
unscrambled_preds = predictor.predict(x=[x_1_test[:, 0, ...], x_2_test[:, 0, ...]], batch_size=32, verbose=True)[:, 0]
scrambled_preds = []
# Score each scrambled pair one sequence at a time.
for i in range(pwm_test_1.shape[0]) :
    if i % 100 == 0 :
        print("Predicting scrambled samples for sequence " + str(i) + "...")
    scrambled_pred_sample = predictor.predict(x=[pwm_test_1[i, ...], pwm_test_2[i, ...]], batch_size=32, verbose=False)[0, 0]
    scrambled_preds.append(scrambled_pred_sample)
scrambled_preds = np.array(scrambled_preds)
# +
# Overlaid density histograms of predictor outputs for unscrambled vs
# scrambled sequence pairs, with dashed lines marking each mean.
min_val = 0.0
max_val = 1.0
max_y_val = 8
n_bins = 25
save_figs = False
figsize = (6, 4)
measurements = [
    unscrambled_preds,
    scrambled_preds
]
colors = [
    'green',
    'red'
]
labels = [
    'Unscrambled',
    'Scrambled'
]
x_label = 'Prediction'
y_label = 'Density'
# Shared histogram range across all measurement sets...
min_hist_val = np.min(measurements[0])
max_hist_val = np.max(measurements[0])
for i in range(1, len(measurements)) :
    min_hist_val = min(min_hist_val, np.min(measurements[i]))
    max_hist_val = max(max_hist_val, np.max(measurements[i]))
# ...unless explicit bounds override it.
if min_val is not None :
    min_hist_val = min_val
if max_val is not None :
    max_hist_val = max_val
hists = []
bin_edges = []
means = []
for i in range(len(measurements)) :
    hist, b_edges = np.histogram(measurements[i], range=(min_hist_val, max_hist_val), bins=n_bins, density=True)
    hists.append(hist)
    bin_edges.append(b_edges)
    means.append(np.mean(measurements[i]))
bin_width = bin_edges[0][1] - bin_edges[0][0]
#Compare Log Likelihoods
f = plt.figure(figsize=figsize)
for i in range(len(measurements)) :
    # Bars centered on each bin.
    plt.bar(bin_edges[i][1:] - bin_width/2., hists[i], width=bin_width, linewidth=2, alpha=0.5, edgecolor='black', color=colors[i], label=labels[i])
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlim(min_hist_val, max_hist_val)
if max_y_val is not None :
    plt.ylim(0, max_y_val)
plt.xlabel(x_label, fontsize=14)
plt.ylabel(y_label, fontsize=14)
for i in range(len(measurements)) :
    plt.axvline(x=means[i], linewidth=2, color=colors[i], linestyle="--")
plt.legend(fontsize=14, loc='upper left')
plt.tight_layout()
if save_figs :
    fig_name = experiment + "_model_" + model_name + "_pos_hist"
    plt.savefig(fig_name + ".png", dpi=300, transparent=True)
    plt.savefig(fig_name + ".eps")
plt.show()
# +
#Store unscrambled and scrambled binding predictions
test_df['pred_interacts'] = np.round(unscrambled_preds, 2)
test_df['pred_interacts_scrambled'] = np.round(scrambled_preds, 2)
# Flatten importance scores to (n_sequences, seq_length) for saving.
flat_importance_scores_1 = importance_scores_1[:, 0, :, 0]
flat_importance_scores_2 = importance_scores_2[:, 0, :, 0]
short_model_name = "inclusion_target_lum_" + str(entropy_target_lum[0]).replace(".", "") + "_epochs_" + str(n_epochs) + experiment_suffix
test_df.to_csv(experiment + "_model_" + short_model_name + "_testset.csv", sep="\t", index=False)
np.save(experiment + "_model_" + short_model_name + "_testset_importance_scores_1", flat_importance_scores_1)
np.save(experiment + "_model_" + short_model_name + "_testset_importance_scores_2", flat_importance_scores_2)
# -
| analysis/coiled_coil_binders/train_scrambler_coiled_coil_binders_target_lum_01_epochs_20_gumbel_rand_bg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install transformers[sentencepiece]
# !pip install wandb
# +
import gc
import torch
import numpy as np
import pandas as pd
from transformers import AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer, DataCollatorWithPadding, EarlyStoppingCallback
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_fscore_support, accuracy_score, confusion_matrix
import json
from urllib.request import urlopen
import seaborn as sns
import matplotlib.pyplot as plt
import wandb
# %matplotlib inline
# %config InlineBackend.figure_format='retina'
# +
# Setup device
# Prefer the GPU when available. device_hf follows the HuggingFace pipeline
# convention: 0 = first CUDA device, -1 = CPU.
cuda_available = torch.cuda.is_available()
device_string = 'cuda' if cuda_available else 'cpu'
device_hf = 0 if cuda_available else -1
device = torch.device(device_string)
print("Device:", device)
NUM_WORKERS = 0
# +
# Setup wandb
# Interactive login (prompts for an API key if not cached); all runs log to
# the project named below.
wandb.login()
# %env WANDB_PROJECT=annotype_text_classification
# +
# Config
# Experiment hyperparameters; see inline notes for the non-obvious ones.
IGNORED_CLASSES = [] # e.g. set it to ['sentiment'] to remove the objects with the type of sentiment, from the databset
MODEL_NAME = 'distilbert-base-cased'
INPUT_TYPE = 'TEXT_HEAD' # Possible values: 'TEXT_HEAD', 'TEXT_ONLY', 'HEAD_ONLY'
TRAIN_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 64
LOGGING_STEPS = 100
EVAL_STRATEGY = 'steps'
SAVE_STRATEGY = 'steps'
WEIGHT_DECAY = 0.1
LOAD_BEST_MODEL_AT_END = True
NUM_TRAIN_EPOCHS = 10
# Stop if eval metric fails to improve for 4 consecutive evaluations.
CALLBACKS = [EarlyStoppingCallback(4)]
SEED = 0
DATA = 'MPQA3.0_v211021'
# TEST_SIZE = 0 means no held-out test split; 20% goes to validation.
TEST_SIZE = 0
VAL_SIZE = 0.2
# +
# Getting data & augmented data urls
# Google Drive share links for each dataset version; converted to direct
# download URLs below.
data_name_to_google_drive_url = {
    'MPQA3.0_v211021': 'https://drive.google.com/file/d/1e-pDfZ2cyBzgD9MEerP9YCcDnPvIQuGo/view?usp=sharing',
    'MPQA2.0_v211202': 'https://drive.google.com/file/d/1hVFG4lA-6W9OtWurU5EQ3wDtlwtO7iLY/view?usp=sharing'
}
# Get direct download link
def get_download_url_from_google_drive_url(google_drive_url):
    """Convert a Google Drive 'view' share URL into a direct-download URL.

    The file id is the 6th '/'-separated segment of the share link
    (https://drive.google.com/file/d/<id>/view...).
    """
    file_id = google_drive_url.split("/")[5]
    return f'https://drive.google.com/uc?id={file_id}&export=download'
# Data URL
google_drive_url = data_name_to_google_drive_url[DATA]
data_url = get_download_url_from_google_drive_url(google_drive_url)
# -
# Seed numpy and torch so shuffling/splits and weight init are reproducible.
np.random.seed(SEED)
torch.manual_seed(SEED)
# + [markdown] id="faEGTkqS7Hp4"
# # Preparing the dataset
# + id="nyymAwVCYxpf"
# Fetch the dataset
# FETCH_FROM_WEB=True downloads the JSON from Google Drive; otherwise a
# local copy produced by json2csds is read (Windows-style relative path).
FETCH_FROM_WEB = True ### Set it to true, to download the datasets from github and google drive ###
if FETCH_FROM_WEB:
    response = urlopen(data_url)
    csds_collection = json.loads(response.read())
else:
    file_address = '..\\json2csds\\data.json'
    with open(file_address) as file:
        csds_collection = json.load(file)
# + colab={"base_uri": "https://localhost:8080/"} id="zyWk_HPcZDG_" outputId="c7799125-54d5-431d-80bf-4bc4f1abdc78"
# Build parallel input/target lists from the CSDS collection, skipping
# any object whose annotation type is listed in IGNORED_CLASSES.
inputs_text = []
inputs_head = []
inputs_tuple_text_head = []
targets_annotype = []
n_samples = 0
for obj in csds_collection['csds_objects']:
    if obj['annotation_type'] in IGNORED_CLASSES:
        continue
    inputs_text.append(obj['text'])
    inputs_head.append(obj['head'])
    inputs_tuple_text_head.append((obj['text'], obj['head']))
    targets_annotype.append(obj['annotation_type'])
    n_samples += 1
i = 128 # A sample
print(f'inputs and targets for {i+1}-th csds object (out of {n_samples}):')
print('inputs_text:\t\t', inputs_text[i])
print('inputs_head:\t\t', inputs_head[i])
print('inputs_tuple_text_head:\t', inputs_tuple_text_head[i])
print('targets_annotype:\t', targets_annotype[i])
# + colab={"base_uri": "https://localhost:8080/"} id="YkFSm35wDmmJ" outputId="b401d0a1-57e7-4f7a-c3ff-d64c2ae5b602"
# Tally how many objects carry each annotation type, then fix the label
# set as the sorted list of observed types.
num_annotype = {}
for label in targets_annotype:
    if label in num_annotype:
        num_annotype[label] += 1
    else:
        num_annotype[label] = 1
print(sorted(num_annotype.items()))
classes = sorted(num_annotype)
# +
# Bidirectional class-name <-> class-id lookup tables
classname2classid = {name: idx for idx, name in enumerate(classes)}
classid2classname = {idx: name for idx, name in enumerate(classes)}
# +
# Encode every target as its integer class id
y = [classname2classid[name] for name in targets_annotype]
# +
# Shuffle and split the dataset into training and validation sets
# (plus an optional test set when TEST_SIZE > 0). `stratify` keeps class
# proportions identical across the splits.
TEST_SPLIT_SEED = 0
VAL_SPLIT_SEED = 0
if TEST_SIZE != 0 :
    # Train Val Test -> Train Val | Test
    X_train_val, X_test, y_train_val, y_test = train_test_split(
        np.array(inputs_tuple_text_head), y, test_size=TEST_SIZE,
        random_state=TEST_SPLIT_SEED, shuffle=True, stratify=y
    )
    # Column 0 of each (text, head) tuple is the sentence, column 1 the head.
    X_test_text, X_test_head = X_test[:, 0].tolist(), X_test[:, 1].tolist()
else:
    X_train_val = np.array(inputs_tuple_text_head)
    y_train_val = y
# Train Val -> Train | Val
X_train, X_val, y_train, y_val = train_test_split(
    X_train_val, y_train_val, test_size=VAL_SIZE,
    random_state=VAL_SPLIT_SEED, shuffle=True, stratify=y_train_val
)
X_train_text, X_train_head = X_train[:, 0].tolist(), X_train[:, 1].tolist()
X_val_text, X_val_head = X_val[:, 0].tolist(), X_val[:, 1].tolist()
# -
# # Preparing the model and torch dataset
# +
# Load the model, tokenizer
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
# num_labels sizes the freshly-initialized classification head;
# ignore_mismatched_sizes permits reusing a checkpoint whose head differs.
model = AutoModelForSequenceClassification.from_pretrained(
    MODEL_NAME, num_labels=len(classes), resume_download=True, ignore_mismatched_sizes=True
)
# +
# Tokenize the inputs.
# INPUT_TYPE selects what the encoder sees: sentence + head as a text
# pair, sentence only, or head only.  The branches are mutually exclusive,
# so an if/elif chain is used, and an unknown value now fails fast instead
# of silently leaving the *_tokenized variables undefined (the original
# independent `if` statements would skip every branch on a typo).
if INPUT_TYPE == 'TEXT_HEAD':
    X_train_tokenized = tokenizer(X_train_text, X_train_head, truncation=True)
    X_val_tokenized = tokenizer(X_val_text, X_val_head, truncation=True)
    if TEST_SIZE > 0:
        X_test_tokenized = tokenizer(X_test_text, X_test_head, truncation=True)
elif INPUT_TYPE == 'TEXT_ONLY':
    X_train_tokenized = tokenizer(X_train_text, truncation=True)
    X_val_tokenized = tokenizer(X_val_text, truncation=True)
    if TEST_SIZE > 0:
        X_test_tokenized = tokenizer(X_test_text, truncation=True)
elif INPUT_TYPE == 'HEAD_ONLY':
    X_train_tokenized = tokenizer(X_train_head, truncation=True)
    X_val_tokenized = tokenizer(X_val_head, truncation=True)
    if TEST_SIZE > 0:
        X_test_tokenized = tokenizer(X_test_head, truncation=True)
else:
    raise ValueError(f"Unknown INPUT_TYPE: {INPUT_TYPE!r}")
# +
# Find the largest input size
# Scan every tokenized sequence (train + val) and keep the longest length.
all_ids = X_train_tokenized['input_ids'] + X_val_tokenized['input_ids']
t = max((len(seq) for seq in all_ids), default=0)
print("Maximum input length:", t)
# +
# Create torch dataset
class Dataset(torch.utils.data.Dataset):
    """Minimal torch dataset over pre-tokenized encodings.

    encodings: dict of parallel lists (e.g. 'input_ids', 'attention_mask').
    labels: optional list of integer class ids; when supplied, each item
    also carries a 'labels' tensor so the HF Trainer can compute the loss.
    """
    def __init__(self, encodings, labels=None):
        self.encodings = encodings
        self.labels = labels
    def __getitem__(self, idx):
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        # Use `is not None` rather than truthiness: an explicitly supplied
        # but empty label list must not be silently treated as "no labels".
        if self.labels is not None:
            item["labels"] = torch.tensor(self.labels[idx])
        return item
    def __len__(self):
        return len(self.encodings["input_ids"])
# Wrap the tokenized splits for consumption by the HF Trainer.
train_dataset = Dataset(X_train_tokenized, y_train)
val_dataset = Dataset(X_val_tokenized, y_val)
# +
# Data collator
# Dynamically pads each batch to its longest sequence.
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
# +
# Metrics
def compute_metrics(pred):
    """Accuracy plus weighted and per-class precision/recall/F1 for a HF
    Trainer.  `pred` carries `label_ids` and `predictions` (logits);
    all metrics are rounded to 4 decimal places.
    """
    y_true = pred.label_ids
    y_pred = pred.predictions.argmax(-1)
    label_ids = list(range(len(classes)))  # [0, 1, 2, ..., len(classes)-1]
    weighted = precision_recall_fscore_support(
        y_true, y_pred, labels=label_ids, zero_division=0, average='weighted'
    )
    per_class = precision_recall_fscore_support(
        y_true, y_pred, labels=label_ids, zero_division=0
    )
    precision, recall, f1 = weighted[0], weighted[1], weighted[2]
    precision_list, recall_list, f1_list = per_class[0], per_class[1], per_class[2]
    decimals = 4
    return {
        'accuracy': accuracy_score(y_true, y_pred),
        'f1': np.around(f1, decimals),
        'precision': np.around(precision, decimals),
        'recall': np.round(recall, decimals),
        'f1-list': np.around(f1_list, decimals).tolist(),
        'precision-list': np.around(precision_list, decimals).tolist(),
        'recall-list': np.round(recall_list, decimals).tolist(),
    }
# +
# Training Arguments
# Evaluation, logging and checkpointing all fire every LOGGING_STEPS steps,
# so load_best_model_at_end can match checkpoints against eval metrics.
training_args = TrainingArguments(
    output_dir = 'models/pretrain_'+MODEL_NAME+'_'+INPUT_TYPE,
    overwrite_output_dir = True,
    per_device_train_batch_size = TRAIN_BATCH_SIZE,
    per_device_eval_batch_size = EVAL_BATCH_SIZE,
    evaluation_strategy = EVAL_STRATEGY,
    logging_steps = LOGGING_STEPS,
    save_strategy = SAVE_STRATEGY,
    save_steps = LOGGING_STEPS,
    save_total_limit = 2,  # keep only the 2 most recent checkpoints on disk
    weight_decay = WEIGHT_DECAY,
    num_train_epochs = NUM_TRAIN_EPOCHS,
    load_best_model_at_end = LOAD_BEST_MODEL_AT_END,
    dataloader_num_workers = NUM_WORKERS,
    seed = SEED,
    report_to = 'wandb'  # stream metrics to Weights & Biases
)
# -
# # Train
# +
# Free some space
# When this cell is re-run after a previous training run, drop the old
# Trainer so its model/optimizer tensors can be released from the GPU.
if 'trainer' in globals():
    del trainer
torch.cuda.empty_cache()
gc.collect()
# +
# Setup trainer
trainer = Trainer(
    model = model,
    args = training_args,
    train_dataset = train_dataset,
    eval_dataset = val_dataset,
    data_collator = data_collator,
    compute_metrics = compute_metrics,
    callbacks = CALLBACKS  # early stopping, configured in the config cell
)
# + tags=[]
trainer.train()
# -
pred = trainer.predict(val_dataset)
# +
# Show confusion matrix
targets = pred.label_ids
preds = pred.predictions.argmax(-1)
def show_confusion_matrix(confusion_matrix):
    # Render an annotated heatmap for a confusion-matrix DataFrame.
    # NOTE(review): the parameter name shadows sklearn's `confusion_matrix`
    # function inside this body only; the module-level call below still
    # resolves to the sklearn function.
    hmap = sns.heatmap(confusion_matrix, annot=True, fmt="d", cmap='Blues')
    hmap.yaxis.set_ticklabels(hmap.yaxis.get_ticklabels(), rotation=0, ha='right')
    hmap.xaxis.set_ticklabels(hmap.xaxis.get_ticklabels(), rotation=30, ha='right')
    plt.ylabel('True annotation type')
    plt.xlabel('Predicted annotation type');
cm = confusion_matrix(targets, preds)  # sklearn.metrics.confusion_matrix
df_cm = pd.DataFrame(cm, index=classes, columns=classes)
show_confusion_matrix(df_cm)
# -
wandb.finish()
| summer21/ext_csds2hf/S_annotype_with_text_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # Pronóstico de series de tiempo con DeepAR
#
# DeepAR es un algoritmo supervisado para el pronóstico de series de tiempo escalares. En este cuaderno demostraremos como preparar un conjunto de datos para entrenar DeepAR y como utilizar el modelo entrenado para inferencia.
#
# En este laboratorio utilizaremos el conjunto de datos [Deutsche Börse](https://registry.opendata.aws/deutsche-boerse-pds/). El uso de este conjunto de datos es puramente ilustrativo.
#
# Comenzaremos importando las librerías que usaremos durante el laboratorio:
#
# - [NumPy](https://numpy.org/doc/) - Paquete para computo científico dentro de Python.
# - [pandas](https://pandas.pydata.org/docs/index.html) - Herramienta de código abierto utilizada para manipulación de datos.
# - [boto3](https://boto3.amazonaws.com/v1/documentation/api/latest/index.html) - Kit de desarrollo de software para servicios de AWS.
# - [matplotlib](https://matplotlib.org/) - Librería para generación de gráficos con Python.
# - [SageMaker](https://sagemaker.readthedocs.io/en/stable/index.html) - Kit de desarrollo de software para Amazon SageMaker.
#
# Finalmente importaremos las funciones utilitarias dentro del archivo util.py, estas funciones simplificaran la copia y carga del conjunto de datos XETRA.
import time
import numpy as np
import pandas as pd
import json
import matplotlib.pyplot as plt
import boto3
import sagemaker
import util
from datetime import date
from sagemaker import get_execution_role
# ## Preparación
#
# Lo primero que realizaremos en este laboratorio es establecer variables que nos permitirán controlar el lugar donde almacenaremos los datos:
#
# - ```prefix``` - Prefijo del balde de Amazon Simple Storage Service.
# - ```bucket``` - Nombre del balde de Amazon Simple Storage Service.
# - ```role``` - Rol de ejecución de Amazon SageMaker.
# - ```s3_data_path``` - Ruta donde almacenaremos nuestros datos.
# - ```s3_output_path``` - Ruta donde almacenaremos la salida de nuestros procesos.
#
# <details><summary>Tip</summary>
#
# El balde y el rol lo podemos obtener del object ```sagemaker.Session()``` y el método ```get_execution_role``` respectivamente.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# prefix = 'sagemaker/DeepAR-securities'
#
# session = sagemaker.Session()
# role = get_execution_role()
# bucket = session.default_bucket()
#
# # s3_data_path = f'{prefix}/data'
# # s3_output_path = f'{prefix}/output'
# ```
# </details>
# +
# The S3 locations below are key prefixes inside the session's default
# bucket; they are composed later as s3://{bucket}/{path}.
prefix = 'sagemaker/DeepAR-securities'
session = sagemaker.Session()
role = get_execution_role()
bucket = session.default_bucket()
s3_data_path = f'{prefix}/data'
s3_output_path = f'{prefix}/output'
# -
# ### Contenedor de DeepAR
#
# Ahora configuraremos el URI del contender con el algoritmo de DeepAR en la variable ```image_uri```. Para esto utilizaremos el SDK de SageMaker:
#
# ```python
#
# from sagemaker.image_uris import retrieve
# ```
#
# <details><summary>Tip</summary>
#
# ```retrieve``` recibe como parámetros el nombre del algoritmo y el nombre de la región donde se ejecutará el entrenamiento, en este caso *forecasting_deepar* y la región podemos obtenerla de la sesión de boto3.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# from sagemaker.image_uris import retrieve
#
# image_uri = retrieve('forecasting-deepar', boto3.Session().region_name)
# ```
# </details>
# +
from sagemaker.image_uris import retrieve
image_uri = retrieve('forecasting-deepar', boto3.Session().region_name)
# -
# ## Obtención de datos
#
# El conjunto de datos que utilizaremos para este laboratorio contiene información en resolución de 1 minuto. En este laboratorio obtendrémos información de dos meses para entrenar nuestro modelo.
# **NOTA. En caso de experimentar con más datos es probable requerir una instancia con más capacidad de memoria.**
#
# Este set de datos se encuentra localizado en ```s3://deutsche-boerse-xetra-pds/```, se encuentra dividido por día hábil y por hora. Para facilitar la copia y carga de información se incluye en el workshop el archivo ```util.py```.
# +
# Amazon Simple Storage Service client
s3_client = boto3.client('s3')
# Source bucket of the public XETRA dataset
source_bucket = 'deutsche-boerse-xetra-pds'
# Date range for which to fetch data
start_date = date.fromisoformat('2021-10-01')
end_date = date.fromisoformat('2022-03-01')
# Trading (week) days only
days = util.week_days(start_date, end_date)
# Key prefix under which the raw copies will be stored
dest_prefix = f'{s3_data_path}/raw'
# Copy the per-day objects from the public bucket into our bucket
object_keys = util.copy_objects(s3_client, source_bucket, bucket, dest_prefix, days)
# Finally, load all copied objects into a single pandas DataFrame
df = util.create_dataframe(s3_client, bucket, object_keys)
# -
# ## Exploración de datos
#
# Una vez que realizamos la copia de los datos a nuestro balde de S3 y los cargamos en un solo objeto de pandas, exploremos el conjunto de datos para poder prepararlo para entrenar el modelo.
#
# El diccionario de datos perteneciente a XETRA es el siguiente:
#
# | Columna | Descripción | Tipo |
# |---------|-------------|------|
# |ISIN|ISIN del instrumento|string|
# |Mnemonic|Simbolo bursatil|string|
# |SecurityDesc|Descripción del instrumento|string|
# |SecurityType|Tipo del instrumento|string|
# |Currency|Moneda del instrumento|string|
# |SecurityID|Identificador del instrumento|int|
# |Date|Fecha del muestreo|date|
# |Time|Hora del muestreo|time (hh:mm)|
# |StartPrice|Precio de inicio del instrumento en el periodo|float|
# |MaxPrice|Precio máximo del instrumento en el periodo|float|
# |MinPrice|Precio mínimo del instrumento en el periodo|float|
# |EndPrice|Precio al final del periodo|float|
# |TradedVolume|Valor total negociado|float|
# |NumberOfTrades|Total de transacciones durante el periodo|int|
#
df.head(10)
# ## Preparación de datos
#
# En este laboratorio lo que buscamos predecir es la cantidad de operaciones de compra/venta para un intrumento bursatil en un tiempo del día. Pare este laboratorio utilizaremos los instrumentos: **IFX, VOW3, SAP, DBK, SIE, BAS, DHER, BMW, DTE, LIN, LHA, CBK, DPW, BAYN, ADS, ALV, MBG, HFG, CON, ENR**
#
# El algoritmo DeepAR recibe como entrada series de tiempo para realizar el entrenamiento, actualmente nuestro set de datos no se encuentra en esta estructura.
#
# Al tener un conjunto de datos relativamente grande, lo primero que haremos es utilizar solo la información necesaria, para esto deshecharemos las columnas **ISIN, SecurityDesc, SecurityType, Currency, SecurityID, StartPrice, MaxPrice, MinPrice, EndPrice y TradedVolume**.
#
# <details><summary>Tip</summary>
#
# Podemos utilizar el método ```drop``` del ```DataFrame``` de pandas con los argumentos ```axis=1``` para afectar las columnas y ```inplace=True``` para realizarlo sobre el objeto y no generar una copia del mismo.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# mnemonics = ['IFX', 'VOW3', 'SAP', 'DBK', 'SIE', 'BAS', 'DHER', 'BMW', 'DTE', 'LIN',
# 'LHA', 'CBK', 'DPW', 'BAYN', 'ADS', 'ALV', 'MBG', 'HFG', 'CON', 'ENR']
# df = df[df['Mnemonic'].isin(mnemonics)]
# df.drop(['ISIN', 'SecurityDesc', 'SecurityType', 'Currency', 'SecurityID',
# 'StartPrice', 'MaxPrice', 'MinPrice', 'EndPrice', 'TradedVolume'],
# axis=1, inplace=True)
# df.head(10)
# ```
# </details>
# Keep only the 20 instruments of interest and drop every column except
# Mnemonic, Date, Time and NumberOfTrades (the prediction target).
mnemonics = ['IFX', 'VOW3', 'SAP', 'DBK', 'SIE', 'BAS', 'DHER', 'BMW', 'DTE', 'LIN',
             'LHA', 'CBK', 'DPW', 'BAYN', 'ADS', 'ALV', 'MBG', 'HFG', 'CON', 'ENR']
df = df[df['Mnemonic'].isin(mnemonics)]
df.drop(['ISIN', 'SecurityDesc', 'SecurityType', 'Currency', 'SecurityID',
         'StartPrice', 'MaxPrice', 'MinPrice', 'EndPrice', 'TradedVolume'],
        axis=1, inplace=True)
df.head(10)
# Ahora necesitamos generar una nueva columna que podamos utilizar como indice para nuestra serie de tiempo, contamos con las columnas ```Date``` y ```Time``` con formato ```hh:mm```. Concatenaremos estas dos columnas en una nueva columna llamada ```Timestamp```.
#
# <details><summary>Tip</summary>
#
# Pandas provee diferentes funciones para poder realizar transformación de datos, en este caso podemos utilizar ```to_datetime```, esta función puede transformar fechas en formato ISO ```YYYY-MM-DD hh:mm:ss```, lo que significa que primero tendrémos que concatenar la fecha y hora y agregar los segundos en cero.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# df['Timestamp'] = pd.to_datetime(df['Date'] + ' ' + df['Time'] + ':00')
# df.head()
# ```
# </details>
df['Timestamp'] = pd.to_datetime(df['Date'] + ' ' + df['Time'] + ':00')
df.head()
# Ahora podemos eliminar las columnas ```Date``` y ```Time```, y asegurarnos que el tipo de dato de ```NumberOfTrades``` sea correcto.
#
# <details><summary>Tip</summary>
#
# Para eliminar las columnas podemos seguir los pasos previos ahora. Para verificar los tipos de datos podemos imprimir el objeto ```dtypes``` del ```DataFrame```.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# df.drop(['Date', 'Time'], axis=1, inplace=True)
# print(df.dtypes)
# ```
# </details>
df.drop(['Date', 'Time'], axis=1, inplace=True)
print(df.dtypes)
# Ahora cambiemos el tipo de dato ```object``` a ```integer```.
#
# <details><summary>Tip</summary>
#
# Pandas proveé el método ```to_numeric```, el cual podemos aplicarlo de forma similar a como lo hicimos con ```Timestamp```, adicionalmente es conveniente utilizar el argumento ```downcast='integer'``` con la finalidad de usar enteros y no flotantes, esto evitara un uso alto de memoria y sabemos previamente que el tipo de dato es entero.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# df['NumberOfTrades'] = pd.to_numeric(df['NumberOfTrades'], downcast='integer')
# print(df.dtypes)
# ```
# </details>
df['NumberOfTrades'] = pd.to_numeric(df['NumberOfTrades'], downcast='integer')
print(df.dtypes)
# Ahora necesitamos transformar nuestro conjunto de datos a una serie de tiempo. Es decir, cambiar del formato:
#
# |Mnemonic|NumberOfTrades|Timestamp|
# |---|---|---|
# |IFX|23|2022-01-03 08:00:00|
# |VOW3|3|2022-01-03 08:00:00|
# |SAP|4|2022-01-03 08:00:00|
# |DBK|1|2022-01-03 08:00:00|
# |SIE|2|2022-01-03 08:00:00|
# |BAS|54|2022-01-03 08:00:00|
# |DHER|4|2022-01-03 08:00:00|
# |BMW|6|2022-01-03 08:00:00|
# |DTE|6|2022-01-03 08:00:00|
# |LIN|7|2022-01-03 08:00:00|
# |LHA|8|2022-01-03 08:00:00|
# |CBK|12|2022-01-03 08:00:00|
# |DPW|4|2022-01-03 08:00:00|
# |BAYN|3|2022-01-03 08:00:00|
# |ADS|21|2022-01-03 08:00:00|
# |ALV|43|2022-01-03 08:00:00|
# |MBG|21|2022-01-03 08:00:00|
# |HFG|12|2022-01-03 08:00:00|
# |CON|3|2022-01-03 08:00:00|
# |ENR|10|2022-01-03 08:00:00|
#
# Al formato:
#
# |Timestamp|IFX|VOW3|SAP|DBK|SIE|BAS|DHER|BMW|DTE|LIN|LHA|CBK|DPW|BAYN|ADS|ALV|MBG|HFG|CON|ENR|
# |---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
# |2022-01-03 08:00:00|23|3|4|1|2|54|4|6|6|7|8|12|4|3|21|43|21|12|3|10|
#
# <details><summary>Tip</summary>
#
# Con pandas podemos crear tablas pivote y dejar que esta función se encargue de hacer las agrupaciones necesarias, está función ```pivot_table``` del objecto ```DataFrame``` recibe los argumentos ```values``` donde pasaremos el nombre o nombres de columnas para agregar en este caso ```NumberOfTrades```, ```index``` con el o los nombres de las columnas que utilizaremos para indexar en este caso ```Timestamp```, ```columns``` con los nombres de las columnas que formaran las nuevas columnas en este caso ```Mnemonic```, y finalmente el parámetro ```fill_value``` el cual podemos establecer como 0.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# series = df.pivot_table('NumberOfTrades', ['Timestamp'], 'Mnemonic', fill_value=0)
# series.head()
# ```
# </details>
# Pivot to one column per instrument, indexed by Timestamp; (instrument,
# minute) combinations with no trades are filled with 0.
series = df.pivot_table('NumberOfTrades', 'Timestamp', 'Mnemonic', fill_value=0)
series.head()
# En este laboratorio nuestro objetivo es poder realizar la predicción del volumen de transacciones para una determinada hora en un día de transacciones. Actualmente nuestra serie de tiempo solo contiene horarios laborales durante días laborales, con pandas podemos llenar estos vacios y cambiar la frecuencia de muestreo. El método ```resample``` del ```DataFrame``` realizará esta operación, usaremos una resolución de una hora.
#
# <details><summary>Tip</summary>
#
# El método ```resample``` recibe como parámetro la nueva resolución de la serie de tiempo. Recordemos que es necesario aplicar una función a la agrupación de los datos, en este caso será una suma.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# series = series.resample('1H').sum()
# series.head(24)
# ```
# </details>
# Aggregate the 1-minute counts into hourly totals; resampling also fills
# the off-market gaps, which become rows of zeros.
series = series.resample('1H').sum()
series.head(24)
# ## Visualización de datos
#
# Tomaremos una muestra de nuestros datos para observar el comportamiento. Para esto utilizaremos ```matplotlib``` para generar gráficas de líneas.
# Tomaremos los 10 primeros instrumentos y graficaremos el comportamiento en un rango de 4 días.
# +
# %matplotlib inline
fig, axs = plt.subplots(5, 2, figsize=(20, 20), sharex=True)
axx = axs.ravel()
for i in range(0, 10):
series.loc['2022-01-03':'2022-01-06'][[mnemonics[i]]].plot(ax=axx[i])
axx[i].set_xlabel("Timestamp")
axx[i].set_ylabel("Trades")
axx[i].grid(which="minor", axis="x")
# -
# ## Datos de entrenamiento y prueba
#
# Cuando trabajamos con datos en series de tiempo la división de los datos debe ser en un punto en el tiempo, es decir, seleccionaremos un ```Timestamp``` para dividir el conjunto de datos. Podemos utilizar los métodos de ```DataFrame``` ```head``` y ```tail``` para entender el rango de tiempo y hacer una división de los datos.
#
# <details><summary>Tip</summary>
#
# Con pandas podemos crear estos "apuntadores" utilizando el constructor de ```Timestamp```.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# training_start = pd.Timestamp('2022-01-03 08:00:00', freq='1M')
# training_stop = pd.Timestamp('2022-02-10 20:00:00', freq='1M')
# ```
# </details>
series.head()
series.tail()
# En la siguiente celda podemos manipular el tiempo de inicio y fin del conjunto de datos de entrenamiento.
# First timestamp of the series: start of the training window.
training_start = series.index[0]
# With 1H frequency the last 7*24 rows are the final 7 days; the first
# timestamp of that slice is the training cutoff (held out for testing).
training_stop = series.index[-7 * 24:][0]
# Ahora prepararemos las entradas para DeepAR, estos archivos ```JSON``` tienen la siguiente estructura:
#
# ```json
# {"start": timestamp, "target": [timeseries, ...]}
# {"start": timestamp, "target": [timeseries, ...]}
# {"start": timestamp, "target": [timeseries, ...]}
# {"start": timestamp, "target": [timeseries, ...]}
# ```
#
# Primero generaremos la entrada para entrenamiento substrayendo los últimos 7 días de nuestro conjunto de datos.
# **NOTA: En la manipulación de indices con pandas, el segundo parametro es inclusivo.**
#
# <details><summary>Tip</summary>
#
# Podemos utilizar parte del código utilizado para crear las gráficas para separar las series de tiempo y truncarlas al tamaño correcto.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# from datetime import timedelta
#
# training_data = [
# {
# 'start': str(training_start),
# #             'target': series.loc[training_start : training_stop - timedelta(hours=1)][mnemonic].tolist()
# }
# for mnemonic in mnemonics
# ]
# ```
# </details>
# +
from datetime import timedelta
# Training targets stop one period (1 hour, the series frequency) before
# training_stop, because pandas .loc slicing is inclusive on both ends.
training_data = [
    {
        'start': str(training_start),
        'target': series.loc[training_start : training_stop - timedelta(hours=1)][mnemonic].tolist()
    }
    for mnemonic in mnemonics
]
print(len(training_data))
# -
# Ahora preparemos el conjunto de datos de validación.
# Test series cover the full range: the training window plus the held-out
# final 7 days, which DeepAR uses for backtest evaluation.
test_data = [
    {
        'start': str(training_start),
        'target': series[mnemonic].tolist()
    }
    for mnemonic in mnemonics
]
# Bug fix: this previously printed len(training_data) (copy-paste slip),
# so the size of the test set was never actually reported.
print(len(test_data))
# Ahora es necesario colocar estos datos en un balde de S3 desde donde serán leídos por DeepAR.
#
# El archivo con el conjunto de entrenamiento lo nombraremos ```train.json``` y utilizaremos el prefijo definido en la variable ```s3_data_path```.
# El archivo con el conjunto de validación lo nombraremos ```test.json``` y de igual forma usaremos el prefijo definido en ```s3_data_path```.
#
# <details><summary>Tip</summary>
#
# Podemos utilizar el serializador de JSON de Python para transformar los diccionarios en arreglos de bytes y el cliente de S3 para cargar los archivos, el método ```put_object``` recibe como parámetro ```Body``` un arreglo de bytes.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# byte_array = b''
# for data in training_data:
# byte_array += json.dumps(data).encode('utf-8')
# byte_array += b'\n'
#
# s3_client.put_object(
# Body=byte_array,
# Bucket=bucket,
# Key=f'{s3_data_path}/train/train.json'
# )
#
# byte_array = b''
# for data in test_data:
# byte_array += json.dumps(data).encode('utf-8')
# byte_array += b'\n'
#
# s3_client.put_object(
# Body=byte_array,
# Bucket=bucket,
# Key=f'{s3_data_path}/test/test.json'
# )
# ```
# </details>
# +
# DeepAR expects JSON Lines: one {"start", "target"} object per line.
byte_array = b''
for data in training_data:
    byte_array += json.dumps(data).encode('utf-8')
    byte_array += b'\n'
s3_client.put_object(
    Body=byte_array,
    Bucket=bucket,
    Key=f'{s3_data_path}/train/train.json'
)
# Same serialization for the test channel.
byte_array = b''
for data in test_data:
    byte_array += json.dumps(data).encode('utf-8')
    byte_array += b'\n'
s3_client.put_object(
    Body=byte_array,
    Bucket=bucket,
    Key=f'{s3_data_path}/test/test.json'
)
# -
# ## Entrenar un modelo
#
# Aquí necesitamos definir el estimador que lanzará nuestro proceso de entrenamiento. El estimador lo construiremos a partir de ```sagemaker.estimator.Estimator```.
# El estimador recibe los siguientes argumentos:
#
# - image_uri - Esta es la imagen que contiene el algoritmo DeepAR, se encuentra en la variable ```image_uri```.
# - sagemaker_session - Sesión activa de Amazon SageMaker, la tenemos en la variable ```session```.
# - role - Rol para ejecutar tareas de SageMaker, se encuentra en la variable ```role```.
# - instance_count - Cantidad de instancias que usaremos para el entrenamiento, en este caso usaremos 1.
# - instance_type - Tipo de instancia para realizar el entrenamiento, en este caso usaremos ```ml.c5.2xlarge```.
# - base_job_name - Nombre base para la tarea de entrenamiento, usaremos ```xetra-demo```.
# - output_path - Ruta de salida para los artefactos de DeepAR, utilizaremos el nombre de nestro balce ```bucket``` y el path de salida ```s3_output_path``` para crear una ruta del tipo ```s3://bucket/path```.
#
# <details><summary>Tip</summary>
#
# El estimador se crea con ```sagemaker.estimator.Estimator```; recuerda que ```output_path``` debe ser una URI completa con el formato ```s3://balde/prefijo```, no solo el prefijo.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# estimator = sagemaker.estimator.Estimator(
# image_uri=image_uri,
# sagemaker_session=session,
# role=role,
# instance_count=1,
# instance_type='ml.c5.2xlarge',
# base_job_name='xetra-demo',
# output_path=s3_output_path
# )
# ```
# </details>
# Training estimator for the DeepAR container.  output_path must be a full
# S3 URI, hence the s3://{bucket}/{s3_output_path} composition.
estimator = sagemaker.estimator.Estimator(
    image_uri=image_uri,
    sagemaker_session=session,
    role=role,
    instance_count=1,
    instance_type='ml.c5.2xlarge',
    base_job_name='xetra-demo',
    output_path=f's3://{bucket}/{s3_output_path}'
)
# Cada algoritmo de aprendizaje de máquina utiliza hiper parámetros para ajustar la forma de ejecución del mismo, en caso de DeepAR podemos revisar la documentación detallada [aquí](https://docs.aws.amazon.com/sagemaker/latest/dg/deepar_hyperparameters.html).
# Cabe mencionar que podríamos utilizar la funcionalidad de tuning automática de SageMaker apoyandonos del objeto [HyperparameterTuner](https://sagemaker.readthedocs.io/en/stable/api/training/tuner.html?highlight=HyperparameterTuner) y creando una tarea de optimización de parámetros. En este [blog](https://aws.amazon.com/blogs/aws/sagemaker-automatic-model-tuning/) podemos ver un ejemplo de como aplicarlo.
# +
# Number of time units to forecast: hours in seven days.
length = 24 * 7
# SageMaker built-in algorithms receive every hyperparameter as a string.
# context_length (history window seen by the model) matches prediction_length.
hyperparameters = {
    'time_freq': '1H',
    'epochs': '400',
    'early_stopping_patience': '40',
    'mini_batch_size': '64',
    'learning_rate': '5E-4',
    'context_length': f'{length}',
    'prediction_length': f'{length}',
}
estimator.set_hyperparameters(**hyperparameters)
# -
# Ahora lanzamos la tarea de entrenamiento. Esto puede tardar varios minutos dependiendo del tamaño del conjunto de datos utilizado. Para indicar al algoritmo donde se encuentran nuestros datos de entrenamiento y pruebas crearemos un diccionario con las llaves ```train``` y ```test``` para indicarlo.
# +
data_channels = {
'train': f's3://{bucket}/{s3_data_path}/train/',
'test': f's3://{bucket}/{s3_data_path}/test/'
}
estimator.fit(inputs=data_channels, wait=True)
# -
# ## Evaluación del modelo
#
# En el caso de DeepAR las métricas utilizadas para evaluar el modelo son RMSE, mean quantile loss y mean absolute quantile loss. Podemos obtener el detalle de estas métricas a través del objecto ```training_job_analytics```, el cual podemos transformar a un ```DataFrame``` de pandas con el método ```dataframe()```.
#
# <details><summary>Tip</summary>
#
# El nombre de las métricas son ```test:mean_wQuantileLoss``` y ```test:RMSE```.
# </details>
#
# <details><summary>Código</summary>
#
# ```python
# metrics = estimator.training_job_analytics.dataframe()
#
# metrics[metrics['metric_name'].str.startswith('test:')]
# ```
# </details>
# +
metrics = estimator.training_job_analytics.dataframe()
metrics[metrics['metric_name'].str.startswith('test:')]
# -
# ## Realizar predicciones
#
# Ahora que tenemos nuestro modelo entrenado podemos utilizarlo para realizar predicciones. Es necesario crear un Endpoint y desplegar nuestro modelo en el mismo para poder realizar dichas predicciones.
#
# Comencemos creando la clase hija de ```sagemaker.predictor.Predictor``` que contendrá la lógica necesaria para recibir la petición de predicciones y generar una salida adecuada con las predicciones.
# +
from sagemaker.serializers import IdentitySerializer
class DeepARPredictor(sagemaker.predictor.Predictor):
def __init__(self, *args, **kwargs):
super().__init__(
*args,
serializer=IdentitySerializer(content_type='application/json'),
**kwargs
)
def predict(
self,
ts,
cat=None,
dynamic_feat=None,
num_samples=100,
return_samples=False,
quantiles=['0.25', '0.50', '0.75', '0.90']
):
"""
Solicita la predicción para la serie de tiempo en ts. Opcionalmente se puede incluir su categoría y
característica dinámica.
:param ts: Serie de tiempo de pandas.
:param cat: Categoría opcional.
:param dynamic_feat: Característica dinámica opcional.
:param num_samples: Número entero de muestras a calcular, 100 por defecto.
:param return_samples: Bandera para regresar o no las muestras.
:param quantiles: Lista de cuantíles a calcular, por defecto [0.25, 0.50, 0.75, 0.90].
:return: Lista de objetos DataFrame de pandas con las predicciones.
"""
prediction_time = ts.index[-1] + ts.index.freq
quantiles = [str(q) for q in quantiles]
req = self.__encode_request(ts, cat, dynamic_feat, num_samples, return_samples, quantiles)
res = super(DeepARPredictor, self).predict(req)
return self.__decode_response(res, ts.index.freq, prediction_time, return_samples)
def __encode_request(self, ts, cat, dynamic_feat, num_samples, return_samples, quantiles):
instance = {
'start': str(ts.index[0]),
'target': [x for x in ts]
}
if cat:
instance['cat'] = cat
if dynamic_feat:
instance['dynamic_feat'] = dynamic_feat
configuration = {
'num_samples': num_samples,
'output_types': ['quantiles', 'samples'] if return_samples else ['quantiles'],
'quantiles': quantiles
}
http_request_data = {
'instances': [instance],
'configuration': configuration
}
return json.dumps(http_request_data).encode('utf-8')
def __decode_response(self, response, freq, prediction_time, return_samples):
predictions = json.loads(response.decode('utf-8'))['predictions'][0]
prediction_length = len(next(iter(predictions['quantiles'].values())))
prediction_index = pd.date_range(
start=prediction_time, freq=freq, periods=prediction_length
)
if return_samples:
dict_of_samples = {
'sample_' + str(i): s for i, s in enumerate(predictions['samples'])
}
else:
dict_of_samples = {}
return pd.DataFrame(
data={**predictions['quantiles'], **dict_of_samples},
index=prediction_index
)
def set_frequency(self, freq):
    """Store the pandas frequency on the predictor.

    NOTE(review): the visible predict/encode/decode methods read the
    frequency from `ts.index.freq` instead of `self.freq` — confirm
    where this attribute is consumed.
    """
    self.freq = freq
# -
# Una vez creada nuestra clase podemos realizar el despliegue de nuestro modelo, para esta actividad necesitaremos:
# - ```initial_instance_count``` - Cantidad de instancias iniciales, usaremos 1.
# - ```instance_type``` - Tipo de instancia para la inferencia, usaremos ```ml.m5.large```.
# - ```predictor_cls``` - Nuestra clase de predicción ```DeepARPredictor```.
#
# <details><summary>Código</summary>
#
# ```python
# predictor = estimator.deploy(
# initial_instance_count=1,
# instance_type='ml.m5.large',
# predictor_cls=DeepARPredictor
# )
# ```
# </details>
# Deploy the trained estimator behind a real-time inference endpoint,
# wrapping it in our custom DeepARPredictor class.
predictor = estimator.deploy(
    initial_instance_count=1,
    instance_type='ml.m5.large',
    predictor_cls=DeepARPredictor
)
# Run a quick one-day forecast for the 'BMW' series to verify the endpoint.
predictor.predict(ts=series['BMW'].loc[training_stop:training_stop + timedelta(days=1)]).head()
# Ahora definiremos una función para visualizar las predicciones.
def plot(
    predictor,
    target_ts,
    cat=None,
    dynamic_feat=None,
    forecast_date=training_stop,
    show_samples=False,
    plot_history=7 * 24,
    confidence=80,
):
    """Query the endpoint and plot the forecast against the actual series.

    :param predictor: deployed DeepARPredictor used to run inference.
    :param target_ts: full pandas series; values up to `forecast_date` are the context.
    :param cat: optional category passed through to the model.
    :param dynamic_feat: optional dynamic feature arrays (list of lists).
    :param forecast_date: timestamp where the forecast starts.
    :param show_samples: if True, also request and draw individual sample paths.
    :param plot_history: number of past periods to show before `forecast_date`.
    :param confidence: confidence-interval width in percent (50 < confidence < 100).
    """
    freq = target_ts.index.freq
    print(
        f'Invocando ejecución de modelo a partir de {forecast_date}'
    )
    assert confidence > 50 and confidence < 100
    # Convert the confidence level into lower/upper quantiles, e.g. 80 -> 0.10 / 0.90.
    low_quantile = 0.5 - confidence * 0.005
    up_quantile = confidence * 0.005 + 0.5
    # First build the request arguments
    args = {
        "ts": target_ts[:forecast_date],
        "return_samples": show_samples,
        "quantiles": [low_quantile, 0.5, up_quantile],
        "num_samples": 100,
    }
    # NOTE(review): an empty list for dynamic_feat takes this else-branch but
    # still enters the `is not None` branch at the bottom — confirm callers
    # never pass [].
    if dynamic_feat:
        args["dynamic_feat"] = dynamic_feat
        # Reserve an extra subplot row for the dynamic features.
        fig = plt.figure(figsize=(20, 6))
        ax = plt.subplot(2, 1, 1)
    else:
        fig = plt.figure(figsize=(20, 3))
        ax = plt.subplot(1, 1, 1)
    if cat:
        args["cat"] = cat
        ax.text(0.9, 0.9, "cat = {}".format(cat), transform=ax.transAxes)
    # Call the endpoint
    prediction = predictor.predict(**args)
    # Draw the individual sample paths, if requested
    if show_samples:
        for key in prediction.keys():
            if "sample" in key:
                prediction[key].plot(color="lightskyblue", alpha=0.2, label="_nolegend_")
    # Draw the actual values around the forecast window
    target_section = target_ts[
        forecast_date - plot_history * freq : forecast_date + 24 * 7 * freq
    ]
    target_section.plot(color="black", label="objetivo")
    # Shade the confidence interval and draw the median forecast
    ax.fill_between(
        prediction[str(low_quantile)].index,
        prediction[str(low_quantile)].values,
        prediction[str(up_quantile)].values,
        color="b",
        alpha=0.3,
        label="{}% de confianza".format(confidence),
    )
    prediction["0.5"].plot(color="b", label="P50")
    ax.legend(loc=2)
    # Fix the y-scale; the sample paths could otherwise distort it
    ax.set_ylim(target_section.min() * 0.5, target_section.max() * 1.5)
    if dynamic_feat is not None:
        for i, f in enumerate(dynamic_feat, start=1):
            ax = plt.subplot(len(dynamic_feat) * 2, 1, len(dynamic_feat) + i, sharex=ax)
            feat_ts = pd.Series(
                index=pd.date_range(
                    start=target_ts.index[0], freq=target_ts.index.freq, periods=len(f)
                ),
                data=f,
            )
            # NOTE(review): `prediction_length` is a module-level global not
            # defined in this cell — confirm it exists before calling plot().
            feat_ts[
                forecast_date - plot_history * freq : forecast_date + prediction_length * freq
            ].plot(ax=ax, color="g")
# Widget label style (used by interactive ipywidgets controls).
style = {"description_width": "initial"}
# Forecast one week past the end of training, showing two weeks of history.
plot(
    predictor,
    target_ts=series['BMW'],
    forecast_date=training_stop + timedelta(days=7),
    show_samples=False,
    plot_history=24 * 14,
    confidence=80
)
| sagemaker-deepar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Python statistics essential training - 03_01_visualization
# Standard imports
import numpy as np
import scipy.stats
import pandas as pd
# +
import matplotlib
import matplotlib.pyplot as pp
from IPython import display
from ipywidgets import interact, widgets
# %matplotlib inline
# -
import re
import mailbox
import csv
| 03_01_visualization_begin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
"""
The MIT License (MIT)
Copyright (c) 2021 NVIDIA
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# This code example demonstrates how to create a matrix implementation of c4e2_2level_learning_mnist. More context for this code example can be found in the section "Single matrix" in Appendix F in the book Learning Deep Learning by <NAME> (ISBN: 9780137470358).
#
# The first piece of code code is identical to the implementation in c4e2_2level_learning_mnist.
# +
import numpy as np
import matplotlib.pyplot as plt
import idx2numpy
np.random.seed(7)  # To make repeatable
LEARNING_RATE = 0.01  # Step size for stochastic gradient descent
EPOCHS = 20  # Number of full passes over the training set
# Paths to the raw MNIST files in IDX format.
TRAIN_IMAGE_FILENAME = '../data/mnist/train-images-idx3-ubyte'
TRAIN_LABEL_FILENAME = '../data/mnist/train-labels-idx1-ubyte'
TEST_IMAGE_FILENAME = '../data/mnist/t10k-images-idx3-ubyte'
TEST_LABEL_FILENAME = '../data/mnist/t10k-labels-idx1-ubyte'
# Function to read dataset.
def read_mnist():
    """Load MNIST, flatten and standardize images, one-hot encode labels.

    Returns (x_train, y_train, x_test, y_test) where the images are
    standardized using the *training* mean and standard deviation.
    """
    raw_train_images = idx2numpy.convert_from_file(
        TRAIN_IMAGE_FILENAME)
    raw_train_labels = idx2numpy.convert_from_file(
        TRAIN_LABEL_FILENAME)
    raw_test_images = idx2numpy.convert_from_file(
        TEST_IMAGE_FILENAME)
    raw_test_labels = idx2numpy.convert_from_file(
        TEST_LABEL_FILENAME)
    # Flatten 28x28 images to 784-element vectors and standardize both
    # splits with statistics computed on the training set only.
    x_train = raw_train_images.reshape(60000, 784)
    mean = np.mean(x_train)
    stddev = np.std(x_train)
    x_train = (x_train - mean) / stddev
    x_test = (raw_test_images.reshape(10000, 784) - mean) / stddev
    # One-hot encode the labels: row i has a 1.0 at the digit's index.
    y_train = np.eye(10)[raw_train_labels]
    y_test = np.eye(10)[raw_test_labels]
    return x_train, y_train, x_test, y_test
# Read train and test examples.
x_train, y_train, x_test, y_test = read_mnist()
# Indices into the training set, shuffled each epoch for random order.
index_list = list(range(len(x_train)))  # Used for random order
def layer_w(neuron_count, input_count, low=-0.1, high=0.1):
    """Create a weight matrix for one fully-connected layer.

    Column 0 holds the bias weights, initialized to zero; the remaining
    `input_count` columns are drawn uniformly from [low, high).

    :param neuron_count: number of neurons (rows).
    :param input_count: number of inputs per neuron (columns, excluding bias).
    :param low: lower bound of the uniform init range (default -0.1,
        matching the original hard-coded value).
    :param high: upper bound of the uniform init range (default 0.1).
    :return: numpy array of shape (neuron_count, input_count + 1).
    """
    weights = np.zeros((neuron_count, input_count+1))
    # Scalar draws in row-major order keep the RNG stream identical to the
    # original implementation for a given seed.
    for i in range(neuron_count):
        for j in range(1, (input_count+1)):
            weights[i][j] = np.random.uniform(low, high)
    return weights
# Declare matrices and vectors representing the neurons.
# Hidden layer: 25 neurons, each with 784 inputs (+ bias in column 0).
hidden_layer_w = layer_w(25, 784)
hidden_layer_y = np.zeros(25)       # hidden-layer activations
hidden_layer_error = np.zeros(25)   # hidden-layer error terms
# Output layer: 10 neurons (one per digit), fed by the 25 hidden outputs.
output_layer_w = layer_w(10, 25)
output_layer_y = np.zeros(10)       # output-layer activations
output_layer_error = np.zeros(10)   # output-layer error terms
# History buffers for the error chart, appended to by show_learning().
chart_x = []
chart_y_train = []
chart_y_test = []
def show_learning(epoch_no, train_acc, test_acc):
    """Print per-epoch accuracies and record error rates for plotting.

    Mutates the module-level chart_x / chart_y_train / chart_y_test lists.
    """
    global chart_x
    global chart_y_train
    global chart_y_test
    print('epoch no:', epoch_no, ', train_acc: ',
          '%6.4f' % train_acc,
          ', test_acc: ', '%6.4f' % test_acc)
    chart_x.append(epoch_no + 1)
    # The chart tracks error rate (1 - accuracy), not accuracy.
    chart_y_train.append(1.0 - train_acc)
    chart_y_test.append(1.0 - test_acc)
def plot_learning():
    """Plot the training vs. test error curves recorded by show_learning()."""
    plt.plot(chart_x, chart_y_train, 'r-',
             label='training error')
    plt.plot(chart_x, chart_y_test, 'b-',
             label='test error')
    # Error rates live in [0, 1]; x-axis spans the recorded epochs.
    plt.axis([0, len(chart_x), 0.0, 1.0])
    plt.xlabel('training epochs')
    plt.ylabel('error')
    plt.legend()
    plt.show()
# -
# The functions that have changed compared to c4e2_2level_learning_mnist are forward_pass, backward_pass, and adjust_weights. In these functions, we no longer loop over the individual neurons and do dot products, but instead, we handle an entire layer in parallel using matrix operations.
#
# The forward_pass function is straightforward. We use the NumPy matmul function to multiply the weight matrix by the input vector and then apply the activation function tanh on the resulting output vector. We then append a bias needed for the output layer using the concatenate function and do the matrix multiplication and activation function for the output layer as well.
#
# The backward_pass function is not much more complicated. We compute the derivatives of the error function and the activation function but note that all these computations are done on vectors (i.e., all neurons in parallel). Another thing to note is that the mathematical operators +, -, and * are elementwise operators. That is, there is a big difference between using * and the matmul function. One thing to note is the call to np.matrix.transpose and the indexing we do with output_layer_w[:, 1:]. The transpose operation is needed to make the dimensions of the weight matrix match what is needed for a matrix multiplication with the error vector. The indexing is done to get rid of the bias weights when computing the error terms for the hidden neurons because the bias weight from the output layer is not needed for that operation.
#
# The adjust_weights function is slightly tricky. For each of the two layers, we need to create a matrix with the same dimensions as the weight matrix for that layer but where the elements represent the delta to subtract from the weights. The elements of this delta matrix are obtained by multiplying the input value that feeds into a weight by the error term for the neuron that the weight connects to and finally multiplying by the learning rate. We already have the error terms arranged in the vectors hidden_layer_error and output_layer_error. Similarly, we have the input values for the two layers arranged in the vectors x and hidden_layer_y. For each layer we now combine the input vector with the error vector using the function np.outer which computes the outer product of the two vectors. It results in a matrix where the elements are all the pairwise products from the elements in the two vectors, which is exactly what we want. We multiply the matrix by the learning rate and then subtract from the weight matrix.
#
# +
def forward_pass(x):
    """Run one forward pass, updating the global activation vectors.

    :param x: input vector with the bias value 1.0 already prepended.
    """
    global hidden_layer_y
    global output_layer_y
    # Hidden layer: weighted sums followed by tanh activation.
    hidden_layer_y = np.tanh(np.matmul(hidden_layer_w, x))
    # Prepend the bias input before feeding the output layer.
    biased_hidden = np.concatenate(
        (np.array([1.0]), hidden_layer_y))
    # Output layer: logistic sigmoid activation.
    output_layer_z = np.matmul(output_layer_w, biased_hidden)
    output_layer_y = 1.0 / (1.0 + np.exp(-output_layer_z))
def backward_pass(y_truth):
    """Backpropagate the error, updating the global error-term vectors.

    :param y_truth: one-hot encoded ground-truth vector.
    """
    global hidden_layer_error
    global output_layer_error
    # Output layer: loss derivative times sigmoid derivative, elementwise.
    loss_prime = -(y_truth - output_layer_y)
    sigmoid_prime = output_layer_y * (1.0 - output_layer_y)
    output_layer_error = loss_prime * sigmoid_prime
    # Hidden layer: push the output errors back through the output weights,
    # skipping the bias column, then scale by the tanh derivative.
    tanh_prime = 1.0 - hidden_layer_y**2
    weighted_error = np.matmul(output_layer_w[:, 1:].T,
                               output_layer_error)
    hidden_layer_error = tanh_prime * weighted_error
def adjust_weights(x):
    """Apply one SGD step to both weight matrices.

    The per-weight gradients are the outer product of each layer's input
    vector with its error-term vector.

    :param x: network input vector (bias already prepended).
    """
    global output_layer_w
    global hidden_layer_w
    hidden_layer_w -= np.outer(hidden_layer_error, x) * LEARNING_RATE
    # The output layer's input is the hidden activations plus a bias of 1.0.
    biased_hidden = np.concatenate(
        (np.array([1.0]), hidden_layer_y))
    output_layer_w -= np.outer(output_layer_error,
                               biased_hidden) * LEARNING_RATE
# -
# The network training loop is unchanged compared to c4e2_2level_learning_mnist.
#
# Network training loop.
for i in range(EPOCHS):  # Train EPOCHS iterations
    np.random.shuffle(index_list)  # Randomize order
    correct_training_results = 0
    for j in index_list:  # Train on all examples
        # Prepend the bias input 1.0 before the forward pass.
        x = np.concatenate((np.array([1.0]), x_train[j]))
        forward_pass(x)
        # Prediction = argmax of the output activations.
        if output_layer_y.argmax() == y_train[j].argmax():
            correct_training_results += 1
        backward_pass(y_train[j])
        adjust_weights(x)
    correct_test_results = 0
    for j in range(len(x_test)):  # Evaluate network (no weight updates)
        x = np.concatenate((np.array([1.0]), x_test[j]))
        forward_pass(x)
        if output_layer_y.argmax() == y_test[j].argmax():
            correct_test_results += 1
    # Show progress.
    show_learning(i, correct_training_results/len(x_train),
                  correct_test_results/len(x_test))
plot_learning()  # Create plot
| stand_alone/aFe1_2level_learning_mnist_matmul.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# ## Predict results for test data
# ref: https://www.kaggle.com/c/avazu-ctr-prediction/discussion/12314¶
# +
"""
======================================================
Out-of-core classification of Avazu data
======================================================
wc count for train.csv 40428968
wc count for test.csv 4577465
This file reads archived training results (model_file and preproc_file),
makes the predictions for the test data, and writes the submission file to disk.
"""
# Author: <NAME> <<EMAIL>>
import string
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import DataFrame
import gc
# joblib library for efficient archiving
from sklearn.externals import joblib
# initialize time
start = datetime.now()
# Set file and folder paths
test_file = 'test.csv'
model_path = './'
submission_path = './'
submission_file = submission_path + 'submission.csv'
###############################################################################
# Main
###############################################################################
# read test data into a dataframe; 'id' is read as a string to avoid
# precision loss on very large integer ids
data = pd.read_table(test_file, sep=',', chunksize=None,header='infer',converters={"id":str})
# load archived model_file and preproc_file from training step
# NOTE(review): sklearn.externals.joblib was removed in scikit-learn 0.23;
# the standalone `joblib` package is the modern import — confirm the
# installed scikit-learn version.
model_file = model_path + 'model-avazu-sgd.pkl'
cls = joblib.load(model_file)
preproc_file = model_path + 'model-avazu-preproc.pkl'
preproc = joblib.load(preproc_file)
# prepare test data for prediction step
def hash_features(data):
    """Turn the raw test dataframe into hashed feature vectors.

    Expands the 'hour' column into datetime-derived features, drops 'id',
    converts every remaining column to str and applies the fitted hashing
    transformer `preproc` (module-level global from the training run).
    NOTE(review): mutates the caller's dataframe in place (columns are
    added and 'hour' is overwritten).
    """
    # engineered features related to categorical data
    # (disabled; presumably kept for parity with the training pipeline —
    # confirm before enabling)
    add_engineered_categorical_features = False
    if add_engineered_categorical_features:
        data['app']=data['app_id'].values+data['app_domain'].values+data['app_category'].values
        data['site']=data['site_id'].values+data['site_domain'].values+data['site_category'].values
        data['device']= data['device_id'].values+data['device_ip'].values+data['device_model'].values+(data['device_type'].values.astype(str))+(data['device_conn_type'].values.astype(str))
        data['type']=data['device_type'].values +data['device_conn_type'].values
        data['iden']=data['app_id'].values +data['site_id'].values +data['device_id'].values
        data['domain']=data['app_domain'].values +data['site_domain'].values
        data['category']=data['app_category'].values+data['site_category'].values
        data['sum']=data['C1'].values +data['C14'].values +data['C15'].values \
            +data['C16'].values+data['C17'].values\
            +data['C18'].values+data['C19'].values+data['C20'].values+data['C21'].values
        data['pos']= data['banner_pos'].values.astype(str)+data['app_category'].values+data['site_category'].values
    # add engineered features related to datetime
    add_engineered_datetime_features = True
    if add_engineered_datetime_features:
        # 'hour' arrives encoded as YYMMDDHH; parse it once, derive
        # weekday/day, then reduce 'hour' to the hour-of-day component.
        data['hour']=data['hour'].map(lambda x: datetime.strptime(str(x),"%y%m%d%H"))
        data['dayoftheweek']=data['hour'].map(lambda x: x.weekday())
        data['day']=data['hour'].map(lambda x: x.day)
        data['hour']=data['hour'].map(lambda x: x.hour)
    #remove id column (rebinds the local only; caller's frame keeps 'id')
    data = data.drop(['id'], axis=1)
    # Convert all features to str
    features = np.asarray(data.astype(str))
    # hash all the features
    features = preproc.transform(features)
    return features
##############################################################################
# predict results for test data, and build Kaggle's submission file ##########
##############################################################################
# convert 'id' to int
data['id'] = data['id'].apply(lambda x: int(x))
# hashed features for test data
features = hash_features(data)
# Get probability for positive class
click_prob = cls.predict_proba(features)[:,1]
# identifiers for test data examples
# NOTE(review): this shadows the builtin id() for the rest of the cell.
id = data['id'].values
# clean up: free the raw dataframe before building the submission
del data
gc.collect()
# put results in a data frame
df = pd.DataFrame({'id':id, 'click':click_prob})
# Convert to str format
df['id']= df['id'].astype(str)
df['click'] = df['click'].astype(str)
# write results to submission file directly from dataframe
with open(submission_file, 'w') as outfile:
    df.to_csv(outfile,header=True,index_label=None,index=False,encoding='utf-8')
# Get elapsed time
print('elapsed time: %s' % str(datetime.now() - start))
# -
# ## Submit result to kaggle
# Submit to kaggle
# !kaggle competitions submit -c avazu-ctr-prediction -f submission.csv -m 'lightgbm_10_million_samples'
# ## Check submission score
# Check submission score
# !kaggle competitions submissions -c avazu-ctr-prediction
# +
# Author: <NAME> <<EMAIL>>
# NOTE(review): this cell duplicates the imports/paths/model-loading setup
# from the prediction cell above, so the plotting cells below can be re-run
# without re-running the full prediction pipeline.
import string
from datetime import datetime
import numpy as np
import pandas as pd
from pandas import DataFrame
import gc
# joblib library for efficient archiving
from sklearn.externals import joblib
# initialize time
start = datetime.now()
# Set file and folder paths
test_file = 'test.csv'
model_path = './'
submission_path = './'
submission_file = submission_path + 'submission.csv'
###############################################################################
# Main
###############################################################################
# read test data into a dataframe
data = pd.read_table(test_file, sep=',', chunksize=None,header='infer',converters={"id":str})
# load archived model_file and preproc_file from training step
model_file = model_path + 'model-avazu-sgd.pkl'
cls = joblib.load(model_file)
preproc_file = model_path + 'model-avazu-preproc.pkl'
preproc = joblib.load(preproc_file)
# +
# %matplotlib inline
import lightgbm as lgb
import pandas as pd
import matplotlib.pyplot as plt
# NOTE(review): `cls` was last loaded from 'model-avazu-sgd.pkl' above; the
# calls below assume a fitted LightGBM sklearn model — confirm which model
# is actually in scope when this cell runs.
print('Plot metrics recorded during training...')
# plot_metric() takes the eval-results dict (or fitted model) as its first
# argument; the original call also passed booster=cls.booster_, which is not
# a valid keyword and raised a TypeError.
ax = lgb.plot_metric(cls.evals_result_, metric='logloss')
plt.show()
print('Plot feature importances...')
# plot_importance() expects the fitted model/booster itself, not the raw
# feature_importances_ array.
ax = lgb.plot_importance(cls, max_num_features=10)
plt.show()
print('Plot 84th tree...') # one tree use categorical feature to split
ax = lgb.plot_tree(cls, tree_index=83, figsize=(20, 8), show_info=['split_gain'])
plt.show()
print('Plot 84th tree with graphviz...')
# create_tree_digraph() is a module-level function, not a method of the
# sklearn wrapper; calling cls.create_tree_digraph(cls, ...) passed the
# model twice.
graph = lgb.create_tree_digraph(cls, tree_index=83, name='Tree84')
graph.render(view=True)
# -
dir(cls.booster_)
print('Plot metrics recorded during training...')
ax = lgb.plot_metric(cls.evals_result_, metric='logloss')
plt.show()
| sklearn/Predict.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# name: python3
# ---
import sys
import os
import glob
import re
import pandas as pd
import numpy as np
from lxml import etree
import matplotlib.pyplot as plt
import requests
import time
from collections import Counter
import json
# +
#sys.path.append(os.path.abspath("./../../../../Dropbox/MTB/Göttingen/research/"))
#sys.path.append(os.path.abspath("./../../"))
# -
import json
from urllib.request import urlopen
# Fetch all claims for entity Q9184 from the Wikidata API.
url = "https://www.wikidata.org/w/api.php?action=wbgetclaims&entity=Q9184&format=json"
# Use a context manager so the HTTP response is closed deterministically
# instead of leaking the socket; json.load() reads the stream directly.
with urlopen(url) as jsonurl:
    text = json.load(jsonurl)
text["claims"]
| code/python/notebooks/20210919_testing_wikidata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Process Discovery over synthetic logs
# +
import pm4py
#Algorithms
from pm4py.algo.discovery.alpha import algorithm as alpha_miner
from pm4py.algo.discovery.inductive import algorithm as inductive_miner
#Evaluation
#from pm4py.algo.evaluation.simplicity import algorithm as simplicity_algo
#from pm4py.algo.evaluation.replay_fitness import algorithm as fitness_algo
#from pm4py.algo.evaluation.precision import algorithm as precision_algo
#from pm4py.algo.evaluation.generalization import algorithm as generalization_algo
from pm4py.algo.evaluation import algorithm as evaluator_algo
# Visualization
from pm4py.visualization.petri_net import visualizer as pn_visualizer
# -
# ## Loading XES file
# ##### The dataframe produced in the first step was imported to [Disco Process Mining tool](https://fluxicon.com/disco/) than exported as a XES file
file_path = '../data/synthetic-log/logs.xes.gz'
# Read the XES event log. (The original line had a duplicated
# `event_log = event_log =` assignment.)
event_log = pm4py.read_xes(file_path)
# ## Alpha Algorithm
# Discover a Petri net with the Alpha miner and render it.
petri_model, start_marking, end_marking = alpha_miner.apply(event_log)
gviz = pn_visualizer.apply(petri_model, start_marking, end_marking)
pn_visualizer.view(gviz)
# Score the discovered net (fitness, precision, generalization, simplicity).
results = evaluator_algo.apply(event_log, petri_model, start_marking, end_marking)
results
# ## Inductive Miner
# Repeat discovery and evaluation with the Inductive miner.
petri_model, start_marking, end_marking = inductive_miner.apply(event_log)
gviz = pn_visualizer.apply(petri_model, start_marking, end_marking)
pn_visualizer.view(gviz)
results = evaluator_algo.apply(event_log, petri_model, start_marking, end_marking)
results
| notebooks/2-fpn-discovery-models-synthetic-log.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# # Class Diagrams
#
# This is a simple viewer for class diagrams. Customized towards the book.
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# **Prerequisites**
#
# * _Refer to earlier chapters as notebooks here, as here:_ [Earlier Chapter](Debugger.ipynb).
# + button=false new_sheet=false run_control={"read_only": false} slideshow={"slide_type": "skip"}
import bookutils
# -
# ## Synopsis
# <!-- Automatically generated. Do not edit. -->
#
# To [use the code provided in this chapter](Importing.ipynb), write
#
# ```python
# >>> from fuzzingbook.ClassDiagram import <identifier>
# ```
#
# and then make use of the following features.
#
#
# The `display_class_hierarchy()` function shows the class hierarchy for the given class. Methods with docstrings (intended to be used by the public) are shown in bold.
#
# ```python
# >>> display_class_hierarchy(GrammarFuzzer)
# ```
#
# 
#
#
# + [markdown] button=false new_sheet=true run_control={"read_only": false}
# ## Getting a Class Hierarchy
# -
import inspect
# Using `mro()`, we can access the class hierarchy. We make sure to avoid duplicates created by `class X(X)`.
# ignore
from typing import Callable, Dict, Type, Set, List, Union, Any, Tuple, Optional
def class_hierarchy(cls: Type) -> List[Type]:
    """Return cls.mro() with consecutive same-named entries collapsed.

    Duplicates arise from the `class X(X)` incremental-definition idiom
    used in notebooks; only the first occurrence of each run is kept.
    """
    hierarchy = []
    previous_name = ""
    for superclass in cls.mro():
        if superclass.__name__ == previous_name:
            continue
        hierarchy.append(superclass)
        previous_name = superclass.__name__
    return hierarchy
# Here's an example:
# Demo class used throughout this chapter. Its docstrings are deliberately
# part of the demo data — they are inspected and rendered below, so they
# must not be edited casually.
class A_Class:
    """A Class which does A thing right.
    Comes with a longer docstring."""
    def foo(self) -> None:
        """The Adventures of the glorious Foo"""
        pass
    def quux(self) -> None:
        """A method that is not used."""
        pass
# Incremental definition: `class A_Class(A_Class)` extends the previous
# A_Class under the same name (the notebook idiom class_hierarchy() collapses).
class A_Class(A_Class):
    # We define another function in a separate cell.
    def second(self) -> None:
        pass
# Subclass that overrides foo(), adds bar(), and introduces a class variable.
class B_Class(A_Class):
    """A subclass inheriting some methods."""
    VAR = "A variable"
    def foo(self) -> None:
        """A WW2 foo fighter."""
        pass
    def bar(self, qux: Any = None, bartender: int = 42) -> None:
        """A qux walks into a bar.
        `bartender` is an optional attribute."""
        pass
# Independent mixin-style class; D_Class below inherits from both B and C.
class C_Class:
    """A class injecting some method"""
    def qux(self) -> None:
        pass
# Multiple inheritance: B_Class (and transitively A_Class) plus C_Class.
class D_Class(B_Class, C_Class):
    """A subclass inheriting from multiple superclasses.
    Comes with a fairly long, but meaningless documentation."""
    def foo(self) -> None:
        # Explicitly delegate to B_Class's foo().
        B_Class.foo(self)
# Same-name subclassing again; class_tree()/class_hierarchy() must collapse it.
class D_Class(D_Class):
    pass  # An incremental addition that should not impact D's semantics
class_hierarchy(D_Class)
# ## Getting a Class Tree
# We can use `__bases__` to obtain the immediate base classes.
D_Class.__bases__
# `class_tree()` returns a class tree, using the "lowest" (most specialized) class with the same name.
def class_tree(cls: Type, lowest: Optional[Type] = None) -> List[Tuple[Type, List]]:
    """Return the class tree of `cls` as nested (class, subtree) tuples,
    using the "lowest" (most specialized) class for each repeated name.

    `lowest` tracks the most specialized same-named class while recursing
    through `class X(X)` chains; it is internal and defaults to None.
    (Fix: the default None requires Optional[Type], not bare Type.)
    """
    ret = []
    for base in cls.__bases__:
        if base.__name__ == cls.__name__:
            # `class X(X)` pattern: remember the most specialized X and
            # recurse past the shadowed definition.
            if not lowest:
                lowest = cls
            ret += class_tree(base, lowest)
        else:
            if lowest:
                cls = lowest
            ret.append((cls, class_tree(base)))
    return ret
class_tree(D_Class)
class_tree(D_Class)[0][0]
# The root of the tree is the most specialized D_Class definition.
assert class_tree(D_Class)[0][0] == D_Class
# `class_set()` flattens the tree into a set:
def class_set(classes: Union[Type, List[Type]]) -> Set[Type]:
    """Flatten the class tree(s) of `classes` into a set of classes.

    Fix: the original called `traverse_tree(subtrees)` inside a loop over
    `subtrees`, re-walking the whole subtree list once per child. The
    resulting set was the same, but the work grew exponentially with tree
    depth. Each subtree list is now traversed exactly once.
    """
    if not isinstance(classes, list):
        classes = [classes]
    ret = set()
    def traverse_tree(tree: List[Tuple[Type, List]]) -> None:
        for (cls, subtrees) in tree:
            ret.add(cls)
            traverse_tree(subtrees)
    for cls in classes:
        traverse_tree(class_tree(cls))
    return ret
class_set(D_Class)
# The flattened set contains every class in D's hierarchy (object excluded,
# since class_tree() only records each class paired with its bases' subtrees).
assert A_Class in class_set(D_Class)
assert B_Class in class_set(D_Class)
assert C_Class in class_set(D_Class)
assert D_Class in class_set(D_Class)
class_set([B_Class, C_Class])
# ### Getting Docs
A_Class.__doc__
A_Class.__bases__[0].__doc__
A_Class.__bases__[0].__name__
D_Class.foo
D_Class.foo.__doc__
A_Class.foo.__doc__
def docstring(obj: Any) -> str:
    """Return the cleaned docstring of `obj`, or "" when it has none."""
    return inspect.getdoc(obj) or ""
docstring(A_Class)
docstring(D_Class.foo)
# Deliberately undocumented: demonstrates docstring() returning "".
# (Adding a docstring here would change the demo output below.)
def unknown() -> None:
    pass
docstring(unknown)
import html
import re
def escape(text: str) -> str:
    """HTML-escape `text` and encode the characters that are special inside
    GraphViz record labels ({, |, }) as numeric character entities.

    Fix: the entity replacements had been decoded to no-op
    `replace('{', '{')` calls (apparently by an HTML round-trip);
    restored to `&#x7b;` / `&#x7c;` / `&#x7d;`.
    """
    text = html.escape(text)
    # html.escape() has already converted any raw < and > to entities.
    assert '<' not in text
    assert '>' not in text
    text = text.replace('{', '&#x7b;')
    text = text.replace('|', '&#x7c;')
    text = text.replace('}', '&#x7d;')
    return text
escape("f(foo={})")
def escape_doc(docstring: str) -> str:
    """Escape a docstring for use in a GraphViz tooltip/label: escape each
    line individually and join with the `&#x0a;` newline entity.

    Fix: the `&#x0a;` entity had been decoded into a literal newline inside
    the string (broken syntax); restored.
    """
    DOC_INDENT = 0
    docstring = "&#x0a;".join(
        ' ' * DOC_INDENT + escape(line).strip()
        for line in docstring.split('\n')
    )
    return docstring
print(escape_doc("'Hello\n {You|Me}'"))
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ## Getting Methods and Variables
# -
inspect.getmembers(D_Class)
def class_items(cls: Type, pred: Callable) -> List[Tuple[str, Any]]:
    """Collect (name, member) pairs matching `pred` from `cls` and all its
    superclasses, keeping only the first occurrence of each name (i.e. the
    most specialized definition wins)."""
    def gather(c: Type) -> List[Tuple[str, Any]]:
        found = list(inspect.getmembers(c, pred))
        for base in c.__bases__:
            found.extend(gather(base))
        return found
    seen = set()
    unique_items = []
    for name, member in gather(cls):
        if name in seen:
            continue
        seen.add(name)
        unique_items.append((name, member))
    return unique_items
def class_methods(cls: Type) -> List[Tuple[str, Callable]]:
    """Return all (name, function) pairs of `cls` and its superclasses."""
    return class_items(cls, inspect.isfunction)
def defined_in(name: str, cls: Type) -> bool:
    """True iff attribute `name` is introduced by `cls` itself rather than
    inherited from a (differently named) superclass."""
    if not hasattr(cls, name):
        return False
    providers = []
    def scan(c: Type) -> None:
        if not hasattr(c, name):
            return
        for base in c.__bases__:
            if hasattr(base, name):
                providers.append(base)
            scan(base)
    scan(cls)
    # Defined here only when every providing ancestor shares cls's name
    # (which happens with the `class X(X)` incremental-definition idiom).
    return all(c.__name__ == cls.__name__ for c in providers)
# VAR is introduced by B_Class only, so it is "defined in" B and nowhere else.
assert not defined_in('VAR', A_Class)
assert defined_in('VAR', B_Class)
assert not defined_in('VAR', C_Class)
assert not defined_in('VAR', D_Class)
def class_vars(cls: Type) -> List[Any]:
    """Return (name, value) pairs for non-dunder, non-callable class
    variables that `cls` itself defines (inherited ones are excluded)."""
    non_callables = class_items(cls, lambda member: not callable(member))
    return [
        (name, value)
        for (name, value) in non_callables
        if not name.startswith('__') and defined_in(name, cls)
    ]
class_methods(D_Class)
class_vars(B_Class)
# We're only interested in
#
# * functions _defined_ in that class
# * functions that come with a docstring
def public_class_methods(cls: Type) -> List[Tuple[str, Callable]]:
    """Return only the methods actually defined in `cls`, identified by
    their qualified name starting with the class name."""
    return [(name, method) for (name, method) in class_methods(cls)
            if method.__qualname__.startswith(cls.__name__)]
def doc_class_methods(cls: Type) -> List[Tuple[str, Callable]]:
    """Return the methods defined in `cls` that actually have a docstring.

    Fix: `docstring()` returns "" (never None) for undocumented objects, so
    the original `docstring(method) is not None` check was always true and
    the filter let every method through.
    """
    return [(name, method) for (name, method) in public_class_methods(cls)
            if docstring(method)]
public_class_methods(D_Class)
doc_class_methods(D_Class)
def overloaded_class_methods(classes: Union[Type, List[Type]]) -> Set[str]:
    """Return the names of methods defined (not just inherited) by two or
    more classes in the hierarchy of `classes`."""
    defining_classes: Dict[str, Set[Type]] = {}
    for cls in class_set(classes):
        for (name, method) in class_methods(cls):
            # Only count methods the class defines itself.
            if method.__qualname__.startswith(cls.__name__):
                defining_classes.setdefault(name, set()).add(cls)
    return {name for name, owners in defining_classes.items()
            if len(owners) >= 2}
overloaded_class_methods(D_Class)
# ## Drawing Class Hierarchy with Method Names
from inspect import signature
import warnings
def display_class_hierarchy(classes: Union[Type, List[Type]],
public_methods: Optional[List] = None,
abstract_classes: Optional[List] = None,
include_methods: bool = True,
include_class_vars: bool =True,
include_legend: bool = True,
project: str = 'fuzzingbook',
log: bool = False) -> Any:
"""Visualize a class hierarchy.
`classes` is a Python class (or a list of classes) to be visualized.
`public_methods`, if given, is a list of methods to be shown as "public" (bold).
(Default: all methods with a docstring)
`abstract_classes`, if given, is a list of classes to be shown as "abstract" (cursive).
(Default: all classes with an abstract method)
`include_methods`: if True, include all methods (default)
`include_legend`: if True, include a legend (default)
"""
from graphviz import Digraph
if project == 'debuggingbook':
CLASS_FONT = 'Raleway, Helvetica, Arial, sans-serif'
CLASS_COLOR = '#6A0DAD' # HTML 'purple'
else:
CLASS_FONT = 'Patua One, Helvetica, sans-serif'
CLASS_COLOR = '#B03A2E'
METHOD_FONT = "'Fira Mono', 'Source Code Pro', 'Courier', monospace"
METHOD_COLOR = 'black'
if isinstance(classes, list):
starting_class = classes[0]
else:
starting_class = classes
classes = [starting_class]
title = starting_class.__name__ + " class hierarchy"
dot = Digraph(comment=title)
dot.attr('node', shape='record', fontname=CLASS_FONT)
dot.attr('graph', rankdir='BT', tooltip=title)
dot.attr('edge', arrowhead='empty')
edges = set()
overloaded_methods: Set[str] = set()
drawn_classes = set()
def method_string(method_name: str, public: bool, overloaded: bool,
fontsize: float = 10.0) -> str:
method_string = f'<font face="{METHOD_FONT}" point-size="{str(fontsize)}">'
if overloaded:
name = f'<i>{method_name}()</i>'
else:
name = f'{method_name}()'
if public:
method_string += f'<b>{name}</b>'
else:
method_string += f'<font color="{METHOD_COLOR}">' \
f'{name}</font>'
method_string += '</font>'
return method_string
def var_string(var_name: str, fontsize: int = 10) -> str:
var_string = f'<font face="{METHOD_FONT}" point-size="{str(fontsize)}">'
var_string += f'{var_name}'
var_string += '</font>'
return var_string
def is_overloaded(method_name: str, f: Any) -> bool:
return (method_name in overloaded_methods or
(docstring(f) is not None and "in subclasses" in docstring(f)))
def is_abstract(cls: Type) -> bool:
if not abstract_classes:
return inspect.isabstract(cls)
return (cls in abstract_classes or
any(c.__name__ == cls.__name__ for c in abstract_classes))
def is_public(method_name: str, f: Any) -> bool:
if public_methods:
return (method_name in public_methods or
f in public_methods or
any(f.__qualname__ == m.__qualname__
for m in public_methods))
return bool(docstring(f))
def class_vars_string(cls: Type, url: str) -> str:
cls_vars = class_vars(cls)
if len(cls_vars) == 0:
return ""
vars_string = f'<table border="0" cellpadding="0" ' \
f'cellspacing="0" ' \
f'align="left" tooltip="{cls.__name__}" href="#">'
for (name, var) in cls_vars:
if log:
print(f" Drawing {name}")
var_doc = escape(f"{name} = {repr(var)}")
tooltip = f' tooltip="{var_doc}"'
href = f' href="{url}"'
vars_string += f'<tr><td align="left" border="0"' \
f'{tooltip}{href}>'
vars_string += var_string(name)
vars_string += '</td></tr>'
vars_string += '</table>'
return vars_string
def class_methods_string(cls: Type, url: str) -> str:
    """Return an HTML-like <table> listing cls's methods (public first),
    with tooltips carrying signature + docstring and links to `url`;
    '' if the class has no methods to show."""
    methods = public_class_methods(cls)
    # return "<br/>".join([name + "()" for (name, f) in methods])
    if len(methods) == 0:
        return ""
    methods_string = f'<table border="0" cellpadding="0" ' \
                     f'cellspacing="0" ' \
                     f'align="left" tooltip="{cls.__name__}" href="#">'
    # Two passes: public methods first, then private ones.
    for public in [True, False]:
        for (name, f) in methods:
            if public != is_public(name, f):
                continue
            if log:
                print(f"    Drawing {name}()")
            if is_public(name, f) and not docstring(f):
                warnings.warn(f"{f.__qualname__}() is listed as public,"
                              f" but has no docstring")
            overloaded = is_overloaded(name, f)
            method_doc = escape(name + str(inspect.signature(f)))
            if docstring(f):
                # FIX: this string literal was split across two source lines
                # (a syntax error); restore the newline escape separating
                # signature and docstring in the tooltip.
                method_doc += ":\n" + escape_doc(docstring(f))
            # Tooltips are only shown if a href is present, too
            tooltip = f' tooltip="{method_doc}"'
            href = f' href="{url}"'
            methods_string += f'<tr><td align="left" border="0"' \
                              f'{tooltip}{href}>'
            methods_string += method_string(name, public, overloaded)
            methods_string += '</td></tr>'
    methods_string += '</table>'
    return methods_string
def display_class_node(cls: Type) -> None:
    """Add a graphviz node for cls (at most once), with optional class-var
    and method tables, linking to the defining notebook."""
    name = cls.__name__
    if name in drawn_classes:
        return
    drawn_classes.add(name)
    if log:
        print(f"Drawing class {name}")
    # Classes defined in the current notebook link to '#'; others link to
    # their module's notebook.
    if cls.__module__ == '__main__':
        url = '#'
    else:
        url = cls.__module__ + '.ipynb'
    if is_abstract(cls):
        formatted_class_name = f'<i>{cls.__name__}</i>'
    else:
        formatted_class_name = cls.__name__
    if include_methods or include_class_vars:
        vars = class_vars_string(cls, url)
        methods = class_methods_string(cls, url)
        spec = '<{<b><font color="' + CLASS_COLOR + '">' + \
            formatted_class_name + '</font></b>'
        if include_class_vars and vars:
            spec += '|' + vars
        if include_methods and methods:
            spec += '|' + methods
        spec += '}>'
    else:
        spec = '<' + formatted_class_name + '>'
    class_doc = escape('class ' + cls.__name__)
    if docstring(cls):
        # FIX: this string literal was split across two source lines
        # (a syntax error); restore the newline escape.
        class_doc += ':\n' + escape_doc(docstring(cls))
    else:
        warnings.warn(f"Class {cls.__name__} has no docstring")
    dot.node(name, spec, tooltip=class_doc, href=url)
def display_class_trees(trees: List[Tuple[Type, List]]) -> None:
    """Recursively draw nodes and inheritance edges for class trees,
    deduplicating edges via the `edges` set."""
    for (cls, subtrees) in trees:
        display_class_node(cls)
        for (subcls, _) in subtrees:
            edge = (cls.__name__, subcls.__name__)
            if edge not in edges:
                dot.edge(*edge)
                edges.add(edge)
        display_class_trees(subtrees)
def display_legend() -> None:
    """Attach a small 'Legend' node explaining the three method styles."""
    fontsize = 8.0
    samples = [
        method_string("public_method",
                      public=True, overloaded=False, fontsize=fontsize),
        method_string("private_method",
                      public=False, overloaded=False, fontsize=fontsize),
        method_string("overloaded_method",
                      public=False, overloaded=True, fontsize=fontsize),
    ]
    label = f'<b><font color="{CLASS_COLOR}">Legend</font></b><br align="left"/>'
    for entry in samples:
        label += '• ' + entry + '<br align="left"/>'
    label += f'<font face="Helvetica" point-size="{str(fontsize + 1)}">' \
        'Hover over names to see doc' \
        '</font><br align="left"/>'
    dot.node('Legend', label=f'<{label}>', shape='plain', fontsize=str(fontsize + 2))
# Draw each requested class tree; the overloaded-method set is
# recomputed for every starting class.
for cls in classes:
    tree = class_tree(cls)
    overloaded_methods = overloaded_class_methods(cls)
    display_class_trees(tree)
if include_legend:
    display_legend()
return dot
# Demo calls (executed when the notebook runs).
display_class_hierarchy(D_Class, project='debuggingbook', log=True)
display_class_hierarchy(D_Class, project='fuzzingbook')
# Here is a variant with abstract classes and logging:
display_class_hierarchy([A_Class, B_Class],
                        abstract_classes=[A_Class],
                        public_methods=[
                            A_Class.quux,
                        ], log=True)
# ## Synopsis
# The `display_class_hierarchy()` function shows the class hierarchy for the given class (or list of classes).
# * The keyword parameter `public_methods`, if given, is a list of "public" methods to be used by clients (default: all methods with docstrings).
# * The keyword parameter `abstract_classes`, if given, is a list of classes to be displayed as "abstract" (i.e. with a cursive class name).
display_class_hierarchy(D_Class, abstract_classes=[A_Class])
# ## Exercises
# Enjoy!
| notebooks/shared/ClassDiagram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Strings
# Strings have already been discussed in Chapter 02, but can also be treated as collections similar to lists and tuples.
# For example
S = 'The Taj Mahal is beautiful'
print([x for x in S if x.islower()]) # list of lower case characters
words=S.split() # list of words
print("Words are:",words)
print("--".join(words)) # hyphenated
" ".join(w.capitalize() for w in words) # capitalise words
# String Indexing and Slicing are similar to Lists, which were explained in detail earlier.
print(S[4])
print(S[4:])
# ## Dictionaries
# Dictionaries are mappings between keys and items stored in the dictionaries. Alternatively one can think of dictionaries as sets in which something is stored against every element of the set. They can be defined as follows:
# To define a dictionary, equate a variable to { } or dict()
d = dict() # or equivalently d={}
print(type(d))
d['abc'] = 3
d[4] = "A string"
print(d)
# As can be guessed from the output above, dictionaries can be defined by using the `{ key : value }` syntax. The following dictionary has three elements
d = { 1: 'One', 2 : 'Two', 100 : 'Hundred'}
len(d)
# Now you are able to access 'One' by the index value set at 1
print(d[1])
# There are a number of alternative ways for specifying a dictionary including as a list of `(key,value)` tuples.
# To illustrate this we will start with two lists and form a set of tuples from them using the **zip()** function
# Two lists which are related can be merged to form a dictionary.
names = ['One', 'Two', 'Three', 'Four', 'Five']
numbers = [1, 2, 3, 4, 5]
[ (name,number) for name,number in zip(names,numbers)] # create (name,number) pairs
# Now we can create a dictionary that maps the name to the number as follows.
a1 = dict((name,number) for name,number in zip(names,numbers))
print(a1)
# Note that the ordering for this dictionary is not based on the order in which elements are added but on its own ordering (based on hash index ordering). It is best never to assume an ordering when iterating over elements of a dictionary.
#
# **Note:** Any value used as a key must be _immutable_. That means that _tuples_ can be used as keys (because they can't be changed) but lists are not allowed. As an aside for more advanced readers, arbitrary objects can be used as keys -- but in this case the object reference (address) is used as a key, not the "value" of the object.
#
# The use of tuples as keys is very common and allows for a (sparse) matrix type data structure:
matrix={ (0,1): 3.5, (2,17): 0.1}
matrix[2,2] = matrix[0,1] + matrix[2,17]
# matrix[2,2] is equivalent to matrix[ (2,2) ]
print(matrix)
# Dictionaries can also be built using the loop style definition.
a2 = { name : len(name) for name in names}
print(a2)
# ### Built-in Functions
# The `len()` function and `in` operator have the obvious meaning:
print("a1 has",len(a1),"elements")
print("One is in a1",'One' in a1,"but not 2:", 2 in a1) # 'in' checks keys only
# The `clear( )` function is used to erase all elements.
a2.clear()
print(a2)
# The `values( )` function returns a list with all the assigned values in the dictionary. (Actually not quite a list, but something that we can iterate over just like a list to construct a list, tuple or any other collection):
[ v for v in a1.values() ]
# The `keys( )` function returns all the keys, that is, the indices under which the values are stored.
{ k for k in a1.keys() }
# The `items( )` function returns the key-value pairs, each element of the dictionary appearing as a tuple. This is the same as the result that was obtained when the zip function was used - except that the ordering may be 'shuffled' by the dictionary.
", ".join( "%s = %d" % (name,val) for name,val in a1.items())
# The `pop( )` function removes the element with the given key and returns its value, which can be assigned to a new variable. Note that only the value is returned, not the key.
val = a1.pop('Four')
print(a1)
print("Removed",val)
# # When to use Dictionaries vs Lists
#
# The choice of whether to store data in a list or dictionary (or set) may seem a bit arbitrary at times. Here is a brief summary of some of the pros and cons of these:
#
# * Finding elements in a set vs a list: `x in C` is valid whether the collection `C` is a list, set or dictionary. However computationally for large collections this is much slower with lists than sets or dictionaries. On the other hand if all items are indexed by an integer than `x[45672]` is much faster to look up if x is a list than if it is a dictionary.
# * If all your items are indexed by integers but with some indices unused you could use lists and assign some dummy value (e.g. "") whenever there is no corresponding item. For very sparse collections this could consume significant additional memory compared to a dictionary. On the other hand if most values are present, then storing the indices explicitly (as is done in a dictionary) could consume significant additional memory compared to the list representation.
#
#
import time
bigList = [i for i in range(0,100000)]
bigSet = set(bigList)
# FIX: time.clock() was deprecated in Python 3.3 and removed in 3.8;
# time.perf_counter() is the portable high-resolution replacement.
start = time.perf_counter() # how long to find the last number out of 10,000 items?
99999 in bigList
print("List lookup time: %.6f ms" % (1000*(time.perf_counter()-start)))
start = time.perf_counter()
99999 in bigSet
print("Set lookup time: %.6f ms" % (1000*(time.perf_counter()-start)))
| Intro-to-Python/04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 (''.venv'': venv)'
# name: pythonjvsc74a57bd067b393f23005f5647497c50fa99fb25b525d8642232b1bdc07a39bdb19f3ee4f
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import re
import math
from scipy import interpolate
# Global matplotlib font settings for all figures in this notebook.
plt.rc('font',family='Times New Roman',size=15)
# Simulation / post-processing parameters (units assumed — TODO confirm
# against the OpenFOAM case setup):
Tref=773  # reference temperature [K] used to non-dimensionalize T
lx=340  # number of grid cells in x
ly=150  # number of grid cells in y
endTime=0.1  # simulated end time [s]
timeStep=0.01  # output (write) interval [s]
MCoke=12  # molar mass of coke (carbon) [g/mol]
MO2=32  # molar mass of O2 [g/mol]
pixelResolution=1e-6  # cell size [m] — presumably; verify with the mesh
YO2=0.22  # inlet O2 mass fraction
#
# ## Temporal Evolution of Combustion Temperature,residual coke and reaction rate
# +
# Parse OpenFOAM's fieldMinMax.dat: one comment line, one '#'-prefixed
# header line, then tab-separated rows. The per-row 'processor' columns
# are dropped and the remaining columns are named from the header.
fieldminMaxFile="../postProcessing/minMaxComponents/0/fieldMinMax.dat"
with open(fieldminMaxFile,"r") as fp:
    comment = fp.readline()
    header = fp.readline()
header = header[1:-1].split()
indexs_processor = [i for i, name in enumerate(header) if name == "processor"]
indexs_processor.reverse()
data = pd.read_csv(fieldminMaxFile, comment='#', sep='\t', header=None)
data = data.drop(indexs_processor, axis=1)
data.rename(columns=lambda x: header[x], inplace=True)
data.head()
# -
# Keep every 10th row of the min/max history for plotting.
sampling_rate=10
data_sampling=data[data.index%sampling_rate==0]
data_sampling.shape
def readOpenFoamUField(file,nx,ny,normizedValue=1,component=0):
    """Read one component of an OpenFOAM vector field (e.g. U) into an
    (ny, nx) numpy array, with each value divided by `normizedValue`.

    The internal-field block is located via the "internalField" /
    "boundaryField" markers; data lines look like "(ux uy uz)".
    """
    with open(file, "r") as fp:
        lines = fp.readlines()
    # Data starts 3 lines after "internalField" (count + '(' lines) and
    # stops 4 lines before "boundaryField" (')' + ';' + blank line).
    for idx, text in enumerate(lines):
        if text.startswith("internalField"):
            start = idx + 3
        elif text.startswith("boundaryField"):
            end = idx - 4
            break
    component_values = []
    for raw in lines[start:end + 1]:
        tokens = raw.replace('\n', '').split()
        numbers = [float(tok.replace('(', '').replace(')', '')) for tok in tokens]
        component_values.append(numbers[component] / normizedValue)
    return np.array(component_values).reshape(ny, nx)
def readOpenFoamField(file,nx,ny,normizedValue=1):
    """Read an OpenFOAM scalar field into an (ny, nx) numpy array,
    with each value divided by `normizedValue`.

    Same file-layout conventions as readOpenFoamUField, but each data
    line holds a single scalar.
    """
    with open(file, "r") as fp:
        lines = fp.readlines()
    for idx, text in enumerate(lines):
        if text.startswith("internalField"):
            start = idx + 3
        elif text.startswith("boundaryField"):
            end = idx - 4
            break
    scalars = [float(raw.replace('\n', '')) / normizedValue
               for raw in lines[start:end + 1]]
    return np.array(scalars).reshape(ny, nx)
# Build the output-directory names ("0.01" ... "0.1") from the numeric
# times. FIX: the previous .rstrip('.0') strips a *set of characters*,
# not a suffix — e.g. "10.00" would collapse to "1". Stripping trailing
# zeros first, then a trailing dot, is safe and identical for the
# current time range.
times=np.arange(timeStep,endTime+timeStep,timeStep)
stimes=pd.Series([f"{t:.2f}".rstrip('0').rstrip('.') for t in times])
sampling_rate=1
stimes=stimes[stimes.index%sampling_rate==0]
stimes.shape
# +
# Accumulate per-snapshot statistics: mean residual coke, mean/total
# coke reaction rate, and the O2 convective flux through the inlet
# column (x = 0).
volumeAveragedCoke=[]
volumeAveragedReactionRate=[]
sumReactionRate=[]
inletfluxs=[]
for t in stimes:
    cokeField=readOpenFoamField(f"../{str(t)}/coke",lx,ly)
    volumeAveragedCoke.append(np.mean(cokeField))
    # (sic: 'cokeRectionRate' matches the solver's field name on disk)
    cokeReactionRateField=readOpenFoamField(f"../{str(t)}/cokeRectionRate",lx,ly)
    volumeAveragedReactionRate.append(np.mean(cokeReactionRateField))
    sumReactionRate.append(np.sum(cokeReactionRateField))
    densityField=readOpenFoamField(f"../{str(t)}/rho",lx,ly)
    UxField=readOpenFoamUField(f"../{str(t)}/U",lx,ly)
    inletFluxProfile=densityField[:,0]*UxField[:,0]
    inletfluxs.append(np.sum(inletFluxProfile))
# +
# Twin-axis plot: dimensionless maximum temperature (left, blue) vs
# volume-averaged residual coke fraction (right, red) over time.
fig, ax = plt.subplots()
ax.set_xlabel(f"Time (s)")
ax.set_title(f"Temporal Evolution",color="k")
ax.plot(data["Time"],data["max"]/Tref,linestyle="-",label="Maximum Temperature",color="b")
ax.set_ylabel(f"Dimensionless T",color="b")
ax.tick_params(axis='y', labelcolor="b")
ax2 = ax.twinx()
ax2.plot(stimes.index*timeStep,volumeAveragedCoke,linestyle="-",color="r")
ax2.set_xlabel('Time (s)',color="r")
ax2.set_ylabel("Residual coke fraction",color="r")
ax2.tick_params(axis='y', labelcolor="r")
# +
# Twin-axis log plot: total O2 consumption rate (converted from coke
# rate via molar masses) vs total O2 supplied by inlet convection.
fig,ax=plt.subplots()
ax.plot(stimes.index*timeStep,np.array(sumReactionRate)*(pixelResolution*pixelResolution)*-1/MCoke*MO2,linestyle="-",color="b")
plt.rcParams.update({'mathtext.default': 'regular' })
ax.set_xlabel('Time (s)')
ax.set_ylabel("Total $O_2$ Reaction Rate (kg/s)",color="b")
ax.set_ylim([1e-7,2e-5])
ax.set_yscale('log')
ax.tick_params(axis='y', labelcolor="b")
ax2 = ax.twinx()
ax2.plot(stimes.index*timeStep,np.array(inletfluxs)*pixelResolution*YO2,linestyle="--",color="r")
ax2.set_ylabel("Total $O_{2}$ Flux by convection",color="r")
ax2.set_ylim([1e-7,2e-5])
ax2.set_yscale('log')
ax2.tick_params(axis='y', labelcolor="r")
fig.tight_layout()
# -
# ## Transversely averaged O2 fraction and temperature distributions at three typical time instants
# +
def show(timeInstant):
    """Plot coke / O2 / dimensionless-T contour panels for one output time."""
    cokeField=readOpenFoamField(f"../{str(timeInstant)}/coke",lx,ly)
    O2Field=readOpenFoamField(f"../{str(timeInstant)}/O2",lx,ly)
    TField=readOpenFoamField(f"../{str(timeInstant)}/T",lx,ly,Tref)
    fig,axs=plt.subplots(nrows=3, sharex=True, figsize=(13, 6))
    fig.tight_layout()
    plt.rcParams.update({'mathtext.default': 'regular' })
    # fig.suptitle(f"Field contours at time instant of {str(timeInstant)} s", fontsize=20)
    fig.text(0.55, 1.02, f'Field contours at time instant of {str(timeInstant)} s', transform=fig.transFigure, horizontalalignment='center', fontsize=18)
    im0=axs[0].imshow(cokeField,cmap="coolwarm")
    axs[0].set_title("coke fraction")
    # Colorbars are placed manually just right of each panel.
    bbox_ax0 = axs[0].get_position()
    loc_cbar0 = fig.add_axes([bbox_ax0.x1*1.01, bbox_ax0.y0, 0.02, bbox_ax0.y1-bbox_ax0.y0])
    cbar0 = fig.colorbar(im0, cax=loc_cbar0)
    im1=axs[1].imshow(O2Field,cmap="coolwarm")
    plt.rcParams.update({'mathtext.default': 'regular' })
    axs[1].set_title("${O_2}$ fraction")
    bbox_ax1 = axs[1].get_position()
    loc_cbar1 = fig.add_axes([bbox_ax1.x1*1.01, bbox_ax1.y0, 0.02, bbox_ax1.y1-bbox_ax1.y0])
    cbar1 = fig.colorbar(im1, cax=loc_cbar1)
    im2=axs[2].imshow(TField,cmap="coolwarm")
    axs[2].set_title("Temperature")
    bbox_ax2 = axs[2].get_position()
    loc_cbar2 = fig.add_axes([bbox_ax2.x1*1.01, bbox_ax2.y0, 0.02, bbox_ax2.y1-bbox_ax2.y0])
    cbar2 = fig.colorbar(im2, cax=loc_cbar2)
# show(t1)
# -
# Three representative time instants (early / middle / end of run).
t1=0.01
t2=0.05
t3=0.1
show(t1)
show(t2)
show(t3)
# +
# Transversely averaged (mean over y, axis=0) profiles of coke, O2 and
# temperature along x at the three time instants.
cokeField0=readOpenFoamField(f"../{str(t1)}/coke",lx,ly)
O2Field0=readOpenFoamField(f"../{str(t1)}/O2",lx,ly)
TField0=readOpenFoamField(f"../{str(t1)}/T",lx,ly,Tref)
cokeField1=readOpenFoamField(f"../{str(t2)}/coke",lx,ly)
O2Field1=readOpenFoamField(f"../{str(t2)}/O2",lx,ly)
TField1=readOpenFoamField(f"../{str(t2)}/T",lx,ly,Tref)
cokeField2=readOpenFoamField(f"../{str(t3)}/coke",lx,ly)
O2Field2=readOpenFoamField(f"../{str(t3)}/O2",lx,ly)
TField2=readOpenFoamField(f"../{str(t3)}/T",lx,ly,Tref)
fig,axs=plt.subplots(nrows=3, sharex=True, figsize=(10, 6))
fig.tight_layout()
axs[0].plot(np.mean(cokeField0,axis=0),linestyle="-.",color="k",label=fr"$\mathit{{t}}\ $ = {str(t1)} s")
axs[0].plot(np.mean(cokeField1,axis=0),linestyle="--",color="b",label=fr"$\mathit{{t}}\ $ = {str(t2)} s")
axs[0].plot(np.mean(cokeField2,axis=0),linestyle="-",color="r",label=fr"$\mathit{{t}}\ $ = {str(t3)} s")
axs[0].set_ylabel(f"Coke Fraction")
axs[0].legend()
axs[1].plot(np.mean(O2Field0,axis=0),linestyle="-.",color="k",label=fr"$\mathit{{t}}\ $ = {str(t1)} s")
axs[1].plot(np.mean(O2Field1,axis=0),linestyle="--",color="b",label=fr"$\mathit{{t}}\ $ = {str(t2)} s")
axs[1].plot(np.mean(O2Field2,axis=0),linestyle="-",color="r",label=fr"$\mathit{{t}}\ $ = {str(t3)} s")
axs[1].set_ylabel(f"$O_{2}$ Fraction")
axs[1].legend()
axs[2].plot(np.mean(TField0,axis=0),linestyle="-.",color="k",label=fr"$\mathit{{t}}\ $ = {str(t1)} s")
axs[2].plot(np.mean(TField1,axis=0),linestyle="--",color="b",label=fr"$\mathit{{t}}\ $ = {str(t2)} s")
axs[2].plot(np.mean(TField2,axis=0),linestyle="-",color="r",label=fr"$\mathit{{t}}\ $ = {str(t3)} s")
axs[2].set_ylabel(f"Temperature")
axs[2].legend()
axs[2].set_xlim([0,lx*1.2])
| applications/solvers/cokeCombustionFoam/SegregatedSteps/runs/complicatedPorousMedia/combustions/optimize/tiny2_7/analysis/analyzeCombustion.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.0
# language: julia
# name: julia-1.4
# ---
# + Collapsed="false"
using FileIO
using Images
using ImageSegmentation
using Plots
using GaussianMixtures
using Statistics
# + Collapsed="false"
using Profile
# + Collapsed="false"
# Local helper for embedding OME-XML metadata into saved TIFFs.
include("tiffxml.jl")
# + Collapsed="false"
# Load the confocal stacks (the pipeline below assumes 20 z-slices per
# time point — see z_depth usage).
img3d1 = load("../mRNA_confocal_hamamatsu-60X-TIRF/20200316_result/s35-c1.tiff");
# + Collapsed="false"
img3d2 = load("../mRNA_confocal_hamamatsu-60X-TIRF/20200316_result/s3_c5.tiff");
# + Collapsed="false"
s3c2 = load("../mRNA_confocal_hamamatsu-60X-TIRF/20200316_result/s3_c2.tiff");
# + Collapsed="false"
# Build a 3D binary mask for one z-stack: threshold each slice with Yen's
# method, take the median threshold over all slices, binarize the whole
# stack with it, and clean up with morphological opening.
# Returns (mask, threshold_3d).
function create3dmask(zstack)
    # Generalized: use the stack's actual depth instead of a hard-coded 20
    # (callers passing 20-slice stacks behave exactly as before).
    thresholds_z = [real(yen_threshold(zstack[:, :, i])) for i in 1:size(zstack, 3)]
    threshold_3d = median(thresholds_z)
    #print(threshold_3d)
    mask = opening(zstack .> threshold_3d)
    mask, threshold_3d
end
# Mask each time point of a time series stored as consecutive z-blocks
# (t_length blocks of z_depth slices along dim 3). Returns the masked
# stack and the per-time-point 3D thresholds.
# `z_depth` is now a keyword argument defaulting to the previous
# hard-coded 20, so existing calls are unchanged.
function extract3dnucleus(stack; z_depth = 20)
    t_length = size(stack)[3] ÷ z_depth
    nucleus = zeros(size(stack))
    thresholds = zeros(t_length)
    for i in 1:t_length
        zrange = (i - 1) * z_depth + 1 : i * z_depth
        nucleus_3dmask, thresholds[i] = create3dmask(stack[:, :, zrange])
        nucleus[:, :, zrange] = nucleus_3dmask .* stack[:, :, zrange]
    end
    nucleus, thresholds
end
# + Collapsed="false"
# Run the nucleus extraction on each stack, save the result as a 16-bit
# OME-TIFF, and embed the OME-XML dimension metadata (x, y, z, t).
@time nucleus_all, threshold_all = extract3dnucleus(s3c2);
@time save(File(format"TIFF", "s5-c2_clear.ome.tiff"), N0f16.(nucleus_all))
x, y, z_all = size(nucleus_all)
@time embedxml(x, y, 20, z_all÷20, "s5-c2_clear.ome.tiff")
# + Collapsed="false"
plot(threshold_all)
# + Collapsed="false"
@time nucleus_all_, threshold_all_ = extract3dnucleus(img3d2);
@time save(File(format"TIFF", "img3d2_clear.ome.tiff"), N0f16.(nucleus_all_))
x, y, z_all = size(nucleus_all_)
@time embedxml(x, y, 20, z_all÷20, "img3d2_clear.ome.tiff")
# + Collapsed="false"
plot(threshold_all_)
# + Collapsed="false"
@time nucleus_all__, threshold_all__ = extract3dnucleus(img3d1);
@time save(File(format"TIFF", "s35-c1_clear.ome.tiff"), N0f16.(nucleus_all__))
x, y, z_all = size(nucleus_all__)
@time embedxml(x, y, 20, z_all÷20, "s35-c1_clear.ome.tiff")
# + Collapsed="false"
plot(threshold_all__)
# + [markdown] Collapsed="false"
# # 2D Histogram
# + Collapsed="false"
# Per-slice Yen thresholds for the first 20 z-slices; their median is
# used as a single 2D threshold, drawn as a vertical line on the
# overlaid per-slice histograms.
threshold = [real(yen_threshold(s3c2[:, :, i])) for i in 1:20 ];
threshold_2d = real(median(threshold));
plot()
for i in 1:20
    edge, count = build_histogram(s3c2[:,:,i])
    plot!(edge[2:end], count[2:end])
end
plot!(fill(threshold_2d, 2), [0, 1e4])
annotate!([threshold_2d, 1e4, "$threshold_2d"])
# + Collapsed="false"
plot(threshold, marker=:dot)
# + Collapsed="false"
# Apply the median threshold + opening to the first time point and save.
# NOTE(review): output filename "3dsegmentation3d_opeing.tiff" contains a
# typo ('opeing') — confirm whether downstream steps expect this name.
filtered = zeros(700, 700, 20);
filtered = opening(s3c2[:, :, 1:20].>median(threshold)).*s3c2[:, :, 1:20]
save("3dsegmentation.tiff", N0f16.(filtered));
save("3dsegmentation3d_opeing.tiff", N0f16.(filtered.*opening(filtered.>0)))
# + [markdown] Collapsed="false"
# # 3D Histogram
# + Collapsed="false"
# Histogram over the whole 3D block, with the 3D Yen threshold marked.
edge, count = build_histogram(s3c2[:,:,1:20]);
plot(edge[2:end], count[2:end])
#plot!(fill(median(threshold), 10), 0e5:2e4:1.9e5)
threshold_3d_ = real(yen_threshold(s3c2[:, :,1:20]))
plot!(fill(threshold_3d_, 2), [0, 1e5]) #, text="real(yen_threshold(s3c2[:, :,1:20]))")
# FIX: the interpolation referenced an undefined name `threhold_3d_`
# (missing 's'), which raises UndefVarError at run time.
annotate!([threshold_3d_, 1e5, "$threshold_3d_"])
| notebook/segmentation3d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import os
from datetime import datetime
# Load the Zillow state-level time series and inspect its columns /
# available states.
file_location = 'Resources/Data/State_time_series.csv'
zhvi_df = pd.read_csv(file_location)
# +
# zhvi_df.set_index('Date',inplace=True)
# -
zhvi_df.columns
zhvi_df['RegionName'].value_counts()
# +
# California: keep listing/rental price columns, drop incomplete rows,
# skip the first two remaining rows, and export.
zhvi_CA_df = zhvi_df.loc[zhvi_df['RegionName']=='California']
zhvi_CA_df = zhvi_CA_df[['RegionName','MedianListingPricePerSqft_AllHomes','MedianListingPrice_AllHomes','MedianRentalPrice_AllHomes','MedianRentalPricePerSqft_AllHomes']]
zhvi_CA_df = zhvi_CA_df.dropna()
zhvi_CA_df = zhvi_CA_df.iloc[2:]
zhvi_CA_df.to_csv("Resources/Data/CA_time_series.csv")
zhvi_CA_df
# +
# Florida: same treatment as California.
zhvi_FL_df = zhvi_df.loc[zhvi_df['RegionName']=='Florida']
zhvi_FL_df = zhvi_FL_df[['RegionName','MedianListingPricePerSqft_AllHomes','MedianListingPrice_AllHomes','MedianRentalPrice_AllHomes','MedianRentalPricePerSqft_AllHomes']]
zhvi_FL_df = zhvi_FL_df.dropna()
zhvi_FL_df = zhvi_FL_df.iloc[2:]
zhvi_FL_df.to_csv("Resources/Data/FL_time_series.csv")
zhvi_FL_df
# +
# No data in MedianRentalPricePerSqft_AllHomes and MedianRentalPricePerSqft_AllHomes columns
# So taken Massachusetts
# NOTE(review): NY rows are exported without dropna() (unlike the other
# states), so the CSV will contain NaNs — presumably intentional given
# the comment above; confirm.
zhvi_NY_df = zhvi_df.loc[zhvi_df['RegionName']=='NewYork']
zhvi_NY_df = zhvi_NY_df[['RegionName','MedianListingPricePerSqft_AllHomes','MedianListingPrice_AllHomes','MedianRentalPrice_AllHomes','MedianRentalPricePerSqft_AllHomes']]
# zhvi_NY_df = zhvi_NY_df.dropna()
zhvi_NY_df
zhvi_NY_df.to_csv("Resources/Data/NY_time_series.csv")
# +
# NOTE(review): the MA column list selects 'MedianListingPrice_AllHomes'
# twice and omits 'MedianRentalPrice_AllHomes' — looks like a copy/paste
# slip; verify against the other states before relying on this CSV.
zhvi_MA_df = zhvi_df.loc[zhvi_df['RegionName']=='Massachusetts']
zhvi_MA_df = zhvi_MA_df[['RegionName','MedianListingPricePerSqft_AllHomes','MedianListingPrice_AllHomes','MedianListingPrice_AllHomes','MedianRentalPricePerSqft_AllHomes']]
zhvi_MA_df = zhvi_MA_df.dropna()
zhvi_MA_df = zhvi_MA_df.iloc[31:]
zhvi_MA_df.to_csv("Resources/Data/MA_time_series.csv")
zhvi_MA_df
# +
# Has data from 2010/01/31 so getting from 2014-01-31
zhvi_WA_df = zhvi_df.loc[zhvi_df['RegionName']=='Washington']
zhvi_WA_df = zhvi_WA_df[['RegionName','MedianListingPricePerSqft_AllHomes','MedianListingPrice_AllHomes','MedianRentalPrice_AllHomes','MedianRentalPricePerSqft_AllHomes']]
zhvi_WA_df = zhvi_WA_df.dropna()
zhvi_WA_df = zhvi_WA_df.iloc[48:]
zhvi_WA_df.to_csv("Resources/Data/WA_time_series.csv")
zhvi_WA_df
# +
# Has data from 2010/01/31 so getting from 2014-01-31
# Texas: same treatment as Washington.
zhvi_TX_df = zhvi_df.loc[zhvi_df['RegionName']=='Texas']
zhvi_TX_df = zhvi_TX_df[['RegionName','MedianListingPricePerSqft_AllHomes','MedianListingPrice_AllHomes','MedianRentalPrice_AllHomes','MedianRentalPricePerSqft_AllHomes']]
zhvi_TX_df = zhvi_TX_df.dropna()
zhvi_TX_df = zhvi_TX_df.iloc[48:]
zhvi_TX_df.to_csv("Resources/Data/TX_time_series.csv")
zhvi_TX_df
# +
# Combine the five states of interest into one frame and export.
state_list = ['Texas','Florida','NewYork','Washington','California']
boolean_series = zhvi_df['RegionName'].isin(state_list)
selected_states_df = zhvi_df[boolean_series]
# +
selected_states_df = selected_states_df[['Date','RegionName','DaysOnZillow_AllHomes','MedianListingPricePerSqft_AllHomes','MedianListingPrice_AllHomes',
                                         'MedianRentalPrice_AllHomes','Sale_Counts','Sale_Prices','ZHVI_AllHomes','ZRI_AllHomes']]
selected_states_df.columns
# -
selected_states_df = selected_states_df.dropna()
# NOTE(review): this sort_values result is discarded (no assignment /
# inplace); in a notebook it only displays the sorted view.
selected_states_df.sort_values(by=['RegionName', 'Date'])
selected_states_df
# +
# NOTE(review): reset_index() result is discarded — a no-op as written.
selected_states_df.reset_index()
selected_states_df = selected_states_df[(selected_states_df['Date'] >= '2015-01-31')]
selected_states_df = selected_states_df.replace(to_replace ="NewYork",value ="New York")
# -
selected_states_df
selected_states_df.sort_values(by=['RegionName', 'Date'])
selected_states_df.set_index('Date',inplace=True)
selected_states_df.to_csv("Resources/Data/Selected_States_time_series.csv")
| Zillow_WB.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Observing raw data before processing them
# <b>File completed </b> (05/17/2021)
# Table of Contents + tool versions on 06/22/2021
#
# <NAME>
#
# - Preparing session for IFB core cluster
# - Some checks as a precaution
# - First quality control on raw `.fastq.gz` files
# - Having a summary report with MultiQC
#
# ---
#
# ## <b>Preparing session for IFB core cluster</b>
#
# <em>loaded JupyterLab</em> : Version 2.2.9
# +
echo "=== Cell launched on $(date) ==="
echo "=== Current IFB session size: Medium (4CPU, 10GB) or Large (10CPU, 50GB) ==="
# Find the SLURM job id of this JupyterLab session and report its
# allocated CPUs and node.
jobid=$(squeue -hu $USER | awk '/jupyter/ {print $1}')
sacct --format=JobID,AllocCPUS,NODELIST -j ${jobid}
echo "=== Working's root folder is ==="
gohome="/shared/projects/gonseq/Building/" # to adjust with your project's folder
echo "${gohome}"
echo ""
echo "=== current folder tree ==="
tree "${gohome}"
echo "=== current working directory ==="
echo "${PWD}"
# +
# Load pinned tool versions from the cluster's module system, then print each
# version so the analysis stays reproducible across sessions.
module load bc/1.07.1 fastqc/0.11.9 multiqc/1.9
echo "===== basic calulator ====="
bc --version | head -n 1
echo "===== individual reports ====="
fastqc --version
echo "===== compiled report ====="
multiqc --version
# -
# ---
# ## <b>I- Some checks as a precaution</b>
# ### **1- Available files**
# The data files are already present on the server, in the `Data/fastq/raw/` folder of our current working directory.
#
# As we change session and/or day, let's first check that all files are there using the following commands and that they correspond to what we expect.
# +
# Folder holding the raw compressed fastq files delivered by the platform.
rawfolder="${gohome}Data/fastq/raw/"
echo "There are $(ls ${rawfolder} | wc -l) raw .fastq.gz files:"
ls ${rawfolder}
# -
# The files consist of raw data from the Illumina sequencer (`.fastq`) which sizes have been reduced (`.gz`) thanks to compression (``gunzip`` or ``pigz``, see `Pipe_1.ipynb` notebook). As genomics tools can deal with both compressed and uncompressed file formats, we'll save disk space using the compressed ones.
# ### **2- Examining data files: are they what we expect?**
# Let's pick up one file to see inside.
#
# We list the files in the folder and ask for only the first line (``-n 1``).
arawfile=$(ls "${rawfolder}"*gz | head -n 1)
echo ${arawfile}
# ``.fastq`` files are readable by the human eye, and we can display the first lines of this file, using the Unix ``head`` command on the ``zcat`` command that can deal with ``.gzip`` files.
zcat ${arawfile} | head
# We expect to have a text file with 4 lines per read (sequence):
# - read identification starts with `@`
# - sequence itself (some `N` may appear when bases are undetermined)
# - a line separator starting with `+` and the identifier again (for first sequencers) or nothing else
# - phred quality string with special characters (ranging from 33 to 41 in an ASCII table)
#
# <div class="alert alert-block alert-info">
# For more information on phred score and history, please refer to <a href="https://en.wikipedia.org/wiki/FASTQ_format#Encoding">FASTQ format wikipedia page</a> that display graphical view for different phred score encoding.
# </div>
#
# To count lines in that file:
time wcloutput=$(zcat ${arawfile} | wc -l)
echo ${wcloutput}
# For those who don't want to fetch for a calculator, we will use the `bc` basic calculator that allow to use decimal in `bash`.
# Line count / 4 = number of reads; a whole number is a quick integrity check.
echo "scale=2; ${wcloutput}/4" | bc -l
# If the result ends with no decimal (*i.e.* `.00`) along with correct file format (upper bullet point list), we have a good start... else please ask for information to the data supplier (platform or colleagues, file extensions are easy to add, change or even overwrite files...).
# <blockquote>
# <ul>
# <li> Alternatively, we can get directly get the number of reads noticing all reads in this file starting (<code>^</code> in an expression pattern) with <code>@SRR</code>, using the command <code>zgrep</code> to do the pattern search in a <code>.gz</code> file: <br> <mark>makes Adenine stops running</mark> </li>
# </ul>
# </blockquote>
time zgrep "^@SRR" ${arawfile} | wc -l
# ---
# ## <b>II - First quality control on raw <code>.fastq.gz</code> files</b>
# ### **1- Tool version and introduction**
# For this step, we will use <a href="https://www.bioinformatics.babraham.ac.uk/projects/fastqc/"><b>FASTQC</b></a> (notebook developed with ``FastQC v0.11.9``).
fastqc --version
# To analyze a sample, we could launch: <br>
# <code>fastqc --outdir path/to/destination/folder/ \ <br> path/to/file.fastq.gz</code> <br>
# where <code>-- outdir</code> introduces the path where you want new created files to be saved, while file to be analyzed is placed at the end of the line. <br>
# <br>
# For several samples, we can directly launch <code>fastqc</code> with a list of files to analyze. As several cores are available, we can ask for <code>fastqc</code> to deal with several files at a time.
# <blockquote>
# <code>-t 16</code> or <code>-threads 16</code> to ask for 16 files to be managed in parallel, knowing that each process will use 250 MB of RAM memory (<em>so 4 GB at a time for 16 threads, and 32 files is also 2 times 16 samples)</em>
# </blockquote>
# ### **2- Prepare destination folders**
# We will store output files in ``Results/`` and in a subfolder called ``fastqc/``.
qcfolder="${gohome}Results/fastqc/"
mkdir -p "${qcfolder}"
# As it's easier to work with files saved close to each other, the matched ``.log`` file will be saved in a ``logfiles/`` subfolder, also placed in ``Results/``.
logfolder="${gohome}Results/logfiles/"
mkdir -p "${logfolder}"
# ### **3- Run ``fastqc`` tool**
# +
logfile="${logfolder}fastqc_raw-quality-processing.log"
echo "Screen output is redirected to ${logfile}"
# as time command does not redirect output
echo "operation starting by $(date) " >> ${logfile}
# Run FastQC on every .gz file, 16 in parallel; stdout AND stderr go to the log.
time fastqc --outdir ${qcfolder} --threads 16 \
$(echo "${rawfolder}"*.gz)\
&>> ${logfile}
echo "operation finished by $(date)" >> ${logfile}
# to see which files we have afterward and follow folder sizes
ls -lh ${qcfolder} >> ${logfile}
ls -lh "${gohome}Results/" >> ${logfile}
echo "$(ls -l "${qcfolder}"*.html | wc -l) generated .html reports"
# -
# The ouputs are in a `.zip` folder and a `.html` file, the latest being a complete summary of the analysis. <br>
# To open this `html` file, in the left-hand panel of *JupyterLab* double-click the "Results" folder, and in it, on the html file: it should open in a new tab beside this notebook. <br>
# If you have no teacher nor bionformatician (or maybe they don't know either of this subject), you can browse some links:
# <ul class="alert alert-block alert-info">
# <li><code>fastqc</code>'s help sections on <a href="https://www.bioinformatics.babraham.ac.uk/projects/fastqc/Help/">its raw (no layout!) website</a>
# </li>
# <li>Michigan state University's support facility offers a nicer <a href="https://rtsf.natsci.msu.edu/genomics/tech-notes/fastqc-tutorial-and-faq/">FASTQC Tutorial and FAQ</a>
# </li>
# <li><i>Assessing quality metrics</i>'s section in <a href="https://hbctraining.github.io/Intro-to-rnaseq-hpc-salmon/lessons/qc_fastqc_assessment.html">Quality control</a>, from a former workshop by the <i>Harvard Chan Bioinformatics Core</i> (HBC). <br>
# <i>++</i>: <b>some schemes on sequencing run and detailed information in general</b>
# </li>
# <li><a href="https://huoww07.github.io/Bioinformatics-for-RNA-Seq/lessons/02_Quality_Control.html#understand-fastqc-report">RNA sequencing quality control with fastQC</a>'s section of <i>Tufts University Research Technology Workshop</i>
# </li>
# </ul>
#
# After eyeing one file (<i>so only one sample!</i>), you may want to view all results at the same time to compare between samples. That's where MultiQC and next steps will help us.
# ## <b>III- Having a summary report with MultiQC</b>
# When numerous samples are processed, it can easily become tedious to look in each mapping quality report. To that purpose, we will run <a href="https://multiqc.info/"><b>MultiQC</b></a>, that scans automatically a folder for all quality checks outputs and produce a single report.
# ### **1- Tool version and short presentation**
# This notebook was developped with ``multiqc, version 1.9``, even if next version (1.10) appeared meanwhile.
# Print the MultiQC version actually loaded on the cluster.
multiqc --version
# This tool deals with almost any possible NGS tools: see <a href="https://multiqc.info/docs/#multiqc-modules">the online full updated list</a> for more details and to know how it works (detected files and folder extensions).
# By default, ``multiqc`` identifies any report it can parse from the input directory.
# If you want to only generate a multiQC report on specific analyses, you can add the argument ``-m`` followed by the name of the module as for example:
# <code>multiqc -m fastqc ./Results/Fastqc/ -o /Results/MultiQC_on_FastQC</code>
# > You can add several modules ``-m fastqc dir_fastqc -m qualimap dir_qualimap`` etc...
# ### **2- Folder, filename, title and comment**
#
# We will create a subfolder in the ``Results/`` folder for ``multiqc``.
# Destination folder for all MultiQC summary reports (idempotent creation).
qcsummaries="${gohome}Results/multiqc/"
mkdir -p ${qcsummaries}
# All downstream reports will also be saved here and we will use different file names.
#
# We will ask MultiQC for specific and meaningful filenames and title using ``-n`` and ``-i`` options.
# <blockquote>
# <code>-n</code> or <code>--filename TEXT</code> to have a non-default report filename (warning: <code>stdout</code> will just print results to console <br>
# <code>-i</code> or <code>--title</code>, to change file header. Also used for filename if option not specified <br>
# <code>-b</code> or <code>--comment</code> to add any text section in report
# </blockquote>
#
# Please, specify **file name** you want to have (do not worry about extension, MultiQC will handle this for us) inside quotes in the next cell.
# <b>DO NOT use spaces or any special characters!</b>
inamemyfile="1_raw-fastq-files"
# Please, specify a meaningful **title** inside quotes in the next cell, to display at the head of your oncoming report.
# <b>No more space limit but still avoid any special characters.</b>
mytitle="Raw fastq files quality for 16 paired end sequenced samples"
# Besides, we can add a comment in header's report. It's a good practice to do so. So we will define it in following cell.
#
# > In this cell, we use several lines to keep it readable when displaying notebook. As your text lines are just collapsed together in the html report, be sure to keep last blank space when ending every line).
mycomment=$(echo "Raw fastq files from bulk RNA sequencing (human, stranded, rRNA depletion) " \
"performed by Lecluze et al. 2020 (PMID: 32412604, GEO: GSE116278, SRA: SR151462). " \
"Sample set is limited to fetal ovaries and testes at 6 and 12 PCWeeks (n=4 per condition). " \
"SRR7430706-709: female 6 PCW, 709-713: male 6 PCW, 738-741: female 12 PCW, 742-745: male 12 PCW.")
# ### **3- Generate summary report**
# MultiQC is verbose but, as it will work only on FastQC reports, it is quite short.
# So, we will let it lines show in notebook while saving them in a file for later use.
# +
logfile="${logfolder}multiqc-processing_raw-quality.log"
echo "Screen output is also saved in ${logfile}"
# as time command does not redirect output
echo "operation starting by $(date)" >> ${logfile}
# Aggregate every FastQC report found in ${qcfolder} into one interactive HTML
# report; `|& tee -a` shows the output live while also appending it to the log.
multiqc --interactive --export \
--outdir "${qcsummaries}" \
--filename "${inamemyfile}" \
--title "${mytitle}" \
--comment "${mycomment}" \
"${qcfolder}" \
|& tee -a ${logfile}
echo "operation finished by $(date)" >> ${logfile}
# to see which files we have afterward and follow folder sizes
ls -lh "${qcsummaries}" >> ${logfile}
ls -lh "${gohome}Results/" >> ${logfile}
# -
# The three options that have not been yet introduced previously in this notebook are:
# <blockquote>
# <code>-ip</code> or <code>--interactive</code> stands for integrate dynamical graphics to have interactive plots in html report <br>
# <code>-p</code> or <code>--export</code> to export plot as static images besides html report <br>
# <code>-o</code> or <code>--outdir</code> to define the destination folder for output and report files <br>
# then, folder we want to be scanned <br>
# </blockquote>
#
# Others options exist: <br>
# <blockquote>
# <code>-m</code> or <code>--module</code> to limit MultiqQC's searches for some quality tools <br>
# <code>-d</code> or <code>--dirs</code> to append directory names to files (useful for same names in different folders) <br>
# <code>-f</code> or <code>--force</code> to force overwriting existing files <br>
# <code>-v</code> or <code>--verbose</code> to increase output verbosity <br>
# <code>--tag TEXT</code> if only TEXT-matching modules are desired <br>
# <code>--pdf</code> to get a pdf report (available only with <code>pandoc</code> library)
# </blockquote>
# To open the report (``.html`` file using Javascript, language not supported by *JupyterLab* so far), download the html file from the left-hand panel and open it in your own browser for it to express its full abilities.
#
# ---
# ___
#
# After you watch MultiQC in order to know what to correct for your data, please proceed to next step.
#
# **=> Step 3 : Preprocessing reads and checking for their quality**
| Pipe_2-bash_raw-data-quality.ipynb |
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # vendor_scheduling
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/vendor_scheduling.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/contrib/vendor_scheduling.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
from ortools.constraint_solver import pywrapcp
# Create the solver.
solver = pywrapcp.Solver('Vendors scheduling')
#
# data
#
num_vendors = 9
num_hours = 10
num_work_types = 1
# Customer traffic per hour; each on-duty vendor covers max_trafic_per_vendor.
trafic = [100, 500, 100, 200, 320, 300, 200, 220, 300, 120]
max_trafic_per_vendor = 100
# Last columns are :
# index_of_the_schedule, sum of worked hours (per work type).
# The index is useful for branching.
possible_schedules = [[1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 8],
                      [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 4],
                      [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 2, 5],
                      [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 4],
                      [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 4, 3],
                      [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0]]
num_possible_schedules = len(possible_schedules)
selected_schedules = []
vendors_stat = []
hours_stat = []
#
# declare variables
#
# x[i, j] in {0, 1}: vendor i works hour j (num_work_types == 1).
x = {}
for i in range(num_vendors):
    tmp = []
    for j in range(num_hours):
        x[i, j] = solver.IntVar(0, num_work_types, 'x[%i,%i]' % (i, j))
        tmp.append(x[i, j])
    selected_schedule = solver.IntVar(0, num_possible_schedules - 1,
                                      's[%i]' % i)
    hours = solver.IntVar(0, num_hours, 'h[%i]' % i)
    selected_schedules.append(selected_schedule)
    vendors_stat.append(hours)
    tmp.append(selected_schedule)
    tmp.append(hours)
    # Table constraint: vendor i's hourly row + schedule index + worked hours
    # must match one of the allowed rows in possible_schedules.
    solver.Add(solver.AllowedAssignments(tmp, possible_schedules))
#
# Statistics and constraints for each hour
#
for j in range(num_hours):
    workers = solver.Sum([x[i, j] for i in range(num_vendors)]).Var()
    hours_stat.append(workers)
    # Enough vendors on duty to absorb the hour's traffic.
    solver.Add(workers * max_trafic_per_vendor >= trafic[j])
#
# Redundant constraint: sort selected_schedules
#
# Symmetry breaking: vendors are interchangeable, so forcing a non-decreasing
# order of schedule indices avoids enumerating permutations.
for i in range(num_vendors - 1):
    solver.Add(selected_schedules[i] <= selected_schedules[i + 1])
#
# Search
#
# Branch on the schedule indices only; x/hours follow from the table constraint.
db = solver.Phase(selected_schedules, solver.CHOOSE_FIRST_UNBOUND,
                  solver.ASSIGN_MIN_VALUE)
solver.NewSearch(db)
num_solutions = 0
while solver.NextSolution():
    num_solutions += 1
    for i in range(num_vendors):
        print('Vendor %i: ' % i,
              possible_schedules[selected_schedules[i].Value()])
    print()
    print('Statistics per day:')
    for j in range(num_hours):
        print('Day%2i: ' % j, end=' ')
        print(hours_stat[j].Value(), end=' ')
    print()
    print()
solver.EndSearch()
print()
print('num_solutions:', num_solutions)
print('failures:', solver.Failures())
print('branches:', solver.Branches())
print('WallTime:', solver.WallTime(), 'ms')
| examples/notebook/contrib/vendor_scheduling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="gzLKlf9l9UCC" colab_type="text"
# To aid autoassociative recall (sparse recall using partial pattern), we need two components -
# 1. each pattern remembers a soft mask of the contribution of each
# element in activating it. For example, if an element varies a lot at high activation levels, that element should be masked out when determining activation. On the other hand, if an element has a very specific value every time the element has high activation, then that element is important and should be considered (masked-in).
# 2. Among the masked-in elements for a pattern, even a small subset (say 20%) almost perfect match should be able to activate the pattern. To find soft-perfect match -
#
# mask weighted dim similarity ${s_i}$ will be high only if input value ${x_i}$ is similar to pattern value ${p_i}$ for that dim AND the pattern cares about that dim (i.e. has ~1 mask ${m_i}$)
#
# ${s_i = m_i * \lVert x_i, p_i \rVert}$
#
# ${s = {1 \over m} \sum s_i}$ is the total mean similarity across dims that the pattern cares about, where
#
# ${m = \sum m_i}$ is the soft version of the number of dims that the pattern cares about.
#
#
# + id="_8v7hDUUJX6R" colab_type="code" outputId="04ae1701-e7c4-4f43-f582-c9dd4b1ace28" executionInfo={"status": "ok", "timestamp": 1588197428996, "user_tz": 420, "elapsed": 3429, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}} colab={"base_uri": "https://localhost:8080/", "height": 88}
import math
import torch
import matplotlib.pyplot as plt
import pdb
import pandas as pd
import seaborn as sns
import numpy as np
from scipy.spatial import Voronoi, voronoi_plot_2d
# %matplotlib inline
# Prefer the first CUDA device when available; all tensors below live on `device`.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print(device)
# + id="-pqE_7Du8uuv" colab_type="code" outputId="ba466f02-5bb1-4ab7-c622-ce086537e27e" executionInfo={"status": "ok", "timestamp": 1588201648985, "user_tz": 420, "elapsed": 667, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}} colab={"base_uri": "https://localhost:8080/", "height": 265}
class HP:
    # Hyper-parameters for the self-organising pattern grid.
    grid_size = 20  # patterns start on a grid_size x grid_size lattice
    # similarity
    mask_contrast = 3  # sharpness of the precision mask exp(-var * contrast)
    mexhat_width = 0.98  # width of the excitatory (central) lobe
    a_max = 1500  # cap on the sharpness term (x == winner would give inf)
    mexhat_inh_width = 0.10 # controls how far away patterns are pushed away by a pattern
    mexhat_inh_scale = 0.80 # controls how strongly patterns push other patterns away
def mexhat(x, patterns, winning_pattern):
    # "Mexican hat" lateral-interaction profile centred on the winning pattern:
    # patterns near x get positive similarity (attraction), patterns somewhat
    # further away get negative similarity (repulsion).
    # Sharpness grows with the inverse squared distance of x to the winner;
    # the `** -2` yields inf at zero distance, clipped to HP.a_max below.
    a = HP.mexhat_width * ((x - winning_pattern) ** -2)
    a[a > HP.a_max] = HP.a_max
    sim = (-a * ((x - patterns) ** 2)).mean(dim=-1)
    # Difference of an excitatory and a slightly wider inhibitory exponential,
    # renormalised so the central peak stays at ~1; then hard-capped at 1.
    sim = (torch.exp(sim) - HP.mexhat_inh_scale * torch.exp(sim * (1 - HP.mexhat_inh_width))) / (1 - HP.mexhat_inh_scale)
    sim[sim>1.0] = 1.0
    return sim
def show_mexhat():
    # Visual sanity check: profile for input 0.5 with the winner at 0.55.
    x = torch.linspace(0.0, 1.0, 300)
    sim = mexhat(torch.tensor([0.5]), x.unsqueeze(dim=-1), torch.tensor([0.55]))
    plt.plot(x, sim)
    plt.show()
show_mexhat()
# + id="XsQUDWDN8THZ" colab_type="code" outputId="0ebc8b46-88fc-4a1a-8c99-67e13beb28d3" executionInfo={"status": "ok", "timestamp": 1588201648987, "user_tz": 420, "elapsed": 426, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
from sklearn.datasets import load_boston
def normalize(df):
    # Z-score standardisation: zero mean, unit (sample) std per column.
    return (df - df.mean()) / df.std()
def scale(df):
    # Min-max rescaling of every column into the [0, 1] range.
    lo = df.min()
    hi = df.max()
    return (df - lo) / (hi - lo)
# Build a 2-D toy dataset from the Boston housing data.
# NOTE(review): load_boston was deprecated and removed in scikit-learn >= 1.2;
# this cell needs an older scikit-learn — confirm the pinned version.
dataset = load_boston()
dataset = pd.DataFrame(dataset.data, columns=dataset.feature_names)
# Keep two features (LSTAT, RM), z-scored then min-max scaled into [0, 1].
dataset = pd.DataFrame(np.c_[scale(normalize(dataset['LSTAT'])), scale(normalize(dataset['RM']))], columns = ['LSTAT','RM'])
dataset = torch.tensor(dataset.to_numpy()).float().to(device)
# Three LSTAT bands, used below for the non-iid (sequential) training schedule.
dataset1 = dataset[dataset[:,0] < 0.33]
dataset2 = dataset[(dataset[:,0] >= 0.33) & (dataset[:,0] < 0.66)]
dataset3 = dataset[dataset[:,0] >= 0.66]
# dataset = [[0.25, 0.4], [0.75, 0.75], [0.85, 0.65]]
original_dataset = dataset
print("dataset", dataset.shape)
# + id="hBRXYem8JrYD" colab_type="code" outputId="822cfb33-1a4d-4da1-c21a-c129fb5179fd" executionInfo={"status": "ok", "timestamp": 1588201649734, "user_tz": 420, "elapsed": 936, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}} colab={"base_uri": "https://localhost:8080/", "height": 594}
# from https://kornia.readthedocs.io/en/latest/_modules/kornia/utils/grid.html
from typing import Optional
def create_meshgrid(
        height: int,
        width: int,
        normalized_coordinates: Optional[bool] = True,
        device: Optional[torch.device] = torch.device('cpu')) -> torch.Tensor:
    """Build a (1, H, W, 2) coordinate grid for an image.

    Each position holds an (x, y) pair: channel 0 varies along the width
    axis, channel 1 along the height axis.  With ``normalized_coordinates``
    the values span [-1, 1] (consistent with
    ``torch.nn.functional.grid_sample``); otherwise they are pixel indices
    0..W-1 and 0..H-1.

    Args:
        height (int): number of rows (H).
        width (int): number of columns (W).
        normalized_coordinates (Optional[bool]): normalise to [-1, 1] if True.
        device (Optional[torch.device]): device for the returned tensor.

    Return:
        torch.Tensor: grid of shape :math:`(1, H, W, 2)`.
    """
    if normalized_coordinates:
        cols = torch.linspace(-1, 1, width, device=device, dtype=torch.float)
        rows = torch.linspace(-1, 1, height, device=device, dtype=torch.float)
    else:
        cols = torch.linspace(0, width - 1, width, device=device, dtype=torch.float)
        rows = torch.linspace(0, height - 1, height, device=device, dtype=torch.float)
    # meshgrid (ij indexing) yields (W, H) per channel; transpose to (H, W).
    stacked = torch.stack(torch.meshgrid([cols, rows])).transpose(1, 2)  # 2xHxW
    return stacked.unsqueeze(0).permute(0, 2, 3, 1)  # 1xHxWx2
def add_gaussian_noise(tensor, mean=0., std=1.):
    # Additive white Gaussian noise, same shape as `tensor`, drawn on `device`.
    noisy = tensor + torch.randn(tensor.size()).to(device) * std + mean
    noisy.to(device)  # kept from the original; note .to() is not in-place
    return noisy
def plot_patterns(patterns, pattern_lr, dataset, voronoi=False, annotate=False, figsize=(7,7), dpi=100):
    """Scatter 2-D patterns (blue) over the dataset points (red).

    Blue alpha encodes plasticity: low pattern_lr (converged pattern) draws
    nearly opaque, high pattern_lr draws faint.  Optionally overlays the
    Voronoi tessellation of the patterns and per-pattern index labels.
    Displays the figure; returns nothing.
    """
    patterns = patterns.cpu()
    dataset = dataset.cpu()
    assert len(patterns.shape) == 2 # (pattern count, 2)
    assert patterns.shape[1] == 2 # 2D
    rgba_colors = torch.zeros((patterns.shape[0], 4))
    # for blue the last column needs to be one
    rgba_colors[:,2] = 1.0
    # the fourth column needs to be your alphas
    if pattern_lr is not None:
        alpha = (1.1 - pattern_lr.cpu()).clamp(0, 1) * 0.9
        rgba_colors[:, 3] = alpha
    else:
        rgba_colors[:, 3] = 1.0
    plt.figure(figsize=figsize, dpi=dpi)
    ax = plt.gca()
    ax.cla() # clear things for fresh plot
    if annotate:
        # Label each pattern with its index, slightly offset from the marker.
        for i in range(patterns.shape[0]):
            ax.annotate(str(i), (patterns[i][0], patterns[i][1]), xytext=(5,-3), textcoords='offset points')
    ax.scatter(patterns[:, 0], patterns[:, 1], marker='.', c=rgba_colors, s=50)
    ax.scatter(dataset[:, 0], dataset[:, 1], marker='.', c='r', s=10)
    if voronoi:
        vor = Voronoi(patterns)
        vor_fig = voronoi_plot_2d(vor, ax=ax, show_vertices=False, line_colors='gray',
                                  line_width=1, line_alpha=0.2, point_size=0)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    plt.show()
grid_size = HP.grid_size
# Initialise patterns on a uniform lattice covering [0, 1]^2.
patterns = create_meshgrid(grid_size, grid_size, normalized_coordinates=False).reshape(-1, 2) / (grid_size-1)
patterns = patterns.to(device)
# Per-pattern plasticity, 1 = fully plastic.
pattern_lr = torch.ones((patterns.shape[0],)).to(device)
pattern_var = torch.ones_like(patterns).to(device) * 10 # start with high var indicating no specificity to any value
# patterns = torch.rand((50, 2))
# patterns = torch.tensor([[0.25, 0.30]])
# patterns
# Voronoi cells and labels are only readable for small grids.
if grid_size < 10:
    plot_patterns(patterns=patterns, pattern_lr=pattern_lr, dataset=dataset, voronoi=True, annotate=True)
else:
    plot_patterns(patterns=patterns, pattern_lr=pattern_lr, dataset=dataset, voronoi=False, annotate=False)
original_patterns = patterns.clone().to(device)
# + id="BDzhJdByJ10d" colab_type="code" colab={}
def precision_weighted_distance(x, patterns, pattern_var):
    """Distance of x to every pattern, weighting each dim by its precision.

    Dims the pattern is specific about (low variance) count more; noisy dims
    are softly masked out.  Returns (dist, dist_i): per-pattern scalar
    distance and the per-dimension absolute errors.
    """
    mask = torch.exp(-pattern_var * HP.mask_contrast) # *3 so that var of 1 gives low enough mask value ~0.5. Thus mask range [0.5,1]
    dist_i = (x - patterns).abs()
    di = (dist_i ** 2) * mask # distance weighted by importance
    dist = di.sum(dim=-1) / mask.sum(dim=-1) # dividing by mask sum ~= count of # of dims the pattern cares about
    return dist, dist_i
def similarity(x, patterns, pattern_var):
    """Mexhat similarity of x to all patterns, centred on the closest pattern.

    Returns (sim, winner_index, dist, dist_i).
    """
    # Formula derivation https://www.desmos.com/calculator/iokn9kyuaq
    # print("x", x)
    # print("patterns", patterns)
    dist, dist_i = precision_weighted_distance(x, patterns, pattern_var)
    # print("dist", dist)
    winner_index = dist.min(dim=0)[1]
    # print("winner_index", winner_index)
    winning_pattern = patterns[winner_index]
    # print("winning_pattern", winning_pattern)
    sim = mexhat(x, patterns, winning_pattern)
    # print("sim", sim)
    return sim, winner_index, dist, dist_i
def autoassociative_recall(x, patterns, pattern_var, pattern_lr):
    """Reconstruct x as a similarity-weighted mixture of the stored patterns.

    Well-trained patterns (low pattern_lr) receive proportionally more weight.
    """
    dist, _ = precision_weighted_distance(x, patterns, pattern_var)
    sim = torch.exp(-dist*500) * (1.0 - pattern_lr)
    y = ((sim.unsqueeze(-1) * patterns) / sim.sum(-1)).sum(dim=0)
    # y = ((y - 0.5) * 1.1) + 0.5
    return y
def predict(dataset, patterns, pattern_var, pattern_lr):
    """Recall every sample, plot the reconstructions, report summed L2 loss."""
    loss = 0.0
    output = []
    for x in dataset:
        y = autoassociative_recall(x, patterns, pattern_var, pattern_lr)
        output.append(y)
        loss += torch.dist(x, y)
    output = torch.stack(output)
    plot_patterns(output, pattern_lr=None, dataset=original_dataset, voronoi=False, annotate=False, figsize=(12,12))
    print("loss", loss)
    return output, loss
# output, loss = predict(original_dataset, patterns, pattern_var, pattern_lr)
# + id="W9Ug8kEPKTe8" colab_type="code" outputId="7925229a-ead8-4787-ee83-c1c8680bdcee" executionInfo={"status": "ok", "timestamp": 1588201657169, "user_tz": 420, "elapsed": 5525, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Reset training state: fresh grid, nearly-fully-plastic rates, unit variance.
patterns = original_patterns
pattern_lr = torch.ones((patterns.shape[0],)).to(device) * 0.99
pattern_var = torch.ones_like(patterns).to(device) * 1.0
def run_dataset(dataset, patterns, pattern_lr):
    """One competitive-learning pass over `dataset`.

    Per sample: compute similarity to all patterns, then (a) decay the
    winner's learning rate toward 1 - exp(-dist) (EMA), (b) update the
    winner's per-dim variance (soft precision mask, EMA of |error|), and
    (c) move every pattern along the mexhat profile, scaled by its own lr.
    Mutates pattern_lr and the global pattern_var in place; also reads
    globals `lr` and `pattern_var`.  Returns (patterns, pattern_lr).
    """
    # print("pattern_var", pattern_var)
    for x in dataset:
        # print("-------")
        sim, winner_index, dist, dist_i = similarity(x=x, patterns=patterns, pattern_var=pattern_var)
        sim = sim.unsqueeze(-1)
        # print("dist[winner_index]", dist[winner_index] * 100)
        pattern_lr[winner_index] = 0.9 * pattern_lr[winner_index] + 0.1 * (1.0 - torch.exp(-dist[winner_index]))
        pattern_var[winner_index] = 0.9 * pattern_var[winner_index] + 0.1 * dist_i[winner_index]
        # if winner_index == 0:
        # print("dist_i[winner_index]", dist_i[winner_index])
        # print("pattern_var[winner_index]", pattern_var[winner_index])
        # print("x", x)
        # print("(x - patterns)", (x - patterns))
        # print("sim", sim)
        delta = (x - patterns) * sim * lr * pattern_lr.unsqueeze(-1)
        # print("delta", delta)
        patterns = patterns + delta
        patterns.clamp_(0, 1)
        # NOTE(review): clamp() is NOT in-place (unlike clamp_ above); this
        # result is discarded — pattern_lr.clamp_(0, 1) was likely intended.
        pattern_lr.clamp(0, 1)
        # print("patterns", patterns)
        # print("pattern_lr", pattern_lr)
    return patterns, pattern_lr
lr = 1
epochs = 5
noise = 0.0
non_iid = True
predict(original_dataset, patterns, pattern_var, pattern_lr)
if non_iid:
    # Non-iid curriculum: 3 repetitions of training on each LSTAT band in turn.
    for _ in range(3):
        for i in range(epochs):
            dataset = add_gaussian_noise(dataset1, std=noise)
            if (i+1) % int(epochs / 1) == 0:
                print("Iteration ", i)
                # plot_patterns(patterns, pattern_lr, dataset)
            patterns, pattern_lr = run_dataset(dataset, patterns, pattern_lr)
        for i in range(epochs):
            dataset = add_gaussian_noise(dataset2, std=noise)
            if (i+1) % int(epochs / 1) == 0:
                print("Iteration ", i)
                # plot_patterns(patterns, pattern_lr, dataset)
            patterns, pattern_lr = run_dataset(dataset, patterns, pattern_lr)
        for i in range(epochs):
            dataset = add_gaussian_noise(dataset3, std=noise)
            if (i+1) % int(epochs / 1) == 0:
                print("Iteration ", i)
                # plot_patterns(patterns, pattern_lr, dataset)
            patterns, pattern_lr = run_dataset(dataset, patterns, pattern_lr)
    predict(original_dataset, patterns, pattern_var, pattern_lr)
else:
    # iid baseline: train on the full dataset every epoch.
    for i in range(epochs):
        dataset = add_gaussian_noise(original_dataset, std=noise)
        if (i+1) % int(epochs / 1) == 0:
            print("Iteration ", i)
            plot_patterns(patterns, pattern_lr, dataset)
        patterns, pattern_lr = run_dataset(dataset, patterns, pattern_lr)
    predict(original_dataset, patterns, pattern_var, pattern_lr)
# + id="T9SfPZb3OFF7" colab_type="code" outputId="5e40dc44-3c53-4ee1-aae5-94b8295378f4" executionInfo={"status": "ok", "timestamp": 1588200548725, "user_tz": 420, "elapsed": 1096, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}} colab={"base_uri": "https://localhost:8080/", "height": 972}
plot_patterns(patterns, pattern_lr, original_dataset, voronoi=True, annotate=False, figsize=(12,12))
# + id="NDQrkqrZkdV9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 989} outputId="5d4c1adf-6893-4f2b-a506-bf50b2cc31eb" executionInfo={"status": "ok", "timestamp": 1588200556167, "user_tz": 420, "elapsed": 590, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}}
output, loss = predict(original_dataset, patterns, pattern_var, pattern_lr)
# + id="SQgq3Dggk6TU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="75b105a0-7f3e-4ea2-8774-26e25f05b3af" executionInfo={"status": "ok", "timestamp": 1588198529530, "user_tz": 420, "elapsed": 433, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}}
# Probe the similarity of one sample against all patterns.
x = original_dataset[0]
# BUG FIX: precision_weighted_distance() returns the tuple (dist, dist_i);
# the original bound the whole tuple to `dist`, so torch.exp(-dist*500)
# raised a TypeError.  Unpack and keep only the scalar distances.
dist, _ = precision_weighted_distance(x, patterns, pattern_var)
sim = torch.exp(-dist*500)
(sim * 100).long()
# + id="to6wob128TxZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="87d2bf3e-6f99-41bc-a0ab-98d261a40781" executionInfo={"status": "ok", "timestamp": 1588197779492, "user_tz": 420, "elapsed": 451, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "07278259258766517376"}}
# (((sim.unsqueeze(-1) * patterns) / sim.sum(-1)) * 100).long()
((1.0 - torch.exp(-sim)) * 100).long()
# + [markdown] id="EIz6YcfWCFI2" colab_type="text" endofcell="--"
# Notes -
#
# -
# --
# + [markdown] id="1fxtQ0uJRkXf" colab_type="text"
#
| 2020-1/07.1 Inference (recognition of full pattern).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from utils import *
import tensorflow as tf
from sklearn.cross_validation import train_test_split
import time
from tqdm import tqdm
import random
# Hyper-parameters for the bag-of-n-grams (fastText-style) classifier.
ngram_range = 2  # add bigrams on top of unigrams
max_features = 20000  # base vocabulary cap; build_ngram() grows it for n-gram ids
maxlen = 50  # pad/truncate every sequence to this length
batch_size = 64
embedded_size = 128
epoch = 10
# Load the labelled corpus from the local 'data' directory.
# NOTE(review): `sklearn`, `separate_dataset` and `build_dataset` are assumed
# to come from `from utils import *` — confirm utils exposes them.
trainset = sklearn.datasets.load_files(container_path = 'data', encoding = 'UTF-8')
trainset.data, trainset.target = separate_dataset(trainset,1.0)
print (trainset.target_names)
print (len(trainset.data))
print (len(trainset.target))
# Build the word-id dictionary over the whole corpus.
concat = ' '.join(trainset.data).split()
vocabulary_size = len(list(set(concat)))
data, count, dictionary, rev_dictionary = build_dataset(concat, vocabulary_size)
print('vocab from size: %d'%(vocabulary_size))
print('Most common words', count[4:10])
print('Sample data', data[:10], [rev_dictionary[i] for i in data[:10]])
# Convert each document into a list of word ids, silently dropping OOV tokens.
idx_trainset = []
for text in trainset.data:
    idx = []
    for t in text.split():
        try:
            idx.append(dictionary[t])
        except:
            pass
    idx_trainset.append(idx)
# +
def create_ngram_set(input_list, ngram_value):
    # Set of all contiguous n-grams (as tuples) of length `ngram_value`.
    shifted = [input_list[k:] for k in range(ngram_value)]
    return set(zip(*shifted))
def build_ngram(x_train):
    """Collect all 2..ngram_range n-grams of the corpus and assign them ids.

    Ids start right after the current vocabulary (max_features + 1).
    Side effect: grows the global `max_features` to cover the new ids.
    Returns token_indice: {ngram tuple -> integer id}.
    """
    global max_features
    ngram_set = set()
    for input_list in tqdm(x_train, total=len(x_train), ncols=70):
        for i in range(2, ngram_range + 1):
            set_of_ngram = create_ngram_set(input_list, ngram_value=i)
            ngram_set.update(set_of_ngram)
    start_index = max_features + 1
    token_indice = {v: k + start_index for k, v in enumerate(ngram_set)}
    indice_token = {token_indice[k]: k for k in token_indice}
    # New effective vocabulary size = highest assigned n-gram id + 1.
    max_features = np.max(list(indice_token.keys())) + 1
    return token_indice
def add_ngram(sequences, token_indice):
    """Append the ids of known n-grams to (a copy of) each input sequence.

    For every n in 2..ngram_range, each contiguous n-gram of the sequence
    that appears in `token_indice` contributes its id at the end of the
    sequence. The input lists are not mutated.
    """
    augmented = []
    for seq in tqdm(sequences, total=len(sequences), ncols=70):
        out = seq[:]
        for order in range(2, ngram_range + 1):
            # range() is evaluated before any append of this pass, so the
            # sliding windows only cover elements present at pass start.
            for start in range(len(out) - order + 1):
                gram = tuple(out[start:start + order])
                if gram in token_indice:
                    out.append(token_indice[gram])
        augmented.append(out)
    return augmented
# -
# Build the n-gram vocabulary, augment each document with n-gram ids, and
# pad every sequence to `maxlen` for batching.
token_indice = build_ngram(idx_trainset)
X = add_ngram(idx_trainset, token_indice)
X = tf.keras.preprocessing.sequence.pad_sequences(X, maxlen)
X.shape
# 80/20 train/validation split.
train_X, test_X, train_Y, test_Y = train_test_split(X,
                                                    trainset.target,
                                                    test_size = 0.2)
class Model:
    """Fast-text style classifier in TF1 graph mode: embedding lookup,
    mean-pooling over the time axis, and a single dense softmax layer."""
    def __init__(self, embedded_size, dict_size, dimension_output, learning_rate):
        # [batch, time] token ids and [batch] integer class labels.
        self.X = tf.placeholder(tf.int32, [None, None])
        self.Y = tf.placeholder(tf.int32, [None])
        encoder_embeddings = tf.Variable(tf.random_uniform([dict_size, embedded_size], -1, 1))
        encoder_embedded = tf.nn.embedding_lookup(encoder_embeddings, self.X)
        # Average the embeddings over time -> one vector per document.
        self.logits = tf.layers.dense(tf.reduce_mean(encoder_embedded, 1), dimension_output)
        self.cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.logits,
            labels=self.Y))
        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(self.cost)
        correct_pred = tf.equal(tf.argmax(self.logits, 1,output_type=tf.int32), self.Y)
        self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
# Build the graph and train with early stopping on validation accuracy.
tf.reset_default_graph()
sess = tf.InteractiveSession()
# Bug fix: the embedding table must also cover the n-gram ids appended by
# add_ngram, which start above the original `max_features` (20000) — with
# only vocabulary_size+4 rows, embedding_lookup went out of range as soon as
# any n-gram was added. build_ngram updated `max_features` to (max id + 1).
model = Model(embedded_size, max(vocabulary_size + 4, max_features), 2, 1e-3)
sess.run(tf.global_variables_initializer())
EARLY_STOPPING, CURRENT_CHECKPOINT, CURRENT_ACC, EPOCH = 5, 0, 0, 0
while True:
    lasttime = time.time()
    # Stop after EARLY_STOPPING epochs without validation improvement.
    if CURRENT_CHECKPOINT == EARLY_STOPPING:
        print('break epoch:%d\n'%(EPOCH))
        break
    train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
    # One pass over full batches only (a tail smaller than batch_size is dropped).
    for i in range(0, (len(train_X) // batch_size) * batch_size, batch_size):
        acc, loss, _ = sess.run([model.accuracy, model.cost, model.optimizer],
                                feed_dict = {model.X : train_X[i:i+batch_size], model.Y : train_Y[i:i+batch_size]})
        train_loss += loss
        train_acc += acc
    for i in range(0, (len(test_X) // batch_size) * batch_size, batch_size):
        acc, loss = sess.run([model.accuracy, model.cost],
                             feed_dict = {model.X : test_X[i:i+batch_size], model.Y : test_Y[i:i+batch_size]})
        test_loss += loss
        test_acc += acc
    # Convert the per-batch sums into epoch averages.
    train_loss /= (len(train_X) // batch_size)
    train_acc /= (len(train_X) // batch_size)
    test_loss /= (len(test_X) // batch_size)
    test_acc /= (len(test_X) // batch_size)
    if test_acc > CURRENT_ACC:
        print('epoch: %d, pass acc: %f, current acc: %f'%(EPOCH,CURRENT_ACC, test_acc))
        CURRENT_ACC = test_acc
        CURRENT_CHECKPOINT = 0
    else:
        CURRENT_CHECKPOINT += 1
    print('time taken:', time.time()-lasttime)
    print('epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'%(EPOCH,train_loss,
                                                                                             train_acc,test_loss,
                                                                                             test_acc))
    EPOCH += 1
# Evaluate on the validation split and print a per-class report.
# NOTE(review): `metrics` must come from `from sklearn import metrics` at the
# top of the file — confirm it is imported.
logits = sess.run(model.logits, feed_dict={model.X:test_X})
print(metrics.classification_report(test_Y, np.argmax(logits,1), target_names = trainset.target_names))
| text-classification/52.fast-text.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Baisalis/Baisalis.github.io/blob/master/Project_2.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="82EceDVYp_uK" colab_type="code" colab={}
import pandas as pd
# + id="t7XbQE8uqqff" colab_type="code" outputId="f715c198-a4b3-40ec-ac35-385b24da11b2" colab={"base_uri": "https://localhost:8080/", "height": 217}
df = pd.read_csv('https://github.com/Thinkful-Ed/data-201-resources/raw/master/hotel-reviews.csv')
# !pip install category_encoders==2.*
# + id="2_ka1b6Xq1HJ" colab_type="code" outputId="6a05faaf-e7ca-4e9d-a45d-40f75d19a704" colab={"base_uri": "https://localhost:8080/", "height": 495}
print(df.shape)
df.head()
# + id="NXair-O1xS67" colab_type="code" outputId="737a0553-1ff1-462a-c1a7-6d79b002fea5" colab={"base_uri": "https://localhost:8080/", "height": 181}
df['reviews.rating'].describe()
# + id="Q7bPHyfpMDCs" colab_type="code" outputId="3ef6a888-f4f0-484b-cdcd-ffacdaa4976f" colab={"base_uri": "https://localhost:8080/", "height": 265}
df['reviews.rating'].plot(kind='hist', bins=10);
# + id="GsnPSHnpLtqQ" colab_type="code" outputId="a11f5e35-e259-47d2-aa0b-593073f0c3ab" colab={"base_uri": "https://localhost:8080/", "height": 542}
import plotly.graph_objs as go
import pandas as pd
import numpy as np
import plotly.offline as py
q2 = df.groupby('name')['reviews.rating'].mean().reset_index().sort_values(by='reviews.rating', ascending=False)[:10]
trace = go.Bar(
x=q2['name'],
y=q2['reviews.rating'],
marker=dict(
color='rgb(158,202,225)',
line=dict(
color='rgb(8,48,107)',
width=1.5,
)
),
opacity=0.6
)
data = [trace]
layout = go.Layout(
title='Bar Chat Showing Top 10 Hotels With Highest Average Ratings.',
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='hotel-reviews-highest-rating')
# + id="YuZW3PtPMLeA" colab_type="code" outputId="9b38730c-ce6a-4dca-d001-ebcae81175d2" colab={"base_uri": "https://localhost:8080/", "height": 542}
q3 = df['city'].value_counts()[:20]
trace = go.Bar(
x=q3.index,
y=q3.values,
marker=dict(
color='rgb(158,202,225)',
line=dict(
color='rgb(8,48,107)',
width=1.5,
)
),
opacity=0.6
)
data = [trace]
layout = go.Layout(
title='Bar Chart Showing Top 20 Cities With Highest Reviews.',
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename='hotel-reviews-highest-cities')
# + id="xW7Bj6-rxV4N" colab_type="code" colab={}
# Derive binary classification target:
# We define a 'Best' hotels
# overall rating of 5 or higher, on a 10 point scale.
# Drop unrated hotels.
df = df.dropna(subset=['reviews.rating'])
df['Best'] = df['reviews.rating'] >= 5
# + id="ay-SVX69xZKQ" colab_type="code" colab={}
# choose a target
y = df['Best']
# + id="gbIcFWQgxfL0" colab_type="code" outputId="1f6cfce8-79da-458d-a334-79390cda8c09" colab={"base_uri": "https://localhost:8080/", "height": 35}
# There are 2 classes
# this is a binary classification problem.
y.nunique()
# + id="2OUnchMGxh3a" colab_type="code" outputId="77884227-c38b-4eef-81ba-dceb56b5daa5" colab={"base_uri": "https://localhost:8080/", "height": 72}
# The majority class occurs with 62% frequency,
# so this is not too imbalanced.
y.value_counts(normalize=True)
# + id="u0lQRjmsxk9J" colab_type="code" outputId="68834a69-3e5e-4be0-f211-c1e324044277" colab={"base_uri": "https://localhost:8080/", "height": 399}
df.dtypes
# + id="W__D-PCIxn9R" colab_type="code" outputId="ce2b2472-3b7d-4c1a-fb95-be308e5ba398" colab={"base_uri": "https://localhost:8080/", "height": 399}
df.isnull().sum()
# + id="y6ou2rZRxq8w" colab_type="code" outputId="122a7b2d-ac1a-41e1-dc0c-3135fd22d836" colab={"base_uri": "https://localhost:8080/", "height": 288}
df.describe()
# + id="3Nbd4KeWH4rb" colab_type="code" outputId="1f145106-28e2-49db-8878-a54c1367468e" colab={"base_uri": "https://localhost:8080/", "height": 399}
df.isnull().sum()
# + id="X-jqODX-xuza" colab_type="code" colab={}
# Drop some high cardinality categoricals
df = df.drop(columns=['address', 'latitude', 'longitude', 'reviews.dateAdded', 'reviews.doRecommend', 'reviews.title', 'reviews.id'])
# + id="KlDtCHQg9iw5" colab_type="code" colab={}
df['reviews.text'] = df['reviews.text'].fillna('')
# + id="9Udd8aUN_3L2" colab_type="code" outputId="e5848af4-0f20-41a0-dd4d-669060760e8a" colab={"base_uri": "https://localhost:8080/", "height": 272}
df.isnull().sum()
# + id="TLCxy_1VxxVi" colab_type="code" outputId="dc0b72c2-4130-4a5a-93ed-e6dee1a60af7" colab={"base_uri": "https://localhost:8080/", "height": 35}
df.shape
# + id="uAAH4P8-x1mp" colab_type="code" colab={}
# Convert date_recorded to datetime
df['reviews.date'] = pd.to_datetime(df['reviews.date'], infer_datetime_format=True)
# + id="o6nMjm78N1Ly" colab_type="code" colab={}
# split Train, Test and Val
train = df[df['reviews.date'].dt.year <= 2013]
val = df[df['reviews.date'].dt.year == 2014]
test = df[df['reviews.date'].dt.year >= 2015]
# + id="u8ud-tEqN4dR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="9eacdcba-f41e-4d96-8f6e-6ab701efa79a"
train.shape, test.shape, val.shape
# + id="ocWpJqZQcAdG" colab_type="code" colab={}
df['reviews.text'] = df['reviews.text'].apply(str.lower)
# + id="mVHoP8dc0itW" colab_type="code" colab={}
# sentiment analysis
positive_words = ['great', 'good', 'pleasant', 'Only' , 'park','No', 'real', 'complaints', 'hotel', 'great', 'location', 'surroundings', 'amenities', 'service','recommendations', 'however', 'firstly','staff', 'upon', 'check', 'very', 'confusing', 'regarding', 'deposit', 'payments', 'staff', 'offer', 'upon', 'checkout', 'refund','original', 'payment','confusing', 'Secondly', 'restaurant', 'lacking', 'very', 'well', 'thought', 'excellent', 'quality', 'food', 'vegetarian', 'background', 'but', 'even', 'a', 'wrap', 'or', 'toasted', 'sandwich', 'option', 'would', 'be', 'great', 'Aside', 'Location', 'was', 'good','Amazing', 'location', 'and', 'building', 'Romantic', 'setting', 'and', 'staff', 'were', 'ok', 'It', 'is', 'cute', 'hotel', 'the', 'breakfast', 'range', 'is', 'nice', 'Will', 'go', 'back','from', 'those', 'minor', 'minor', 'things', 'fantastic', 'spot', 'and', 'will', 'be', 'back', 'when', 'i', 'return', 'Amsterdam', 'outside', 'beautiful','smile','give', 'peace', 'lovable', 'best behaviour', 'minimum walk','lovely', 'clean', 'good location','loving', 'wonderful','special','be kind,' 'like','nice', 'sweat', 'greatful', 'enjoy','kindness','plentiful','dream','giving','amazing','durable','happening','ideal']
negative_words = ['bad', 'horrible', 'bad location', 'not clean', 'no','staff', 'bad behaviour','awful', 'sad','lost','failure','helpless','rejected','unhappy','confused','scared','panic','frustrated','troubled','lonely','feeble','panic','embarrassed', 'dirty','']
def count_positives(review):
    """Count tokens of `review` (split on single spaces) that appear in the
    module-level `positive_words` list."""
    return sum(1 for token in review.split(' ') if token in positive_words)
def count_negatives(review):
    """Count tokens of `review` (split on single spaces) that appear in the
    module-level `negative_words` list."""
    return sum(1 for token in review.split(' ') if token in negative_words)
#positive and negative word counts from the review
df['positive_word_count'] = df['reviews.text'].apply(count_positives)
df['negative_word_count'] = df['reviews.text'].apply(count_negatives)
#sentiment score (number of positives words minus the number of negative words)
df['sentiment_score'] = df['positive_word_count'] - df['negative_word_count']
#feature that counts the length of the review- maybe there is a relationship
#between the length of a review and whether or not its a high rating
df['review_length'] = df['reviews.text'].apply(len)
# + id="BgAYyX5rHGuI" colab_type="code" outputId="3706c90e-68dc-4222-bff1-636c28bc09df" colab={"base_uri": "https://localhost:8080/", "height": 477}
df.head()
# + id="nBOaN2u95kZ4" colab_type="code" colab={}
# set up the features
target = 'Best'
features = df.columns.drop([target, 'reviews.date','reviews.text','reviews.rating'])
# Arrange data into X features matrix and y target vector
X_train = train[features]
y_train = train[target]
X_val = val[features]
y_val = val[target]
X_test = test[features]
# + [markdown] id="Jy3MI7EUBkS-" colab_type="text"
# # **Random forest Classifier**
# + id="Lu3XPy3L6RhI" colab_type="code" outputId="826a2151-f720-4fbe-8ae0-25a2f4f6823b" colab={"base_uri": "https://localhost:8080/", "height": 54}
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
pipeline = make_pipeline(
ce.OneHotEncoder(use_cat_names=True),
SimpleImputer(strategy='median'),
RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=0)
)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Train Accuracy', pipeline.score(X_train, y_train))
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + [markdown] id="tNSDJXewCy-f" colab_type="text"
# # **Random Forest**
# + id="iT2ociT4CRoh" colab_type="code" outputId="dc5ad358-41ce-4a43-e460-10468241db4c" colab={"base_uri": "https://localhost:8080/", "height": 54}
from sklearn.ensemble import RandomForestClassifier
pipeline = make_pipeline(
ce.OrdinalEncoder(),
RandomForestClassifier(n_estimators = 100, n_jobs=-1)
)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Train Accuracy', pipeline.score(X_train, y_train))
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + [markdown] id="G2Rp5WdCDwry" colab_type="text"
# # **Feature Importances**
# + id="18fPI9heDfSG" colab_type="code" outputId="847c6166-bb4a-49eb-e953-0996ad61485a" colab={"base_uri": "https://localhost:8080/", "height": 337}
rf = pipeline.named_steps['randomforestclassifier']
importances = pd.Series(rf.feature_importances_, X_train.columns)
# %matplotlib inline
import matplotlib.pyplot as plt
n=10
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');
# + [markdown] id="3mhXwfgCDFRR" colab_type="text"
# # **Decision Tree**
# + id="hYACRfSKDBpy" colab_type="code" outputId="71c41964-0795-4482-d012-59e6b3de717e" colab={"base_uri": "https://localhost:8080/", "height": 54}
from sklearn.tree import DecisionTreeClassifier
pipeline = make_pipeline(
ce.OrdinalEncoder(),
DecisionTreeClassifier(random_state=42)
)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Train Accuracy', pipeline.score(X_train, y_train))
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + [markdown] id="8rvju3Q-FAfm" colab_type="text"
# # **Linear Regression**
# + id="xAYBiW7RFEcS" colab_type="code" outputId="4ac645b4-dc69-4913-872a-4c94a8488ffb" colab={"base_uri": "https://localhost:8080/", "height": 54}
from sklearn.linear_model import LinearRegression
pipeline = make_pipeline(
ce.OrdinalEncoder(),
LinearRegression()
)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Train Accuracy', pipeline.score(X_train, y_train))
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + [markdown] id="WK-vY7F0DRjB" colab_type="text"
# # **XGB Classifier**
# + id="B53yiqWLgQvm" colab_type="code" outputId="3a91bedc-4c1a-4e63-bde0-93113305f9fc" colab={"base_uri": "https://localhost:8080/", "height": 54}
from xgboost import XGBClassifier
pipeline = make_pipeline(
ce.OrdinalEncoder(),
XGBClassifier(n_estimators=100, random_state=42, n_jobs=-1)
)
pipeline.fit(X_train, y_train)
y_pred = pipeline.predict(X_val)
print('Train Accuracy', pipeline.score(X_train, y_train))
print('Validation Accuracy', pipeline.score(X_val, y_val))
# + id="IvOZg9CiVB76" colab_type="code" outputId="88d143db-1354-4d21-d3b7-c6413b43656e" colab={"base_uri": "https://localhost:8080/", "height": 337}
rf = pipeline.named_steps['xgbclassifier']
importances = pd.Series(rf.feature_importances_, X_train.columns)
# %matplotlib inline
import matplotlib.pyplot as plt
n=10
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
importances.sort_values()[-n:].plot.barh(color='grey');
# + id="N9YKJxrZQllb" colab_type="code" outputId="22735c95-b8a8-40a6-c7aa-b4ec91d66319" colab={"base_uri": "https://localhost:8080/", "height": 299}
# Plot train/validation classification error per boosting round.
# NOTE(review): `model` is never defined in this notebook, and the XGB
# pipelines above were fit without an eval_set, so this cell raises a
# NameError as written. Presumably a fitted XGBClassifier trained with
# eval_set=[(train...), (val...)] was intended — confirm and wire it up.
results = model.evals_result()
train_error = results['validation_0']['error']
val_error = results['validation_1']['error']
epoch = range(1, len(train_error)+1)
plt.plot(epoch, train_error, label='Train')
plt.plot(epoch, val_error, label='Validation')
plt.title('Validation Curve')
plt.ylabel('Classification Error')
plt.xlabel('Model Complexity (n_estimators)')
plt.ylim()
plt.legend();
# + id="9BCrrlLmWor2" colab_type="code" outputId="32c3fd69-6344-4ba5-b500-d98cd4a8f85f" colab={"base_uri": "https://localhost:8080/", "height": 409}
# Bug fix: `sns` was used here before `import seaborn as sns` appeared two
# cells later, raising a NameError on a fresh run. Import first, then plot.
import seaborn as sns
plt.figure(figsize=(12, 6))
sns.distplot(df['sentiment_score'])
# + id="rasjtv2mMdFF" colab_type="code" outputId="49972734-7c0a-4c09-f636-e354e3b0e99b" colab={"base_uri": "https://localhost:8080/", "height": 285}
sns.barplot(x='positive_word_count', y='negative_word_count', data=df);
# + id="frLTn8SEN5Bh" colab_type="code" outputId="4f5bacb8-1875-47b7-eebe-c3701d95952d" colab={"base_uri": "https://localhost:8080/", "height": 35}
import category_encoders as ce
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
lr = make_pipeline(
ce.TargetEncoder(),
LinearRegression()
)
lr.fit(X_train, y_train)
print('Linear Regression R^2', lr.score(X_val, y_val))
# + id="gZpRXq9vOB2x" colab_type="code" outputId="18190893-f06c-4c16-bb09-1b3e6e115de7" colab={"base_uri": "https://localhost:8080/", "height": 35}
from sklearn.metrics import r2_score, accuracy_score
from xgboost import XGBRegressor
gb = make_pipeline(
ce.OrdinalEncoder(),
XGBRegressor(n_estimators=200, objective='reg:squarederror', n_jobs=-1)
)
gb.fit(X_train, y_train)
y_pred = gb.predict(X_val)
print('Gradient Boosting R^2', r2_score(y_val, y_pred))
# + id="miOxwCfiQ0Q_" colab_type="code" colab={}
# !pip install PDPbox
# !pip install shap
# + id="fhdJ7A7cOIwP" colab_type="code" outputId="1e2e766f-83b7-42b0-fcaf-36612a4b283b" colab={"base_uri": "https://localhost:8080/", "height": 634}
# Use Pdpbox
# %matplotlib inline
import matplotlib.pyplot as plt
from pdpbox import pdp
feature = 'sentiment_score'
isolated = pdp.pdp_isolate(
model=gb,
dataset=X_val,
model_features=X_val.columns,
feature=feature
)
pdp.pdp_plot(isolated, feature_name=feature)
# + id="IfHFGCs7WDE-" colab_type="code" outputId="05979575-2504-42eb-836a-1176ee063b30" colab={"base_uri": "https://localhost:8080/", "height": 579}
pdp.pdp_plot(isolated, feature_name=feature, plot_lines=True,
frac_to_plot=0.01)
plt.xlim(0,20);
# + id="KLM9ZLoEDk38" colab_type="code" outputId="0eb182fc-74d8-4b97-ab2a-601b8177d24e" colab={"base_uri": "https://localhost:8080/", "height": 409}
plt.figure(figsize=(12, 6))
sns.distplot(df['reviews.rating'])
# + id="zgX-4yQyE7pk" colab_type="code" outputId="d7a273e5-483f-43c4-f0a6-ccbb6b806900" colab={"base_uri": "https://localhost:8080/", "height": 1000}
correlation = df.corr()
plt.figure(figsize=(18, 18))
sns.heatmap(correlation, vmax=1, square=True,annot=True,cmap='viridis')
plt.title('Correlation between different fearures')
| Project_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + code_folding=[21, 43]
import logging, os, json, glob, h5py, pickle, math, collections
import numpy as np
from einops import rearrange, repeat
import tqdm.notebook as tqdm
import torch
from torch import nn
import torch.nn.functional as F
from pathlib import Path
from typing import Callable, Optional, Tuple, Union, Any
def load_h5_file(file_path: Union[str, Path], sl: Optional[slice] = None, to_torch: bool = False) -> np.ndarray:
    """Given a file path to an h5 file assumed to house a tensor, load that
    tensor into memory and return a pointer.

    Parameters
    ----------
    file_path: Union[str, Path]
        h5 file to load
    sl: Optional[slice]
        slice to load (data is written in chunks for faster access to rows).
    to_torch: bool
        if True, return a float torch.Tensor instead of an np.ndarray.

    Returns
    -------
    The dataset stored under the "array" key, as np.ndarray, or as a float
    torch.Tensor when `to_torch` is set.
    """
    # str() is a no-op on str inputs, so the previous isinstance check on
    # Path was unnecessary.
    with h5py.File(str(file_path), "r") as fr:
        data = fr.get("array")
        # Slicing the h5 dataset before np.array() avoids materialising the
        # full array when only some rows are needed.
        if sl is not None:
            data = np.array(data[sl])
        else:
            data = np.array(data)
        if to_torch:
            data = torch.from_numpy(data)
            data = data.to(dtype=torch.float)
        return data
def write_data_to_h5(data: np.ndarray, filename: Union[str, Path], compression="gzip", compression_level=9, dtype="uint8", verbose=True):
    """write data in gzipped h5 format.

    Parameters
    ----------
    data: np.ndarray
        tensor stored under the "array" key.
    filename: Union[str, Path]
        destination .h5 file (overwritten if it exists).
    compression: str
        h5py compression filter.
    compression_level: int
        gzip level, 0-9.
    dtype: str
        expected dtype; a warning is logged on mismatch, and the dataset is
        written with this dtype regardless.
    verbose: bool
        print progress messages.
    """
    # str() is a no-op on str inputs; the previous isinstance branch was redundant.
    with h5py.File(str(filename), "w", libver="latest") as f:
        if data.dtype != dtype:
            logging.warning(f"Found data with {data.dtype}, expected {dtype}.")
        if verbose:
            # Bug fix: these f-strings had no placeholder and printed the
            # literal "(unknown)" instead of the target filename.
            print(f"writing {filename} ...")
        f.create_dataset(
            # `chunks=(1, *data.shape[1:])`: optimize for row access!
            "array",
            shape=data.shape,
            data=data,
            chunks=(1, *data.shape[1:]),
            dtype=dtype,
            compression=compression,
            compression_opts=compression_level,
        )
        if verbose:
            print(f"... done writing {filename}")
# -
# #### Make some prediction
# + code_folding=[4]
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.cuda.empty_cache()
device = 'cpu'
class Runner():
    # Inference driver: instantiates a model class by name, restores a
    # checkpoint, and writes per-city H5 prediction files in the
    # submission directory layout.
    def __init__(self, model_name='Resnet3D'):
        self.model_name = model_name
        # Resolve the model class by name from module globals and instantiate.
        self.model = globals()[model_name]()
        # if torch.cuda.device_count() > 1:
        #     self.model = torch.nn.DataParallel(self.model)
        self.model.to(device)
    def predict(self, checkpoint_dir='./checkpoints/Resnet3D.pk', mode='core', use_mask=True):
        # mode='core' predicts the 'temporal' cities; any other mode predicts
        # the 'spatiotemporal' ones. With use_mask, pixels never active in
        # the first 100 test samples are zeroed in the predictions.
        submission_name = f'submission_{self.model_name}'
        model_dic = torch.load(checkpoint_dir)
        # Strip the 'module.' prefix that DataParallel adds to state-dict keys.
        new_state_dict = collections.OrderedDict()
        for k, v in model_dic.items():
            name = k.replace('module.', '')# remove `module.`
            new_state_dict[name] = v
        self.model.load_state_dict(new_state_dict)
        self.model.eval()
        if mode =='core':
            cities = ['BERLIN', 'CHICAGO', 'ISTANBUL', 'MELBOURNE']
            postfix = 'temporal'
        else:
            cities = ['VIENNA', 'NEWYORK']
            postfix = 'spatiotemporal'
        for city in cities:
            print(f'Predicting {city}...')
            # read city information
            with h5py.File(f'../data/raw/{city}/{city}_test_{postfix}.h5', "r") as f:
                test_data = f.get("array")
                test_data = np.array(test_data)
            # MASK
            if use_mask:
                # Union of activity over the first 100 samples x 12 frames.
                mask = np.zeros([495, 436, 8])
                for n in range(100):
                    for t in range(12):
                        mask = np.logical_or(mask, test_data[n, t, :, :, :])
                mask = torch.from_numpy(mask).to(device)
            y_preds = []
            for x in tqdm.tqdm(test_data):
                x = torch.from_numpy(x).unsqueeze(0).float().to(device)
                if use_mask:
                    y_pred = self.model(x) * mask
                else:
                    y_pred = self.model(x)
                # Clamp predictions into the valid uint8 pixel range.
                y_pred[y_pred < 1] = 0
                y_pred[y_pred > 255] = 255
                y_preds.append(y_pred.cpu().detach().numpy())
                torch.cuda.empty_cache()
            y_preds = np.concatenate(y_preds, axis=0)
            Path(f'{submission_name}/{city}/').mkdir(parents=True, exist_ok=True)
            write_data_to_h5(y_preds.astype(np.uint8), f'{submission_name}/{city}/{city}_test_{postfix}.h5')
        print('Done.')
# + code_folding=[2, 27, 50, 62]
# Model 1 - Resnet3D
inlen, outlen = 12, 6
class PositionalEncoding(nn.Module):
    """Adds a sinusoidal positional encoding over the 12 input time frames,
    after a learned 1x1x1 channel-mixing convolution, followed by dropout."""
    def __init__(self, d_model: int, dropout: float = 0.1, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)
        self.cnn = nn.Conv3d(12, 12, kernel_size=1, stride=1, padding=0, bias=False)
        # Standard transformer sinusoid table:
        #   pe[pos, 0, 2i]   = sin(pos / 10000^(2i/d_model))
        #   pe[pos, 0, 2i+1] = cos(pos / 10000^(2i/d_model))
        pos = torch.arange(max_len).unsqueeze(1)
        freq = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
        table = torch.zeros(max_len, 1, d_model)
        table[:, 0, 0::2] = torch.sin(pos * freq)
        table[:, 0, 1::2] = torch.cos(pos * freq)
        self.register_buffer('pe', table)

    def forward(self, x):
        """
        Args:
            x: Tensor, [1, 12, 495, 436, 8]
        """
        mixed = self.cnn(x)
        # Move the 12 frames next to the channel axis so the [12, d_model]
        # encoding broadcasts over the spatial dimensions, then move back.
        mixed = mixed.permute(0, 2, 3, 1, 4)
        mixed = mixed + self.pe[:12].squeeze()
        mixed = mixed.permute(0, 3, 1, 2, 4)
        return self.dropout(mixed)
class Resnet3DBlock(nn.Module):
    # Residual unit: spatial-only (1,3,3) conv -> LayerNorm -> LeakyReLU,
    # plus an identity skip connection.
    # NOTE(review): LayerNorm's normalized_shape hard-codes (8, 495, 436),
    # so inputs must be [B, hidden_size, 8, 495, 436] exactly.
    def __init__(self, hidden_size=4, leakyrate=0.2):
        super(Resnet3DBlock, self).__init__()
        self.conv1 = nn.Conv3d(hidden_size, hidden_size, kernel_size=(1,3,3), stride=(1,1,1), padding=(0,1,1))
        #self.conv2 = nn.Conv3d(hidden_size, hidden_size, kernel_size=(3,3,3), stride=(1,1,1), padding=(1,1,1), bias=False)
        #self.bn = nn.BatchNorm3d(hidden_size)
        self.layer_norm = nn.LayerNorm((8, 495, 436))
        #self.relu = nn.ReLU()
        self.relu = nn.LeakyReLU(leakyrate)
        #self.dropout = nn.Dropout(p=0.2)
    def forward(self, x):
        # conv -> norm -> activation, then add the skip connection.
        identity = x
        out = self.conv1(x)
        out = self.layer_norm(out)
        out = self.relu(out)
        #out = self.dropout(out)
        out = (out + identity)
        return out
class InceptionIn(nn.Module):
    """Input stem: the sum of a 1x1x1 and a 3x3x3 convolution mapping the
    `inlen` input frames to `hidden_size` channels (the 7x7 branch of the
    original inception-style stem is kept disabled)."""
    def __init__(self, hidden_size=24):
        super(InceptionIn, self).__init__()
        self.cnn1x1 = nn.Conv3d(inlen, hidden_size, kernel_size=(1,1,1), stride=(1,1,1), padding=(0,0,0))
        self.cnn3x3 = nn.Conv3d(inlen, hidden_size, kernel_size=(3,3,3), stride=(1,1,1), padding=(1,1,1))
        # self.cnn7x7 = nn.Conv3d(inlen, hidden_size, kernel_size=(3,7,7), stride=(1,1,1), padding=(1,3,3))

    def forward(self, x):
        branch_point = self.cnn1x1(x)
        branch_spatial = self.cnn3x3(x)
        # out3 = self.cnn7x7(x)  # disabled third branch
        return branch_point + branch_spatial
class Resnet3D(nn.Module):
    # Full model: positional encoding -> inception-style input stem ->
    # n_layers residual 3D blocks -> output conv producing `outlen` frames.
    def __init__(self, n_layers=4, hidden_size=16, leakyrate=0.2):
        super(Resnet3D, self).__init__()
        self.n_layers = n_layers
        self.pe = PositionalEncoding(8)
        self.relu = nn.LeakyReLU(leakyrate)
        #self.relu = nn.ReLU()
        #self.cnn_in = nn.Conv3d(inlen, hidden_size, kernel_size=(3,3,3), stride=(1,1,1), padding=(1,1,1), bias=True)
        self.cnn_in = InceptionIn(hidden_size)
        self.layer_norm = nn.LayerNorm((8, 495, 436))
        # self.bn = nn.BatchNorm3d(hidden_size)
        # Residual blocks registered dynamically as attributes resnet0..resnet{n-1}.
        for i in range(n_layers):
            setattr(self, f'resnet{i}', Resnet3DBlock(hidden_size, leakyrate))
        self.cnn_out = nn.Conv3d(hidden_size, outlen, kernel_size=(3,3,3), stride=(1,1,1), padding=(1,1,1))
        # init
        # Kaiming init for convolutions, unit weight / zero bias for norms.
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Conv3d)):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm, nn.BatchNorm3d)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
    def forward(self, x):
        # positional encoding
        x = self.pe(x)
        # [batch, time, w, h, c] -> channels-last-of-time layout for Conv3d.
        x = rearrange(x, 'b t w h c -> b t c w h')
        # print('input shape is ', x.shape)
        x = self.layer_norm(self.relu(self.cnn_in(x)))
        for i in range(self.n_layers):
            x = getattr(self, f'resnet{i}')(x)
        #print("out in",x.shape)
        # Non-negative outputs; clamping to [0, 255] happens in Runner.predict.
        x = F.relu(self.cnn_out(x))
        #x[x > 255.0] = 255.0
        #x[x < 1.0] = 0
        x = rearrange(x, 'b t c w h -> b t w h c')
        return x
runner = Runner(model_name='Resnet3D')
runner.predict(checkpoint_dir='./checkpoints/Resnet3D.pk', mode='core')
# + code_folding=[3]
# Model 2 - SparseUNet
import MinkowskiEngine as ME
import MinkowskiEngine.MinkowskiFunctional as MF
class SparseUNet(ME.MinkowskiNetwork):
    # U-Net style encoder/decoder on Minkowski sparse tensors: three
    # conv+BN encoder blocks, two transposed-conv decoder blocks with skip
    # concatenations, and a final 1x1x1 conv producing the dense output.
    def __init__(self, hs_block1=12, hs_block2=12, hs_block3=16, block3_tr=8, block2_tr=48):
        # 12 input frames, 6 output frames, 3 spatial dimensions.
        in_nchannel, out_nchannel, D = 12, 6, 3
        super(SparseUNet, self).__init__(D)
        self.block1 = torch.nn.Sequential(
            ME.MinkowskiConvolution(
                in_channels=in_nchannel,
                out_channels=hs_block1,
                kernel_size=3,
                stride=1,
                dimension=D),
            ME.MinkowskiBatchNorm(hs_block1),
        )
        self.block2 = torch.nn.Sequential(
            ME.MinkowskiConvolution(
                in_channels=hs_block1,
                out_channels=hs_block2,
                kernel_size=3,
                stride=1,
                dimension=D),
            ME.MinkowskiBatchNorm(hs_block2),
        )
        self.block3 = torch.nn.Sequential(
            ME.MinkowskiConvolution(
                in_channels=hs_block2,
                out_channels=hs_block3,
                kernel_size=3,
                stride=1,
                dimension=D),
            ME.MinkowskiBatchNorm(hs_block3),
        )
        self.block3_tr = torch.nn.Sequential(
            ME.MinkowskiConvolutionTranspose(
                in_channels=hs_block3,
                out_channels=block3_tr,
                kernel_size=3,
                stride=1,
                dimension=D),
            ME.MinkowskiBatchNorm(block3_tr),
        )
        self.block2_tr = torch.nn.Sequential(
            ME.MinkowskiConvolutionTranspose(
                # Decoder input = upsampled features concatenated with the skip.
                in_channels=hs_block2+block3_tr,
                out_channels=block2_tr,
                kernel_size=3,
                stride=1,
                dimension=D),
            ME.MinkowskiBatchNorm(block2_tr),
        )
        self.conv1_tr = ME.MinkowskiConvolution(
            in_channels=hs_block1+block2_tr,
            out_channels=out_nchannel,
            kernel_size=1,
            stride=1,
            dimension=D,
            expand_coordinates=True
        )
    def forward(self, x):
        # x to sparse tensor
        x = ME.MinkowskiOps.to_sparse(x)
        out_s1 = self.block1(x)
        out = MF.relu(out_s1)
        out_s2 = self.block2(out)
        out = MF.relu(out_s2)
        out_s4 = self.block3(out)
        out = MF.relu(out_s4)
        # Decoder with skip concatenations from the matching encoder stages.
        out = MF.relu(self.block3_tr(out))
        out = ME.cat(out, out_s2)
        out = MF.relu(self.block2_tr(out))
        out = ME.cat(out, out_s1)
        out = self.conv1_tr(out)
        # Densify; if the dense extent does not match the expected
        # [495, 436, 8], retry with an explicit min_coordinate offset.
        dense_output, min_coord, tensor_stride = out.dense()
        missed_min_coordinate = np.array(dense_output.size()[-3:]) - np.array([495, 436, 8])
        if missed_min_coordinate.sum() != 0:
            dense_output, min_coord, tensor_stride = out.dense(
                min_coordinate=torch.IntTensor(missed_min_coordinate)
            )
        return dense_output
runner = Runner(model_name='SparseUNet')
runner.predict(checkpoint_dir='./checkpoints/SparseUNet.pk', mode='extended')
# -
| models/submission.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Copy the content of the data directory to the hdfs cluster
# Copy every regular file from the local ./data directory into /data on HDFS.
from os import listdir
from os.path import isfile, join
from hdfs import InsecureClient
# WebHDFS endpoint of the namenode; all actions are performed as user 'root'.
client = InsecureClient('http://192.168.11.2:9870', user='root')
# NOTE(review): permissions are passed as the integers 755/777 — presumably
# the hdfs client serialises them to the octal strings WebHDFS expects;
# verify, or pass "755"/"777" explicitly.
client.makedirs("/data", permission=755)
client.makedirs("/tmp", permission=777)
client.makedirs("/app", permission=777)
client.makedirs("/result", permission=777)
client.list("/", status=False)
datadir = "data/"
# Keep regular files only, then drop hidden files such as .DS_Store.
allfiles = [f for f in listdir(datadir) if isfile(join(datadir, f))]
datafiles = list(filter(lambda x: x[0]!= ".", allfiles))
for file in datafiles:
    print(f"processing file {file}")
    client.upload(f"/data/{file}", f"data/{file}", n_threads=1, temp_dir="/tmp", chunk_size=65536, progress=None, cleanup=True, overwrite=True)
| push_data_dir_to_hdfs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Za8-Nr5k11fh"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab_type="code" id="Eq10uEbw0E4l" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="Nm71sonIiJjH"
# # Moving average
# + [markdown] colab_type="text" id="C34f-r1Mhzkj"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c03_moving_average.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c03_moving_average.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="vidayERjaO5q"
# ## Setup
# + colab_type="code" id="G0954hNkvkXk" colab={}
from __future__ import absolute_import, division, print_function, unicode_literals
# + colab_type="code" id="_nkdPmYn2hFS" colab={}
try:
    # Use the %tensorflow_version magic if in colab.
    # %tensorflow_version 2.x
    # Bug fix: the try body contained only comments, which is a SyntaxError
    # when this notebook is executed as a plain .py script. The magic above
    # is only active in Colab; `pass` keeps the body non-empty everywhere else.
    pass
except Exception:
    pass
# + colab_type="code" id="gqWabzlJ63nL" colab={}
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
# + colab_type="code" id="sJwA96JU00pW" colab={}
def plot_series(time, series, format="-", start=0, end=None, label=None):
    """Draw one series on the current axes over the window [start, end),
    with axis labels, an optional legend entry, and a grid."""
    window = slice(start, end)
    plt.plot(time[window], series[window], format, label=label)
    plt.xlabel("Time")
    plt.ylabel("Value")
    if label:
        plt.legend(fontsize=14)
    plt.grid(True)
def trend(time, slope=0):
    """Linear trend: slope * time, vectorised over the time array."""
    return time * slope
def seasonal_pattern(season_time):
    """Just an arbitrary pattern, you can change it if you wish"""
    # Cosine for the first 40% of the cycle, exponential decay afterwards.
    rising = np.cos(season_time * 2 * np.pi)
    falling = 1 / np.exp(3 * season_time)
    return np.where(season_time < 0.4, rising, falling)
def seasonality(time, period, amplitude=1, phase=0):
    """Repeats the same pattern at each period"""
    position_in_cycle = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(position_in_cycle)
def white_noise(time, noise_level=1, seed=None):
    """Gaussian noise scaled by `noise_level`, one sample per time step.

    A fixed `seed` makes the output reproducible.
    """
    generator = np.random.RandomState(seed)
    return generator.randn(len(time)) * noise_level
# + [markdown] colab_type="text" id="yVo6CcpRaW7u"
# ## Trend and Seasonality
# + colab_type="code" id="BLt-pLiZ0nfB" colab={}
time = np.arange(4 * 365 + 1)  # four years of daily time steps
slope = 0.05
baseline = 10
amplitude = 40
# Synthetic series: baseline level + linear trend + yearly seasonality.
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)  # fixed seed for reproducibility
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
# + [markdown] colab_type="text" id="bjD8ncEZbjEW"
# ## Naive Forecast
# + colab_type="code" id="Pj_-uCeYxcAb" colab={}
# Train/validation split at day 1000.
split_time = 1000
time_train = time[:split_time]  # NOTE(review): train slices unused in this file
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
# Naive forecast: predict each value with the previous day's value.
naive_forecast = series[split_time - 1:-1]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, start=0, end=150, label="Series")
plot_series(time_valid, naive_forecast, start=1, end=151, label="Forecast")
# + [markdown] colab_type="text" id="Uh_7244Gsxfx"
# Now let's compute the mean absolute error between the forecasts and the actual values in the validation period:
# + colab_type="code" id="byNnC7IbsnMZ" colab={}
# Baseline metric: MAE of the naive forecast over the validation period.
keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy()
# + [markdown] colab_type="text" id="WGPBC9QttI1u"
# That's our baseline, now let's try a moving average.
# + [markdown] colab_type="text" id="MLtZbFoU8OH-"
# ## Moving Average
# + colab_type="code" id="YGz5UsUdf2tV" colab={}
def moving_average_forecast(series, window_size):
    """Forecasts the mean of the last few values.
    If window_size=1, then this is equivalent to naive forecast"""
    window_means = [series[start:start + window_size].mean()
                    for start in range(len(series) - window_size)]
    return np.array(window_means)
# + colab_type="code" id="Le2gNBthBWPN" colab={}
def moving_average_forecast(series, window_size):
    """Forecasts the mean of the last few values.
    If window_size=1, then this is equivalent to naive forecast
    This implementation is *much* faster than the previous one"""
    # running[i] holds the sum of the last `window_size` values ending at i
    # (for i >= window_size; earlier entries keep the plain cumulative sum,
    # which is exactly the first window's sum at index window_size - 1).
    running = np.cumsum(series)
    running[window_size:] = running[window_size:] - running[:-window_size]
    return running[window_size - 1:-1] / window_size
# + colab_type="code" id="F50zyJGoDNJl" colab={}
# 30-day moving average, aligned so forecasts cover the validation period.
moving_avg = moving_average_forecast(series, 30)[split_time - 30:]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, label="Series")
plot_series(time_valid, moving_avg, label="Moving average (30 days)")
# + colab_type="code" id="wG7pTAd7z0e8" colab={}
keras.metrics.mean_absolute_error(x_valid, moving_avg).numpy()
# + [markdown] colab_type="text" id="JMYPnJqwz8nS"
# That's worse than naive forecast! The moving average does not anticipate trend or seasonality, so let's try to remove them by using differencing. Since the seasonality period is 365 days, we will subtract the value at time *t* – 365 from the value at time *t*.
# + colab_type="code" id="5pqySF7-rJR4" colab={}
# Difference against the value one year earlier to remove trend/seasonality.
diff_series = (series[365:] - series[:-365])
diff_time = time[365:]
plt.figure(figsize=(10, 6))
plot_series(diff_time, diff_series, label="Series(t) – Series(t–365)")
plt.show()
# + [markdown] colab_type="text" id="WDNer84g8OIF"
# Focusing on the validation period:
# + colab_type="code" id="-O21jlnA8OIG" colab={}
plt.figure(figsize=(10, 6))
plot_series(time_valid, diff_series[split_time - 365:], label="Series(t) – Series(t–365)")
plt.show()
# + [markdown] colab_type="text" id="xPlPlS7DskWg"
# Great, the trend and seasonality seem to be gone, so now we can use the moving average:
# + colab_type="code" id="QmZpz7arsjbb" colab={}
# 50-day moving average of the differenced series, aligned to validation.
diff_moving_avg = moving_average_forecast(diff_series, 50)[split_time - 365 - 50:]
plt.figure(figsize=(10, 6))
plot_series(time_valid, diff_series[split_time - 365:], label="Series(t) – Series(t–365)")
plot_series(time_valid, diff_moving_avg, label="Moving Average of Diff")
plt.show()
# + [markdown] colab_type="text" id="Gno9S2lyecnc"
# Now let's bring back the trend and seasonality by adding the past values from t – 365:
# + colab_type="code" id="Dv6RWFq7TFGB" colab={}
# Undo the differencing: add back the raw value from one year earlier.
diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, label="Series")
plot_series(time_valid, diff_moving_avg_plus_past, label="Forecasts")
plt.show()
# + colab_type="code" id="59jmBrwcTFCx" colab={}
keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_past).numpy()
# + [markdown] colab_type="text" id="vx9Et1Hkeusl"
# Better than naive forecast, good. However the forecasts look a bit too random, because we're just adding past values, which were noisy. Let's use a moving averaging on past values to remove some of the noise:
# + colab_type="code" id="K81dtROoTE_r" colab={}
# Smooth the past values with an 11-day window (370..359 days back) before
# adding them to the differenced moving average.
diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-359], 11) + diff_moving_avg
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, label="Series")
plot_series(time_valid, diff_moving_avg_plus_smooth_past, label="Forecasts")
plt.show()
# + colab_type="code" id="iN2MsBxWTE3m" colab={}
keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_smooth_past).numpy()
# + [markdown] colab_type="text" id="WKnmJisHcvTW"
# That's starting to look pretty good! Let's see if we can do better with a Machine Learning model.
| Intro-to-TensorFlow/Time-Series-Forecasting/moving_average.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: graco
# language: python
# name: graco
# ---
# +
from scipy.spatial.distance import squareform, pdist, cdist
from itertools import islice, combinations, product
from pyclustering.cluster.kmedoids import kmedoids
from collections import defaultdict
from scipy.stats import hypergeom
from goatools import obo_parser
from functools import partial
import os
import time
import graco
import numpy as np
import pandas as pd
import seaborn as sns
import networkx as nx
import matplotlib.pyplot as plt
# +
# %matplotlib inline
sns.set()
pd.set_option("display.max_columns", 50)
# NOTE(review): absolute, user-specific paths — these will not resolve on
# other machines; consider an environment variable or a config file.
DATA_DIRECTORY = "/home/clusterduck123/Desktop/git/supplements/data"
CPP_DIRECTORY = "/home/clusterduck123/Desktop/git/graco/graco/cpp"
RAW_DATA_DIRECTORY = f"{DATA_DIRECTORY}/raw_data"
PPI_DIRECTORY = f"{DATA_DIRECTORY}/PPI"
ANNOTATIONS_DIRECTORY = f"{DATA_DIRECTORY}/annotations"
MATRIX_DIRECTORY = f"{DATA_DIRECTORY}/matrix"
CLUSTERS_DIRECTORY = f"{DATA_DIRECTORY}/clusters"
# -
# Mapping from internal distance-method identifiers to LaTeX-ish display names
# used in the plot legends below.
# Fixes: the original literal listed every 'GCV_zscore_*' key twice (duplicate
# keys in a dict literal are silently collapsed, last wins — values were
# identical, so dropping the repeats is behavior-preserving), and '\infty' is
# now written with an explicit escaped backslash (same string value, no
# invalid-escape warning).
name2string = {
    'GCV_tvd': 'TVD',
    'GCV_hellinger': 'GCV_{hell}',
    'GDV_similarity': 'Tijana',

    'GDV_euclidean': 'GDV_{eucl}',     'GDV_zscore_euclidean': 'GDV_{z-eucl}',
    'GDV_cityblock': 'GDV_{city}',     'GDV_zscore_cityblock': 'GDV_{z-city}',
    'GDV_seuclidean': 'GDV_{seucl}',   'GDV_zscore_seuclidean': 'GDV_{z-seucl}',
    'GDV_cosine': 'GDV_{cos}',         'GDV_zscore_cosine': 'GDV_{z-cos}',
    'GDV_correlation': 'GDV_{cor}',    'GDV_zscore_correlation': 'GDV_{z-cor}',
    'GDV_sqeuclidean': 'GDV_{eucl^2}', 'GDV_zscore_sqeuclidean': 'GDV_{z-eucl^2}',
    'GDV_chebyshev': 'GDV_{cheby}',    'GDV_zscore_chebyshev': 'GDV_{z-cheby}',
    'GDV_canberra': 'GDV_{can}',       'GDV_zscore_canberra': 'GDV_{z-can}',
    'GDV_braycurtis': 'GDV_{bray}',    'GDV_zscore_braycurtis': 'GDV_{z-bray}',
    'GDV_mahalanobis': 'GDV_{mahala}', 'GDV_zscore_mahalanobis': 'GDV_{z-mahala}',

    'GCV_euclidean': 'GCV_{eucl}',     'GCV_zscore_euclidean': 'GCV_{z-eucl}',
    'GCV_cityblock': 'GCV_{city}',     'GCV_zscore_cityblock': 'GCV_{z-city}',
    'GCV_seuclidean': 'GCV_{seucl}',   'GCV_zscore_seuclidean': 'GCV_{z-seucl}',
    'GCV_cosine': 'GCV_{cos}',         'GCV_zscore_cosine': 'GCV_{z-cos}',
    'GCV_correlation': 'GCV_{cor}',    'GCV_zscore_correlation': 'GCV_{z-cor}',
    'GCV_sqeuclidean': 'GCV_{eucl^2}', 'GCV_zscore_sqeuclidean': 'GCV_{z-eucl^2}',
    'GCV_chebyshev': 'GCV_{cheby}',    'GCV_zscore_chebyshev': 'GCV_{z-cheby}',
    'GCV_canberra': 'GCV_{can}',       'GCV_zscore_canberra': 'GCV_{z-can}',
    'GCV_braycurtis': 'GCV_{bray}',    'GCV_zscore_braycurtis': 'GCV_{z-bray}',
    'GCV_mahalanobis': 'GCV_{mahala}', 'GCV_zscore_mahalanobis': 'GCV_{z-mahala}',

    'gGCV_euclidean': 'new GCV_{eucl}',
    'gGCV_cityblock': 'new GCV_{city}',
    'gGCV_seuclidean': 'new GCV_{seucl}',
    'gGCV_cosine': 'new GCV_{cos}',
    'gGCV_correlation': 'new GCV_{cor}',
    'gGCV_sqeuclidean': 'new GCV_{eucl^2}',
    'gGCV_chebyshev': 'new GCV_{cheby}',
    'gGCV_canberra': 'new GCV_{can}',
    'gGCV_braycurtis': 'new GCV_{bray}',
    'gGCV_mahalanobis': 'new GCV_{mahala}',

    'gGCV_normalizedl1': 'GCV_{L_1}',
    'gGCV_normalizedl2': 'GCV_{L_2}',
    'gGCV_normalizedlinf': 'GCV_{L_{\\infty}}'}
# # Load
# GO namespace to evaluate (BP = biological process).
namespace = 'BP'

# Per-method coverage tables; defaultdict so missing methods yield empty frames.
cluster_coverages = defaultdict(pd.DataFrame)
GO_coverages = defaultdict(pd.DataFrame)
gene_coverages = defaultdict(pd.DataFrame)

for method in [
    #'GDV_similarity',
    'gGCV_normalizedl1',
    #'gGCV_normalizedl2',
    #'gGCV_normalizedlinf',
    #'GCV_tvd',
]:
    # All three tables for a method live in the same enrichment directory.
    enrichment_dir = f"{DATA_DIRECTORY}/enrichments/{namespace}/{method}"
    cluster_coverages[method] = pd.read_csv(f"{enrichment_dir}/cluster_coverage.txt", index_col=0)
    GO_coverages[method] = pd.read_csv(f"{enrichment_dir}/GO_coverage.txt", index_col=0)
    gene_coverages[method] = pd.read_csv(f"{enrichment_dir}/gene_coverage.txt", index_col=0)
# # Plots
# Methods to include in the plots below; alternatives are kept commented out
# for quick toggling.
plot_methods = [
#'GDV_similarity',
'gGCV_normalizedl1',
#'gGCV_normalizedl2',
#'gGCV_normalizedlinf',
#'GCV_tvd',
#'GDV_mahalanobis',
#'gGCV_normalizedlinf'
]
# +
# The three cells below were byte-near-identical copies (clusters / GO-terms /
# genes); factored into one helper. The unused Blues/Reds palette iterators
# created in each original cell had no effect on the figures and are dropped.
def plot_coverage_curves(coverages, title, output_suffix, figname='all'):
    """Plot the mean coverage curve per method with an interquartile band.

    Parameters
    ----------
    coverages : dict mapping method name -> pd.DataFrame
        Rows indexed by number of clusters, one column per repetition.
    title : str
        Axes title (e.g. 'Clusters enriched').
    output_suffix : str
        Saved as {DATA_DIRECTORY}/plots/{figname}_{output_suffix}.png.
    figname : str
        File-name prefix (the original cells hard-coded 'all').
    """
    fig, ax = plt.subplots(figsize=(12, 9))
    fig.patch.set_alpha(0)
    fig.subplots_adjust(hspace=0.4)
    for method in plot_methods:
        # Mean over repetitions (columns), in percent.
        ax.plot(coverages[method].index, 100 * coverages[method].T.mean(),
                label=f'${name2string[method]}$',
                linewidth=2.5,
                alpha=0.75)
        # Shaded interquartile band (25th to 75th percentile).
        ax.fill_between(coverages[method].index,
                        100 * coverages[method].T.quantile(0.25),
                        100 * coverages[method].T.quantile(0.75),
                        alpha=0.1)
    ax.set_title(title, fontsize=28)
    ax.patch.set_alpha(0)
    ax.set_xlabel('# clusters', fontsize=24)
    ax.set_ylabel('% enriched', fontsize=24)
    ax.tick_params(axis='both', which='major', labelsize=24)
    ax.spines['left'].set_linewidth(2.5)
    ax.spines['left'].set_color('black')
    ax.spines['bottom'].set_linewidth(2.5)
    ax.spines['bottom'].set_color('black')
    ax.legend(fontsize=18, shadow=True, facecolor=[0.95, 0.95, 0.95, 0])
    fig.savefig(f"{DATA_DIRECTORY}/plots/{figname}_{output_suffix}.png")


plot_coverage_curves(cluster_coverages, 'Clusters enriched', 'cluster')
plot_coverage_curves(GO_coverages, 'GO-terms enriched', 'GO-term')
plot_coverage_curves(gene_coverages, 'Genes enriched', 'gene')
# -
| new_beginning/.ipynb_checkpoints/plots2-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="TZEUaYSjC6CZ"
# # Constructing a high-level model
# + id="5uAmHirHtUwd"
# %%capture
import sys
# True when running inside Google Colab (the colab package is preloaded there).
IN_COLAB = "google.colab" in sys.modules
if IN_COLAB:
    # !pip install --quiet scvi-tools
    # In script (jupytext) form the pip magic above is a comment, which left
    # the if-body empty — an IndentationError. `pass` keeps it non-empty.
    pass
# + id="tcMSn-GSxAeC"
import numpy as np
import scvi
import torch
# + [markdown] id="J7a98f8MiykA"
# At this point we have covered
#
# 1. Data registration via `scvi.data.setup_anndata` and dataloaders via `AnnDataLoader`
# 2. Building a probabilistic model by subclassing `BaseModuleClass`
#
# In this tutorial, we will cover the highest-level classes in `scvi-tools`: the model classes. The main purpose of these classes (e.g., `scvi.model.SCVI`) is to wrap the actions of module instantiation, training, and subsequent posterior queries of our module into a convenient interface. These model classes are the fundamental objects driving scientific analysis of data with `scvi-tools`. Out of convention, we will refer to these objects as "models" and the lower-level objects presented in the previous tutorial as "modules".
#
# ## A simple model class
# Here we will walkthrough an example of building the `scvi.model.SCVI` class. We will progressively add functionality to the class.
#
# + [markdown] id="SUwavqvn0BBk"
#
# ### Sketch of `BaseModelClass`
#
# Let us start by providing a high level overview of `BaseModelClass` that we will inherit. Note that this is pseudocode to provide intuition. We see that `BaseModelClass` contains some universally applicable methods, and some private methods (conventionally starting with `_` in Python) that will become useful after training the model.
# + [markdown] id="3_rabB4JoGG3"
# ```python
# class MyModel(UnsupervisedTrainingMixin, BaseModelClass)
#
# def __init__(self, adata):
# # sets some basic attributes like is_trained_
# # record the setup_dict registered in the adata
# self.adata = adata
# self.scvi_setup_dict_ = adata.uns["_scvi"]
# self.summary_stats = self.scvi_setup_dict_["summary_stats"]
#
# def _validate_anndata(self, adata):
# # check that anndata is equivalent by comparing
# # to the initial setup_dict
#
# def _make_dataloader(adata):
# # return a dataloader to iterate over adata
#
# def train(...):
# # Universal train method provided by UnsupservisedTrainingMixin
# # BaseModelClass does not come with train
# # In general train methods are straightforward to compose manually
#
# def save(...):
# # universal save method
# # saves modules, anndata setup dict, and attributes ending with _
#
# def load(...):
# # universal load method
# ```
# + [markdown] id="4vvYzZa_ukC4"
# ### Baseline version of `SCVI` class
#
# Let's now create the simplest possible version of the `SCVI` class. We inherit the `BaseModelClass`, and write our `__init__` method.
#
# We take care to do the following:
#
# 1. Set the `module` attribute to be equal to our `VAE` module, which here is the torch-level version of scVI.
# 2. Add a `_model_summary_string` attr, which will be used as a representation for the model.
# 3. Run `self.init_params_ = self._get_init_params(locals())`, which stores the arguments used to initialize the model, facilitating saving/loading of the model later.
#
# To initialize the `VAE`, we can use the information in `self.summary_stats`, which is information that was stored in the anndata object at `setup_anndata()` time. In this example, we have only exposed `n_latent` to users through `SCVI`. In practice, we try to expose only the most relevant parameters, as all other parameters can be accessed by passing `model_kwargs`.
# + id="Q0zQzJD4jNoV"
from anndata import AnnData
from scvi.module import VAE
from scvi.model.base import BaseModelClass, UnsupervisedTrainingMixin
class SCVI(UnsupervisedTrainingMixin, BaseModelClass):
    """
    single-cell Variational Inference [Lopez18]_.

    Minimal model-class example: wraps the torch-level ``VAE`` module and
    exposes only ``n_latent``; any other ``VAE`` argument can be passed
    through ``model_kwargs``.
    """
    def __init__(
        self,
        adata: AnnData,
        n_latent: int = 10,
        **model_kwargs,
    ):
        super(SCVI, self).__init__(adata)
        # Input dimensions come from the setup dict recorded on the AnnData
        # object (exposed as self.summary_stats by BaseModelClass).
        self.module = VAE(
            n_input=self.summary_stats["n_vars"],
            n_batch=self.summary_stats["n_batch"],
            n_latent=n_latent,
            **model_kwargs,
        )
        self._model_summary_string = (
            "SCVI Model with the following params: \nn_latent: {}"
        ).format(
            n_latent,
        )
        # Snapshot of the constructor arguments (read via locals()) used by
        # save()/load(); avoid introducing extra local variables before this.
        self.init_params_ = self._get_init_params(locals())
# + [markdown] id="-Qw-uCsuwhgO"
# Now we explore what we can and cannot do with this model. Let's get some data and initialize a `SCVI` instance. Of note, for testing purposes we like to use `scvi.data.synthetic_iid()` which returns a simple, small anndata object that was already run through `setup_anndata()`.
# + colab={"base_uri": "https://localhost:8080/"} id="Tw3DQF9IwftC" outputId="1e992a71-7a48-4881-8af8-9fec60138d6a"
# Small synthetic AnnData that has already been run through setup_anndata().
adata = scvi.data.synthetic_iid()
adata
# + colab={"base_uri": "https://localhost:8080/"} id="cAg0lsNpuCYG" outputId="4d3b639f-39fe-42cf-cb9c-d2f16c9d0018"
model = SCVI(adata)
model
# + colab={"base_uri": "https://localhost:8080/"} id="vOau9VkQxyIp" outputId="34538c4d-bfdb-4ba5-d1be-68a045dd5c30"
# train() is inherited from UnsupervisedTrainingMixin.
model.train(max_epochs=20)
# + [markdown] id="iG1AU4cI0Fe1"
# ### The `train` method
# + [markdown] id="EDFKIKMdx-TO"
# We were able to train this model, as this method is inherited in the class. Let us now take a look at pseudocode of the `train` method of `UnsupervisedTrainingMixin`. The function of each of these objects is described in the API reference.
#
# ```python
# def train(
# self,
# max_epochs: Optional[int] = 100,
# use_gpu: Optional[bool] = None,
# train_size: float = 0.9,
# **kwargs,
# ):
# """
# Train the model.
# """
# # object to make train/test/val dataloaders
# data_splitter = DataSplitter(
# self.adata,
# train_size=train_size,
# validation_size=validation_size,
# batch_size=batch_size,
# use_gpu=use_gpu,
# )
# # defines optimizers, training step, val step, logged metrics
# training_plan = TrainingPlan(
# self.module, len(data_splitter.train_idx),
# )
# # creates Trainer, pre and post training procedures (Trainer.fit())
# runner = TrainRunner(
# self,
# training_plan=training_plan,
# data_splitter=data_splitter,
# max_epochs=max_epochs,
# use_gpu=use_gpu,
# **kwargs,
# )
# return runner()
# ```
# + [markdown] id="9fT3-7MQ0S6G"
# We notice two new things:
#
# 1. A training plan (`training_plan`)
# 2. A train runner (`runner`)
#
# The `TrainRunner` is a lightweight wrapper of the PyTorch lightning's [`Trainer`](https://pytorch-lightning.readthedocs.io/en/stable/trainer.html#trainer-class-api), which is a completely black-box method once a `TrainingPlan` is defined. So what does the `TrainingPlan` do?
#
# 1. Configures optimizers (e.g., Adam), learning rate schedulers.
# 2. Defines the training step, which runs a minibatch of data through the model and records the loss.
# 3. Defines the validation step, same as training step, but for validation data.
# 4. Records relevant metrics, such as the ELBO.
#
# In `scvi-tools` we have `scvi.lightning.TrainingPlan`, which should cover many use cases, from VAEs and VI, to MLE and MAP estimation. Developers may find that they need a custom `TrainingPlan` for e.g,. multiple optimizers and complex training scheme. These can be written and used by the model class.
#
# Developers may also overwrite this train method to add custom functionality like Early Stopping (see TOTALVI's train method). In most cases the higher-level train method can call `super().train()`, which would be the `BaseModelClass` train method.
# + [markdown] id="bEqgLc8c2-Gf"
# ### Save and load
# + [markdown] id="-3aR7N5O3BKR"
# We can also save and load this model object, as it follows the expected structure.
# + colab={"base_uri": "https://localhost:8080/"} id="5kjJoYZ42_7h" outputId="51244e0d-30af-4fa5-9ecc-9e837deced37"
# save_anndata=True bundles the dataset with the weights, so load() needs no
# separate AnnData argument.
model.save("saved_model/", save_anndata=True)
model = SCVI.load("saved_model/")
# + [markdown] id="ZAGP3_R23TnF"
# ## Writing methods to query the model
# + [markdown] id="QsTrpi2GCuRW"
# So we have a model that wraps a module that has been trained. How can we get information out of the module and present in cleanly to our users? Let's implement a simple example: getting the latent representation out of the VAE.
#
# This method has the following structure:
#
# 1. Validate the user-supplied data
# 2. Create a data loader
# 3. Iterate over the data loader and feed into the VAE, getting the tensor of interest out of the VAE.
# + id="PehIGudfDXRr"
from typing import Optional, Sequence
@torch.no_grad()
def get_latent_representation(
    self,
    adata: Optional[AnnData] = None,
    indices: Optional[Sequence[int]] = None,
    batch_size: Optional[int] = None,
) -> np.ndarray:
    r"""
    Return the latent representation for each cell.

    Parameters
    ----------
    adata
        AnnData object with equivalent structure to initial AnnData. If `None`, defaults to the
        AnnData object used to initialize the model.
    indices
        Indices of cells in adata to use. If `None`, all cells are used.
    batch_size
        Minibatch size for data loading into model. Defaults to `scvi.settings.batch_size`.

    Returns
    -------
    latent_representation : np.ndarray
        Low-dimensional representation for each cell
    """
    # Posterior queries are only meaningful once the model has been trained.
    if self.is_trained_ is False:
        raise RuntimeError("Please train the model first.")
    adata = self._validate_anndata(adata)
    dataloader = self._make_dataloader(adata=adata, indices=indices, batch_size=batch_size)
    # Collect the mean of q(z|x) for every minibatch, then stitch together.
    chunks = []
    for tensors in dataloader:
        inference_inputs = self.module._get_inference_input(tensors)
        outputs = self.module.inference(**inference_inputs)
        chunks.append(outputs["qz_m"].cpu())
    return torch.cat(chunks).numpy()
# + [markdown] id="5hhvcl6zGGIV"
# <div class="alert alert-info">
#
# Note
#
# Validating the anndata is critical to the user experience. If `None` is passed it just returns the anndata used to initialize the model, but if a different object is passed, it checks that this new object is equivalent in structure to the anndata passed to the model. We took great care in engineering this function so as to allow passing anndata objects with potentially missing categories (e.g., model was trained on batches `["A", "B", "C"]`, but the passed anndata only has `["B", "C"]`). These sorts of checks will ensure that your module will see data that it expects, and the user will get the results they expect without advanced data manipulations.
# </div>
# + [markdown] id="h4a-4Ssmu6qk"
# As a convention, we like to keep the module code as bare as possible and leave all posterior manipulation of module tensors to the model class methods. However, it would have been possible to write a `get_z` method in the module, and just have the model class that method.
# + [markdown] id="wmsozWgrDObo"
# ## Mixing in pre-coded features
#
# We have a number of Mixin classes that can add functionality to your model through inheritance. Here we demonstrate the [`VAEMixin`](https://www.scvi-tools.org/en/stable/api/reference/scvi.model.base.VAEMixin.html#scvi.model.base.VAEMixin) class.
#
# Let's try to get the latent representation from the object we already created.
# + colab={"base_uri": "https://localhost:8080/"} id="KntvOQekxq0K" outputId="93cd2a5a-7ac7-4437-926d-b43caff8cdc7"
# The baseline SCVI above does not inherit VAEMixin, so this method is absent
# and the AttributeError branch is taken.
try:
    model.get_latent_representation()
except AttributeError:
    print("This function does not exist")
# + [markdown] id="FPf4xHI_L-oI"
# This method becomes available once the `VAEMixin` is inherited. Here's an overview of the mixin methods, which are coded generally enough that they should be broadly useful to those building VAEs.
# + [markdown] id="QD3HWo2lLKSC"
# ```python
# class VAEMixin:
# @torch.no_grad()
# def get_elbo(
# self,
# adata: Optional[AnnData] = None,
# indices: Optional[Sequence[int]] = None,
# batch_size: Optional[int] = None,
# ) -> float:
# pass
#
# @torch.no_grad()
# def get_marginal_ll(
# self,
# adata: Optional[AnnData] = None,
# indices: Optional[Sequence[int]] = None,
# n_mc_samples: int = 1000,
# batch_size: Optional[int] = None,
# ) -> float:
# pass
#
# @torch.no_grad()
# def get_reconstruction_error(
# self,
# adata: Optional[AnnData] = None,
# indices: Optional[Sequence[int]] = None,
# batch_size: Optional[int] = None,
# ) -> Union[float, Dict[str, float]]:
# pass
#
# @torch.no_grad()
# def get_latent_representation(
# self,
# adata: Optional[AnnData] = None,
# indices: Optional[Sequence[int]] = None,
# give_mean: bool = True,
# mc_samples: int = 5000,
# batch_size: Optional[int] = None,
# ) -> np.ndarray:
# pass
#
# ```
# + [markdown] id="nA2ES0nANQN8"
# Let's now inherit the mixin into our SCVI class.
# + id="Lu4Rb9nw2217"
from scvi.model.base import VAEMixin, UnsupervisedTrainingMixin
class SCVI(VAEMixin, UnsupervisedTrainingMixin, BaseModelClass):
    """
    single-cell Variational Inference [Lopez18]_.

    Same constructor as the baseline class above, but inheriting VAEMixin
    first so the pre-coded posterior queries (get_latent_representation,
    get_elbo, get_marginal_ll, get_reconstruction_error) become available.
    """
    def __init__(
        self,
        adata: AnnData,
        n_latent: int = 10,
        **model_kwargs,
    ):
        super(SCVI, self).__init__(adata)
        # Input dimensions come from the setup dict recorded on the AnnData.
        self.module = VAE(
            n_input=self.summary_stats["n_vars"],
            n_batch=self.summary_stats["n_batch"],
            n_latent=n_latent,
            **model_kwargs,
        )
        self._model_summary_string = (
            "SCVI Model with the following params: \nn_latent: {}"
        ).format(
            n_latent,
        )
        # Snapshot of the constructor arguments (read via locals()) used by
        # save()/load(); avoid introducing extra local variables before this.
        self.init_params_ = self._get_init_params(locals())
# + colab={"base_uri": "https://localhost:8080/"} id="8erM-DwwNV9V" outputId="f15996d8-e22d-4dc9-eb84-4c735b9d34ad"
# With VAEMixin inherited, get_latent_representation() is now available.
model = SCVI(adata)
model.train(10)
model.get_latent_representation()
# + [markdown] id="qWvfa7q9NggC"
# ## Summary
# + [markdown] id="59_79HYQNjv_"
# We learned the structure of the high-level model classes in scvi-tools, and learned how a simple version of `SCVI` is implemented.
#
# Questions? Comments? Keep the discussion going on our [forum](https://discourse.scvi-tools.org/)
| model_user_guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
# %matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import numpy as np
from skimage import io, exposure
from matplotlib import pyplot as plt
# -
# 
# + deletable=true editable=true
import os
data_dir_name = 'LC08_L1TP_220071_20160409_20170326_01_T1'
data_dir_path = './GPAM_dataset/LC08_L1TP_220071_20160409_20170326_01_T1/'

def read_band(directory, n):
    """Load Landsat 8 band ``n`` (1-11) from ``directory``.

    Returns the image array on success; prints a diagnostic and returns
    None when the band number is invalid or the file is missing.
    """
    if n not in range(1, 12):
        print('Band number has to be in the range 1-11!')
        return None
    band_path = '{}{}_B{}.TIF'.format(directory, data_dir_name, n)
    if not os.path.isfile(band_path):
        print('Image:', band_path, 'not found!')
        return None
    img = io.imread(band_path)
    print('Image:', band_path)
    return img

read_band(data_dir_path, 5)
# + deletable=true editable=true
# %%time
import re
from math import pow
def show_image(img, title):
    """Display ``img`` scaled by its bit depth's maximum value, with ``title``."""
    figure = plt.figure(figsize=(12, 12))
    figure.set_facecolor('white')
    # Largest representable value for the image's dtype, e.g. 2**16 - 1.
    max_value = pow(2, get_image_dimension(img)) - 1
    plt.imshow(img / max_value)
    plt.title(title)
    plt.show()
def get_image_dimension(img):
    """Return the bit depth of ``img``, parsed from its dtype name (uint16 -> 16)."""
    digits = re.findall(r'\d+', str(img.dtype))
    return int(digits[0])
show_image(read_band(data_dir_path, 2), 'Landsat 8 Imagery')
# + deletable=true editable=true
def stack_bands(directory, *args):
    """Read the given band numbers and stack them depth-wise into one array.

    Parameters: directory — dataset directory path; *args — band numbers (1-11).
    Returns an (H, W, len(args)) array.
    """
    bands = []
    for band in args:
        bands.append(read_band(directory, band))
    return np.dstack(bands)

# NOTE(review): the original cell had `rgb = stack_bands(...)` fused onto the
# `def equalize_image(img):` line (a SyntaxError), plus a stray paste of the
# equalize_image cell that referenced `img_765` before it was defined. The
# stray paste is removed here; equalize_image is defined further below.
rgb = stack_bands(data_dir_path, 4, 3, 2)
show_image(rgb, 'RGB Stack')
img_765 = stack_bands(data_dir_path, 7, 6, 5)
show_image(img_765, 'Bands 7, 6, 5')
# + deletable=true editable=true
# python histogram cut
# other function: get image dimension and cut % of frequency desired (not null)
# get xmin = 0 and xmax = 255
def get_image_lims(freq):
    # TODO: unimplemented stub — per the notes above, intended to derive
    # intensity limits from a histogram frequency cutoff.
    pass
def rgb_histogram(image):
    """Plot per-channel intensity histograms of an RGB image (first bin skipped)."""
    fig = plt.figure(figsize=(14, 7))
    fig.set_facecolor('white')
    for color, channel in zip('rgb', np.rollaxis(image, axis=-1)):
        counts, centers = exposure.histogram(channel)
        # [1::] drops the first bin — presumably the black/no-data background
        # that dominates Landsat scenes; TODO confirm.
        plt.plot(centers[1::], counts[1::], color=color)
    plt.title('RGB Histogram')
    plt.show()
rgb_histogram(rgb)
# + deletable=true editable=true
def equalize_image(img, lims=None):
    """Contrast-stretch each of the first three channels of ``img``.

    Parameters
    ----------
    img : 3-channel array; dtype is preserved in the output.
    lims : optional list of three (low, high) intensity ranges, one per
        channel, passed to exposure.rescale_intensity as its input range.
        Defaults to the values that were previously hard coded (addressing
        the old "Fix hard coded values" TODO); tuned for this Landsat scene.

    Returns a new array; the input is not modified.
    """
    if lims is None:
        lims = [(5000, 18000), (6000, 23000), (10000, 25000)]
    eq_img = np.empty(img.shape, dtype=img.dtype)
    for lim, channel in zip(lims, range(3)):
        eq_img[:, :, channel] = exposure.rescale_intensity(img[:, :, channel], lim)
    return eq_img
eq_img_765 = equalize_image(img_765)
# Fixed: the original called `color_image_show`, which is not defined anywhere
# in this file; `show_image` is the display helper defined above.
show_image(eq_img_765, '7-6-5 image, histogram equilized')
# +
def adjust_gamma(image):
    # NOTE(review): modifies `image` in place (channels 1 and 2 only) and also
    # returns it — callers must not assume the input is left untouched.
    image[:, :, 1] = exposure.adjust_gamma(image[:, :, 1], 0.65)
    image[:, :, 2] = exposure.adjust_gamma(image[:, :, 2], 0.75)
    return image
# NOTE(review): `img432_ha` and `color_image_show` are not defined anywhere in
# this file — this cell raises NameError as written; verify against the
# original notebook.
adj_image = adjust_gamma(img432_ha)
color_image_show(adj_image, '4-3-2 image, histogram equilized, color gamma adjusted')
| image-parser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score
from sklearn.tree import plot_tree
from sklearn.model_selection import cross_val_score, KFold
from timeit import default_timer as timer
import time
from statistics import *
from sklearn.metrics import matthews_corrcoef
import warnings
import math
warnings.filterwarnings('ignore')
import numpy as np
import statistics
path = r"/Users/nasim/Desktop/data/Flu_Classification.csv"
df = pd.read_csv(path)
df.head()
# Cast everything to string first so the categorical encoding below behaves
# uniformly regardless of the dtypes pandas inferred.
df = df.astype(str)
# Label-encode each categorical column (category -> integer code).
for _col in ["Diagnosis", "Sex", "Diarrhea", "Fever", "Coughing",
             "ShortnessOfBreath", "SoreThroat", "NauseaVomitting",
             "Fatigue", "Cancer"]:
    df[_col] = df[_col].astype("category").cat.codes
df.dtypes
# Feature matrix and target column.
X = df[["Age", "Temperature", "Sex", "Diarrhea", "Fever", "Coughing", "ShortnessOfBreath", "SoreThroat", "NauseaVomitting", "Fatigue", "Cancer"]]
y = df['Diagnosis']
# Python program to get average of a list
def average_list(lst):
    # Arithmetic mean via statistics.mean (imported above with
    # `from statistics import *`); preserves exact integer results.
    return mean(lst)
def create_dt_gini(criter='gini'):
    """Evaluate a DecisionTreeClassifier on (X, y) with 20-fold cross-validation.

    Parameters
    ----------
    criter : str
        Split criterion passed to DecisionTreeClassifier ('gini' or 'entropy').

    Returns
    -------
    (accuracy, generation_time, mcc_avg) : tuple of floats
        Mean fold accuracy, wall-clock seconds for the whole cross-validation,
        and mean Matthews correlation coefficient. (Previously accuracy and
        time were returned as strings, forcing callers to convert back with
        float(); returning floats is backward compatible with that.)
    """
    start = timer()
    kf = KFold(n_splits=20)
    scores = []
    mcc_scores = []
    dt = DecisionTreeClassifier(criterion=criter)
    for train_index, test_index in kf.split(X):
        X_train, X_test = X.iloc[train_index], X.iloc[test_index]
        y_train, y_test = y.iloc[train_index], y.iloc[test_index]
        dt.fit(X_train, y_train)
        y_pred = dt.predict(X_test)
        mcc_scores.append(matthews_corrcoef(y_test, y_pred))
        scores.append(dt.score(X_test, y_test))
    generation_time = timer() - start
    accuracy = float(np.mean(scores))
    mcc_avg = average_list(mcc_scores)
    return accuracy, generation_time, mcc_avg
# +
# Repeat the 20-fold CV experiment 30 times to average out fold/timing noise.
accuracies = []
times = []
mccs = []
for _ in range(30):
    acc, gtime, mcc_avg = create_dt_gini(criter='gini')
    accuracies.append(acc)
    times.append(gtime)
    mccs.append(mcc_avg)
# create_dt_gini returns accuracy/time as strings; convert back for averaging.
# (Also fixes the misspelled local name `conveted_accuracies`.)
converted_accuracies = [float(x) for x in accuracies]
converted_times = [float(x) for x in times]
converted_mccs = [float(x) for x in mccs]
avg_accuracy = average_list(converted_accuracies)
avg_time = average_list(converted_times)
avg_mcc = average_list(converted_mccs)
print('*'*50)
print('Evaluating for Gini Index')
print('Accuracy: {}'.format(avg_accuracy*100))
print('Mcc: {}'.format(avg_mcc))
print('Average generation time : {} sec'.format(avg_time))
print('*'*50)
# +
# Same experiment as above, but splitting on information gain (entropy).
accuracies = []
times = []
mccs = []
for _ in range(30):
    acc, gtime, mcc_avg = create_dt_gini(criter='entropy')
    accuracies.append(acc)
    times.append(gtime)
    mccs.append(mcc_avg)
# Convert the string-typed accuracy/time values back to floats for averaging.
# (Also fixes the misspelled local name `conveted_accuracies`.)
converted_accuracies = [float(x) for x in accuracies]
converted_times = [float(x) for x in times]
converted_mccs = [float(x) for x in mccs]
avg_accuracy = average_list(converted_accuracies)
avg_time = average_list(converted_times)
avg_mcc = average_list(converted_mccs)
print('*'*50)
print('Evaluating for information gain')
print('Accuracy: {}'.format(avg_accuracy*100))
print('Mcc: {}'.format(avg_mcc))
print('Average generation time : {} sec'.format(avg_time))
print('*'*50)
| Dataset 3 - Flu Classification/Decision Tree Codes/cross_validation_mcc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Analysis of order of contigs with QUAST
#
# In order to analyse whether the plasmid sequences produced by HyAsP correspond to the expected plasmids or whether misassembly events happened, we compared the sequences using QUAST.
#
# *Summary:*
# HyAsP's predictions included misassemblies in a lot of test samples.
# Relocations were the predominant misassembly type, followed by translocations and only a few inversions.
# There was no clear correlation between the number or frequency of misassembly events and the prediction scores.
# +
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
# Show full tables when displaying DataFrames in the notebook.
pd.options.display.max_rows = None
# Root of the project on the cluster filesystem.
proj_dir = '/project/6007976/wg-anoph/Plasmids-Assembly'
# One test-sample id per line; parsed into a list of ints.
ids_file = '%s/data/2018-10-29__databases_greedy_paper/mob_database_filtered/test_ids.txt' % proj_dir
with open(ids_file, 'r') as in_file:
    ids = [int(line.strip()) for line in in_file]
# -
# *Read misassembly statistics*
# +
# Column accumulators for the per-sample QUAST misassembly statistics.
col_id = []
col_tool = []
# One list per numeric row of QUAST's misassemblies_report.tsv (13 metrics,
# in the fixed row order unpacked below).
value_lists = [[], [], [], [], [], [], [], [], [], [], [], [], []]
# missing[sid] is True when QUAST could not align anything for that sample.
missing = dict()
for sid in ids:
    col_id.append(sid)
    col_tool.append('greedy')
    file = '%s/exp/2018-10-29__analyses_greedy_paper/quast_results_ncbi_filtered/%i_quast/contigs_reports/misassemblies_report.tsv' % (proj_dir, sid)
    with open(file, 'r') as in_file:
        # Header row lists the assemblies that had alignments; drop the label column.
        non_empty = in_file.readline().strip().split('\t')[1:]
        if 'greedy' in non_empty:
            # Each remaining line is "<metric>\t<value>"; collect values in row order.
            for i, line in enumerate(in_file.readlines()):
                value_lists[i].append(int(line.strip().split('\t')[1]))
            missing[sid] = False
        else:
            # Nothing aligned for this sample: record zeros for all 13 metrics.
            for i in range(0, 13):
                value_lists[i].append(0)
            missing[sid] = True
# Unpack the metric lists by their fixed row position in the QUAST report.
col_misasm = value_lists[0]  # entry '# misassemblies'
col_relocations = value_lists[1]  # entry '# relocations'
col_translocations = value_lists[2]  # entry '# translocations'
col_inversions = value_lists[3]  # entry '# inversions'
col_misasm_contigs = value_lists[4]  # entry '# misassembled contigs'
col_misasm_contigs_len = value_lists[5]  # entry 'Misassembled contigs length'
col_local_misasm = value_lists[6]  # entry '# local misassemblies'
col_unaligned_misasm_contigs = value_lists[7]  # entry '# unaligned mis. contigs'
col_mismatches = value_lists[8]  # entry '# mismatches'
col_indels = value_lists[9]  # entry '# indels'
col_indels_small = value_lists[10]  # entry '# indels (<= 5 bp)'
col_indels_large = value_lists[11]  # entry '# indels (> 5 bp)'
col_indels_length = value_lists[12]  # entry 'Indels length'
# Assemble the per-sample statistics table, one row per sample id.
cols = ['sample', 'tool', 'num_misassemblies', 'num_relocations', 'num_translocations', 'num_inversions', 'num_misassembled_contigs',
        'len_misassembled_contigs', 'num_local_misassemblies', 'num_unaligned_misassembled_contigs', 'num_mismatches',
        'num_indels', 'num_small_indels', 'num_large_indels', 'len_indels']
stats = pd.DataFrame({cols[0]: col_id, cols[1]: col_tool, cols[2]: col_misasm, cols[3]: col_relocations, cols[4]: col_translocations,
                      cols[5]: col_inversions, cols[6]: col_misasm_contigs, cols[7]: col_misasm_contigs_len, cols[8]: col_local_misasm,
                      cols[9]: col_unaligned_misasm_contigs, cols[10]: col_mismatches, cols[11]: col_indels, cols[12]: col_indels_small,
                      cols[13]: col_indels_large, cols[14]: col_indels_length}, columns = cols)
# -
# *Add total lengths of predicted and expected plasmids*
# +
prediction_lengths = []
prediction_nums = []
reference_lengths = []
reference_nums = []
# empty[sid] is True when HyAsP predicted no plasmids for that sample.
empty = dict()
for sid in ids:
    file = '%s/exp/2018-10-29__analyses_greedy_paper/analysis_ncbi_filtered/sample_%i/eval/greedy/greedy_eval.csv' % (proj_dir, sid)
    with open(file, 'r') as in_file:
        lines = in_file.readlines()
        # NOTE(review): this parsing depends on the exact fixed line layout of
        # greedy_eval.csv (counts on header lines, then one line per
        # reference/prediction whose second-to-last field is a length).
        num_chr_references = int(lines[1].strip().split(' ')[-1])
        num_pla_references = int(lines[2 + num_chr_references].strip().split(' ')[-1])
        # Total length over all plasmid references of this sample.
        len_sum = 0
        for i in range(0, num_pla_references):
            len_sum += int(lines[3 + num_chr_references + i].strip().split(' ')[-2])
        reference_lengths.append(len_sum)
        reference_nums.append(num_pla_references)
        num_predictions = int(lines[5 + num_chr_references + num_pla_references].strip().split(' ')[-1])
        # Total length over all predicted plasmids of this sample.
        len_sum = 0
        for i in range(0, num_predictions):
            len_sum += int(lines[6 + num_chr_references + num_pla_references + i].strip().split(' ')[-2])
        prediction_lengths.append(len_sum)
        prediction_nums.append(num_predictions)
        empty[sid] = (num_predictions == 0)
# Attach the per-sample totals as new columns of the stats table.
stats['prediction_length'] = prediction_lengths
stats['reference_length'] = reference_lengths
stats['num_predictions'] = prediction_nums
stats['num_references'] = reference_nums
# -
# *Add precision, recall and F1 score*
# +
scores = pd.read_csv('%s/exp/2018-10-29__analyses_greedy_paper/analysis_ncbi_filtered/scoring_results.csv' % proj_dir, sep = ';')
precisions = []
recalls = []
f1s = []
sample_groups = scores.groupby('sample_id')
for sid in ids:
    grp = sample_groups.get_group(sid)
    # Scores of HyAsP's putative plasmids; default to 0.0 when a sample has none.
    r = grp[grp['tool'] == 'greedy_putative']
    precisions.append(r['precision'].item() if len(r) > 0 else 0.0)
    recalls.append(r['recall'].item() if len(r) > 0 else 0.0)
    f1s.append(r['f1_score'].item() if len(r) > 0 else 0.0)
stats['precision'] = precisions
stats['recall'] = recalls
stats['f1_score'] = f1s
# -
# *Sort by sample id and extract statistics on greedy plasmids*
#
# Subsequently, the analysis uses only the statistics of the plasmid sequences predicted by HyAsP. The statistics on the contigs underlying these sequences as well as the ones of plasmidSPAdes and MOB-recon are not informative because the unconcatenated contigs can be freely orientated by QUAST and, thus, show (almost) no misassembly events.
stats.sort_values(by = ['sample'], inplace = True)
# Persist the full table, then restrict the analysis to HyAsP ('greedy') rows.
stats.to_csv('hyasp_quast_stats_ncbi_filtered.csv', sep = ';', index = False)
greedy_stats = stats[stats['tool'] == 'greedy']
# **Misassembly events per sample**
# +
plt.figure(figsize = (25, 5))
pos = range(1, len(greedy_stats['num_misassemblies']) + 1)
# Stacked-bar components: one bar per sample, split by misassembly type.
rel_bar = greedy_stats['num_relocations']
trans_bar = greedy_stats['num_translocations']
inv_bar = greedy_stats['num_inversions']
# Positions of samples where QUAST aligned nothing / HyAsP predicted nothing.
pos_missing = []
pos_empty = []
for i, sid in enumerate(greedy_stats['sample']):
    if missing[sid]:
        pos_missing.append(i + 1)
    if empty[sid]:
        pos_empty.append(i + 1)
plt.bar(pos, rel_bar, color = '#377eb8', label = 'Relocations')
plt.bar(pos, trans_bar, bottom = rel_bar, color = '#4daf4a', label = 'Translocations')
plt.bar(pos, inv_bar, bottom = np.array(rel_bar) + np.array(trans_bar), color = '#984ea3', label = 'Inversions')
# Red markers flag the degenerate samples at fixed heights.
plt.scatter(pos_missing, [0.45] * len(pos_missing), marker = 'x', color = 'red', label = 'Nothing aligned')
plt.scatter(pos_empty, [0.9] * len(pos_empty), marker = '*', color = 'red', label = 'Nothing predicted')
plt.xticks(range(1, len(greedy_stats['num_misassemblies']) + 1), greedy_stats['sample'], fontsize = 12);
plt.yticks(range(0, max(greedy_stats['num_misassemblies']) + 2, 2), fontsize = 15);
plt.ylabel('Number of observations', fontsize = 20);
plt.legend(fontsize = 15)
plt.tight_layout()
plt.savefig('misassembly_counts_ncbi_filtered.eps', format = 'eps', dpi = 1200)
plt.savefig('misassembly_counts_ncbi_filtered.pdf', format = 'pdf', dpi = 1200)
#plt.savefig('misassembly_counts_ncbi_filtered.png', format = 'png', dpi = 1200)
# -
# **Average number of misassembly events in 1000 nt of predicted plasmids per sample**
# +
plt.figure(figsize = (25, 5))
pos = range(1, len(greedy_stats['num_misassemblies']) + 1)
# Normalise event counts by predicted length (per 1000 nt); samples with an
# empty prediction divide by zero (the RuntimeWarning noted below) and simply
# do not appear in the plot.
plt.plot(pos, 1000 * np.array(greedy_stats['num_misassemblies']) / np.array(greedy_stats['prediction_length']), 'o', color = '#e41a1c')
plt.plot(pos, 1000 * np.array(greedy_stats['num_relocations']) / np.array(greedy_stats['prediction_length']), 'o', color = '#377eb8')
plt.plot(pos, 1000 * np.array(greedy_stats['num_translocations']) / np.array(greedy_stats['prediction_length']), 'o', color = '#4daf4a')
plt.plot(pos, 1000 * np.array(greedy_stats['num_inversions']) / np.array(greedy_stats['prediction_length']), 'o', color = '#984ea3')
plt.xticks(range(1, len(greedy_stats['num_misassemblies']) + 1), greedy_stats['sample'], fontsize = 12);
plt.yticks(fontsize = 15);
plt.ylabel('Average number of events per 1000 nt', fontsize = 15);
plt.legend(['All misassembly events', 'Relocations', 'Translocations', 'Inversions'], fontsize = 15)
plt.tight_layout()
plt.savefig('misassembly_rates_ncbi_filtered.eps', format = 'eps', dpi = 1200)
plt.savefig('misassembly_rates_ncbi_filtered.pdf', format = 'pdf', dpi = 1200)
#plt.savefig('misassembly_rates_ncbi_filtered.png', format = 'png', dpi = 1200)
# -
# Above RuntimeWarning is "intended" (affected positions, i.e. empty predictions, also remain empty in plot).
# **Distributions of misassembly events**
# +
fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (15, 10))
values = sorted(greedy_stats['num_misassemblies'].unique())
# Only consider samples with at least one predicted plasmid.
stats_non_empty = greedy_stats.loc[greedy_stats['num_predictions'] > 0]
# Histogram of samples per event count, one panel per misassembly type.
axes[0, 0].bar(values, [len(stats_non_empty[stats_non_empty['num_misassemblies'] == n]) for n in values], color = '#e41a1c')
axes[0, 0].set_xlabel('Total number of misassembly events', fontsize = 15)
axes[0, 1].bar(values, [len(stats_non_empty[stats_non_empty['num_relocations'] == n]) for n in values], color = '#377eb8')
axes[0, 1].set_xlabel('Number of relocations', fontsize = 15)
axes[1, 0].bar(values, [len(stats_non_empty[stats_non_empty['num_translocations'] == n]) for n in values], color = '#4daf4a')
axes[1, 0].set_xlabel('Number of translocations', fontsize = 15)
axes[1, 1].bar(values, [len(stats_non_empty[stats_non_empty['num_inversions'] == n]) for n in values], color = '#984ea3')
axes[1, 1].set_xlabel('Number of inversions', fontsize = 15)
for i in range(0, 2):
    for j in range(0, 2):
        axes[i, j].set_ylabel('Number of observations', fontsize = 15)
        axes[i, j].set_xticks(range(values[0], values[-1] + 1))
        axes[i, j].tick_params(axis = 'x', labelsize = 13)
        axes[i, j].tick_params(axis = 'y', labelsize = 13)
plt.tight_layout()
plt.savefig('misassembly_hists_ncbi_filtered.eps', format = 'eps', dpi = 1200)
plt.savefig('misassembly_hists_ncbi_filtered.pdf', format = 'pdf', dpi = 1200)
#plt.savefig('misassembly_hists_ncbi_filtered.png', format = 'png', dpi = 1200)
# -
# **Misassembly events vs. scores**
# *Counts vs. scores*
# +
# 4x3 scatter grid: rows = event type, columns = precision / recall / F1.
fig, axes = plt.subplots(nrows = 4, ncols = 3, figsize = (25, 20), sharex = True, sharey = True)
stats_non_empty = greedy_stats.loc[greedy_stats['num_predictions'] > 0]
axes[0, 0].plot(stats_non_empty['num_misassemblies'], stats_non_empty['precision'], 'o', color = '#e41a1c')
axes[0, 1].plot(stats_non_empty['num_misassemblies'], stats_non_empty['recall'], 'o', color = '#e41a1c')
axes[0, 2].plot(stats_non_empty['num_misassemblies'], stats_non_empty['f1_score'], 'o', color = '#e41a1c')
axes[1, 0].plot(stats_non_empty['num_relocations'], stats_non_empty['precision'], 'o', color = '#377eb8')
axes[1, 1].plot(stats_non_empty['num_relocations'], stats_non_empty['recall'], 'o', color = '#377eb8')
axes[1, 2].plot(stats_non_empty['num_relocations'], stats_non_empty['f1_score'], 'o', color = '#377eb8')
axes[2, 0].plot(stats_non_empty['num_translocations'], stats_non_empty['precision'], 'o', color = '#4daf4a')
axes[2, 1].plot(stats_non_empty['num_translocations'], stats_non_empty['recall'], 'o', color = '#4daf4a')
axes[2, 2].plot(stats_non_empty['num_translocations'], stats_non_empty['f1_score'], 'o', color = '#4daf4a')
axes[3, 0].plot(stats_non_empty['num_inversions'], stats_non_empty['precision'], 'o', color = '#984ea3')
axes[3, 1].plot(stats_non_empty['num_inversions'], stats_non_empty['recall'], 'o', color = '#984ea3')
axes[3, 2].plot(stats_non_empty['num_inversions'], stats_non_empty['f1_score'], 'o', color = '#984ea3')
# xlabs indexed by row (event type), ylabs by column (score type).
xlabs = ['Number of misassembly events', 'Number of relocations', 'Number of translocations', 'Number of inversions']
ylabs = ['Precision', 'Recall', 'F1 score']
for i in range(0, 4):
    for j in range(0, 3):
        axes[i, j].set_xlabel(xlabs[i])
        axes[i, j].set_ylabel(ylabs[j])
plt.show()
# -
# *Rates vs. scores*
# +
# Same grid, but with event counts normalised per 1000 nt of prediction.
fig, axes = plt.subplots(nrows = 4, ncols = 3, figsize = (25, 20), sharex = True, sharey = True)
stats_non_empty = greedy_stats.loc[greedy_stats['num_predictions'] > 0]
misasm_rates = 1000 * np.array(stats_non_empty['num_misassemblies']) / np.array(stats_non_empty['prediction_length'])
rel_rates = 1000 * np.array(stats_non_empty['num_relocations']) / np.array(stats_non_empty['prediction_length'])
trans_rates = 1000 * np.array(stats_non_empty['num_translocations']) / np.array(stats_non_empty['prediction_length'])
inv_rates = 1000 * np.array(stats_non_empty['num_inversions']) / np.array(stats_non_empty['prediction_length'])
axes[0, 0].plot(misasm_rates, stats_non_empty['precision'], 'o', color = '#e41a1c')
axes[0, 1].plot(misasm_rates, stats_non_empty['recall'], 'o', color = '#e41a1c')
axes[0, 2].plot(misasm_rates, stats_non_empty['f1_score'], 'o', color = '#e41a1c')
axes[1, 0].plot(rel_rates, stats_non_empty['precision'], 'o', color = '#377eb8')
axes[1, 1].plot(rel_rates, stats_non_empty['recall'], 'o', color = '#377eb8')
axes[1, 2].plot(rel_rates, stats_non_empty['f1_score'], 'o', color = '#377eb8')
axes[2, 0].plot(trans_rates, stats_non_empty['precision'], 'o', color = '#4daf4a')
axes[2, 1].plot(trans_rates, stats_non_empty['recall'], 'o', color = '#4daf4a')
axes[2, 2].plot(trans_rates, stats_non_empty['f1_score'], 'o', color = '#4daf4a')
axes[3, 0].plot(inv_rates, stats_non_empty['precision'], 'o', color = '#984ea3')
axes[3, 1].plot(inv_rates, stats_non_empty['recall'], 'o', color = '#984ea3')
axes[3, 2].plot(inv_rates, stats_non_empty['f1_score'], 'o', color = '#984ea3')
xlabs = ['Average number of misassembly events per 1000 nt', 'Average number of relocations per 1000 nt',
         'Average number of translocations per 1000 nt', 'Average number of inversions per 1000 nt']
ylabs = ['Precision', 'Recall', 'F1 score']
for i in range(0, 4):
    for j in range(0, 3):
        axes[i, j].set_xlabel(xlabs[i])
        axes[i, j].set_ylabel(ylabs[j])
plt.show()
| results/hyasp_quast_analysis_ncbi_filtered.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=false editable=false
# # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS109B Data Science 2: Advanced Topics in Data Science
#
# ## Homework 8: Reinforcement Learning [100 pts]
#
#
# **Harvard University**<br/>
# **Spring 2020**<br/>
# **Instructors**: <NAME>, <NAME> and <NAME><br/>
#
# **DISCLAIMER**: No public reproduction of this homework nor its solution is allowed without the explicit consent of their authors.
#
#
#
# <hr style="height:2pt">
#
# ---
# + deletable=false editable=false
#PLEASE RUN THIS CELL
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/cs109.css").text
HTML(styles)
# + [markdown] deletable=false editable=false
# ### INSTRUCTIONS
#
# - To submit your assignment follow the instructions given in Canvas.
# - Restart the kernel and run the whole notebook again before you submit.
# - Do not submit a notebook that is excessively long because output was not suppressed or otherwise limited.
# + deletable=false editable=false
# Numpy and plotting libraries
import numpy as np
import matplotlib.pyplot as plt
import time
# %matplotlib inline
# + [markdown] deletable=false editable=false
# # Overview
# + [markdown] deletable=false editable=false
# The objective of this homework assignment is to get a taste of implementing a planning algorithm in a very simple setting.
# + [markdown] deletable=false editable=false
# <div class='exercise'><b> Markov Decision Process [100 points] </b></div>
#
#
# We have a hallway consisting of 5 blocks (states 0-4). There are two actions, which deterministically move the agent to the left or the right. More explicitly: Performing action “left” in state 0 keeps you in state 0, moves you from state 1 to state 0, from state 2 to state 1, state 3 to state 2, and state 4 to state 3. Performing action “right” in state 4 keeps you in state 4, moves you from state 3 to state 4, from state 2 to state 3, from state 1 to state 2, and from state 0 to state 1. The agent receives a reward of -1.0 if it starts any iteration in state 0, state 1, state 2, or state 3. The agent receives a reward of +10.0 if it starts in state 4. Let the discount factor γ = 0.75.
#
# We provide class MDP that instantiates an object representing a Markov decision process and verifies shapes.
#
# **1.1** MDP problem [10 pts]: Build an MDP representing the hallway setting described above, by completing the function `build_hallway_mdp()`. You need to specify the array T that encodes the transitions from state and actions into next states; and a reward vector R that specifies the reward for being at a certain state.
#
# **1.2** Policy Evaluation [20 pts]: Initialize a policy “left” for every state (a 1D numpy array). Implement policy evaluation as described in lecture (also in Chapter 4 of [Sutton and Barto](http://incompleteideas.net/book/RLbook2018.pdf)). That is, for each possible starting state, what is the expected sum of future rewards for this policy? Using an iterative approach, how many iterations did it take for the value of the policy to converge to a precision of 10−5?
#
# **1.3** Q-function Computation [20 pts]: Compute the Q-function for the `always_left` policy above. Do you see any opportunities for policy improvement?
#
# **1.4** Policy Iteration [20 pts]: Using your solutions to questions 1.2 and 1.3 above, implement policy iteration. Report the sequence of policies you find starting with the policy “left” in every state. How many rounds of policy iteration are required to converge to the optimal policy?
#
# **1.5** [10 pts] What are the effects of different choices of the discount factor on the convergence of policy evaluation? Run policy evaluation for discount factor $\gamma \in [ 10^{-12}, 10^{-3}, 0.1, 0.33, 0.67, 0.9, 0.95, 0.99]$.
#
# **1.6** [20 pts] What happens if the transitions are stochastic? Recode the MDP with probability of switching to the opposite action of 0.1. What are now the values when following the optimal policy?
# + deletable=false editable=false
class MDP(object):
    """Wrapper for a discrete Markov decision process that makes shape checks"""

    def __init__(self, T, R, discount):
        """Initialize the Markov Decision Process.

        - `T`: 3D array indexed by (initial state, action, next state) holding
          transition probabilities.
        - `R`: 1D array of rewards for beginning a timestep in each state, or
          a 3D array shaped like `T`. A 1D array is expanded to the 3D form.
        - `discount`: decay of future rewards; must lie in [0, 1).
        """
        n_states, n_actions, _ = T.shape
        if T.shape != (n_states, n_actions, n_states):
            raise ValueError("T should be in R^|S|x|A|x|S|")
        if R.shape not in [(n_states, n_actions, n_states), (n_states,)]:
            raise ValueError("R should be in R^|S| or like T")
        if discount < 0 or discount >= 1:
            raise ValueError("discount should be in [0,1)")
        if R.shape == (n_states,):
            # Expand the per-state reward vector to the (s, a, s') form; the
            # reward depends only on the starting state s.
            R = np.tile(R[:, None, None], (1, n_actions, n_states))
        self.T = T
        self.R = R
        self.discount = discount
        self.num_states = n_states
        self.num_actions = n_actions
        self.states = np.arange(n_states)
        self.actions = np.arange(n_actions)
# + [markdown] autograde="1.1" deletable=false editable=false
# **1.1** MDP problem [10 pts]: Build an MDP representing the hallway setting described above, by completing the function `build_hallway_mdp()`. You need to specify the array T that encodes the transitions from state and actions into next states; and a reward vector R that specifies the reward for being at a certain state.
#
# + deletable=false
def build_hallway_mdp():
    """Build an MDP representing the hallway setting described."""
    n_states, n_actions = 5, 2
    LEFT, RIGHT = 0, 1
    T = np.zeros((n_states, n_actions, n_states))
    for s in range(n_states):
        # Movement is deterministic; the walls at states 0 and 4 keep the
        # agent in place when it pushes against them.
        T[s, LEFT, max(s - 1, 0)] = 1
        T[s, RIGHT, min(s + 1, n_states - 1)] = 1
    # Reward -1 for starting in states 0-3, +10 for the goal state 4.
    R = np.array([-1, -1, -1, -1, 10])
    return MDP(T, R, 0.75)
# + deletable=false editable=false
# Run for sanity check
mdp = build_hallway_mdp()
# Visualise the two 5x5 transition matrices side by side; deterministic moves
# show up as a single off-diagonal band of ones.
plt.figure(figsize=(5,2))
plt.subplot(121, title='Left transitions')
plt.imshow(mdp.T[:,0,:])
plt.ylabel("Initial state"); plt.xlabel('Next state')
plt.subplot(122, title='Right transitions')
plt.imshow(mdp.T[:,1,:])
plt.ylabel("Initial state"); plt.xlabel('Next state')
plt.show()
# + [markdown] autograde="1.2" deletable=false editable=false
# **1.2** Policy Evaluation [20 pts]: Initialize a policy “left” for every state (a 1D numpy array). Implement policy evaluation as described in lecture (also in Chapter 4 of [Sutton and Barto](http://incompleteideas.net/book/RLbook2018.pdf)). That is, for each possible starting state, what is the expected sum of future rewards for this policy? Using an iterative approach, how many iterations did it take for the value of the policy to converge to a precision of 10−5?
#
# + deletable=false
def build_always_left_policy():
    """Build a policy representing the action "left" in every state."""
    # Action index 0 = "left"; one entry per hallway state.
    return np.array([0] * 5, dtype=int)
# + deletable=false
def iterative_value_estimation(mdp, policy, tol=1e-5):
    """Value estimation algorithm from page 75, Sutton and Barto. Returns an
    estimate of the value of a given policy under the MDP (with the number of
    iterations required to reach specified tolerance)."""
    V = np.zeros(mdp.num_states)
    num_iters = 0
    delta = tol  # forces at least one sweep
    while delta >= tol:
        delta = 0.0
        # In-place (Gauss-Seidel) sweep: states later in the sweep already
        # see this sweep's updated values.
        for s in mdp.states:
            previous = V[s]
            a = policy[s]  # action prescribed by the policy at s
            # Bellman backup for the fixed policy action:
            #   V(s) = sum_s' T(s,a,s') * (R(s,a,s') + gamma * V(s'))
            V[s] = np.dot(mdp.T[s, a], mdp.R[s, a] + mdp.discount * V)
            delta = max(delta, abs(V[s] - previous))
        num_iters += 1
    return V, num_iters
# + deletable=false editable=false
# Run for sanity check
always_left = build_always_left_policy()
values, iters = iterative_value_estimation(mdp, always_left)
print('Policy value was:')
print(values.round(4))
# Convergence study: sweeps needed as the tolerance tightens (log-spaced 1 .. 1e-8).
tols = np.logspace(0,-8,9)
iters = [iterative_value_estimation(mdp, always_left, tol=tol)[1] for tol in tols]
plt.plot(tols, iters, marker='o')
plt.xscale('log')
plt.xlabel("Tolerance")
plt.ylabel("Iterations to converge to within tolerance")
plt.show()
# + [markdown] autograde="1.3" deletable=false editable=false
# **1.3** Q-function Computation [20 pts]: Compute the Q-function for the `always_left` policy above. Do you see any opportunities for policy improvement?
#
# -
# There is no iteration here. Just use V from value estimation in 1.2, and then calculate Q
# + deletable=false
# 1.3
def Q_function(mdp, policy, tol=1e-5):
    """Q function from Equation 4.6, Sutton and Barto. For each state and
    action, returns the value of performing the action at that state, then
    following the policy thereafter."""
    # Evaluate the policy first; Q is then a one-step lookahead on V:
    #   Q(s, a) = sum_s' T(s,a,s') * (R(s,a,s') + gamma * V(s'))
    V, _ = iterative_value_estimation(mdp, policy, tol)
    Q = np.array([
        [np.dot(mdp.T[s, a], mdp.R[s, a] + mdp.discount * V)
         for a in mdp.actions]
        for s in mdp.states
    ])
    assert Q.shape == (mdp.num_states, mdp.num_actions)
    return Q
# + deletable=false editable=false
# Run for sanity check
# Q values of the all-left policy; comparing columns reveals improvement opportunities.
Q = Q_function(mdp, always_left)
print('Q function was:')
print(Q.round(4))
# + [markdown] deletable=false
# *Your answer here*
#
# There is an opportunity for policy improvement, as the Q(s, left) != Q(s, right) for the last two states. Going to the right is definitely a improvement for them.
# + [markdown] autograde="1.4" deletable=false editable=false
# **1.4** Policy Iteration [20 pts]: Using your solutions to questions 1.2 and 1.3 above, implement policy iteration. Report the sequence of policies you find starting with the policy “left” in every state. How many rounds of policy iteration are required to converge to the optimal policy?
#
# + deletable=false
# 1.4
def policy_iteration(mdp, init_policy=None, tol=1e-5):
    """Policy iteration algorithm from page 80, Sutton and Barto.
    Iteratively transform the initial policy to become optimal.
    Return the full path (a list of policies, ending with the optimal one).

    Removed the unused locals `V` and `num_iter` from the original; the
    returned value and the sequence of policies are unchanged.
    """
    policies = [init_policy]
    policy = np.array(init_policy)  # copy so the caller's array is never mutated
    while True:
        # Policy evaluation: Q-values of the current policy.
        Q = Q_function(mdp, policy, tol)
        # Policy improvement: act greedily with respect to Q.
        policy_stable = True
        for state in mdp.states:
            old_action = policy[state]
            policy[state] = np.argmax(Q[state])
            if old_action != policy[state]:
                policy_stable = False
        if policy_stable:
            # No state changed its action, so the policy is optimal; the last
            # appended policy already equals the current one.
            break
        policies.append(np.array(policy))
    return policies
# + deletable=false editable=false
# Sanity check
policy_iters = policy_iteration(mdp, always_left)
# Displays the sequence of improved policies (the last one is optimal).
policy_iters
# + [markdown] deletable=false
# *Your answer here*
#
# It only took 4 iterations to converge to the optimal policy.
# + [markdown] autograde="1.5" deletable=false editable=false
# **1.5** [10 pts] What are the effects of different choices of the discount factor on the convergence of policy evaluation? Run policy evaluation for discount factor $\gamma \in [ 10^{-12}, 10^{-3}, 0.1, 0.33, 0.67, 0.9, 0.95, 0.99]$.
#
# + deletable=false
# 1.5
# your code here
always_left = build_always_left_policy()
iters_by_factor = []
discount_factors = [1e-12, 1e-3, 0.1, 0.33, 0.67, 0.9, 0.95, 0.99]
# NOTE(review): this mutates the shared `mdp` object in place and leaves its
# discount at 0.99 afterwards -- confirm no later cell relies on the original
# 0.75 value of this object.
for gamma in discount_factors:
    mdp.discount = gamma
    iters_by_factor.append(iterative_value_estimation(mdp, always_left)[1])
# + deletable=false editable=false
# Plot sweeps-to-convergence against the discount factor from the sweep above.
plt.plot(discount_factors, iters_by_factor, marker='o')
plt.xlabel('Discount factor $\gamma$')
plt.ylabel('Iterations for value estimate to converge')
plt.title("Convergence of value estimate by $\gamma$")
plt.show()
# + [markdown] deletable=false
# *Your answer here*
#
# As the discount factors increases, it takes more iterations to converge. The more we focus on future rewards, the more frequently V(s) changes, and thus it takes more time to converge.
# + [markdown] autograde="1.6" deletable=false editable=false
# **1.6** [20 pts] What happens if the transitions are stochastic? Recode the MDP with probability of switching to the opposite action of 0.1. What are now the values when following the optimal policy?
# + deletable=false
# 1.6
# your code here
def build_stochastic_mdp():
    """Hallway MDP in which each action moves the intended way with
    probability 0.9 and slips to the opposite direction with probability 0.1
    (walls at states 0 and 4 keep the agent in place)."""
    n_states = 5
    LEFT, RIGHT = 0, 1
    T = np.zeros((n_states, 2, n_states))
    for s in range(n_states):
        left_target = max(s - 1, 0)
        right_target = min(s + 1, n_states - 1)
        # Intended direction with probability 0.9, opposite with 0.1.
        T[s, LEFT, left_target] = 0.9
        T[s, LEFT, right_target] = 0.1
        T[s, RIGHT, right_target] = 0.9
        T[s, RIGHT, left_target] = 0.1
    # Reward -1 in states 0-3, +10 in the goal state 4 (as before).
    R = np.array([-1, -1, -1, -1, 10])
    return MDP(T, R, 0.75)
mdp_sto = build_stochastic_mdp()
# use optimal policy from 1.4
# NOTE(review): this reuses the policy that is optimal for the deterministic
# hallway; it is evaluated (not re-optimised) under the stochastic dynamics.
opt_policy = policy_iters[-1]
# values under optimal policy
V_sto, _ = iterative_value_estimation(mdp_sto, opt_policy)
print(V_sto)
# + [markdown] deletable=false
# *Your answer here*
#
# The optimal values are: [ 6.73733387, 10.71413468, 16.60569034, 24.89205419, 36.51353147]
# -
| content/homeworks/hw08/cs109b_hw8_submit.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Connect to the example "Chinook" database and list the tables.
import sqlite3
# Open the example database and grab a cursor for raw SQL execution.
conn = sqlite3.connect("chinook.db")
cursor = conn.cursor()
# sqlite_master holds one row per schema object; filter to the user tables.
query = "SELECT name FROM sqlite_master WHERE type='table';"
cursor.execute(query)
results = cursor.fetchall()
print(results[:])
# ### Create a couple of named subqueries defined in a WITH clause
import pandas as pd  # fix: pd.read_sql_query below was used without pandas being imported
# subqueries = "WITH customers_by_country AS (SELECT * FROM customers GROUP BY country), \n" + \
# "sales_per_customer AS (SELECT customer_id, SUM(total) AS total_spent FROM invoices GROUP BY 1)"
# cursor.execute(subqueries)
# NOTE(review): a bare WITH clause is not a complete statement -- SQLite will
# raise an OperationalError unless a SELECT follows it, and CTEs do not
# persist across execute() calls anyway. Also confirm the table name
# ("customer" vs "customers") against the table list printed above, and that
# the "facts" table queried below exists in this database (it appears to
# belong to the CIA factbook dataset, not Chinook).
subquery = "WITH customers_by_country AS (SELECT * FROM customer GROUP BY country)"
cursor.execute(subquery)
q = "SELECT MIN(population), MAX(population), MIN(population_growth), MAX(population_growth) FROM facts"
pd.read_sql_query(q, conn)
# ### Identify anomalies
# The above shows us that some countries in the database table have population count of zero. Find these countries.
q = "SELECT NAME FROM facts WHERE population = 0"
pd.read_sql_query(q, conn)
# Now find the countries with the maximum number of people (7256490011).
q = "SELECT NAME FROM facts WHERE population = (SELECT MAX(population) FROM facts)"
pd.read_sql_query(q, conn)
# ### Generate histogram plots from the dataset
# Exclude the zero-population rows and the world-total row found above.
q = "SELECT population, population_growth, birth_rate, death_rate FROM facts WHERE population < (SELECT MAX(population) FROM facts) AND population > 0"
facts = pd.read_sql_query(q, conn)
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# 2x2 grid of histograms, one per demographic column.
fig = plt.figure(figsize=(12, 9))
ax1 = fig.add_subplot(2, 2, 1)
ax1.set_title("Population")
ax2 = fig.add_subplot(2, 2, 2)
ax2.set_title("Population Growth")
ax3 = fig.add_subplot(2, 2, 3)
ax3.set_title("Birth Rate")
ax4 = fig.add_subplot(2, 2, 4)
ax4.set_title("Death Rate")
facts['population'].hist(bins=50, grid=False, ax=ax1)
facts['population_growth'].hist(bins=25, grid=False, ax=ax2)
facts['birth_rate'].hist(bins=25, grid=False, ax=ax3)
facts['death_rate'].hist(bins=25, grid=False, ax=ax4)
# -
| dataquest/notebooks/lesson_sql_complex_queries/SQL_Building_and_Organizing_Complex_Queries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
# TO USE A DATABASE OTHER THAN SQLITE, USE THIS LINE
# Note that this is necessary for parallel execution amongst other things...
# os.environ['SNORKELDB'] = 'postgres:///snorkel-intro'
from snorkel import SnorkelSession
session = SnorkelSession()
# Here, we just set how many documents we'll process for automatic testing- you can safely ignore this!
n_docs = 500 if 'CI' in os.environ else 2591
from snorkel.models import candidate_subclass
# Binary relation candidate: a pair of person mentions in one sentence.
Spouse = candidate_subclass('Spouse', ['person1', 'person2'])
# Splits 0/1/2 hold the train/dev/test candidates respectively.
train_cands = session.query(Spouse).filter(Spouse.split == 0).order_by(Spouse.id).all()
dev_cands = session.query(Spouse).filter(Spouse.split == 1).order_by(Spouse.id).all()
test_cands = session.query(Spouse).filter(Spouse.split == 2).order_by(Spouse.id).all()
print(len(dev_cands))
# +
from util import load_external_labels
# # %time load_external_labels(session, Spouse, annotator_name='gold')
from snorkel.annotations import load_gold_labels
#L_gold_dev = load_gold_labels(session, annotator_name='gold', split=1, zero_one=True)
#L_gold_test = load_gold_labels(session, annotator_name='gold', split=2, zero_one=True)
# L_gold_dev = load_gold_labels(session, annotator_name='gold', split=1)
# L_gold_test = load_gold_labels(session, annotator_name='gold', split=2)
L_gold_dev = load_gold_labels(session, annotator_name='gold', split=1, zero_one=True)
# gold_labels_dev = [L[0,0] if L[0,0]==1 else -1 for L in L_gold_dev]
gold_labels_dev = [L[0,0] for L in L_gold_dev]
from snorkel.learning.utils import MentionScorer
# +
#gold_labels_dev = [x[0,0] for x in L_gold_dev.todense()]
#for i,L in enumerate(gold_labels_dev):
# print(i,gold_labels_dev[i])
# gold_labels_dev = []
# for i,L in enumerate(L_gold_dev):
# gold_labels_dev.append(L[0,0])
# gold_labels_test = []
# for i,L in enumerate(L_gold_test):
# gold_labels_test.append(L[0,0])
# print(len(gold_labels_dev),len(gold_labels_test))
# print(gold_labels_dev.count(1),gold_labels_dev.count(-1))
# print(len(gold_labels_dev))
print(gold_labels_dev.count(1),gold_labels_dev.count(0))
print(len(gold_labels_dev))
# +
from gensim.parsing.preprocessing import STOPWORDS
import gensim.matutils as gm
from gensim.models.keyedvectors import KeyedVectors
# Load pretrained model (since intermediate data is not included, the model cannot be refined with additional data)
model = KeyedVectors.load_word2vec_format('../../../snorkel/tutorials/glove_w2v.txt', binary=False) # C binary format
wordvec_unavailable= set()
def write_to_file(wordvec_unavailable):
    """Dump every out-of-vocabulary token to wordvec_unavailable.txt, one per line."""
    lines = [word + "\n" for word in wordvec_unavailable]
    with open("wordvec_unavailable.txt", "w") as f:
        f.writelines(lines)
def preprocess(tokens):
    """Filter tokens down to alphabetic, non-stopword words (order preserved)."""
    return [w for w in tokens if w not in STOPWORDS and w.isalpha()]
def get_word_vectors(btw_words):  # returns vector of embeddings of words
    """Look up the embedding of each word in `btw_words`.

    Words missing from the embedding vocabulary are recorded in the global
    `wordvec_unavailable` set instead of raising.  Returns a list of vectors
    (one per in-vocabulary word, original order preserved).
    """
    word_vectors = []
    for word in btw_words:
        try:
            word_vectors.append(model[word])
        # BUG FIX: was a bare `except:` that also swallowed unrelated errors
        # (KeyboardInterrupt included); only a vocabulary miss raises KeyError.
        # Also removed the dead `word_v = np.array(...).reshape(...)` lines —
        # their result was never used.
        except KeyError:
            wordvec_unavailable.add(word)
    return word_vectors
def get_similarity(word_vectors,target_word): # sent(list of word vecs) to word similarity
    """Return the maximum cosine similarity between `target_word`'s embedding
    and any vector in `word_vectors`; returns 0 if the target word is
    out of vocabulary (and logs it to the global `wordvec_unavailable`)."""
    similarity = 0
    try:
        target_word_vector = model[target_word]
    # BUG FIX: was a bare `except:`; only a vocabulary miss raises KeyError.
    except KeyError:
        # " t" suffix marks a missing *target* word in the log file.
        wordvec_unavailable.add(target_word+" t")
        return similarity
    target_word_sparse = gm.any2sparse(target_word_vector,eps=1e-09)
    for wv in word_vectors:
        wv_sparse = gm.any2sparse(wv, eps=1e-09)
        similarity = max(similarity,gm.cossim(wv_sparse,target_word_sparse))
    return similarity
# +
# ####### Discrete ##########
# spouses = {'spouse', 'wife', 'husband', 'ex-wife', 'ex-husband'}
# family = {'father', 'mother', 'sister', 'brother', 'son', 'daughter',
# 'grandfather', 'grandmother', 'uncle', 'aunt', 'cousin'}
# family = family | {f + '-in-law' for f in family}
# other = {'boyfriend', 'girlfriend' 'boss', 'employee', 'secretary', 'co-worker'}
# # Helper function to get last name
# def last_name(s):
# name_parts = s.split(' ')
# return name_parts[-1] if len(name_parts) > 1 else None
# def LF_husband_wife(c):
# return (1,1) if len(spouses.intersection(get_between_tokens(c))) > 0 else (0,0)
# def LF_husband_wife_left_window(c):
# if len(spouses.intersection(get_left_tokens(c[0], window=2))) > 0:
# return (1,1)
# elif len(spouses.intersection(get_left_tokens(c[1], window=2))) > 0:
# return (1,1)
# else:
# return (0,0)
# def LF_same_last_name(c):
# p1_last_name = last_name(c.person1.get_span())
# p2_last_name = last_name(c.person2.get_span())
# if p1_last_name and p2_last_name and p1_last_name == p2_last_name:
# if c.person1.get_span() != c.person2.get_span():
# return (1,1)
# return (0,0)
# def LF_no_spouse_in_sentence(c):
# return (-1,1) if np.random.rand() < 0.75 and len(spouses.intersection(c.get_parent().words)) == 0 else (0,0)
# def LF_and_married(c):
# return (1,1) if 'and' in get_between_tokens(c) and 'married' in get_right_tokens(c) else (0,0)
# def LF_familial_relationship(c):
# return (-1,1) if len(family.intersection(get_between_tokens(c))) > 0 else (0,0)
# def LF_family_left_window(c):
# if len(family.intersection(get_left_tokens(c[0], window=2))) > 0:
# return (-1,1)
# elif len(family.intersection(get_left_tokens(c[1], window=2))) > 0:
# return (-1,1)
# else:
# return (0,0)
# def LF_other_relationship(c):
# return (-1,1) if len(other.intersection(get_between_tokens(c))) > 0 else (0,0)
# import bz2
# # Function to remove special characters from text
# def strip_special(s):
# return ''.join(c for c in s if ord(c) < 128)
# # Read in known spouse pairs and save as set of tuples
# with bz2.BZ2File('data/spouses_dbpedia.csv.bz2', 'rb') as f:
# known_spouses = set(
# tuple(strip_special(x.decode('utf-8')).strip().split(',')) for x in f.readlines()
# )
# # Last name pairs for known spouses
# last_names = set([(last_name(x), last_name(y)) for x, y in known_spouses if last_name(x) and last_name(y)])
# def LF_distant_supervision(c):
# p1, p2 = c.person1.get_span(), c.person2.get_span()
# return (1,1) if (p1, p2) in known_spouses or (p2, p1) in known_spouses else (0,0)
# def LF_distant_supervision_last_names(c):
# p1, p2 = c.person1.get_span(), c.person2.get_span()
# p1n, p2n = last_name(p1), last_name(p2)
# return (1,1) if (p1 != p2) and ((p1n, p2n) in last_names or (p2n, p1n) in last_names) else (0,0)
# LFs = [
# LF_distant_supervision, LF_distant_supervision_last_names,
# LF_husband_wife, LF_husband_wife_left_window, LF_same_last_name,
# LF_no_spouse_in_sentence, LF_and_married, LF_familial_relationship,
# LF_family_left_window, LF_other_relationship
# ]
# +
##### Continuous ################
import re
from snorkel.lf_helpers import (
get_left_tokens, get_right_tokens, get_between_tokens,
get_text_between, get_tagged_text,
)
# Keyword sets used by the continuous labeling functions below.
spouses = {'spouse', 'wife', 'husband', 'ex-wife', 'ex-husband'}
family = {'father', 'mother', 'sister', 'brother', 'son', 'daughter',
          'grandfather', 'grandmother', 'uncle', 'aunt', 'cousin'}
family = family | {f + '-in-law' for f in family}
# BUG FIX: the original was missing a comma between 'girlfriend' and 'boss',
# so implicit string concatenation produced the single token 'girlfriendboss'
# and neither word could ever match.
other = {'boyfriend', 'girlfriend', 'boss', 'employee', 'secretary', 'co-worker'}
# Helper function to get last name
def last_name(s):
    """Return the final space-separated part of a full name, or None when
    the string contains no space (single-token names have no last name)."""
    name_parts = s.split(' ')
    return name_parts[-1] if len(name_parts) > 1 else None
def LF_husband_wife(c):
    """Positive LF: strongest embedding similarity between the words lying
    between the two mentions and any spouse keyword."""
    global LF_Threshold
    vecs = get_word_vectors(preprocess(get_between_tokens(c)))
    best = max([get_similarity(vecs, kw) for kw in spouses] + [0])
    return (1, best)
def LF_husband_wife_left_window(c):
    """Positive LF: spouse-keyword similarity among the tokens to the left
    of either person mention; score is the best over both windows."""
    global LF_Threshold
    window_scores = []
    for person_span in (c[0], c[1]):
        vecs = get_word_vectors(preprocess(get_left_tokens(person_span)))
        window_scores.append(max([get_similarity(vecs, kw) for kw in spouses] + [0]))
    return (1, max(window_scores))
def LF_same_last_name(c):
    """Positive LF: both mentions share a (multi-part) last name while
    being different spans."""
    span1 = c.person1.get_span()
    span2 = c.person2.get_span()
    n1, n2 = last_name(span1), last_name(span2)
    if n1 and n2 and n1 == n2 and span1 != span2:
        return (1, 1)
    return (0, 0)
def LF_no_spouse_in_sentence(c):
    """Negative LF: fires (with fixed confidence 0.75, on a random 75% of
    candidates) when the sentence contains no spouse keyword at all."""
    sampled = np.random.rand() < 0.75
    if sampled and len(spouses.intersection(c.get_parent().words)) == 0:
        return (-1, 0.75)
    return (0, 0)
def LF_and_married(c):
    """Positive LF: 'and' appears between the mentions and something similar
    to 'married' appears to the right; score is that similarity."""
    global LF_Threshold
    vecs = get_word_vectors(preprocess(get_right_tokens(c)))
    married_score = get_similarity(vecs, 'married')
    return (1, married_score) if 'and' in get_between_tokens(c) else (0, 0)
def LF_familial_relationship(c):
    """Negative LF: similarity between the between-span words and any
    family-relation keyword (family implies not spouses)."""
    vecs = get_word_vectors(preprocess(get_between_tokens(c)))
    score = max([get_similarity(vecs, kw) for kw in family] + [0])
    return (-1, score)
def LF_family_left_window(c):
    """Negative LF: family-keyword similarity among the tokens left of
    either person mention; score is the best over both windows."""
    best = 0
    for person_span in (c[0], c[1]):
        vecs = get_word_vectors(preprocess(get_left_tokens(person_span)))
        for kw in family:
            best = max(best, get_similarity(vecs, kw))
    return (-1, best)
def LF_other_relationship(c):
    """Negative LF: similarity between the between-span words and keywords
    for non-romantic relationships (boss, co-worker, ...)."""
    vecs = get_word_vectors(preprocess(get_between_tokens(c)))
    score = max([get_similarity(vecs, kw) for kw in other] + [0])
    return (-1, score)
# def LF_other_relationship_left_window(c):
# sc = 0
# word_vectors = get_word_vectors(preprocess(get_left_tokens(c)))
# for ow in other:
# sc=max(sc,get_similarity(word_vectors,ow))
# return (-1,sc)
import bz2
# Function to remove special characters from text
def strip_special(s):
    """Return `s` with every non-ASCII (codepoint >= 128) character removed."""
    return ''.join(filter(lambda ch: ord(ch) < 128, s))
# # Read in known spouse pairs and save as set of tuples
# with bz2.BZ2File('data/spouses_dbpedia.csv.bz2', 'rb') as f:
# known_spouses = set(
# tuple(strip_special(x).strip().split(',')) for x in f.readlines()
# )
# # Last name pairs for known spouses
# last_names = set([(last_name(x), last_name(y)) for x, y in known_spouses if last_name(x) and last_name(y)])
def LF_distant_supervision(c):
    # Positive LF: the mention pair (in either order) appears in the known
    # spouse list from DBpedia.
    # NOTE(review): `known_spouses` is built by a block that is commented out
    # above — calling this LF as-is raises NameError unless that cell is
    # re-enabled. TODO confirm.
    p1, p2 = c.person1.get_span(), c.person2.get_span()
    return (1,1) if (p1, p2) in known_spouses or (p2, p1) in known_spouses else (0,0)
def LF_distant_supervision_last_names(c):
    # Positive LF: the two last names form a known-spouse pair in either order.
    # NOTE(review): depends on `last_names`, whose construction is commented
    # out above (NameError if not re-enabled).  Also the non-matching branch
    # returns (0,1) while the discrete variant and the other LFs use (0,0) —
    # confirm whether confidence 1 on an abstain is intentional.
    p1, p2 = c.person1.get_span(), c.person2.get_span()
    p1n, p2n = last_name(p1), last_name(p2)
    return (1,1) if (p1 != p2) and ((p1n, p2n) in last_names or (p2n, p1n) in last_names) else (0,1)
import numpy as np
# def LF_Three_Lists_Left_Window(c):
# global softmax_Threshold
# c1,s1 = LF_husband_wife_left_window(c)
# c2,s2 = LF_family_left_window(c)
# c3,s3 = LF_other_relationship_left_window(c)
# sc = np.array([s1,s2,s3])
# c = [c1,c2,c3]
# sharp_param = 1.5
# prob_sc = np.exp(sc * sharp_param - np.max(sc))
# prob_sc = prob_sc / np.sum(prob_sc)
# #print 'Left:',s1,s2,s3,prob_sc
# if s1==s2 or s3==s1:
# return (0,0)
# return c[np.argmax(prob_sc)],1
# def LF_Three_Lists_Between_Words(c):
# global softmax_Threshold
# c1,s1 = LF_husband_wife(c)
# c2,s2 = LF_familial_relationship(c)
# c3,s3 = LF_other_relationship(c)
# sc = np.array([s1,s2,s3])
# c = [c1,c2,c3]
# sharp_param = 1.5
# prob_sc = np.exp(sc * sharp_param - np.max(sc))
# prob_sc = prob_sc / np.sum(prob_sc)
# #print 'BW:',s1,s2,s3,prob_sc
# if s1==s2 or s3==s1:
# return (0,0)
# return c[np.argmax(prob_sc)],1
# The ten labeling functions applied to each candidate.  Order matters: it
# must stay index-for-index in sync with the polarity list `LF_l` defined
# further below.
LFs = [
    LF_distant_supervision, LF_distant_supervision_last_names,
    LF_husband_wife, LF_husband_wife_left_window, LF_same_last_name,
    LF_no_spouse_in_sentence, LF_and_married, LF_familial_relationship,
    LF_family_left_window, LF_other_relationship
]
# -
''' output:
[[[L_x1],[S_x1]],
[[L_x2],[S_x2]],
......
......
]
'''
def get_L_S_Tensor(cands):
    """Apply every LF in the global `LFs` list to every candidate.

    Returns a list shaped [[L_x, S_x], ...] where L_x holds the LF labels
    for candidate x and S_x the confidences rescaled from [-1, 1] to [0, 1].
    Progress is printed every 500 candidates (uses global `start_time`).
    """
    L_S = []
    for i, cand in enumerate(cands):
        labels = []
        scores = []
        for LF in LFs:
            lbl, score = LF(cand)
            labels.append(lbl)
            scores.append((score + 1) / 2)  # rescale score into [0, 1]
        L_S.append([labels, scores])
        if i % 500 == 0 and i != 0:
            print(str(i)+'data points labelled in',(time.time() - start_time)/60,'mins')
    return L_S
# +
# import matplotlib.pyplot as plt
import time
import numpy as np
start_time = time.time()
lt = time.localtime()
print("started at: {}-{}-{}, {}:{}:{}".format(lt.tm_mday,lt.tm_mon,lt.tm_year,lt.tm_hour,lt.tm_min,lt.tm_sec))
# Label all dev and train candidates with every LF and cache the resulting
# [labels, scores] tensors to .npy files for later training runs.
dev_L_S = get_L_S_Tensor(dev_cands)
np.save("dev_L_S_smooth",np.array(dev_L_S))
train_L_S = get_L_S_Tensor(train_cands)
np.save("train_L_S_smooth",np.array(train_L_S))
print("--- %s seconds ---" % (time.time() - start_time))
# test_L_S = get_L_S_Tensor(test_cands)
# pkl.dump(test_L_S,open("test_L_S.p","wb"))
# +
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from collections import defaultdict
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import average_precision_score
from sklearn.metrics import roc_auc_score
def draw2DArray(a):
    """Render the 2-D array `a` as a colour map with a vertical colourbar."""
    fig = plt.figure(figsize=(6, 3.2))
    ax = fig.add_subplot(111)
    ax.set_title('colorMap')
    plt.imshow(np.array(a))
    ax.set_aspect('equal')
    # Invisible overlay axes used purely to host the colourbar.
    cax = fig.add_axes([0.12, 0.1, 0.78, 0.8])
    cax.get_xaxis().set_visible(False)
    cax.get_yaxis().set_visible(False)
    cax.patch.set_alpha(0)
    cax.set_frame_on(False)
    plt.colorbar(orientation='vertical')
    plt.show()
def report2dict(cr):
    """Parse an sklearn classification_report string into a DataFrame
    (one row per class label, one column per measure)."""
    # Tokenize each line, dropping empty tokens and blank lines.
    rows = [[tok for tok in line.split(" ") if len(tok) > 0]
            for line in cr.split("\n")]
    rows = [r for r in rows if len(r) > 0]
    # First non-empty row names the measures; the rest are label rows.
    measures = rows[0]
    D_class_data = defaultdict(dict)
    for r in rows[1:]:
        class_label = r[0]
        for j, m in enumerate(measures):
            D_class_data[class_label][m.strip()] = float(r[j + 1].strip())
    return pd.DataFrame(D_class_data).T
def predictAndPrint(pl):
    """Print accuracy and the confusion matrix of predictions `pl` against
    the global dev gold labels, plot the matrix, and return the parsed
    classification report as a DataFrame."""
    print("acc", accuracy_score(gold_labels_dev, pl))
    # print(precision_recall_fscore_support(true_labels,pl,average='macro'))
    cm = confusion_matrix(gold_labels_dev, pl)
    print(cm)
    draw2DArray(cm)
    return report2dict(classification_report(gold_labels_dev, pl))  # target_names=class_names))
def drawPRcurve(y_test,y_score,it_no):
    """Plot a precision-recall curve of scores `y_score` against binary
    truth `y_test`; `it_no` tags the plot title (e.g. epoch number)."""
    fig = plt.figure()
    splt = fig.add_subplot(111)
    precision, recall, _ = precision_recall_curve(y_test, y_score,pos_label=1)
    splt.step(recall, precision, color='b', alpha=0.2,
              where='post')
    splt.fill_between(recall, precision, step='post', alpha=0.2,
                      color='b')
    average_precision = average_precision_score(y_test, y_score)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.05])
    plt.title('{0:d} Precision-Recall curve: AP={1:0.2f}'.format(it_no,
              average_precision))
def drawLossVsF1(y_loss,x_f1s,text,title):
    """Scatter-plot loss vs. F1 with one annotation per point, then save
    the figure to '<title>.png'."""
    fig, ax = plt.subplots()
    ax.scatter(x_f1s, y_loss)
    plt.xlabel('f1-score')
    plt.ylabel('loss')
    plt.title(title)
    for i, txt in enumerate(text):
        ax.annotate(txt, (x_f1s[i],y_loss[i]))
    plt.savefig(title+".png")
# -
# Designed polarity of each LF (+1 = positive evidence, -1 = negative),
# aligned index-for-index with the `LFs` list above.
LF_l = [
    1,1,1,1,1,-1,1,-1,-1,-1
]
# +
import numpy as np
# dev_L_S = np.load("dev_L_S_smooth.npy")
# train_L_S = np.load("train_L_S_smooth.npy")
# NOTE(review): this loads the *discrete* label/score tensors, replacing the
# smooth ones computed earlier in the notebook — confirm which variant the
# following training runs are meant to use.
dev_L_S = np.load("dev_L_S_discrete.npy")
train_L_S = np.load("train_L_S_discrete.npy")
print(dev_L_S.shape,train_L_S.shape)
# +
#call this only once for a kernel startup
from __future__ import absolute_import, division, print_function
import tensorflow as tf
# BATCH_SIZE = 32
# -
NoOfLFs= len(LFs)
NoOfClasses = 2
print(len(LFs),len(LF_l))
# +
## train smooth Normalized
def train_SNL(lr,ep,th):
    """Train the smooth *normalized* generative model: potentials are the
    score-weighted LF labels and the loss subtracts the log-partition term.

    lr: Adam learning rate.  ep: number of epochs.  th: TF initializer for
    the per-LF weights `thetas`.  Reads globals: train_L_S, dev_L_S,
    gold_labels_dev, NoOfLFs, LF_l, dev_cands, L_gold_dev.  Prints the loss
    and dev metrics each epoch (TF1 graph-mode code).
    """
    BATCH_SIZE = 1
    tf.reset_default_graph()
    seed = 12
    with tf.Graph().as_default():
        # One re-initializable iterator shared between train and dev data.
        train_dataset = tf.data.Dataset.from_tensor_slices(train_L_S).batch(BATCH_SIZE)
        dev_dataset = tf.data.Dataset.from_tensor_slices(dev_L_S).batch(dev_L_S.shape[0])
        labels = tf.convert_to_tensor(gold_labels_dev)
        iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                                   train_dataset.output_shapes)
        next_element = iterator.get_next()
        train_init_op = iterator.make_initializer(train_dataset)
        dev_init_op = iterator.make_initializer(dev_dataset)
        next_element = iterator.get_next()
        print("next_element",next_element)
        # alphas: learned per-LF score thresholds; thetas: per-LF weights.
        alphas = tf.get_variable('alphas', [NoOfLFs],\
            initializer=tf.truncated_normal_initializer(0.001,0.1,seed),\
            dtype=tf.float64)
        thetas = tf.get_variable('thetas', [1,NoOfLFs],\
            initializer=th,\
            dtype=tf.float64)
        # k holds the fixed LF polarities from LF_l.
        k = tf.convert_to_tensor(LF_l, dtype=tf.float64)
        print("k",k)
        print(alphas.graph)
        print(thetas.graph)
        # Each element is [labels, scores]; split into l and s.
        l,s = tf.unstack(next_element,axis=1)
        print(alphas)
        print(s)
        print("l",l)
        print(s.graph)
        # Hinge-threshold the scores at alphas.
        s_ = tf.map_fn(lambda x : tf.maximum(tf.subtract(x,alphas), 0), s)
        ls_ = tf.multiply(l,s_)
        nls_ = tf.multiply(l,s_)*-1
        # Per-class potentials for y in {-1, +1}.
        pout = tf.map_fn(lambda x: ls_*x,np.array([-1,1],dtype=np.float64))
        print("nls",nls_)
        print("thetas",thetas)
        # lst = tf.matmul(ls_,thetas)
        # print("lst",lst)
        t_pout = tf.map_fn(lambda x: tf.matmul(x,thetas,transpose_b=True),pout)
        print("pout",pout)
        print("t_pout",t_pout)
        t_k = k*tf.squeeze(thetas)
        print("t_k",t_k)
        def ints(y):
            # Per-LF contribution to the partition function for class y
            # (closed-form over the thresholded score range).
            return alphas+((tf.exp((t_k*y)*(1-alphas))-1)/(t_k*y))
        print("ints",ints)
        # zy = tf.map_fn(lambda y: tf.reduce_prod(1+tf.exp(t_k*y),axis=0),np.array([-1,1],dtype=np.float64))
        zy = tf.map_fn(lambda y: tf.reduce_prod(1+ints(y),axis=0),\
            np.array([-1,1],dtype=np.float64))
        logz = tf.log(tf.reduce_sum(zy,axis=0))
        print("zy",zy)
        print("logz",logz)
        lsp = tf.reduce_logsumexp(t_pout)
        print("lsp",lsp)
        # normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0)) - logz) # add z
        # Negative log-likelihood with log Z subtracted per example.
        normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0) -logz ))
        print("normloss",normloss)
        marginals = tf.nn.softmax(t_pout,axis=0)
        print("marginals",marginals)
        predict = tf.argmax(marginals,axis=0)
        print("predict",predict)
        # pre = tf.metrics.precision(labels,predict)
        # rec = tf.metrics.recall(labels,predict)
        # print("loss",loss)
        # print("nls_",nls_)
        # global_step = tf.Variable(0, trainable=False,dtype=tf.float64)
        # starter_learning_rate = 1.0
        # learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
        #     10, 0.96, staircase=True)
        # train_step = tf.train.AdamOptimizer(learning_rate).minimize(normloss, global_step=global_step)
        train_step = tf.train.AdamOptimizer(lr).minimize(normloss)
        # reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # reg_constant = 5.0 # Choose an appropriate one.
        # totalloss = normloss + reg_constant * sum(reg_losses)
        # train_step = tf.train.MomentumOptimizer(0.0000001,0.002).minimize(normloss)
        # train_step = tf.train.AdagradOptimizer(0.01).minimize(normloss)
        # train_step = tf.train.MomentumOptimizer(0.01,0.2).minimize(normloss)
        # train_step = tf.train.GradientDescentOptimizer(0.1).minimize(normloss)
        init_g = tf.global_variables_initializer()
        init_l = tf.local_variables_initializer()
        with tf.Session() as sess:
            sess.run(init_g)
            sess.run(init_l)
            # Initialize an iterator over the training dataset.
            for it in range(ep):
                sess.run(train_init_op)
                tl = 0
                try:
                    while True:
                        _,ls = sess.run([train_step,normloss])
                        tl = tl + ls
                except tf.errors.OutOfRangeError:
                    # End of epoch: the dataset iterator is exhausted.
                    pass
                print(it,"loss",tl)
                # Per-epoch evaluation on the dev split.
                sess.run(dev_init_op)
                a,t,m,pl = sess.run([alphas,thetas,marginals,predict])
                print(a)
                print(t)
                # MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
                unique, counts = np.unique(pl, return_counts=True)
                print(dict(zip(unique, counts)))
                print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
                drawPRcurve(np.array(gold_labels_dev),np.array(m[1::].flatten()),it)
                print()
            # Initialize an iterator over the validation dataset.
            sess.run(dev_init_op)
            a,t,m,pl = sess.run([alphas,thetas,marginals,predict])
            print(a)
            print(t)
            MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
            unique, counts = np.unique(pl, return_counts=True)
            print(dict(zip(unique, counts)))
            print("acc",accuracy_score(gold_labels_dev,pl))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl)))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="macro"))
            cf = confusion_matrix(gold_labels_dev,pl)
            print(cf)
            print("prec: tp/(tp+fp)",cf[1][1]/(cf[1][1]+cf[0][1]),"recall: tp/(tp+fn)",cf[1][1]/(cf[1][1]+cf[1][0]))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
# +
## train smooth unnormalized
def train_SUNL(lr,ep,th):
    """Train the smooth *unnormalized* model: same graph as train_SNL but
    the loss omits the log-partition term and Adagrad is the optimizer.

    lr: learning rate.  ep: number of epochs.  th: TF initializer for
    `thetas`.  Reads globals: train_L_S, dev_L_S, gold_labels_dev, NoOfLFs,
    LF_l, dev_cands, L_gold_dev.
    """
    BATCH_SIZE = 1
    tf.reset_default_graph()
    seed = 12
    with tf.Graph().as_default():
        # One re-initializable iterator shared between train and dev data.
        train_dataset = tf.data.Dataset.from_tensor_slices(train_L_S).batch(BATCH_SIZE)
        dev_dataset = tf.data.Dataset.from_tensor_slices(dev_L_S).batch(dev_L_S.shape[0])
        labels = tf.convert_to_tensor(gold_labels_dev)
        iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                                   train_dataset.output_shapes)
        next_element = iterator.get_next()
        train_init_op = iterator.make_initializer(train_dataset)
        dev_init_op = iterator.make_initializer(dev_dataset)
        next_element = iterator.get_next()
        print("next_element",next_element)
        alphas = tf.get_variable('alphas', [NoOfLFs],\
            initializer=tf.truncated_normal_initializer(0.2,0.1,seed),\
            dtype=tf.float64)
        thetas = tf.get_variable('thetas', [1,NoOfLFs],\
            initializer=th,\
            dtype=tf.float64)
        k = tf.convert_to_tensor(LF_l, dtype=tf.float64)
        print("k",k)
        print(alphas.graph)
        print(thetas.graph)
        l,s = tf.unstack(next_element,axis=1)
        print(alphas)
        print(s)
        print("l",l)
        print(s.graph)
        s_ = tf.map_fn(lambda x : tf.maximum(tf.subtract(x,alphas), 0), s)
        ls_ = tf.multiply(l,s_)
        nls_ = tf.multiply(l,s_)*-1
        pout = tf.map_fn(lambda x: ls_*x,np.array([-1,1],dtype=np.float64))
        print("nls",nls_)
        print("thetas",thetas)
        # lst = tf.matmul(ls_,thetas)
        # print("lst",lst)
        t_pout = tf.map_fn(lambda x: tf.matmul(x,thetas,transpose_b=True),pout)
        print("pout",pout)
        print("t_pout",t_pout)
        t_k = k*tf.squeeze(thetas)
        print("t_k",t_k)
        def ints(y):
            # Per-LF contribution to the partition function for class y.
            return alphas+((tf.exp((t_k*y)*(1-alphas))-1)/(t_k*y))
        print("ints",ints)
        # zy = tf.map_fn(lambda y: tf.reduce_prod(1+tf.exp(t_k*y),axis=0),np.array([-1,1],dtype=np.float64))
        zy = tf.map_fn(lambda y: tf.reduce_prod(1+ints(y),axis=0),\
            np.array([-1,1],dtype=np.float64))
        logz = tf.log(tf.reduce_sum(zy,axis=0))
        print("zy",zy)
        print("logz",logz)
        lsp = tf.reduce_logsumexp(t_pout)
        print("lsp",lsp)
        # normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0)) - logz) # add z
        # Unnormalized objective: logz is built above but NOT subtracted here.
        normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0) ))
        print("normloss",normloss)
        marginals = tf.nn.softmax(t_pout,axis=0)
        print("marginals",marginals)
        predict = tf.argmax(marginals,axis=0)
        print("predict",predict)
        # pre = tf.metrics.precision(labels,predict)
        # rec = tf.metrics.recall(labels,predict)
        # print("loss",loss)
        # print("nls_",nls_)
        # global_step = tf.Variable(0, trainable=False,dtype=tf.float64)
        # starter_learning_rate = 1.0
        # learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
        #     10, 0.96, staircase=True)
        # train_step = tf.train.AdamOptimizer(learning_rate).minimize(normloss, global_step=global_step)
        # train_step = tf.train.AdamOptimizer(lr).minimize(normloss)
        # reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # reg_constant = 5.0 # Choose an appropriate one.
        # totalloss = normloss + reg_constant * sum(reg_losses)
        # train_step = tf.train.MomentumOptimizer(0.0000001,0.002).minimize(normloss)
        train_step = tf.train.AdagradOptimizer(lr).minimize(normloss)
        # train_step = tf.train.MomentumOptimizer(0.01,0.2).minimize(normloss)
        # train_step = tf.train.GradientDescentOptimizer(0.1).minimize(normloss)
        init_g = tf.global_variables_initializer()
        init_l = tf.local_variables_initializer()
        with tf.Session() as sess:
            sess.run(init_g)
            sess.run(init_l)
            # Initialize an iterator over the training dataset.
            for it in range(ep):
                sess.run(train_init_op)
                tl = 0
                try:
                    while True:
                        _,ls = sess.run([train_step,normloss])
                        tl = tl + ls
                except tf.errors.OutOfRangeError:
                    # End of epoch: the dataset iterator is exhausted.
                    pass
                print(it,"loss",tl)
                # Per-epoch evaluation on the dev split.
                sess.run(dev_init_op)
                a,t,m,pl = sess.run([alphas,thetas,marginals,predict])
                print(a)
                print(t)
                # MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
                unique, counts = np.unique(pl, return_counts=True)
                print(dict(zip(unique, counts)))
                print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
                # drawPRcurve(np.array(gold_labels_dev),np.array(m[1::].flatten()),it)
                print()
            # Initialize an iterator over the validation dataset.
            sess.run(dev_init_op)
            a,t,m,pl = sess.run([alphas,thetas,marginals,predict])
            print(a)
            print(t)
            MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
            unique, counts = np.unique(pl, return_counts=True)
            print(dict(zip(unique, counts)))
            print("acc",accuracy_score(gold_labels_dev,pl))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl)))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="macro"))
            cf = confusion_matrix(gold_labels_dev,pl)
            print(cf)
            print("prec: tp/(tp+fp)",cf[1][1]/(cf[1][1]+cf[0][1]),"recall: tp/(tp+fn)",cf[1][1]/(cf[1][1]+cf[1][0]))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
# -
# Training runs: unnormalized (two learning rates) then normalized, all with
# thetas initialized around 1.0 and the rate scaled by the train-set size.
train_SUNL(0.1/len(train_L_S),15,tf.truncated_normal_initializer(1,0.1,12))
train_SUNL(0.01/len(train_L_S),15,tf.truncated_normal_initializer(1,0.1,12))
train_SNL(0.01/len(train_L_S),5,tf.truncated_normal_initializer(1,0.1,12))
# +
## normalized training with different params
def train_nl(lr,ep,th):
    """Train the *discrete* normalized model: potentials use the raw LF
    labels `l` (not the score-weighted `ls_`), loss includes log Z, Adam.

    lr: learning rate.  ep: number of epochs.  th: TF initializer for
    `thetas`.  Reads globals: train_L_S, dev_L_S, gold_labels_dev, NoOfLFs,
    LF_l, dev_cands, L_gold_dev.
    """
    BATCH_SIZE = 1
    tf.reset_default_graph()
    seed = 12
    with tf.Graph().as_default():
        # One re-initializable iterator shared between train and dev data.
        train_dataset = tf.data.Dataset.from_tensor_slices(train_L_S).batch(BATCH_SIZE)
        dev_dataset = tf.data.Dataset.from_tensor_slices(dev_L_S).batch(dev_L_S.shape[0])
        labels = tf.convert_to_tensor(gold_labels_dev)
        iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                                   train_dataset.output_shapes)
        next_element = iterator.get_next()
        train_init_op = iterator.make_initializer(train_dataset)
        dev_init_op = iterator.make_initializer(dev_dataset)
        next_element = iterator.get_next()
        print("next_element",next_element)
        alphas = tf.get_variable('alphas', [NoOfLFs],\
            initializer=tf.truncated_normal_initializer(0.2,0.1,seed),\
            dtype=tf.float64)
        # thetas = tf.get_variable('thetas', [1,NoOfLFs],\
        # initializer=tf.truncated_normal_initializer(1,0.1,seed),\
        # dtype=tf.float64)
        thetas = tf.get_variable('thetas',[1,NoOfLFs],\
            initializer=th,\
            dtype=tf.float64)
        print("thetas",thetas)
        k = tf.convert_to_tensor(LF_l, dtype=tf.float64)
        print("k",k)
        print(alphas.graph)
        print(thetas.graph)
        l,s = tf.unstack(next_element,axis=1)
        print(alphas)
        print(s)
        print("l",l)
        print(s.graph)
        s_ = tf.map_fn(lambda x : tf.maximum(tf.subtract(x,alphas), 0), s)
        ls_ = tf.multiply(l,s_)
        nls_ = tf.multiply(l,s_)*-1
        # NOTE: potentials here use the discrete labels l only (no scores).
        pout = tf.map_fn(lambda x: l*x,np.array([-1,1],dtype=np.float64))
        print("nls",nls_)
        # lst = tf.matmul(ls_,thetas)
        # print("lst",lst)
        t_pout = tf.map_fn(lambda x: tf.matmul(x,thetas,transpose_b=True),pout)
        print("pout",pout)
        print("t_pout",t_pout)
        t_k = k*tf.squeeze(thetas)
        print("t_k",t_k)
        # Discrete partition function over y in {-1, +1}.
        zy = tf.map_fn(lambda y: tf.reduce_prod(1+tf.exp(t_k*y),axis=0),np.array([-1,1],dtype=np.float64))
        logz = tf.log(tf.reduce_sum(zy,axis=0))
        print("zy",zy)
        print("logz",logz)
        lsp = tf.reduce_logsumexp(t_pout)
        print("lsp",lsp)
        # normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0)) - logz) # add z
        # Negative log-likelihood with log Z subtracted per example.
        normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0) - logz))
        print("normloss",normloss)
        marginals = tf.nn.softmax(t_pout,axis=0)
        print("marginals",marginals)
        predict = tf.argmax(marginals,axis=0)
        print("predict",predict)
        # pre = tf.metrics.precision(labels,predict)
        # rec = tf.metrics.recall(labels,predict)
        # print("loss",loss)
        # print("nls_",nls_)
        # global_step = tf.Variable(0, trainable=False,dtype=tf.float64)
        # starter_learning_rate = 1.0
        # learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
        #     10, 0.96, staircase=True)
        # train_step = tf.train.AdamOptimizer(learning_rate).minimize(normloss, global_step=global_step)
        # train_step = tf.train.AdamOptimizer(0.001).minimize(normloss)
        # reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # reg_constant = 5.0 # Choose an appropriate one.
        # totalloss = normloss + reg_constant * sum(reg_losses)
        train_step = tf.train.AdamOptimizer(lr).minimize(normloss)
        # train_step = tf.train.AdagradOptimizer(0.01).minimize(normloss)
        # train_step = tf.train.MomentumOptimizer(0.01,0.2).minimize(normloss)
        # train_step = tf.train.GradientDescentOptimizer(0.1).minimize(normloss)
        init_g = tf.global_variables_initializer()
        init_l = tf.local_variables_initializer()
        with tf.Session() as sess:
            sess.run(init_g)
            sess.run(init_l)
            # Initialize an iterator over the training dataset.
            for it in range(ep):
                sess.run(train_init_op)
                tl = 0
                try:
                    while True:
                        _,ls = sess.run([train_step,normloss])
                        tl = tl + ls
                except tf.errors.OutOfRangeError:
                    # End of epoch: the dataset iterator is exhausted.
                    pass
                print(it,"loss",tl)
                # Per-epoch evaluation on the dev split.
                sess.run(dev_init_op)
                a,t,m,pl = sess.run([alphas,thetas,marginals,predict])
                print(a)
                print(t)
                # MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
                unique, counts = np.unique(pl, return_counts=True)
                print(dict(zip(unique, counts)))
                print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
                print()
            # Initialize an iterator over the validation dataset.
            sess.run(dev_init_op)
            a,t,m,pl = sess.run([alphas,thetas,marginals,predict])
            print(a)
            print(t)
            MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
            unique, counts = np.unique(pl, return_counts=True)
            print(dict(zip(unique, counts)))
            print("acc",accuracy_score(gold_labels_dev,pl))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl)))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="macro"))
            cf = confusion_matrix(gold_labels_dev,pl)
            print(cf)
            print("prec: tp/(tp+fp)",cf[1][1]/(cf[1][1]+cf[0][1]),"recall: tp/(tp+fn)",cf[1][1]/(cf[1][1]+cf[1][0]))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
# +
## Objective value Normalized
def getNLObjValue(th):
    """Evaluate (without training) the normalized objective at fixed thetas.

    th: numpy array of shape (1, NoOfLFs) used as constant weights.
    Returns (train_loss, dev_loss, (precision, recall, f1, support)) where
    the metrics are computed against the dev gold labels.
    """
    BATCH_SIZE = 1
    tf.reset_default_graph()
    seed = 12
    with tf.Graph().as_default():
        # One re-initializable iterator shared between train and dev data.
        train_dataset = tf.data.Dataset.from_tensor_slices(train_L_S).batch(BATCH_SIZE)
        dev_dataset = tf.data.Dataset.from_tensor_slices(dev_L_S).batch(dev_L_S.shape[0])
        labels = tf.convert_to_tensor(gold_labels_dev)
        iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                                   train_dataset.output_shapes)
        next_element = iterator.get_next()
        train_init_op = iterator.make_initializer(train_dataset)
        dev_init_op = iterator.make_initializer(dev_dataset)
        next_element = iterator.get_next()
        print("next_element",next_element)
        alphas = tf.get_variable('alphas', [NoOfLFs],\
            initializer=tf.truncated_normal_initializer(0.2,0.1,seed),\
            dtype=tf.float64)
        # thetas = tf.get_variable('thetas', [1,NoOfLFs],\
        # initializer=tf.truncated_normal_initializer(1,0.1,seed),\
        # dtype=tf.float64)
        # Thetas are fixed constants here, not trainable variables.
        thetas = tf.convert_to_tensor(th)
        k = tf.convert_to_tensor(LF_l, dtype=tf.float64)
        print("k",k)
        print(alphas.graph)
        print(thetas.graph)
        l,s = tf.unstack(next_element,axis=1)
        print(alphas)
        print(s)
        print("l",l)
        print(s.graph)
        s_ = tf.map_fn(lambda x : tf.maximum(tf.subtract(x,alphas), 0), s)
        ls_ = tf.multiply(l,s_)
        nls_ = tf.multiply(l,s_)*-1
        # NOTE: potentials use the discrete labels l only (no scores).
        pout = tf.map_fn(lambda x: l*x,np.array([-1,1],dtype=np.float64))
        print("nls",nls_)
        print("thetas",thetas)
        # lst = tf.matmul(ls_,thetas)
        # print("lst",lst)
        t_pout = tf.map_fn(lambda x: tf.matmul(x,thetas,transpose_b=True),pout)
        print("pout",pout)
        print("t_pout",t_pout)
        t_k = k*tf.squeeze(thetas)
        print("t_k",t_k)
        # Discrete partition function over y in {-1, +1}.
        zy = tf.map_fn(lambda y: tf.reduce_prod(1+tf.exp(t_k*y),axis=0),np.array([-1,1],dtype=np.float64))
        logz = tf.log(tf.reduce_sum(zy,axis=0))
        print("zy",zy)
        print("logz",logz)
        lsp = tf.reduce_logsumexp(t_pout)
        print("lsp",lsp)
        # normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0)) - logz) # add z
        normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0) - logz))
        print("normloss",normloss)
        marginals = tf.nn.softmax(t_pout,axis=0)
        print("marginals",marginals)
        predict = tf.argmax(marginals,axis=0)
        print("predict",predict)
        # pre = tf.metrics.precision(labels,predict)
        # rec = tf.metrics.recall(labels,predict)
        # print("loss",loss)
        # print("nls_",nls_)
        # global_step = tf.Variable(0, trainable=False,dtype=tf.float64)
        # starter_learning_rate = 1.0
        # learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
        #     10, 0.96, staircase=True)
        # train_step = tf.train.AdamOptimizer(learning_rate).minimize(normloss, global_step=global_step)
        # train_step = tf.train.AdamOptimizer(0.001).minimize(normloss)
        # reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # reg_constant = 5.0 # Choose an appropriate one.
        # totalloss = normloss + reg_constant * sum(reg_losses)
        # train_step = tf.train.AdamOptimizer(0.01).minimize(normloss)
        # train_step = tf.train.AdagradOptimizer(0.01).minimize(normloss)
        # train_step = tf.train.MomentumOptimizer(0.01,0.2).minimize(normloss)
        # train_step = tf.train.GradientDescentOptimizer(0.1).minimize(normloss)
        init_g = tf.global_variables_initializer()
        init_l = tf.local_variables_initializer()
        with tf.Session() as sess:
            sess.run(init_g)
            sess.run(init_l)
            # Initialize an iterator over the training dataset.
            tl = 0
            for it in range(1):
                sess.run(train_init_op)
                try:
                    while True:
                        # _,ls = sess.run([train_step,normloss])
                        ls = sess.run(normloss) # to calculate loss on fixed thetas
                        tl = tl + ls
                except tf.errors.OutOfRangeError:
                    # Train set fully consumed; tl now holds the total loss.
                    pass
                print(it,"train loss",tl)
            # sess.run(dev_init_op)
            # a,t,m,pl,dl = sess.run([alphas,thetas,marginals,predict,normloss])
            # print(a)
            # print(t)
            # MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
            # unique, counts = np.unique(pl, return_counts=True)
            # print(dict(zip(unique, counts)))
            # print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
            # print()
            # Initialize an iterator over the validation dataset.
            sess.run(dev_init_op)
            a,t,m,pl,dl = sess.run([alphas,thetas,marginals,predict,normloss])
            print(a)
            print(t)
            print("dev loss",dl)
            MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
            unique, counts = np.unique(pl, return_counts=True)
            print(dict(zip(unique, counts)))
            print("acc",accuracy_score(gold_labels_dev,pl))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl)))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="macro"))
            cf = confusion_matrix(gold_labels_dev,pl)
            print(cf)
            print("prec: tp/(tp+fp)",cf[1][1]/(cf[1][1]+cf[0][1]),"recall: tp/(tp+fn)",cf[1][1]/(cf[1][1]+cf[1][0]))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
            res = precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary")
            return (tl,dl,res)
# Compare the normalized objective at two fixed theta vectors: the weights
# learned by Snorkel vs. those from the unnormalized training run (epoch 7).
print("snorkel thetas")
l_f1s = []
#snorkel thetas
l_f1s.append(getNLObjValue(np.array([[ 0.07472098, 0.07514459, 0.11910277,\
    0.11186369,0.07306518,0.69216714,0.07467749,0.16012659,\
    0.13682546,0.08183363]])))
print(" un-norma thetas ep7 ")
# l_f1s.append(getNLObjValue(np.array([[1.0,1.0,1.0,1.0,1.0,1.02750979,\
# 1.0,1.0218145,1.0,1.0]])))
l_f1s.append(getNLObjValue(np.array([[0.33293226,0.01940464,0.42274838,0.39655883,\
    0.31731244,0.84775084,0.37180681,0.46009105,0.5502137,0.32638473]])))
print(l_f1s)
# +
# init random thetas
# Train the normalized-loss model from randomly initialised thetas
# (learning rate scaled by training-set size, 15 epochs, seed 12).
train_nl(0.1/len(train_L_S),15,tf.truncated_normal_initializer(1,0.1,12))
# +
## Normalized loss plot loss vs f1s
# Hand-recorded (loss, F1) pairs from earlier runs, plotted for comparison.
y_loss=[153548.149,179552.66,157032.751,150114.45]
x_f1s =[0.432,0.428,0.43,0.414]
text=["snorkel-thetas","old-unNormalized-thetas","normalized-trained-thetas-ep7",\
"normalized-trained-thetas-ep15"]
drawLossVsF1(y_loss,x_f1s,text,"Spouse-Normalized-Loss")
# +
## Objective value on snorkel thetas Unnormalized # remove logz from obj
def getUNLObjValue(th):
    """Evaluate the UN-normalized objective at a fixed theta vector.

    Builds the same TF1 graph as training, but with `thetas` frozen to the
    supplied value `th` (shape [1, NoOfLFs]) and no optimizer step, so a
    single pass over the data just accumulates the loss. The log-partition
    term is deliberately omitted from the loss (hence "un-normalized").

    Returns a tuple (train_loss, dev_loss, binary_PRF_tuple).

    Relies on module-level globals: train_L_S, dev_L_S, gold_labels_dev,
    NoOfLFs, LF_l, dev_cands, L_gold_dev, MentionScorer and sklearn metrics.
    """
    BATCH_SIZE = 1
    tf.reset_default_graph()
    seed = 12
    with tf.Graph().as_default():
        # One reinitializable iterator shared between train and dev datasets.
        train_dataset = tf.data.Dataset.from_tensor_slices(train_L_S).batch(BATCH_SIZE)
        dev_dataset = tf.data.Dataset.from_tensor_slices(dev_L_S).batch(dev_L_S.shape[0])
        iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                                   train_dataset.output_shapes)
        train_init_op = iterator.make_initializer(train_dataset)
        dev_init_op = iterator.make_initializer(dev_dataset)
        # Single get_next() call (the original called it twice, leaving an
        # unused orphan op in the graph).
        next_element = iterator.get_next()
        print("next_element",next_element)
        # alphas: per-LF score thresholds (a variable, but not trained here).
        alphas = tf.get_variable('alphas', [NoOfLFs],\
                                 initializer=tf.truncated_normal_initializer(0.2,0.1,seed),\
                                 dtype=tf.float64)
        # thetas are fixed to the supplied values (not trainable).
        thetas = tf.convert_to_tensor(th)
        k = tf.convert_to_tensor(LF_l, dtype=tf.float64)
        print("k",k)
        print(alphas.graph)
        print(thetas.graph)
        # Each batch element packs (labels l, scores s) for all LFs.
        l,s = tf.unstack(next_element,axis=1)
        print(alphas)
        print(s)
        print("l",l)
        print(s.graph)
        # Thresholded scores: ReLU(s - alpha), later signed by the LF label.
        s_ = tf.map_fn(lambda x : tf.maximum(tf.subtract(x,alphas), 0), s)
        nls_ = tf.multiply(l,s_)*-1
        # Potentials for the two candidate labels y in {-1, +1}.
        pout = tf.map_fn(lambda x: l*x,np.array([-1,1],dtype=np.float64))
        print("nls",nls_)
        print("thetas",thetas)
        t_pout = tf.map_fn(lambda x: tf.matmul(x,thetas,transpose_b=True),pout)
        print("pout",pout)
        print("t_pout",t_pout)
        # Log-partition pieces: computed and printed but NOT used in the loss.
        t_k = k*tf.squeeze(thetas)
        print("t_k",t_k)
        zy = tf.map_fn(lambda y: tf.reduce_prod(1+tf.exp(t_k*y),axis=0),np.array([-1,1],dtype=np.float64))
        logz = tf.log(tf.reduce_sum(zy,axis=0))
        print("zy",zy)
        print("logz",logz)
        lsp = tf.reduce_logsumexp(t_pout)
        print("lsp",lsp)
        # Un-normalized loss: negative log-sum-exp over labels, logz omitted.
        normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0)))
        print("normloss",normloss)
        marginals = tf.nn.softmax(t_pout,axis=0)
        print("marginals",marginals)
        # predict is 0/1: index of the argmax over the {-1,+1} potentials.
        predict = tf.argmax(marginals,axis=0)
        print("predict",predict)
        init_g = tf.global_variables_initializer()
        init_l = tf.local_variables_initializer()
        with tf.Session() as sess:
            sess.run(init_g)
            sess.run(init_l)
            # One pass over the training set accumulating the fixed-theta loss.
            tl = 0
            for it in range(1):
                sess.run(train_init_op)
                try:
                    while True:
                        ls = sess.run(normloss)  # loss only; thetas are fixed
                        tl = tl + ls
                except tf.errors.OutOfRangeError:
                    pass
                print(it,"train loss",tl)
            # Evaluate once on the full dev set (dev batches are one big batch).
            sess.run(dev_init_op)
            a,t,m,pl,dl = sess.run([alphas,thetas,marginals,predict,normloss])
            print(a)
            print(t)
            print("dev loss",dl)
            MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
            unique, counts = np.unique(pl, return_counts=True)
            print(dict(zip(unique, counts)))
            print("acc",accuracy_score(gold_labels_dev,pl))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl)))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="macro"))
            cf = confusion_matrix(gold_labels_dev,pl)
            print(cf)
            print("prec: tp/(tp+fp)",cf[1][1]/(cf[1][1]+cf[0][1]),"recall: tp/(tp+fn)",cf[1][1]/(cf[1][1]+cf[1][0]))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
            res = precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary")
            return (tl,dl,res)
# Evaluate the UN-normalized objective at fixed theta vectors and collect
# (train-loss, dev-loss, binary-PRF) tuples for comparison.
print("snorkel thetas")
l_f1s = []
#snorkel thetas
l_f1s.append(getUNLObjValue(np.array([[ 0.07472098, 0.07514459, 0.11910277,\
0.11186369,0.07306518,0.69216714,0.07467749,0.16012659,\
0.13682546,0.08183363]])))
print("normalized thetas ep6 f10.43")
l_f1s.append(getUNLObjValue(np.array([[0.48198891,0.38912505,0.70829843,0.67643395,0.56100246,\
1.39815618,0.49929654,0.83071361,0.8706048,0.6070296 ]])))
print(l_f1s)
# +
## Un normalized training with different params
def train_unl(lr,ep,th):
    # Train the un-normalized loss model (no log-partition term in the loss).
    #   lr: learning rate for Adam.
    #   ep: number of epochs over the training set.
    #   th: TF initializer used for the theta (per-LF weight) variable.
    # Relies on module-level globals: train_L_S, dev_L_S, gold_labels_dev,
    # NoOfLFs, LF_l, dev_cands, L_gold_dev, MentionScorer, sklearn metrics.
    BATCH_SIZE = 1
    tf.reset_default_graph()
    seed = 12
    with tf.Graph().as_default():
        # One reinitializable iterator shared between train and dev datasets.
        train_dataset = tf.data.Dataset.from_tensor_slices(train_L_S).batch(BATCH_SIZE)
        dev_dataset = tf.data.Dataset.from_tensor_slices(dev_L_S).batch(dev_L_S.shape[0])
        labels = tf.convert_to_tensor(gold_labels_dev)
        iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                           train_dataset.output_shapes)
        # NOTE(review): get_next() is called twice; only the second tensor is
        # used downstream — the first call just leaves an orphan op in the graph.
        next_element = iterator.get_next()
        train_init_op = iterator.make_initializer(train_dataset)
        dev_init_op = iterator.make_initializer(dev_dataset)
        next_element = iterator.get_next()
        print("next_element",next_element)
        # alphas: per-LF score thresholds; thetas: per-LF weights (trainable,
        # initialised from the caller-supplied initializer `th`).
        alphas = tf.get_variable('alphas', [NoOfLFs],\
                                 initializer=tf.truncated_normal_initializer(0.2,0.1,seed),\
                                 dtype=tf.float64)
#         thetas = tf.get_variable('thetas', [1,NoOfLFs],\
#                                  initializer=tf.truncated_normal_initializer(1,0.1,seed),\
#                                  dtype=tf.float64)
        thetas = tf.get_variable('thetas',[1,NoOfLFs],\
                                 initializer=th,\
                                 dtype=tf.float64)
        print("thetas",thetas)
        k = tf.convert_to_tensor(LF_l, dtype=tf.float64)
        print("k",k)
        print(alphas.graph)
        print(thetas.graph)
        # Each batch element packs (labels l, scores s) for all LFs.
        l,s = tf.unstack(next_element,axis=1)
        print(alphas)
        print(s)
        print("l",l)
        print(s.graph)
        # Thresholded scores: ReLU(s - alpha), then signed by the LF label.
        s_ = tf.map_fn(lambda x : tf.maximum(tf.subtract(x,alphas), 0), s)
        ls_ = tf.multiply(l,s_)
        nls_ = tf.multiply(l,s_)*-1
        # Potentials for the two candidate labels y in {-1, +1}.
        pout = tf.map_fn(lambda x: l*x,np.array([-1,1],dtype=np.float64))
        print("nls",nls_)
#         lst = tf.matmul(ls_,thetas)
#         print("lst",lst)
        t_pout = tf.map_fn(lambda x: tf.matmul(x,thetas,transpose_b=True),pout)
        print("pout",pout)
        print("t_pout",t_pout)
        # Log-partition pieces: computed and printed but NOT used in the loss.
        t_k = k*tf.squeeze(thetas)
        print("t_k",t_k)
        zy = tf.map_fn(lambda y: tf.reduce_prod(1+tf.exp(t_k*y),axis=0),np.array([-1,1],dtype=np.float64))
        logz = tf.log(tf.reduce_sum(zy,axis=0))
        print("zy",zy)
        print("logz",logz)
        lsp = tf.reduce_logsumexp(t_pout)
        print("lsp",lsp)
#         normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0)) - logz) # add z
        # Un-normalized loss: negative log-sum-exp over labels (logz omitted).
        normloss = tf.negative(tf.reduce_sum(tf.reduce_logsumexp(t_pout,axis=0)))
        print("normloss",normloss)
        marginals = tf.nn.softmax(t_pout,axis=0)
        print("marginals",marginals)
        # predict is 0/1: index of the argmax over the {-1,+1} potentials.
        predict = tf.argmax(marginals,axis=0)
        print("predict",predict)
#         pre = tf.metrics.precision(labels,predict)
#         rec = tf.metrics.recall(labels,predict)
#         print("loss",loss)
#         print("nls_",nls_)
#         global_step = tf.Variable(0, trainable=False,dtype=tf.float64)
#         starter_learning_rate = 1.0
#         learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
#                                            10, 0.96, staircase=True)
#         train_step = tf.train.AdamOptimizer(learning_rate).minimize(normloss, global_step=global_step)
#         train_step = tf.train.AdamOptimizer(0.001).minimize(normloss)
#         reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
#         reg_constant = 5.0  # Choose an appropriate one.
#         totalloss = normloss + reg_constant * sum(reg_losses)
        # Adam with the caller-supplied learning rate.
        train_step = tf.train.AdamOptimizer(lr).minimize(normloss)
#         train_step = tf.train.AdagradOptimizer(0.01).minimize(normloss)
#         train_step = tf.train.MomentumOptimizer(0.01,0.2).minimize(normloss)
#         train_step = tf.train.GradientDescentOptimizer(0.1).minimize(normloss)
        init_g = tf.global_variables_initializer()
        init_l = tf.local_variables_initializer()
        with tf.Session() as sess:
            sess.run(init_g)
            sess.run(init_l)
            # Initialize an iterator over the training dataset.
            for it in range(ep):
                sess.run(train_init_op)
                tl = 0
                try:
                    while True:
                        _,ls = sess.run([train_step,normloss])
                        tl = tl + ls
                except tf.errors.OutOfRangeError:
                    # End of one epoch over the training data.
                    pass
                print(it,"loss",tl)
                # Per-epoch dev evaluation.
                sess.run(dev_init_op)
                a,t,m,pl = sess.run([alphas,thetas,marginals,predict])
                print(a)
                print(t)
#                 MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
                unique, counts = np.unique(pl, return_counts=True)
                print(dict(zip(unique, counts)))
                print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
                print()
            # Initialize an iterator over the validation dataset (final eval).
            sess.run(dev_init_op)
            a,t,m,pl = sess.run([alphas,thetas,marginals,predict])
            print(a)
            print(t)
            MentionScorer(dev_cands, L_gold_dev).score(m[1::].flatten())
            unique, counts = np.unique(pl, return_counts=True)
            print(dict(zip(unique, counts)))
            print("acc",accuracy_score(gold_labels_dev,pl))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl)))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="macro"))
            cf = confusion_matrix(gold_labels_dev,pl)
            print(cf)
            print("prec: tp/(tp+fp)",cf[1][1]/(cf[1][1]+cf[0][1]),"recall: tp/(tp+fn)",cf[1][1]/(cf[1][1]+cf[1][0]))
            print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(pl),average="binary"))
# +
#init with old network thetas
# Train the un-normalized model starting from the old network's thetas.
# train_unl(0.01,15,np.array([[1.0,1.0,1.0,\
#                              1.0,1.0,1.02750979,1.0,1.0218145,1.0,1.0]]))
train_unl(0.1/len(train_L_S),15,tf.constant_initializer(np.array([[1.0,1.0,1.0,\
1.0,1.0,1.02750979,1.0,1.0218145,1.0,1.0]])))
# +
## UN-Normalized loss plot loss vs f1s
# Hand-recorded (loss, F1) pairs from earlier runs, plotted for comparison.
y_loss=[-18693.37,-29153.74,-33418.08,-42678.89]
x_f1s =[0.432,0.431,0.43,0.43]
text=["snorkel-thetas","normalized-thetas-ep7","Un-normalized-trained-thetas-ep7","Un-normalized-trained-thetas-ep15"]
drawLossVsF1(y_loss,x_f1s,text,"Spouse-Un-Normalized-Loss")
# -
# init random thetas
train_unl(0.1/len(train_L_S),15,tf.truncated_normal_initializer(0.2,0.1,12))
# +
# LFS init random thetas
train_unl(0.1/len(train_L_S),15,tf.truncated_normal_initializer(0.2,0.1,12))
# +
# snorkel
# Rank the snorkel-learned LF weights: `temp` lists the LF indices from
# smallest to largest weight, `ranks` gives each LF's rank in that order.
a = np.array([0.07472098, 0.07514459, 0.11910277, 0.11186369, 0.07306518,
              0.69216714, 0.07467749, 0.16012659, 0.13682546, 0.08183363])
temp = np.argsort(a.flatten())
ranks = np.empty_like(temp)
ranks[temp] = np.arange(a.size)
print(temp)
# +
# Rank the trained LF weights the same way as the snorkel ones above:
# `temp` = indices in ascending weight order, `ranks` = rank of each LF.
a = np.array([0.4751682, 0.46430319, 0.77729748, 0.69961045, 0.43660742, 4.98316919,
              0.4786732, -0.29070728, -0.31361022, -0.41560446])
temp = np.argsort(a.flatten())
ranks = np.empty_like(temp)
ranks[temp] = np.arange(a.size)
print(temp)
# +
# rerun old network to get thetas
#stochastic + weighted cross entropy logits func + remove min(theta,0) in loss -- Marked
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def train_NN():
    """Train the old per-example (stochastic) network to recover thetas.

    For each training example the graph computes phi(+1)/phi(-1) potentials
    from ReLU(s - alpha) scores and minimizes -logsumexp(phi) with SGD.
    Every 4000 examples and at each epoch end it prints alphas, thetas and
    dev precision/recall/F1.

    Relies on module-level globals: LFs, train_L_S, dev_L_S, gold_labels_dev
    and the sklearn metric helpers imported elsewhere in this notebook.
    """
    print()
    result_dir = "./"
    config = projector.ProjectorConfig()
    tf.logging.set_verbosity(tf.logging.INFO)
    summary_writer = tf.summary.FileWriter(result_dir)
    tf.reset_default_graph()
    dim = 2  # (labels, scores)
    _x = tf.placeholder(tf.float64,shape=(dim,len(LFs)))
    alphas = tf.get_variable('alpha', _x.get_shape()[-1],initializer=tf.constant_initializer(0.2),
                             dtype=tf.float64)
    thetas = tf.get_variable('theta', _x.get_shape()[-1],initializer=tf.constant_initializer(1),
                             dtype=tf.float64)
    l,s = tf.unstack(_x)
    # Thresholded LF scores: ReLU(s - alpha), signed by the LF labels below.
    prelu_out_s = tf.maximum(tf.subtract(s,alphas), tf.zeros(shape=(len(LFs)),dtype=tf.float64))
    mul_L_S = tf.multiply(l,prelu_out_s)
    # Potentials for y = +1 and y = -1.
    phi_p1 = tf.reduce_sum(tf.multiply(mul_L_S,thetas))
    phi_n1 = tf.reduce_sum(tf.multiply(tf.negative(mul_L_S),thetas))
    phi_out = tf.stack([phi_n1,phi_p1])
    # predict is the argmax index over the two potentials, so it is 0 or 1.
    predict = tf.argmax(tf.nn.softmax(phi_out))
    loss = tf.negative(tf.reduce_logsumexp(phi_out))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    check_op = tf.add_check_numerics_ops()  # NaN/Inf guard ops (never fetched)
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    for i in range(5):
        c = 0
        te_prev=1
        total_te = 0
        for L_S_i in train_L_S:
            a,t,te_curr,_ = sess.run([alphas,thetas,loss,train_step],feed_dict={_x:L_S_i})
            total_te+=te_curr
            # Early stop when the loss change is negligible (the 1e-200
            # threshold effectively never triggers).
            if(abs(te_curr-te_prev)<1e-200):
                break
            if(c%4000==0):
                # Periodic dev evaluation.
                pl = []
                for L_S_i in dev_L_S:
                    a,t,de_curr,p = sess.run([alphas,thetas,loss,predict],feed_dict={_x:L_S_i})
                    pl.append(p)
                predicted_labels = pl
                print()
                print(total_te/4000)
                total_te=0
                print(a)
                print(t)
                print(predicted_labels.count(0),predicted_labels.count(1))
                print(c," dm ",precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='macro'))
                print(c," db ",precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='binary'))
            c+=1
            te_prev = te_curr
        # End-of-epoch dev evaluation.
        pl = []
        for L_S_i in dev_L_S:
            p = sess.run(predict,feed_dict={_x:L_S_i})
            pl.append(p)
        predicted_labels = pl
        print(i,total_te)
        # BUG FIX: predict is an argmax index and only yields 0/1, so the
        # original count(-1) was always 0; count class 0 instead (this now
        # matches the periodic print above).
        print(predicted_labels.count(0),predicted_labels.count(1))
        print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='macro'))
        print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='binary'))
train_NN()
# +
# rerun old network 2 to get thetas
#stochastic + weighted cross entropy logits func + remove min(theta,0) in loss -- Marked
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector
def train_NN():
    # Variant of the stochastic trainer above: alphas are not printed during
    # the periodic evaluation and only the macro PRF is reported there.
    # Relies on module-level globals: LFs, train_L_S, dev_L_S, gold_labels_dev
    # and the sklearn metric helpers imported elsewhere in this notebook.
    print()
    result_dir = "./"
    config = projector.ProjectorConfig()
    tf.logging.set_verbosity(tf.logging.INFO)
    summary_writer = tf.summary.FileWriter(result_dir)
    tf.reset_default_graph()
    dim = 2 #(labels,scores)
    _x = tf.placeholder(tf.float64,shape=(dim,len(LFs)))
    alphas = tf.get_variable('alpha', _x.get_shape()[-1],initializer=tf.constant_initializer(0.2),
                       dtype=tf.float64)
    thetas = tf.get_variable('theta', _x.get_shape()[-1],initializer=tf.constant_initializer(1),
                       dtype=tf.float64)
    l,s = tf.unstack(_x)
    # Thresholded LF scores: ReLU(s - alpha), signed by the LF labels below.
    prelu_out_s = tf.maximum(tf.subtract(s,alphas), tf.zeros(shape=(len(LFs)),dtype=tf.float64))
    mul_L_S = tf.multiply(l,prelu_out_s)
    # Potentials for y = +1 and y = -1.
    phi_p1 = tf.reduce_sum(tf.multiply(mul_L_S,thetas))
    phi_n1 = tf.reduce_sum(tf.multiply(tf.negative(mul_L_S),thetas))
    phi_out = tf.stack([phi_n1,phi_p1])
    # predict is the argmax index, so it is always 0 or 1 (never -1).
    predict = tf.argmax(tf.nn.softmax(phi_out))
    loss = tf.negative(tf.reduce_logsumexp(phi_out))
    train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
    check_op = tf.add_check_numerics_ops()
    sess = tf.Session()
    init = tf.global_variables_initializer()
    sess.run(init)
    for i in range(5):
        c = 0
        te_prev=1
        total_te = 0
        for L_S_i in train_L_S:
            a,t,te_curr,_ = sess.run([alphas,thetas,loss,train_step],feed_dict={_x:L_S_i})
            total_te+=te_curr
            # Effectively unreachable early-stop (threshold 1e-200).
            if(abs(te_curr-te_prev)<1e-200):
                break
            if(c%4000==0):
                # Periodic dev evaluation.
                pl = []
                for L_S_i in dev_L_S:
                    a,t,de_curr,p = sess.run([alphas,thetas,loss,predict],feed_dict={_x:L_S_i})
                    pl.append(p)
                predicted_labels = pl
                print()
                print(total_te/4000)
                total_te=0
#                 print(a)
                print(t)
#                 print()
                # NOTE(review): count(-1) is always 0 here since predict is in
                # {0, 1}; this likely should be count(0) as at epoch end below.
                print(predicted_labels.count(-1),predicted_labels.count(1))
                print(c," d ",precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='macro'))
            c+=1
            te_prev = te_curr
        # End-of-epoch dev evaluation.
        pl = []
        for L_S_i in dev_L_S:
            p = sess.run(predict,feed_dict={_x:L_S_i})
            pl.append(p)
        predicted_labels = pl
        print(i,total_te)
        print(predicted_labels.count(0),predicted_labels.count(1))
        print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='macro'))
        print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='binary'))
train_NN()
# +
# #stochastic + weighted cross entropy logits func + remove min(theta,0) in loss -- Marked
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
# import tensorflow as tf
# from tensorflow.contrib.tensorboard.plugins import projector
# def train_NN():
# print()
# result_dir = "./"
# config = projector.ProjectorConfig()
# tf.logging.set_verbosity(tf.logging.INFO)
# summary_writer = tf.summary.FileWriter(result_dir)
# tf.reset_default_graph()
# dim = 2 #(labels,scores)
# _x = tf.placeholder(tf.float64,shape=(dim,len(LFs)))
# alphas = tf.get_variable('alpha', _x.get_shape()[-1],initializer=tf.constant_initializer(0.2),
# dtype=tf.float64)
# thetas = tf.get_variable('theta', _x.get_shape()[-1],initializer=tf.constant_initializer(1),
# dtype=tf.float64)
# l,s = tf.unstack(_x)
# prelu_out_s = tf.maximum(tf.subtract(s,alphas), tf.zeros(shape=(len(LFs)),dtype=tf.float64))
# mul_L_S = tf.multiply(l,prelu_out_s)
# phi_p1 = tf.reduce_sum(tf.multiply(mul_L_S,thetas))
# phi_n1 = tf.reduce_sum(tf.multiply(tf.negative(mul_L_S),thetas))
# phi_out = tf.stack([phi_n1,phi_p1])
# predict = tf.argmax(tf.nn.softmax(phi_out))
# loss = tf.negative(tf.reduce_logsumexp(phi_out))
# train_step = tf.train.GradientDescentOptimizer(0.01).minimize(loss)
# check_op = tf.add_check_numerics_ops()
# sess = tf.Session()
# init = tf.global_variables_initializer()
# sess.run(init)
# for i in range(1):
# c = 0
# te_prev=1
# total_te = 0
# for L_S_i in train_L_S:
# a,t,te_curr,_ = sess.run([alphas,thetas,loss,train_step],feed_dict={_x:L_S_i})
# total_te+=te_curr
# if(abs(te_curr-te_prev)<1e-200):
# break
# if(c%4000==0):
# pl = []
# for L_S_i in dev_L_S:
# a,t,de_curr,p = sess.run([alphas,thetas,loss,predict],feed_dict={_x:L_S_i})
# pl.append(p)
# predicted_labels = [-1 if x==0 else x for x in pl]
# print()
# print(total_te/4000)
# total_te=0
# # print(a)
# # print(t)
# # print()
# print(predicted_labels.count(-1),predicted_labels.count(1))
# print(c," d ",precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='macro'))
# c+=1
# te_prev = te_curr
# pl = []
# for L_S_i in dev_L_S:
# p = sess.run(predict,feed_dict={_x:L_S_i})
# pl.append(p)
# predicted_labels = [-1 if x==0 else x for x in pl]
# print(i,total_te)
# print(predicted_labels.count(-1),predicted_labels.count(1))
# print(precision_recall_fscore_support(np.array(gold_labels_dev),np.array(predicted_labels),average='macro'))
# train_NN()
| intro_Z/Spouse-LossAnalysis-DiscreteLFs-bk.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from src.models.level_kv_div import klTrainer, utils, network, datasets
import torch
# Which data split to grade; used in both the input and output file paths.
mode = 'train'
# Raw feature tensor for the whole split, loaded via the project helper.
t = datasets.get_raw_tensor(f'../data/{mode}.csv')
# +
def far_func(sorted_dist: torch.tensor, indices: torch.tensor):
    """Return the columns beyond the 101 closest entries of each row.

    Drops the first ``100 + 1`` columns (the sample itself plus its 100
    nearest neighbours) from both the distance and index tensors.
    """
    cutoff = 100 + 1
    far_dist = sorted_dist[:, cutoff:]
    far_idx = indices[:, cutoff:]
    return far_dist, far_idx
def close_func(sorted_dist: torch.tensor, indices: torch.tensor):
    """Return the 101 closest entries of each row (the sample itself plus
    its 100 nearest neighbours) from the distance and index tensors.

    Fix: removed the unused local ``mid`` (computed but never referenced).
    """
    return sorted_dist[:, :100 + 1], indices[:, :100 + 1]
# Distances come back sorted per row; close_func/far_func select which
# slices of neighbours to keep.
_, close_idx, far_idx, _, _ = utils.calculate_distance(t, close_func, far_func)
# -
# Concatenate neighbour indices (close first, then far) and grade them.
indices = torch.cat((close_idx, far_idx), dim=1)
sim = utils.level_grading(indices, 100)
torch.save(sim, f'../data/{mode}.level.grading')
| notebooks/level_grade_s0.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# In this notebook, we'll be making notes about the initial chapters of Deep Learning for Computer Vision with Python, Vol 1
# Machine learning has been around for a long time, but now with better hardware and more data it has become ubiquitous.
# ### Deep Learning vs other kinds of artificial intelligence
# Before the advent of deep learning, many different features were extracted from an image so the artificial network could process it. Some examples were contours, edges, color groups and other features. With deep learning, there's no need for that: we just feed the raw pixels into the network and the features are learned automatically. Each layer represents a concept that goes up in abstraction — the first could be the edges of the image, the second the contours, the third the objects, and so on.
# In Adrian's opinion everything with more than two layers should be called deep learning, and with more than 10 very deep.
# ### Image Fundamentals
# Numpy represents the pixels in a 3D format, and they are width, height and depth. When we talk about depth, it's about the color channels that the image has. Another thing that is different than the normal is that width and height are really in this ordering, because that's how a matrix is represented.
# ##### BGR Ordering
# Because of historical reasons opencv uses BGR ordering in colors, instead of the normal RGB.
# ##### Neural Network algorithms image size
# Most of the algorithms use some fixed size for the images, some common choices are 32×32, 64×64, 224×224, 227×227, 256×256, and 299×299.
# ### Image Classification
# In machine learning we have the dataset and the so called data points, that is each individual image.
# #### Steps in image classification
# ##### Dataset gathering
# We need to have the images that will be used, and all of the groups should have the same quantity of images.
# ##### Splitting the dataset
# The dataset should be split into 3 different parts, and it's very important that none of these overlap. They are:
# Training Set: The dataset used to train the neural network
# Validation Set: The dataset used to set the hyperparameters (as learning rate, decay, etc)
# Testing Set: The dataset that will be used to evaluate the NN
# Some common divisions are: from 66.7% to 90% is reserved for the combined training and validation sets, with the validation set taking between 10 and 20% of that share.
| Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# 
# # Automated MachineLearning
# _**The model backtesting**_
#
# ## Contents
# 1. [Introduction](#Introduction)
# 2. [Setup](#Setup)
# 3. [Data](#Data)
# 4. [Prepare remote compute and data.](#prepare_remote)
# 5. [Create the configuration for AutoML backtesting](#train)
# 6. [Backtest AutoML](#backtest_automl)
# 7. [View metrics](#Metrics)
# 8. [Backtest the best model](#backtest_model)
# ## Introduction
# Model backtesting is used to evaluate a model's performance on historical data. To do that, we step back through the data set by the backtesting period several times, splitting the data into train and test sets at each step. These data sets are then used for training and evaluation of the model.<br>
# This notebook is intended to demonstrate backtesting on a single model, this is the best solution for small data sets with a few or one time series in it. For scenarios where we would like to choose the best AutoML model for every backtest iteration, please see [AutoML Forecasting Backtest Many Models Example](../forecasting-backtest-many-models/auto-ml-forecasting-backtest-many-models.ipynb) notebook.
# 
# This notebook demonstrates two ways of backtesting:
# - AutoML backtesting: we will train separate AutoML models for historical data
# - Model backtesting: from the first run we will select the best model trained on the most recent data, retrain it on the past data and evaluate.
# ## Setup
# +
import os
import numpy as np
import pandas as pd
import shutil
import azureml.core
from azureml.core import Experiment, Model, Workspace
# -
# This notebook is compatible with Azure ML SDK version 1.35.1 or later.
# Sanity-check the installed SDK version against the notebook's requirement.
print("You are currently using version", azureml.core.VERSION, "of the Azure ML SDK")
# As part of the setup you have already created a <b>Workspace</b>.
# +
# Connect to the AzureML workspace described by the local config.json.
ws = Workspace.from_config()
# Summarise the workspace details in a one-row table for display.
output = {}
output["Subscription ID"] = ws.subscription_id
output["Workspace"] = ws.name
output["SKU"] = ws.sku
output["Resource Group"] = ws.resource_group
output["Location"] = ws.location
# Use None (not the deprecated -1) to disable column-width truncation;
# -1 was deprecated in pandas 1.0 and raises in pandas >= 2.0.
pd.set_option("display.max_colwidth", None)
outputDf = pd.DataFrame(data=output, index=[""])
outputDf.T
# -
# ## Data
# For the demonstration purposes we will simulate one year of daily data. To do this we need to specify the following parameters: time column name, time series ID column names and label column name. Our intention is to forecast for two weeks ahead.
# +
# Column names and forecasting parameters shared by the whole notebook.
TIME_COLUMN_NAME = "date"
TIME_SERIES_ID_COLUMN_NAMES = "time_series_id"
LABEL_COLUMN_NAME = "y"
# Forecast two weeks ahead at daily frequency.
FORECAST_HORIZON = 14
FREQUENCY = "D"
def simulate_timeseries_data(
    train_len: int,
    test_len: int,
    time_column_name: str,
    target_column_name: str,
    time_series_id_column_name: str,
    time_series_number: int = 1,
    freq: str = "H",
):
    """
    Return the time series of designed length.

    :param train_len: The length of training data (one series).
    :type train_len: int
    :param test_len: The length of testing data (one series).
    :type test_len: int
    :param time_column_name: The desired name of a time column.
    :type time_column_name: str
    :param target_column_name: The desired name of the target/label column.
    :type target_column_name: str
    :param time_series_id_column_name: The desired name of the column that
        identifies each individual series (values "ts0", "ts1", ...).
    :type time_series_id_column_name: str
    :param time_series_number: The number of time series in the data set.
    :type time_series_number: int
    :param freq: The frequency string representing pandas offset.
                 see https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
    :type freq: str
    :returns: the tuple (X_train, y_train, X_test, y_test) where the X parts
        are DataFrames without the target column and the y parts are arrays.
    :rtype: tuple
    """
    data_train = []  # type: List[pd.DataFrame]
    data_test = []  # type: List[pd.DataFrame]
    data_length = train_len + test_len
    for i in range(time_series_number):
        # Each series is a noisy linear trend shifted by 5*i so the series
        # do not overlap, plus a deterministic external predictor column.
        X = pd.DataFrame(
            {
                time_column_name: pd.date_range(
                    start="2000-01-01", periods=data_length, freq=freq
                ),
                target_column_name: np.arange(data_length).astype(float)
                + np.random.rand(data_length)
                + i * 5,
                "ext_predictor": np.asarray(range(42, 42 + data_length)),
                time_series_id_column_name: np.repeat("ts{}".format(i), data_length),
            }
        )
        # Chronological split: the first train_len rows train, the rest test.
        data_train.append(X[:train_len])
        data_test.append(X[train_len:])
    train = pd.concat(data_train)
    label_train = train.pop(target_column_name).values
    test = pd.concat(data_test)
    label_test = test.pop(target_column_name).values
    return train, label_train, test, label_test
# Simulate one year of daily data for two series, holding out a 14-day tail.
n_test_periods = FORECAST_HORIZON
n_train_periods = 365
X_train, y_train, X_test, y_test = simulate_timeseries_data(
    train_len=n_train_periods,
    test_len=n_test_periods,
    time_column_name=TIME_COLUMN_NAME,
    target_column_name=LABEL_COLUMN_NAME,
    time_series_id_column_name=TIME_SERIES_ID_COLUMN_NAMES,
    time_series_number=2,
    freq=FREQUENCY,
)
# AutoML expects the label as a column of the training frame.
X_train[LABEL_COLUMN_NAME] = y_train
# -
# Let's see what the training data looks like.
X_train.tail()
# ### Prepare remote compute and data. <a id="prepare_remote"></a>
# The [Machine Learning service workspace](https://docs.microsoft.com/en-us/azure/machine-learning/service/concept-workspace), is paired with the storage account, which contains the default data store. We will use it to upload the artificial data and create [tabular dataset](https://docs.microsoft.com/en-us/python/api/azureml-core/azureml.data.tabulardataset?view=azure-ml-py) for training. A tabular dataset defines a series of lazily-evaluated, immutable operations to load data from the data source into tabular representation.
# +
from azureml.data.dataset_factory import TabularDatasetFactory
ds = ws.get_default_datastore()
# Upload saved data to the default data store.
# Registering the dataframe creates a tabular dataset named "data_backtest".
train_data = TabularDatasetFactory.register_pandas_dataframe(
    X_train, target=(ds, "data"), name="data_backtest"
)
# -
# You will need to create a compute target for backtesting. In this [tutorial](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute), you create AmlCompute as your training compute resource.
#
# > Note that if you have an AzureML Data Scientist role, you will not have permission to create compute resources. Talk to your workspace or IT admin to create the compute targets described in this section, if they do not already exist.
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
# Choose a name for your CPU cluster
amlcompute_cluster_name = "backtest-cluster"
# Verify that cluster does not exist already
try:
compute_target = ComputeTarget(workspace=ws, name=amlcompute_cluster_name)
print("Found existing cluster, use it.")
except ComputeTargetException:
compute_config = AmlCompute.provisioning_configuration(
vm_size="STANDARD_DS12_V2", max_nodes=6
)
compute_target = ComputeTarget.create(ws, amlcompute_cluster_name, compute_config)
compute_target.wait_for_completion(show_output=True)
# -
# ## Create the configuration for AutoML backtesting <a id="train"></a>
#
# This dictionary defines the AutoML and many models settings. For this forecasting task we need to define several settings including the name of the time column, the maximum forecast horizon, and the partition column name definition.
#
# | Property | Description|
# | :--------------- | :------------------- |
# | **task** | forecasting |
# | **primary_metric** | This is the metric that you want to optimize.<br> Forecasting supports the following primary metrics <br><i>normalized_root_mean_squared_error</i><br><i>normalized_mean_absolute_error</i> |
# | **iteration_timeout_minutes** | Maximum amount of time in minutes that the model can train. This is optional but provides customers with greater control on exit criteria. |
# | **iterations** | Number of models to train. This is optional but provides customers with greater control on exit criteria. |
# | **experiment_timeout_hours** | Maximum amount of time in hours that the experiment can take before it terminates. This is optional but provides customers with greater control on exit criteria. |
# | **label_column_name** | The name of the label column. |
# | **max_horizon** | The forecast horizon is how many periods forward you would like to forecast. This integer horizon is in units of the timeseries frequency (e.g. daily, weekly). Periods are inferred from your data. |
# | **n_cross_validations** | Number of cross validation splits. Rolling Origin Validation is used to split time-series in a temporally consistent way. |
# | **time_column_name** | The name of your time column. |
# | **grain_column_names** | The column names used to uniquely identify timeseries in data that has multiple rows with the same timestamp. |
# AutoML configuration for the forecasting task; the markdown table above
# documents the meaning of each key.
automl_settings = {
    "task": "forecasting",
    "primary_metric": "normalized_root_mean_squared_error",
    "iteration_timeout_minutes": 10,  # This needs to be changed based on the dataset. We ask customer to explore how long training is taking before settings this value
    "iterations": 15,
    "experiment_timeout_hours": 1,  # This also needs to be changed based on the dataset. For larger data set this number needs to be bigger.
    "label_column_name": LABEL_COLUMN_NAME,
    "n_cross_validations": 3,
    "time_column_name": TIME_COLUMN_NAME,
    "max_horizon": FORECAST_HORIZON,
    "track_child_runs": False,
    "grain_column_names": TIME_SERIES_ID_COLUMN_NAMES,
}
# ## Backtest AutoML <a id="backtest_automl"></a>
# First we set backtesting parameters: we will step back by 30 days and will make 5 such steps; for each step we will forecast for next two weeks.
# The number of periods to step back on each backtest iteration.
BACKTESTING_PERIOD = 30
# The number of times we will back test the model.
NUMBER_OF_BACKTESTS = 5
# To train AutoML on backtesting folds we will use the [Azure Machine Learning pipeline](https://docs.microsoft.com/en-us/azure/machine-learning/concept-ml-pipelines). It will generate backtest folds, then train model for each of them and calculate the accuracy metrics. To run pipeline, you also need to create an <b>Experiment</b>. An Experiment corresponds to a prediction problem you are trying to solve (here, it is a forecasting), while a Run corresponds to a specific approach to the problem.
# +
from uuid import uuid1
from pipeline_helper import get_backtest_pipeline
pipeline_exp = Experiment(ws, "automl-backtesting")
# We will create the unique identifier to mark our models.
model_uid = str(uuid1())
# Build the backtesting pipeline: it generates backtest folds, trains an
# AutoML model per fold, and computes the accuracy metrics.
pipeline = get_backtest_pipeline(
    experiment=pipeline_exp,
    dataset=train_data,
    # The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.
    process_per_node=2,
    # The maximum number of nodes for our compute is 6.
    node_count=6,
    compute_target=compute_target,
    automl_settings=automl_settings,
    step_size=BACKTESTING_PERIOD,
    step_number=NUMBER_OF_BACKTESTS,
    model_uid=model_uid,
)
# -
# Run the pipeline and wait for results.
pipeline_run = pipeline_exp.submit(pipeline)
pipeline_run.wait_for_completion(show_output=False)
# After the run is complete, we can download the results.
metrics_output = pipeline_run.get_pipeline_output("results")
metrics_output.download("backtest_metrics")
# ## View metrics<a id="Metrics"></a>
# To distinguish these metrics from the model backtest, which we will obtain in the next section, we will move the directory with metrics out of the backtest_metrics and will remove the parent folder. We will create the utility function for that.
def copy_scoring_directory(new_name):
    """Move the newest downloaded results folder out of ``backtest_metrics``.

    Locates the most recently created run directory under
    ``backtest_metrics/azureml``, announces its creation time, relocates its
    ``results`` subfolder to ``new_name``, and deletes the now-redundant
    download directory.

    :param new_name: Destination path for the extracted ``results`` folder.
    """
    download_root = os.path.join("backtest_metrics", "azureml")
    run_dirs = (os.path.join(download_root, entry) for entry in os.listdir(download_root))
    # The newest directory corresponds to the latest pipeline download.
    newest = max(run_dirs, key=os.path.getctime)
    print(
        f"The output directory {newest} was created on {pd.Timestamp(os.path.getctime(newest), unit='s')} GMT."
    )
    shutil.move(os.path.join(newest, "results"), new_name)
    shutil.rmtree("backtest_metrics")
# Move the directory and list its contents.
copy_scoring_directory("automl_backtest")
pd.DataFrame({"File": os.listdir("automl_backtest")})
# The directory contains a set of files with results:
# - forecast.csv contains forecasts for all backtest iterations. The backtest_iteration column contains iteration identifier with the last training date as a suffix
# - scores.csv contains all metrics. If data set contains several time series, the metrics are given for all combinations of time series id and iterations, as well as scores for all iterations and time series id are marked as "all_sets"
# - plots_fcst_vs_actual.pdf contains the predictions vs forecast plots for each iteration and time series.
#
# For demonstration purposes we will display the table of metrics for one of the time series with ID "ts0". Again, we will create a utility function, which will be reused in model backtesting.
# +
def get_metrics_for_ts(all_metrics, ts):
    """
    Get the metrics for the time series with ID ts and return it as pandas data frame.

    :param all_metrics: The table with all the metrics; must contain the
                        columns "time_series_id", "metric_name" and "metric".
    :param ts: The ID of a time series of interest.
    :return: The pandas DataFrame with metrics for one time series, indexed
             by metric name with one column per backtest iteration, or None
             if no time series ID starts with ``ts``.
    """
    results_df = None
    for ts_id, one_series in all_metrics.groupby("time_series_id"):
        if not ts_id.startswith(ts):
            continue
        # The iteration label is the suffix after the last "|" separator.
        iteration = ts_id.split("|")[-1]
        # Work on a copy and avoid in-place mutation: the original code called
        # rename/set_index with inplace=True on a column slice, which mutates
        # a view of the caller's frame and raises SettingWithCopyWarning.
        df = one_series[["metric_name", "metric"]].copy()
        df = df.rename({"metric": iteration}, axis=1).set_index("metric_name")
        if results_df is None:
            results_df = df
        else:
            results_df = results_df.merge(
                df, how="inner", left_index=True, right_index=True
            )
    # Guard the no-match case: previously results_df stayed None and
    # sort_index below raised AttributeError.
    if results_df is None:
        return None
    results_df.sort_index(axis=1, inplace=True)
    return results_df
metrics_df = pd.read_csv(os.path.join("automl_backtest", "scores.csv"))
ts_id = "ts0"
get_metrics_for_ts(metrics_df, ts_id)
# -
# Forecast vs actuals plots.
# +
from IPython.display import IFrame
IFrame("./automl_backtest/plots_fcst_vs_actual.pdf", width=800, height=300)
# -
# # <font color='blue'>Backtest the best model</font> <a id="backtest_model"></a>
#
# For model backtesting we will use the same parameters we used to backtest AutoML. All the models we obtained in the previous run were registered in our workspace. To identify each model, it was assigned a tag with the last training date.
# Retrieve every model registered by the backtesting experiment.
model_list = Model.list(ws, tags={"experiment": "automl-backtesting"})
# Collect name/training-date pairs for the models produced by this run only.
model_data = {"name": [], "last_training_date": []}
for model in model_list:
    # Skip models from other pipeline runs: they must carry both tags and
    # match the unique identifier generated before the run.
    if (
        "last_training_date" not in model.tags
        or "model_uid" not in model.tags
        or model.tags["model_uid"] != model_uid
    ):
        continue
    model_data["name"].append(model.name)
    model_data["last_training_date"].append(
        pd.Timestamp(model.tags["last_training_date"])
    )
df_models = pd.DataFrame(model_data)
# Order chronologically so the most recently trained model comes last.
df_models.sort_values(["last_training_date"], inplace=True)
df_models.reset_index(inplace=True, drop=True)
df_models
# We will backtest the model trained on the most recent data.
model_name = df_models["name"].iloc[-1]
model_name
# ### Retrain the models.
# Assemble the pipeline, which will retrain the best model from AutoML run on historical data.
# +
pipeline_exp = Experiment(ws, "model-backtesting")
pipeline = get_backtest_pipeline(
experiment=pipeline_exp,
dataset=train_data,
# The STANDARD_DS12_V2 has 4 vCPU per node, we will set 2 process per node to be safe.
process_per_node=2,
# The maximum number of nodes for our compute is 6.
node_count=6,
compute_target=compute_target,
automl_settings=automl_settings,
step_size=BACKTESTING_PERIOD,
step_number=NUMBER_OF_BACKTESTS,
model_name=model_name,
)
# -
# Launch the backtesting pipeline.
pipeline_run = pipeline_exp.submit(pipeline)
pipeline_run.wait_for_completion(show_output=False)
# The metrics are stored in the pipeline output named "score". The next code will download the table with metrics.
metrics_output = pipeline_run.get_pipeline_output("results")
metrics_output.download("backtest_metrics")
# Again, we will copy the data files from the downloaded directory, but in this case we will call the folder "model_backtest"; it will contain the same files as the one for AutoML backtesting.
copy_scoring_directory("model_backtest")
# Finally, we will display the metrics.
model_metrics_df = pd.read_csv(os.path.join("model_backtest", "scores.csv"))
get_metrics_for_ts(model_metrics_df, ts_id)
# Forecast vs actuals plots.
# +
from IPython.display import IFrame
IFrame("./model_backtest/plots_fcst_vs_actual.pdf", width=800, height=300)
| python-sdk/tutorials/automl-with-azureml/forecasting-backtest-single-model/auto-ml-forecasting-backtest-single-model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# I want to be able to sit down and try an image processing idea in 10 minutes
#
# ```
# image_collection = ... # {image, roi, label}
# distraction_coll = ... # {image}
#
# train, test = split(image_collection)
# avg_roi_size = avg_roi_size_f(train)
# pos_samples = sample(train) + sample(flipped(train))
# neg_samples = sample(distraction_coll)
#
# model = train_model(union(pos_samples, neg_samples))
# results = fit(model, test)
# ```
#
# It should be as simple as that, but it's made more complicated when you start needing to fill in the details, and much more so when you start trying to make the execution efficient by, for instance, distributing it & caching values
# Keep it small. I have a list of filenames. I want to work with them, but don't want to load everything into memory or operate on all of them at the same time
#
# ```
# image_coll = multiset(full_set, sample_set)
#
# def process_image(item):
# im = cv2.imread(item.image_fn)
# box = box_read(item.box_fn)
# roi = resample(im, box)
# hist= hog(roi)
# return {im, box, roi, hist}
# ```
| blog/2021-05-02 image pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Hello, this is my first notebook, reading the data and manipulating it
# +
# Importing the packages we will need
from __future__ import print_function
from __future__ import division
from IPython.display import display
from ipywidgets import Button, Layout
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as ipw
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(style="darkgrid")
# -
# Defining a function plot that plots y against x and creates labels
def plot(x, y, ax, title, y_label):
    """Draw y against x on the given axes with a title and y-axis label.

    :param x: Values for the horizontal axis.
    :param y: Values for the vertical axis.
    :param ax: The matplotlib axes object to draw on.
    :param title: Text placed above the plot.
    :param y_label: Label for the vertical axis.
    """
    ax.set_title(title)
    ax.set_ylabel(y_label)
    ax.plot(x, y)
    # Remove the default padding so the curve spans the full axes.
    ax.margins(x=0, y=0)
def run_calculation(slope,aspect,albedo,roughness):
    """Read the sample meteorological data, plot air temperature scaled by
    albedo against running time, and write the time series to output1.xlsx.

    NOTE(review): only ``albedo`` is used below; ``slope``, ``aspect`` and
    ``roughness`` are accepted from the UI but currently ignored — confirm
    whether they are meant to enter the calculation.

    :param slope: Slope value from the UI widget (currently unused).
    :param aspect: Aspect value from the UI widget (currently unused).
    :param albedo: Factor multiplied with the air-temperature column.
    :param roughness: Roughness value from the UI widget (currently unused).
    """
    # Input parameters that don't change for the practical
    # NOTE(review): these constants are not referenced anywhere below.
    lat=63.5
    long=-19.5
    ref_long=0.0
    summertime=0.0
    elevation=300.0
    met_stat_elevation=300.0
    lapse_rate=0.0056
    # Reading the meteorological data file
    df = pd.read_excel('Samplmet.xls')
    # What does the file look like? Printing the head of it
    # df.head()
    # Renaming the columns
    df.columns = ['day', 'time', 'inswrad', 'avp', 'airtemp','windspd']
    # Decimal day number; dividing 'time' by 2400 suggests it is encoded as
    # HHMM — TODO confirm against the data file.
    running_time=df.day + df.time/2400
    temperature=df.airtemp*albedo
    fig, ax = plt.subplots()
    plot(running_time, temperature, ax, 'Time in days', 'Air temperature x albedo')
    import xlsxwriter
    # Create a workbook and add a worksheet.
    workbook = xlsxwriter.Workbook('output1.xlsx')
    worksheet = workbook.add_worksheet()
    # Start from the first cell. Rows and columns are zero indexed.
    row = 0
    col = 0
    worksheet.write(row, col, 'Time')
    row += 1
    # Iterate over the data and write it out row by row.
    for time_entry in (running_time):
        worksheet.write(row, col, time_entry)
        row += 1
    # Write a total using a formula.
    # NOTE(review): the formula sums only B1:B4 regardless of how many rows
    # were written — confirm whether the fixed range is intended.
    worksheet.write(row, 0, 'Total')
    worksheet.write(row, 1, '=SUM(B1:B4)')
    workbook.close()
# +
# Make user interface
def getvalue(x):
    """Identity helper handed to ``interactive`` so the widget's current
    value can later be read back through the widget's ``.result`` attribute.

    :param x: The widget value.
    :return: ``x`` unchanged.
    """
    return x
w1 = interactive(getvalue, x=ipw.BoundedFloatText(value=7.5,min=0,max=10.0,step=0.1,description='Slope:',disabled=False))
display(w1)
w2 = interactive(getvalue, x=ipw.BoundedFloatText(value=7.5,min=0,max=10.0,step=0.1,description='Aspect:',disabled=False))
display(w2)
w3 = interactive(getvalue, x=ipw.BoundedFloatText(
value=0.5,
min=0,
max=1.0,
step=0.0001,
description='Albedo:',
disabled=False
))
display(w3)
w4 = interactive(getvalue, x=ipw.BoundedFloatText(
value=0.5,
min=0,
max=1.0,
step=0.0001,
description='Roughness:',
disabled=False
))
display(w4)
# +
# Loading the input parameters defined by the user
from IPython.display import display
# Button that triggers one model run with the values currently in the widgets.
button = ipw.Button(description="Load these values and run the model!")
# Output widget that captures everything printed by the click handler.
output = ipw.Output()
display(button, output)
def on_button_clicked(b):
    """Click handler: read the four widget values and run the model.

    :param b: The Button instance that fired the event (unused).
    """
    with output:
        print("Loading values and running the model.")
        # .result holds the latest value produced by each interactive widget.
        slope=w1.result
        aspect=w2.result
        albedo=w3.result
        roughness=w4.result
        print('The slope is %.1f, the aspect is %.1f, the albedo is %.4f, and the roughness is %.4f.' % (slope,aspect,albedo,roughness))
        run_calculation(slope,aspect,albedo,roughness)
button.on_click(on_button_clicked)
# -
| second.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Aprendizaje Automatizado con Regresión Lineal Simple
import numpy as np
import random
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
import matplotlib.pyplot as plt
# %matplotlib inline
# Generador de distribución de datos para la regresión simple
def generador_datos_simple(beta, muestras, desviación):
    """Generate a synthetic (x, y) sample for simple linear regression.

    x is drawn uniformly from [0, 100); y = beta * x plus Gaussian noise
    with the given standard deviation.

    :param beta: True slope of the underlying linear relationship.
    :param muestras: Number of samples to draw.
    :param desviación: Standard deviation of the additive noise.
    :return: Tuple (x, y) of column vectors, each with shape (muestras, 1).
    """
    predictores = np.random.random(muestras) * 100
    ruido = np.random.randn(muestras) * desviación
    respuesta = predictores * beta + ruido
    return predictores.reshape((muestras, 1)), respuesta.reshape((muestras, 1))
# El código de la universidad no lo dice implicitamente, pero el modelo es el resultado de dos cosas diferentes. Primero, un juego de datos x,y generado por `generador_datos_simple` que es solo números, y luego el objeto instanciado de modelo en la linea:
#
# `modelo = linear_model.LinearRegression()`
#
# Este modelo aún depende de los datos `x,y` para ser válido, es un objeto instanciado vacío hasta ese momento, pero tiene los métodos para generar la regresión lineal.
desviacion = 200
beta = 10
n = 50
x,y = generador_datos_simple(beta, n, desviacion)
plt.scatter(x,y)
plt.show()
# Crear el juego de datos con la libreria linear_model
modelo = linear_model.LinearRegression()
# Entrenar el modelo con la misma libreria (hereda las propiedades)
modelo.fit(x, y)
# Veamos un coeficiente
print(u'Coeficiente beta1: ', modelo.coef_[0])
# Con este modelo podemos predecir los valores de $\hat{y}$ y además obtener el error cuadrático y el R^2.
# +
y_pred = modelo.predict(x)
print(u'Error cuadrático medio: ', mean_squared_error(y, y_pred))
print(u'Estadístico R_2: ', r2_score(y, y_pred))
# -
# Hagamos un gráfico del resultado.
# +
plt.scatter(x,y)
plt.plot(x, y_pred, color='red')
x_real = np.array([0,100])
y_real = x_real * beta
plt.plot(x_real, y_real, color = 'green')
plt.show()
# -
| AA/MLRegresionSimple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import nums
import nums.numpy as nps
from nums.models.glms import LogisticRegression
nums.init()
# +
# Make dataset.
# Two 1-D Gaussian clusters: class 0 centred at 5.0, class 1 centred at 10.0.
X1 = nps.random.randn(500, 1) + 5.0
y1 = nps.zeros(shape=(500,), dtype=bool)
X2 = nps.random.randn(500, 1) + 10.0
y2 = nps.ones(shape=(500,), dtype=bool)
# Stack both clusters into one 1000-sample training set.
X = nps.concatenate([X1, X2], axis=0)
y = nps.concatenate([y1, y2], axis=0)
# +
# Train Logistic Regression Model.
# NOTE(review): max_iter=1 performs a single Newton step — presumably enough
# for this well-separated toy data; confirm if full convergence is intended.
model = LogisticRegression(solver="newton", tol=1e-8, max_iter=1)
model.fit(X, y)
y_pred = model.predict(X)
# Training-set accuracy; .get() presumably materializes the distributed
# scalar on the driver — confirm against the nums API.
print("accuracy", (nps.sum(y == y_pred) / X.shape[0]).get())
| examples/notebooks/logistic_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Forming Queries
#
# Get Familiar with querying the database. BUT don't forget your [cheat sheets](https://snowexsql.readthedocs.io/en/latest/cheat_sheet.html)!
#
# ## Process
# ### Getting Connected
# Getting connected to the database is easiest done using the snowexsql library function [`get_db`](https://snowexsql.readthedocs.io/en/latest/snowexsql.html#snowexsql.db.get_db)
#
# +
# Import the function to get connect to the db
from snowexsql.db import get_db
# This is what you will use for all of hackweek to access the db
db_name = 'snow:hackweek@172.16.17.324/snowex'
# Using the function get_db, we receive 2 ways to interact with the database
engine, session = get_db(db_name)
# -
# ### Importing the tables classes
# These are critical for building queries. You will need at least one of these in every query since they reflect the data we're interested in.
#
from snowexsql.data import SiteData, PointData, LayerData, ImageData
# ### Query Time!
# We build queries in python using `session.query()`. Whatever we put inside of the query parentheses is what we will get back in the results!
# +
# Lets grab a single row from the points table
qry = session.query(PointData).limit(10)
# Execute that query!
result = qry.all()
# -
# Pause for moment and consider what is in `result`....
#
#
# Is it:
#
# A. a single value
# B. a bunch of values
# C. an object
# D. a row of values
#
# uncomment the line below and print out the results
print(result)
# This feels soooo *limited* :)
# + [markdown] tags=["nbsphinx-gallery", "nbsphinx-thumbnail"]
# **Questions**
# * What happens if we changed the number in the limit? What will we get back?
# * Where are our column names?
# * What if I only wanted a single column and not a whole row?
#
# -
# ## Filtering
# The database had a silly number of records, and asking for all of them will crash your computer.
#
# So let's talk about using `.filter()`
#
# All queries can be reduced by applying `session.query(__).filter(__)` and a lot can go into the parentheses. This is where your cheat sheet will come in handy.
# +
# Its convenient to store a query like the following
qry = session.query(LayerData)
# Then filter on it to just density profiles
qry = qry.filter(LayerData.type == 'density')
# protect ourselves from a lot of data
qry = qry.limit(5)
result = qry.all()
print(result)
# -
# **Questions**
# * What happens if I filter on a qry that's been filtered?
# * What happens if I just want a single column/attribute back? How do I do that?
#
# ### How do I know what to filter on?
# Queries and `.distinct()`!
# +
# Get the unique datanames in the table
results = session.query(LayerData.type).distinct().all()
print('Available types = {}'.format(', '.join([r[0] for r in results])))
# Get the unique instrument in the table
results = session.query(LayerData.instrument).distinct().all()
print('\nAvailable Instruments = {}'.format(', '.join([str(r[0]) for r in results])))
# Get the unique dates in the table
results = session.query(LayerData.date).distinct().all()
print('\nAvailable Dates = {}'.format(', '.join([str(r[0]) for r in results])))
# Get the unique surveyors in the table
results = session.query(LayerData.surveyors).distinct().all()
print('\nAvailable surveyors = {}'.format(', '.join([str(r[0]) for r in results])))
# -
# ## Recap
# You just explored using the session object to form queries and compounding filters results with it
#
# **You should know:**
# * How to build queries using filtering
# * How to isolate column data
# * Determine what values to filter on
#
# If you don't feel comfortable with these, you are probably not alone, let's discuss it!
# Close out the session to avoid hanging transactions
session.close()
| book/tutorials/database/3_forming_queries.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .js
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: JavaScript (Node.js)
// language: javascript
// name: javascript
// ---
// # User defined modules
// ## modules - require/import
// - https://nodejs.org/api/modules.html
// - large projects typically require JS code to be written in multiple files or modules
// - user defined modules and packages are sometime used as library in other modules
// - Nodejs by default uses CommonJS syntax to load module
// - syntax:
//
// ```javascript
// const modName = require('./moduleName')
// ```
//
// - .js extension of the module is optional
// - if relative package or module name is used, Node will look for the module relative to the current module where the external module is required
// - without a leading `/`, `./`, or `../` to indicate a file, the module must either be a core module or is loaded from a node_modules folder
// - circular module dependencies need to be avoided or carefully done to work properly
// - modules are cached after the first time they're loaded
// - import keyword is supported to import EcmaScript modules (ESM)
// - in order to use ESM import syntax, module's extension must be .mjs
// - see examples here for importing and exporting names: https://nodejs.org/api/esm.html
//
// ```javascript
// import {constName, varName, funcName, ...} from './moduleName.mjs';
// import * as modName from './moduleName.mjs';
// ```
//
// ### NOTE:
// - ESM can import CommonJS module but not vice versa
// ## modules - export CommonJS
//
// - modules must export names: functions, class and primitive values to be able to use by other module using import
// - In exporting module use CommonJS syntax:
//
// ```javascript
// module.exports = { name1, name2, ... };
// ```
// - requiring/importing module can be done two ways:
// - require the whole module and all the exported names
//
// ```javascript
// const mod_name = require('path/modulename.js')
// // use the identifiers defined in the module using member access (.) operator
// mode_name.name1...
// mode_name.name2(...)
// ```
// - require/import only the specific exported names
//
// ```javascript
// const {name1, name2, ...} = require('path/modulename')
// ```
// ## modules - export EcmaScript Module
// - module must have .mjs extension
// - exporting names from ESM is a little different
//
// ```javascript
// // define some identifiers: variables, functions, classes, etc.
// export {varName, functionName, ...};
// ```
// ## see JSDemos/moduleDemo.js and JSDemos/moduleDemo.mjs for a quick demos
// - module is executed when it is required or loaded into another
// - use guard to prevent unnecessary codes (such as tests) being executed if the module is expected to be used as library
// - CommonJS syntax
//
// ```javascript
// if (require.main === module) {
// // do what the module is supposed to when run as the main module
// main();
// }
// ```
//
// - ESM syntax
//
// ```javascript
// import {fileURLToPath} from 'url';
//
// // code...
//
// if (process.argv[1] === fileURLToPath(import.meta.url)) {
// // do what the module is supposed to when run as the main module
// main();
// }
// ```
//
// ## passing command line arguments to main module
// - the module name run with node interpreter is considered the main module
// - one can pass arguments to main module similar to passing arguments to main function in C/C++ program
//
// ```bash
// node module.js arg1 arg2 ...
// ```
// - module.js can capture and work with the arguments passed using global process.argv list
//
// ```nodejs
// process.argv[index]
// ```
//
// - index 0 is node program
// - index 1 is name of the module
// - index 2 and above are the indicies of user provided arguments while running the module
// - see `JSDemo/commandLineArgs.js` for demo
// ## Rules of thumb when creating a module
// - http://callbackhell.com/
// - Start by moving repeatedly used code into a function
// - When your function (or a group of functions related to the same theme) get big enough, move them into another file and expose them using *module.exports*. You can load this using a relative require
// - If you have some code that can be used across multiple projects give it it's own readme, tests and package.json and publish it to github and npm. There are too many awesome benefits to this specific approach to list here!
// - A good module is small and focuses on one problem
// - Individual files in a module should not be longer than around 150 lines of JavaScript
// - A module shouldn't have more than one level of nested folders full of JavaScript files. If it does, it is probably doing too many things
// - If it takes more than a few minutes to understand what is happening, it probably isn't a very good module.
| Ch07-Modules.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: DS 5110 Spark 3.1
# language: python
# name: ds5110_spark3.1
# ---
# +
from pyspark.sql import SparkSession
spark = SparkSession.builder \
.appName("comm") \
.getOrCreate()
# -
df = spark.read.csv("training.1600000.processed.noemoticon.csv")
df.count()
df.show(5)
df = df.withColumnRenamed("_c0", "label")\
.withColumnRenamed("_c1", "id")\
.withColumnRenamed("_c2", "time")\
.withColumnRenamed("_c3", "Query_Boolean")\
.withColumnRenamed("_c4", "Username")\
.withColumnRenamed("_c5", "Content")
df.show(2)
df.filter(df.label == 0).show(10)
df.createOrReplaceTempView("twitter")
result = spark.sql("""SELECT Username
,Content
FROM twitter
WHERE label = 0
AND Content LIKE '%sad%'""")
result.show(5, False)
# +
from pyspark.sql import functions as F
agg_df = df.groupBy("label").agg(F.count("id"))
agg_df.show()
# -
agg_df2 = df.groupBy("Query_Boolean").agg(F.count("id"))
agg_df2.show()
| EDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **This notebook is an exercise in the [Introduction to Machine Learning](https://www.kaggle.com/learn/intro-to-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/machine-learning-competitions).**
#
# ---
#
# # Introduction
#
# In this exercise, you will create and submit predictions for a Kaggle competition. You can then improve your model (e.g. by adding features) to apply what you've learned and move up the leaderboard.
#
# Begin by running the code cell below to set up code checking and the filepaths for the dataset.
# +
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex7 import *
# Set up filepaths
import os
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
# -
# Here's some of the code you've written so far. Start by running it again.
# +
# Import helpful libraries
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
# Load the data, and separate the target
iowa_file_path = '../input/train.csv'
home_data = pd.read_csv(iowa_file_path)
y = home_data.SalePrice
# Create X (After completing the exercise, you can return to modify this line!)
features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
# Select columns corresponding to features, and preview the data
X = home_data[features]
X.head()
# Split into validation and training data
train_X, val_X, train_y, val_y = train_test_split(X, y, random_state=1)
# Define a random forest model
rf_model = RandomForestRegressor(random_state=1)
rf_model.fit(train_X, train_y)
rf_val_predictions = rf_model.predict(val_X)
rf_val_mae = mean_absolute_error(rf_val_predictions, val_y)
print("Validation MAE for Random Forest Model: {:,.0f}".format(rf_val_mae))
# -
# # Train a model for the competition
#
# The code cell above trains a Random Forest model on **`train_X`** and **`train_y`**.
#
# Use the code cell below to build a Random Forest model and train it on all of **`X`** and **`y`**.
# +
# To improve accuracy, create a new Random Forest model which you will train on all training data
rf_model_on_full_data = RandomForestRegressor(random_state=1)
# fit rf_model_on_full_data on all data from the training data
rf_model_on_full_data.fit(X, y)
# -
# Now, read the file of "test" data, and apply your model to make predictions.
# +
# path to file you will use for predictions
test_data_path = '../input/test.csv'
# read test data file using pandas
test_data = pd.read_csv(test_data_path)
# create test_X which comes from test_data but includes only the columns you used for prediction.
# The list of columns is stored in a variable called features
test_X = test_data[features]
# make predictions which we will submit.
test_preds = rf_model_on_full_data.predict(test_X)
# -
# Before submitting, run a check to make sure your `test_preds` have the right format.
# Check your answer (To get credit for completing the exercise, you must get a "Correct" result!)
step_1.check()
step_1.solution()
# # Generate a submission
#
# Run the code cell below to generate a CSV file with your predictions that you can use to submit to the competition.
# +
# Run the code to save predictions in the format used for competition scoring
output = pd.DataFrame({'Id': test_data.Id,
'SalePrice': test_preds})
output.to_csv('my_submission.csv', index=False)
# -
# # Submit to the competition
#
# To test your results, you'll need to join the competition (if you haven't already). So open a new window by clicking on **[this link](https://www.kaggle.com/c/home-data-for-ml-course)**. Then click on the **Join Competition** button.
#
# 
#
# Next, follow the instructions below:
# 1. Begin by clicking on the **Save Version** button in the top right corner of the window. This will generate a pop-up window.
# 2. Ensure that the **Save and Run All** option is selected, and then click on the **Save** button.
# 3. This generates a window in the bottom left corner of the notebook. After it has finished running, click on the number to the right of the **Save Version** button. This pulls up a list of versions on the right of the screen. Click on the ellipsis **(...)** to the right of the most recent version, and select **Open in Viewer**. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions.
# 4. Click on the **Output** tab on the right of the screen. Then, click on the file you would like to submit, and click on the blue **Submit** button to submit your results to the leaderboard.
#
# You have now successfully submitted to the competition!
#
# If you want to keep working to improve your performance, select the **Edit** button in the top right of the screen. Then you can change your code and repeat the process. There's a lot of room to improve, and you will climb up the leaderboard as you work.
#
#
# # Continue Your Progress
# There are many ways to improve your model, and **experimenting is a great way to learn at this point.**
#
# The best way to improve your model is to add features. To add more features to the data, revisit the first code cell, and change this line of code to include more column names:
# ```python
# features = ['LotArea', 'YearBuilt', '1stFlrSF', '2ndFlrSF', 'FullBath', 'BedroomAbvGr', 'TotRmsAbvGrd']
# ```
#
# Some features will cause errors because of issues like missing values or non-numeric data types. Here is a complete list of potential columns that you might like to use, and that won't throw errors:
# - 'MSSubClass'
# - 'LotArea'
# - 'OverallQual'
# - 'OverallCond'
# - 'YearBuilt'
# - 'YearRemodAdd'
# - 'BsmtFinSF1'
# - 'BsmtFinSF2'
# - 'BsmtUnfSF'
# - 'TotalBsmtSF'
# - '1stFlrSF'
# - '2ndFlrSF'
# - 'LowQualFinSF'
# - 'GrLivArea'
# - 'BsmtFullBath'
# - 'BsmtHalfBath'
# - 'FullBath'
# - 'HalfBath'
# - 'BedroomAbvGr'
# - 'KitchenAbvGr'
# - 'TotRmsAbvGrd'
# - 'Fireplaces'
# - 'GarageCars'
# - 'GarageArea'
# - 'WoodDeckSF'
# - 'OpenPorchSF'
# - 'EnclosedPorch'
# - '3SsnPorch'
# - 'ScreenPorch'
# - 'PoolArea'
# - 'MiscVal'
# - 'MoSold'
# - 'YrSold'
#
# Look at the list of columns and think about what might affect home prices. To learn more about each of these features, take a look at the data description on the **[competition page](https://www.kaggle.com/c/home-data-for-ml-course/data)**.
#
# After updating the code cell above that defines the features, re-run all of the code cells to evaluate the model and generate a new submission file.
#
#
# # What's next?
#
# As mentioned above, some of the features will throw an error if you try to use them to train your model. The **[Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning)** course will teach you how to handle these types of features. You will also learn to use **xgboost**, a technique giving even better accuracy than Random Forest.
#
# The **[Pandas](https://kaggle.com/Learn/Pandas)** course will give you the data manipulation skills to quickly go from conceptual idea to implementation in your data science projects.
#
# You are also ready for the **[Deep Learning](https://kaggle.com/Learn/intro-to-Deep-Learning)** course, where you will build models with better-than-human level performance at computer vision tasks.
# ---
#
#
#
#
# *Have questions or comments? Visit the [Learn Discussion forum](https://www.kaggle.com/learn-forum/161285) to chat with other Learners.*
| Platforms/Kaggle/Courses/Intro_to_Machine_Learning/7.Machine_Learning_Competitions/exercise-machine-learning-competitions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
## Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Per-epoch timing log: column 0 = epoch index (0-based), column 1 = seconds.
df = pd.read_csv("gpu4/plot_time.csv", header=None)
df.columns = ["Epochs", "Time"]
# Shift to 1-based epoch numbering for display.
df["Epochs"] +=1
df
# Compare mean per-epoch time of the first 10 epochs vs. the full run.
df_1 = df.head(10)
print("df_1")
print(df_1)
print(df_1.Time.mean())
df_2 = df  # NOTE(review): alias of df, not a copy
print("df_2")
print(df_2)
print(df_2.Time.mean())
# # Time
# +
# Summary dicts: key = epoch count (as string), value = total training time (s).
# NOTE(review): string keys sort lexicographically; fine for "10"/"20" but
# would misorder three-digit epoch counts — confirm before adding more points.
dict_time = {"10": 800.0900908946991 ,
             "20" : 974.1935685634613}
plt.plot(*zip(*sorted(dict_time.items())), '--bo')
plt.title("Time VS Epochs")
plt.xlabel("Epochs")
plt.ylabel("Time(sec)")
plt.show()
# -
# # Accuracy
# +
# Final test accuracy (%) per run.
dict_acc = {"10": 83.01 ,
            "20" : 84.64}
plt.plot(*zip(*sorted(dict_acc.items())), '--bo')
plt.title("Accuracy VS Epochs")
plt.xlabel("Epochs")
plt.ylabel("Accuracy(%)")
plt.show()
# +
# Final loss per run.
dict_loss = {"10": 0.07805 ,
             "20" : 0.07341}
plt.plot(*zip(*sorted(dict_loss.items())), '--bo')
plt.title("Loss VS Epochs")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.show()
| Code/project code from discovery cluster/Comparison_EPOCHS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # package
# ## MediaPipe pose
# +
import cv2
import mediapipe as mp
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import pandas as pd
from matplotlib.pyplot import figure
import hickle as hkl
import time
import datetime
from matplotlib.animation import FuncAnimation
import psutil
import collections
import os
from IPython.display import clear_output
import copy
import hashlib
import socket
import scipy
from scipy import stats
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
# -
# # angle calculation function
def _joint_angle(ldm, i_a, i_b, i_c, w, h):
    """Angle (degrees) at landmark i_b formed by the segments b->a and b->c.

    Landmark coordinates are normalized; w/h scale them to pixel space.
    Returns -1 when any of the three landmarks is missing/falsy.
    """
    if not (ldm[i_a] and ldm[i_b] and ldm[i_c]):
        return -1
    a = np.array([ldm[i_a].x * w, ldm[i_a].y * h])
    b = np.array([ldm[i_b].x * w, ldm[i_b].y * h])
    c = np.array([ldm[i_c].x * w, ldm[i_c].y * h])
    ba = a - b
    bc = c - b
    cosine = np.dot(ba, bc) / (np.linalg.norm(ba) * np.linalg.norm(bc))
    return np.arccos(cosine) / np.pi * 180


def angle_calculate(holistic_landmarks, w, h):
    """Compute the left and right elbow angles from MediaPipe pose landmarks.

    holistic_landmarks: a pose-landmarks object with a `.landmark` sequence
        (MediaPipe indices 11/13/15 = left shoulder/elbow/wrist,
        12/14/16 = right shoulder/elbow/wrist), or None.
    w, h: image width/height used to de-normalize the landmark coordinates.

    Returns [left_angle, right_angle] in degrees; -1 is the sentinel for
    "landmarks unavailable" (kept from the original implementation).
    """
    angle1 = -1
    angle2 = -1
    if holistic_landmarks:
        ldm = holistic_landmarks.landmark
        # The left and right arms used identical duplicated code; it is now
        # factored into _joint_angle with the respective landmark triples.
        angle1 = _joint_angle(ldm, 11, 13, 15, w, h)
        angle2 = _joint_angle(ldm, 12, 14, 16, w, h)
    return [angle1, angle2]
# # pose data save
# +
# Store coordinates and image info keyed by timestamp and frame index, then reload and re-plot for analysis
# test the 2D area stable in the video stream
def pose_tracking_recording(filename):
    """Capture webcam frames, run MediaPipe Pose on each, live-plot the
    left/right arm angles, and persist the session to a compressed hickle file.

    filename: base name for the output file; ".hkl" is appended.
    Returns (data_s, fname) where data_s = [index_s, time_s, result_s,
    image_size] and fname is the path written.
    Runs until ESC (keycode 27) is pressed in the OpenCV window.
    """
    now = datetime.datetime.now()
    now_ts = time.time()
    # Accumulators for the saved payload.
    data_s = []
    index_s = []
    time_s = []
    result_s = []
    image_s = []
    image_size = []
    idx = 0
    face_data = []
    pose_data = []
    left_hand_data = []
    right_hand_data = []
    mp_drawing = mp.solutions.drawing_utils
    mp_drawing_styles = mp.solutions.drawing_styles
    # mp_hands = mp.solutions.hands
    # mp_holistic = mp.solutions.holistic
    mp_pose = mp.solutions.pose
    # POSE_CONNECTIONS = frozenset([(0, 1), (1, 2), (2, 3), (3, 7), (0, 4), (4, 5),
    #                              (5, 6), (6, 8), (9, 10), (11, 12), (11, 13),
    #                              (13, 15),
    #                              (12, 14), (14, 16),
    #                              (11, 23), (12, 24), (23, 24), (23, 25),
    #                              (24, 26), (25, 27), (26, 28), (27, 29), (28, 30),
    #                              (29, 31), (30, 32), (27, 31), (28, 32)])
    # hand_area_left = collections.deque(np.zeros(500))
    # hand_area_right = collections.deque(np.zeros(500))
    # Rolling 500-sample windows for the live angle plots.
    arm_angle_left = collections.deque(np.zeros(500))
    arm_angle_right = collections.deque(np.zeros(500))
    # hand_z = collections.deque(np.zeros(500))
    # hand_z1 = collections.deque(np.zeros(500))
    # hand_z2 = collections.deque(np.zeros(500))
    # hand_z3 = collections.deque(np.zeros(500))
    # hand_z4 = collections.deque(np.zeros(500))
    cap = cv2.VideoCapture(0)
    with mp_pose.Pose(
        static_image_mode=True,
        model_complexity=2,
        enable_segmentation=True,
        min_detection_confidence=0.5) as pose:
        while cap.isOpened():
            success, image = cap.read()
            # NOTE(review): ts is appended even for failed reads, so time_s
            # may be longer than pose_data — confirm downstream tolerance.
            ts = time.time()
            time_s.append(ts)
            if not success:
                # If loading a video, use 'break' instead of 'continue'.
                continue
            # To improve performance, optionally mark the image as not writeable to
            # pass by reference.
            image.flags.writeable = False
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            results = pose.process(image)
            image_height, image_width, _ = image.shape
            # Draw the hand annotations on the image.
            image.flags.writeable = True
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            # face_data.append(results.face_landmarks)
            pose_data.append(results.pose_landmarks)
            # left_hand_data.append(results.left_hand_landmarks)
            # right_hand_data.append(results.right_hand_landmarks)
            # pose_landmarks_fix = results.pose_landmarks
            # if pose_landmarks_fix is not None and results.left_hand_landmarks is not None and results.right_hand_landmarks is not None:
            #     pose_landmarks_fix.landmark[18].visibility = 0.1
            #     pose_landmarks_fix.landmark[20].visibility = 0.1
            #     pose_landmarks_fix.landmark[22].visibility = 0.1
            #     pose_landmarks_fix.landmark[17].visibility = 0.1
            #     pose_landmarks_fix.landmark[19].visibility = 0.1
            #     pose_landmarks_fix.landmark[21].visibility = 0.1
            #     pose_landmarks_fix.landmark[15].x = results.left_hand_landmarks.landmark[0].x
            #     pose_landmarks_fix.landmark[15].y = results.left_hand_landmarks.landmark[0].y
            #     pose_landmarks_fix.landmark[15].z = results.left_hand_landmarks.landmark[0].z
            #     pose_landmarks_fix.landmark[15].visibility = 1
            #     pose_landmarks_fix.landmark[16].x = results.right_hand_landmarks.landmark[0].x
            #     pose_landmarks_fix.landmark[16].y = results.right_hand_landmarks.landmark[0].y
            #     pose_landmarks_fix.landmark[16].z = results.right_hand_landmarks.landmark[0].z
            #     pose_landmarks_fix.landmark[16].visibility = 1
            # mp_drawing.draw_landmarks(
            #     image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
            #     mp_drawing.DrawingSpec(color=(255,0,0), thickness=2, circle_radius=4),
            #     mp_drawing.DrawingSpec(color=(255,0,0), thickness=2, circle_radius=2))
            # mp_drawing.draw_landmarks(
            #     image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS,
            #     mp_drawing.DrawingSpec(color=(0,0,255), thickness=2, circle_radius=4),
            #     mp_drawing.DrawingSpec(color=(0,0,255), thickness=2, circle_radius=2))
            # Overlay the detected skeleton in green on the camera frame.
            mp_drawing.draw_landmarks(
                image,
                results.pose_landmarks,
                mp_pose.POSE_CONNECTIONS,
                mp_drawing.DrawingSpec(color=(0,255,0), thickness=2, circle_radius=4),
                mp_drawing.DrawingSpec(color=(0,255,0), thickness=2, circle_radius=2))
            # mp_drawing.draw_landmarks(
            #     image, results.face_landmarks, mp_holistic.FACEMESH_CONTOURS,
            #     mp_drawing.DrawingSpec(color=(80, 110, 10), thickness=1, circle_radius=1),
            #     mp_drawing.DrawingSpec(color=(80, 256, 121), thickness=1, circle_radius=1))
            # calculate all necessary data then plot together
            # scale_area = 1000.0
            # if results.left_hand_landmarks:
            #     hand_area_left.popleft()
            #     area = areatriangle2d_sum(results.left_hand_landmarks,image_width,image_height)
            #     hand_area_left.append(area/scale_area)
            # else:
            #     hand_area_left.popleft()
            #     hand_area_left.append(0)
            # if results.right_hand_landmarks:
            #     hand_area_right.popleft()
            #     area = areatriangle2d_sum(results.right_hand_landmarks,image_width,image_height)
            #     hand_area_right.append(area/scale_area)
            # else:
            #     hand_area_right.popleft()
            #     hand_area_right.append(0)
            # Push the newest elbow angles into the rolling windows.
            ang = angle_calculate(results.pose_landmarks,image_width,image_height)
            arm_angle_left.popleft()
            arm_angle_left.append(ang[0])
            arm_angle_right.popleft()
            arm_angle_right.append(ang[1])
            clear_output(wait=True)
            # right arm
            name = 'angle_arm1'
            angle_right = ang[1]
            # IP = '127.0.0.1'
            stp = ts
            # Live side-by-side plots of the two angle windows.
            plt.subplot(223)
            plt.plot(arm_angle_left)
            plt.title("Left arm angle")
            plt.xlabel("Time")
            plt.ylabel("Angle")
            plt.subplot(224)
            plt.plot(arm_angle_right)
            plt.title("Right arm angle")
            plt.xlabel("Time")
            plt.ylabel("Angle")
            plt.show()
            # if results.multi_hand_landmarks:
            #     # save data
            #     index_s.append(idx)
            #     time_s.append(ts)
            #     result_s.append(results.multi_hand_landmarks)
            #     # image_s.append(image)
            #     for hand_landmarks in results.multi_hand_landmarks:
            #         clear_output(wait=True)
            #         hand_area.popleft()
            #         hand_z.popleft()
            #         area = areatriangle2d_sum(hand_landmarks,image_width,image_height)
            #         hand_area.append(area/1000)
            #         hand_z.append(hand_landmarks.landmark[4].z)
            #         plt.plot(hand_area)
            #         plt.title("Hand area tracking with time")
            #         plt.xlabel("Time")
            #         plt.ylabel("Hand area")
            #         plt.show()
            #         # output empty background not increase FPS
            #         # image = np.zeros((image_height,image_width,_))
            #         mp_drawing.draw_landmarks(
            #             image,
            #             hand_landmarks,
            #             mp_hands.HAND_CONNECTIONS,
            #             mp_drawing_styles.get_default_hand_landmarks_style(),
            #             mp_drawing_styles.get_default_hand_connections_style())
            # Flip the image horizontally for a selfie-view display.
            image = cv2.flip(image, 1)
            if len(time_s)>1:
                # Instantaneous FPS from the gap between the last two timestamps.
                cv2.putText(image, "FPS: " + str(round(1.0 / (time_s[-1] - time_s[-2]),0)), (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 255, 0), 2, )
            cv2.imshow('MediaPipe Holistic', image)
            idx = idx + 1
            if cv2.waitKey(5) & 0xFF == 27:
                break
    cap.release()
    cv2.destroyAllWindows()
    # Extra waitKey calls help the OpenCV window actually close on some platforms.
    for i in range (1,5):
        cv2.waitKey(1)
    # Assemble the payload: [index_s, time_s, result_s, image_size].
    # NOTE(review): index_s stays empty — the appends to it are commented out.
    data_s.append(index_s)
    data_s.append(time_s)
    # result_s.append(face_data)
    result_s.append(pose_data)
    # result_s.append(left_hand_data)
    # result_s.append(right_hand_data)
    data_s.append(result_s) # when the landmark is not null, store the result with
    # data_s.append(image_s)
    # NOTE(review): image_width/_height are from the last processed frame and
    # are undefined if no frame was ever read — confirm camera availability.
    image_size.append(image_width)
    image_size.append(image_height)
    data_s.append(image_size)
    # time_md5 = str(now.year) +'-'+str(now.month)+'-'+str(now.day)+ '-'+str(now.hour)+'-'+str(now.minute)+'-'+str(now.second)
    # result_md5 = hashlib.md5(time_md5.encode())
    # md5 = result_md5.hexdigest()
    # fname = filename+'-'+md5+'_gzip.hkl'
    fname = filename+'.hkl'
    # Dump data, with compression.
    hkl.dump(data_s, fname, mode='w', compression='gzip')
    print("data save success on: "+fname)
    tm_out = round((ts - now_ts),2)
    print("total time: " + str(tm_out)+' s')
    # 1048576 = bytes per MB.
    f_size = round(os.path.getsize(fname)/1048576,2)
    print("file size: " + str(f_size)+" MB")
    return data_s, fname
# -
# filename = "pose_right_small_10"
filename = "pose_right_big"
# filename = "pose_right_bare_10"
# Record webcam pose data until ESC is pressed, then persist to <filename>.hkl.
data_s, fname = pose_tracking_recording(filename)
# # pose data read
# +
# file_name = '2021-10-25-10-47-15_gzip.hkl'
def pose_data_read_plot(fname):
    """Load a recording written by pose_tracking_recording() and plot the
    right-arm angle over time.

    fname: path to the .hkl file produced by pose_tracking_recording().
    Returns (arm_angle_right, arm_angle_left, time_r, result_r,
    image_width, image_height, f_size, tm_out) where f_size is the file
    size in MB and tm_out the recording duration in seconds.
    """
    # data read and show
    # this recording is good example '2021-10-25-10-47-15_gzip.hkl'
    data_r = hkl.load(fname)
    mp_drawing = mp.solutions.drawing_utils
    mp_drawing_styles = mp.solutions.drawing_styles
    mp_pose = mp.solutions.pose
    # hand_area = collections.deque(np.zeros(500))
    hand_area = []
    # Payload layout mirrors pose_tracking_recording:
    # [index_s, time_s, result_s, image_size].
    time_r = data_r[1]
    result_r = data_r[2]
    image_size = data_r[3]
    image_width = image_size[0]
    image_height = image_size[1]
    arm_angle_left = []
    arm_angle_right = []
    idx = 0
    # result_r is a list of landmark-lists; each inner entry is one frame's
    # pose landmarks (the name "hand_landmarks" is historical).
    for i in result_r:
        for hand_landmarks in i:
            # image = image_r[idx]
            # clear_output(wait=True)
            # hand_area.popleft()
            ang = angle_calculate(hand_landmarks,image_width,image_height)
            arm_angle_left.append(ang[0])
            arm_angle_right.append(ang[1])
            # hand_area.append(area)
    print("data read success of: "+fname)
    tm_out = round((time_r[-1] - time_r[0]),2)
    print("total time: " + str(tm_out)+' s')
    # 1048576 = bytes per MB.
    f_size = round(os.path.getsize(fname)/1048576,2)
    print("file size: " + str(f_size)+" MB")
    hand_area = np.array(hand_area)
    # plt.figure(figsize=(8, 6), dpi=120)
    plt.plot(arm_angle_right)
    plt.title("Right arm angle")
    plt.xlabel("Time")
    plt.ylabel("Angle(degree)")
    plt.show()
    # return data_r
    return arm_angle_right, arm_angle_left, time_r, result_r,image_width, image_height,f_size,tm_out
# -
# Reload the recording just saved and plot the right-arm angle series.
arm_angle_right,arm_angle_left, time_r, result_r,image_width, image_height,f_size,tm_out = pose_data_read_plot(fname)
# # pose noise remove
# +
def hand_area_noise_rm(hand_area):
    """Find peaks/valleys in an angle (or area) time series and drop noise.

    hand_area: 1-D sequence of values sampled over time.
    Assumes the series actually contains extrema (a flat series makes
    np.max(data[a]) fail, as in the original) — TODO confirm callers.

    Denoising happens in three passes:
      1. drop extrema whose jump to the next extremum is < 40% of the
         overall extremum range,
      2. drop remaining peaks below 40% of the largest surviving peak,
      3. drop remaining valleys below 50% of the mean surviving valley.

    Returns (c_r, c, b_r, b): denoised peak indices, raw peak indices,
    denoised valley indices, raw valley indices (all into `hand_area`).
    """
    data = np.array(hand_area)
    # detection of local minimums and maximums ___
    a = np.diff(np.sign(np.diff(data))).nonzero()[0] + 1  # local min & max
    b = (np.diff(np.sign(np.diff(data))) > 0).nonzero()[0] + 1  # local min
    c = (np.diff(np.sign(np.diff(data))) < 0).nonzero()[0] + 1  # local max
    # Noise removal: collect positions (within b/c) of extrema whose value
    # jump to the following extremum is too small to be a real repetition.
    b_d = []
    c_d = []
    b = np.array(b)
    c = np.array(c)
    gap = np.max(data[a]) - np.min(data[a])
    noised_rate = 0.4
    for i in range(len(a) - 1):
        if np.abs(data[a[i]] - data[a[i + 1]]) < gap * noised_rate:
            b_t = np.where(b == a[i])
            if len(b_t[0]) > 0:
                b_d.append(b_t[0])
            c_t = np.where(c == a[i])
            if len(c_t[0]) > 0:
                c_d.append(c_t[0])
    b_r = np.delete(b, b_d)
    c_r = np.delete(c, c_d)
    # second remove the peak noise filtered by peak max
    data_cr = data[c_r]
    data_cr_r = np.where(data_cr < noised_rate * np.max(data_cr))
    c_r = np.delete(c_r, data_cr_r)
    # second remove the valley noise filtered by valley mean
    noised_rate_valley = 0.5
    data_br = data[b_r]
    data_br_r = np.where(data_br < noised_rate_valley * np.mean(data_br))
    b_r = np.delete(b_r, data_br_r)
    print('Real peak number:'+str(len(c_r)) + ' \noriginal peak number:'+str(len(c))+'\nremove noise:'+str(len(c)-len(c_r)))
    print('Real valley number:'+str(len(b_r)) + '\noriginal valley number:'+str(len(b))+'\nremove noise:'+str(len(b)-len(b_r)))
    return c_r, c, b_r, b
def hand_area_noise_rm_plot(hand_area, c_r, c, b_r, b):
    """Plot the angle series twice: once with all raw extrema, once with
    only the denoised extrema.

    hand_area: the original series; c/b are raw peak/valley indices and
    c_r/b_r their denoised counterparts (as returned by hand_area_noise_rm).
    Both figures keep the original titles/labels/legend verbatim.
    """
    series = np.array(hand_area)
    t = np.linspace(0, len(hand_area), len(hand_area), endpoint=False)

    def draw(valleys, peaks, dpi):
        # One figure: full series in grey, valleys in red, peaks in blue.
        plt.figure(figsize=(12, 5), dpi=dpi)
        plt.plot(t, series, color='grey')
        plt.plot(t[valleys], series[valleys], "o", label="min", color='r')
        plt.plot(t[peaks], series[peaks], "o", label="max", color='b')
        plt.title("Pose angle tracking by time with noise")
        plt.xlabel("Time")
        plt.ylabel("Angle(Degree)")
        plt.legend(['Pose angle change with time','Pose local min angle', 'Pose local max angle'])
        plt.show()

    # Raw extrema (noise included) at dpi=120, then denoised ones at dpi=150.
    draw(b, c, 120)
    draw(b_r, c_r, 150)
def hand_area_pv_speed(c_r,b_r,time_r):
    """Convert peak/valley indices to repetition speeds in events per minute.

    c_r, b_r: denoised peak/valley indices into `time_r`.
    time_r: per-frame timestamps in seconds.
    Returns [peak_speed, valley_speed, mean_speed], each rounded to 2 dp.
    """
    def per_minute(indices):
        # Events per minute over the span between first and last event.
        elapsed = time_r[indices[-1]] - time_r[indices[0]]
        return round(len(indices) / elapsed * 60, 2)

    peak_speed = per_minute(c_r)
    valley_speed = per_minute(b_r)
    mean_speed = round((peak_speed + valley_speed) / 2, 2)
    print('pose peak speed: '+str(peak_speed)+' per min')
    print('pose valley speed: '+str(valley_speed)+' per min')
    print('pose mean speed: '+str(mean_speed)+' per min')
    return [peak_speed, valley_speed, mean_speed]
# -
# Detect extrema in the right-arm angle series, drop noise, plot both raw and
# denoised views, then convert surviving extrema into per-minute speeds.
c_r, c, b_r, b = hand_area_noise_rm(arm_angle_right)
hand_area_noise_rm_plot(arm_angle_right, c_r, c, b_r, b)
speed = hand_area_pv_speed(c_r,b_r,time_r)
# # files read
# +
path = 'pose_data/'
# file_data.csv: one row per recording; column 0 = file name,
# columns 1-5 are filled in by the loop below.
df = pd.read_csv('file_data.csv')
n = df.shape[0]
print(n)
for i in range(n):
    k = i
    # Re-run the full angle -> extrema -> speed pipeline for each recording.
    arm_angle_right,arm_angle_left, time_r, result_r,image_width, image_height,f_size,tm_out = pose_data_read_plot(path + df.iloc[k][0])
    c_r, c, b_r, b = hand_area_noise_rm(arm_angle_right)
    # hand_area_noise_rm_plot(arm_angle_right, c_r, c, b_r, b)
    speed = hand_area_pv_speed(c_r,b_r,time_r)
    # Write results back: size (MB), duration (s), peak/valley/mean speed.
    df.iloc[k,1] = f_size
    df.iloc[k,2] = tm_out
    df.iloc[k,3] = speed[0]
    df.iloc[k,4] = speed[1]
    df.iloc[k,5] = speed[2]
    print(k)
    # Progress fraction (0..1); assumes n > 1 or this divides by zero.
    print(round(k/(n-1),2))
    clear_output(wait=True)
df.head(9)
# -
df.to_csv('file_data_01_01.csv')
# # compare analysis
# +
# Split rows into three 10-recording conditions (df.loc slicing is inclusive
# of both endpoints, unlike iloc).
df1 = df.loc[0:9]
df2 = df.loc[10:19]
df3 = df.loc[20:29]
# Keep the mean_speed column of each condition as a 1-column DataFrame.
df1_speed = df1[['mean_speed']]
df2_speed = df2[['mean_speed']]
df3_speed = df3[['mean_speed']]
# -
def t_compare(df_t):
    """Run pairwise independent two-sample t-tests between sample groups.

    df_t: sequence of equal-length sample arrays, one row per group.
    Originally hard-coded to exactly 3 groups; generalized to len(df_t)
    groups while keeping the identical pair order for the 3-group case:
    (0,1), (0,2), (1,2), ...

    Returns (re, m): the list of scipy ttest_ind results for every pair
    (i, j) with i < j in row order, and the per-group means along axis 1.
    """
    n_groups = len(df_t)
    re = []
    for i in range(n_groups):
        # Pair group i with every later group j = i+1 .. n_groups-1.
        for j in range(n_groups - 1 - i):
            re.append(scipy.stats.ttest_ind(df_t[i], df_t[i + j + 1]))
    m = np.mean(df_t, 1)
    print(m)
    for k in re:
        print(k)
    return re, m
# Pairwise t-tests across the three conditions via t_compare.
df_all = np.array([df1_speed,df2_speed,df3_speed])
re,m = t_compare(df_all)
print(df3_speed)
# The same three comparisons spelled out explicitly as a sanity check.
re1 = scipy.stats.ttest_ind(df1_speed,df2_speed)
re2 = scipy.stats.ttest_ind(df1_speed,df3_speed)
re3 = scipy.stats.ttest_ind(df2_speed,df3_speed)
print(re1)
print(re2)
print(re3)
| 05_pose/01_pose_angle_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import pandas as pd
messages=pd.read_csv('SMSSpamCollection',sep='\t',names=['label','message'])
messages
pip install nltk
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
nltk.download('wordnet')
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
ps=PorterStemmer()
lm=WordNetLemmatizer()
'''corpus=[]
for i in range(len(messages)):
review=re.sub('[^a-zA-Z]',' ',messages['message'][i])
review=review.lower()
review=review.split()
review=[ps.stem(word) for word in review if word not in stopwords.words('english')]
review=''.join(review)
corpus.append(review)'''
# Build the cleaned corpus: one lemmatized, stopword-free string per message.
Corpus=[]
for i in range(len(messages)):
    # Keep letters only, lowercase, then tokenize on whitespace.
    review=re.sub('[^a-zA-Z]',' ',messages['message'][i])
    review=review.lower()
    review=review.split()
    # Drop English stopwords and lemmatize each remaining token.
    review=[lm.lemmatize(Word) for Word in review if Word not in stopwords.words('english')]
    # BUG FIX: join with a space, not ''. ''.join() fused every word of a
    # message into one unbroken token, so CountVectorizer below could never
    # split the message back into words.
    review=' '.join(review)
    Corpus.append(review)
from sklearn.feature_extraction.text import CountVectorizer
# Bag-of-words features over the cleaned corpus, capped at a 25k vocabulary.
Cv=CountVectorizer(max_features=25000)
X=Cv.fit_transform(Corpus).toarray()
# One-hot the label column and keep a single binary target column.
# NOTE(review): iloc[:,1] keeps the second dummy column — presumably the
# 'spam' indicator since get_dummies orders columns alphabetically; verify.
y=pd.get_dummies(messages['label'])
y=y.iloc[:,1].values
from sklearn.model_selection import train_test_split
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.20,random_state=0)
from sklearn.metrics import accuracy_score,confusion_matrix
from sklearn.linear_model import LogisticRegression
# Logistic-regression baseline; evaluate on the held-out 20% split.
LR=LogisticRegression().fit(X_train,y_train)
LR_pred=LR.predict(X_test)
accuracy_score(y_test,LR_pred)
confusion_matrix(y_test,LR_pred)
| Spam Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python36
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# # Handwritten Digit Classification (MNIST) using ONNX Runtime on Azure ML
#
# This example shows how to deploy an image classification neural network using the Modified National Institute of Standards and Technology ([MNIST](http://yann.lecun.com/exdb/mnist/)) dataset and Open Neural Network eXchange format ([ONNX](http://aka.ms/onnxdocarticle)) on the Azure Machine Learning platform. MNIST is a popular dataset consisting of 70,000 grayscale images. Each image is a handwritten digit of 28x28 pixels, representing number from 0 to 9. This tutorial will show you how to deploy a MNIST model from the [ONNX model zoo](https://github.com/onnx/models), use it to make predictions using ONNX Runtime Inference, and deploy it as a web service in Azure.
#
# Throughout this tutorial, we will be referring to ONNX, a neural network exchange format used to represent deep learning models. With ONNX, AI developers can more easily move models between state-of-the-art tools (CNTK, PyTorch, Caffe, MXNet, TensorFlow) and choose the combination that is best for them. ONNX is developed and supported by a community of partners including Microsoft AI, Facebook, and Amazon. For more information, explore the [ONNX website](http://onnx.ai) and [open source files](https://github.com/onnx).
#
# [ONNX Runtime](https://aka.ms/onnxruntime-python) is the runtime engine that enables evaluation of trained machine learning (Traditional ML and Deep Learning) models with high performance and low resource utilization.
#
# #### Tutorial Objectives:
#
# - Describe the MNIST dataset and pretrained Convolutional Neural Net ONNX model, stored in the ONNX model zoo.
# - Deploy and run the pretrained MNIST ONNX model on an Azure Machine Learning instance
# - Predict labels for test set data points in the cloud using ONNX Runtime and Azure ML
# ## Prerequisites
#
# ### 1. Install Azure ML SDK and create a new workspace
# Please follow [Azure ML configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/00.configuration.ipynb) to set up your environment.
#
# ### 2. Install additional packages needed for this tutorial notebook
# You need to install the popular plotting library `matplotlib`, the image manipulation library `opencv`, and the `onnx` library in the conda environment where Azure Maching Learning SDK is installed.
#
# ```sh
# (myenv) $ pip install matplotlib onnx opencv-python
# ```
#
# **Debugging tip**: Make sure that you run the "jupyter notebook" command to launch this notebook after activating your virtual environment. Choose the respective Python kernel for your new virtual environment using the `Kernel > Change Kernel` menu above. If you have completed the steps correctly, the upper right corner of your screen should state `Python [conda env:myenv]` instead of `Python [default]`.
#
# ### 3. Download sample data and pre-trained ONNX model from ONNX Model Zoo.
#
# In the following lines of code, we download [the trained ONNX MNIST model and corresponding test data](https://github.com/onnx/models/tree/master/mnist) and place them in the same folder as this tutorial notebook. For more information about the MNIST dataset, please visit [Yan LeCun's website](http://yann.lecun.com/exdb/mnist/).
# +
# urllib is a built-in Python library to download files from URLs
# Objective: retrieve the latest version of the ONNX MNIST model files from the
# ONNX Model Zoo and save it in the same folder as this tutorial
import urllib.request
onnx_model_url = "https://www.cntk.ai/OnnxModels/mnist/opset_7/mnist.tar.gz"
urllib.request.urlretrieve(onnx_model_url, filename="mnist.tar.gz")
# +
# the ! magic command tells our jupyter notebook kernel to run the following line of
# code from the command line instead of the notebook kernel
# We use tar with the xvzf flags to extract the files we just retrieved from the ONNX model zoo
# !tar xvzf mnist.tar.gz
# -
# ## Deploy a VM with your ONNX model in the Cloud
#
# ### Load Azure ML workspace
#
# We begin by instantiating a workspace object from the existing workspace created earlier in the configuration notebook.
# +
# Check core SDK version number
import azureml.core
print("SDK version:", azureml.core.VERSION)
# +
from azureml.core import Workspace
ws = Workspace.from_config()
print(ws.name, ws.resource_group, ws.location, sep = '\n')
# -
# ### Registering your model with Azure ML
# +
model_dir = "mnist" # replace this with the location of your model files
# leave as is if it's in the same folder as this notebook
# +
from azureml.core.model import Model
model = Model.register(workspace = ws,
model_path = model_dir + "/" + "model.onnx",
model_name = "mnist_1",
tags = {"onnx": "demo"},
description = "MNIST image classification CNN from ONNX Model Zoo",)
# -
# ### Optional: Displaying your registered models
#
# This step is not required, so feel free to skip it.
models = ws.models
for name, m in models.items():
print("Name:", name,"\tVersion:", m.version, "\tDescription:", m.description, m.tags)
# + [markdown] nbpresent={"id": "c3f2f57c-7454-4d3e-b38d-b0946cf066ea"}
# ### ONNX MNIST Model Methodology
#
# The image classification model we are using is pre-trained using Microsoft's deep learning cognitive toolkit, [CNTK](https://github.com/Microsoft/CNTK), from the [ONNX model zoo](http://github.com/onnx/models). The model zoo has many other models that can be deployed on cloud providers like AzureML without any additional training. To ensure that our cloud deployed model works, we use testing data from the famous MNIST data set, provided as part of the [trained MNIST model](https://github.com/onnx/models/tree/master/mnist) in the ONNX model zoo.
#
# ***Input: Handwritten Images from MNIST Dataset***
#
# ***Task: Classify each MNIST image into an appropriate digit***
#
# ***Output: Digit prediction for input image***
#
# Run the cell below to look at some of the sample images from the MNIST dataset that we used to train this ONNX model. Remember, once the application is deployed in Azure ML, you can use your own images as input for the model to classify!
# +
# for images and plots in this notebook
import matplotlib.pyplot as plt
from IPython.display import Image
# display images inline
# %matplotlib inline
# -
Image(url="http://3.bp.blogspot.com/_UpN7DfJA0j4/TJtUBWPk0SI/AAAAAAAAABY/oWPMtmqJn3k/s1600/mnist_originals.png", width=200, height=200)
# ### Specify our Score and Environment Files
# We are now going to deploy our ONNX Model on AML with inference in ONNX Runtime. We begin by writing a score.py file, which will help us run the model in our Azure ML virtual machine (VM), and then specify our environment by writing a yml file. You will also notice that we import the onnxruntime library to do runtime inference on our ONNX models (passing in input and evaluating out model's predicted output). More information on the API and commands can be found in the [ONNX Runtime documentation](https://aka.ms/onnxruntime).
#
# ### Write Score File
#
# A score file is what tells our Azure cloud service what to do. After initializing our model using azureml.core.model, we start an ONNX Runtime inference session to evaluate the data passed in on our function calls.
# +
# %%writefile score.py
import json
import numpy as np
import onnxruntime
import sys
import os
from azureml.core.model import Model
import time
def init():
    """Azure ML scoring entry point: load the registered ONNX model once and
    cache the inference session plus its input/output tensor names in module
    globals for run() to reuse on every request."""
    global session, input_name, output_name
    model = Model.get_model_path(model_name = 'mnist_1')
    session = onnxruntime.InferenceSession(model, None)
    # The MNIST model has a single input and a single output tensor.
    input_name = session.get_inputs()[0].name
    output_name = session.get_outputs()[0].name
def run(input_data):
    '''Score one request with ONNX Runtime.

    input_data: JSON string of the form {"data": <nested list>}; the payload
    is converted to a float32 numpy array before inference.
    Returns a JSON string: {"result": [digit], "time_in_sec": [latency]} on
    success, or {"error": message} on any failure.
    Relies on the module globals (session, input_name, output_name) set by
    init().
    '''
    try:
        # load in our data, convert to readable format
        data = np.array(json.loads(input_data)['data']).astype('float32')
        # Time only the inference call itself.
        start = time.time()
        r = session.run([output_name], {input_name: data})[0]
        end = time.time()
        result = choose_class(r[0])
        result_dict = {"result": [result],
                      "time_in_sec": [end - start]}
    except Exception as e:
        # Any failure (bad JSON, wrong shape, runtime error) is reported to
        # the caller rather than crashing the service.
        result_dict = {"error": str(e)}
    return json.dumps(result_dict)
def choose_class(result_prob):
    """Return the index of the highest-scoring class in `result_prob`.

    result_prob: 1-D array-like of per-class scores; the argmax over axis 0
    picks the winning digit, cast to a plain Python int so it serializes to
    JSON.
    """
    winner = np.argmax(result_prob, axis=0)
    return int(winner)
# -
# ### Write Environment File
#
# This step creates a YAML environment file that specifies which dependencies we would like to see in our Linux Virtual Machine.
# +
from azureml.core.conda_dependencies import CondaDependencies
myenv = CondaDependencies.create(pip_packages=["numpy", "onnxruntime", "azureml-core"])
with open("myenv.yml","w") as f:
f.write(myenv.serialize_to_string())
# -
# ### Create the Container Image
# This step will likely take a few minutes.
# +
from azureml.core.image import ContainerImage
image_config = ContainerImage.image_configuration(execution_script = "score.py",
runtime = "python",
conda_file = "myenv.yml",
description = "MNIST ONNX Runtime container",
tags = {"demo": "onnx"})
image = ContainerImage.create(name = "onnximage",
# this is the model object
models = [model],
image_config = image_config,
workspace = ws)
image.wait_for_creation(show_output = True)
# -
# In case you need to debug your code, the next line of code accesses the log file.
print(image.image_build_log_uri)
# We're all done specifying what we want our virtual machine to do. Let's configure and deploy our container image.
#
# ### Deploy the container image
# +
from azureml.core.webservice import AciWebservice
aciconfig = AciWebservice.deploy_configuration(cpu_cores = 1,
memory_gb = 1,
tags = {'demo': 'onnx'},
description = 'ONNX for mnist model')
# -
# The following cell will likely take a few minutes to run as well.
# +
from azureml.core.webservice import Webservice
aci_service_name = 'onnx-demo-mnist'
print("Service", aci_service_name)
aci_service = Webservice.deploy_from_image(deployment_config = aciconfig,
image = image,
name = aci_service_name,
workspace = ws)
aci_service.wait_for_deployment(True)
print(aci_service.state)
# -
if aci_service.state != 'Healthy':
# run this command for debugging.
print(aci_service.get_logs())
# If your deployment fails, make sure to delete your aci_service or rename your service before trying again!
# aci_service.delete()
# ### Success!
#
# If you've made it this far, you've deployed a working VM with a handwritten digit classifier running in the cloud using Azure ML. Congratulations!
#
# Let's see how well our model deals with our test images.
# ## Testing and Evaluation
#
# ### Load Test Data
#
# These are already in your directory from your ONNX model download (from the model zoo).
#
# Notice that our Model Zoo files have a .pb extension. This is because they are [protobuf files (Protocol Buffers)](https://developers.google.com/protocol-buffers/docs/pythontutorial), so we need to read in our data through our ONNX TensorProto reader into a format we can work with, like numerical arrays.
# +
# to manipulate our arrays
import numpy as np
# read in test data protobuf files included with the model
import onnx
from onnx import numpy_helper
# to use parsers to read in our model/data
import json
import os
test_inputs = []
test_outputs = []
# read in 3 testing images from .pb files
test_data_size = 3
for i in np.arange(test_data_size):
    # Each test_data_set_<i> folder holds one input/output protobuf pair.
    input_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(i), 'input_0.pb')
    output_test_data = os.path.join(model_dir, 'test_data_set_{0}'.format(i), 'output_0.pb')
    # convert protobuf tensors to np arrays using the TensorProto reader from ONNX
    tensor = onnx.TensorProto()
    with open(input_test_data, 'rb') as f:
        tensor.ParseFromString(f.read())
        input_data = numpy_helper.to_array(tensor)
        test_inputs.append(input_data)
    with open(output_test_data, 'rb') as f:
        tensor.ParseFromString(f.read())
        output_data = numpy_helper.to_array(tensor)
        test_outputs.append(output_data)
if len(test_inputs) == test_data_size:
    print('Test data loaded successfully.')
# + [markdown] nbpresent={"id": "c3f2f57c-7454-4d3e-b38d-b0946cf066ea"}
# ### Show some sample images
# We use `matplotlib` to plot 3 test images from the dataset.
# + nbpresent={"id": "396d478b-34aa-4afa-9898-cdce8222a516"}
plt.figure(figsize = (16, 6))
for test_image in np.arange(3):
plt.subplot(1, 15, test_image+1)
plt.axhline('')
plt.axvline('')
plt.imshow(test_inputs[test_image].reshape(28, 28), cmap = plt.cm.Greys)
plt.show()
# -
# ### Run evaluation / prediction
# +
plt.figure(figsize = (16, 6), frameon=False)
plt.subplot(1, 8, 1)
plt.text(x = 0, y = -30, s = "True Label: ", fontsize = 13, color = 'black')
plt.text(x = 0, y = -20, s = "Result: ", fontsize = 13, color = 'black')
plt.text(x = 0, y = -10, s = "Inference Time: ", fontsize = 13, color = 'black')
plt.text(x = 3, y = 14, s = "Model Input", fontsize = 12, color = 'black')
plt.text(x = 6, y = 18, s = "(28 x 28)", fontsize = 12, color = 'black')
plt.imshow(np.ones((28,28)), cmap=plt.cm.Greys)
for i in np.arange(test_data_size):
input_data = json.dumps({'data': test_inputs[i].tolist()})
# predict using the deployed model
r = json.loads(aci_service.run(input_data))
if "error" in r:
print(r['error'])
break
result = r['result'][0]
time_ms = np.round(r['time_in_sec'][0] * 1000, 2)
ground_truth = int(np.argmax(test_outputs[i]))
# compare actual value vs. the predicted values:
plt.subplot(1, 8, i+2)
plt.axhline('')
plt.axvline('')
# use different color for misclassified sample
font_color = 'red' if ground_truth != result else 'black'
clr_map = plt.cm.gray if ground_truth != result else plt.cm.Greys
# ground truth labels are in blue
plt.text(x = 10, y = -30, s = ground_truth, fontsize = 18, color = 'blue')
# predictions are in black if correct, red if incorrect
plt.text(x = 10, y = -20, s = result, fontsize = 18, color = font_color)
plt.text(x = 5, y = -10, s = str(time_ms) + ' ms', fontsize = 14, color = font_color)
plt.imshow(test_inputs[i].reshape(28, 28), cmap = clr_map)
plt.show()
# -
# ### Try classifying your own images!
#
# Create your own handwritten image and pass it into the model.
# +
# Preprocessing functions take your image and format it so it can be passed
# as input into our ONNX model
import cv2
def rgb2gray(rgb):
    """Collapse an RGB(A) image to a single-channel grayscale image.

    Applies the ITU-R BT.601 luma weights (0.299 R, 0.587 G, 0.114 B) to the
    first three channels; any alpha channel is ignored.

    :param rgb: array of shape (..., >=3) with RGB(A) pixel values
    :return: array of shape (...) holding one luminance value per pixel
    """
    luma_weights = [0.299, 0.587, 0.114]
    return np.dot(rgb[..., :3], luma_weights)
def resize_img(img):
    """Resize a 2-D grayscale image to the MNIST model's input dimensions.

    :param img: 2-D grayscale image of arbitrary size
    :return: the resampled pixels with shape (1, 1, 28, 28) (batch, channel, H, W)
    """
    # INTER_AREA is the recommended interpolation when shrinking images.
    shrunk = cv2.resize(img, dsize=(28, 28), interpolation=cv2.INTER_AREA)
    # Add the singleton batch and channel axes the ONNX model expects.
    return shrunk.reshape(1, 1, 28, 28)
def preprocess(img):
    """Convert an input image to the (1, 1, 28, 28) grayscale tensor the model expects.

    Handles three cases:
      * 28x28 grayscale input  -> only add the batch/channel axes
      * other 2-D grayscale sizes -> resample to 28x28 (generalization: the
        original version crashed on grayscale images that were not 28x28)
      * RGB(A) input -> convert to grayscale, then resample

    :param img: 2-D grayscale or 3-D RGB(A) image array
    :return: array of shape (1, 1, 28, 28)
    """
    if img.ndim == 2:
        if img.shape == (28, 28):
            # reshape instead of in-place ndarray.resize: resize raises
            # ValueError on arrays that do not own their data (views/slices)
            return img.reshape(1, 1, 28, 28)
        return resize_img(img)
    grayscale = rgb2gray(img)
    processed_img = resize_img(grayscale)
    return processed_img
# +
# Replace this string with your own path/test image
# Make sure your image is square and the dimensions are equal (i.e. 100 * 100 pixels or 28 * 28 pixels)
# Any PNG or JPG image file should work
# Path to the user's own test image; the sentinel string below means "not set".
your_test_image = "<path to file>"
# e.g. your_test_image = "C:/Users/vinitra.swamy/Pictures/handwritten_digit.png"
import matplotlib.image as mpimg
if your_test_image != "<path to file>":
    img = mpimg.imread(your_test_image)
    plt.subplot(1,3,1)
    plt.imshow(img, cmap = plt.cm.Greys)
    print("Old Dimensions: ", img.shape)
    # convert to the (1, 1, 28, 28) grayscale tensor the model expects
    img = preprocess(img)
    print("New Dimensions: ", img.shape)
else:
    # no path supplied; the next cell prints a hint instead of predicting
    img = None
# -
# Send the user-supplied image to the deployed service and plot the prediction.
if img is None:
    print("Add the path for your image data.")
else:
    input_data = json.dumps({'data': img.tolist()})
    try:
        r = json.loads(aci_service.run(input_data))
        result = r['result'][0]
        time_ms = np.round(r['time_in_sec'][0] * 1000, 2)
    except Exception as e:
        # bug fix: previously the plotting code below ran even after a failed
        # request, raising NameError on the undefined `result`/`time_ms`
        print(str(e))
    else:
        plt.figure(figsize = (16, 6))
        plt.subplot(1, 15,1)
        plt.axhline('')
        plt.axvline('')
        plt.text(x = -100, y = -20, s = "Model prediction: ", fontsize = 14)
        plt.text(x = -100, y = -10, s = "Inference time: ", fontsize = 14)
        plt.text(x = 0, y = -20, s = str(result), fontsize = 14)
        plt.text(x = 0, y = -10, s = str(time_ms) + " ms", fontsize = 14)
        plt.text(x = -100, y = 14, s = "Input image: ", fontsize = 14)
        plt.imshow(img.reshape(28, 28), cmap = plt.cm.gray)
# ## Optional: How does our ONNX MNIST model work?
# #### A brief explanation of Convolutional Neural Networks
#
# A [convolutional neural network](https://en.wikipedia.org/wiki/Convolutional_neural_network) (CNN, or ConvNet) is a type of [feed-forward](https://en.wikipedia.org/wiki/Feedforward_neural_network) artificial neural network made up of neurons that have learnable weights and biases. The CNNs take advantage of the spatial nature of the data. In nature, we perceive different objects by their shapes, size and colors. For example, objects in a natural scene are typically edges, corners/vertices (defined by two or more edges), color patches etc. These primitives are often identified using different detectors (e.g., edge detection, color detector) or a combination of detectors interacting to facilitate image interpretation (object classification, region of interest detection, scene description etc.) in real-world vision-related tasks. These detectors are also known as filters. Convolution is a mathematical operator that takes an image and a filter as input and produces a filtered output (representing, say, edges, corners, or colors in the input image).
#
# Historically, these filters are a set of weights that were often hand crafted or modeled with mathematical functions (e.g., [Gaussian](https://en.wikipedia.org/wiki/Gaussian_filter) / [Laplacian](http://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm) / [Canny](https://en.wikipedia.org/wiki/Canny_edge_detector) filter). The filter outputs are mapped through non-linear activation functions mimicking human brain cells called [neurons](https://en.wikipedia.org/wiki/Neuron). Popular deep CNNs or ConvNets (such as [AlexNet](https://en.wikipedia.org/wiki/AlexNet), [VGG](https://arxiv.org/abs/1409.1556), [Inception](http://www.cv-foundation.org/openaccess/content_cvpr_2015/papers/Szegedy_Going_Deeper_With_2015_CVPR_paper.pdf), [ResNet](https://arxiv.org/pdf/1512.03385v1.pdf)) that are used for various [computer vision](https://en.wikipedia.org/wiki/Computer_vision) tasks have many of these architectural primitives (inspired from biology).
#
# ### Convolution Layer
#
# A convolution layer is a set of filters. Each filter is defined by a weight (**W**) matrix, and bias ($b$).
#
# 
#
# These filters are scanned across the image performing the dot product between the weights and corresponding input value ($x$). The bias value is added to the output of the dot product and the resulting sum is optionally mapped through an activation function. This process is illustrated in the following animation.
Image(url="https://www.cntk.ai/jup/cntk103d_conv2d_final.gif", width= 200)
# ### Model Description
#
# The MNIST model from the ONNX Model Zoo uses maxpooling to update the weights in its convolutions, summarized by the graphic below. You can see the entire workflow of our pre-trained model in the following image, with our input images and our output probabilities of each of our 10 labels. If you're interested in exploring the logic behind creating a Deep Learning model further, please look at the [training tutorial for our ONNX MNIST Convolutional Neural Network](https://github.com/Microsoft/CNTK/blob/master/Tutorials/CNTK_103D_MNIST_ConvolutionalNeuralNetwork.ipynb).
# #### Max-Pooling for Convolutional Neural Nets
#
# 
# #### Pre-Trained Model Architecture
#
# 
# +
# remember to delete your service after you are done using it!
# aci_service.delete()
# -
# ## Conclusion
#
# Congratulations!
#
# In this tutorial, you have:
# - familiarized yourself with ONNX Runtime inference and the pretrained models in the ONNX model zoo
# - understood a state-of-the-art convolutional neural net image classification model (MNIST in ONNX) and deployed it in Azure ML cloud
# - ensured that your deep learning model is working perfectly (in the cloud) on test data, and checked it against some of your own!
#
# Next steps:
# - Check out another interesting application based on a Microsoft Research computer vision paper that lets you set up a [facial emotion recognition model](https://github.com/Azure/MachineLearningNotebooks/tree/master/onnx/onnx-inference-emotion-recognition.ipynb) in the cloud! This tutorial deploys a pre-trained ONNX Computer Vision model in an Azure ML virtual machine.
# - Contribute to our [open source ONNX repository on github](http://github.com/onnx/onnx) and/or add to our [ONNX model zoo](http://github.com/onnx/models)
| onnx/onnx-inference-mnist-deploy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Hide deprecation warnings
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import itertools
import heapq
from sklearn.metrics.pairwise import cosine_similarity
from scipy.sparse import coo_matrix
import scipy.sparse.linalg as linalg
# -
# ## Load Datasets
df_order_products__prior = pd.read_csv("../data/raw/order_products__prior.csv")
df_order_products__train = pd.read_csv("../data/raw/order_products__train.csv")
df_orders = pd.read_csv("../data/interim/df_orders_clustered.csv")
df_products = pd.read_csv("../data/raw/products.csv")
# ## Data Preparation
#
# ### User basket products
# Build df_test: one row per user with the list of products in their "train"
# (held-out) order, plus the user's cluster label.
df_orders_test = df_orders.loc[(df_orders.eval_set == "train")].reset_index()
df_orders_test.head()
df_orders_test.shape
df_orders_test = df_orders_test[["order_id", "user_id", 'cluster']]
df_orders_test.head()
df_test = df_order_products__train[["order_id", "product_id"]]
df_test.head()
df_test.shape
# +
# collapse the order lines into a single products-per-order list column
df_test = df_test.groupby("order_id")["product_id"]\
    .apply(list).reset_index().rename(columns={"product_id": "products"})
df_test.head()
# -
df_test.shape
# attach user_id and cluster to each basket (inner join on order_id)
df_test = pd.merge(df_orders_test, df_test, on="order_id")
df_test.head()
df_test = df_test[["user_id", "products", "cluster"]]
df_test.head()
len(df_test)
# ### Users prior purchases per product
# Build df_train: per (user, product, cluster), how many times the user
# bought the product across all of their "prior" orders.
df_orders_train = df_orders.loc[df_orders.eval_set == "prior"]
df_orders_train.head()
df_orders_train.shape
df_orders_train = df_orders_train[["order_id", "user_id", "cluster"]]
df_orders_train.head()
df_train = pd.merge(df_orders_train, df_order_products__prior[["order_id", "product_id"]],\
    on="order_id")
df_train.head()
df_train = df_train[["user_id", "product_id", "cluster"]]
# groupby().size() counts repeat purchases; the unnamed count column 0 is
# renamed to "quantity"
df_train = df_train.groupby(["user_id", "product_id", "cluster"])\
    .size().reset_index().rename(columns={0:"quantity"})
df_train.head()
df_train.shape
df_train.info()
# ## Utility Matrices
# Build one sparse product-x-user utility matrix per cluster, plus lookup
# dictionaries mapping user ids / matrix positions back and forth.
clusternumber = len(df_train.cluster.unique())
# +
# cluster[i] holds the purchase rows belonging to cluster i
cluster = []
for i in range(clusternumber):
    cluster.append(df_train.loc[df_train['cluster'] == i].drop('cluster',axis=1))
# -
# categorical dtype gives each user/product a dense integer code (cat.codes)
for i in range(clusternumber):
    cluster[i]["user_id"] = cluster[i]["user_id"].astype("category")
    cluster[i]["product_id"] = cluster[i]["product_id"].astype("category")
# +
# COO matrix: rows = product category codes, cols = user category codes,
# values = purchase counts
utility_matrix = []
for i in range(clusternumber):
    utility_matrix.append(coo_matrix((cluster[i]["quantity"],
        (cluster[i]["product_id"].cat.codes.copy(),
         cluster[i]["user_id"].cat.codes.copy()))))
# -
for i in range(clusternumber):
    print("Utility matrix {} shape: {}".format(i,utility_matrix[i].shape))
# +
# transpose to user-x-product and convert to CSR for fast row slicing
utility_matrix_T = []
for i in range(clusternumber):
    utility_matrix_T.append(utility_matrix[i].T.tocsr())
# -
# Let's create users and products dictionaries for future ease of use
# +
# users[c]: user_id -> row index in utility_matrix_T[c]
users = []
for i in range(clusternumber):
    users.append({uid:i for i, uid in enumerate(cluster[i]["user_id"].cat.categories)})
# +
# products[c]: column index -> product_id (inverse of the category coding)
products = []
for i in range(clusternumber):
    products.append(dict(enumerate(cluster[i]["product_id"].cat.categories)))
# -
# -
# ## Popular products
#
# One thing that could be done is to always recommend the most popular products. Although this is an underfitting solution, it can be computed with the following products
# +
popular_products = list(df_order_products__prior["product_id"].value_counts().head(10).index)
popular_products
# -
print("Most popular products:")
df_products.product_name.loc[df_products.product_id.isin(popular_products)].reset_index(drop=True)
# ## Recommendation with user to user similarity
# We will use an example user: User ID 1
# Get cluster
# Worked example of user-to-user similarity for user_id 1.
user_ex = 1
cluster = df_train.cluster.loc[df_train.user_id == user_ex].unique()[0]
cluster
# Get top similar users
# NOTE(review): `users[cluster][1]` hard-codes user id 1; it only matches the
# example because user_ex == 1 — should probably be users[cluster][user_ex].
similarities = cosine_similarity(utility_matrix_T[cluster][users[cluster][1]],utility_matrix_T[cluster])
# top 11 candidates by similarity (includes the user themself, similarity 1.0)
ids = np.argpartition(similarities[0], -11)[-11:]
# rank descending and drop the self-match via [1:]
best = sorted(zip(ids, similarities[0][ids]), key=lambda x: -x[1])[1:]
ids
best
# Let's check if they're really similar
# NOTE(review): ids[-1]/ids[-2] rely on argpartition output order, which is
# not guaranteed to be sorted — confirm these really are self / most-similar.
ex_user_products = set(utility_matrix_T[cluster][ids[-1]].nonzero()[1])
print("User products history:")
# NOTE(review): the nonzero() indices are matrix column positions (category
# codes), but they are matched against df_products.product_id here — verify
# whether a lookup through products[cluster] is needed.
df_products.product_name.loc[df_products.product_id.isin(ex_user_products)].reset_index(drop=True)
similar_user_products = set(utility_matrix_T[cluster][ids[-2]].nonzero()[1])
print("Most similar user products history:")
df_products.product_name.loc[df_products.product_id.isin(similar_user_products)].reset_index(drop=True)
# fraction of the similar user's products also bought by the example user
print("Recall:",len(similar_user_products.intersection(ex_user_products)) / len(similar_user_products))
# Quite similar products! So the users really have a high similarity.
#
# Let's get now the product recommendations
# drop the last candidate (assumed to be the user themself)
ids = ids[:-1]
# +
# Aggregate recommendations from the similar users: each candidate product is
# scored by the summed similarity of the users who bought it.
if len(df_test.products.loc[df_test.user_id == user_ex])>0:
    products_in_basket = df_test.products.loc[df_test.user_id == user_ex].tolist()[0]
else:
    products_in_basket = []
final_recommendations = []
final_valuation = []
for i in range(len(ids)):
    similar_users_products = utility_matrix_T[cluster][ids[i]].nonzero()[1]
    #Mask to filter products already in the user's cart
    mask = np.isin(similar_users_products, products_in_basket, invert=True)
    for j in range(len(similar_users_products[mask])):
        if np.isin(similar_users_products[mask][j], final_recommendations, invert=True):
            # first time we see this product: start its score
            final_recommendations.append(similar_users_products[mask][j])
            # NOTE(review): best[-(i+1)][1] assumes ids is ordered by ascending
            # similarity, which np.argpartition does not guarantee — confirm.
            final_valuation.append(best[-(i+1)][1])
        else:
            # product already proposed: add this user's similarity to its score
            index = final_recommendations.index(similar_users_products[mask][j])
            final_valuation[index]+= best[-(i+1)][1]
final_recommendations = np.asarray(final_recommendations)
final_valuation = np.asarray(final_valuation)
# -
# keep the (up to) 10 best-scored candidates
ind = heapq.nlargest(min(10,len(final_recommendations)), range(len(final_valuation)), final_valuation.take)
final_recommendations = final_recommendations[ind]
print("Recommended products:")
df_products.product_name.loc[df_products.product_id.isin(final_recommendations)].reset_index(drop=True)
# Let's do it now for the rest of the users, or a sample of them
# +
subset = 0.05 #We will make the predictions only in 5% of the data
df_test = df_test.sample(n=int(len(df_test) * subset)).reset_index(drop=True)
df_test.info()
# +
def rec_user2user(row):
    """Recommend up to 10 products for one user via user-to-user cosine similarity.

    Reads the module-level `utility_matrix_T`, `users` and `df_test` objects.

    :param row: a df_test row with at least 'cluster' and 'user_id'
    :return: set of recommended product indices (utility-matrix column positions)
    """
    cluster = row['cluster']
    # similarity of this user's purchase vector against every user in the cluster
    similarities = cosine_similarity(utility_matrix_T[cluster][users[cluster][row["user_id"]]]\
        ,utility_matrix_T[cluster])
    # top 11 candidates (includes the user themself); [1:] drops the self-match
    ids = np.argpartition(similarities[0], -11)[-11:]
    best = sorted(zip(ids, similarities[0][ids]), key=lambda x: -x[1])[1:]
    # NOTE(review): assumes the last argpartition element is the self-match,
    # which is not guaranteed — confirm.
    ids = ids[:-1]
    if len(df_test.products.loc[df_test.user_id == row['user_id']])>0:
        products_in_basket = df_test.products.loc[df_test.user_id == row['user_id']].tolist()[0]
    else:
        products_in_basket = []
    final_recommendations = []
    final_valuation = []
    # score each candidate product by the summed similarity of its buyers
    for i in range(len(ids)):
        similar_users_products = utility_matrix_T[cluster][ids[i]].nonzero()[1]
        #Mask to filter products already in the user's cart
        mask = np.isin(similar_users_products, products_in_basket, invert=True)
        for j in range(len(similar_users_products[mask])):
            if np.isin(similar_users_products[mask][j], final_recommendations, invert=True):
                final_recommendations.append(similar_users_products[mask][j])
                final_valuation.append(best[-(i+1)][1])
            else:
                index = final_recommendations.index(similar_users_products[mask][j])
                final_valuation[index]+= best[-(i+1)][1]
    final_recommendations = np.asarray(final_recommendations)
    final_valuation = np.asarray(final_valuation)
    # keep the (up to) 10 best-scored candidates
    ind = heapq.nlargest(min(10,len(final_recommendations)), range(len(final_valuation)), final_valuation.take)
    final_recommendations = set(final_recommendations[ind])
    return final_recommendations
df_test['Recommendations'] = df_test.apply(rec_user2user, axis=1)
# -
df_test.head()
df_test = df_test[['user_id','cluster','products','Recommendations']]
df_test.columns = ['User','Cluster','Products in basket','U2U Recommendations']
df_test.sort_values('User').head()
# ## SVD Factorization
# We will stick with the example user ID 1
# Factorize each cluster's utility matrix with truncated SVD (10 factors) and
# build example recommendations for user_ex from the latent factors.
user_ex = 1
cluster_ex = df_train.cluster.loc[df_train.user_id == user_ex].unique()[0]
# We'll start by factorizing the utility matrix using SciPy's SVD
# +
user_factors = []
product_factors = []
singular_values = []
for cluster in range(clusternumber):
    # svds requires a float matrix; 10 = number of latent factors kept
    utility_matrix_T[cluster] = utility_matrix_T[cluster].astype(np.float32)
    user_factor, singular_value, product_factor = linalg.svds(utility_matrix_T[cluster], 10)
    # User factored stored directly with a user*factor format
    # (pre-multiplying by the singular values so scores are a plain dot product)
    user_factors.append(user_factor*singular_value)
    product_factors.append(product_factor)
    singular_values.append(singular_value)
# +
# predicted affinity of user_ex for every product in their cluster
scores = user_factors[cluster_ex][users[cluster_ex][user_ex]].dot(product_factors[cluster_ex])
best = np.argpartition(scores, -10)[-10:]
recommendations_all = sorted(zip(best, scores[best]), key=lambda x: -x[1])
print("Recommended products:")
# NOTE(review): `best` holds matrix column positions (category codes), matched
# here against df_products.product_id — verify whether products[cluster_ex]
# should be used to translate first.
df_products.product_name.loc[df_products.product_id.isin(best)].reset_index(drop=True)
# -
# But some of those products might be already in the users basket, so we should get rid of them
# +
bought_indices = utility_matrix_T[cluster_ex][users[cluster_ex][user_ex]].nonzero()[1]
# take enough candidates so 10 remain after filtering already-bought products
count = 10 + len(bought_indices)
ids = np.argpartition(scores, -count)[-count:]
best = sorted(zip(ids, scores[ids]), key=lambda x: -x[1])
recommendations_new = list(itertools.islice((rec for rec in best if rec[0] not in bought_indices), 10))
print("Recommended products:")
recommendations = []
for recommendation in recommendations_new:
    recommendations.append(recommendation[0])
print(df_products.product_name.loc[df_products.product_id.isin(recommendations)].reset_index(drop=True))
# -
# Now, let's do it for the already sampled portion of the dataset, df_test
# +
def rec_SVD(row):
    """Recommend 10 products for one user from the SVD-factorized utility matrix.

    Reads the module-level `user_factors`, `product_factors`, `users` and
    `utility_matrix_T` objects.

    :param row: a df_test row with at least 'Cluster' and 'User'
    :return: set of up to 10 recommended product indices (matrix positions)
    """
    cluster = row['Cluster']
    # predicted affinity scores: (user latent factors) . (product factors)
    scores = user_factors[cluster][users[cluster][row['User']]].dot(product_factors[cluster])
    # products the user already bought must not be recommended again
    bought_indices = utility_matrix_T[cluster][users[cluster][row['User']]].nonzero()[1]
    # take enough candidates so that 10 remain after filtering bought items
    count = 10 + len(bought_indices)
    ids = np.argpartition(scores, -count)[-count:]
    best = sorted(zip(ids, scores[ids]), key=lambda x: -x[1])
    recommendations_new = list(itertools.islice((rec for rec in best if rec[0] not in bought_indices), 10))
    recommendations = []
    for recommendation in recommendations_new:
        recommendations.append(recommendation[0])
    final_recommendations = set(recommendations)
    return final_recommendations
df_test['SVD Recommendations'] = df_test.apply(rec_SVD, axis=1)
# -
df_test.head()
# ## Recall between user to user recommendation and SVD matrix factorization
# +
def methods_recall(row):
    """Fraction of a row's U2U recommendations that also appear in its SVD recommendations.

    :param row: mapping with set-valued 'U2U Recommendations' and
        'SVD Recommendations' entries
    :return: overlap size divided by the number of U2U recommendations
    """
    u2u = row['U2U Recommendations']
    svd = row['SVD Recommendations']
    shared = u2u & svd
    return len(shared) / len(u2u)
df_test['Methods Recall'] = df_test.apply(methods_recall, axis=1)
# -
print("U2U and SVD recommendations recall: {:.2f}%".format(df_test['Methods Recall'].mean() * 100))
# Nearly 1 in 10 products recommended by each system is recommended too by the other system for each user. Let's test it in the first user of the test dataset
print(df_test['U2U Recommendations'][0])
print(df_test['SVD Recommendations'][0])
# We can see how the product 47059 is indeed repeated in both recommendations, giving a recall in this case of 10%.
| notebooks/Recommender.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="UE4eky2QYcXB"
# If you are interested in graident boosting, here is a good place to start: https://xgboost.readthedocs.io/en/latest/tutorials/model.html
#
# This is a supervised machine learning method.
# + id="fg_LmZjejXi_" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1613872616111, "user_tz": 420, "elapsed": 20606, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="f783d025-b497-4c79-ca3f-0e0930e2ba02"
# !pip install catboost
# !pip install scikit-learn --upgrade
# + colab={"base_uri": "https://localhost:8080/"} id="qC2ECegCYcXD" executionInfo={"status": "ok", "timestamp": 1613872617458, "user_tz": 420, "elapsed": 21947, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="5575c694-b69e-4b17-9e0c-16776bc551fb"
# If you have installation questions, please reach out
import pandas as pd # data storage
import catboost as cats # graident boosting
from catboost import CatBoostRegressor, Pool
import datetime
import numpy as np # math and stuff
import matplotlib.pyplot as plt # plotting utility
import sklearn # ML and stats
print('catboost ver:', cats.__version__)
print('scikit ver:', sklearn.__version__)
from sklearn.preprocessing import MinMaxScaler, RobustScaler
from sklearn.model_selection import cross_val_score, KFold, train_test_split
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.metrics import max_error, mean_squared_error, median_absolute_error
# + colab={"base_uri": "https://localhost:8080/"} id="WNiabSVfYjTE" executionInfo={"status": "ok", "timestamp": 1613872735823, "user_tz": 420, "elapsed": 140308, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="3eed27af-3308-4ba5-9088-6c5dfc332150"
from google.colab import drive
drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/"} id="Hk1AsPnSYcXQ" executionInfo={"status": "ok", "timestamp": 1613872738506, "user_tz": 420, "elapsed": 142987, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="58724475-819e-4fe7-b6b8-31a7b8e1d2f4"
df = pd.read_csv('drive/My Drive/1_lewis_research/core_to_wl_merge/Merged_dataset_inner_imputed_12_21_2020.csv')
# + [markdown] id="eMQ02l7x0qD_"
# Let's drop some columns, and show off some of the data
# + colab={"base_uri": "https://localhost:8080/", "height": 469} id="Ws9xTzdwYzgX" executionInfo={"status": "ok", "timestamp": 1613872738746, "user_tz": 420, "elapsed": 143222, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="94474ace-3d2e-499c-bd11-dbbe17c55e67"
# Drop bookkeeping/identifier columns that must not leak into the features
# (CSV index artifacts, scan timings, well identifiers).
df = df.drop(['Unnamed: 0', 'Unnamed: 0.1', 'LiveTime2','ScanTime2', 'LiveTime1','ScanTime1',
              'ref_num', 'API', 'well_name', 'sample_num' ], axis=1)
print(df.columns.values) # printing all column names
df.describe()
# + [markdown] id="pQPTpuJJ1VY1"
# This is the dataset we want to test/train on. Should have features along with what we are trying to predict.
# + id="91nAGubNYcYo" executionInfo={"status": "ok", "timestamp": 1613872738747, "user_tz": 420, "elapsed": 143221, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}}
# Keep the well-log feature columns plus the regression target.
dataset = df[[
    'CAL', 'GR', 'DT', 'SP', 'DENS', 'PE',
    'RESD', 'PHIN', 'PHID', 'GR_smooth',
    'PE_smooth',
    'gz_pchip_interp' # Trying to predict gz_pchip_interp
]]
# + [markdown] id="T52yBCFGYcYt"
# In the next code block, we will remove the rows without data, and change string NaN's to np.nans
# + colab={"base_uri": "https://localhost:8080/", "height": 257} id="tUO4fhDeYcYu" executionInfo={"status": "ok", "timestamp": 1613872738748, "user_tz": 420, "elapsed": 143218, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="1929b18b-24b4-44aa-fc12-09cb29a4eb04"
# Convert literal "NaN" strings from the CSV into real np.nan values.
# NOTE(review): `dataset` is a slice of `df`; in-place replace on a slice can
# raise SettingWithCopyWarning — consider dataset = dataset.replace(...).
dataset.replace('NaN',np.nan, regex=True, inplace=True)# Should be good already
# dataset = dataset.dropna() # not needed
dataset.head(3)
# + id="MxCYJ2GVYcZA" executionInfo={"status": "ok", "timestamp": 1613872738749, "user_tz": 420, "elapsed": 143218, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}}
# Features we will use for prediction
X = dataset[['CAL', 'GR', 'DT', 'SP', 'DENS', 'PE',
             'RESD', 'PHIN', 'PHID',
             'GR_smooth',
             'PE_smooth']]
# What we are trying to predict
Y = dataset[['gz_pchip_interp']]
# 2-D (n, 1) array of targets, as expected by train_test_split below
Y_array = np.array(Y.values)
# + [markdown] id="rfNwgw_MYcZJ"
# ## Starting to set up the ML model params
# + [markdown] id="HVx6Fd103Qo-"
# Setting up the test and train sets.
# + id="q_Zq4vu_YcZK" executionInfo={"status": "ok", "timestamp": 1613872738749, "user_tz": 420, "elapsed": 143216, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}}
seed = 42  # fixed random seed so the split is reproducible across runs
test_size = 0.25 # how much data you want to withold, .15 - 0.3 is a good starting point
# bug fix: `seed` was defined but never passed, so every run produced a
# different split; random_state makes the experiment reproducible
X_train, X_test, y_train, y_test = train_test_split(X.values, Y_array, test_size=test_size, random_state=seed)
# + [markdown] id="-ySy_-2TYcZO"
# ### Let's try some hyperparameter tuning (this takes forever!)
# + [markdown] id="aU6jtQCFYcZO"
# Hyperparameter testing does a grid search to find the best parameters, out of the parameters below. This turned out to be really slow on my laptop. Please skip this!
# + colab={"base_uri": "https://localhost:8080/"} id="R8i9doQmYcZP" executionInfo={"status": "ok", "timestamp": 1613872750783, "user_tz": 420, "elapsed": 155247, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="decfc119-46c6-4ae1-ac73-daa713499cd2"
# Baseline CatBoost regressor with default hyperparameters (3000 iterations).
model = CatBoostRegressor(objective='RMSE', iterations=3000)
model.fit(X_train, y_train, verbose=1000 )
preds = model.predict(X_test)
# squared=False returns the root of the mean squared error directly
rmse = mean_squared_error(y_test, preds, squared=False)
print("Root Mean Squared Error: %f" % (rmse))
# NOTE: `max` shadows the builtin, but the export cell below reads this name,
# so it is kept for compatibility
max = max_error(y_test, preds)
print("Max Error: %f" % (max))
MAE = median_absolute_error(y_test, preds)
# bug fix: this previously printed `max` instead of `MAE`
print("Median Abs Error: %f" % (MAE))
# + colab={"base_uri": "https://localhost:8080/"} id="5oKpwOj3_Mvf" executionInfo={"status": "ok", "timestamp": 1613872750784, "user_tz": 420, "elapsed": 155244, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="2439f86d-db29-4d1e-dbf2-5b205c9ca043"
print(model.feature_names_)
print(model.feature_importances_)
# + [markdown] id="PWEEZlhoV2Bm"
# ### Parameters to search during tuning
# + colab={"base_uri": "https://localhost:8080/"} id="trJgcHlqcIF6" executionInfo={"status": "ok", "timestamp": 1613882579061, "user_tz": 420, "elapsed": 9983517, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="cebb9194-908a-4667-8d68-2fb9e9bd43b6"
# Hyperparameter grid: 3 learning rates x 5 depths x 5 L2 strengths = 75
# candidate configurations, each evaluated with 5-fold CV (slow!).
grid = {'learning_rate': [0.1, 0.2, 0.4],
        'depth': [6, 8, 10, 12, 14],
        'l2_leaf_reg': [2, 3, 4 , 5, 6 ]}
model_grid = CatBoostRegressor(objective='RMSE', iterations=1200)
# Grid Search
grid_search_result = model_grid.grid_search(grid,
                                            X=X_train,
                                            y=y_train,
                                            cv=5,
                                            verbose=False
                                            )
# + colab={"base_uri": "https://localhost:8080/"} id="zr3U2KRpuxL4" executionInfo={"status": "ok", "timestamp": 1613882579067, "user_tz": 420, "elapsed": 9983520, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="bb06108a-2db9-44d2-98b3-eb77213b563a"
grid_search_result['params']
# + [markdown] id="_olH3GBuYcZf"
# Now plug in the hyperparameters into the training model.
# + colab={"base_uri": "https://localhost:8080/"} id="F_AVSe-pYcZg" executionInfo={"status": "ok", "timestamp": 1613883847989, "user_tz": 420, "elapsed": 622688, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="80ad3a0c-0fe2-4732-9cba-38b479f0f9e9"
# Refit with the best hyperparameters found by the grid search.
model2 = CatBoostRegressor(objective='RMSE',
                           depth=grid_search_result['params']['depth'],
                           l2_leaf_reg=grid_search_result['params']['l2_leaf_reg'],
                           learning_rate=grid_search_result['params']['learning_rate'],
                           iterations=2000)
model2.fit(X_train, y_train, verbose=500 )
preds2 = model2.predict(X_test)
rmse2 = mean_squared_error(y_test, preds2, squared=False)
# bug fix: this previously printed the baseline model's `rmse` instead of `rmse2`
print("Root Mean Squared Error: %f" % (rmse2))
max2 = max_error(y_test, preds2)
print("Max Error: %f" % (max2))
MAE2 = median_absolute_error(y_test, preds2)
print("Median Abs Error: %f" % (MAE2))
# + [markdown] id="-Bb77y_4LxWi"
# # Exporting
# + colab={"base_uri": "https://localhost:8080/", "height": 112} id="FlnYi2GfLzcY" executionInfo={"status": "ok", "timestamp": 1613883203554, "user_tz": 420, "elapsed": 10608000, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="26501dac-64aa-46ba-a06b-a55096f56956"
# Export the metrics of both models (row 0 = baseline, row 1 = tuned) to CSV.
x = datetime.datetime.now()
# NOTE(review): the 'MSE' column actually stores RMSE values (squared=False
# above); consider renaming the column. 'MaxError' reads the builtin-shadowing
# `max` variable set in the baseline cell.
d = {'target': [Y.columns.values, Y.columns.values],
     'MSE': [rmse, rmse2],
     'MAE': [MAE, MAE2],
     'MaxError': [max, max2],
     'day': [x.day, x.day],
     'month':[x.month, x.month],
     'year':[x.year, x.year],
     'model':['catboost', 'catboost'],
     'version':[cats.__version__, cats.__version__ ]}
results = pd.DataFrame(data=d)
results.to_csv('drive/My Drive/1_lewis_research/analysis/experiments/catboost/catboost_results/grainsize_cat.csv')
results
# + [markdown] id="rlRyKrsfcKQJ"
# Let's plot the error, we are looking for low and centered around 0.
# + colab={"base_uri": "https://localhost:8080/", "height": 244} id="4sNv4HnBr80H" executionInfo={"status": "ok", "timestamp": 1613883203803, "user_tz": 420, "elapsed": 10608245, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="1f2955f5-6566-42db-e8e4-e9220b7ac2f0"
# Histogram of prediction errors; ideally narrow and centered on zero.
# NOTE(review): this uses `preds` (baseline model), not `preds2` (tuned) —
# confirm which model's error should be plotted.
error = preds - y_test.T
plt.figure(figsize=(6,3))
plt.hist(error[0], bins=25)
plt.xlabel('Prediction Error Grainsize, (phi)')
plt.xlim((-4,4))
# + colab={"base_uri": "https://localhost:8080/", "height": 298} id="PAX4Se0cqCsh" executionInfo={"status": "ok", "timestamp": 1613883204588, "user_tz": 420, "elapsed": 10609027, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04445590536399793096"}} outputId="5804159b-94ed-45eb-f5e6-d04120dc228f"
# Horizontal bar chart of the tuned model's feature importances, sorted ascending.
sorted_idx = model2.feature_importances_.argsort()
plt.barh(X.columns[sorted_idx], model2.feature_importances_[sorted_idx])
plt.xlabel("Catboost Feature Importance")
# + [markdown] id="QbaiGl8VXC8K"
# *fin*
| catboost/old_notebooks/catboost_regression_grainsize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simulation of Elzerman readout
# In this notebook we simulate the measurement traces generated by an electron tunneling on or off a quantum dot, using a continuous-time Markov model. A Markov chain (according to Wikipedia) is a stochastic model describing a sequence of possible events in which the probability of each event depends only on the state attained in the previous event. For more information: https://www.probabilitycourse.com/chapter11/11_3_1_introduction.php and https://vknight.org/unpeudemath/code/2015/08/01/simulating_continuous_markov_chains.html
#
# This simulation is used to investigate ways to analyse the data of random telegraph signal (RTS) and Elzerman readout. For the latter we also calculate the readout fidelity for our model.
# +
import warnings
import random
import matplotlib.pyplot as plt
import numpy as np
import scipy
import matplotlib
matplotlib.rcParams['figure.figsize']=[1.3*size for size in matplotlib.rcParams['figure.figsize'] ]
import qtt
from qtt.algorithms.random_telegraph_signal import generate_RTS_signal
from qtt.algorithms.markov_chain import ContinuousTimeMarkovModel, generate_traces
from qtt.algorithms.random_telegraph_signal import tunnelrates_RTS
from qtt.algorithms.random_telegraph_signal import fit_double_gaussian
from qtt.utilities.visualization import plot_vertical_line, plot_double_gaussian_fit, plot_single_traces
np.random.seed(1)
# -
# ## Random telegraph signal
# We start with a model for a random telegraph signal. This model is valid for an electron tunneling into and out of a quantum dot with zero magnetic field. The figure shows a measurement signal which is typical for RTS.
# +
# Two-state ('zero'/'one') continuous-time Markov model for a random telegraph
# signal; generate a noisy trace and fit the tunnel rates back from it.
model_unit = 1e-6 # we work with microseconds as the base unit
# NOTE(review): values are in Hz (15e3 Hz = 15 kHz); the "kHz" comments label
# the magnitude, multiplying by model_unit converts to per-microsecond rates.
rate_up = 15e3 # kHz
rate_down = 25e3 # kHz
rts_model = ContinuousTimeMarkovModel(['zero', 'one'], [rate_up*model_unit,rate_down*model_unit], np.array([[0.,1],[1,0]]) )
rts_data = generate_traces(rts_model, number_of_sequences=1, length=500000, std_gaussian_noise=.05, delta_time=1)
# plot the first 800 samples of the simulated trace
plt.figure(100); plt.clf()
plt.plot(1e6*model_unit*np.arange(800), rts_data.T[0:800,:])
plt.xlabel('Time [us]')
_=plt.ylabel('Signal [a.u.]')
# -
# We analyse the signal to determine the tunnel rates and the separation between the two levels.
samplerate=1e6
tunnelrate_dn, tunnelrate_up, results = tunnelrates_RTS(rts_data.flatten(), samplerate=samplerate, min_sep = 1.0, max_sep=2222, min_duration = 10, fig=1, verbose=1)
# ### More efficient calculation of tunnel rates
#
# The tunnel rates are calculated by fitting an exponential to a histogram of segment lengths.
# The mean segment length contains roughly the same information. Fitting the exponantial is more accurate when the tunnelrate approximates the sampling rate. Calculating the mean segment length is more robust for low number of datapoints.
#
# Comparing the performance of the two analysis methods, varying the tunnelrates and lowering the number of datapoints. Blue: fitted tunnelrate, red: 1 / mean segment length.
# +
def generate_RTS_results(tunnel_rate, model_unit, length):
    """Generate a noisy RTS trace with symmetric tunnel rates and fit the rates back.

    Reads the module-level `samplerate`.

    :param tunnel_rate: tunnel rate in Hz, used for both up and down transitions
    :param model_unit: duration of one model time step in seconds
    :param length: number of samples in the generated trace
    :return: tuple (tunnelrate_dn, tunnelrate_up, results) from tunnelrates_RTS
    """
    rts_model = ContinuousTimeMarkovModel(['down', 'up'], [tunnel_rate*model_unit,tunnel_rate*model_unit], np.array([[0.,1],[1,0]]) )
    # bug fix: `length` was ignored (hard-coded 10000), so the "decreased
    # number of datapoints" comparison below actually used the same length
    rtsdata = generate_traces(rts_model, number_of_sequences=1, length=length, std_gaussian_noise=.15)[0]
    with warnings.catch_warnings():
        # the fit emits warnings for short/degenerate segments; they are expected here
        warnings.filterwarnings("ignore")
        tunnelrate_dn, tunnelrate_up, results = tunnelrates_RTS(rtsdata, samplerate=samplerate, min_sep = 1.0, max_sep=2222, min_duration = 10, num_bins = 40, fig=0, verbose=0)
    return tunnelrate_dn, tunnelrate_up, results
def plot_RTS_results(results, model_unit, fig):
    """ Plot fitted tunnel rates against the simulated tunnel rate.

    Blue markers: rates from the exponential fit; red markers: rates derived
    from the mean segment length.

    Args:
        results (dict): Analysis results from tunnelrates_RTS.
        model_unit (float): Base time unit of the model in seconds.
        fig (int): Matplotlib figure number to draw into.

    NOTE(review): this function reads the globals `tunnel_rate` and
    `samplerate` from the enclosing notebook scope -- confirm they are set
    before calling.
    """
    tunnelrate_dn = results['tunnelrate_down_exponential_fit']
    tunnelrate_up = results['tunnelrate_up_exponential_fit']
    plt.figure(fig)
    # the exponential fit can fail; only plot when a rate was extracted
    if tunnelrate_dn is not None:
        plt.plot(tunnel_rate/1e3, tunnelrate_dn, '.b')
        plt.plot(tunnel_rate/1e3, tunnelrate_up, '+b')
    # convert both axes to kHz
    x_factor = 1e-3
    y_factor = (1./model_unit)*x_factor
    # 1 / (mean segment length in seconds) as an alternative rate estimate
    plt.plot(tunnel_rate*x_factor, y_factor/(samplerate*results['down_segments']['mean']), '.r')
    plt.plot(tunnel_rate*x_factor, y_factor/(samplerate*results['up_segments']['mean']), '+r')
samplerate = 1e6
plt.figure(1002); plt.clf(); plt.xlabel('Tunnel rate [kHz]'); plt.ylabel('Fitted tunnel rate [kHz]')
for jj, tunnel_rate in enumerate(np.arange(5, 405, 10)*1e3): #varying the tunnelrate from 5 to 400 kHz
tunnelrate_dn, tunnelrate_up, results = generate_RTS_results(tunnel_rate, model_unit, length = 155000)
plot_RTS_results(results, model_unit, fig = 1002)
plt.figure(1002)
qtt.pgeometry.plot2Dline([1,-1,0], ':c', label='')
plt.title('Varying the tunnelrates')
plt.figure(1010); plt.clf(); plt.xlabel('Tunnel rate [kHz]'); plt.ylabel('Fitted tunnel rate [kHz]')
for jj, tunnel_rate in enumerate(np.arange(5, 150, 5)*1e3):
tunnelrate_dn, tunnelrate_up, results = generate_RTS_results(tunnel_rate, model_unit, length = 10000)
plot_RTS_results(results, model_unit, fig = 1010)
plt.figure(1010)
qtt.pgeometry.plot2Dline([1,-1,0], ':c', label='')
_ = plt.title('Decreased the number of datapoints (10000)')
# -
# ## Elzerman readout
# We model Elzerman readout with a Markov model with three states: empty, dot filled with a spin-up electron, dot filled with a spin-down electron. The transitions possible are tunneling of a spin-up or spin-down electron out of the system, tunneling of an electron into the down state and decay of spin-up to spin-down (T1 decay).
# +
model_unit = 1e-6 # we work with microseconds as the baseunit
# transition rates in Hz
gamma_up_out = 10e3
gamma_down_out = .1e3
gamma_empty_down = 2e3
T1 = 3e-3 # [s]
gamma_up_down = 1./T1
# Generator matrix of the continuous-time Markov chain with states ordered
# (spin-down, spin-up, empty); the transpose puts it in column convention.
G = np.array( [[-gamma_down_out, 0, gamma_down_out], [gamma_up_down, -(gamma_up_down+gamma_up_out), gamma_up_out], [gamma_empty_down, 0, -gamma_empty_down]] ).T
# Holding parameters: total rate of leaving each state (minus the diagonal of G).
holding_parameters = -np.diag(G).reshape( (-1,1))
# Jump chain: transition probabilities conditional on leaving a state,
# with the diagonal zeroed out.
jump_chain= (1./holding_parameters.T)*G
jump_chain[np.diag_indices(G.shape[0])]=0
elzerman_model = ContinuousTimeMarkovModel(['spin-down', 'spin-up', 'empty'], holding_parameters*model_unit, jump_chain )
print(elzerman_model)
# -
# We generate a number of traces with the model. We shown the generated states (first plot) and the corresponding signal of the charge sensor (second plot). We calculate the signal of the charge sensor from the states with the `sensor_values` map and add noise to the signal. This gives us the opportunity to compare the states as simulated (dot empty, dot occupied with spin-up electron, dot occupied with spin-down electron), with the corresponding measurement traces.
sensor_values = {'spin-down': 0, 'spin-up':0, 'empty': 1}
# +
def generate_model_sequences(elzerman_model, sensor_values=sensor_values, std_gaussian_noise=0.2,
                             number_of_samples=3500, number_of_traces=1000, initial_state=[.5, .5, 0]):
    """ Generate state traces from a Markov model and map them to noisy sensor signals.

    Args:
        elzerman_model: ContinuousTimeMarkovModel to sample from.
        sensor_values (dict): Maps each state name to its charge-sensor level.
        std_gaussian_noise (float): Standard deviation of Gaussian noise added
            to the sensor signal (0 disables noise).
        number_of_samples (int): Length of each generated trace.
        number_of_traces (int): Number of traces to generate.
        initial_state (list): Initial state probability distribution.

    Returns:
        Tuple (traces, state_traces, initial_states): noisy sensor signals,
        the underlying state indices, and the first state of each trace.
    """
    state_traces = generate_traces(elzerman_model, std_gaussian_noise=0, length=number_of_samples, initial_state=initial_state, number_of_sequences=number_of_traces)
    # map each state index to its sensor level; states missing from the map become NaN
    # (np.NaN was removed in NumPy 2.0; np.nan is the portable spelling)
    state_mapping = np.array([sensor_values.get(state, np.nan) for state in elzerman_model.states])
    traces = state_traces.copy()
    traces = np.array(state_mapping)[traces]
    if std_gaussian_noise != 0:
        traces = traces + np.random.normal(0, std_gaussian_noise, traces.size).reshape(traces.shape)
    initial_states = state_traces[:, 0]
    return traces, state_traces, initial_states
traces, state_traces, initial_states = generate_model_sequences(elzerman_model, number_of_traces=300)
max_number_traces=100
plt.figure();
plt.imshow(state_traces[:max_number_traces,:])
plt.axis('tight')
plt.xlabel('Time [us]'); plt.ylabel('Trace index')
plt.title('States')
plt.figure();
plt.imshow(traces[:max_number_traces,:])
plt.axis('tight')
plt.xlabel('Time [us]'); plt.ylabel('Trace index')
plt.title('Sensor signal')
_=plt.colorbar()
# -
# We can also plot the individual traces. For reference we color the traces according to the initial-state of the traces.
# +
plot_single_traces(traces, trace_color=initial_states, maximum_number_of_traces=20)
plt.xlabel('Time [us]')
plt.ylabel('Signal [a.u.]')
_=plt.title('Elzerman traces (spin-down in blue, spin-up in red)')
# -
# ## Determination of readout fidelity with max of trace
# For each trace we determine the maximum value. We then label the traces according to whether this maximum value exceeds a given threshold.
# +
from qtt.algorithms.random_telegraph_signal import two_level_threshold, plot_two_level_threshold
elzermann_threshold_result = two_level_threshold(np.max(traces, axis=1))
plot_two_level_threshold(elzermann_threshold_result)
# -
# For a given readout threshold and readout length we can determine the fidelity by counting the number of traces that is correctly labelled as either up or down.
# +
def calculate_fidelity(traces, initial_states, readout_threshold, readout_length):
    """ Determine the readout fidelity from labelled measurement traces.

    A trace is labelled spin-up when the maximum of its smoothed signal inside
    the readout window exceeds the threshold; the fidelity is the fraction of
    traces whose label matches the known initial state.

    Args:
        traces (ndarray): 2D array of measurement traces, one row per trace.
        initial_states (ndarray): True initial state (0 or 1) of each trace.
        readout_threshold (float): Signal threshold for labelling a blip.
        readout_length (int): Number of samples of each trace to inspect.

    Returns:
        float: Fraction of correctly labelled traces.
    """
    # smooth each trace with a 3-sample moving average to suppress single-sample
    # noise spikes (scipy.ndimage.filters is deprecated; use scipy.ndimage directly)
    traces_smooth = scipy.ndimage.convolve(traces, np.array([[1, 1, 1.]]) / 3, mode='nearest')
    measured_states = np.max(traces_smooth[:, :readout_length], axis=1) > readout_threshold
    F = np.sum(initial_states == measured_states) / measured_states.size
    return F
readout_threshold=elzermann_threshold_result['signal_threshold']
F=calculate_fidelity(traces, initial_states, readout_threshold, 800)
print('readout fidelity F %.2f' % F)
# -
# The optimal fidelity is a trade-off between longer measurement (so that a spin-up state can tunnel out) and shorter measurement (no accidental tunneling out of the ground state, or decay from spin up to spin down).
# +
readout_lengths=np.arange(10, traces.shape[1], 20)
fidelities=np.zeros(readout_lengths.size)
for ii, readout_length in enumerate(readout_lengths):
fidelities[ii]=calculate_fidelity(traces, initial_states, readout_threshold, readout_length)
fidelities=qtt.algorithms.generic.smoothImage(fidelities)
plt.figure(1000);
plt.clf()
plt.plot(readout_lengths, fidelities,'.-b', label='fidelity')
plt.xlabel('Readout length [us]')
_=plt.ylabel('Fidelity')
plot_vertical_line(1.e6/gamma_up_out, label = 'Tunnel spin-up to empty')
plot_vertical_line(1.e6/gamma_up_down, label = 'T1', color='m')
_=plt.legend(numpoints=1)
# -
# ## Pauli spin blockade or readout with a resonator
# Taking the maximum of the trace has the disadvantage that a lot of information from the trace is discarded. An alternative method is to take the mean of the trace (over the readout period). This does not work for Elzerman readout, as the length of the blips can be either short or long with respect to the measurement interval.
#
# For Pauli spin-blockade (PSB) or resonator spin readout ([Rapid high-fidelity gate-based spin read-out in silicon](https://arxiv.org/abs/1901.00687)) we can average over the traces, as the signal is different for both spin-up and spin-down directly after pulsing to the measurement point.
# +
model_unit = 1e-6 # we work with microseconds as the baseunit
T1 = 3e-3 # [s]
gamma_up_down = 1./T1 # Hz
gamma_down_up = 1e-5 # Hz
psb_model = ContinuousTimeMarkovModel(['singlet', 'triplet'], [gamma_up_down*model_unit,gamma_down_up*model_unit], np.array([[0.,1],[1,0]]) )
print(psb_model)
# +
sensor_values = {'singlet': 0, 'triplet':1}
traces, state_traces, initial_states = generate_model_sequences(psb_model, sensor_values=sensor_values,
std_gaussian_noise=.6, number_of_traces=400, initial_state=[0.5,0.5])
max_number_traces=100
plt.figure();
plt.imshow(state_traces[:max_number_traces,:])
plt.axis('tight')
plt.xlabel('Time [us]'); plt.ylabel('Trace index')
plt.title('States')
plt.figure();
plt.imshow(traces[:max_number_traces,:])
plt.axis('tight')
plt.xlabel('Time [us]'); plt.ylabel('Trace index')
plt.title('Sensor signal')
_=plt.colorbar()
# +
readout_length = 800
trace_means = np.mean(traces[:, :readout_length], axis=1)
number_of_bins = 40
counts, bins = np.histogram(trace_means, bins=number_of_bins)
bincentres = np.array([(bins[i] + bins[i + 1]) / 2 for i in range(0, len(bins) - 1)])
par_fit, result_dict = fit_double_gaussian(bincentres, counts)
print('fitted parameters : %s' % (par_fit,))
plt.figure(50); plt.clf()
plt.plot(trace_means, '.b')
plt.xlabel('Trace index'); plt.ylabel('mean value')
plt.figure(100); plt.clf()
plt.bar(bincentres, counts, width=bincentres[1]-bincentres[0], alpha=.5, label='histogram')
_=plt.plot(bincentres, counts, '.r')
plt.xlabel('Mean of trace'); plt.ylabel('Count')
signal_range=np.linspace(trace_means.min(), trace_means.max(), 100 )
plot_double_gaussian_fit(result_dict, signal_range)
_ =plt.legend()
# -
psb_threshold = 0.5
# +
def calculate_fidelity_mean(traces, initial_states, readout_threshold, readout_length):
    """ Compute the readout fidelity using the mean of each trace.

    Each trace is labelled by whether its average over the readout window
    exceeds the threshold; the fidelity is the fraction of traces whose
    label agrees with the known initial state.

    Args:
        traces (ndarray): 2D array of measurement traces, one row per trace.
        initial_states (ndarray): True initial state (0 or 1) of each trace.
        readout_threshold (float): Threshold on the trace mean.
        readout_length (int): Number of samples to average over.

    Returns:
        float: Fraction of correctly labelled traces.
    """
    readout_window = traces[:, :readout_length]
    averaged_signal = readout_window.mean(axis=1)
    measured_states = averaged_signal > readout_threshold
    correct = np.sum(measured_states == initial_states)
    return correct / measured_states.size
F=calculate_fidelity_mean(traces, initial_states, psb_threshold, readout_length = 800)
print('readout fidelity F %.2f' % F)
# -
# From the fitted double Gaussian the readout fidelity can also be determined (for details including a model where the T1 decay is taken into account, see "Rapid Single-Shot Measurement of a Singlet-Triplet Qubit", Barthel et al., 2009, https://arxiv.org/abs/0902.0227). This is useful for real measurement data where the true spin-states of the traces are unknown.
| docs/notebooks/analysis/example_elzerman_readout.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # User Guide
#
# This package provides a common interface to work with reactions and decay trees
# for several kinds of elements (string-based, PDG, ...). It is therefore good
# separating the part corresponding to the treatment of the reactions and decays to
# that corresponding to the underlying elements.
# ## Reactions and decays
#
# Reactions and decays can be accessed through the `reactions.make_reaction`
# and `reactions.make_decay` functions, respectively. The syntax for both is very
# similar, and can be consulted in the [syntax section](../syntax.rst).
import reactions
r = reactions.reaction('A B -> C D')
# This reaction corresponds to two reactants `A` and `B` that generate two products `C` and `D`. By default, the elements of this reaction contain simply a string. We can create more complex chains of reactions:
r = reactions.reaction('A B -> {C D -> {E {F -> G H} -> G H I J}}')
# Note that we have used curly braces to define the nested reactions. We can now define a function to explore the reaction and print the corresponding tree. This package provides the function `reactions.is_element` to distinguish when the node corresponds to an element and when it corresponds to a reaction or a decay.
# +
def element_info(e, attributes):
    """ Compose a comma-separated description of the requested attributes.

    Args:
        e: Element whose attributes are read with getattr.
        attributes: Sequence of attribute names; a falsy value falls back
            to showing only the name.

    Returns:
        str: e.g. "name=A, pdg_id=7".
    """
    if not attributes:
        attributes = ('name',)
    parts = [f'{attr}={getattr(e, attr)}' for attr in attributes]
    return ', '.join(parts)
def print_reaction(reaction, indent=0, attributes=None):
    """ Recursively display a reaction as an indented tree.

    Args:
        reaction: Reaction object exposing `reactants` and `products`.
        indent (int): Number of spaces for the current nesting level.
        attributes: Optional sequence of element attribute names to show
            (defaults to the name only, see element_info).
    """
    prefix = indent * ' '
    print(f'{prefix}reactants:')
    for n in reaction.reactants:
        if reactions.is_element(n):
            print(f'{prefix} - {element_info(n, attributes)}')
        else:
            # bug fix: `attributes` was not forwarded to nested reactions,
            # so nested elements always printed the default attributes
            print_reaction(n, indent=indent + 2, attributes=attributes)
    print(f'{prefix}products:')
    for n in reaction.products:
        if reactions.is_element(n):
            print(f'{prefix} - {element_info(n, attributes)}')
        else:
            print_reaction(n, indent=indent + 2, attributes=attributes)
r = reactions.reaction('A B -> {C D -> {E {F -> G H} -> G H I J}}')
print_reaction(r)
# -
# A similar function can be defined for decays, that are composed by a head and a set of products:
# +
def print_decay(decay, indent=0, attributes=None):
    """ Recursively display a decay as an indented tree.

    Args:
        decay: Decay object exposing a `head` element and `products`.
        indent (int): Number of spaces for the current nesting level.
        attributes: Optional sequence of element attribute names to show
            (defaults to the name only, see element_info).
    """
    prefix = indent * ' '
    print(f'{prefix}- {element_info(decay.head, attributes)}')
    print(f'{prefix} products:')
    for n in decay.products:
        if reactions.is_element(n):
            print(f'{prefix} - {element_info(n, attributes)}')
        else:
            # bug fix: `attributes` was not forwarded to nested decays
            print_decay(n, indent=indent + 2, attributes=attributes)
d = reactions.decay('A -> B {C -> D E} F')
print_decay(d)
# -
# The comparison between reactions and decays is not sensitive to the order specified in the reactants or the products, thus these expressions are all true:
assert(reactions.reaction('A B -> C D') == reactions.reaction('B A -> C D'))
assert(reactions.reaction('A B -> C D') == reactions.reaction('A B -> D C'))
assert(reactions.reaction('A B -> C D') == reactions.reaction('B A -> D C'))
assert(reactions.decay('A -> B C') == reactions.decay('A -> C B'))
# However, note that we can not compare reactions with decays, and the comparison between objects must be done for the same underlying type. The kind of element can be specified on construction using the `kind` keyword argument, as can be seen in the following.
# ## String elements
# This is the default kind of element used when constructing reactions and decays. It has just one property, a string.
B = reactions.string_element('B')
assert(B.name == 'B')
r1 = reactions.reaction('A B -> C D')
# bug fix: these assertions previously inspected the unrelated reaction `r`
# defined earlier in the notebook instead of r1/r2
assert(r1.reactants[1] == B)
r2 = reactions.reaction('A B -> C D', kind='string')
assert(r2.reactants[1] == B)
assert(r1 == r2)
# This element can be used for simple operations, but is not useful for scientific applications.
# ## PDG elements
# This kind of elements are based on the information from the [Particle Data Group](https://pdglive.lbl.gov) (PDG). Their construction is dones through the `reactions.pdg_database` class, that acts as a service. This class is provided as a singleton, in such a way that any instance depending storing information of the PDG will access it through this class.
#
# ### Constructing PDG elements
# There are two ways of building this kind of elements: through the database or through the `reactions.pdg_element` constructor.
z0_db = reactions.pdg_database('Z0')
z0_el = reactions.pdg_element('Z0')
assert(z0_db == z0_el)
# We can also access the elements using the PDG identification number.
z0_db = reactions.pdg_database(310)
z0_el = reactions.pdg_element(310)
assert(z0_db == z0_el)
# The PDG elements contain information about the name and PDG ID (unique for each element), three times the charge, mass and width with their lower and upper errors, and whether the element is self charge-conjugate or not. Reactions and decays can be built with this particles providing the `pdg` value to the `kind` keyword argument.
decay = reactions.decay('Z0 -> mu+ mu-', kind='pdg')
print_decay(decay, attributes=['name', 'pdg_id', 'three_charge', 'mass', 'width', 'is_self_cc'])
# The values of the mass and width depend on whether these have been measured by the experiments, so for certain particles this information is missing (also for their errors). To check if the information is available you can check whether the returned value is `None`.
assert(reactions.pdg_element('H0').width is None)
# The full table used to construct this kind of elements can be consulted [here](../_static/pdg_table.pdf).
# ### Registering new PDG elements
# It is possible to register custom elements in the database for later use.
# +
# register the element from its initialization arguments
reactions.pdg_database.register_element('A', 99999999, 0, None, None, True)
A = reactions.pdg_database('A')
# directly register the element
B = reactions.pdg_element('B', 99999998, 0, None, None, True)
reactions.pdg_database.register_element(B)
print(element_info(A, attributes=['name', 'pdg_id', 'three_charge', 'is_self_cc']))
print(element_info(B, attributes=['name', 'pdg_id', 'three_charge', 'is_self_cc']))
# -
# There is one single condition that need to be satisfied in order to register an element, and is that none of the elements registered in the PDG database must have the same name or PDG ID.
# ### Changing the energy units
# It is possible to change the energy units used by the `reactions.pdg_element` classes with the use of the `reactions.pdg_system_of_units` object. This class is another singleton which determines the units to be used by all the PDG-related object. The PDG uses `GeV` units by default. If you want to change them, you simply need to provide the new units as a string.
z0_mass_gev = reactions.pdg_database('Z0').mass
reactions.pdg_system_of_units.set_energy_units('MeV')
z0_mass_mev = reactions.pdg_database('Z0').mass
assert(abs(z0_mass_gev - z0_mass_mev * 1e-3) < 1e-12)
# ## NuBase elements
# This kind of elements are based on the information of the [NuBase](http://amdc.in2p3.fr/web/nubase_en.html) database. The construction of elements is handled through the `reactions.nubase_database` and `reactions.nubase_element` objects, with a similar implementation to PDG elements.
#
# ### Constructing NuBase elements
# Similarly to PDG elements, there are two ways of building NuBase elements: through the database or through the `reactions.nubase_element` constructor.
# +
proton_db_by_name = reactions.nubase_database('1H')
proton_el_by_name = reactions.nubase_element('1H')
assert(proton_db_by_name == proton_el_by_name)
proton_db_by_id = reactions.nubase_database(1001000)
proton_el_by_id = reactions.nubase_element(1001000)
assert(proton_db_by_id == proton_el_by_id)
assert(proton_db_by_id == proton_el_by_name)
# -
# A NuBase element contains information about its name, ID, whether the nucleus is stable or not, whether it is a ground state and the information about the mass excess and half-life with their corresponding errors, and whether these where obtained from systematics or not.
print(reactions.nubase_database('1H'))
# The mass excess and half-life information can be missing, in which case the returned value associated to that quantity is `None`:
assert(reactions.nubase_element('1H').half_life is None)
assert(reactions.nubase_element('76Cu(m)').mass_excess is None)
# Reactions and decays can be created providing `nubase` as the `kind` to `reactions.reaction` and `reactions.decay`.
decay = reactions.decay('1n -> 1H e-', kind='nubase')
print_decay(decay, attributes=['name', 'nubase_id', 'is_stable'])
# The full list of NuBase elements can be consulted [here](../_static/nubase_table.pdf).
# ### Registering new NuBase elements
#
# +
# register the element from its initialization arguments
reactions.nubase_database.register_element('999Un', 999999000, 999, 999, None, True, None, True)
un999 = reactions.nubase_database('999Un')
# directly register the element
un998 = reactions.nubase_element('998Un', 999998000, 999, 998, None, False, None, True)
reactions.nubase_database.register_element(un998)
print(element_info(un999, attributes=['name', 'nubase_id', 'is_ground_state']))
print(element_info(un998, attributes=['name', 'nubase_id', 'is_ground_state']))
# -
# Newly registered elements must not have a similar name or ID to any existing element.
# ### Changing the units
# NuBase elements contain information about the mass excess, expressed with energy units (`keV` by default) and the half-life, expressed with time unit (seconds by default). It is possible to change both through the `reactions.nubase_system_of_units` singleton.
# +
proton_mass_excess_kev = reactions.nubase_database('1H').mass_excess
reactions.nubase_system_of_units.set_energy_units('eV')
proton_mass_excess_ev = reactions.nubase_database('1H').mass_excess
assert(abs(proton_mass_excess_kev - proton_mass_excess_ev * 1e-3) < 1e-12)
neutron_half_life_sec = reactions.nubase_database('1n').half_life
reactions.nubase_system_of_units.set_time_units('ms')
neutron_half_life_ms = reactions.nubase_database('1n').half_life
assert(abs(neutron_half_life_sec - neutron_half_life_ms * 1e-3) < 1e-12)
# -
# ## Using the database cache
# The PDG and NuBase databases allows to use a cache, loading all the elements in memory to boost the access to them. You can do this through the `reactions.pdg_database.enable_cache` and `reactions.nubase_database.enable_cache` functions. The cache can later be disabled using `reactions.pdg_database.disable_cache` and `reactions.nubase_database.disable_cache`. Note that this will not remove the elements registered by the user. If you wish to remove all elements you must call to `reactions.pdg_database.clear_cache` or `reactions.nubase_database.clear_cache`. The following example shows how to use it for PDG elements.
# +
reactions.pdg_database('Z0')  # looked up on demand (cache not enabled yet)
reactions.pdg_database.enable_cache()  # load all particles into memory
reactions.pdg_database('Z0')  # this is taken from the cache
reactions.pdg_database.register_element("Z0'", 99999997, 0, None, None, True)
reactions.pdg_database('Z0')  # this is taken from the cache
reactions.pdg_database.disable_cache()
reactions.pdg_database("Z0'")  # our element is still there
reactions.pdg_database.clear_cache()  # remove all elements in the cache
try:
    reactions.pdg_database("Z0'")  # this will fail
    # bug fix: was `RutimeError`, which would raise a NameError instead of
    # signalling that the lookup unexpectedly succeeded
    raise RuntimeError('Should have failed before')
except reactions.LookupError:
    pass
| docs/source/notebooks/user_guide.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Self-Driving Car Engineer Nanodegree
#
#
# ## Project: **Advanced Lane finding **
#
# ### Part 1: Camera Calibration + Writeup preparation
# ## Import Packages
# +
#importing some useful packages
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import cv2
import math
import os
import pickle
from support_functions import *
# %matplotlib inline
# -
# ## Support functions
# Functions will be modified and added in "support_functions.py" to allow for pipeline and remove the debug output
# +
def lane_filter_wrapper(undist_img, s_thresh, l_thresh, b_thresh, x_thresh):
    """ Apply the combined color/gradient lane filter and scale the binary
    result to an 8-bit mask (0 or 255) suitable for cv2.imwrite.

    Args:
        undist_img (ndarray): Undistorted input image.
        s_thresh, l_thresh, b_thresh, x_thresh: Threshold tuples forwarded
            to lane_filter.

    Returns:
        ndarray: Mask with 255 where the filter fired and 0 elsewhere.
    """
    binary_mask = lane_filter(undist_img, s_thresh, l_thresh, b_thresh, x_thresh)
    white_image = np.zeros_like(binary_mask)
    white_image[binary_mask == 1] = 255
    return white_image
#search for lane pixels using sliding window method
def find_lane_pixels_sliding_window(binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
leftx_base = np.argmax(histogram[:midpoint])
rightx_base = np.argmax(histogram[midpoint:]) + midpoint
# HYPERPARAMETERS
# Choose the number of sliding windows
nwindows = 9
# Set the width of the windows +/- margin
margin = 50
# Set minimum number of pixels found to recenter window
minpix = 50
# Set height of windows - based on nwindows above and image shape
window_height = np.int(binary_warped.shape[0]//nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in nwindows
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low),
(win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),
(win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window #
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices (previously was a list of lists of pixels)
try:
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
except ValueError:
# Avoids an error if the above is not implemented fully
pass
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty, out_img
#fit lines and draw over image
def fit_polynomial(binary_warped):
# Find our lane pixels first
leftx, lefty, rightx, righty, out_img = find_lane_pixels_sliding_window(binary_warped)
# Fit a second order polynomial to each using `np.polyfit`
left_fit = np.polyfit(lefty, leftx, 2) #retunrs A,B and C coffiecents
right_fit = np.polyfit(righty, rightx, 2) #retunrs A,B and C coffiecents
# Generate x and y values for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
try:
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
except TypeError:
# Avoids an error if `left` and `right_fit` are still none or incorrect
print('The function failed to fit a line!')
left_fitx = 1*ploty**2 + 1*ploty
right_fitx = 1*ploty**2 + 1*ploty
## Visualization ##
# Colors in the left and right lane regions
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
lane_highlight_left= np.array([np.transpose(np.vstack([left_fitx, ploty]))])
lane_highlight_right= np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
lane_highlight_pts = np.hstack((lane_highlight_left, lane_highlight_right))
window_img_debug = np.zeros_like(out_img)
cv2.fillPoly(window_img_debug,np.int_([lane_highlight_pts]),(0,255,0))
out_img_debug = cv2.addWeighted(out_img, 1, window_img_debug, 0.3, 0)
window_img = np.zeros_like(out_img)
cv2.fillPoly(window_img,np.int_([lane_highlight_pts]),(0,255,0))
return window_img, out_img_debug
# -
# ## Camera Calibration
# +
#1 get image points and object points
imgpoints = [] # 2d points in image plane.
objpoints = [] # 3d point in real world space and and it will be based on chessboard dimensions 9*6
objpoints,imgpoints,size= get_obj_image_points("camera_cal/",9,6)
#2 get distortion parameters
mtx,dist = get_dist_parameters(objpoints,imgpoints,size)
# undistort one chessboard image as an example:
cal_image= cv2.imread("camera_cal/calibration2.jpg")
imgsize = ( cal_image.shape[1] , cal_image.shape[0] )
#undist :
undist= cv2.undistort(cal_image, mtx, dist, None, mtx)
#save undistorted images
cv2.imwrite("camera_cal_output/calibration2_02_Undist.jpg",undist)
# -
# ## Undistort images
# +
#undistort straight lines images to use for src and dst determination
#Loop over all images for calibration
test_images=os.listdir("test_images/")
for filename in test_images:
img = cv2.imread("test_images/"+filename)
imgsize = ( img.shape[1] , img.shape[0] )
#undist the input image :
undist= cv2.undistort(img, mtx, dist, None, mtx)
#save undistorted images
cv2.imwrite("output_images/"+filename[:-4]+"_02_Undist.jpg",undist)
# -
# ## Perspective Transform
# +
#Loop over all images for calibration
output_images=os.listdir("output_images/")
#calculate M for images with straight lines
# bug fix: the undistorted files are written with the suffix "_02_Undist.jpg";
# the lowercase "_02_undist.jpg" name fails on case-sensitive file systems
# (cv2.imread would return None and crash compute_prespective_M)
undist_1 = cv2.imread("output_images/straight_lines1_02_Undist.jpg")
undist_2 = cv2.imread("output_images/straight_lines2_02_Undist.jpg")
#image 1:
src= np.float32(((207, 720), (582, 460), (701, 460), (1090, 720)))
dst = np.float32( ((240, 720),(240, 0),(850, 0),(850, 720)))
M_1 = compute_prespective_M(undist_1,src,dst)
#image 2: same destination, slightly different source points
src= np.float32(((207, 720), (582, 460), (705, 460), (1090, 720)))
M_2 = compute_prespective_M(undist_2,src,dst)
# average the two transforms for robustness
M_mean = (M_1+M_2)/2
MInverse = compute_inverse_M (undist_1,src,dst )
for filename in test_images:
    undist = cv2.imread("output_images/"+filename[:-4]+"_02_Undist.jpg")
    imgsize = ( undist.shape[1] , undist.shape[0] )
    warped =cv2.warpPerspective(undist, M_mean, imgsize)
    cv2.imwrite("output_images/"+filename[:-4]+"_03_Wraped.jpg",warped)
# -
# ## Color and gradient filter
s_thres=(150, 255)
x_thresh=(30, 100)
l_thresh =(215,255)
b_thresh = (145,200)
for filename in test_images:
wraped = cv2.imread("output_images/"+filename[:-4]+"_03_Wraped.jpg")
filtered_white = lane_filter_wrapper(wraped,s_thres,l_thresh,b_thresh,x_thresh)
cv2.imwrite("output_images/"+filename[:-4]+"_04_Filtered.jpg",filtered_white)
# ## Finding lane using sliding window
for filename in test_images:
wraped = cv2.imread("output_images/"+filename[:-4]+"_03_Wraped.jpg")
filtered_binary = lane_filter(wraped,s_thres,l_thresh,b_thresh,x_thresh)
temp,lane_highlight_wraped= fit_polynomial(filtered_binary)
cv2.imwrite("output_images/"+filename[:-4]+"_05_Lane_Highlight.jpg",lane_highlight_wraped)
# ## Transform back the lane highlight to actual image
for filename in test_images:
filtered = cv2.imread("output_images/"+filename[:-4]+"_04_Filtered.jpg")
undist = cv2.imread("output_images/"+filename[:-4]+"_02_Undist.jpg")
wraped = cv2.imread("output_images/"+filename[:-4]+"_03_Wraped.jpg")
filtered_binary = lane_filter(wraped,s_thres,l_thresh,b_thresh,x_thresh)
window_img,temp = fit_polynomial(filtered_binary)
window_img_unwarped =cv2.warpPerspective(window_img, MInverse, imgsize)
lane_image_final = cv2.addWeighted(undist, 1, window_img_unwarped, 0.3, 0)
cv2.imwrite("output_images/"+filename[:-4]+"_06_final.jpg",lane_image_final)
# ## Save all calibration parameters to be used in Video processing
# +
calibration_parameters = {"mtx": mtx,"dist_coff": dist,
"matrix":M_mean, "inverse_matrix":MInverse,
"s_channel_thresh":s_thres,"gradient_thresh":x_thresh,
"l_channel_thresh":l_thresh,
"b_channel_thresh":b_thresh}
pickle.dump(calibration_parameters, open( "calibration_parameters.p", "wb" ))
# -
| .ipynb_checkpoints/Part 1 Camera Calibration and Writeup preparation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Aries Basic Controller - Basic Message Example
# %autoawait
import time
import asyncio
# +
from aries_basic_controller.aries_controller import AriesAgentController

# Webhook endpoint on which the controller listens for events pushed by the
# aca-py agent.
WEBHOOK_HOST = "0.0.0.0"
WEBHOOK_PORT = 8022
WEBHOOK_BASE = ""

# Admin API URL of the aca-py agent being controlled.
ADMIN_URL = "http://alice-agent:8021"

# Based on the aca-py agent you wish to control
agent_controller = AriesAgentController(webhook_host=WEBHOOK_HOST, webhook_port=WEBHOOK_PORT,
                                        webhook_base=WEBHOOK_BASE, admin_url=ADMIN_URL, connections=True)
# -

# ## Setup a listener for the basicmessages topic
#
# This is emitted using PyPubSub when the controller receives a basicmessages webhook from the agent. This happens every time the agent receives a basicmessage.
# +
def messages_handler(payload):
    # Print every incoming basic message together with its connection id.
    connection_id = payload["connection_id"]
    print("Handle message", payload, connection_id)

message_listener = {
    "handler": messages_handler,
    "topic": "basicmessages"
}

# Start the webhook server as a task on the running event loop, then register
# the listener (defaults=True also registers the library's default handlers).
loop = asyncio.get_event_loop()
loop.create_task(agent_controller.listen_webhooks())
agent_controller.register_listeners([message_listener], defaults=True)
# -

# ## Verify an Active Connection Exists
#
# This should have been established through the setup docker container using the create_connection.py script.
response = await agent_controller.connections.get_connections()
results = response['results']
print("Results : ", results)
if len(results) > 0:
    connection = response['results'][0]
    print("Connection :", connection)
    if connection['state'] == 'active':
        connection_id = connection["connection_id"]
        print("Active Connection ID : ", connection_id)
    else:
        print("Connection is still progressing to active state, retry in a few moments")
else:
    print("You must create a connection")

# ## Send a Basic Message over DIDComm to Bob
#
# See [aries-rfc](https://github.com/hyperledger/aries-rfcs/tree/master/features/0095-basic-message)
#
# You can send as many messages as you want, if you are running the [basic-message tutorial on Bob's notebook](http://localhost:8889/notebooks/basic-message.ipynb) these will be received and printed by the message handler. You may have to run a code block to see the output.
basic_message = "hello from Alice"
response = await agent_controller.messaging.send_message(connection_id, basic_message)
print("BASIC MESSAGE - Alice -> Bob")
print(response)

# ## End of Tutorial
#
# Be sure to terminate the controller so you can run another tutorial.
response = await agent_controller.terminate()
print(response)
| demo/alice/basic-message.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pygamma
import pylab
import numpy as np
import nmrglue
from pygamma import spin_system, Hcs, HJw, Fm, gen_op, sigma_eq, Iypuls, Ixpuls, Ixypuls, evolve, FID, row_vector
from scipy import fftpack
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
# %matplotlib inline
# -
# numpy broadcasting demos
mmm = np.ones((3,4))
mmm
rrr = np.array([1,2,3])
ccc = np.array([1,2,3,4])
# a length-4 vector broadcasts across each row of the 3x4 matrix
mmm*ccc
# scaling rows by a length-3 vector via double transpose ...
(mmm.transpose()*rrr).transpose()
# ... or, equivalently, by adding a trailing axis
mmm*rrr[:,np.newaxis]

# acquisition parameters for the 2D experiment
dt1 = 0.001                 # t1 time increment
dt2 = 0.001                 # t2 time increment
t1pts = 1024                # points on t1 axis
t2pts = 1024                # points on t2 axis
# +
sys=spin_system()            # define the spin system ...
sys.read("cs_2.sys")         # ... read in from disk
print( sys)
# +
tmp = row_vector(t2pts)      # 1D storage for one FID of t2pts points (GAMMA block_1D)
data1 = np.zeros((t1pts,t2pts), dtype=np.complex128)  # 2D data matrix, first phase-cycle set
data2 = np.zeros((t1pts,t2pts), dtype=np.complex128)  # 2D data matrix, second phase-cycle set

H = Hcs(sys)                 # Hamiltonian, chemical shift only
detect = gen_op(Fm(sys))     # F- detection operator
# +
sigma0 = sigma_eq(sys)       # equilibrium density matrix

# Pulse phases in degrees.
X = 0.
Y = 90.
X_ = 180.0
Y_ = 270.0
mixing_time = 1.0            # mixing delay between 2nd and 3rd pulse

# Three-pulse experiment (90 - t1 - 90 - mix - 90 - acquire) with a four-step
# phase cycle; the four FIDs of each t1 increment are co-added into data1.
for t1 in range(t1pts):
    # phase cycle step: X X X
    sigma = Ixypuls(sys, sigma0, X, 90)     # first 90-degree pulse
    sigma = evolve(sigma, H, t1*dt1)        # evolution during t1
    sigma = Ixypuls(sys, sigma, X, 90)      # second 90-degree pulse
    sigma = evolve(sigma, H, mixing_time)   # evolution during the mixing time
    sigma = Ixypuls(sys, sigma, X, 90)      # third 90-degree pulse
    FID(sigma,detect,H,dt2,t2pts,tmp)       # acquisition
    data1[t1] += tmp.toNParray()
    # phase cycle step: Y Y X
    sigma = Ixypuls(sys, sigma0, Y, 90)
    sigma = evolve(sigma, H, t1*dt1)
    sigma = Ixypuls(sys, sigma, Y, 90)
    sigma = evolve(sigma, H, mixing_time)
    sigma = Ixypuls(sys, sigma, X, 90)
    FID(sigma,detect,H,dt2,t2pts,tmp)
    data1[t1] += tmp.toNParray()
    # phase cycle step: X_ X_ X
    sigma = Ixypuls(sys, sigma0, X_, 90)
    sigma = evolve(sigma, H, t1*dt1)
    sigma = Ixypuls(sys, sigma, X_, 90)
    sigma = evolve(sigma, H, mixing_time)
    sigma = Ixypuls(sys, sigma, X, 90)
    FID(sigma,detect,H,dt2,t2pts,tmp)
    data1[t1] += tmp.toNParray()
    # phase cycle step: Y_ Y_ Y
    # NOTE(review): the original comment labelled this step "Y_ Y_ X" but the
    # final pulse phase is Y — confirm which is intended.
    sigma = Ixypuls(sys, sigma0, Y_, 90)
    sigma = evolve(sigma, H, t1*dt1)
    sigma = Ixypuls(sys, sigma, Y_, 90)
    sigma = evolve(sigma, H, mixing_time)
    sigma = Ixypuls(sys, sigma, Y, 90)
    FID(sigma,detect,H,dt2,t2pts,tmp)
    data1[t1] += tmp.toNParray()
# +
sigma0 = sigma_eq(sys)       # equilibrium density matrix

# Pulse phases in degrees.
X = 0.
Y = 90.
X_ = 180.0
Y_ = 270.0

# Second four-step phase cycle (first two pulse phases shifted by 90 degrees
# relative to the data1 acquisition above) for States-Haberkorn-style
# quadrature detection; the four FIDs of each t1 increment are co-added
# into data2.
for t1 in range(t1pts):
    # phase cycle step: X Y X
    sigma = Ixypuls(sys, sigma0, X, 90.)    # first 90-degree pulse
    sigma = evolve(sigma, H, t1*dt1)        # evolution during t1
    sigma = Ixypuls(sys, sigma, Y, 90.)     # second 90-degree pulse
    sigma = evolve(sigma, H, mixing_time)   # evolution during the mixing time
    sigma = Ixypuls(sys, sigma, X, 90.)     # third 90-degree pulse
    FID(sigma,detect,H,dt2,t2pts,tmp)       # acquisition
    data2[t1] += tmp.toNParray()
    # phase cycle step: Y X_ X
    sigma = Ixypuls(sys, sigma0, Y, 90.)
    sigma = evolve(sigma, H, t1*dt1)
    sigma = Ixypuls(sys, sigma, X_, 90.)
    sigma = evolve(sigma, H, mixing_time)
    sigma = Ixypuls(sys, sigma, X, 90.)
    FID(sigma,detect,H,dt2,t2pts,tmp)
    data2[t1] += tmp.toNParray()
    # phase cycle step: X_ Y_ X
    sigma = Ixypuls(sys, sigma0, X_, 90.)
    sigma = evolve(sigma, H, t1*dt1)
    # BUGFIX: flip angle was 9.0 (typo) — every other pulse in both phase
    # cycles uses a 90-degree flip angle.
    sigma = Ixypuls(sys, sigma, Y_, 90.)
    sigma = evolve(sigma, H, mixing_time)
    sigma = Ixypuls(sys, sigma, X, 90.)
    FID(sigma,detect,H,dt2,t2pts,tmp)
    data2[t1] += tmp.toNParray()
    # phase cycle step: Y_ X Y
    # NOTE(review): the original comment labelled this step "Y_ X X" but the
    # final pulse phase is Y — confirm which is intended.
    sigma = Ixypuls(sys, sigma0, Y_, 90.)
    sigma = evolve(sigma, H, t1*dt1)
    sigma = Ixypuls(sys, sigma, X, 90.)
    sigma = evolve(sigma, H, mixing_time)
    sigma = Ixypuls(sys, sigma, Y, 90.)
    FID(sigma,detect,H,dt2,t2pts,tmp)
    data2[t1] += tmp.toNParray()
# -
# quick look at the raw time-domain data of the first cycle
plt.imshow(data1.real)

# Apodization window: exponential line broadening of lb Hz.
lb=5.0
# NOTE(review): uses dt1 with t2pts — only consistent because dt1 == dt2 here.
ttt = np.linspace(0,dt1*t2pts, t2pts)
exp_linebroadening = np.exp(-ttt*lb*2*np.pi)
plt.plot(exp_linebroadening);

# apply the t2 window to both data sets (broadcast over rows)
data1 = data1*exp_linebroadening
data2 = data2*exp_linebroadening

# Fourier transform along the direct dimension (t2, axis=1) and centre.
sss1 = fftpack.fft(data1, axis=1)
sss1 = fftpack.fftshift(sss1,axes=1)
sss2 = fftpack.fft(data2, axis=1)
sss2 = fftpack.fftshift(sss2,axes=1)

# inspect the first row of each data set before phasing
plt.plot(sss1[0].real, label='r')
plt.plot(sss1[0].imag, label='i')
plt.legend();
plt.plot(sss2[0].real, label='r')
plt.plot(sss2[0].imag, label='i')
plt.legend();

# Interactive manual phasing widget; the values chosen there are recorded in
# the constants below.
nmrglue.process.proc_autophase.manual_ps(sss1[0], notebook=True)
ph0_1 = 106.649
ph1_1 = -0.34
nmrglue.process.proc_autophase.manual_ps(sss2[0]*1j, notebook=True)
ph0_2 = 0
ph1_2 = -0.34

# apply the zero-/first-order phase corrections to the full 2D data sets
phased_data1 = nmrglue.proc_base.ps(sss1, p0=ph0_1, p1=ph1_1)
phased_data2 = nmrglue.proc_base.ps(sss2, p0=ph0_2, p1=ph1_2)

# States-Haberkorn recombination: the real parts of the two phase-cycled sets
# form the complex interferogram along t1.
fid = phased_data1.real + 1j*phased_data2.real
plt.imshow(fid.real)
plt.plot(fid[0].real)
plt.plot(fid[-1].real)

# Apodize along t1.
# NOTE(review): this reuses the t2 window; it only lines up because
# t1pts == t2pts and dt1 == dt2 — confirm if those ever differ.
fid = fid*exp_linebroadening[:,np.newaxis]
plt.imshow(fid.real)
plt.plot(fid[0].imag)
plt.plot(fid[-1].imag)

# Fourier transform along the indirect dimension (t1, axis=0) and centre.
spec = fftpack.fftshift(fftpack.fft(fid,axis=0), axes=0)
plt.imshow(spec.imag)
# +
class TwoD_NMR_MAT_plot:
    """Contour plot of a 2D NMR spectrum with summed 1D projections.

    Reproduces the classic 2D NMR figure layout: a central contour panel
    plus the two projections drawn along the top and right edges.
    """

    def __init__(self, exp, pinfo, info, dimensions_ppm=[]):
        # exp: 2D spectrum (real ndarray); pinfo: Bruker-style processing
        # parameters ('procs'/'proc2s' dicts); info: extra metadata (unused
        # here); dimensions_ppm: optional [f1_lo, f1_hi, f2_lo, f2_hi] zoom
        # window in ppm.
        # NOTE(review): the mutable default [] is shared between calls; it is
        # only read here, so this appears safe, but worth confirming.
        # print exp.shape
        self.exp = exp
        self.pinfo = pinfo
        self.info = info
        self.rr,self.cc = exp.shape
        self.X = np.zeros(exp.shape)
        self.Y = np.zeros(exp.shape)
        # NOTE(review): r1/r2/c1/c2 below are never used in __init__.
        r1=0
        r2=self.rr
        c1=0
        c2=self.cc
        # print r2,c2
        self.create_axes( pinfo, info, self.rr, self.cc, dimensions_ppm )
        self.create_plot_layout(self.dimensions_index)
        self.plot_plots()

    def create_axes( self, pinfo, info, rr,cc, dimensions_ppm):
        """Build ppm axes from the processing parameters and cut out the
        requested window into self.X1/self.Y1/self.Z1."""
        # f1 (rows) axis parameters: offset, spectral width, reference freq
        self.f1_offset_p = pinfo['procs' ]['OFFSET']
        self.f1_sw_hz = pinfo['procs' ]['SW_p']
        self.f1_omega = pinfo['procs' ]['SF']
        self.f1_sw_ppm = self.f1_sw_hz/self.f1_omega
        # f2 (columns) axis parameters
        self.f2_offset_p = pinfo['proc2s' ]['OFFSET']
        self.f2_sw_hz = pinfo['proc2s' ]['SW_p']
        self.f2_omega = pinfo['proc2s' ]['SF']
        self.f2_sw_ppm = self.f2_sw_hz/self.f2_omega
        # print self.f1_sw_ppm
        # ppm scales run from the offset downwards (decreasing ppm)
        self.f1 = np.linspace(self.f1_offset_p, self.f1_offset_p-self.f1_sw_ppm, self.rr)
        self.f2 = np.linspace(self.f2_offset_p, self.f2_offset_p-self.f2_sw_ppm, self.cc)
        self.dw_f1_ppm = self.f1[1]-self.f1[0]
        self.dw_f2_ppm = self.f2[1]-self.f2[0]
        # coordinate grids for contour()
        for r in range(self.rr):
            for c in range( self.cc):
                self.Y[r,c] = self.f1[r]
                self.X[r,c] = self.f2[c]
        # print dimensions_ppm
        if dimensions_ppm == []:
            # no zoom requested: use the full matrix
            self.dimensions_index = np.array([0,self.rr-1,0,self.cc-1])
        else:
            # convert the ppm window to row/column indices
            r1 = int( (dimensions_ppm[1]-self.f1_offset_p)/self.dw_f1_ppm)
            r2 = int( (dimensions_ppm[0]-self.f1_offset_p)/self.dw_f1_ppm)
            c1 = int( (dimensions_ppm[2]-self.f2_offset_p)/self.dw_f2_ppm)
            c2 = int( (dimensions_ppm[3]-self.f2_offset_p)/self.dw_f2_ppm)
            self.dimensions_index = np.array([r1,r2,c1,c2 ])
        # print "self.dimensions_index", self.dimensions_index
        # excerpts actually plotted
        self.Z1 = self.exp[self.dimensions_index[0]:self.dimensions_index[1],self.dimensions_index[2]:self.dimensions_index[3]]
        self.X1 = self.X[self.dimensions_index[0]:self.dimensions_index[1],self.dimensions_index[2]:self.dimensions_index[3]]
        self.Y1 = self.Y[self.dimensions_index[0]:self.dimensions_index[1],self.dimensions_index[2]:self.dimensions_index[3]]

    def create_plot_layout( self, dimensions_index):
        """Create the central contour axes plus the two projection axes."""
        # print "dimensions_index",dimensions_index
        nullfmt = NullFormatter()         # no labels
        # definitions for the axes
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        bottom_h = left_h = left+width+0.02
        rect_scatter = [left, bottom, width, height]
        rect_histx = [left, bottom_h, width, 0.2]
        rect_histy = [left_h, bottom, 0.2, height]
        # start with a rectangular Figure
        # NOTE(review): figure number 2 is hard-coded, so repeated calls draw
        # into the same figure.
        plt.figure(2, figsize=(6,6))
        self.axScatter = plt.axes(rect_scatter)
        self.axHistx = plt.axes(rect_histx)
        self.axHisty = plt.axes(rect_histy)
        # no tick labels on the projection panels
        self.axHistx.xaxis.set_major_formatter(nullfmt)
        self.axHisty.yaxis.set_major_formatter(nullfmt)
        self.axScatter.tick_params(axis='x', labelsize=16)
        self.axScatter.tick_params(axis='y', labelsize=16)
        self.axScatter.set_xlabel('${^1}$H [ppm]',fontsize=16)
        #ax.set_xlim(-60, 60)
        self.axScatter.set_ylabel('${^1}$H [ppm]', fontsize=16)
        self.axHistx.axis('off')
        self.axHisty.axis('off')
        # restrict the central panel to the selected ppm window
        f1_start = self.f1[dimensions_index[0]]
        f1_end = self.f1[dimensions_index[1]]
        f2_start = self.f2[dimensions_index[2]]
        f2_end = self.f2[dimensions_index[3]]
        self.axScatter.set_ylim( (f1_start, f1_end) )
        self.axScatter.set_xlim( (f2_start, f2_end) )

    def plot_plots(self):
        """Draw the contour plot and the two summed projections."""
        # contour levels from 1% to 110% of the maximum intensity
        cl = np.linspace(self.Z1.max()*0.01, self.Z1.max()*1.1,10)
        # print "Z1.shape",self.Z1.shape
        sum_f1 = self.Z1.sum(axis=0)      # column sums -> top panel
        # print "len(sum_f1)",len(sum_f1)
        sum_f2 = self.Z1.sum(axis=1)      # row sums -> right panel
        # print "len(sum_f2)",len(sum_f2)
        cset = self.axScatter.contour(self.X1, self.Y1, self.Z1, cl, colors='red')
        #
        self.axHistx.plot(sum_f1, 'r-')
        self.axHisty.plot(sum_f2,range(len(sum_f2)),'r')
        self.axHistx.set_xlim( (0,len(sum_f1)-1) )
        self.axHisty.set_ylim( (0,len(sum_f2)-1) )
# +
# Minimal Bruker-style parameter dicts consumed by TwoD_NMR_MAT_plot.
info = {}
pinfo = {}
pinfo['procs']={}
pinfo['proc2s']={}
# -
# f1 axis: offset (ppm), spectral width (1/dwell) and spectrometer frequency
pinfo['procs']['OFFSET']=1.25
pinfo['procs' ]['SW_p']=1.0/dt1
pinfo['procs' ]['SF']=sys.spectrometer_frequency()
# f2 axis
pinfo['proc2s' ]['OFFSET'] =1.25
pinfo['proc2s' ]['SW_p']=1.0/dt2
pinfo['proc2s' ]['SF']=sys.spectrometer_frequency()

# contour plot of the real part of the 2D spectrum
ppplot = TwoD_NMR_MAT_plot(1*spec.real, pinfo, info )

print(dir(nmrglue))
nmrglue.process.proc_autophase.manual_ps(spec, notebook=True)
| jupyter_notebooks/noe_statesHaberkorn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %run functions_NoiseStudy.py
# %matplotlib

# global variables
fs = 13                        # default font size for figures
type_plot = ['png', 'svg']     # file formats every figure is saved in

# + active=""
# Compare different smoothing filters AND different states when the respective smoothing filter is applied to the image. Possible options are:
# 1) choose the filter function:
#    1.1 Gauss
#    1.2 Savitzky-Golay
# 2) choose the stage, when the blur filter is applied:
#    2.1 applied to the ratiometric intensity R = r/g
#    2.2 applied to the individual channels r and g
#    2.3 applied to the normalized intensity R0/R
#
# +
# actual evaluation time (used to date-stamp the output folders)
now = datetime.datetime.now()
today = now.strftime("%Y%m%d")

# output folder for plots; created on demand
save_dir_plots = 'plots/' + today + '_measurement'
if not os.path.exists(save_dir_plots):
    os.makedirs(save_dir_plots)

# output folder for numeric results; created on demand
save_dir_res = 'Results/' + today + '_measurement'
if not os.path.exists(save_dir_res):
    os.makedirs(save_dir_res)
# +
# Paths must be updated according to your directory !!!
# depth profile microsensor
path_ = '/Volumes/HIS-CAMEL/04measurementData/20201127_Noise-vs-resolution-paper/'
file_ms = path_ + 'Klaus_Optode_noise_study_26-11-2020/O2gradient_experiement/profiles.txt'
# calibration data
file_calib = 'Results/20210531_calibration/20210531-1455_calibration_1RoI.hdf5'
# measurement data - images of the optode.
file_meas = path_ + 'Klaus_Optode_noise_study_26-11-2020/O2gradient_experiement/'

# image cropping to the region of interest (RoI) that should be analyzed;
# one polygon (four corner points, px) per optode
RoI_op = [[(730, 200), (730, 1250), (1290, 1250), (1290, 200)],     # optode1
          [(1560, 200), (1560, 1250), (2100, 1250), (2100, 200)]]   # optode2

# Image resolution - determine conversion factor px -> mm
px2mm, dpi = image_resolution(px=840.6646, dist_mm=30., inch=1.1811)
print('Image resolution: 1mm equals: {:.2f}px, i.e. {:.0f}dpi'.format(px2mm, dpi))

# ................................................................................
# load depth profile(s) of the microsensor for validation of the depth profile
dic_micro = read_microsensor(file_ms=file_ms, encoding='latin-1')

# load measurement data and crop according to the RoI
dint_red, dint_green, dint_ratio = splitImage(path=file_meas, RoI_op=RoI_op)
# -

# one example of one optode; expected format e.g. "optode1, set2"
inp = input('Which optode and which setting shall be visualized? ')
kernel_str = input('Which kernel function should be applied? \n Choose between (1) Gauss or (2) Savitzky-Golay: \n > Your choise: ')
# ### Prepare specific settings for data processing
# +
# preparation lineprofile
surface = (12.9, 12.)   # sample surface position for optode 1, optode2
depth_lp = (-4, 4)      # depth range for 2D line profile
depth_op = (-1.5, 1.)   # depth range for optode excerpt
pos_lp = (7., 3.)       # position of the depth profile within the optode
ls_lw = [0]             # line width(s) of the drawn depth profile
# .................................................................
# additional information for visualization
arg = dict({'curve lw': 1.5, 'vmin':-5, 'vmax': 130, 'lw': 0.0, 'offset ms': 0.2,
            'aspect': 4, 'cmap': plt.cm.inferno, 'vmin op': 0, 'vmax op': 100,
            'figsize': (12, 5), 'fontsize': 11, 'marker': ['o', 'd', 'x', '.'],
            'colors': ['slategrey', 'darkorange', 'forestgreen', 'k']})
# -

# Kernel specific settings: build the list of kernel parameters to sweep.
if kernel_str == str(1) or kernel_str == 'gauss':
    print('selected kernel function - Gauss filter')
    kernel='gauss'
    # square kernel sizes (px)
    ls_kernel = [(1,1), (3,3), (5, 5), (7,7), (9,9), (11, 11), (13,13), (15, 15), (17,17),
                 (19,19), (21, 21), (25, 25), (31, 31), (35, 35), (41, 41), (45, 45),
                 (51 ,51), (55, 55), (61, 61), (65, 65), (71, 71), (75, 75), (81, 81)]
elif kernel_str == str(2) or kernel_str == 'Savitzky-Golay':
    print('selected kernel function - Savitzky-Golay filter')
    kernel='savgol'
    # polyorder, window
    ls_kernel = [(2,3), (2,5), (2,7), (2,9), (2,11), (2,13), (2,15), (2,17), (2,19),
                 (2,21), (2,31), (2,41), (2, 51), (2,61), (2,71), (2,81),
                 (3,5), (3,7), (3,9), (3,11), (3,13), (3,15), (3,17), (3,19), (3,21),
                 (3,31), (3,41), (3,51), (3,61), (3,71), (3,81)]
else:
    raise ValueError('Selection not valid. Choose either 1 (for Gauss) or 2 Savitzky-Golay')

# Select, when blur filter should be applied
blur_str = input('When should the blur be applied? (1) single color channels, (2) ratiometric intensity, or (3) normalized intensity? \n > Your choise: ')
if blur_str == str(1):
    # blur each color channel before the ratio is computed
    dint_ch1, dint_ch2 = dint_red, dint_green
    blur_pos='single'
elif blur_str == str(2):
    # blur the ratiometric intensity R = r/g
    dint_ch1, dint_ch2 = dint_ratio, None
    blur_pos='ratio'
elif blur_str == str(3):
    # blur the normalized intensity R0/R
    dint_ch1, dint_ch2 = dint_ratio, None
    blur_pos='norm'
else:
    raise ValueError(' Selection not valid! \n Select either 1 (for single color channels), 2 (for ratiometric intensity), or 3 (for normalized intensity)')
# ### Image processing
# +
# determine O2 concentration for the depth profile (1D) for each kernel size
dO2_lp = dict(map(lambda k:
                  (k, O2_lineprofile_compare_v2(inp=inp, surface=surface, kernel=kernel,
                                                kshape=k, lp=pos_lp, path_calib=file_calib,
                                                dint_ch1=dint_ch1, dint_ch2=dint_ch2,
                                                px2mm=px2mm, ls_lw=ls_lw,
                                                blur_pos=blur_pos)), ls_kernel))

# determine O2 concentration within the whole image for each kernel size
dO2_optode = dict(map(lambda k:
                      (k, O2blur_optode(kshape=k, inp=inp, path_calib=file_calib,
                                        px2mm=px2mm, kernel=kernel, dint_ch1=dint_ch1,
                                        dint_ch2=dint_ch2, surface=surface,
                                        depth_min=depth_op[0], depth_max=depth_op[1],
                                        blur_pos=blur_pos)), ls_kernel))
# -

# Visualize image blur along line profile
# +
# prepare microsensor data for joint visualization
df_ms = prepMS_plot(index_lp=dO2_lp[ls_kernel[0]]['vertical'][ls_lw[0]].index,
                    dic_micro=dic_micro, offset=arg['offset ms'])

# plot lineprofile and excerpt of optode; keep each figure for saving later
dimages = dict()
for k in ls_kernel:
    fig_lp = plotLP(kshape=k, dO2_lp=dO2_lp, dO2_optode=dO2_optode, df_ms=df_ms, arg=arg,
                    header_ms=['Depth (mm)', 'Intensity'], depth_lp=depth_lp,
                    s=inp.split(',')[1].strip(), depth=df_ms['Depth (mm)'])
    dimages[k] = fig_lp
# -

# O$_2$ penetration depth
# +
# set O2 level used for calculating the penetration depth in %air
threshold = 5.

# O2 penetration depth
# NOTE(review): the keyword is spelled 'treshold' by the called function.
ydepth = penetration_depth(dO2_lp=dO2_lp, ls_kernel=ls_kernel, df_ms=df_ms,
                           treshold=threshold)

# plotting penetration depth for different kernel sizes (converted px -> mm)
fig_Rpen = plot_penetrationDepth(depth=ydepth[0], ls_kernel=[round(l[1] / px2mm, 5)
                                                             for l in ls_kernel], arg=arg)
fig_Rpen.axes[0].set_xlabel('kernel size [mm]')
plt.show()
# -
# -
# ### Save plots
# +
now = datetime.datetime.now()
# .....................................
# depth-profile figures: one file per kernel, in every requested format
name_DP_ = save_dir_plots + '/' + now.strftime("%Y%m%d-%H%M%S") + '_DepthProfile_'
name_DP_ += kernel + '-blur_' + blur_pos +'Intensity_' + '-'.join([i.strip()
                                                                   for i in inp.split(',')])
for k in dimages.keys():
    name_DP = name_DP_ + '_kernel-order' + str(k[0]) +'-window-'+ str(k[1]) + '.'
    for t in type_plot:
        dimages[k].savefig(name_DP + t, dpi=300, transparent=False)

# .....................................
# penetration-depth figure
name_pen = save_dir_plots + '/' + now.strftime("%Y%m%d-%H%M%S") + '_PenetrationDepth_'
name_pen += kernel + '-blur_' + blur_pos + 'Intensity-' + '-'.join([i.strip()
                                                                    for i in inp.split(',')])
for t in type_plot:
    fig_Rpen.savefig(name_pen + '.' + t, dpi=300, transparent=False)
# -
| Additional_studies.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.9 64-bit ('3.9.9')
# language: python
# name: python3
# ---
# +
import re
import operator
import matplotlib.pyplot as plt
from wordcloud import WordCloud

from visualization_methods import VisualizationMethods as vm
from aggregation_methods import AggregationMethods as am
import data_methods as dm

# data set of Django Packages entries (apps/frameworks on GitHub/GitLab)
df = dm.get_valid_dataframe()
# +
# Pie charts: package-category split and hosting-platform split.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 10))
fig.set_facecolor('white')

categories = 'apps', 'frameworks'
platforms = 'github', 'gitlab'

sizes = [
    len(df[df['dp_category'] == categories[0]]),
    len(df[df['dp_category'] == categories[1]]),
]
vm.set_pie_by_ax(ax1, sizes, categories, 'Django Packages Categories')

sizes = [
    len(df[df['platform'] == platforms[0]]),
    len(df[df['platform'] == platforms[1]]),
]
vm.set_pie_by_ax(ax2, sizes, platforms, 'Django Packages Platforms')
plt.show()
# +
# Box plots of repository activity metrics.
# NOTE(review): the trailing True flags are positional options of
# set_boxplot_by_ax — see visualization_methods for their meaning.
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(27, 9))
vm.set_boxplot_by_ax(ax1, df['dp_usage_count'].values, 'Usage Count', True)
vm.set_boxplot_by_ax(ax2, df['repo_forks'].values, 'Forks Count', True)
vm.set_boxplot_by_ax(ax3, df['repo_open_issues'].values, 'Open Issues Count', True)
plt.show()
# +
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(18, 9))
# convert repo_size (bytes, per the variable names) to megabytes for plotting
size_bytes = df['repo_size'].values
size_kbytes = [b/1000 for b in size_bytes]
size_mbytes = [b/1000 for b in size_kbytes]
vm.set_boxplot_by_ax(ax1, df['repo_commits'].values, 'Commits Count', True)
vm.set_boxplot_by_ax(ax2, size_mbytes, 'Size (MBs)', True)
plt.show()
# +
fig, ax = plt.subplots(figsize=(20, 8))
vm.set_boxplot_by_ax(ax, df['repo_stars'].values, 'Stars Count', True, True)
plt.tight_layout()
plt.show()
# +
# Grid membership: pie of packages with/without grids + word cloud of names.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 10))
fig.set_facecolor('white')

labels = 'With grids', 'Without any grid'
sizes = [
    len(df[~df['dp_grids'].isnull()]),
    len(df[df['dp_grids'].isnull()]),
]
vm.set_pie_by_ax(ax1, sizes, labels, 'Django Packages Grids')

grid_concate, grids_freq = am.get_concate_and_freq(df[~df['dp_grids'].isnull()]['dp_grids'].values)
ax2.imshow(WordCloud().generate(grid_concate), interpolation='bilinear')
ax2.axis("off")
plt.show()
# +
# bar chart of the most frequent grid names
top = 25
top_grid_freq = {k[0]: k[1] for k in sorted(grids_freq.items(), key=operator.itemgetter(1), reverse=True)[:top]}
vm.show_bar_chart_by_dict(top_grid_freq, 'Words', 'Count', 'Grids Distribution', 'blue')
# +
# Repository topics: pie + word cloud, with uninformative topics filtered out.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 10))
fig.set_facecolor('white')

labels = 'With topics', 'Without any topic'
sizes = [
    len(df[~df['repo_topics'].isnull()]),
    len(df[df['repo_topics'].isnull()]),
]
vm.set_pie_by_ax(ax1, sizes, labels, 'Repository Topics')

# topics that carry no information for this analysis
unwanted_topics = [ 'django', 'python', 'django-application', 'django-framework' ]
# variants such as "django3", "python-3", "hacktoberfest2021"
unwanted_regex = [
    re.compile(r'^(django)((\d+)|(\-\d+)|\Z)', re.IGNORECASE),
    re.compile(r'^(python)((\d+)|(\-.*)|\Z)', re.IGNORECASE),
    re.compile(r'^(hacktoberfest)((\-\d+)|(\d+)|\Z)', re.IGNORECASE)
]
topics_concate, topics_freq = am.get_concate_and_freq(
    df[~df['repo_topics'].isnull()]['repo_topics'].values,
    unwanted_topics,
    unwanted_regex
)
ax2.imshow(WordCloud().generate(topics_concate), interpolation='bilinear')
ax2.axis("off")
plt.show()
# +
# bar chart of the most frequent topics
top = 25
top_repo_freq = {k[0]: k[1] for k in sorted(topics_freq.items(), key=operator.itemgetter(1), reverse=True)[:top]}
vm.show_bar_chart_by_dict(top_repo_freq, 'Topics', 'Count', 'Topics Distribution', 'red')
| visualization/django-packages.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# importing matplotlib
import matplotlib.pyplot as plt

# a range object (0..9)
x=range(10)
x
# +
# we can also create two lists per series: x/y coordinates
x1=[2,6,8,12]
y1=[3,7,13,20]
x2=[35,42,3,4]
y2=[8,45,34,2]
# -
# +
plt.xlabel('distance')           # x axis label
plt.ylabel('time')               # y axis label
plt.grid(c='green')              # to put grid lines
plt.plot(x1,y1,label="cars")
plt.plot(x2,y2,label="bikes")
plt.legend()                     # to show label of plot
# -
# #barplots#
#
plt.xlabel('time')
plt.ylabel('price')
plt.bar(x1,y1,label='apple',)
plt.plot(x1,y1,label='amazon')   # line plot drawn on top of the bars
plt.bar(x2,y2,label='microsoft')
plt.grid(c='red')
plt.legend()

# bar chart from categorical labels
players=["dhoni","sachin","virat"]
runs=[234,234,546]
plt.bar(players,runs)
plt.xlabel("players")
plt.ylabel("run")
plt.grid()
# ##scatter
#
# Scatter plots of the two data series.
plt.scatter(x1,y1)
plt.scatter(x2,y2)
plt.grid(c='y')
plt.scatter(x1,y1,marker='*',s=100)  # marker and size are optional
# BUGFIX: 'y2.marker=' was a typo (attribute access on the list, a runtime
# error); marker must be passed as a keyword argument.
plt.scatter(x2,y2,marker='^',s=120)
plt.grid(c='y')
# BUGFIX: plt.pie() expects numeric wedge sizes as its first argument; the
# player names are labels, so pass them via the labels= keyword.
plt.pie(runs,labels=players)
plt.xlabel("players")
plt.ylabel("run")
plt.grid()
x=[40,10]
y=[20,30]
# BUGFIX: the second positional argument of plt.pie() is 'explode', not a
# second data series; label the wedges of x with y instead.
plt.pie(x,labels=y)
# BUGFIX: the original ended with a dangling 'plt.' (a SyntaxError);
# show the figure instead.
plt.show()
| matplotlib-21-5-19.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Hacking - Day 1
# **ISTO É UM MARKDOWN**. Semelhante a outras linguagens de marcação, como HTML, o Markdown é um recurso do Jupyter Notebook que permite você colocar pedaços de texto junto com seu código.
#
# Assim como se fosse um documento de texto no **Word** ou **Google Docs**, o Markdown permite que você formate seu texto utilizando caracteres especiais. Confira abaixo como fazer algumas coisas em Markdown:
#
# ### Clique duas vezes sobre uma célula Markdown para ver e editar seu conteúdo
#
# # Cabeçalho 1
# ## Cabeçalho 2
# ### Cabeçalho 3
#
# **Para deixar um texto em negrito, basta colocar seu texto entre dois pares de `*`**
#
# *Para colocar um texto em itálico, coloque seu texto entre dois `*`*
#
# [Você também pode criar um link para uma determinada página](http://datahackers.com.br/)
#
# **Você também pode adicionar uma imagem em Markdown:**
# 
# ## Saiba mais sobre Markdown
# O Github tem um excelente tutorial com todos os principais comandos de Markdown. Não deixe de conferir para melhorar cada vez mais a forma como você pode enriquecer suas análises:
#
# **[CLIQUE AQUI PARA ACESSAR O SITE](https://guides.github.com/features/mastering-markdown/)**
# <center><h1>PROJETO I</h1></center>
# <center><h2>Analisando dados do Senado brasileiro</h2></center>
# 
#
# # Introdução
#
# Durante as análises que você conduzir, comece fazendo uma breve introdução ao seu trabalho. Gaste no máximo dois parágrafos para contextualizar o leitor sobre o que ele verá nas próximas linhas. **Utilize formatações como negrito para destacar pontos interessantes no texto**, mas não abuse deles. Uso de itálico para palavras estrangeiras também é importante para enriquecer seu texto.
#
#
# ## Motivação
#
# Após contextualizar o leitor no trecho acima, **utilize essa parte para explicar o motivo que fez você querer fazer essa análise**. Aqui você também pode ressaltar o que você espera alcançar até o fim desse estudo, por exemplo: pretendo identificar possíveis dados sujos e inconsistentes para depois gerar denúncias na Administração da Câmara dos Senadores, para que eles justifiquem o gasto, apresentando notas fiscais e documentos legais.
#
# Durante a motivação, compartilhe (se possível) o link onde você encontrou o dataset utilizado durante sua análise. Isso permitirá não só que outras pessoas baixem e executem suas próprias análises, como também permitirá entender com que tipo de dado você trabalhou.
#
# Exemplo:
# **Link para download do dataset**: [Clique aqui](https://www12.senado.leg.br/transparencia/dados-abertos-transparencia/dados-abertos-ceaps)
# Use comments to explain what your code is meant to do
import pandas as pd

# Read the data set (semicolon-separated, Latin-1 encoded, skipping the first row)
df = pd.read_csv('ceaps_dataset.csv', delimiter=';', encoding='latin1', skiprows=1)

# First five rows of the DataFrame
df.head()

# Show the last five rows
df.tail()

# Use dtypes to understand how pandas parsed each column
df.dtypes

# describe() returns summary statistics for every numeric column of the DataFrame
df.describe()
# ## Explicando sua linha de pensamento
#
# Colocar Markdown entre suas células é muito importante para explicar **o que pretende fazer nas células a seguir**, ou então explicar **o que você descobriu das células anteriores**. Essas partes em texto serão muito importantes para fazer com que um leitor não-técnico consiga entender tudo o que você está fazendo.
# # Divida sua análise em tópicos
#
# Divida parte das suas análises em tópicos ao criar cabeçalhos como esse e uma pequena descrição sobre o que você irá fazer nessa fase. Também pode ser interessante destacar problemas que você pode encontrar nos próximos passos. Confira o exemplo abaixo, onde nós vamos criar um tópico para limpeza de dados (Data Cleaning).
# # Limpeza de dados (Data Cleaning)
#
# O dataset do CEAPS (Cota para Exercício da Atividade Parlamentar dos Senadores) possui muitos dados sujos. Um dos problemas que encontraremos é a forma como valores em dinheiro é apresentado. Como no Brasil utilizamos vírgulas(`,`) ao invés de ponto (`.`), como no padrão americano, **precisarei substituir as vírgulas e converter a coluna de forma que o Pandas consiga interpretá-la como número**.
# The command below replaces every comma in VALOR_REEMBOLSADO with a dot
df['VALOR_REEMBOLSADO'] = df['VALOR_REEMBOLSADO'].str.replace(',', '.')

# After replacing the characters, convert VALOR_REEMBOLSADO to numeric
df['VALOR_REEMBOLSADO'] = pd.to_numeric(df['VALOR_REEMBOLSADO'])

# Check that pandas now interprets VALOR_REEMBOLSADO as float64
df.dtypes
# Com sucesso, consegui converter a coluna `VALOR_REEMBOLSADO` em número. Como podemos ver no código acima, o Pandas está interpretando essa coluna como `float64` corretamente.
#
# Continuando nossa limpeza, preciso fazer com a coluna `DATA` seja interpretada de forma correta.
# ## Alterando coluna de data
# +
# Se eu executar essa célula sem o código abaixo estar comentado, o Pandas irá apresentar um erro
# Descomente ele, caso queira ver o erro
#df['DATA'] = df['DATA'].str.replace('/', '-')
# -
# O erro é devido haver muitas datas em formatos inválidos, como anos de `5017`, `3016`, `2016`, dentre outras. **Para cada data inválida que o Pandas encontra ao executar o código**, precisaremos alterar o valor da célula errada.
# These are some of the incorrect date fragments; substitute them.
# NOTE(review): str.replace substitutes the pattern anywhere in the string,
# so broad patterns such as '200' -> '2016' could also alter valid day/month
# digits — verify against the raw data.
df['DATA'] = df['DATA'].str.replace('5017', '2017')
df['DATA'] = df['DATA'].str.replace('3016', '2016')
df['DATA'] = df['DATA'].str.replace('216', '2016')
df['DATA'] = df['DATA'].str.replace('200', '2016')
df['DATA'] = df['DATA'].str.replace('206', '2016')

# Now that the dates were corrected, convert the DATA column to datetime.
df['DATA'] = pd.to_datetime(df['DATA'])
# Esse é um exemplo sobre como você pode conduzir suas análises. Basicamente, você deve:
# * Dividir seu código em tópicos
# * Utilizar markdowns antes de seu código, explicando o que pretende fazer
# * Comentar o resultado de seus principais códigos, explicando que resultados conseguiu com eles
# # Funções para enriquecer sua análise
# Abaixo iremos ver algumas funções que podem ser úteis durante sua análise, que conhecemos durante nosso Bootcamp.
# ### Criando novas colunas com base no valor de outra
# A coluna `DATA` é uma coluna de tempo, nós temos acesso a diversas funções e métodos relacionados a datas. [Confira a documentação oficial do Pandas para conhecer as opções disponíveis para você utilizar](https://pandas.pydata.org/pandas-docs/version/0.23.4/api.html#datetimelike-properties). Por exemplo, poderíamos criar uma coluna com o **dia em que foi feito um reembolso**, utilizando o método `day`.
# Extract the day of the month from the (datetime) DATA column
df['DIA'] = df['DATA'].dt.day
# We can also create new columns, e.g. a binary flag marking whether a
# senator's reimbursement was greater than R$ 100.
# First, create the new column with every value set to 0
df['MAIOR_QUE_100'] = 0
# Then set it to 1 wherever VALOR_REEMBOLSADO is greater than 100
df.loc[df['VALOR_REEMBOLSADO'] > 100, 'MAIOR_QUE_100'] = 1
# First five rows of the updated DataFrame
df.head()
# # A função filter
# A função filter permite que você reordene as colunas do DataFrame de acordo com sua vontade.
#
# ## ATENÇÃO:
# Se você esquecer de colocar uma coluna ou colocar um nome errado, nenhum erro será mostrado. Sempre confira se todas as colunas que você quer estão no DataFrame após usar a função.
# Reorder the columns. filter() silently drops misspelled/missing names,
# so always check the result afterwards.
df = df.filter(['ANO', 'MES', 'DIA', 'SENADOR', 'TIPO_DESPESA', 'CNPJ_CPF', 'FORNECEDOR', 'DOCUMENTO', 'DATA', 'DETALHAMENTO', 'VALOR_REEMBOLSADO', 'MAIOR_QUE_100'])

# Verify the columns of our DataFrame
df.columns

# unique() shows every distinct value present in a column.
# tolist() only makes the output easier to read; it is not required
df['TIPO_DESPESA'].unique().tolist()
# As categorias de `TIPO_DESPESA` são muito longas. Podemos utilizar a função `.map()` para renomeá-las e deixar mais fácil de ler e compreender.
# +
# Map the long TIPO_DESPESA category descriptions to short labels.
mapper = {'Aluguel de imóveis para escritório político, compreendendo despesas concernentes a eles.': 'Aluguel de imóveis',
          'Aquisição de material de consumo para uso no escritório político, inclusive aquisição ou locação de software, despesas postais, aquisição de publicações, locação de móveis e de equipamentos. ': 'Aquisição de material',
          'Contratação de consultorias, assessorias, pesquisas, trabalhos técnicos e outros serviços de apoio ao exercício do mandato parlamentar': 'Consultoria',
          'Locomoção, hospedagem, alimentação, combustíveis e lubrificantes': 'Logística',
          'Passagens aéreas, aquáticas e terrestres nacionais': 'Passagem',
          'Divulgação da atividade parlamentar': 'Publicidade',
          'Serviços de Segurança Privada': 'Segurança'}
# NOTE(review): map() turns any TIPO_DESPESA value missing from `mapper`
# into NaN — confirm every category present in the data is covered.
df['TIPO_DESPESA'] = df['TIPO_DESPESA'].map(mapper)
# -

df.head()

# With the data clean, save the file
df.to_csv('dados_limpos.csv', index=False, encoding='latin1')
| projeto_gastos_politicos/.ipynb_checkpoints/1 - Data Cleaning-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import jax.numpy as np
from jax.config import config; config.update("jax_enable_x64", True)
from jax import jit, grad, jacfwd, jacrev
import numpy as onp
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import astropy.io.fits as pf
import xara
import xaosim as xs
from xaosim.pupil import PHARO
from tqdm import tqdm
import pickle, gzip
# %matplotlib inline
import matplotlib as mpl
mpl.style.use('seaborn-colorblind')
phasemap = mpl.cm.rainbow
phasemap.set_bad(color='k')
#To make sure we have always the same matplotlib settings
#(the ones in comments are the ipython notebook settings)
mpl.rcParams['figure.figsize']=(12.0,9.0) #(6.0,4.0)
mpl.rcParams['font.size']=20 #10
mpl.rcParams['savefig.dpi']= 200 #72
mpl.rcParams['axes.labelsize'] = 18
mpl.rcParams['axes.labelsize'] = 18
mpl.rcParams['xtick.labelsize'] = 14
mpl.rcParams['ytick.labelsize'] = 14
from matplotlib import rc
mpl.rcParams["font.family"] = "Times New Roman"
colours = mpl.rcParams['axes.prop_cycle'].by_key()['color']
from astropy import units as units
shift = np.fft.fftshift
fft = np.fft.fft2
ifft = np.fft.ifft2
fftfreq = np.fft.fftfreq
dtor = np.pi/180.0
import warnings
warnings.filterwarnings("ignore")
# -
tgt_cube = pf.getdata('tgt_cube.fits').astype('float64') # alpha Ophiuchi
ca2_cube = pf.getdata('ca2_cube.fits').astype('float64') # epsilon Herculis
pscale = 25.0e-3 # plate scale of the image in mas/pixels
wl = 2.145e-6 # central wavelength in meters (Hayward paper)
ISZ = tgt_cube.shape[1] # image size
# +
j = 0
plt.imshow(tgt_cube[j,:,:]**0.25)
# -
from xara.core import recenter
# +
PSZ = 128 # size of the array for the model
# pdiam = 4.978 # telescope diameter in meters
pdiam = 4.646 # telescope diameter in meters
pmask = PHARO(PSZ, PSZ/2, mask="med", ang=-2) # rotated!
ppscale = pdiam / PSZ
pscale = 0.0250
wl = 2.145e-6
# +
arcsec2rad = np.pi/180./3600.
from morphine.matrixDFT import minimal_dft
def calc_uv(image, pixelscale, pupil_diam, wavelength, npix=128, return_coords=False):
    """Compute the uv-plane (complex visibility) of an image with a matrix DFT.

    Parameters
    ----------
    image : 2D array
        Input image; cast to complex64 before the DFT.
    pixelscale : float
        Plate scale of the image in arcsec per pixel.
    pupil_diam : float
        Telescope pupil diameter in metres.
    wavelength : float
        Observing wavelength in metres.
    npix : int, optional
        Number of pixels across the output uv grid (default 128).
    return_coords : bool, optional
        If True, also return the (u, v) meshgrid spanning
        [-pupil_diam, pupil_diam] in metres.

    Returns
    -------
    2D complex array (the uv plane), optionally followed by the
    coordinate meshgrid when ``return_coords`` is True.
    """
    # Field of view in lambda/D units, doubled to cover the full
    # pupil-autocorrelation support of the uv plane.
    nlamd = arcsec2rad * pixelscale * image.shape[0] / wavelength * 2 * pupil_diam
    uv = minimal_dft(image.astype('complex64'), nlamd, npix)
    if return_coords:  # idiomatic truthiness test instead of `== True`
        x = np.linspace(-pupil_diam, pupil_diam, npix)
        coords = np.meshgrid(x, x)
        return uv, coords
    return uv
# -
pscale
# +
img = tgt_cube[j,:,:]
uv, coords = calc_uv(img,pscale,pdiam,wl,return_coords=True)
# -
plt.imshow(np.abs(uv)**0.25)
from morphine.morphine_core import phase_binary
# +
u, v = coords[0].ravel(),coords[1].ravel()
p = [200,30,25]
phases = phase_binary(u,v,wl,p)
# -
phases
plt.imshow(phases.reshape(uv.shape))
# ### Now let's connect it to Xara
kp_fname = "pharomorphine_96.kpi.gz"
kp_fname = 'pharomorphine_full_128.kpi.gz'
# kp_fname = 'pharomorphine_full_64.kpi.gz'
# kp_fname = "pharomorphine_full_64_rev.kpi.gz"
# kp_fname = 'pharomorphine_full_96.kpi.gz'
kp_fname = "pharomorphine_wide_64.kpi.gz"
kp_fname = "pharomorphine_vwide_64.kpi.gz"
kp_fname = "pharomorphine_vvwide_64.kpi.gz"
# kp_fname = 'pharomorphine_fov_64.kpi.gz'
kp_fname = 'pharomorphinefrantz.kpi.gz'
kp_fname = 'pharomorphinefrantz_new.kpi.gz'
# kp_fname = 'pharomorphinefrantz_128.kpi.gz'
kp_fname = 'pharomorphinefrantz_64bit_64pix.kpi.gz'
# kp_fname = "pharomorphinefrantz_64bit_65pix.kpi.gz"
# kp_fname = "pharomorphinefrantz_64bit_128pix.kpi.gz"
# kp_fname = "pharomorphine_vvwide_64.kpi.gz"
# kp_fname = "pharomorphinefrantz_64bit_96pix.kpi.gz"
kp_fname = 'pharomorphinefrantz_32bit_64pix.kpi.gz'
# kp_fname = 'pharomorphinefrantz_64bit_64pix.kpi.gz'
# kp_fname = 'pharomorphinefrantz2_32bit_64pix.kpi.gz'
pscale
# +
offset = 0.5
myf = gzip.open(kp_fname, "r")
stuff = pickle.load(myf)
myf.close()
c = xara.KPO(fname=kp_fname,offset=offset)#
kpo1 = c.copy()#xara.KPO(fname=kp_fname,offset=offset)
# kpo2 = xara.KPO(fname="p3k_med_grey_model.fits")
# kpo2 = kpo1.copy()
kpo2 = c.copy()#xara.KPO(fname=kp_fname,offset=offset)
kpo_frantz = xara.KPO(fname="p3k_med_grey_model.fits")
kpo1.extract_KPD_single_cube(
tgt_cube, pscale*1000, wl,target="alpha Ophiuchi", recenter=True)
kpo2.extract_KPD_single_cube(
ca2_cube, pscale*1000, wl, target="epsilon Herculis", recenter=True)
# +
myf = gzip.GzipFile(kp_fname, "r")
data = pickle.load(myf)
myf.close()
kernel = data['KerPhi']
support = data['support']
# -
plt.imshow(support==1)
# +
# ### try my approach to extracting manually
# science = []
# calib = []
# npix = 65
# npix= 129
# npix= 64
# calib_vis = []
# for j in tqdm(range(100)):
# # img = recenter(tgt_cube[j,:,:],verbose=False)
# img = tgt_cube[j,:,:]
# uv, coords = calc_uv(img,pscale,pdiam,wl,return_coords=True,npix=npix)
# phases = onp.angle(uv)
# science.append(np.dot(kernel,phases[support==1]))
# for j in tqdm(range(100)):
# # img = recenter(ca2_cube[j,:,:],verbose=False)
# img = ca2_cube[j,:,:]
# uv, coords = calc_uv(img,pscale,pdiam,wl,return_coords=True,npix=npix)
# phases = onp.angle(uv)
# calib_vis.append(onp.abs(uv)[support==1])
# calib.append(np.dot(kernel,phases[support==1]))
# science = np.array(science)
# calib = np.array(calib)
# calib_vis = np.array(calib_vis)
# +
# img = tgt_cube[j,:,:]
# uv, coords = calc_uv(img,pscale,pdiam,wl,return_coords=True,npix=npix)
# dummy = onp.angle(uv)
# dummy[support==0] = onp.nan
# plt.imshow(dummy)
# +
# data1 = np.median(science, axis=0)
# data2 = np.median(calib, axis=0)
# calib_vis = np.median(calib_vis,axis=0)
# mydata = data1 - data2
# myerr = np.sqrt(np.var(science, axis=0) / (science[0].shape[0] - 1) + np.var(calib, axis=0) / (calib[0].shape[0] - 1))
# myerr = np.sqrt(myerr**2) + 0.01365
# +
# data1 = np.array(kpo1.KPDT)[0]
# data2 = np.array(kpo2.KPDT)[0]
# mydata = np.median(data1, axis=0) - np.median(data2, axis=0)
# myerr = np.sqrt(np.var(data1, axis=0) / (kpo1.KPDT[0].shape[0] - 1) + np.var(data2, axis=0) / (kpo2.KPDT[0].shape[0] - 1))
# myerr = np.sqrt(myerr**2 + 0.015**2)
# +
data1 = np.array(kpo1.KPDT)[0]
data2 = np.array(kpo2.KPDT)[0]
mydata = np.median(data1, axis=0) - np.median(data2, axis=0)
myerr = np.sqrt(np.var(data1, axis=0) / (kpo1.KPDT[0].shape[0] - 1) + np.var(data2, axis=0) / (kpo2.KPDT[0].shape[0] - 1))
# myerr = np.sqrt(myerr**2 + 1.2**2)
# +
data1 = np.array(kpo1.KPDT)[0]
data2 = np.array(kpo2.KPDT)[0]
myerr = np.sqrt(np.var(data1, axis=0) / (kpo1.KPDT[0].shape[0] - 1) + np.var(data2, axis=0) / (kpo2.KPDT[0].shape[0] - 1))
# myerr = np.sqrt(myerr**2 + 0.0132**2)
# myerr = np.sqrt(0.0132**2)
data1 = np.median(data1,axis=0)
data2 = np.median(data2,axis=0)
mydata = data1 - data2
# -
plt.plot(mydata)
myerr
plt.hist(mydata,bins=50);
# +
print("\ncomputing colinearity map...")
gsize = 100 # gsize x gsize grid
gstep = 10 # grid step in mas
xx, yy = np.meshgrid(
np.arange(gsize) - gsize/2, np.arange(gsize) - gsize/2)
azim = -np.arctan2(xx, yy) * 180.0 / np.pi
dist = np.hypot(xx, yy) * gstep
#mmap = kpo1.kpd_binary_match_map(100, 10, mydata/myerr, norm=True)
mmap_raw = kpo1.kpd_binary_match_map(100, 10, data1, norm=True)
mmap_calib = kpo1.kpd_binary_match_map(100, 10, data2, norm=True)
mmap = kpo1.kpd_binary_match_map(100, 10, mydata, norm=True)
x0, y0 = np.argmax(mmap) % gsize, np.argmax(mmap) // gsize
print("max colinearity found for sep = %.2f mas and ang = %.2f deg" % (
dist[y0, x0], azim[y0, x0]))
# -
vmin, vmax = np.min([mmap_raw,mmap_calib,mmap]), np.max([mmap_raw,mmap_calib,mmap])
# +
f1, (ax1,ax2,ax3) = plt.subplots(1,3,figsize=(16,5),sharey=True)
f1.subplots_adjust(right=0.8)
ax1.imshow(mmap_raw/mmap_raw.max(), extent=(
gsize/2*gstep, -gsize/2*gstep, -gsize/2*gstep, gsize/2*gstep))
ax1.set_xlabel("right ascension (mas)")
ax1.set_ylabel("declination (mas)")
ax1.plot([0,0], [0,0], "w*", ms=16)
ax1.set_title("α Oph - Raw",y=1.01)
ax1.grid()
ax2.imshow(mmap_calib/mmap_calib.max(), extent=(
gsize/2*gstep, -gsize/2*gstep, -gsize/2*gstep, gsize/2*gstep))
ax2.set_xlabel("right ascension (mas)")
# # ax2.set_ylabel("declination (mas)")
ax2.plot([0,0], [0,0], "w*", ms=16)
ax2.set_title("ε Her",y=1.01)
ax2.grid()
im = ax3.imshow(mmap/mmap.max(), extent=(
gsize/2*gstep, -gsize/2*gstep, -gsize/2*gstep, gsize/2*gstep))
ax3.set_xlabel("right ascension (mas)")
# ax3.set_ylabel("declination (mas)")
ax3.plot([0,0], [0,0], "w*", ms=16)
ax3.set_title("Calibrated α Oph",y=1.01)
ax3.grid()
f1.subplots_adjust(right=0.8,wspace=0.1)
cb_ax = f1.add_axes([0.81, 0.165, 0.02, 0.675])
cbar = f1.colorbar(im, cax=cb_ax)
# f1.set_tight_layout(True)
plt.savefig('../paper/colinearity_alphaoph.pdf', bbox_inches='tight')
# -
# +
from scipy.optimize import leastsq, minimize
def binary_model(params, kpo):
    """Complex visibilities of a binary-star model on the KPO's uv grid.

    params = (separation, position angle, contrast); the detector
    position angle is fixed at zero. Returns the complex visibility
    vector from xara's jax-enabled binary model.
    """
    uv_coords = kpo.kpi.UVC
    cwavel = kpo.CWAVEL
    return xara.core.cvis_binary_jax(
        uv_coords[:, 0], uv_coords[:, 1], cwavel, params, 0)
def ben_binary_model_fit_residuals(params, kpo=kpo1, index=0, obs="KERNEL", err=1.0):
    """Error-normalised residuals between the observed kernel phases
    (module-level `mydata`) and a binary model, in the form expected by
    scipy.optimize.leastsq."""
    cvis = binary_model(params, kpo)
    kp_model = np.array(kpo.kpi.KPM).dot(np.angle(cvis))
    return np.array((mydata - kp_model) / err)
def ben_binary_model_fit_chi2(params):
    """Chi-squared of the binary model against the module-level data,
    using the module-level `myerr` uncertainties."""
    res = ben_binary_model_fit_residuals(
        params, kpo=kpo1, index=0, obs="KERNEL", err=myerr)
    return np.sum(np.abs(res) ** 2)
from jax import jit
chi2_jac = jacrev(ben_binary_model_fit_chi2)
chi2_jac_np = lambda x:onp.array(jit(chi2_jac)(x))
def ben_binary_model_fit(p0, kpo=kpo1, index=0, obs='KERNEL', err=myerr):
    """Least-squares fit of the binary parameters starting from `p0`.

    Returns the full scipy.optimize.leastsq output tuple:
    (best-fit params, covariance, infodict, message, status).
    """
    extra_args = (kpo, index, obs, err)
    return leastsq(ben_binary_model_fit_residuals, p0,
                   args=extra_args, full_output=1)
def get_chi2(addederror):
    """Reduced chi-squared of the module-level `mydata` against
    `ker_theo`, with `addederror` added in quadrature to `myerr`."""
    total_err = np.sqrt(myerr ** 2 + addederror ** 2)
    residuals = (mydata - np.array(ker_theo)) / total_err
    return np.sum(residuals ** 2) / (1.0 * kpo1.kpi.nbkp)
def add_error():
    """Solve for the extra error term that brings the reduced chi^2 to 1.

    Minimises |chi2(addederror) - 1| with BFGS from a starting guess of
    0.0025 and returns the optimiser's solution array.

    NOTE(review): the abs() objective is non-smooth at its minimum; BFGS
    with numerical gradients still converges here in practice, but a
    bracketing scalar solver would be more robust — TODO confirm.
    """
    def get_objective(addederror):
        return np.abs(get_chi2(addederror) - 1.)
    # Removed the unused `jac = grad(get_objective)` local — it was
    # never passed to minimize(), so BFGS uses numerical differences.
    return minimize(get_objective, 0.0025, method='BFGS')['x']
# +
print("\nbinary model fitting...")
# p0 = params0 # good starting point
p0 = [dist[y0, x0], azim[y0, x0], mmap.max()] # good starting point
p0[1] = np.mod(p0[1],360.)
wl = kpo1.CWAVEL
# mfit = a.binary_model_fit(p0)
mfit = ben_binary_model_fit(p0,kpo=kpo1,err=myerr)
p1 = mfit[0] # the best fit parameter vector (sep, P.A., contrast)
# p1 = mfit['x']
p1[1] = np.mod(p1[1],360.)
# p1 = p0
# p1 = p0
cvis_b = xara.core.cvis_binary(
kpo1.kpi.UVC[:,0], kpo1.kpi.UVC[:,1], wl, p1) # binary
ker_theo = kpo1.kpi.KPM.dot(np.angle(cvis_b))
added_error = add_error()
print('added error',added_error)
this_error = np.sqrt(myerr**2+added_error**2)
mfit = ben_binary_model_fit(p0,kpo=kpo1,err=this_error)
p2 = np.array(p1)#+np.sqrt(np.diag(mfit[1]))
cvis_b = xara.core.cvis_binary(
kpo1.kpi.UVC[:,0], kpo1.kpi.UVC[:,1], wl, p2) # binary
ker_theo = kpo1.kpi.KPM.dot(np.angle(cvis_b))
fig = plt.figure(figsize=(6,6))
ax = fig.add_subplot(111)
ax.errorbar(ker_theo, mydata, yerr=this_error, fmt="none", ecolor='c')
ax.plot(ker_theo, mydata, 'b.')
mmax = (np.abs(mydata).max())
ax.plot([-mmax,mmax],[-mmax,mmax], 'r')
ax.set_ylabel("data kernel-phase")
ax.set_xlabel("model kernel-phase")
ax.set_title('kernel-phase correlation diagram')
ax.axis("equal")
# ax.axis([-11, 11, -11, 11])
fig.set_tight_layout(True)
if myerr is not None:
chi2 = np.sum(((mydata - ker_theo)/(this_error))**2) / kpo1.kpi.nbkp
else:
chi2 = np.sum(((mydata - ker_theo))**2) / kpo1.kpi.nbkp
print("sep = %3f, ang=%3f, con=%3f => chi2 = %.3f" % (p1[0], p1[1], p1[2], chi2))
print("correlation matrix of parameters")
# hess_inv = mfit['hess_inv']
hess_inv = mfit[1]
print(np.round(hess_inv, 2))
print('Estimated Uncertainty')
print(np.sqrt(np.diag(hess_inv)))
# -
#
# +
fig = plt.figure(figsize=(8,8))
plt.scatter(kpo1.kpi.VAC[:,0],kpo1.kpi.VAC[:,1],c=kpo1.kpi.VAC[:,2],s=4)
plt.scatter(kpo_frantz.kpi.VAC[:,0],kpo_frantz.kpi.VAC[:,1],c='r')
plt.axis('equal')
# +
fig = plt.figure(figsize=(8,8))
plt.scatter(kpo1.kpi.UVC[:,0],kpo1.kpi.UVC[:,1],c=colours[0])
plt.scatter(kpo_frantz.kpi.UVC[:,0],kpo_frantz.kpi.UVC[:,1],s=12,c='r')
plt.axis('equal')
# +
fig, (ax1,ax2) = plt.subplots(1,2,figsize=(16,8))
ax1.scatter(kpo1.kpi.VAC[:,0],kpo1.kpi.VAC[:,1],c=kpo1.kpi.VAC[:,2],s=4,label='64 pix')
ax1.scatter(kpo_frantz.kpi.VAC[:,0],kpo_frantz.kpi.VAC[:,1],c='r',label='M+2020')
plt.legend(fontsize=18,frameon=False)
ax1.set_title('Pupil',fontsize=18)
ax1.axis('equal')
ax2.scatter(kpo1.kpi.UVC[:,0],kpo1.kpi.UVC[:,1],c=colours[0],label='64 pix')
ax2.scatter(kpo_frantz.kpi.UVC[:,0],kpo_frantz.kpi.UVC[:,1],s=12,label='M+2020',c='r')
plt.legend(fontsize=18,frameon=False)
ax2.set_title(r'$u,v$',fontsize=18)
ax2.axis('equal')
# plt.savefig('/Users/benjaminpope/code/morphine/paper/pupil_comparison.pdf',bbox_inches='tight')
# -
kpo1.kpd = mydata
# +
# kpo1.plot_uv_map()
# -
xymax = 4.0
figsize=(12,6)
plot_redun = False
cmap=cm.gray
ssize=12
lw=0
alpha=1.0
marker='s'
kpo1.kpi.plot_pupil_and_uv(ssize=4);
# ### Now let's try mcmc
import emcee
# +
paramlimits=[40,250,0,360,1.1,50.]
def kp_loglikelihood(params, kpo):
    """Gaussian log-likelihood (up to a constant): minus the sum of
    squared, error-normalised kernel-phase residuals between the
    module-level `mydata` and a binary model."""
    cvis = binary_model(params, kpo)
    kp_model = kpo.kpi.KPM.dot(np.angle(cvis))
    normalised = (mydata - kp_model) / this_error
    return -np.sum(normalised ** 2)
def lnprior(params):
    """Log-prior for the MCMC: log-uniform in separation and contrast
    within the box given by the module-level `paramlimits`
    [sep_min, sep_max, pa_min, pa_max, con_min, con_max]; -inf outside."""
    sep, pa, con = params[0], params[1], params[2]
    in_bounds = (paramlimits[0] < sep < paramlimits[1]
                 and paramlimits[2] < pa < paramlimits[3]
                 and paramlimits[4] < con < paramlimits[5])
    if in_bounds:
        return -np.log(sep) - np.log(con)
    return -np.inf
def lnprob(params, kpo):
    """Log-posterior = log-prior + log-likelihood, for emcee."""
    prior = lnprior(params)
    return prior + kp_loglikelihood(params, kpo)
ndim=3
nwalkers=100
plot=False
burnin=100
nsteps=1000
import time
# -
p1
# +
ivar = np.array(p1) # initial parameters for model-fit
ball = np.array([ivar + 0.1*ivar*onp.random.rand(ndim) for i in range(nwalkers)]) # initialise walkers in a ball
print('Running emcee now!')
t0 = time.time()
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[kpo1])
# burn in
pos,prob,state = sampler.run_mcmc(ball, burnin)
sampler.reset()
t1 = time.time()
print('Burnt in! Took %.3f seconds' %(t1-t0))
# +
# restart
sampler.run_mcmc(pos,nsteps)
tf = time.time()
print('Time elapsed = %.3f s' %(tf-t0))
seps = sampler.flatchain[:,0]
ths = sampler.flatchain[:,1]
meansep = np.mean(seps)
dsep = np.std(seps)
meanth = np.mean(ths)
dth = np.std(ths)
cs = sampler.flatchain[:,2]
bestcon = np.mean(cs)
conerr = np.std(cs)
print('Separation %.3f pm %.3f mas' % (meansep,dsep))
print('Position angle %.3f pm %.3f deg' % (meanth,dth))
print('Contrast at',wl,'um %.3f pm %.3f' % (bestcon,conerr))
# -
meansep
import corner
# Plot it.
figure = corner.corner(sampler.flatchain, labels=[r"$\rho$", r"$\theta$", r"$c$",],
quantiles=[0.16, 0.5, 0.84],
show_titles=True, title_kwargs={"fontsize": 12})
onp.savetxt('autodiff_128.txt',sampler.flatchain)
# ### What should we expect from Hinkley?
#
# +
Kp_A = 1.68
dKp_A = 0.21
Kp_B = 5.25
dKp_B = 0.236
# -
point_est = 100**((Kp_B-Kp_A)/5.)
nsim = 10000
Kps_A = Kp_A + dKp_A*onp.random.randn(nsim)
Kps_B = Kp_B + dKp_B*onp.random.randn(nsim)
contrasts_hinkley = 100**((Kps_B-Kps_A)/5.)
print('Hinkley contrast:',np.mean(contrasts_hinkley),'pm',np.std(contrasts_hinkley))
h = plt.hist(contrasts_hinkley,bins=100);
plt.axvline(point_est,color=colours[2])
# ## Now try simulated data
# +
def shift(im, dx, dy):
    """Sub-pixel shift of a 2D image via the Fourier-shift theorem.

    Shifts `im` by `dx` pixels along axis 0 and `dy` along axis 1
    (periodic boundary conditions) and returns the real part of the
    inverse transform.

    NOTE: this rebinds the module-level name `shift` (earlier aliased to
    np.fft.fftshift); everything below this cell gets the image-shift
    version.
    """
    # Bug fix: `fourier_shift` was never imported anywhere in this
    # notebook and would raise NameError at call time.
    from scipy.ndimage import fourier_shift
    spectrum = onp.fft.fft2(im)
    shifted_spectrum = fourier_shift(spectrum, shift=(dx, dy))
    return onp.real(onp.fft.ifft2(shifted_spectrum))
def sim_binary(im, sep, th, con, pscale):
    """Inject a fainter companion into image `im`.

    sep    : separation in mas
    th     : position angle in degrees
    con    : contrast ratio (primary / secondary)
    pscale : plate scale in mas per pixel
    """
    angle = -np.pi * th / 180.0
    offset_pix = sep / pscale
    dx = offset_pix * np.cos(angle)
    dy = offset_pix * np.sin(angle)
    companion = shift(im, dx, dy) / con
    return im + companion
def preprocess_like(data,kpo):
    # Reproduce the xara preprocessing chain on a raw frame: estimate the
    # image origin, window it, then recenter and cut out a 128x128 image.
    isz = 64  # working sub-image size (pixels)
    xsz,ysz = data.shape
    wrad = 50  # super-Gaussian window radius (pixels)
    pscale = 25.0 # plate scale (mas)
    cwavel = kpo.CWAVEL # central wavelength
    m2pix = xara.core.mas2rad(pscale)*isz/cwavel # Fourier scaling
    tdiam = 5.0 # telescope diameter (m)
    spix = xara.core.rad2mas(cwavel/tdiam)/pscale # image sampling (pixels)
    # First origin estimate on the full frame (barycenter algorithm).
    (x0, y0) = xara.core.determine_origin(data, mask=None,
                                          algo="BCEN", verbose=False,
                                          wmin=2.0*spix)
    x1, y1 = int(x0-isz/2), int(y0-isz/2)
    img = data[y1:y1+isz, x1:x1+isz] # image is now (isz x isz)
    dy, dx = (y0-ysz/2), (x0-xsz/2)
    # Refined origin estimate inside a super-Gaussian window.
    sgmask = xara.core.super_gauss(isz, isz, isz/2, isz/2, wrad)
    (x0, y0) = xara.core.determine_origin(img, mask=sgmask,
                                          algo="BCEN", verbose=False,
                                          wmin=2.0*spix)
    # NOTE(review): the cropped `img`, `m2pix`, `dy`/`dx` and the refined
    # (x0, y0) above are computed but never used — the line below
    # recenters the *full* frame instead. This looks like dead code left
    # from an earlier version; confirm against the intended pipeline.
    img = xara.core.recenter(data,verbose=False)
    nx,ny = img.shape
    limsx = int(nx/2-64), int(nx/2+64)
    limsy = int(ny/2-64), int(ny/2+64)
    img = img[limsx[0]:limsx[1],limsy[0]:limsy[1]] # from 512x512 -> 128x128
    return img
# -
(xara.core.super_gauss(64, 64, 64/2, 64/2, 50))
# +
from scipy.optimize import leastsq, minimize
def simulate(truth,cal):
    """End-to-end recovery test: inject a synthetic binary into a
    calibrator frame, extract calibrated kernel phases, and fit the
    binary parameters back out.

    Parameters
    ----------
    truth : list [sep_mas, pa_deg, contrast]
        Injected binary parameters. NOTE(review): `truth` is mutated in
        place below (p0 = truth; p0[1] = ...), so the caller's list is
        modified — confirm this is intentional.
    cal : 2D array
        Calibrator image to inject the companion into.

    Relies on module-level globals: `c` (template KPO), `kpo1`, `calib`
    (median calibrator kernel phases), `kpo_cal`, `pscale`.

    Returns
    -------
    (p2, sigma) : best-fit parameters after the added-error refit and
    their uncertainties from the diagonal of the covariance matrix.
    """
    # Inject the companion at a plate scale of 25 mas/pixel.
    binary = sim_binary(cal,*truth,25.0)
    # img_sim = preprocess_like(binary,b)
    img_sim = binary
    a = c.copy()
    wl = kpo1.CWAVEL
    a.extract_KPD_single_frame(
        img_sim, pscale*1000, wl, recenter=True,method='LDFT1')
    # Calibrate the simulated kernel phases against the calibrator median;
    # these locals shadow the module-level `mydata`/`myerr`.
    mydata = a.KPDT[0][0] - calib
    myerr = np.std(kpo_cal.KPDT[0],axis=0)
    # Local re-definitions of the fitting machinery, closing over the
    # simulated `mydata`/`myerr` rather than the observed data.
    def binary_model(params,kpo):
        # Binary complex visibilities on this KPO's uv grid.
        u = kpo.kpi.UVC[:,0]
        v = kpo.kpi.UVC[:,1]
        wl = kpo1.CWAVEL
        detpa = 0
        return(xara.core.cvis_binary_jax(u,v,wl, params, detpa))
    def ben_binary_model_fit_residuals(params,kpo=a,index=0,obs="KERNEL",err=1.0):
        # Error-normalised kernel-phase residuals for leastsq.
        temp = binary_model(params,kpo)
        model = np.array(kpo.kpi.KPM).dot(np.angle(temp))
        error = mydata-model
        error /= (err)
        return np.array(error)
    def ben_binary_model_fit_chi2(params):
        # Scalar chi-squared of the residuals.
        residuals = ben_binary_model_fit_residuals(params,kpo=a,index=0,obs="KERNEL",err=myerr)
        chi2 = np.sum(np.abs(residuals)**2)
        return chi2
    from jax import jit
    # NOTE(review): the autodiff jacobian below is built but unused —
    # the minimize() call that consumed it is commented out.
    chi2_jac = jacrev(ben_binary_model_fit_chi2)
    chi2_jac_np = lambda x:onp.array(jit(chi2_jac)(x))
    def ben_binary_model_fit(p0,kpo=a,index=0,obs='KERNEL',err=myerr):
        # Least-squares fit; returns the full leastsq output tuple.
        soluce = leastsq(ben_binary_model_fit_residuals,
                         p0, args=((kpo,index, obs,err)), full_output=1)
        # soluce = minimize(ben_binary_model_fit_chi2,p0,method='BFGS',jac=chi2_jac_np)
        # p1 = soluce['x'][0] # the best fit parameter vector (sep, P.A., contrast)
        return soluce
    print("\nbinary model fitting...")
    # p0 = params0 # good starting point
    # p0 = [dist[y0, x0], azim[y0, x0], mmap.max()] # good starting point
    # Start the fit from the injected truth (mutates the caller's list).
    p0 = truth
    p0[1] = np.mod(p0[1],360.)
    wl = a.CWAVEL
    # mfit = a.binary_model_fit(p0)
    # First pass: fit with the raw scatter-based errors.
    mfit = ben_binary_model_fit(p0,kpo=a,err=myerr)
    p1 = mfit[0] # the best fit parameter vector (sep, P.A., contrast)
    # p1 = mfit['x']
    p1[1] = np.mod(p1[1],360.)
    # p1 = p0
    # p1 = p0
    cvis_b = xara.core.cvis_binary(
        a.kpi.UVC[:,0], a.kpi.UVC[:,1], wl, p1) # binary
    ker_theo = a.kpi.KPM.dot(np.angle(cvis_b))
    def get_chi2(addederror):
        # Reduced chi^2 with an extra error term added in quadrature.
        return np.sum(((mydata - np.array(ker_theo))/np.sqrt(myerr**2+addederror**2))**2) / (1.0*a.kpi.nbkp)
    def add_error():
        # Find the added error that brings the reduced chi^2 to unity.
        def get_objective(addederror):
            return np.abs(get_chi2(addederror)-1.)
        jac = grad(get_objective)
        return minimize(get_objective,0.0025,method='BFGS')['x']
    added_error = add_error()
    this_error = np.sqrt(myerr**2+added_error**2)
    # Second pass: refit with the inflated error bars.
    mfit = ben_binary_model_fit(p0,kpo=a,err=this_error)
    p2 = mfit[0]
    cvis_b = xara.core.cvis_binary(
        a.kpi.UVC[:,0], a.kpi.UVC[:,1], wl, p2) # binary
    ker_theo = a.kpi.KPM.dot(np.angle(cvis_b))
    # Diagnostic plot: model vs data kernel phases (side effect: opens a
    # matplotlib figure per call).
    fig = plt.figure(figsize=(6,6))
    ax = fig.add_subplot(111)
    ax.errorbar(ker_theo, mydata, yerr=this_error, fmt="none", ecolor='c')
    ax.plot(ker_theo, mydata, 'b.')
    mmax = (np.abs(mydata).max())
    ax.plot([-mmax,mmax],[-mmax,mmax], 'r')
    ax.set_ylabel("data kernel-phase")
    ax.set_xlabel("model kernel-phase")
    ax.set_title('kernel-phase correlation diagram')
    ax.axis("equal")
    # ax.axis([-11, 11, -11, 11])
    fig.set_tight_layout(True)
    if myerr is not None:
        chi2 = np.sum(((mydata - ker_theo)/(this_error))**2) / a.kpi.nbkp
    else:
        chi2 = np.sum(((mydata - ker_theo))**2) / a.kpi.nbkp
    print("sep = %3f, ang=%3f, con=%3f => chi2 = %.3f" % (p1[0], p1[1], p1[2], chi2))
    print("correlation matrix of parameters")
    # hess_inv = mfit['hess_inv']
    hess_inv = mfit[1]
    print(np.round(hess_inv, 2))
    print('Estimated Uncertainty')
    print(np.sqrt(np.diag(hess_inv)))
    return p2, np.sqrt(np.diag(hess_inv))
# +
truth = [300,90,20]
cal = ca2_cube[75,:,:]
simulate(truth,cal)
# +
# # %%time
seps_out, thetas_out, cons_out = [], [], []
dseps_out, dthetas_out, dcons_out = [], [], []
kpo_cal = c.copy()
kpo_cal.extract_KPD_single_cube(
ca2_cube[50:,:,:], pscale*1000, wl, target="epsilon Herculis", recenter=True,method='LDFT1')
calib = np.median(kpo_cal.KPDT[0],axis=0)
ss, tt, cc = [], [], []
seps_in = np.linspace(90,300,50)
for j, sep_in in enumerate(tqdm(seps_in)):
cal = ca2_cube[0,:,:]
truth = [sep_in,90,25]
p1, uncertainty = simulate(truth,cal)
ss.append(p1[0])
tt.append(p1[1])
cc.append(p1[2])
seps_out.append(p1[0])
thetas_out.append(p1[1])
cons_out.append(p1[2])
dseps_out.append(uncertainty[0])
dthetas_out.append(uncertainty[1])
dcons_out.append(uncertainty[2])
# except:
# print('Failed on',j)
# seps_out.append(np.nan)
# thetas_out.append(np.nan)
# cons_out.append(np.nan)
# dseps_out.append(np.nan)
# dthetas_out.append(np.nan)
# dcons_out.append(np.nan)
seps_out, thetas_out, cons_out = np.array(seps_out), np.array(thetas_out), np.array(cons_out)
dseps_out, dthetas_out, dcons_out = np.array(dseps_out), np.array(dthetas_out), np.array(dcons_out)
# +
fig, (ax1,ax2,ax3) = plt.subplots(1,3,figsize=(16.0,4.0))
inds = np.arange(len(seps_in))
ax1.plot(seps_in,seps_out-seps_in,'.')
# ax1.plot(seps_in,seps_in,'--')
ax1.axhline(0,color='k',linestyle='--',alpha=0.5)
ax1.errorbar(seps_in,seps_out-seps_in,yerr=dseps_out,ls='none',color=colours[0])
# ax1.plot(seps_in,seps_in,'--k',alpha=0.5)
ax2.plot(seps_in,thetas_out,'.')
ax2.axhline(truth[1],color='k',linestyle='--',alpha=0.5)
ax2.errorbar(seps_in,thetas_out,yerr=dthetas_out,ls='none',color=colours[0])
ax3.plot(seps_in,cons_out,'.')
ax3.errorbar(seps_in,cons_out,yerr=dcons_out,ls='none',color=colours[0])
ax3.axhline(truth[2],color='k',linestyle='--',alpha=0.5)
# +
fig, (ax1,ax2,ax3) = plt.subplots(3,1,figsize=(6.0,16.0),sharex=True)
ax1.plot(seps_in,seps_out,'-')
ax1.fill_between(angles,mseps_out,pseps_out,alpha=0.5)
ax1.axvline(45,color=colours[2])
ax1.axvline(45+180,linestyle='--',color=colours[2])
ax1.set_ylabel('Separation (mas)')
# ax1.set_xlabel('Misalignment (deg)')
ax1.set_xticks([])
ax1.set_xlim(angles.min(),angles.max())
ax1.axhline(truth[0],color='k',linestyle='--',alpha=0.5)
# ax1.plot(seps_in,seps_in,'--k',alpha=0.5)
ax2.plot(angles,thetas_out,'-')
ax2.axhline(truth[1],color='k',linestyle='--',alpha=0.5)
ax2.fill_between(angles,mthetas_out,pthetas_out,alpha=0.5)
ax2.axvline(45,color=colours[2])
ax2.axvline(45+180,linestyle='--',color=colours[2])
ax2.set_ylabel('Position Angle (deg)')
# ax3.set_xlabel('Misalignment (deg)')
ax2.set_xticks([])
# ax2.set_yticks(np.arange(260,272))
ax3.plot(angles,cons_out,'-')
ax3.axhline(truth[2],color='k',linestyle='--',alpha=0.5)
ax3.fill_between(angles,mcons_out,pcons_out,alpha=0.5)
ax3.axvline(45,color=colours[2])
ax3.axvline(45+180,linestyle='--',color=colours[2])
ax3.set_ylabel('Contrast')
ax3.set_xlabel('Misalignment (deg)')
# ax3.set_xticks([0,45,90,135,180,225,270,315])
plt.subplots_adjust(wspace=0, hspace=0)
# plt.savefig('misalignment.png',bbox_inches='tight')
# plt.savefig('misalignment.pdf',bbox_inches='tight')
# +
fig, (ax1,ax2,ax3) = plt.subplots(3,1,figsize=(6.0,16.0),sharex=True)
inds = np.arange(len(seps_in))
ax1.plot(seps_in,seps_out-seps_in,'.')
# ax1.plot(seps_in,seps_in,'--')
ax1.axhline(0,color='k',linestyle='--',alpha=0.5)
ax1.errorbar(seps_in,seps_out-seps_in,yerr=dseps_out,ls='none',color=colours[0])
ax1.set_xticks([])
ax1.set_ylabel('Recovered Separation (mas)')
# ax1.plot(seps_in,seps_in,'--k',alpha=0.5)
ax2.plot(seps_in,thetas_out,'.')
ax2.axhline(truth[1],color='k',linestyle='--',alpha=0.5)
ax2.errorbar(seps_in,thetas_out,yerr=dthetas_out,ls='none',color=colours[0])
ax2.set_xticks([])
ax2.set_ylabel('Recovered Position Angle (deg)')
ax3.plot(seps_in,cons_out,'.')
ax3.errorbar(seps_in,cons_out,yerr=dcons_out,ls='none',color=colours[0])
ax3.axhline(truth[2],color='k',linestyle='--',alpha=0.5)
ax3.set_xticks(np.arange(100,350,50))
ax3.set_ylabel('Recovered Contrast')
ax3.set_xlabel('True Separation (mas)')
plt.subplots_adjust(wspace=0, hspace=0)
# -
to_save = np.array([seps_out, thetas_out, cons_out,dseps_out, dthetas_out, dcons_out]).T
onp.savetxt('autodiff_recovery.txt',to_save)
| notebooks/pharo_test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="WXaFSkUu0fzm" colab_type="text"
# 
#
# Proprietary content. © Great Learning. All Rights Reserved. Unauthorized use or distribution prohibited.
# + [markdown] id="OudB5by50jlI" colab_type="text"
# # Sentiment Classification
# + [markdown] colab_type="text" id="xT7MKZuMRaCg"
# ### Dataset
# - Dataset of 50,000 movie reviews from IMDB, labeled by sentiment positive (1) or negative (0)
# - Reviews have been preprocessed, and each review is encoded as a sequence of word indexes (integers).
# - For convenience, words are indexed by overall frequency in the dataset, so that for instance the integer "3" encodes the 3rd most frequent word in the data. This allows for quick filtering operations such as: "only consider the top 10,000 most common words, but eliminate the top 20 most common words".
# - As a convention, "0" does not stand for a specific word, but instead is used to encode any unknown word.
#
# Command to import data
# - `from tensorflow.keras.datasets import imdb`
# + id="vt6_3fYVV2Lw" colab_type="code" colab={}
# Initialize the random number generator
import random
random.seed(0)
# Ignore the warnings
import warnings
warnings.filterwarnings("ignore")
# + [markdown] id="Q34-Y3nRKXdO" colab_type="text"
# ### Import the data (2 Marks)
# - Use `imdb.load_data()` method
# - Get train and test set
# - Take 10000 most frequent words
# + id="JxfwbrbuKbk2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="7667ac58-e0de-454a-e958-0a863e0bbcc3"
from tensorflow.keras.datasets import imdb
(X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=10000)
# + [markdown] id="DldivBO4LTbP" colab_type="text"
# ### Pad each sentence to be of same length (2 Marks)
# - Take maximum sequence length as 300
# + id="E808XB4tLtic" colab_type="code" colab={}
from tensorflow.keras.preprocessing import sequence
max_length = 300
X_train = sequence.pad_sequences(X_train, maxlen=max_length)
X_test = sequence.pad_sequences(X_test, maxlen=max_length)
# + [markdown] id="JBFFCrybMSXz" colab_type="text"
# ### Print shape of features & labels (2 Marks)
# + [markdown] id="qOcyRtZfMYZd" colab_type="text"
# Number of reviews, number of words in each review
# + colab_type="code" id="hdMCUPr7RaCm" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="c4cb9df2-daf5-496b-d20a-0df67e53917e"
import numpy as np
print("Number of unique words:", len(np.unique(np.hstack(X_train))))
length = [len(i) for i in X_train]
print("Average Review length:", np.mean(length))
print("Number of Reviews and words:", X_train.shape)
# + id="eGVHeKOWyJiG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 67} outputId="c65dc6fc-5d88-4dd4-e3ed-200419a6bcec"
print("Number of unique words:", len(np.unique(np.hstack(X_test))))
length = [len(i) for i in X_test]
print("Average Review length:", np.mean(length))
print("Number of Reviews and words:", X_test.shape)
# + [markdown] id="5cNk5sDvMr3j" colab_type="text"
# Number of labels
# + id="6Z00-mYgMoKv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="6076b314-cf0a-4d98-cd60-7ab6ab9edde8"
print("labels:", np.unique(y_train))
print("Number of labels:", y_train.shape)
# + id="H7f5tPeaMxti" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="ea40dba8-425b-4802-bdd4-a3509e62c3c9"
print("labels:", np.unique(y_test))
print("Number of labels:", y_test.shape)
# + [markdown] id="NdXPWuOmNEbh" colab_type="text"
# ### Print value of any one feature and it's label (2 Marks)
# + [markdown] id="MGLEdeFmNZfR" colab_type="text"
# Feature value
# + id="RKFyMa28zztL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 386} outputId="f0c0522b-c0cc-4f43-c829-486bd97e7a4c"
print(X_train[0])
# + [markdown] id="h_85Hqm0Nb1I" colab_type="text"
# Label value
# + id="-FoehB5jNd1g" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3abbb98f-a142-438e-8f92-f188c91dc064"
print("Label:", y_train[0])
# + [markdown] id="0cof4LSxNxuv" colab_type="text"
# ### Decode the feature value to get original sentence (2 Marks)
# + [markdown] id="Q_oiAyPZOkJD" colab_type="text"
# First, retrieve a dictionary that contains mapping of words to their index in the IMDB dataset
# + id="Clsk-yK8OtzD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 50} outputId="476e204c-b08c-42ef-dd2f-4e544e5da114"
index = imdb.get_word_index()
reverse_index = dict([(value, key) for (key, value) in index.items()])
# + [markdown] id="NRgOD5S2Uuvd" colab_type="text"
# Now use the dictionary to get the original words from the encodings, for a particular sentence
# + id="zJ504QDORwxj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="5b18aa1c-8ba2-4039-8d6f-199ede8ead17"
sentence = " ".join( [reverse_index.get(i - 3, "0") for i in X_train[0]] )
print(sentence)
# + [markdown] id="WLGABrJoVZe6" colab_type="text"
# Get the sentiment for the above sentence
# - positive (1)
# - negative (0)
# + id="XDyQGJT0Ve-a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="17e59e4e-9e7d-466a-c8db-26f866f3e6a4"
i = y_train[0]
if i==1:
print("positive")
else:
print("Negative")
# + [markdown] id="BmCjr8miXIWB" colab_type="text"
# ### Define model (10 Marks)
# - Define a Sequential Model
# - Add Embedding layer
# - Embedding layer turns positive integers into dense vectors of fixed size
# - `tensorflow.keras` embedding layer doesn't require us to onehot encode our words, instead we have to give each word a unique integer number as an id. For the imdb dataset we've loaded this has already been done, but if this wasn't the case we could use sklearn LabelEncoder.
# - Size of the vocabulary will be 10000
# - Give dimension of the dense embedding as 100
# - Length of input sequences should be 300
# - Add LSTM layer
# - Pass value in `return_sequences` as True
# - Add a `TimeDistributed` layer with 100 Dense neurons
# - Add Flatten layer
# - Add Dense layer
# + id="Np5GxT1caFEq" colab_type="code" colab={}
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, TimeDistributed, Embedding,LSTM,Flatten,Dropout
from tensorflow.keras.optimizers import Adam
# Build the sentiment classifier described in the markdown cell above.
model = Sequential()
# Embedding: vocabulary of 10000 words -> dense 100-d vectors; inputs are
# assumed padded/truncated to length 300 upstream.
model.add(Embedding(10000, 100, input_length=300))
# return_sequences=True emits a hidden state per timestep so the
# TimeDistributed dense layer below can be applied at every step.
model.add(LSTM(units=100, return_sequences=True))
model.add(TimeDistributed(Dense(100)))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))  # single sigmoid unit for binary sentiment
# + [markdown] id="Hc4bknOobDby" colab_type="text"
# ### Compile the model (2 Marks)
# - Use Optimizer as Adam
# - Use Binary Crossentropy as loss
# - Use Accuracy as metrics
# + id="jw4RJ0CQbwFY" colab_type="code" colab={}
optimizer = Adam()  # default Adam hyper-parameters
model.compile(loss='binary_crossentropy',optimizer=optimizer,metrics=['accuracy'])
# + [markdown] id="8sEzwazqbz3T" colab_type="text"
# ### Print model summary (2 Marks)
# + id="6Hx1yxwlb2Ue" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 319} outputId="5510f3a5-4ad6-4cc9-a33a-2265d70de7bb"
model.summary()
# + [markdown] id="bmkolKP4b-U6" colab_type="text"
# ### Fit the model (2 Marks)
# + id="vRg3KFXLcAkk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 134} outputId="00c32daf-82d7-4830-baad-5254b2f4dae2"
# Train for 3 epochs, validating against the held-out test set each epoch.
model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=3, batch_size=128, verbose=1)
# + [markdown] id="bwLl54MXnkEA" colab_type="text"
# ### Evaluate model (2 Marks)
# + colab_type="code" id="EUqY-bD8RaDR" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4a68251b-b967-4c98-e71a-99a839d303cb"
evaluation = model.evaluate(X_test, y_test, verbose=0)  # [loss, accuracy]
print("Evaluation_Accuracy: %.2f%%" % (evaluation[1]*100))
# + [markdown] id="h2amr1tJn9Jz" colab_type="text"
# ### Predict on one sample (2 Marks)
# + id="F9Ov7MykPQ2X" colab_type="code" colab={}
# Threshold sigmoid probabilities at 0.5 to obtain hard 0/1 labels.
y_pred = (model.predict(X_test,batch_size=128) > 0.5).astype("int32")
# + id="Wl4idfWR_A8E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="fb031ea3-0e97-4111-e963-8c23c508b5a8"
# Decode the first test review for a sanity check against its prediction.
sample_sentence = " ".join( [reverse_index.get(i - 3, "0") for i in X_test[0]] )
print(sample_sentence)
# + id="pdbXlqq17W6a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5271c8c3-caa0-4461-c06d-e6fdcab16387"
y_pred[0]
# + id="ujr0tpZ-Pa88" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6fdc3483-e4a7-4b33-8d3e-0eff177f0164"
y_test[0]
# + id="05HZCG6A0PtQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="32683a37-aec3-4809-de89-952daeae12d1"
sample_sentence = " ".join( [reverse_index.get(i - 3, "0") for i in X_test[10]] )
print(sample_sentence)
# + id="887U1_Lk0aKY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="79e0f224-1805-4374-be06-ef35554c4e55"
y_pred[10]
# + id="2cxiHzsZ0d1P" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="99cdc9ca-cae1-4e9a-f9cd-dc87cffcc0bc"
y_test[10]
# + [markdown] id="E3UElyozaSKL" colab_type="text"
# # Final Insights
#
#
# * All tasks successfully achieved
# * Model is overfitting after very few epoch
# * Achieved accuracy of nearly 87% during runs with different number of units in LSTM layer and adding dropout also produced similar results
# * prediction of the few samples gave almost accurate results
#
# ## Thank You
#
#
#
#
| Duvvuru_Lokesh_Project_1_Sequential_Models_in_NLP_Sentiment_Classification (1)-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Load the mushroom dataset, dropping the first column.
df = pd.read_csv('mushrooms.csv').iloc[:,1:]
# Label-encode every categorical column to integers.
# NOTE(review): apply() refits the encoder per column, so codes are column-local — confirm intended.
df = df.apply(LabelEncoder().fit_transform)
df.head()
# Project onto the first two principal components for 2-D clustering/plotting.
df = PCA(n_components = 2).fit_transform(df.values)
def euclidean_distance(x1, x2):
    """Euclidean (L2) distance between two equal-length coordinate vectors.

    Args:
        x1, x2: array-likes (lists or 1-D numpy arrays) of coordinates.

    Returns:
        float: sqrt of the sum of squared coordinate differences.
    """
    # Vectorized replacement for the original per-coordinate Python loop;
    # also generalizes to any array-like input.
    diff = np.asarray(x1, dtype=float) - np.asarray(x2, dtype=float)
    return float(np.sqrt(np.dot(diff, diff)))
def random_centroids(k, X):
    """Pick k initial centroids by sampling rows of X uniformly at random.

    Args:
        k (int): number of centroids to draw.
        X (np.ndarray): data matrix of shape (n_samples, n_features).

    Returns:
        np.ndarray: float array of shape (k, n_features) containing rows of X.

    Note:
        Sampling is with replacement (matching the original behavior), so
        duplicate centroids are possible.
    """
    # Single vectorized draw replaces the original grow-by-loop with one
    # np.random.choice call per centroid.
    rows = np.random.choice(X.shape[0], size=k)
    return X[rows].astype(float, copy=True)
r_centroids = random_centroids(5, df)
r_centroids
# Assign each sample to its nearest of the 5 random centroids.
results = []
for i in range(df.shape[0]):
    results.append(np.argmin([euclidean_distance(df[i,:], r_centroids[x, :]) for x in range(5)]))
Y = np.array(results)  # cluster label per sample
fig = plt.figure(figsize=(10,5))
colors = sns.color_palette()
# Mark the centroids with large 'X' markers, then draw each cluster in its own color.
plt.scatter(r_centroids[:,0], r_centroids[:,1], color='b',marker='X', s=200)
for no, i in enumerate(np.unique(Y)):
    plt.scatter(df[Y==i,0], df[Y==i,1], color=colors[no],label = no)
plt.legend()
plt.show()
| clustering/k-mean/mushroom-k5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Synapse PySpark
# name: synapse_pyspark
# ---
# + [markdown] nteract={"transient": {"deleting": false}}
# # Process the Customers vs Orders Synapse Link Data
# +
# Load the SynapseLink Customers and Orders SynapseLink Data into a Dataframes.
from pyspark.sql.functions import col
# initialize variables
use_push_down_predicate = True
df_orders, df_order_docs = None, None
min_timestamp = 1635168000  # epoch-seconds cutoff; only documents newer than this are kept
# read the customers SynapseLink data
df_customers = spark.read\
    .format("cosmos.olap")\
    .option("spark.synapse.linkedService", "demoCosmosDB")\
    .option("spark.cosmos.container", "customers")\
    .load().filter(col("_ts") > min_timestamp)
if use_push_down_predicate == True:
    # this is more efficient, as the dataframe is filtered on load
    df_order_docs = spark.read\
        .format("cosmos.olap")\
        .option("spark.synapse.linkedService", "demoCosmosDB")\
        .option("spark.cosmos.container", "orders")\
        .load().filter(col("doctype") == "order").filter(col("_ts") > min_timestamp)
    # push-down predicate filters on doctype and _ts
else:
    # this is less efficient, as the dataframe is filtered after load
    df_orders = spark.read\
        .format("cosmos.olap")\
        .option("spark.synapse.linkedService", "demoCosmosDB")\
        .option("spark.cosmos.container", "orders")\
        .load()
    print('df_orders, shape: {} x {}'.format(
        df_orders.count(), len(df_orders.columns)))
    df_orders.printSchema()
    # Filter to order documents only, after the full container load.
    df_order_docs = df_orders.filter(df_orders["doctype"].isin(["order"]))
print('df_customers, shape: {} x {}'.format(
    df_customers.count(), len(df_customers.columns)))
df_customers.printSchema()
print('df_order_docs, shape: {} x {}'.format(
    df_order_docs.count(), len(df_order_docs.columns)))
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Display the first few rows of the df_customers Dataframe
display(df_customers.limit(3))
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Display the first few rows of the df_order_docs Dataframe
display(df_order_docs.limit(3))
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Create Narrower/Minimal Dataframes for the Join operation
from pyspark.sql.functions import col
# Keep only the columns the join and aggregation below actually need.
df_customers_minimal = df_customers.select(
    col('id'),
    col('customer_id'),
    col('name'))
print('df_customers_minimal, shape: {} x {}'.format(
    df_customers_minimal.count(), len(df_customers_minimal.columns)))
df_customers_minimal.printSchema()
df_orders_minimal = df_order_docs.select(
    col('order_id'),
    col('customer_id'),
    col('item_count'),
    col('order_total'))
print('df_orders_minimal, shape: {} x {}'.format(
    df_orders_minimal.count(), len(df_orders_minimal.columns)))
df_orders_minimal.printSchema()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Join the (narrow) Customers to their (narrow) Order documents
df_joined = df_orders_minimal.join(df_customers_minimal, ['customer_id']) \
    .sort("customer_id", ascending=False)
print('df_joined, shape: {} x {}'.format(
    df_joined.count(), len(df_joined.columns)))
df_joined.printSchema()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Display the first few rows of the df_joined Dataframe
display(df_joined.limit(3))
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Group the df_joined Dataframe by customerId, sum on order total and total_orders
# NOTE(review): .alias('total_orders') here aliases the DataFrame, not the summed
# column — the column stays named "sum(order_total)". The df_agg cell below does
# this correctly with F.sum(...).alias(...); df_grouped appears unused afterwards.
df_grouped = df_joined.groupby("customer_id") \
    .sum("order_total").alias('total_orders') \
    .sort("customer_id", ascending=False)
display(df_grouped.printSchema())
print((df_grouped.count(), len(df_grouped.columns)))
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
import pyspark.sql.functions as F
#from pyspark.sql.functions import col
# Per-customer aggregates: order count, total dollars, total items.
df_agg = df_joined.groupBy("customer_id") \
    .agg(
        F.first('id').alias('id'), \
        F.count("customer_id").alias('order_count'), \
        F.sum("order_total").alias("total_dollar_amount"), \
        F.sum("item_count").alias("total_item_count")) \
    .sort("customer_id", ascending=False)
display(df_agg.printSchema())
print((df_agg.count(), len(df_agg.columns)))
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
display(df_agg.limit(10))
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
from pyspark.sql import SparkSession
from pyspark.sql.types import *
import pyspark.sql.functions as F
# See https://github.com/Azure-Samples/Synapse/blob/main/Notebooks/PySpark/02%20Read%20and%20write%20data%20from%20Azure%20Blob%20Storage%20WASB.ipynb
# Azure storage access info
blob_account_name = 'cjoakimstorage'
blob_container_name = 'synapse'
blob_relative_path = 'ecomm/'
linked_service_name = 'cjoakimstorageAzureBlobStorage'
# Obtain a SAS token (or connection string) for the linked storage service.
blob_sas_token = mssparkutils.credentials.getConnectionStringOrCreds(
    linked_service_name)
#print('blob_sas_token: {}'.format(blob_sas_token))
# Allow Spark to access from Blob remotely
wasbs_path = 'wasbs://%s@%s.blob.core.windows.net/%s' % (
    blob_container_name, blob_account_name, blob_relative_path)
spark.conf.set('fs.azure.sas.%s.%s.blob.core.windows.net' % (
    blob_container_name, blob_account_name), blob_sas_token)
print('Remote wasbs_path: ' + wasbs_path)
csv_path = '{}{}'.format(wasbs_path,'sales_by_customer_csv')
#json_path = '{}{}'.format(wasbs_path,'sales_by_customer_json')
# coalesce(1) writes a single CSV part file instead of one per partition.
df_agg.coalesce(1).write.csv(csv_path, mode='overwrite', header='true')
#df_agg.coalesce(1).write.json(json_path, mode='overwrite')
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}} microsoft={}
# Write to CosmosDB - linked service 'demoCosmosDB'
# See https://docs.microsoft.com/en-us/azure/synapse-analytics/synapse-link/how-to-query-analytical-store-spark#write-spark-dataframe-to-azure-cosmos-db-container
# Upsert the per-customer aggregates into the 'customer_sales' container.
# BUGFIX: the option key was misspelled "spak.cosmos.write.upsertenabled", so
# the connector ignored it and upserts were silently disabled; the correct key
# is "spark.cosmos.write.upsertEnabled".
df_agg.write.format("cosmos.oltp")\
    .option("spark.synapse.linkedService", "demoCosmosDB")\
    .option("spark.cosmos.container", "customer_sales")\
    .option("spark.cosmos.write.upsertEnabled", "true")\
    .mode('append')\
    .save()
| synapse/notebooks/customers_vs_orders.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sequence classification by RNN
#
# * Creating the **data pipeline** with `tf.data`
# * Preprocessing word sequences (variable input sequence length) using `padding technique` by `user function (pad_seq)`
# * Using `tf.nn.embedding_lookup` for getting vector of tokens (eg. word, character)
# * Creating the model as **Class**
# * Reference
# * https://github.com/golbin/TensorFlow-Tutorials/blob/master/10%20-%20RNN/02%20-%20Autocomplete.py
# * https://github.com/aisolab/TF_code_examples_for_Deep_learning/blob/master/Tutorial%20of%20implementing%20Sequence%20classification%20with%20RNN%20series.ipynb
# +
import os
import sys
import time
import string
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
slim = tf.contrib.slim  # TF1 slim API, used below for the dense output layer
rnn = tf.contrib.rnn    # TF1 RNN cell namespace
# Let the GPU allocator grow on demand instead of claiming all memory upfront.
sess_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True))
# -
# ## Prepare example data
# Toy dataset: short phrases labelled positive ([1,0]) or negative ([0,1]).
words = ['good', 'bad', 'amazing', 'so good', 'bull shit', 'awesome', 'how dare', 'very much', 'nice']
y = np.array([[1.,0.], [0.,1.], [1.,0.], [1.,0.], [0.,1.], [1.,0.], [0.,1.], [1.,0.], [1.,0.]])
# Character quantization: vocabulary is a-z, space, and '*' as the pad token.
char_space = string.ascii_lowercase
char_space = char_space + ' ' + '*' # '*' means padding token
print("char_space: {}".format(char_space))
idx2char = [char for char in char_space]  # index -> character
print("idx2char: {}".format(idx2char))
char2idx = {char : idx for idx, char in enumerate(char_space)}  # character -> index
print("char2idx: {}".format(char2idx))
# ### Create pad_seq function
def pad_seq(sequences, max_length, dic):
    """Convert character sequences to fixed-length index arrays.

    Args:
        sequences (list of str): input character sequences
        max_length (int): fixed output length; longer sequences are truncated
        dic (dict): char -> index mapping; must contain the pad token '*'

    Returns:
        seq_indices (2-rank np.array): shape (len(sequences), max_length)
        seq_length (1-rank np.array): effective (possibly truncated) lengths
    """
    pad_idx = dic.get('*')  # index of the meaningless padding token "*"
    seq_length, seq_indices = [], []
    for sequence in sequences:
        # BUGFIX: the original never truncated sequences longer than
        # max_length, which produced a ragged (object-dtype) array and
        # out-of-range seq_length values downstream.
        sequence = sequence[:max_length]
        seq_length.append(len(sequence))
        seq_idx = [dic.get(char) for char in sequence]
        seq_idx += (max_length - len(seq_idx)) * [pad_idx]  # right-pad to max_length
        seq_indices.append(seq_idx)
    return np.array(seq_indices), np.array(seq_length)
# ### Apply pad_seq function to data
max_length = 10
X_indices, X_length = pad_seq(sequences=words, max_length=max_length, dic=char2idx)
print("X_indices")
print(X_indices)
print("X_length")
print(X_length)
# ## Define CharRNN class
class CharRNN:
    """Character-level multi-layer RNN classifier (TF1 graph mode).

    Embeds character indices as frozen one-hot vectors, runs them through a
    stack of BasicRNNCells, and classifies from the final state of the top
    layer. Inputs (`seq_indices`, `seq_length`, `labels`) come from a tf.data
    iterator, so the graph is built directly on those tensors.
    """
    def __init__(self, seq_indices, seq_length, labels, num_classes, hidden_dims, dic):
        # data pipeline
        with tf.variable_scope('input_layer'):
            self._seq_indices = seq_indices  # (batch, time) int char indices
            self._seq_length = seq_length    # (batch,) true (unpadded) lengths
            self._labels = labels            # (batch, num_classes) one-hot labels
            one_hot = tf.eye(len(dic), dtype=tf.float32)
            self._one_hot = tf.get_variable(name='one_hot_embedding',
                                            initializer=one_hot,
                                            trainable=False) # frozen: the one-hot embedding is deliberately not trained
            self._seq_embeddings = tf.nn.embedding_lookup(params=self._one_hot,
                                                          ids=self._seq_indices)
        # MultiLayer RNN cell
        with tf.variable_scope('multi_rnn_cell'):
            multi_cells = rnn.MultiRNNCell([rnn.BasicRNNCell(hidden_dim) for hidden_dim in hidden_dims])
            # dynamic_rnn honors sequence_length, so pad positions do not
            # contribute to the final states.
            _, states = tf.nn.dynamic_rnn(cell=multi_cells, inputs=self._seq_embeddings,
                                          sequence_length=self._seq_length, dtype=tf.float32)
        with tf.variable_scope('output_layer'):
            # Classify from the final state of the top (last) RNN layer.
            self._logits = slim.fully_connected(inputs=states[-1],
                                                num_outputs=num_classes,
                                                activation_fn=None)
        with tf.variable_scope('loss'):
            self.loss = tf.losses.softmax_cross_entropy(onehot_labels=self._labels,
                                                        logits=self._logits)
        with tf.variable_scope('prediction'):
            self._prediction = tf.argmax(input=self._logits, axis=-1, output_type=tf.int32)

    def predict(self, sess, seq_indices, seq_length):
        """Run the prediction op for a batch; returns int class indices."""
        feed_dict = {self._seq_indices : seq_indices, self._seq_length : seq_length}
        return sess.run(self._prediction, feed_dict=feed_dict)
# ### Create a model of CharRNN
# hyper-parameters
num_classes = 2       # positive / negative
learning_rate = 0.003
batch_size = 2
max_epochs = 10
# #### Print dataset
print("X_indices: \n{}".format(X_indices))
print("X_length: {}".format(X_length))
print("y: \n{}".format(y))
# ### Set up dataset with `tf.data`
#
# #### create input pipeline with `tf.data.Dataset`
## create data pipeline with tf.data
train_dataset = tf.data.Dataset.from_tensor_slices((X_indices, X_length, y))
train_dataset = train_dataset.shuffle(buffer_size = 100)
train_dataset = train_dataset.batch(batch_size = batch_size)
print(train_dataset)
# #### Define Iterator
# Initializable iterator: re-initialized at the start of every epoch below.
train_iterator = train_dataset.make_initializable_iterator()
seq_indices, seq_length, labels = train_iterator.get_next()
char_rnn = CharRNN(seq_indices=seq_indices, seq_length=seq_length,
                   labels=labels, num_classes=num_classes,
                   hidden_dims=[32, 16], dic=char2idx)
# ### Create training op and train model
## create training op
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(char_rnn.loss)
# ### `tf.Session()` and train
# +
sess = tf.Session()
sess.run(tf.global_variables_initializer())
loss_history = []
step = 0
for epochs in range(max_epochs):
    start_time = time.time()
    # Re-initialize the iterator so each epoch replays the whole dataset.
    sess.run(train_iterator.initializer)
    avg_loss = []
    while True:
        try:
            _, loss_ = sess.run([train_op, char_rnn.loss])
            avg_loss.append(loss_)
            step += 1
        except tf.errors.OutOfRangeError:
            # Iterator exhausted: epoch finished.
            #print("End of dataset") # ==> "End of dataset"
            break
    avg_loss_ = np.mean(avg_loss)
    loss_history.append(avg_loss_)
    duration = time.time() - start_time
    examples_per_sec = batch_size / float(duration)
    print("epochs: {}, step: {}, loss: {:g}, ({:.2f} examples/sec; {:.3f} sec/batch)".format(epochs+1, step, avg_loss_, examples_per_sec, duration))
# -
plt.plot(loss_history, label='train')
# Evaluate on the training set itself (tiny toy dataset, no held-out split).
y_pred = char_rnn.predict(sess=sess, seq_indices=X_indices, seq_length=X_length)
accuracy = np.mean(y_pred==np.argmax(y, axis=-1))
print('training accuracy: {:.2%}'.format(accuracy))
| 03.06.sequence.classification.Multi.RNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from abc import ABC, abstractmethod
from collections import namedtuple
Customer = namedtuple('Customer', ['name', 'fidelity'])
# ## The "Strategy" Pattern
# +
# Product line-item class
class LineItem:
    """A single cart entry: a product with a quantity and a unit price."""

    def __init__(self, product_name, quantity, price):
        self.product = product_name
        self.quantity = quantity
        self.price = price

    def total(self):
        """Subtotal for this line: quantity times unit price."""
        return self.quantity * self.price
# +
# Promotion strategy classes
class Promotion(ABC):
    """Abstract discount strategy: subclasses compute a discount for an order."""
    @abstractmethod
    def discount(self, order):
        """Return the discount amount (a positive dollar value) for `order`."""
        pass

class FidelityPromo(Promotion):
    """5% discount for customers with 1000 or more fidelity points."""
    def discount(self, order):
        return order.total() * 0.05 if order.customer.fidelity >= 1000 else 0

class BulkItemPromo(Promotion):
    """10% discount on each line item with 20 or more units."""
    def discount(self, order):
        # BUGFIX: the original wrote `discount += ...` without initializing a
        # local (the class attribute `discount = 0` was shadowed by this very
        # method), raising UnboundLocalError. Accumulate in a local instead.
        discount = 0
        for item in order.cart:
            if item.quantity >= 20:
                discount += item.total() * 0.1
        return discount

class LargeOrderPromo(Promotion):
    """7% discount on the whole order once it spans 10+ distinct products."""
    def discount(self, order):
        distinct_items = {item.product for item in order.cart}  # set of product names
        if len(distinct_items) >= 10:
            return order.total() * 0.07
        return 0
# +
# Aggregate Order class (the strategy context)
class Order:
    """Order context: a customer's cart plus an optional Promotion instance."""

    def __init__(self, customer, cart, promotion=None):
        self.customer = customer
        self.cart = list(cart)      # items exposing .total()
        self.promotion = promotion  # a Promotion instance, or None

    def total(self):
        """Sum of line-item totals, computed lazily and cached on first use."""
        if not hasattr(self, '_total'):
            self._total = sum(item.total() for item in self.cart)
        return self._total

    def due(self):
        """Amount owed: order total minus the promotion's discount (if any)."""
        discount = 0 if self.promotion is None else self.promotion.discount(self)
        return self.total() - discount

    def __repr__(self):
        return '<Order total: {:.2f} due: {:.2f}>'.format(self.total(), self.due())
# +
joe = Customer('<NAME>', 0)
ann = Customer('<NAME>', 1100)
cart = [LineItem('banana', 4, 0.5),
LineItem('apple', 10, 1.5),
LineItem('watermellon', 5, 5.0)]
Order(joe, cart, FidelityPromo())
# -
# #### In the example above, each concrete strategy is a class that defines a single method, `discount`, and strategy instances hold no state (no instance attributes). They look just like plain functions — and indeed they are. Below, the concrete strategies are replaced with simple functions and the abstract Promo class is removed.
# +
# Product line-item class (unchanged from the class-based version)
class LineItem:
    """A single cart entry: a product with a quantity and a unit price."""

    def __init__(self, product_name, quantity, price):
        self.product = product_name
        self.quantity = quantity
        self.price = price

    def total(self):
        """Subtotal for this line: quantity times unit price."""
        return self.quantity * self.price
# +
def fidelity_promo(order):
    """5% discount for customers with at least 1000 fidelity points."""
    if order.customer.fidelity >= 1000:
        return order.total() * 0.05
    return 0

def bulk_item_promo(order):
    """10% discount on each line item holding 20 or more units."""
    eligible = (item.total() * 0.1 for item in order.cart if item.quantity >= 20)
    return sum(eligible)

def large_order_promo(order):
    """7% off the whole order once it spans 10 or more distinct products."""
    product_names = set()
    for item in order.cart:
        product_names.add(item.product)
    return order.total() * 0.07 if len(product_names) >= 10 else 0
# +
# Aggregate Order class — the promotion is now a plain function taking the order
class Order:
    """Order context whose promotion is a plain discount function."""

    def __init__(self, customer, cart, promotion=None):
        self.customer = customer
        self.cart = list(cart)      # items exposing .total()
        self.promotion = promotion  # callable: promotion(order) -> discount, or None

    def total(self):
        """Sum of line-item totals, computed lazily and cached on first use."""
        if not hasattr(self, '_total'):
            self._total = sum(item.total() for item in self.cart)
        return self._total

    def due(self):
        """Amount owed: order total minus the promotion's discount (if any)."""
        discount = 0 if self.promotion is None else self.promotion(self)
        return self.total() - discount

    def __repr__(self):
        return '<Order total: {:.2f} due: {:.2f}>'.format(self.total(), self.due())
# +
joe = Customer('<NAME>', 0)     # no fidelity points: no fidelity discount
ann = Customer('<NAME>', 1100)  # eligible for the 5% fidelity promo
cart = [LineItem('banana', 4, 0.5),
        LineItem('apple', 10, 1.5),
        LineItem('watermellon', 5, 5.0)]
banana_cart = [LineItem('banana', 30, .5),  # 30 units trigger the bulk-item promo
               LineItem('apple', 10, 1.5)]
# 10 distinct single-unit products trigger the large-order promo.
long_order = [LineItem(str(item_code), 1, 1.0) for item_code in range(10)]
Order(joe, cart, fidelity_promo)
# +
promos = [fidelity_promo, bulk_item_promo, large_order_promo]

def best_promo(order):
    # Meta-strategy: pick the largest discount among all available promotions.
    return max(promo(order) for promo in promos)
# -
Order(joe, long_order, best_promo)
# ## globals
# Rebuild the promos list by scanning the module namespace for *_promo functions,
# excluding best_promo itself (so new promos are picked up automatically).
promos = [globals()[name] for name in globals()
          if name.endswith('_promo') and name != 'best_promo']
promos
import inspect
# Collect every function defined in the `promotions` module.
# BUGFIX: the predicate must be passed uncalled — the original invoked
# inspect.isfunction(), which raises TypeError before getmembers runs.
# NOTE(review): `promotions` is not defined in this notebook (in the book it is
# a separate module of promo functions) — confirm the module is importable here.
promos = [func for name, func in inspect.getmembers(promotions, inspect.isfunction)]
#
# ## The "Command" Pattern
class MacroCommand:
    """A command that replays a sequence of sub-commands, in order."""

    def __init__(self, commands):
        # Copy the iterable so later mutation by the caller has no effect.
        self.commands = list(commands)

    def __call__(self):
        """Invoke each stored command in the order given."""
        for cmd in self.commands:
            cmd()
| Jupyter/6.*.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:TF1]
# language: python
# name: conda-env-TF1-py
# ---
# # Hello World
print ('hello world') # Generate "hello world".
print("hello world") # Generate "hello world". The output is the same when single and double quotation marks are carried in input.
print("Hello Huawei")
# Triple quotes preserve embedded newlines (a multi-line string literal).
print( '''this is the long string
this is the second
the thrid line''')
# # Data Type
# ## number
print(True+False)# The output is 1. By default, True indicates 1, and False indicates 0.
print(True or False)# If True is displayed, enter or or perform the OR operation.
print(5//2)# The output is 2, and // is the rounding operator.
print(5%2)# The output is 1, and % is the modulo operator.
print(3**2) # The output is 9, and ** indicates the power operation.
print(5+1.6) # The output is 6.6. By default, the sum of numbers of different precisions is the number of the highest precision type.
# # Experimental Operations on lists
# ### Check if the list is empty
#python
items=[]
# Emptiness check via length.
if len(items) == 0:
    print("empty list")
else:
    print("It is not empty list")
#or: compare directly with an empty-list literal.
if items == []:
    print("empty list")
# ### Copy a list.
#Method one: full slice produces a shallow copy.
old_list=["hello","Jeary"]
print(old_list[:])
print(old_list)
new_list = old_list[:]
print(new_list)
print(old_list)
old_list[1]
#Method two: the list() constructor also makes a shallow copy.
new_list = list(old_list)
print(new_list)
#Method three: copy.copy() from the standard library.
import copy
new_list1 = copy.copy(old_list)# copy
print(new_list1)
# ### Get the last element of a list.
# Elements in an index list can be positive or negative numbers. Positive numbers mean indexing from the left of list, while negative numbers mean indexing from the right of list. There are two methods to get the last element.
#
a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
a[len(a)-1]  # explicit last index
a[-1]        # idiomatic negative index
# ### Sort lists.
# You can use two ways to sort lists. Python lists have a sort method and a built-in function (sorted). You can sort complicated data types by specifying key arguments. You can sort lists composed of dictionaries according to the age fields of elements in the dictionaries.
items = [{'name': 'Homer', 'age': 39},{'name': 'Bart', 'age': 10},{"name": 'cater', 'age': 20}]
items.sort(key=lambda item: item.get("age"))  # in-place sort by the 'age' field
print(items)
help(items.sort)
# ### Remove elements from a list.
# The "remove" method removes an element, and it removes only the element that appears for the first time.
a = [0, 2, 2, 3]
a.insert(0,3)  # insert 3 at the front
print(a)
a.remove(2)    # removes only the FIRST 2
a
# # type(a)
# ValueError will be returned if the removed element is not within the list.
a.remove(7)  # intentional error demo: raises ValueError (7 not in list)
# ### Connect two lists.
listone = [1, 2, 3]
listtwo = [4, 5, 6]
# Slices beyond the end are clipped, so [0:1000] is simply the whole list.
mergedlist = listone + listtwo[0:1000]
#print(mergedlist)
listone.extend(listtwo)  # extend appends each element of listtwo
print(listone)
listone.append([1,23,3])  # append adds the list itself as ONE nested element
print(listone)
# # Experimental Operations on Tuples
# This section describes related operations on tuples.
# Another type of ordered lists is called tuples and is expressed in(). Similar to a list, a tuple cannot be changed after being initialized. Elements must be determined when a tuple is defined.
# A tuple has no append() and insert() methods and cannot be assigned as another element. The method for getting a tuple is similar to that for getting a list.
# As tuples are unchangeable, code is more secure. Therefore, if possible, use tuples instead of lists.
#
# ## Define a tuple for an element.
t=(1,5,'hello')  # heterogeneous tuple
print(t)
type(t)
t2=(1,)  # trailing comma is required for a one-element tuple
type(t2)
# Note: In t=(1), t is not a tuple type, as the parentheses () can represent a tuple, or mathematical formula. Python stipulates that in this case, () is a mathematical formula, and a tuple with only one element must have a comma to eliminate ambiguity.
# ## Define a 'changeable tuple'.
cn=('yi','er','san')
en=('one','two','three')
num=(1,2,3)
# The enclosing LIST is mutable even though the tuples inside it are not.
tmp=[cn,en,num,[1.1,2.2],'language']
print(tmp)
print(tmp[0])        # first tuple
print(tmp[0][0])     # first element of that tuple
print(tmp[0][0][0])  # first character of that string
# # Dictionaries
# This experiment mainly introduces related knowledge units about dictionaries in Python, and related operations on them.
# *Python dictionary. A dictionary has a data structure similar to a mobile phone list, which lists names and their associated information. In a dictionary, the name is called a "key", and its associated information is called "value". A dictionary is a combination of keys and values.
# *Its basic format is as follows:
# d = {key1 : value1, key2 : value2 }
# *You can separate a key and a value with a colon, separate each key/value pair with a comma, and include the dictionary in a brace.
# *Some notes about keys in a dictionary: Keys must be unique, and must be simple objects like strings, integers, floating numbers, and bool values.
# ## Create a dictionary
# A dictionary can be created in multiple manners, as shown below.
# Five equivalent ways of constructing a dict (note: a's 'one' maps to a list,
# so the final equality check prints False).
a = {'one': [1,2,3,4,5],'two': 2, 'three': 3}
print(a)
b = dict(one=1, two=2, three=3)
print(b)
c = dict([('one', 1), ('two', 2), ('three', 3)])
print(c)
d = dict(zip(['one', 'two', 'three'], [1, 2, 3]))
print(d)
e = dict({'one': 1, 'two': 2, 'three': 3})
print(e)
print(a==b==c==d==e)
# ## dictcomp
# "dictcomp" can build a dictionary from iterated objects that use key/value pairs as elements.
data = [("John","CEO",7),("Nacy","hr",7),("LiLei","engineer",9)]
employee = {name:work for name, work,age in data}  # age is unpacked but unused
print(employee)
# ## Dictionary lookup
# Look up directly according to a key value.
# If there is no matched key value in a dictionary, KeyError is returned.
print(employee["John"])
print(employee["Joh"])  # intentional error demo: raises KeyError
# When you use dic[key] to look up a key value in a dictionary, it will return an error if there is no such key value. However, if you use dic.get(key, default) to look up a key value, it will return default if there is no such key value.
print(employee.get("Nacy","UnKnown'"))
print(employee.get("Nac","UnKnown"))
# Three value assignment operations on dictionaries.
x = {'food':'Spam','quantity':4,'color':'pink'}
X =dict(food='Spam',quantity=4, color='pink')
x = dict([("food", "Spam"),("quantity", "4"),("color","pink")])
# dict.copy(): Copy data (a shallow copy; the copies are then independent).
d =x.copy()
d['color'] = 'red'
print(x) # {'food':'Spam','quantity':4,'color':'pink'}
print(d) # {'food':'Spam','quantity':4,'color':'red'}
# Element access.
print (d['name']) # Obtain the error information (intentional KeyError demo).
print(d.get('name')) # Output: None
print(d.get('name','The key value does not exist.')) # Output: The key value does not exist.
print(d.keys()) # Output: dict_keys(['food', 'quantity', 'color'])
print(d.values())# Output: dict_values(['Spam', 4, 'red'])
print(d.items())
# Output: dict_items([('food', 'Spam'), ('quantity', 4), ('color', 'red')])
d.clear()# Clear all data in the dictionary.
print(d)# Output: {}
del(d)# Delete the dictionary.
print(d)# The program is abnormal, and a message is displayed, indicating that d is not defined (intentional NameError demo).
# # string
# This experiment mainly introduces related knowledge units about strings in Python, and related operations on them.
# Strings of Python: A string is a sequence composed of zero or multiple characters, and it is one of the sixth built-in sequences of Python. Strings are unchangeable in Python, which are string constants in C and C++ languages.
# Expression of strings. Strings may be expressed in single quotes, double quotes, triple quotes, or as escape characters and original strings.
#
# ## Single quotes and double quotes
# Strings in single quotes are equal to those in double quotes, and they are exchangeable.
s = 'string'
print(s)
ss="python string"
print(ss)
# Double quotes let the string contain unescaped double quotes? No — the
# reverse: single-quoted strings may contain double quotes, as here.
sss='python "Hello World"string'
print(sss)
# ## Long strings
# Triple quotes can define long strings in Python as mentioned before. Long strings may have output like:
print('''"this is a long string",he said''')
# ## Original strings
# Original strings start with r, and you can input any character in original strings. The output strings include backslash used by transference at last. However, you cannot input backslash at the end of strings. For example:
rawStr = r'D:\SVN_CODE\V900R17C00_TRP\omu\src'  # raw string: backslashes are literal
print(rawStr)
# ## Width, precision, and alignment of strings
# To achieve the expected effects of strings in aspects of width, precision, and alignment, refer to the formatting operator commands.
print("%c" % 98)            # character with code 98 -> 'b'
print("%6.3f" % 2.5)        # width 6, 3 decimal places
print("%+10x" % 13)         # signed hex, width 10
print("%.*f" % (6, 1.5))    # '*' takes the precision (6) from the argument tuple
# ## Connect and repeat strings
# In Python, you can use "+" to connect strings and use "*" to repeat strings.
s = 'I' + ' ' + 'Knew' + 'Python' + '.'
print(s)
ss='I love Python.'*3
print(ss)
# ## Delete strings
# You can use "del" to delete a string. After being deleted, this object will no longer exist, and an error is reported when you access this object again.
del ss
print(ss)  # intentional error demo: raises NameError after del
# # Conditional and Looping Statements
# This experiment mainly introduces related knowledge units about conditional and looping statements in Python, and related operations on them.
# There are a lot of changes in looping statements. Common statements include the "for" statement and the "while" statement.
# In "for" looping, the "for" statement should be followed by a colon. "for" looping is performed in a way similar to iterating. In "while" looping, there is a judgment on condition and then looping, like in other languages.
# ## If Statement
#Determine the entered score.
# input(): Receive input data.
score = input("Please enter your score.") # input() always returns a string
# try:… except Exception:… is a Python statement used to capture exceptions. If an error occurs in the statement in the try statement, the except statement will be executed.
try:
    # BUGFIX: the float() conversion was originally OUTSIDE the try block, so a
    # non-numeric entry raised an uncaught ValueError instead of printing the
    # friendly "Enter a correct score." message the except clause was written for.
    score = float(score)# Convert the score to a number.
    if 100>=score>=90: # Check whether the entered value is greater than the score of a level.
        print("Excellent") # Generate the level when conditions are met.
    elif 90 > score >= 80:
        print("Good")
    elif 80>score>0:
        print("Medium")
    else:
        print("Bad")
except Exception:
    print("Enter a correct score.")
# ## "for" looping
# Simple counted loop: prints 0..9.
for i in range(0,10):
    print(i)
a=[1,3,5,7,9]
for i in a[0:5]:
    print(i)
print(a)
# Multiplication table via nested loops.
for i in range(1,10):# Define the outer loop.
    for j in range(1,i+1):# Define the inner loop.
        # Format the output character string to align the generated result. The end attribute is set to "\n" by default.
        print("%d*%d=%2d"%(i,j,i*j), end=" ")
    print()
# ## "while" looping
i=90
while (i<100):
    i+=1
    print(i)
i = 0# Create variable i.
while i<9: # Set a condition for the loop.
    i+=1 # The value of i increases by 1 in each loop.
    if i == 3: # Check whether the conditions are met.
        print("Exit this loop.")
        continue# Execute continue to skip the rest of the current iteration.
    if i == 5:
        print("Exit the current big loop.")
        break# Exit the current big loop.
    print(i)
# # Functions
# This experiment mainly introduces related knowledge units about functions in Python, and related operations on them.
# Functions improve the modularity of applications and enable code reuse. In Python, strings, tuples, and numbers are immutable, while lists and dictionaries are mutable. For immutable types such as integers, strings, and tuples, only values are passed during function calls, with no impact on the objects themselves. For mutable types, the objects themselves are passed during function calls, so external objects are affected by changes made inside the function.
# ## Common built-in functions
# The "int" function can be used to convert other types of data into integers.
int('123')      # string -> int
int(12.34)      # float -> int (truncates toward zero)
float('12.34')  # string -> float
str(1.23)       # number -> string
str(100)
bool(1)   # any non-zero number is truthy
bool('')  # empty string is falsy
# ## Function name
# A function name is a reference to a function object, and it can be assigned to a variable, which is equivalent to giving the function an alias name.
a = abs # Variable a points to function abs
a(-1) # Therefore, the "abs" can be called by using "a"
# ## Define functions
# In Python, you can use the "def" statement to define a function, listing function names, brackets, arguments in brackets and colons successively. Then you can edit a function in an indentation block and use the "return" statement to return values.
# We make an example by defining the "my_abs" function to get an absolute value.
def my_abs(x):
    """Return the absolute value of x (a teaching re-implementation of abs)."""
    return x if x >= 0 else -x
my_abs(-2)
# You can use the "pass" statement to define a void function, which can be used as a placeholder. Change the definition of "my_abs" to check argument types, that is, to allow only arguments of integers and floating-point numbers. You can check data types with the built-in function "isinstance()".
def my_abs(x):
    """Return |x|, accepting only int or float arguments.

    Raises:
        TypeError: if x is neither an int nor a float.
    """
    if not isinstance(x, (int, float)):
        raise TypeError('bad operand type')
    if x >= 0:
        return x
    else:
        return -x
my_abs('3')  # deliberately passes a string to demonstrate the TypeError
def my_abs(x):
    """Void placeholder function: "pass" lets an empty body parse.

    Useful as a stub while designing an API; returns None for any input.
    (The original cell had two duplicated nested "def my_abs" statements --
    a copy/paste garble -- so calling my_abs('3') raised a TypeError instead
    of demonstrating the "pass" placeholder described in the text above.)
    """
    pass
my_abs('3')
# ## Keyword arguments
# Changeable arguments allow you input zero or any number of arguments, and these changeable arguments will be assembled into a tuple for function calling. While keyword arguments allow you to input zero or any number of arguments, and these keyword arguments will be assembled into a dictionary in functions.
def person(name, age, **kw):
    """Print the required arguments plus any extra keyword arguments gathered into dict kw."""
    print('name:', name, 'age:', age, 'other:', kw)
# Function "person" receives keyword argument "kw" besides the essential arguments "name" and "age". You can input only the essential arguments when calling this function.
person('Michael', 30)
# You can also input any number of keyword arguments.
person('Bob', 35, city='Beijing')
person('Adam', 45, gender='M', job='Engineer')
# You can assemble a dictionary and convert it into keyword arguments as inputs. This is similar to assembling changeable arguments.
extra = {'city': 'Beijing', 'job': 'Engineer'}
person('Jack', 24, city=extra['city'], job=extra['job'])
# You can certainly simplify the above-mentioned complex function calling:
# **extra passes every key/value of the dict as keyword arguments. kw receives a copy, so mutating kw inside the function leaves extra untouched.
extra = {'city': 'Beijing', 'job': 'Engineer'}
person('Jack', 24, **extra)
# ## Name keyword arguments
# If you want to restrict names of keyword arguments, you can name keyword arguments. For example, you can accept only "city" and "job" as keyword arguments. A function defined in this way is as follows:
def person(name, age, *, city, job):
    """The bare * makes city and job keyword-only arguments."""
    print(name, age, city, job)
# Different from keyword argument "**kw", a special separator "*" is required to name keyword arguments. Arguments after "*" are regarded as named keyword arguments, which are called as follows:
person('Jack', 24, city='Beijing', job='Engineer')
# The special separator "*" is not required when the named keyword arguments follow a changeable (*args) argument.
def person(name, age, *args, city="Beijing",job):
    """city and job are keyword-only because they follow *args; city has a default value."""
    print(name, age, args, city, job)
# Because the keyword argument "city" has a default value, you do not need to input a "city" argument when calling.
person('Jack', 24, job='Engineer')
# When you name a keyword argument, "*" must be added as a special separator if there are no changeable arguments. Python interpreter cannot identify position arguments and keyword arguments if there is no "*".
# # Argument combination
# To define a function in Python, you can use required arguments, default arguments, changeable arguments, keyword arguments and named keyword arguments. These five types of arguments can be combined with each other.
# Note: Arguments must be defined in the order of required arguments, default arguments, changeable arguments, named keyword arguments, and keyword arguments.
# For example, to define a function that includes the above-mentioned arguments:
def f1(a, b, c=0, *args, **kw):
    """Required a, b; default c; extra positionals collected in args; extra keywords in kw."""
    print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)
# The Python interpreter matches the arguments according to argument positions and names automatically when calling a function.
f1(1, 2)
f1(1, 2, c=3)
f1(1, 2, 3, 'a', 'b')
f1(1, 2, 3, 'a', 'b', x=99)
def f2(a, b, c=0, *, d, **kw):
    """Like f1, but d is keyword-only (the bare * ends the positional arguments)."""
    print('a =', a, 'b =', b, 'c =', c, 'd =', d, 'kw =', kw)
f2(1, 2, d=99, ext='a')
def f1(a, b, c=0, *args, **kw):  # NOTE(review): identical redefinition of f1 above
    print('a =', a, 'b =', b, 'c =', c, 'args =', args, 'kw =', kw)
# The most amazing thing is that you can call the above-mentioned functions through a tuple and a dictionary.
args = (1, 2, 3, 4,6)
kw = {'d': 99, 'x': '#'}
f1(*args, **kw)
args = (1, 2, 3)
kw = {'d': 88, 'x': '#'}
f2(*args, **kw)
# Therefore, you can call a function through the forms similar to "func(*args, **kw)", no matter how its arguments are defined.
# ## Recursive function
# You need to prevent a stack overflow when you use a recursive function. Functions are called through the stack data structure in computers. The stack will add a layer of stack frames when a function is called, while the stack will remove a layer of stack frames when a function is returned. As the size of a stack is limited, it will lead to a stack overflow if there are excessive numbers of recursive calling of functions.
# Solution to a stack overflow: tail recursion optimization
# You can use tail recursion optimization to solve a stack flow. As tail recursion enjoys the same effects with looping, you can take looping as a special tail recursion, which means to call itself when the function is returned and exclude expressions in the "return" statement. In this way, the compiler or interpreter can optimize tail recursion, making recursion occupying only one stack frame, no matter how many times the function is called. This eliminates the possibility of stack overflow.
# For the fact(n) function, because a multiplication expression is introduced in return n * fact(n - 1), it is not tail recursion. To change it into tail recursion, more code is needed to transfer the product of each step into a recursive function.
def fact(n):
    """Compute n! by delegating to the tail-recursive helper fact_iter."""
    return fact_iter(n, 1)
def fact_iter(num, product):
    """Tail-recursive accumulator: carries the running product down the recursion.

    Each call computes num - 1 and num * product BEFORE recursing, so the
    return statement contains only the bare recursive call (tail position).
    """
    if num != 1:
        return fact_iter(num - 1, num * product)
    return product
fact(3)
fact_iter(1,8)
fact_iter(2,3)
# It can be learned that return fact_iter(num - 1, num * product) returns only the recursive function itself. num – 1 and num * product will be calculated before the function is called, without any impact on the function.
# # Object-Oriented Programming
# This experiment mainly introduces related knowledge units about object-oriented programming in Python, and related operations.
# As a programming idea, Object Oriented Programming (OOP) takes objects as the basic units of a program. An object includes data and functions that operate the data.
# Process-oriented design (OOD) takes a program as a series of commands, which are a group of functions to be executed in order. To simplify program design, OOD cuts functions further into sub-functions. This reduces system complexity by cutting functions into sub-functions.
# OOP takes a program as a combination of objects, each of which can receive messages from other objects and process these messages. Execution of a computer program is to transfer a series of messages among different objects.
# In Python, all data types can be regarded as objects, and you can customize objects. The customized object data types are classes in object-orientation.
# Introduction to the object-oriented technology
# * Class: A class refers to the combination of objects that have the same attributes and methods. It defines the common attributes and methods of these objects in the combination. Objects are instances of classes.
# * Class variable: Class variables are publicly used in the total instantiation, and they are defined within classes but beyond function bodies. Class variables are not used as instance variables.
# * Data member: Class variables or instance variables process data related to classes and their instance objects.
# * Method re-writing: If the methods inherited from parent classes do not meet the requirements of sub-classes, the methods can be re-written. Re-writing a method is also called overriding.
# * Instance variable: Instance variables are defined in methods and are used only for the classes of current instances.
# * Inheritance: Inheritance means that a derived class inherits the fields and methods from a base class, and it allows taking the objects of derived class as the objects of base classes. For example, a dog-class object drives from an animal-class object. This simulates a "(is-a)" relationship (in the figure, a dog is an animal).
# * Instantiation: It refers to creating instances for a class or objects for a class.
# * Methods: functions defined in classes.
# * Objects: data structure objects defined through classes. Objects include two data members (class variable and instance variable), and methods.
# ## Create and use a class
# Create a dog class.
# Each instance created based on a dog class stores name and age. We will assign capabilities of sitting (sit () ) and rolling over (roll_over () ) as follows:
class Dog():
    """A simple attempt to model a dog with a name, an age, and two behaviors."""
    def __init__(self, name, age):
        """Initialize the name and age attributes."""
        self.name = name
        self.age = age
    def sit(self):
        """Simulate sitting when the dog is ordered to do so."""
        print(self.name.title()+" is now sitting")
    def roll_over(self):
        """Simulate rolling over when the dog is ordered to do so."""
        print(self.name.title()+" rolled over!")
# The constructor must be spelled __init__ (double leading/trailing underscores)
# so that it runs automatically on instantiation; the original "init" was an
# ordinary method and had to be called by hand after Dog() was created.
dog = Dog("luc", 2)
dog.sit()
# ## Access attributes
# Let us see a complete instance.
class Employee:
    'Base class for all employees'
    # Class variable shared by the whole class: counts how many Employee
    # objects have ever been constructed.
    empCount = 0
    def __init__(self, name, salary):
        """Store name and salary on the instance and bump the class-wide counter."""
        self.name = name
        self.salary = salary
        Employee.empCount += 1
    def displayCount(self):
        """Print the total number of Employee instances created so far."""
        print("Total Employee %d" % Employee.empCount )
    def displayEmployee(self):
        """Print this employee's name and salary."""
        print("Name : ", self.name, ", Salary: ", self.salary)
# Create the first object of the employee class
emp1 = Employee("Zara", 2000)
# Create the second object of the employee class
emp2 = Employee("Manni", 5000)
emp3 = Employee("Alibaba", 330)
emp1.displayEmployee()
emp2.displayEmployee()
emp3.displayEmployee()
# empCount was incremented once per construction above, so this prints 3.
print("Total Employee %d" % Employee.empCount)
# ## Class inheritance
# The major benefit of oriented-object programming is reuse of code. One way to reuse code is the inheritance mechanism. Inheritance can be taken as setting relationships of parent classes and child classes between classes.
# Some features of class inheritance in Python.
# The constructor (the __init__() method) of the base class will not be auto-called; it has to be called explicitly in the constructor of its derived classes.
# Class prefixes and self argument variables have to be added to the base class when its methods are called. The self argument is not required when regular functions in classes are called.
# Python always looks up the methods of the corresponding class first, and checks the base class method by method only if the method is not found in the derived class. (That is, Python searches for the called method in this class first and then in the base class.)
# If an inheritance type lists more than one class, this inheritance is called multi-inheritance.
#
#
class Parent: # Define the parent class
    """Base class whose attribute and methods are inherited by Child below."""
    parentAttr = 100 # Class attribute, shared by all instances and subclasses
    def __init__(self):
        print("Call parent class construction method")
    def parentMethod(self):
        print('Call parent class method')
    def setAttr(self, attr):
        # Writes the CLASS attribute, so the change is visible to every instance.
        Parent.parentAttr = attr
    def getAttr(self):
        print("Parent class attribute", Parent.parentAttr)
class Child(Parent): # Define a sub-class
    """Inherits from Parent. Because __init__ is overridden here, Parent.__init__ is NOT called automatically."""
    def __init__(self):
        print("Call sub-class construction method")
    def childMethod(self):
        print('Call sub-class method')
c = Child() # Instantiate sub-class (prints the sub-class construction message only)
c.childMethod() # Call sub-class method
c.parentMethod() # Call inherited parent class method
c.setAttr(200) # Call inherited parent class method - set the class attribute
c.getAttr() # Call inherited parent class method - read the class attribute (now 200)
# ## Class attributes and methods
# Private attributes of classes:
# * __private_attrs: It starts with two underlines to indicate a private attribute, which cannot be used outside a class or directly accessed. When it is used inside a class method, follow the form of self.__private_attrs.
# Method of class
# Inside a class, the def keyword can be used to define a method; unlike a regular function, a class method must include the self argument, which has to be the first argument.
# Private method
# * __private_method: It starts with two underlines to indicate a private method, which cannot be used outside a class. When it is used inside a class, follow the form of self.__private_methods.
class JustCounter:
    """Demonstrates private attributes: __secretCount is name-mangled, publicCount is not."""
    __secretCount = 0 # Private variable (mangled to _JustCounter__secretCount)
    publicCount = 0 # Public variable
    def count(self):
        # Each call bumps both counters and prints the private one.
        self.__secretCount += 1
        self.publicCount += 1
        print(self.__secretCount)
counter = JustCounter()
counter.count()
counter.count()
print(counter.publicCount)
print(counter.__secretCount) # Deliberate error: an instance cannot access the private variable (AttributeError)
# ## Date and Time
# This experiment mainly introduces related knowledge units about date and time in Python, and related operations on them.
# How to process date and time is a typical problem for Python. Python provides the time and calendar modules to format date and time.
# Time spacing is floating numbers with seconds as the unit. Each time stamp is expressed as the time that has passed since the midnight of January 1st 1970.
# The time module of Python has many functions to convert common date formats.
# +
## Get the current time
# -
import time
# time.time() is seconds since the epoch; localtime() converts it to a struct_time.
localtime = time.localtime(time.time())
print("Local time:", localtime)
# ## Get the formatted time
# You can choose various formats as required, but the simplest function to get the readable time mode is asctime():
import time
localtime = time.asctime( time.localtime(time.time()) )
print("Local time :", localtime)
# +
## Format date
# -
import time
# Format into e.g. 2016-03-20 11:45:39
print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
# Format into e.g. Sat Mar 28 22:24:24 2016
print(time.strftime("%a %b %d %H:%M:%S %Y", time.localtime()))
# Turn a formatted string back into a timestamp: strptime parses, mktime converts to seconds.
a = "Sat Mar 28 22:24:24 2016"
print(time.mktime(time.strptime(a,"%a %b %d %H:%M:%S %Y")))
# ## Get calendar of a month
# The calendar module can process yearly calendars and monthly calendars using multiple methods, for example, printing a monthly calendar.
import calendar
cal = calendar.month(2019, 1) # formatted text calendar for a single month
print("output calendar of January 2019:")
print(cal)
calendar.prcal(2019) # print the calendar for the whole year
# ## re.match function
# re.match tries to match a mode from the string start position. If no mode is matched from the string start, match() returns none.
# Function syntax:
# re.match(pattern, string, flags=0)
# Instance:
import re
print(re.match('www', 'www.runoob.com').span()) # Matches at the start -> (0, 3)
print(re.search('com', 'www.runoob.com')) # search scans the whole string, so 'com' is found
# ## Differences between re.match and re.search
# re.match matches the string start. If the string start does not agree with the regular expression, the matching fails and the function returns none. re.search matches the entire string until finding a match.
import re
line = "Cats are smarter than dogs"  # no trailing semicolon needed in Python
# re.match anchors at the start of the string, so 'dogs' is NOT found here.
matchObj = re.match( r'dogs', line, re.M|re.I)
if matchObj:
    print("match --> matchObj.group() : ", matchObj.group())
else:
    print("No match!!")
# re.search scans the whole string, so 'dogs' IS found.
matchObj = re.search( r'dogs', line, re.M|re.I)
if matchObj:
    print("search --> matchObj.group() : ", matchObj.group())
else:
    print("No match!!")
# # File Manipulation
# This experiment mainly introduces related knowledge units about file manipulation in Python, and related operations.
# File manipulation is essential to programming languages, as information technologies would be meaningless if data cannot be stored permanently. This chapter introduces common file manipulation in Python.
# ## Read keyboard input
# Python provides two build-in functions to read a text line from the standard input, which a keyboard by default. The function is input function.
# ## input( ) function:
# The input([prompt]) can receive a Python expression as the input and return the result.
# NOTE(review): "str" shadows the built-in str type for the rest of the session.
str = input("Please input:")
print("Your input is: ", str)
# ## Open and close files
# Python provides essential functions and methods to manipulate files by default. You can use the file object to do most file manipulations.
# open() function: You should open a file using the Python built-in open() function, which creates a file object, so that the related methods can be called to write and read.
# Open a file ("w" creates foo.txt, or truncates it if it exists)
fo = open("foo.txt", "w")
print("File name: ", fo.name)
print("closed or not: ", fo.closed)
print("access mode:", fo.mode)
# Open a file again. NOTE(review): the first file object above is never closed.
fo = open("foo.txt", "w")
print("File name: ", fo.name)
# Close the opened file
fo.close()
# ## Write a file
# write() function: It writes any string into an opened file. Note that Python strings can be binary data, not just texts. This function will not add a line feed ('\n') at string ends.
# Open a file
# Open a file. Use a relative path: in the original "C:\foo.txt" the "\f" is
# interpreted as a form-feed escape character, producing a broken file name
# (for Windows paths use a raw string r"C:\foo.txt" or forward slashes).
# The later read/rename cells expect "foo.txt" in the working directory.
fo = open("foo.txt", "w")
fo.write( "www.baidu.com!\nVery good site!\n")
# Close an opened file
fo.close()
#The code above creates a foo.txt file, writes the received content into this file, and closes the file. If you open this file, you will see:
#www.baidu.com!
#Very good site!
# ## Read a file
# Read() function: It reads strings from an opened file. Note that Python strings can be binary data, not just texts.
# Open a file
# Open a file for reading and writing ("r+" requires the file to already exist)
fo = open("foo.txt", "r+")
str = fo.read(10) # read at most the first 10 characters; NOTE(review): "str" shadows the built-in type
print("The read string is: ", str)
# Close an opened file
fo.close()
# ## Rename a file
# The os module of Python provides a method to execute file processing operations, like renaming and deleting files. To use this module, you have to import it first and then call various functions.
# rename(): It requires two arguments: current file name and new file name.
# Function syntax:
# os.rename(current_file_name, new_file_name)
import os
# Rename file foo.txt to test2.txt (raises OSError if foo.txt does not exist)
os.rename( "foo.txt", "test2.txt" )
# Open the renamed file
fo = open("test2.txt", "r+")
str = fo.read(10) # read at most the first 10 characters
print("The read string is: ", str)
fo.close()
# ## Delete a file
# You can use the remove() method to delete a file, using the name of the file to be deleted as an argument.
# Function syntax:
# `os.remove(file_name)`
import os
# Delete the existing file test2.txt
os.remove("test2.txt")
open("test2.txt") # Deliberate error: the file was just deleted, so this raises FileNotFoundError
| Learn Python/Python Experiment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# <font size = "5"> **Chapter 2: [Diffraction](CH2_00-Diffraction.ipynb)** </font>
#
# <hr style="height:1px;border-top:4px solid #FF8200" />
#
# # Unit Cell Determination and Stereographic Projection
#
# [Download](https://raw.githubusercontent.com/gduscher/MSE672-Introduction-to-TEM//main/Diffraction/CH2_09-Unit_Cell.ipynb)
#
# [](
# https://colab.research.google.com/github/gduscher/MSE672-Introduction-to-TEM/blob/main/Diffraction/CH2_09-Unit_Cell.ipynb)
#
#
#
# part of
#
# <font size = "5"> **[MSE672: Introduction to Transmission Electron Microscopy](../_MSE672_Intro_TEM.ipynb)**</font>
#
# by <NAME>, Spring 2021
#
# Microscopy Facilities<br>
# Joint Institute of Advanced Materials<br>
# Materials Science & Engineering<br>
# The University of Tennessee, Knoxville
#
# Background and methods to analysis and quantification of data acquired with transmission electron microscopes
#
# ## Load relevant python packages
# ### Check Installed Packages
# +
import sys
from pkg_resources import get_distribution, DistributionNotFound
def test_package(package_name):
"""Test if package exists and returns version or -1"""
try:
version = get_distribution(package_name).version
except (DistributionNotFound, ImportError) as err:
version = '-1'
return version
# Colab setup ------------------
# NOTE: the "# !pip ..." lines are Jupyter shell magics that jupytext keeps as
# comments; this cell is only executable inside a notebook, where the magics
# become real statements (as plain Python the "if" branch body would be empty).
if 'google.colab' in sys.modules:
    # !pip install pyTEMlib -q
# pyTEMlib setup ------------------
else:
    if test_package('pyTEMlib') < '0.2021.3.17':
        print('installing pyTEMlib')
        # !{sys.executable} -m pip install --upgrade pyTEMlib -q
# ------------------------------
print('done')
# -
# ## Import numerical and plotting python packages
# Import the python packages that we will use:
#
# Beside the basic numerical (numpy) and plotting (pylab of matplotlib) libraries,
#
# and some libraries from the book
# * kinematic scattering library.
# +
# import matplotlib and numpy
# use "inline" instead of "notebook" for non-interactive plots
import sys
# NOTE: the "# %pylab ..." lines are notebook magics kept as comments by
# jupytext; this cell only runs inside a notebook, where they populate the
# namespace with numpy (np) and matplotlib's pylab (plt, etc.).
if 'google.colab' in sys.modules:
    # %pylab --no-import-all inline
else:
    # %pylab --no-import-all notebook
# additional package
import itertools
from matplotlib import patches
# Import libraries from pyTEMlib
import pyTEMlib
import pyTEMlib.KinsCat as ks # Kinematic sCattering Library
# Atomic form factors from Kirkland's book
__notebook_version__ = '2021.02.17'
print('pyTEM version: ', pyTEMlib.__version__)
print('notebook version: ', __notebook_version__)
# -
# ## Unit Cell Determination
#
# - The HOLZ rings will give the lattice repeat vector (reciprocal vector parallel to the zone axis).
# - So tilting in [001] zone axis, the ZOLZ pattern will give you the [100] and [010] distance
# - and the HOLZ ring radius the [001] distance.
#
# > **This is the determination of the lattice parameter of a unit cell.**
# >
# >Thus, we see that one can determine 3D information from a single two dimensional pattern.
# >
# >It might be necessary to use other low order zone axes.
#
# ### Measurements
#
# - Record HOLZ and ZOLZ patterns, if possible in one picture (use double illumination with different exposure times to enhance dynamic range), but with different convergence angles.
# - If the angle of the ring is too large then your measurements may suffer from lens distortions.
# - If the HOLZ ring is split measure the inner one.
# ### Z-Component of Unit Cell
#
# If $H$ is the distance between the reciprocal-lattice planes parallel to the beam and $G_n$ is the projected radius of the HOLZ ring, then
# \begin{eqnarray}
# G_1&=& \left( \frac{2H}{\lambda}\right)^{1/2} = \sqrt{\frac{2H}{\lambda}}\\
# G_2&=& 2\left( \frac{H}{\lambda}\right)^{1/2} = 2 \sqrt{\frac{H}{\lambda}}
# \end{eqnarray}
# for FOLZ and SOLZ.
#
# Similar expressions can be developed for higher order HOLZ rings.
#
# In real space you get for example for FOLZ:
# \begin{equation}
# \frac{1}{H}=\frac{2}{\lambda G_1^2} = \frac{2}{\lambda} \left(\frac{\lambda L}{r}\right)^{2}
# \end{equation}
#
# If you did use a zone axis which is not ${100}$, then you have to compare your result to calculated values.
#
# Assuming you are looking down $[UVW]$ then we know:
# \begin{equation}
# \frac{1}{H}= |[UVW]|
# \end{equation}
#
# Now we have to calculate this $|[UVW]|$ for different structures:\\
# #### for fcc:
# \begin{equation}
# \frac{1}{H}= \frac{a_0}{p(U^2+V^2+W^2)}
# \end{equation}
# with $a_0$ is the lattice parameter and $p=1$ for $U+V+W$ is odd; $p=2$ for $U+V+W$ is even.
#
#
# #### for bcc:
# the same relationship as for fcc is true for bcc but $p$ is different: $p=2$ for $U$, $V$, and $W$ all odd; $p=1$ otherwise.
#
#
# Look up other crystal systems.
#
# If a ring is forbidden: you have to multiply your measurement $1/H_m$ with an integer $n$ to obtain the distance of the crystal.
#
# ## Lattice Centering
#
# We are going to look at cubic structures to
# - fcc
# - bcc
# - a-face
# - b-face
# - primitive or simple cubic
#
# The maximal excitation error is chosen so that ZOLZ and FOLZ overlap and we can see the different centering
# +
def plot_spots(tags, ax):
    """Scatter-plot a spot diffraction pattern, colour-coded by Laue zone.

    tags: dict with tags['allowed']['g'] holding N x 3 reciprocal-lattice
          vectors, and tags['crystal_name'] used as the plot title.
    ax:   matplotlib axis to draw on.
    """
    g_vectors = tags['allowed']['g']
    # Order the spots by squared distance from the origin.
    order = np.argsort((g_vectors**2).sum(axis=1))
    spots = g_vectors[order]
    # The z-component distinguishes the zero-, first- and second-order Laue zones.
    zones = np.unique(spots[:,2])
    zolz = np.where(spots[:,2] == zones[0])
    folz = np.where(spots[:,2] == zones[1])
    solz = np.where(spots[:,2] == zones[2])
    ax.scatter(spots[zolz,0], spots[zolz,1], color='red')
    ax.scatter(spots[folz,0], spots[folz,1], color='blue', alpha = 0.3)
    ax.scatter(spots[solz,0], spots[solz,1], color='green', alpha = 0.3)
    ax.set_aspect('equal')
    ax.set_title(tags['crystal_name'])
    ax.set_xlim(-40,40)
    ax.set_ylim(-40,40)
# load structure
tags = ks.structure_by_name('FCC Fe')
# add necessary parameters for the kinematic scattering calculation
tags['acceleration_voltage_V'] = 200000
tags['convergence_angle_mrad'] = 0
tags['zone_hkl'] = [0, 0, 1] # incident nearest zone axis: defines the Laue zones
tags['mistilt'] = np.array([0,0,0]) # mistilt in degrees
tags['Sg_max'] = 2.5 # 1/nm maximum allowed excitation error; chosen large so ZOLZ and FOLZ overlap
tags['hkl_max'] = 15 # Highest evaluated Miller indices
# calculate the kinematic scattering data
ks.kinematic_scattering(tags, False)
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(10,6))
# plot the FCC diffraction pattern twice (second panel labelled 'FCC or I')
plot_spots(tags, ax[0, 0])
tags['crystal_name'] = 'FCC or I'
plot_spots(tags, ax[1, 2])
# Switch to the BCC structure and recompute.
tags.update(ks.structure_by_name('BCC Fe'))
ks.kinematic_scattering(tags, False)
plot_spots(tags, ax[1, 0])
# A-face centred lattice: second atom at (0, 1/2, 1/2).
tags['crystal_name'] = 'a-face'
tags['base'] = np.array([[0. , 0. , 0. ], [0, 1/2, 1/2]])
ks.kinematic_scattering(tags, False)
plot_spots(tags, ax[0, 1])
# B-face centred lattice: second atom at (1/2, 0, 1/2).
tags['crystal_name'] = 'b-face'
tags['base'] = np.array([[0. , 0. , 0. ], [1/2, 0, 1/2]])
ks.kinematic_scattering(tags, False)
plot_spots(tags, ax[1, 1])
# Primitive (simple cubic) lattice: a single atom at the origin.
tags['crystal_name'] = 'simple cubic'
tags['base'] = np.array([[0. , 0. , 0. ]])
tags['elements'] = ['Fe']
ks.kinematic_scattering(tags, False)
plot_spots(tags, ax[0, 2])
# -
# To analyse an experimental pattern:
#
# - Extend the pattern for the ZOLZ into the HOLZ ring and look for discrepancies.
# ## Laue Circle
#
# The mistilt (angles in degrees) leads to a circular pattern in the ZOLZ.
#
# Any mistilt will cause the Ewald sphere to cut through the projection plane in a circle:
# the Laue circle. The maximal excitation error $S_{g_{max}}$ has to be rather small for this effect to appear.
#
# The nearest zone axis will always be in the middle of the Laue circle.
#
# If you encounter such a Laue circle try to minimize the circle by tilting towards the center.
# You can try this out below in the next code cell.
# +
# -----Input ----------
tags['mistilt'] = np.array([0., -2 , 0]) # tilt away from the zone axis, in degrees
tags['Sg_max'] = .05 # 1/nm maximum allowed excitation error; must be small for the Laue circle to appear
# ---------------------
# add necessary parameters for the kinematic scattering calculation
tags['acceleration_voltage_V'] = 200000
tags['convergence_angle_mrad'] = 0
tags['zone_hkl'] = [0, 0, 1] # incident nearest zone axis: defines the Laue zones
tags['crystal_name'] = f"FCC with mistilt {tags['mistilt']}"
ks.kinematic_scattering(tags, False)
ks.plotSAED(tags) # with the mistilt the visible ZOLZ spots lie on the Laue circle
# -
# The next graph shows a cross section through the reciprocal space (with Ewald sphere).
#
# The tilt out of zone axis (blue) leaves some spots in the middle with an high excitation error $s_g$ larger than the maximum allowed one. These spots (in the figure from 2 1/nm to 8 1/nm) are invisible in such a case. Because the Ewald sphere is a 3D object the cut of a sphere with a plane will give a circle.
#
from pyTEMlib import animation
plt.figure()
# Two HOLZ-line constructions for the zero-order Laue zone; presumably black is
# the untilted geometry and blue the exact-Bragg tilt -- TODO confirm in pyTEMlib docs.
animation.deficient_holz_line(exact_bragg=False, laue_zone=0, color='black')
animation.deficient_holz_line(exact_bragg=True, laue_zone=0, color='blue')
# ## Stereographic Projection
# There are a lot of problems in materials science you can solve with diffraction patterns in the TEM.
#
# For instance the orientation relationship two crystals have to each other. What is the grain boundary plane and so on.
#
# The method to visualize such orientation relationships is the stereographic projection.
#
#
# ### Construction
#
# The Schematic below shows the construction of **Stereographic Projection** for cubic systems.
#
# Draw the crystal in the middle of a sphere. Draw a line from the center of the sphere through the middle of each plane (must be normal to the plane). Mark where this line intersects the sphere (it is named P in figure above). From this point draw a line to the south or north pole so that you intersect the equatorial plane. If you have to go to the south pole, mark the intersection of this line with equatorial plane with a dot;
# if you go to the north pole mark this intersection with a circle.We construct the point P'. This point represents uniquely one plane. The relevant area of the equatorial plane is a disk.
#
# Now we can also project circumference of a circle. Note that all the planes perpendicular to a low order zone axis lay on such a circle. These circles show up as lines or as ovals in the stereographic projection .
#
# Change the Miller indices to see the change
# +
# ------Input ----------
reflection = np.array([1,0, 1])
# -----------------------
# Only a 2D cross section (x-z plane) is drawn, so the y component must be zero.
if reflection[1] != 0:
    print('we only use a cross section so y is set to 0')
    reflection[1] = 0
R = 90 # 90 degrees projection sphere
x,y,z =reflection/np.linalg.norm(reflection)*R # Coordinates on the sphere surface
# NOTE(review): "projeted" is a typo for "projected" kept for consistency below.
x_projeted = (x*R/(R+z)) # x coordinate on the stereographic projection plane
print(f'projected x-coordinate is {x_projeted:.2f} degree')
plt.figure()
plt.title(f'Cross Section of Stereographic Projection of {reflection} ')
sphere = plt.Circle(( 0. , 0. ), R , fill=False, linewidth=2)
plt.gca().set_aspect( 'equal')
plt.gca().add_artist(sphere)
plt.plot([-R*1.1,R*1.1], [0,0]) # the equatorial projection plane
plt.text(0.04, 0, 'O', horizontalalignment='center', verticalalignment='bottom')
plt.text(-50, 0, 'projection plane', horizontalalignment='center', verticalalignment='bottom')
plt.ylim(-R*1.1,R*1.1)
plt.scatter(0,-R) # S: the south pole used as the projection point
plt.text(0,-R*1.02, 'S', horizontalalignment='center', verticalalignment='top')
plt.scatter(0,0)
plt.plot([0, x], [0, z], label='diffracted wave vector')
plt.scatter(x, z) # P: intersection of the vector with the sphere
plt.text(x*1.04, z, 'P', horizontalalignment='center', verticalalignment='bottom')
plt.plot([x, 0], [z, -R], label='connection to south pole')
plt.scatter(x_projeted, 0) # P': the stereographic projection of P onto the plane
plt.text(x_projeted, -0.4, 'P\'', horizontalalignment='left', verticalalignment='top', )
plt.legend(loc='upper left'); # trailing ";" suppresses the notebook cell output
# -
# **Some features of the stereographic projection:**
#
# - We can represent plane normals and directions in the same projection.
# - We can read off the angles between the directions, because the angles are preserved in this projection. Possibly the most important feature of this projection.
# - The zone axis is always 90$^{\rm o} $ away from any plane normal to its zone.
# - All the planes normal to a particular zone will lay on a great circle (oval). The zone of the centrale pole is on the circumference of the whole projection.
# - The angle between two planes is the angle between their normals measured with the Wulff net.
# - We can add the symmetry elements of any particular crystal system.
#
# ### Wulff Plot
#
# The result of the stereographic projection of the whole projection sphere is shown below. It is convenient to show the **circles of the sphere** as a grid: the Wulff net.
#
# But first we define some helper functions.
# +
## ## Some helper functions first
def circumcenter(a, b, c):
    """Return the circumcenter (ux, uy) of the triangle with vertices a, b, c.

    Each vertex is an (x, y) pair. The circumcenter is equidistant from all
    three vertices; it is used below to construct the arcs of the Wulff net.

    Raises:
        ValueError: if the three points are collinear (no unique circumcenter).
    """
    ax, ay = a
    bx, by = b
    cx, cy = c
    d = 2 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
    if d == 0:
        # Collinear input would otherwise raise an uninformative ZeroDivisionError.
        raise ValueError("circumcenter is undefined for collinear points")
    ux = ((ax * ax + ay * ay) * (by - cy) + (bx * bx + by * by) * (cy - ay) + (cx * cx + cy * cy) * (ay - by)) / d
    uy = ((ax * ax + ay * ay) * (cx - bx) + (bx * bx + by * by) * (ax - cx) + (cx * cx + cy * cy) * (bx - ax)) / d
    return (ux, uy)
def wulff_net(ax, density=10):
    """Draw a Wulff net (the grid of a stereographic projection) on ax.

    ax:      matplotlib axis to draw on.
    density: angular spacing of the grid lines in degrees.
    """
    outer_ring = plt.Circle(( 0. , 0. ), 90 , fill=False, linewidth=2)
    ax.set_aspect( 'equal')
    ax.add_artist( outer_ring )
    # Put the axis spines through the centre of the net and hide the others.
    ax.spines['left'].set_position(('data', 0))
    ax.spines['bottom'].set_position(('data', 0))
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.set_xlim(-94,94)
    ax.set_ylim(-94,94)
    for phi in range(density,90,density):
        phi_r = np.radians(phi)
        x,y = 90*np.sin(phi_r), 90*np.cos(phi_r)
        # Upper/lower arcs: circles through (+/-x, y) and (0, 90-phi).
        u,v = circumcenter([x,y],[-x,y],[0,90-phi])
        theta = np.degrees(np.arctan2( y-v, x-u))
        ax.add_patch(patches.Arc((0. , v), (v-90+phi)*2, (v-90+phi)*2, fill=False, edgecolor = 'gray', linewidth=.5, theta1=180-theta , theta2=theta))
        ax.add_patch(patches.Arc((0. , -v), (v-90+phi)*2, (v-90+phi)*2, fill=False, edgecolor = 'gray', linewidth=.5, theta1=-theta , theta2=180+theta))
        # Left/right arcs: circles through the poles (0, +/-90) and (phi, 0).
        u,v = circumcenter([0,90],[0,-90],[phi,0])
        theta = np.degrees(np.arctan2(u, phi))   # NOTE(review): dead assignment, overwritten two lines below
        radius = np.sqrt(u**2+ 90**2)            # NOTE(review): dead assignment, overwritten two lines below
        theta = np.degrees(np.arctan2( 90, u))
        radius = np.abs(u-phi)
        ax.add_patch(patches.Arc((u, 0), radius*2, radius*2, fill=False, edgecolor = 'gray', linewidth=.5, theta1=180+theta , theta2=-180-theta))
        ax.add_patch(patches.Arc((-u, 0), radius*2, radius*2, fill=False, edgecolor = 'gray', linewidth=.5, theta1=theta , theta2=-theta))
def add_main_circles(ax):
    """Overlay the four main great circles (plus the two diagonals) in blue.

    Parameters
    ----------
    ax : matplotlib axes that already carry a Wulff net of radius 90.
    """
    phi = 45
    phi_r = np.radians(phi)
    x, y = 90*np.sin(phi_r), 90*np.cos(phi_r)
    # Great circles through the projection center map to straight lines.
    ax.plot([x, -x], [y, -y], color='blue')
    ax.plot([x, -x], [-y, y], color='blue')
    # NOTE(review): removed dead code -- phi_r, x, y were recomputed with
    # identical values and a circumcenter()/arctan2 result was never used.
    # The four inclined great circles are arcs of radius 90*sqrt(2),
    # centered on the rim at (0, +-90) and (+-90, 0).
    ax.add_patch(patches.Arc((0., 90), np.sqrt(2)*180, np.sqrt(2)*180, fill=False, edgecolor='blue', linewidth=1, theta1=180+45, theta2=-45))
    ax.add_patch(patches.Arc((0., -90), np.sqrt(2)*180, np.sqrt(2)*180, fill=False, edgecolor='blue', linewidth=1, theta1=45, theta2=180-45))
    ax.add_patch(patches.Arc((90., 0), np.sqrt(2)*180, np.sqrt(2)*180, fill=False, edgecolor='blue', linewidth=1, theta1=180-45, theta2=180+45))
    ax.add_patch(patches.Arc((-90., 0), np.sqrt(2)*180, np.sqrt(2)*180, fill=False, edgecolor='blue', linewidth=1, theta1=-45, theta2=45))
# -
# Again, change the Miller indices around to see where the reflection *lands*.
# +
# ------Input ----------
reflection = np.array([3, 1, 1])
# -----------------------
R = 90 # 90 degrees Ewald sphere
# Project the reflection onto the sphere surface, then down onto the plane.
projected =(reflection/np.linalg.norm(reflection)*R) # coordinates on Ewald sphere surface
projected = projected*R/(R+projected[2]) # (x, y) on the stereographic projection plane
# Only the upper hemisphere (l >= 0) is projected onto this plot.
if projected[2]>=0:
    print(f'Projected coordinates: {projected[:2]}')
else:
    print('negative l Miller index is not supported')
# Draw the Wulff net (defined above) and mark the projected spot in red.
plt.figure()
plt.title(f'Stereographic Projection of {reflection}')
wulff_net(plt.gca(), density=10)
add_main_circles(plt.gca())
if projected[2]>=0:
    plt.scatter(projected[0], projected[1], color='red')
# -
# ### Cubic Crystal Reflections
#
# change the maximum Miller index around to see what happens
# +
# ------Input ----------
hkl_max = 7
# -----------------------
h = np.linspace(-hkl_max, hkl_max, 2*hkl_max+1)  # all evaluated single Miller indices
hkl = np.array(list(itertools.product(h, h, h)), dtype=int)  # all evaluated Miller index triples
# NOTE(review): removed dead line `zero = np.where(np.linalg.norm(hkl)==0)`:
# `zero` was never used, and np.linalg.norm without an axis argument returns a
# single scalar for the whole array, so the comparison was meaningless anyway.
# The [000] entry is filtered out inside the loop below.
R = 90 # 90 degrees projection sphere
projected = []
reflections = []
for reflection in hkl:
    if reflection[2]>=0:  # only the upper hemisphere is projected
        if np.linalg.norm(reflection) >0:  # skip the [000] non-reflection
            p = reflection/np.linalg.norm(reflection)*R  # coordinates on sphere surface
            projected.append(p*R/(R+p[2]))  # (x, y) on stereographic projection plane
            reflections.append(reflection)
projected = np.array(projected)
reflections = np.array(reflections, dtype=int)
plt.figure()
plt.title(f'Stereographic Projection of hkl up to [{hkl_max}{hkl_max}{hkl_max}]' )
wulff_net(plt.gca(), density=10)
add_main_circles(plt.gca())
# Color by |h|+|k|+|l|: orange/green/red for sums 1-3, blue for everything else.
color=['orange', 'green', 'red'] + ['blue']*200
for index, spot in enumerate(projected):
    color_index = int(np.abs(reflections[index]).sum()-1)
    plt.scatter(spot[0], spot[1], color=color[color_index])
    if color_index<3:  # label only the low-index reflections
        plt.text(spot[0], spot[1], f'{reflections[index]}', horizontalalignment='left', verticalalignment='top')
# -
# ### Stereographic Projections for Any Symmetry and Any Orientation
#
# We already did all the work in the earlier notebooks, and now we can just plot the projections of the allowed $\vec{g}$ vectors.
#
# The stereographic projection is after all only a projection of allowed reflections.
#
# Use a high ``hkl_max`` parameter (about 15) and you start seeing the Kikuchi bands (next [notebook](CH2-10-Kikuchi.ipynb))
#
# Also see whether you can detect the 3-fold symmetry in [111] zone axis.
# +
# ---Input ---------
hkl_max = 8
zone_axis = [0,0,1]
# ------------------
# Initialize the dictionary of simulation parameters.
tags = {}
### Define crystal (silicon structure from the kinematic-scattering library)
tags = ks.structure_by_name('silicon')
### Define experimental parameters:
tags['acceleration_voltage_V'] = 200.0 *1000.0 # V
tags['new_figure'] = False
tags['plot FOV'] = 30
tags['convergence_angle_mrad'] = 0
tags['zone_hkl'] = np.array(zone_axis) # incident nearest zone axis: defines Laue zones!
tags['mistilt'] = np.array([0,0,0]) # mistilt in degrees (key spelling follows the ks API -- TODO confirm)
tags['Sg_max'] = 20 # 1/nm maximum allowed excitation error; related to the sample thickness
tags['hkl_max'] = hkl_max # highest evaluated Miller indices
######################################
# Diffraction Simulation of Crystal  #
######################################
ks.kinematic_scattering(tags, verbose = True)
# Keep only allowed g vectors in the upper hemisphere (g_z >= 0).
hkl = tags['allowed']['g'][tags['allowed']['g'][:,2]>=0]
projected = []
reflections = []
# NOTE(review): R (projection sphere radius, 90) is defined in a previous cell.
for reflection in hkl:
    p = reflection/np.linalg.norm(reflection)*R # coordinates on sphere surface
    projected.append(p*R/(R+p[2])) # (x, y) on stereographic projection plane
    reflections.append(reflection)
projected = np.array(projected)
reflections = np.array(reflections, dtype=int)
plt.figure()
plt.title(f'Stereographic Projection of hkl up to [{hkl_max}{hkl_max}{hkl_max}]' )
wulff_net(plt.gca(), density=10)
# add_main_circles(plt.gca())
# Color/alpha by |h|+|k|+|l|: low-index reflections opaque, the rest faded blue.
color=['orange', 'green', 'red'] + ['blue']*100
alpha = [1, 1, 1] + [0.2]*100
for index, spot in enumerate(projected):
    color_index = int(np.abs(reflections[index]).sum()-1)
    plt.scatter(spot[0], spot[1], color=color[color_index], alpha = alpha[color_index])
    if color_index<3:  # label only the low-index reflections
        plt.text(spot[0], spot[1], f'{reflections[index]}', horizontalalignment='left', verticalalignment='top')
# -
# ### Just a Pretty Plot
# +
def add_main_planes(ax):
    """Mark and label the principal cubic poles on a Wulff net of radius 90.

    Blue: <100>-type poles, red: <111>-type poles, green: <110>-type poles.

    Parameters
    ----------
    ax : matplotlib axes that already carry a Wulff net.
    """
    # <100>-type poles: center and the four rim points.
    ax.scatter(0, 0, color='blue', s=50)
    ax.text(0, 0, '[001]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(-90, 0, color='blue', s=50)
    ax.text(-90, 0, r'[0$\bar{1}$0]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(90, 0, color='blue', s=50)
    ax.text(90, 0, '[010]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(0, 90, color='blue', s=50)
    ax.text(0, 90, r'[$\bar{1}$00]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(0, -90, color='blue', s=50)
    ax.text(0, -90, '[100]', horizontalalignment='left', verticalalignment='top')
    # <111>-type poles on the diagonals.
    phi_r = np.radians(45)
    r = 46.6  # 1/np.tan(phi_r/2)*20 -- radial distance of the <111> poles
    # NOTE(review): replaced bare sin/cos with np.sin/np.cos for consistency
    # with the rest of the file (bare names would need a star import).
    x, y = r*np.sin(phi_r), r*np.cos(phi_r)
    ax.scatter(x, -y, color='red', s=50)
    ax.text(x, -y, '[111]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(x, y, color='red', s=50)
    ax.text(x, y, r'[$\bar{1}$11]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(-x, y, color='red', s=50)
    ax.text(-x, y, r'[$\bar{1}\bar{1}$1]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(-x, -y, color='red', s=50)
    # NOTE(review): dropped the stray commas in this label so it matches the
    # formatting of all the sibling labels.
    ax.text(-x, -y, r'[1$\bar{1}$1]', horizontalalignment='left', verticalalignment='top')
    # <110>-type poles on the axes, at 37.2 from the center.
    ax.scatter(37.2, 0, color='green', s=50)
    ax.text(37.2, 0, '[011]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(-37.2, 0, color='green', s=50)
    ax.text(-37.2, 0, r'[0$\bar{1}$1]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(0, -37.2, color='green', s=50)
    ax.text(0, -37.2, r'[101]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(0, 37.2, color='green', s=50)
    ax.text(0, 37.2, r'[$\bar{1}$01]', horizontalalignment='left', verticalalignment='top')
    # <110>-type poles on the rim, on the diagonals.
    phi = 45
    phi_r = np.radians(phi)
    x, y = 90*np.sin(phi_r), 90*np.cos(phi_r)
    ax.scatter(-x, -y, color='green', s=50)
    ax.text(-x, -y, '[110]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(x, -y, color='green', s=50)
    ax.text(x, -y, r'[1$\bar{1}$0]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(x, y, color='green', s=50)
    ax.text(x, y, r'[$\bar{1}$10]', horizontalalignment='left', verticalalignment='top')
    ax.scatter(-x, y, color='green', s=50)
    ax.text(-x, y, r'[$\bar{1}\bar{1}$0]', horizontalalignment='left', verticalalignment='top')
# Combine everything: a Wulff net decorated with the principal cubic poles
# and the main great circles.
plt.figure(figsize=(5, 5))
wulff_net(plt.gca(), density=10)
add_main_planes(plt.gca())
add_main_circles(plt.gca())
# -
# ## Summary
#
# Lots of information can be gained with basic crystallography tools and trigonometry.
#
# ## Navigation
#
# - <font size = "3"> **Back: [Spot Diffraction Pattern](CH2_8-Spot_Diffraction_Pattern)** </font>
# - <font size = "3"> **Next: [Kikuchi Lines](CH2_10-Kikuchi_Lines.ipynb)** </font>
# - <font size = "3"> **Chapter 2: [Diffraction](CH2_00-Diffraction.ipynb)** </font>
# - <font size = "3"> **List of Content: [Front](../_MSE672_Intro_TEM.ipynb)** </font>
| Diffraction/.ipynb_checkpoints/CH2_09-Unit_Cell-checkpoint.ipynb |