repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
ecell/ecell4-notebooks | docs/tutorials/tutorial04.ipynb | gpl-2.0 | from ecell4_base.core import *
"""
Explanation: 4. How to Run a Simulation
In sections 2 and 3, we explained the way to build a model and to set up the initial state. Now, it is time to run a simulation. Corresponding to the World classes, there are six Simulator classes: spatiocyte.SpatiocyteSimulator, egfrd.EGFRDSimulator, bd.BDSimulator, meso.MesoscopicSimulator, gillespie.GillespieSimulator, and ode.ODESimulator. Each Simulator class only accepts the corresponding type of World, but all of them allow the same Model.
End of explanation
"""
from ecell4_base import *
"""
Explanation: 4.1. How to Setup a Simulator
Except for the initialization (so-called constructor function) with arguments specific to the algorithm, all Simulators have the same APIs.
End of explanation
"""
# Build a simple reversible-binding model: A + B <-> C.
from ecell4 import species_attributes, reaction_rules, get_model

with species_attributes():
    # All species share a diffusion coefficient 'D' and a 'radius'
    # (these attributes are used by the spatial algorithms).
    A | B | C | {'D': 1, 'radius': 0.005}

with reaction_rules():
    # Forward rate 0.01, backward rate 0.3.
    A + B == C | (0.01, 0.3)

m = get_model()

# One World per algorithm; every Simulator accepts the same Model.
w1 = gillespie.World()
w2 = ode.World()
w3 = spatiocyte.World()
w4 = bd.World()
w5 = meso.World()
w6 = egfrd.World()
"""
Explanation: Before constructing a Simulator, prepare a Model and a World corresponding to the type of Simulator.
End of explanation
"""
# A Simulator is constructed from (World, Model), in this order.
sim1 = gillespie.Simulator(w1, m)
sim2 = ode.Simulator(w2, m)
sim3 = spatiocyte.Simulator(w3, m)
sim4 = bd.Simulator(w4, m)
sim5 = meso.Simulator(w5, m)
sim6 = egfrd.Simulator(w6, m)
"""
Explanation: Simulator requires both Model and World in this order at the construction.
End of explanation
"""
# Alternative construction: bind the Model to each World first; then the
# Simulator constructor needs only the World.
w1.bind_to(m)
w2.bind_to(m)
w3.bind_to(m)
w4.bind_to(m)
w5.bind_to(m)
w6.bind_to(m)
sim1 = gillespie.Simulator(w1)
sim2 = ode.Simulator(w2)
sim3 = spatiocyte.Simulator(w3)
sim4 = bd.Simulator(w4)
sim5 = meso.Simulator(w5)
sim6 = egfrd.Simulator(w6)
"""
Explanation: If you bind the Model to the World, you need only the World to create a Simulator.
End of explanation
"""
# The Model and World bound to a Simulator can be retrieved back from it.
print(sim1.model(), sim1.world())
print(sim2.model(), sim2.world())
print(sim3.model(), sim3.world())
print(sim4.model(), sim4.world())
print(sim5.model(), sim5.world())
print(sim6.model(), sim6.world())
"""
Explanation: Of course, the Model and World bound to a Simulator can be drawn from Simulator in the way below:
End of explanation
"""
# After mutating a World directly, each Simulator must be re-initialized so
# its internal state is consistent with the World again.
w1.add_molecules(Species('C'), 60)
w2.add_molecules(Species('C'), 60)
w3.add_molecules(Species('C'), 60)
w4.add_molecules(Species('C'), 60)
w5.add_molecules(Species('C'), 60)
w6.add_molecules(Species('C'), 60)
sim1.initialize()
sim2.initialize()
sim3.initialize()
sim4.initialize()
sim5.initialize()
sim6.initialize()
"""
Explanation: After updating the World by yourself, you must initialize the internal state of a Simulator before running simulation.
End of explanation
"""
# Fixed-step algorithms accept an explicit step interval dt.
sim2.set_dt(1e-6)  # ode.Simulator. This is optional
sim4.set_dt(1e-6)  # bd.Simulator
"""
Explanation: For algorithms with a fixed step interval, the Simulator also requires dt.
End of explanation
"""
# t() is the current simulation time, next_time() the time a plain step()
# will advance to, and dt() the step interval (event-driven algorithms
# report their own values here).
print(sim1.t(), sim1.next_time(), sim1.dt())
print(sim2.t(), sim2.next_time(), sim2.dt())  # => (0.0, 1e-6, 1e-6)
print(sim3.t(), sim3.next_time(), sim3.dt())
print(sim4.t(), sim4.next_time(), sim4.dt())  # => (0.0, 1e-6, 1e-6)
print(sim5.t(), sim5.next_time(), sim5.dt())
print(sim6.t(), sim6.next_time(), sim6.dt())  # => (0.0, 0.0, 0.0)
# step() with no argument advances exactly to next_time().
sim1.step()
sim2.step()
sim3.step()
sim4.step()
sim5.step()
sim6.step()
print(sim1.t())
print(sim2.t())  # => 1e-6
print(sim3.t())
print(sim4.t())  # => 1e-6
print(sim5.t())
print(sim6.t())  # => 0.0
"""
Explanation: 4.2. Running Simulations
For running simulations, Simulator provides two APIs, step and run.
step() advances a simulation for the time that the Simulator expects, next_time().
End of explanation
"""
# Reactions fired during the last step, as (ReactionRule, ReactionInfo) pairs.
print(sim1.last_reactions())
# print(sim2.last_reactions())  # NOTE(review): skipped for ode — presumably unsupported there; confirm
print(sim3.last_reactions())
print(sim4.last_reactions())
print(sim5.last_reactions())
print(sim6.last_reactions())
"""
Explanation: last_reactions() returns a list of pairs of ReactionRule and ReactionInfo which occurred at the last step. Each algorithm has its own implementation of ReactionInfo. See help(module.ReactionInfo) for details.
End of explanation
"""
# step(upto) advances to next_time() if it is below upto, otherwise to upto,
# and returns True while the time has NOT yet reached upto.
print(sim1.step(1.0), sim1.t())
print(sim2.step(1.0), sim2.t())
print(sim3.step(1.0), sim3.t())
print(sim4.step(1.0), sim4.t())
print(sim5.step(1.0), sim5.t())
print(sim6.step(1.0), sim6.t())
"""
Explanation: step(upto) advances a simulation for next_time if next_time is less than upto, or for upto otherwise. step(upto) returns whether the time does NOT reach the limit, upto.
End of explanation
"""
# Run each simulation exactly up to a target time by looping step(upto)
# while it returns True.
while sim1.step(1.0): pass
while sim2.step(0.001): pass
while sim3.step(0.001): pass
while sim4.step(0.001): pass
while sim5.step(1.0): pass
while sim6.step(0.001): pass
print(sim1.t())  # => 1.0
print(sim2.t())  # => 0.001
print(sim3.t())  # => 0.001
print(sim4.t())  # => 0.001
print(sim5.t())  # => 1.0
print(sim6.t())  # => 0.001
"""
Explanation: To run a simulation just until the time, upto, call step(upto) while it returns True.
End of explanation
"""
# run(tau) advances the simulation by tau from the current time t().
sim1.run(1.0)
sim2.run(0.001)
sim3.run(0.001)
sim4.run(0.001)
sim5.run(1.0)
sim6.run(0.001)
print(sim1.t())  # => 2.0
print(sim2.t())  # => 0.002
print(sim3.t())  # => 0.002
print(sim4.t())  # => 0.002
print(sim5.t())  # => 2.0
print(sim6.t())  # => 0.02  NOTE(review): likely a typo for 0.002, following the pattern above
"""
Explanation: This is just what run does. run(tau) advances a simulation upto t()+tau.
End of explanation
"""
# num_steps() is the cumulative number of steps taken so far.
print(sim1.num_steps())
print(sim2.num_steps())
print(sim3.num_steps())
print(sim4.num_steps())
print(sim5.num_steps())
print(sim6.num_steps())
"""
Explanation: num_steps returns the number of steps during the simulation.
End of explanation
"""
def singlerun(f, m):
    """Run a short simulation of model ``m`` using the algorithm wrapped by
    Factory ``f``, then print the final time and the count of species C.
    """
    world = f.world(Real3(1, 1, 1))
    world.bind_to(m)
    world.add_molecules(Species('C'), 60)
    simulator = f.simulator(world)
    simulator.run(0.01)
    print(simulator.t(), world.num_molecules(Species('C')))
"""
Explanation: 4.3. Capsulizing Algorithm into a Factory Class
Owing to the portability of a Model and consistent APIs of Worlds and Simulators, it is very easy to write a script common in algorithms. However, when switching the algorithm, still we have to rewrite the name of classes in the code, one by one.
To avoid the trouble, E-Cell4 also provides a Factory class for each algorithm. Factory encapsulates World and Simulator with their arguments needed for the construction. By using the Factory class, your script could be portable and robust against changes in the algorithm.
Factory just provides two functions, world and simulator.
End of explanation
"""
# The identical routine runs under every algorithm just by swapping the Factory.
singlerun(gillespie.Factory(), m)
singlerun(ode.Factory(), m)
singlerun(spatiocyte.Factory(), m)
singlerun(bd.Factory(bd_dt_factor=1), m)  # Factory forwards algorithm-specific args
singlerun(meso.Factory(), m)
singlerun(egfrd.Factory(), m)
"""
Explanation: singlerun above is free from the algorithm. Thus, by just switching Factory, you can easily compare the results.
End of explanation
"""
# run_simulation also accepts a Factory as the solver; [-1] takes the last
# row of the returned time-series array.
from ecell4.util import run_simulation
print(run_simulation(0.01, model=m, y0={'C': 60}, return_type='array', solver=gillespie.Factory())[-1])
print(run_simulation(0.01, model=m, y0={'C': 60}, return_type='array', solver=ode.Factory())[-1])
print(run_simulation(0.01, model=m, y0={'C': 60}, return_type='array', solver=spatiocyte.Factory())[-1])
print(run_simulation(0.01, model=m, y0={'C': 60}, return_type='array', solver=bd.Factory(bd_dt_factor=1))[-1])
print(run_simulation(0.01, model=m, y0={'C': 60}, return_type='array', solver=meso.Factory())[-1])
print(run_simulation(0.01, model=m, y0={'C': 60}, return_type='array', solver=egfrd.Factory())[-1])
"""
Explanation: When you need to provide several parameters to initialize World or Simulator, run_simulation also accepts Factory instead of solver.
End of explanation
"""
|
zauonlok/cs231n | assignment2/FullyConnectedNets.ipynb | mit | # As usual, a bit of setup
import time
import numpy as np
import matplotlib.pyplot as plt

from cs231n.classifiers.fc_net import *
from cs231n.data_utils import get_CIFAR10_data
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.solver import Solver

# IPython magics: inline plotting and default figure styling.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0)  # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'

# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Return the maximum elementwise relative error between x and y.

    The denominator is clamped to at least 1e-8 to avoid division by zero.
    """
    scale = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(np.abs(x - y) / scale)
# Load the (preprocessed) CIFAR10 data.
# NOTE(review): dict.iteritems and print statements are Python 2 only.
data = get_CIFAR10_data()
for k, v in data.iteritems():
    print '%s: ' % k, v.shape
"""
Explanation: Fully-Connected Neural Nets
In the previous homework you implemented a fully-connected two-layer neural network on CIFAR-10. The implementation was simple but not very modular since the loss and gradient were computed in a single monolithic function. This is manageable for a simple two-layer network, but would become impractical as we move to bigger models. Ideally we want to build networks using a more modular design so that we can implement different layer types in isolation and then snap them together into models with different architectures.
In this exercise we will implement fully-connected networks using a more modular approach. For each layer we will implement a forward and a backward function. The forward function will receive inputs, weights, and other parameters and will return both an output and a cache object storing data needed for the backward pass, like this:
```python
def layer_forward(x, w):
""" Receive inputs x and weights w """
# Do some computations ...
z = # ... some intermediate value
# Do some more computations ...
out = # the output
cache = (x, w, z, out) # Values we need to compute gradients
return out, cache
```
The backward pass will receive upstream derivatives and the cache object, and will return gradients with respect to the inputs and weights, like this:
```python
def layer_backward(dout, cache):
"""
Receive derivative of loss with respect to outputs and cache,
and compute derivative with respect to inputs.
"""
# Unpack cache values
x, w, z, out = cache
# Use values in cache to compute derivatives
dx = # Derivative of loss with respect to x
dw = # Derivative of loss with respect to w
return dx, dw
```
After implementing a bunch of layers this way, we will be able to easily combine them to build classifiers with different architectures.
In addition to implementing fully-connected networks of arbitrary depth, we will also explore different update rules for optimization, and introduce Dropout as a regularizer and Batch Normalization as a tool to more efficiently optimize deep networks.
End of explanation
"""
# Test the affine_forward function against precomputed outputs on a tiny
# deterministic (linspace) input.
num_inputs = 2
input_shape = (4, 5, 6)
output_dim = 3
input_size = num_inputs * np.prod(input_shape)
weight_size = output_dim * np.prod(input_shape)
x = np.linspace(-0.1, 0.5, num=input_size).reshape(num_inputs, *input_shape)
w = np.linspace(-0.2, 0.3, num=weight_size).reshape(np.prod(input_shape), output_dim)
b = np.linspace(-0.3, 0.1, num=output_dim)
out, _ = affine_forward(x, w, b)
correct_out = np.array([[ 1.49834967, 1.70660132, 1.91485297],
                        [ 3.25553199, 3.5141327, 3.77273342]])
# Compare your output with ours. The error should be around 1e-9.
print 'Testing affine_forward function:'
print 'difference: ', rel_error(out, correct_out)
"""
Explanation: Affine layer: forward
Open the file cs231n/layers.py and implement the affine_forward function.
Once you are done you can test your implementation by running the following:
End of explanation
"""
# Test the affine_backward function by comparing analytic gradients to
# numerically evaluated ones.
x = np.random.randn(10, 2, 3)
w = np.random.randn(6, 5)
b = np.random.randn(5)
dout = np.random.randn(10, 5)
dx_num = eval_numerical_gradient_array(lambda x: affine_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_forward(x, w, b)[0], b, dout)
_, cache = affine_forward(x, w, b)
dx, dw, db = affine_backward(dout, cache)
# The error should be around 1e-10
print 'Testing affine_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
"""
Explanation: Affine layer: backward
Now implement the affine_backward function and test your implementation using numeric gradient checking.
End of explanation
"""
# Test the relu_forward function against precomputed outputs.
x = np.linspace(-0.5, 0.5, num=12).reshape(3, 4)
out, _ = relu_forward(x)
correct_out = np.array([[ 0., 0., 0., 0., ],
                        [ 0., 0., 0.04545455, 0.13636364,],
                        [ 0.22727273, 0.31818182, 0.40909091, 0.5, ]])
# Compare your output with ours. The error should be around 1e-8
print 'Testing relu_forward function:'
print 'difference: ', rel_error(out, correct_out)
"""
Explanation: ReLU layer: forward
Implement the forward pass for the ReLU activation function in the relu_forward function and test your implementation using the following:
End of explanation
"""
# Numeric gradient check for relu_backward.
x = np.random.randn(10, 10)
dout = np.random.randn(*x.shape)
dx_num = eval_numerical_gradient_array(lambda x: relu_forward(x)[0], x, dout)
_, cache = relu_forward(x)
dx = relu_backward(dout, cache)
# The error should be around 1e-12
print 'Testing relu_backward function:'
print 'dx error: ', rel_error(dx_num, dx)
"""
Explanation: ReLU layer: backward
Now implement the backward pass for the ReLU activation function in the relu_backward function and test your implementation using numeric gradient checking:
End of explanation
"""
# Numeric gradient check for the fused affine->ReLU "sandwich" layer.
from cs231n.layer_utils import affine_relu_forward, affine_relu_backward

x = np.random.randn(2, 3, 4)
w = np.random.randn(12, 10)
b = np.random.randn(10)
dout = np.random.randn(2, 10)
out, cache = affine_relu_forward(x, w, b)
dx, dw, db = affine_relu_backward(dout, cache)
dx_num = eval_numerical_gradient_array(lambda x: affine_relu_forward(x, w, b)[0], x, dout)
dw_num = eval_numerical_gradient_array(lambda w: affine_relu_forward(x, w, b)[0], w, dout)
db_num = eval_numerical_gradient_array(lambda b: affine_relu_forward(x, w, b)[0], b, dout)
print 'Testing affine_relu_forward:'
print 'dx error: ', rel_error(dx_num, dx)
print 'dw error: ', rel_error(dw_num, dw)
print 'db error: ', rel_error(db_num, db)
"""
Explanation: "Sandwich" layers
There are some common patterns of layers that are frequently used in neural nets. For example, affine layers are frequently followed by a ReLU nonlinearity. To make these common patterns easy, we define several convenience layers in the file cs231n/layer_utils.py.
For now take a look at the affine_relu_forward and affine_relu_backward functions, and run the following to numerically gradient check the backward pass:
End of explanation
"""
# Sanity-check the provided svm_loss and softmax_loss implementations.
num_classes, num_inputs = 10, 50
x = 0.001 * np.random.randn(num_inputs, num_classes)
y = np.random.randint(num_classes, size=num_inputs)

dx_num = eval_numerical_gradient(lambda x: svm_loss(x, y)[0], x, verbose=False)
loss, dx = svm_loss(x, y)
# Test svm_loss function. Loss should be around 9 and dx error should be 1e-9
print 'Testing svm_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)

dx_num = eval_numerical_gradient(lambda x: softmax_loss(x, y)[0], x, verbose=False)
loss, dx = softmax_loss(x, y)
# Test softmax_loss function. Loss should be 2.3 and dx error should be 1e-8
print '\nTesting softmax_loss:'
print 'loss: ', loss
print 'dx error: ', rel_error(dx_num, dx)
"""
Explanation: Loss layers: Softmax and SVM
You implemented these loss functions in the last assignment, so we'll give them to you for free here. You should still make sure you understand how they work by looking at the implementations in cs231n/layers.py.
You can make sure that the implementations are correct by running the following:
End of explanation
"""
# Sanity checks for TwoLayerNet: initialization statistics, a deterministic
# test-time forward pass, loss values with and without regularization, and a
# numeric gradient check.
N, D, H, C = 3, 5, 50, 7
X = np.random.randn(N, D)
y = np.random.randint(C, size=N)
std = 1e-2
model = TwoLayerNet(input_dim=D, hidden_dim=H, num_classes=C, weight_scale=std)

print 'Testing initialization ... '
W1_std = abs(model.params['W1'].std() - std)
b1 = model.params['b1']
W2_std = abs(model.params['W2'].std() - std)
b2 = model.params['b2']
assert W1_std < std / 10, 'First layer weights do not seem right'
assert np.all(b1 == 0), 'First layer biases do not seem right'
assert W2_std < std / 10, 'Second layer weights do not seem right'
assert np.all(b2 == 0), 'Second layer biases do not seem right'

print 'Testing test-time forward pass ... '
# Overwrite parameters with deterministic values so scores are reproducible.
model.params['W1'] = np.linspace(-0.7, 0.3, num=D*H).reshape(D, H)
model.params['b1'] = np.linspace(-0.1, 0.9, num=H)
model.params['W2'] = np.linspace(-0.3, 0.4, num=H*C).reshape(H, C)
model.params['b2'] = np.linspace(-0.9, 0.1, num=C)
X = np.linspace(-5.5, 4.5, num=N*D).reshape(D, N).T
scores = model.loss(X)  # no labels: returns class scores
correct_scores = np.asarray(
  [[11.53165108, 12.2917344, 13.05181771, 13.81190102, 14.57198434, 15.33206765, 16.09215096],
   [12.05769098, 12.74614105, 13.43459113, 14.1230412, 14.81149128, 15.49994135, 16.18839143],
   [12.58373087, 13.20054771, 13.81736455, 14.43418138, 15.05099822, 15.66781506, 16.2846319 ]])
scores_diff = np.abs(scores - correct_scores).sum()
assert scores_diff < 1e-6, 'Problem with test-time forward pass'

print 'Testing training loss (no regularization)'
y = np.asarray([0, 5, 1])
loss, grads = model.loss(X, y)
correct_loss = 3.4702243556
assert abs(loss - correct_loss) < 1e-10, 'Problem with training-time loss'
model.reg = 1.0
loss, grads = model.loss(X, y)
correct_loss = 26.5948426952
assert abs(loss - correct_loss) < 1e-10, 'Problem with regularization loss'

for reg in [0.0, 0.7]:
    print 'Running numeric gradient check with reg = ', reg
    model.reg = reg
    loss, grads = model.loss(X, y)
    for name in sorted(grads):
        f = lambda _: model.loss(X, y)[0]
        grad_num = eval_numerical_gradient(f, model.params[name], verbose=False)
        print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
"""
Explanation: Two-layer network
In the previous assignment you implemented a two-layer neural network in a single monolithic class. Now that you have implemented modular versions of the necessary layers, you will reimplement the two layer network using these modular implementations.
Open the file cs231n/classifiers/fc_net.py and complete the implementation of the TwoLayerNet class. This class will serve as a model for the other networks you will implement in this assignment, so read through it to make sure you understand the API. You can run the cell below to test your implementation.
End of explanation
"""
model = TwoLayerNet()
solver = None

##############################################################################
# TODO: Use a Solver instance to train a TwoLayerNet that achieves at least #
# 50% accuracy on the validation set.                                        #
##############################################################################
solver = Solver(model, data,
                update_rule='sgd',
                optim_config={
                    'learning_rate': 1e-3,
                },
                lr_decay=0.95,
                num_epochs=10, batch_size=100,
                print_every=100)
solver.train()
##############################################################################
#                             END OF YOUR CODE                               #
##############################################################################

# Run this cell to visualize training loss and train / val accuracy
plt.subplot(2, 1, 1)
plt.title('Training loss')
plt.plot(solver.loss_history, 'o')
plt.xlabel('Iteration')

plt.subplot(2, 1, 2)
plt.title('Accuracy')
plt.plot(solver.train_acc_history, '-o', label='train')
plt.plot(solver.val_acc_history, '-o', label='val')
plt.plot([0.5] * len(solver.val_acc_history), 'k--')  # 50% target line
plt.xlabel('Epoch')
plt.legend(loc='lower right')
plt.gcf().set_size_inches(15, 12)
plt.show()
"""
Explanation: Solver
In the previous assignment, the logic for training models was coupled to the models themselves. Following a more modular design, for this assignment we have split the logic for training models into a separate class.
Open the file cs231n/solver.py and read through it to familiarize yourself with the API. After doing so, use a Solver instance to train a TwoLayerNet that achieves at least 50% accuracy on the validation set.
End of explanation
"""
# Initial loss and numeric gradient check for FullyConnectedNet, with and
# without regularization.
N, D, H1, H2, C = 2, 15, 20, 30, 10
X = np.random.randn(N, D)
y = np.random.randint(C, size=(N,))

for reg in [0, 3.14]:
    print 'Running check with reg = ', reg
    model = FullyConnectedNet([H1, H2], input_dim=D, num_classes=C,
                              reg=reg, weight_scale=5e-2, dtype=np.float64)
    loss, grads = model.loss(X, y)
    print 'Initial loss: ', loss
    for name in sorted(grads):
        f = lambda _: model.loss(X, y)[0]
        grad_num = eval_numerical_gradient(f, model.params[name], verbose=False, h=1e-5)
        print '%s relative error: %.2e' % (name, rel_error(grad_num, grads[name]))
"""
Explanation: Multilayer network
Next you will implement a fully-connected network with an arbitrary number of hidden layers.
Read through the FullyConnectedNet class in the file cs231n/classifiers/fc_net.py.
Implement the initialization, the forward pass, and the backward pass. For the moment don't worry about implementing dropout or batch normalization; we will add those features soon.
Initial loss and gradient check
As a sanity check, run the following to check the initial loss and to gradient check the network both with and without regularization. Do the initial losses seem reasonable?
For gradient checking, you should expect to see errors around 1e-6 or less.
End of explanation
"""
# TODO: Use a three-layer Net to overfit 50 training examples.
# Subsample a tiny training set for the overfitting sanity check.
num_train = 50
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}
"""
weight_scale = 1e-2
learning_rate = 1e-4
model = FullyConnectedNet([100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
"""
def train_model(weight_scale, learning_rate, verbose=False):
    """Train a three-layer [100, 100] net on small_data with SGD.

    Args:
        weight_scale: std-dev used for weight initialization.
        learning_rate: SGD learning rate.
        verbose: forwarded to Solver.

    Returns:
        (solver, final training accuracy, final validation accuracy).
    """
    model = FullyConnectedNet([100, 100],
                              weight_scale=weight_scale, dtype=np.float64)
    solver = Solver(model, small_data,
                    print_every=10, num_epochs=20, batch_size=25,
                    update_rule='sgd',
                    optim_config={
                        'learning_rate': learning_rate,
                    },
                    verbose=verbose
                    )
    solver.train()
    return solver, solver.train_acc_history[-1], solver.val_acc_history[-1]

# Grid-search over initialization scales and learning rates.
weight_scales = [1e-03, 1e-02, 1e-01]
learning_rates = [1e-5, 1e-4, 1e-3]
solver = None
for scale in weight_scales:
    for rate in learning_rates:
        # BUGFIX: `solver` was previously never bound at module scope (the
        # cell that defined it is commented out above), so the plotting
        # lines below raised NameError. Capture the solver here instead.
        solver, train_acc, val_acc = train_model(scale, rate)
        print('scale: %f, rate: %f, train_acc: %f, val_acc: %f' % (
            scale, rate, train_acc, val_acc))

# Plot the loss history of the last configuration trained.
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
"""
Explanation: As another sanity check, make sure you can overfit a small dataset of 50 images. First we will try a three-layer network with 100 units in each hidden layer. You will need to tweak the learning rate and initialization scale, but you should be able to overfit and achieve 100% training accuracy within 20 epochs.
End of explanation
"""
# TODO: Use a five-layer Net to overfit 50 training examples.
# Same tiny training subset as in the three-layer experiment.
num_train = 50
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}
"""
learning_rate = 1e-3
weight_scale = 1e-5
model = FullyConnectedNet([100, 100, 100, 100],
weight_scale=weight_scale, dtype=np.float64)
solver = Solver(model, small_data,
print_every=10, num_epochs=20, batch_size=25,
update_rule='sgd',
optim_config={
'learning_rate': learning_rate,
}
)
solver.train()
"""
def train_model(weight_scale, learning_rate, verbose=False):
    """Train a five-layer [100, 100, 100, 100] net on small_data with SGD.

    Args:
        weight_scale: std-dev used for weight initialization.
        learning_rate: SGD learning rate.
        verbose: forwarded to Solver.

    Returns:
        (solver, final training accuracy, final validation accuracy).
    """
    model = FullyConnectedNet([100, 100, 100, 100],
                              weight_scale=weight_scale, dtype=np.float64)
    solver = Solver(model, small_data,
                    print_every=10, num_epochs=20, batch_size=25,
                    update_rule='sgd',
                    optim_config={
                        'learning_rate': learning_rate,
                    },
                    verbose=verbose
                    )
    solver.train()
    return solver, solver.train_acc_history[-1], solver.val_acc_history[-1]

# Grid-search over initialization scales and learning rates.
weight_scales = [1e-03, 1e-02, 1e-01]
learning_rates = [1e-5, 1e-4, 1e-3]
solver = None
for scale in weight_scales:
    for rate in learning_rates:
        # BUGFIX: `solver` was previously never bound at module scope (the
        # cell that defined it is commented out above), so the plotting
        # lines below raised NameError. Capture the solver here instead.
        solver, train_acc, val_acc = train_model(scale, rate)
        print('scale: %f, rate: %f, train_acc: %f, val_acc: %f' % (
            scale, rate, train_acc, val_acc))

# Plot the loss history of the last configuration trained.
plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()
"""
Explanation: Now try to use a five-layer network with 100 units on each layer to overfit 50 training examples. Again you will have to adjust the learning rate and weight initialization, but you should be able to achieve 100% training accuracy within 20 epochs.
End of explanation
"""
# Test the sgd_momentum update rule against precomputed values.
from cs231n.optim import sgd_momentum

N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
v = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-3, 'velocity': v}
next_w, _ = sgd_momentum(w, dw, config=config)

expected_next_w = np.asarray([
  [ 0.1406, 0.20738947, 0.27417895, 0.34096842, 0.40775789],
  [ 0.47454737, 0.54133684, 0.60812632, 0.67491579, 0.74170526],
  [ 0.80849474, 0.87528421, 0.94207368, 1.00886316, 1.07565263],
  [ 1.14244211, 1.20923158, 1.27602105, 1.34281053, 1.4096 ]])
expected_velocity = np.asarray([
  [ 0.5406, 0.55475789, 0.56891579, 0.58307368, 0.59723158],
  [ 0.61138947, 0.62554737, 0.63970526, 0.65386316, 0.66802105],
  [ 0.68217895, 0.69633684, 0.71049474, 0.72465263, 0.73881053],
  [ 0.75296842, 0.76712632, 0.78128421, 0.79544211, 0.8096 ]])

print 'next_w error: ', rel_error(next_w, expected_next_w)
print 'velocity error: ', rel_error(expected_velocity, config['velocity'])
"""
Explanation: Inline question:
Did you notice anything about the comparative difficulty of training the three-layer net vs training the five layer net?
Answer:
[FILL THIS IN]
Update rules
So far we have used vanilla stochastic gradient descent (SGD) as our update rule. More sophisticated update rules can make it easier to train deep networks. We will implement a few of the most commonly used update rules and compare them to vanilla SGD.
SGD+Momentum
Stochastic gradient descent with momentum is a widely used update rule that tends to make deep networks converge faster than vanilla stochastic gradient descent.
Open the file cs231n/optim.py and read the documentation at the top of the file to make sure you understand the API. Implement the SGD+momentum update rule in the function sgd_momentum and run the following to check your implementation. You should see errors less than 1e-8.
End of explanation
"""
# Train a six-layer net with plain SGD and with SGD+momentum and compare.
# NOTE(review): dict.iteritems (Python 2 only) is used below.
num_train = 4000
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}

solvers = {}
for update_rule in ['sgd', 'sgd_momentum']:
    print 'running with ', update_rule
    model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
    solver = Solver(model, small_data,
                    num_epochs=5, batch_size=100,
                    update_rule=update_rule,
                    optim_config={
                        'learning_rate': 1e-2,
                    },
                    verbose=True)
    solvers[update_rule] = solver
    solver.train()
    print

# Plot loss / train accuracy / val accuracy for both update rules.
plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')

for update_rule, solver in solvers.iteritems():
    plt.subplot(3, 1, 1)
    plt.plot(solver.loss_history, 'o', label=update_rule)
    plt.subplot(3, 1, 2)
    plt.plot(solver.train_acc_history, '-o', label=update_rule)
    plt.subplot(3, 1, 3)
    plt.plot(solver.val_acc_history, '-o', label=update_rule)

for i in [1, 2, 3]:
    plt.subplot(3, 1, i)
    plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
"""
Explanation: Once you have done so, run the following to train a six-layer network with both SGD and SGD+momentum. You should see the SGD+momentum update rule converge faster.
End of explanation
"""
# Test RMSProp implementation; you should see errors less than 1e-7
from cs231n.optim import rmsprop

N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
cache = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'cache': cache}
next_w, _ = rmsprop(w, dw, config=config)

expected_next_w = np.asarray([
  [-0.39223849, -0.34037513, -0.28849239, -0.23659121, -0.18467247],
  [-0.132737, -0.08078555, -0.02881884, 0.02316247, 0.07515774],
  [ 0.12716641, 0.17918792, 0.23122175, 0.28326742, 0.33532447],
  [ 0.38739248, 0.43947102, 0.49155973, 0.54365823, 0.59576619]])
expected_cache = np.asarray([
  [ 0.5976, 0.6126277, 0.6277108, 0.64284931, 0.65804321],
  [ 0.67329252, 0.68859723, 0.70395734, 0.71937285, 0.73484377],
  [ 0.75037008, 0.7659518, 0.78158892, 0.79728144, 0.81302936],
  [ 0.82883269, 0.84469141, 0.86060554, 0.87657507, 0.8926 ]])

print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'cache error: ', rel_error(expected_cache, config['cache'])

# Test Adam implementation; you should see errors around 1e-7 or less
from cs231n.optim import adam

N, D = 4, 5
w = np.linspace(-0.4, 0.6, num=N*D).reshape(N, D)
dw = np.linspace(-0.6, 0.4, num=N*D).reshape(N, D)
m = np.linspace(0.6, 0.9, num=N*D).reshape(N, D)
v = np.linspace(0.7, 0.5, num=N*D).reshape(N, D)
config = {'learning_rate': 1e-2, 'm': m, 'v': v, 't': 5}
next_w, _ = adam(w, dw, config=config)

expected_next_w = np.asarray([
  [-0.40094747, -0.34836187, -0.29577703, -0.24319299, -0.19060977],
  [-0.1380274, -0.08544591, -0.03286534, 0.01971428, 0.0722929],
  [ 0.1248705, 0.17744702, 0.23002243, 0.28259667, 0.33516969],
  [ 0.38774145, 0.44031188, 0.49288093, 0.54544852, 0.59801459]])
expected_v = np.asarray([
  [ 0.69966, 0.68908382, 0.67851319, 0.66794809, 0.65738853,],
  [ 0.64683452, 0.63628604, 0.6257431, 0.61520571, 0.60467385,],
  [ 0.59414753, 0.58362676, 0.57311152, 0.56260183, 0.55209767,],
  [ 0.54159906, 0.53110598, 0.52061845, 0.51013645, 0.49966, ]])
expected_m = np.asarray([
  [ 0.48, 0.49947368, 0.51894737, 0.53842105, 0.55789474],
  [ 0.57736842, 0.59684211, 0.61631579, 0.63578947, 0.65526316],
  [ 0.67473684, 0.69421053, 0.71368421, 0.73315789, 0.75263158],
  [ 0.77210526, 0.79157895, 0.81105263, 0.83052632, 0.85 ]])

print 'next_w error: ', rel_error(expected_next_w, next_w)
print 'v error: ', rel_error(expected_v, config['v'])
print 'm error: ', rel_error(expected_m, config['m'])
"""
Explanation: RMSProp and Adam
RMSProp [1] and Adam [2] are update rules that set per-parameter learning rates by using a running average of the second moments of gradients.
In the file cs231n/optim.py, implement the RMSProp update rule in the rmsprop function and implement the Adam update rule in the adam function, and check your implementations using the tests below.
[1] Tijmen Tieleman and Geoffrey Hinton. "Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude." COURSERA: Neural Networks for Machine Learning 4 (2012).
[2] Diederik Kingma and Jimmy Ba, "Adam: A Method for Stochastic Optimization", ICLR 2015.
End of explanation
"""
# Train deep nets with rmsprop and adam; results are added to the `solvers`
# dict from the SGD/momentum comparison above, so all four curves plot together.
# NOTE(review): dict.iteritems (Python 2 only) is used below.
learning_rates = {'rmsprop': 1e-4, 'adam': 1e-3}
for update_rule in ['adam', 'rmsprop']:
    print 'running with ', update_rule
    model = FullyConnectedNet([100, 100, 100, 100, 100], weight_scale=5e-2)
    solver = Solver(model, small_data,
                    num_epochs=5, batch_size=100,
                    update_rule=update_rule,
                    optim_config={
                        'learning_rate': learning_rates[update_rule]
                    },
                    verbose=True)
    solvers[update_rule] = solver
    solver.train()
    print

plt.subplot(3, 1, 1)
plt.title('Training loss')
plt.xlabel('Iteration')
plt.subplot(3, 1, 2)
plt.title('Training accuracy')
plt.xlabel('Epoch')
plt.subplot(3, 1, 3)
plt.title('Validation accuracy')
plt.xlabel('Epoch')

for update_rule, solver in solvers.iteritems():
    plt.subplot(3, 1, 1)
    plt.plot(solver.loss_history, 'o', label=update_rule)
    plt.subplot(3, 1, 2)
    plt.plot(solver.train_acc_history, '-o', label=update_rule)
    plt.subplot(3, 1, 3)
    plt.plot(solver.val_acc_history, '-o', label=update_rule)

for i in [1, 2, 3]:
    plt.subplot(3, 1, i)
    plt.legend(loc='upper center', ncol=4)
plt.gcf().set_size_inches(15, 15)
plt.show()
"""
Explanation: Once you have debugged your RMSProp and Adam implementations, run the following to train a pair of deep networks using these new update rules:
End of explanation
"""
best_model = None
################################################################################
# TODO: Train the best FullyConnectedNet that you can on CIFAR-10. You might #
# batch normalization and dropout useful. Store your best model in the #
# best_model variable. #
################################################################################
def train_model(weight_scale, learning_rate, verbose=False):
    """Train a fixed 5x100 fully-connected net with Adam for 10 epochs.

    Returns the trained model together with its final training accuracy
    and final validation accuracy.
    """
    net = FullyConnectedNet([100, 100, 100, 100, 100],
                            weight_scale=weight_scale)
    config = {'learning_rate': learning_rate}
    trainer = Solver(net, data,
                     num_epochs=10, batch_size=200,
                     update_rule='adam',
                     optim_config=config,
                     verbose=verbose)
    trainer.train()
    final_train_acc = trainer.train_acc_history[-1]
    final_val_acc = trainer.val_acc_history[-1]
    return net, final_train_acc, final_val_acc
# Small grid search over weight-initialization scale and Adam learning rate;
# keep whichever model scores best on the validation set.
weight_scales = [5e-3, 5e-2, 5e-1]
learning_rates = [1e-4, 1e-3, 1e-2]
best_val_acc = -1
for scale in weight_scales:
    for rate in learning_rates:
        model, train_acc, val_acc = train_model(scale, rate)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_model = model
        print('scale: %f, rate: %f, train_acc: %f, val_acc: %f' % (
            scale, rate, train_acc, val_acc))
################################################################################
# END OF YOUR CODE #
################################################################################
"""
Explanation: Train a good model!
Train the best fully-connected model that you can on CIFAR-10, storing your best model in the best_model variable. We require you to get at least 50% accuracy on the validation set using a fully-connected net.
If you are careful it should be possible to get accuracies above 55%, but we don't require it for this part and won't assign extra credit for doing so. Later in the assignment we will ask you to train the best convolutional network that you can on CIFAR-10, and we would prefer that you spend your effort working on convolutional nets rather than fully-connected nets.
You might find it useful to complete the BatchNormalization.ipynb and Dropout.ipynb notebooks before completing this part, since those techniques can help you train powerful models.
End of explanation
"""
# Evaluate the selected best_model on the held-out validation and test splits.
X_val = data['X_val']
y_val = data['y_val']
X_test = data['X_test']
y_test = data['y_test']
# Solver-style models return class scores when called without labels;
# argmax over the class axis gives the predicted label.
y_test_pred = np.argmax(best_model.loss(X_test), axis=1)
y_val_pred = np.argmax(best_model.loss(X_val), axis=1)
print 'Validation set accuracy: ', (y_val_pred == y_val).mean()
print 'Test set accuracy: ', (y_test_pred == y_test).mean()
"""
Explanation: Test your model
Run your best model on the validation and test sets. You should achieve above 50% accuracy on the validation set.
End of explanation
"""
|
jorisvandenbossche/2015-EuroScipy-pandas-tutorial | solved - 04b - Advanced groupby operations.ipynb | bsd-2-clause | %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
try:
import seaborn
except ImportError:
pass
pd.options.display.max_rows = 10
"""
Explanation: Groupby operations
Some imports:
End of explanation
"""
# Toy frame used throughout the groupby examples:
# three groups (A, B, C), each observed three times.
keys = ['A', 'B', 'C'] * 3
values = [0, 5, 10, 5, 10, 15, 10, 15, 20]
df = pd.DataFrame({'key': keys, 'data': values})
df
"""
Explanation: Recap: the groupby operation (split-apply-combine)
The "group by" concept: we want to apply the same function on subsets of your dataframe, based on some key to split the dataframe in subsets
This operation is also referred to as the "split-apply-combine" operation, involving the following steps:
Splitting the data into groups based on some criteria
Applying a function to each group independently
Combining the results into a data structure
<img src="img/splitApplyCombine.png">
Similar to SQL GROUP BY
The example of the image in pandas syntax:
End of explanation
"""
df.groupby('key').aggregate('sum') # np.sum
df.groupby('key').sum()
"""
Explanation: Using the filtering and reductions operations we have seen in the previous notebooks, we could do something like:
df[df['key'] == "A"].sum()
df[df['key'] == "B"].sum()
...
But pandas provides the groupby method to do this:
End of explanation
"""
df.groupby(lambda x: x % 2).mean()
"""
Explanation: Pandas does not only let you group by a column name. In df.groupby(grouper) can be many things:
Series (or string indicating a column in df)
function (to be applied on the index)
dict : groups by values
levels=[], names of levels in a MultiIndex
End of explanation
"""
cast = pd.read_csv('data/cast.csv')
cast.head()
titles = pd.read_csv('data/titles.csv')
titles.head()
"""
Explanation: And now applying this on some real data
These exercises are based on the PyCon tutorial of Brandon Rhodes (so all credit to him!) and the datasets he prepared for that. You can download these data from here: titles.csv and cast.csv and put them in the /data folder.
cast dataset: different roles played by actors/actresses in films
title: title of the film
name: name of the actor/actress
type: actor/actress
n: the order of the role (n=1: leading role)
End of explanation
"""
titles.groupby(titles.year // 10 * 10).size().plot(kind='bar')
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Using groupby(), plot the number of films that have been released each decade in the history of cinema.
</div>
End of explanation
"""
hamlet = titles[titles['title'] == 'Hamlet']
hamlet.groupby(hamlet.year // 10 * 10).size().plot(kind='bar')
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Use groupby() to plot the number of "Hamlet" films made each decade.
</div>
End of explanation
"""
# Leading roles (n == 1) in films released during the 1950s,
# counted per release year and performer type.
cast1950 = cast[(cast.year // 10 == 195) & (cast.n == 1)]
cast1950.groupby(['year', 'type']).size()
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: How many leading (n=1) roles were available to actors, and how many to actresses, in each year of the 1950s?
</div>
End of explanation
"""
# Leading roles (n == 1) since 1990, then the ten busiest leading performers.
cast1990 = cast[(cast['year'] >= 1990) & (cast['n'] == 1)]
cast1990.groupby('name').size().nlargest(10)
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: List the 10 actors/actresses that have the most leading roles (n=1) since the 1990's.
</div>
End of explanation
"""
c = cast
c = c[c.title == 'The Pink Panther']
c = c.groupby(['year'])[['n']].max()
c
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Use groupby() to determine how many roles are listed for each of The Pink Panther movies.
</div>
End of explanation
"""
c = cast
c = c[c.name == 'Frank Oz']
g = c.groupby(['year', 'title']).size()
g[g > 1]
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: List, in order by year, each of the films in which Frank Oz has played more than 1 role.
</div>
End of explanation
"""
c = cast
c = c[c.name == 'Frank Oz']
g = c.groupby(['character']).size()
g[g > 1].sort_values()
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: List each of the characters that Frank Oz has portrayed at least twice.
</div>
End of explanation
"""
df
df.groupby('key').transform('mean')
def normalize(group):
    """Standardize a group to zero mean and unit (sample) std."""
    centered = group - group.mean()
    return centered / group.std()
df.groupby('key').transform(normalize)
df.groupby('key').transform('sum')
"""
Explanation: Transforms
Sometimes you don't want to aggregate the groups, but transform the values in each group. This can be achieved with transform:
End of explanation
"""
# For every film (grouped by title), broadcast the maximum role number n
# back onto each row: the total number of ranked roles in that film.
cast['n_total'] = cast.groupby('title')['n'].transform('max')
cast.head()
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Add a column to the `cast` dataframe that indicates the number of roles for the film.
</div>
End of explanation
"""
# Leading roles only (n == 1).
leading = cast[cast['n'] == 1]
# Count leading roles per decade and per performer type (actor/actress).
sums_decade = leading.groupby([cast['year'] // 10 * 10, 'type']).size()
sums_decade
#sums_decade.groupby(level='year').transform(lambda x: x / x.sum())
# Divide each decade/type count by that decade's total to get shares.
ratios_decade = sums_decade / sums_decade.groupby(level='year').transform('sum')
ratios_decade
# Plot the actor and actress shares over time (cross-section by type).
ratios_decade[:, 'actor'].plot()
ratios_decade[:, 'actress'].plot()
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Calculate the ratio of leading actor and actress roles to the total number of leading roles per decade.
</div>
Tip: you can to do a groupby twice in two steps, once calculating the numbers, and then the ratios.
End of explanation
"""
s = 'Bradwurst'
s.startswith('B')
"""
Explanation: Intermezzo: string manipulations
Python strings have a lot of useful methods available to manipulate or check the content of the string:
End of explanation
"""
s = pd.Series(['Bradwurst', 'Kartoffelsalat', 'Sauerkraut'])
s.str.startswith('B')
"""
Explanation: In pandas, those methods (together with some additional methods) are also available for string Series through the .str accessor:
End of explanation
"""
# Titles that contain 'Hamlet' anywhere in the string...
hamlets = titles[titles['title'].str.contains('Hamlet')]
hamlets['title'].value_counts()
# ...versus titles that start with 'Hamlet' (str.match anchors at the start).
hamlets = titles[titles['title'].str.match('Hamlet')]
hamlets['title'].value_counts()
"""
Explanation: For an overview of all string methods, see: http://pandas.pydata.org/pandas-docs/stable/api.html#string-handling
<div class="alert alert-success">
<b>EXERCISE</b>: We already plotted the number of 'Hamlet' films released each decade, but not all titles are exactly called 'Hamlet'. Give an overview of the titles that contain 'Hamlet', and that start with 'Hamlet':
</div>
End of explanation
"""
# Indices of the ten longest titles, measured by character count.
title_longest = titles['title'].str.len().nlargest(10)
title_longest
# Widen the display so long titles are not truncated, then show the rows.
pd.options.display.max_colwidth = 210
titles.loc[title_longest.index]
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: List the 10 movie titles with the longest name.
</div>
End of explanation
"""
titles.title.value_counts().head()
"""
Explanation: Value counts
A useful shortcut to calculate the number of occurrences of certain values is value_counts (this is somewhat equivalent to df.groupby(key).size())
For example, what are the most frequently occurring movie titles?
End of explanation
"""
t = titles
t.year.value_counts().head(3)
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Which years saw the most films released?
</div>
End of explanation
"""
titles.year.value_counts().sort_index().plot()
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Plot the number of released films over time
</div>
End of explanation
"""
t = titles
t = t[t.title == 'Hamlet']
(t.year // 10 * 10).value_counts().sort_index().plot(kind='bar')
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Plot the number of "Hamlet" films made each decade.
</div>
End of explanation
"""
cast.character.value_counts().head(11)
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: What are the 11 most common character names in movie history?
</div>
End of explanation
"""
cast[cast.year == 2010].name.value_counts().head(10)
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Which actors or actresses appeared in the most movies in the year 2010?
</div>
End of explanation
"""
cast[cast.name == 'Brad Pitt'].year.value_counts().sort_index().plot()
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: Plot how many roles Brad Pitt has played in each year of his career.
</div>
End of explanation
"""
c = cast
c[c.title.str.startswith('The Life')].title.value_counts().head(10)
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: What are the 10 most common film titles that start with the words "The Life"?
</div>
End of explanation
"""
# Leading (n == 1) roles by performer type in the 1950s...
c = cast
c = c[c.year // 10 == 195]
c = c[c.n == 1]
c.type.value_counts()
# ...and the same breakdown for the 2000s.
c = cast
c = c[c.year // 10 == 200]
c = c[c.n == 1]
c.type.value_counts()
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>: How many leading (n=1) roles were available to actors, and how many to actresses, in the 1950s? And in 2000s?
</div>
End of explanation
"""
|
catalyst-cooperative/pudl | devtools/eia-etl-debug.ipynb | mit | %load_ext autoreload
%autoreload 2
import pudl
import logging
import sys
from pathlib import Path
import pandas as pd
pd.options.display.max_columns = None
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(stream=sys.stdout)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.handlers = [handler]
pudl_settings = pudl.workspace.setup.get_defaults()
"""
Explanation: Working with the EIA Extract / Transform
This notebook steps through PUDL's extract and transform steps for the EIA 860 and 923 datasets, to make it easier to test and add new years of data, or new tables from the various spreadsheets that haven't been integrated yet.
End of explanation
"""
from pudl.settings import Eia860Settings, Eia923Settings, EiaSettings
from pudl.metadata.classes import DataSource
# Use every year the EIA-860 data source declares as a working partition.
eia860_data_source = DataSource.from_id("eia860")
eia860_years = eia860_data_source.working_partitions["years"]
#eia860_years = [2020]  # uncomment to restrict the run to a single year
eia860_settings = Eia860Settings(years=eia860_years)
# Same for EIA-923: default to all working years.
eia923_data_source = DataSource.from_id("eia923")
eia923_years = eia923_data_source.working_partitions["years"]
#eia923_years = [2020]  # uncomment to restrict the run to a single year
eia923_settings = Eia923Settings(years=eia923_years)
# Bundle both dataset settings into the combined EIA ETL settings object.
eia_settings = EiaSettings(eia860=eia860_settings, eia923=eia923_settings)
"""
Explanation: Set the scope for the Extract-Transform:
End of explanation
"""
ds_kwargs = {"local_cache_path": pudl_settings["data_dir"]}
ds = pudl.workspace.datastore.Datastore(**ds_kwargs)
"""
Explanation: Create a locally cached datastore
End of explanation
"""
%%time
# Pull the raw EIA-860 spreadsheets for the selected years.
eia860_extractor = pudl.extract.eia860.Extractor(ds)
eia860_raw_dfs = eia860_extractor.extract(year=eia860_settings.years)
# Optionally append the monthly EIA-860M update on top of the annual data.
if eia860_settings.eia860m:
    eia860m_raw_dfs = pudl.extract.eia860m.Extractor(ds).extract(
        year_month=eia860_settings.eia860m_date
    )
    eia860_raw_dfs = pudl.extract.eia860m.append_eia860m(
        eia860_raw_dfs=eia860_raw_dfs,
        eia860m_raw_dfs=eia860m_raw_dfs
    )
"""
Explanation: EIA-860
Extract just the EIA-860 / EIA-860m
End of explanation
"""
%%time
eia860_transformed_dfs = pudl.transform.eia860.transform(
eia860_raw_dfs,
eia860_tables=eia860_settings.tables,
)
"""
Explanation: Transform EIA-860 / EIA-860m
End of explanation
"""
%%time
eia923_extractor = pudl.extract.eia923.Extractor(ds)
eia923_raw_dfs = eia923_extractor.extract(year=eia923_settings.years)
"""
Explanation: EIA-923
Extract just the EIA-923
End of explanation
"""
%%time
eia923_transformed_dfs = pudl.transform.eia923.transform(
eia923_raw_dfs,
eia923_tables=eia923_settings.tables,
)
"""
Explanation: Transform just the EIA-923
End of explanation
"""
%%time
# Merge the transformed EIA-923 and EIA-860 outputs into one dict of tables
# (EIA-860 tables take precedence on any name collisions).
eia_transformed_dfs = eia923_transformed_dfs.copy()
eia_transformed_dfs.update(eia860_transformed_dfs.copy())
# Do some final cleanup and assign appropriate types:
eia_transformed_dfs = {
    name: pudl.helpers.convert_cols_dtypes(df, data_source="eia")
    for name, df in eia_transformed_dfs.items()
}
# Harvest entity tables out of the combined annual tables.
entities_dfs, eia_transformed_dfs = pudl.transform.eia.transform(
    eia_transformed_dfs,
    eia860_years=eia860_settings.years,
    eia923_years=eia923_settings.years,
    eia860m=eia860_settings.eia860m,
)
# Assign appropriate types to new entity tables:
entities_dfs = {
    name: pudl.helpers.apply_pudl_dtypes(df, group="eia")
    for name, df in entities_dfs.items()
}
# Encode coded columns using the canonical PUDL metadata for each table.
for table in entities_dfs:
    entities_dfs[table] = (
        pudl.metadata.classes.Package.from_resource_ids()
        .get_resource(table)
        .encode(entities_dfs[table])
    )
# Final output: static tables first, then entities, then transformed tables.
out_dfs = pudl.etl._read_static_tables_eia()
out_dfs.update(entities_dfs)
out_dfs.update(eia_transformed_dfs)
"""
Explanation: Combined EIA Data
Merge EIA-923/860, set dtypes, harvest entities
End of explanation
"""
|
predictscan3/scan3 | analysis_nbs/Normalise Hormones by Gestational Age.ipynb | mit | from os.path import join
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data_fname = r"../data_staging/all_by_baby_enriched_v3.csv"
df = pd.read_csv(data_fname)
"""
Explanation: Explore the data first
End of explanation
"""
# Pool the gestational ages from all three trimester scans into one Series.
# NOTE(review): renamed from `all` so the Python builtin is not shadowed;
# prints parenthesized so the cell is valid under Python 2 and Python 3.
all_ga = pd.concat([df.t1_ga_weeks, df.t2_ga_weeks, df.t3_ga_weeks])
all_ga.dropna(inplace=True)
print(all_ga.describe())
print("Less than 0: {}".format(len(all_ga[all_ga < 0])))
# Negative gestational ages are data errors; drop them before plotting.
all_ga = all_ga[all_ga > 0]
all_ga.hist().plot()
plt.show()
# Look at the trimester scans one by one
t1 = df[df.t1_ga_weeks > 0]
t2 = df[(df.t2_ga_weeks > 0) & (df.t2_ga_weeks < 30)]
t3 = df[(df.t3_ga_weeks >= 30)]
print "T1\n", t1.t1_ga_weeks.describe()
print "T2\n", t2.t2_ga_weeks.describe()
print "T3\n", t3.t3_ga_weeks.describe()
t1.t1_ga_weeks.hist(color="blue", label="scan1", bins=20).plot()
t2.t2_ga_weeks.hist(color="green", label="scan2", bins=20).plot()
t3.t3_ga_weeks.hist(color="purple", label="scan3", bins=20).plot()
plt.title("Trimester scan count by week")
plt.legend()
plt.show()
t2 = df[(df.t2_ga_weeks > 0) & (df.t2_ga_weeks < 30)]
print t2.t2_ga_weeks.describe()
t2.t2_ga_weeks.hist().plot()
plt.show()
# Check whether the records we're dropping are actually ok and that we haven't made a mistake somewhere
# print df[df.t2_ga_weeks > 30].iloc[0]
t3 = df[(df.t3_ga_weeks >= 30)]
print t3.t3_ga_weeks.describe()
t3_dist = np.histogram(t3.t3_ga_weeks)
print t3_dist
t3.t3_ga_weeks.hist().plot()
plt.show()
"""
Explanation: First, have a look at the distribution of gestational ages, as this might determine which buckets we use.
End of explanation
"""
# Work with log(pappa): the raw readings are heavily right-skewed.
t1.loc[:, "t1_pappa_log"] = np.log(t1.t1_pappa)
print t1.t1_pappa.describe()
print t1.t1_pappa_log.describe()
# Value below which 99.95% of the pappa readings fall.
t1_pappa_threshold = 99.95
pappatv = np.percentile(t1.t1_pappa.dropna().sort_values(), t1_pappa_threshold)
t1.t1_pappa_log.hist(bins=50).plot()
plt.title("Histogram of log(Trimester1 pappa)")
plt.show()
# Scatter log(pappa) against gestational age, with the percentile cut-off
# drawn as a horizontal dashed line for reference.
plt.scatter(t1.t1_ga_weeks, t1.t1_pappa_log, alpha=0.5, label="log(t1_pappa)")
plt.plot([t1.t1_ga_weeks.min(), t1.t1_ga_weeks.max()], np.log([pappatv, pappatv]),
         linestyle="--", label="{}% < {}".format(t1_pappa_threshold, pappatv))
plt.title("log(Trimester1 pappa) by GA")
plt.legend(bbox_to_anchor=(1.6, 1.))
plt.show()
"""
Explanation: Investigate the pappa distributions and what range is "reasonable"
Need to be sure what readings can be ignored and which ones are generally in an expected range.
Do some analysis and ask Basky for advice.
It actually looks reasonable if we use a log scale instead, then there aren't so many outliers.
End of explanation
"""
hfields = ["pappa", "b_hcg"]
print len(t1)
t11 = t1[(t1.t1_pappa.map(np.isnan) == False) & (t1.t1_pappa > 0) & (t1.t1_pappa <= 10)]
t12 = t1[t1.t1_pappa > 10]
t12_hist = np.histogram(t12.t1_pappa)
print "Hist of pappa>10"
print t12_hist
print pd.DataFrame(t12_hist[0], index=t12_hist[1][0:-1])
print len(t11)
print t11.t1_pappa.describe()
t11.t1_pappa.hist(bins=50, color="blue").plot()
plt.title("Trimester 1 pappa [0, 10]")
plt.show()
plt.scatter(t11.t1_ga_weeks, t11.t1_pappa, alpha=0.5)
plt.title("Trimester 1 GA weeks by Pappa [0, 10]")
plt.show()
"""
Explanation: Now look at some basic plots of hormone readings, to see if there are simple looking relationships before we express as multiples of the mean
End of explanation
"""
data_rows = []
# Bucket the gestational ages
ga_buckets = np.histogram(t11.t1_ga_weeks, bins=50)[1]
# Use the same buckets for all ga bins, so bucket the total pappa
h_buckets = np.histogram(t11.t1_pappa, bins=50)[1]
# For each ga bucket, get the associated pappa readings
for l, r in zip(ga_buckets[0:-1], ga_buckets[1:]):
    ga_bucket = t11[(t11.t1_ga_weeks >= l) & (t11.t1_ga_weeks < r)]
    if len(ga_bucket) == 0:
        #TODO Want to interpolate ideally, but just use the last row for now
        data_rows.append(data_rows[-1])
    else:
        data_rows.append(ga_bucket.t1_pappa)
# The last bucket is closed on the right: everything >= the final left edge
# (r leaks out of the loop above) goes into a trailing bucket.
data_rows.append(t11[(t11.t1_ga_weeks >= r)].t1_pappa)
# Now calculate histograms of the pappa by ga bucket, so that we have a density for each slice
rows = []
for drow in data_rows:
    rows.append(np.histogram(drow, bins=h_buckets)[0])
# Calc extents for the axes
ga_extents = [t11.t1_ga_weeks.min(), t11.t1_ga_weeks.max()]
pappa_extents = [t11.t1_pappa.min(), t11.t1_pappa.max()]
# Plot the whole lot, sort of a 3d heatmap?
# Transpose the data so we have rows as pappa levels and columns as GA
rows = np.transpose(rows)
fig, ax = plt.subplots(figsize=(5, 6))
im = ax.imshow(rows, extent=ga_extents + pappa_extents, cmap="hot", origin="lower", interpolation="bilinear")
plt.colorbar(im, orientation='vertical')
plt.title("Trimester 1 GA by Pappa [0, 10] by density")
plt.show()
"""
Explanation: I think the right way to interpret this is that each vertical slice (which is probably a single day) is a distribution, and so (assuming enough data in that slice), we determine the mean of that, then express every other reading in that slice as a multiple of that mean.
That means that regardless of the actual ga, we now have a pappa MoM which can all be compared, as they are expressed relative to the "normal" reading at that ga.
We do need to think about the number of obs in each slice, and widen if there aren't enough, for example under 11.5 weeks and over 14.
This is where we could also throw in some colours to see whether age and race etc have any effect (which is how the published means are calculated).
It might be interesting to view this as a 3d surface, or a heatmap with the colour denoting density. Just for interst really, to see how the distribution changes with ga.
End of explanation
"""
week = 12
d1 = 4
s = t11[(t11.t1_ga_weeks >= (week + d1 / 7.)) & (t11.t1_ga_weeks < (week + (d1 + 1) / 7.))]
print s.t1_pappa.describe()
s.t1_pappa.hist(bins=50).plot()
plt.title("Week {}, day {} Pappa [0, 10] distribution".format(week, d1))
plt.show()
"""
Explanation: Pappa distributions by GA
End of explanation
"""
log_s = np.log(s.t1_pappa)
print log_s.describe()
print np.exp(log_s.mean())
# log_hist = np.histogram(log_s, bins=50)
log_s.hist(bins=50).plot()
plt.title("Week {}, day {} log(Pappa [0, 10]) distribution".format(week, d1))
plt.show()
"""
Explanation: The main observation here is that the mean is not representative of where most of the data lies, because of the skewed nature of the distribution.
Does it matter? I think so, because a lot of readings will show that they're not near the usual level, which isn't actually the case.
End of explanation
"""
plt.scatter(s.dem_mat_age, log_s, alpha=0.5)
plt.title("Maternal age by log(pappa)")
plt.show()
"""
Explanation: This is showing that if we look at the log of pappa instead, we get something that looks a lot more guassian, and in fact the mean of this distribution translates back into something that visually looks a lot closer to the middle of the original distribution.
Quick Experiment on Pappa vs Other Variables
Just as a quick experiment, for a single ga slice, are there any obvious correlations between pappa and other variables, like maternal age?
End of explanation
"""
|
mdpiper/topoflow-notebooks | EvapEnergyBalance-Meteorology-SnowDegreeDay.ipynb | mit | from cmt.components import EvapEnergyBalance, Meteorology, SnowDegreeDay
evp, met, sno = EvapEnergyBalance(), Meteorology(), SnowDegreeDay()
"""
Explanation: EvapEnergyBalance-Meteorology-SnowDegreeDay coupling
Goal: Try to successfully run a coupled EvapEnergyBalance-Meteorology-SnowDegreeDay simulation, with EvapEnergyBalance as the driver.
Import the Babel-wrapped EvapEnergyBalance, Meteorology and SnowDegreeDay components and create instances:
End of explanation
"""
%cd input
evp.initialize('evap_energy_balance-1.cfg')
met.initialize('meteorology-2.cfg')
sno.initialize('snow_degree_day-1.cfg')
"""
Explanation: Initialize the components with cfg files that, for simplicity, use the same time step and run duration:
End of explanation
"""
# Seed each time series with the components' initial state; the coupling
# loop below appends one value per synchronization step.
time = [met.get_current_time()]
snow_depth = [sno.get_value('snowpack__depth').max()]
air_temp = [met.get_value('atmosphere_bottom_air__temperature').max()]
evap_flux = [evp.get_value('land_surface_water__evaporation_volume_flux').max()]
"""
Explanation: Store initial values of time, snow depth, and air temperature:
End of explanation
"""
count = 1
while evp.get_current_time() < evp.get_end_time():
    # 1. Meteorology -> SnowDegreeDay: air temperature, snowfall flux,
    #    surface temperature and liquid-water density.
    T_air = met.get_value('atmosphere_bottom_air__temperature')
    P_snow = met.get_value('atmosphere_water__snowfall_leq-volume_flux')
    T_surf = met.get_value('land_surface__temperature')
    rho_H2O = met.get_value('water-liquid__mass-per-volume_density')
    sno.set_value('atmosphere_bottom_air__temperature', T_air)
    sno.set_value('atmosphere_water__snowfall_leq-volume_flux', P_snow)
    sno.set_value('land_surface__temperature', T_surf)
    sno.set_value('water-liquid__mass-per-volume_density', rho_H2O)
    # 2. Advance the snow component to the next synchronization time.
    sno.update(sno.get_time_step()*count)
    # 3. SnowDegreeDay -> Meteorology: snowpack state and melt flux.
    rho_snow = sno.get_value('snowpack__z_mean_of_mass-per-volume_density')
    h_snow = sno.get_value('snowpack__depth')
    h_swe = sno.get_value('snowpack__liquid-equivalent_depth')
    SM = sno.get_value('snowpack__melt_volume_flux')
    met.set_value('snowpack__z_mean_of_mass-per-volume_density', rho_snow)
    met.set_value('snowpack__depth', h_snow)
    met.set_value('snowpack__liquid-equivalent_depth', h_swe)
    met.set_value('snowpack__melt_volume_flux', SM)
    # 4. Advance the meteorology component.
    met.update(met.get_time_step()*count)
    # 5. Meteorology + SnowDegreeDay -> EvapEnergyBalance: energy fluxes,
    #    temperatures and snow depth needed by the evaporation scheme.
    T_air = met.get_value('atmosphere_bottom_air__temperature')
    Qe = met.get_value('atmosphere_bottom_air_land_net-latent-heat__energy_flux')
    Q_sum = met.get_value('land_surface_net-total-energy__energy_flux')
    T_surf = met.get_value('land_surface__temperature')
    h_snow = sno.get_value('snowpack__depth')
    evp.set_value('atmosphere_bottom_air__temperature', T_air)
    evp.set_value('atmosphere_bottom_air_land_net-latent-heat__energy_flux', Qe)
    evp.set_value('land_surface_net-total-energy__energy_flux', Q_sum)
    evp.set_value('land_surface__temperature', T_surf)
    evp.set_value('snowpack__depth', h_snow)
    # 6. Advance the driver (evaporation) component.
    evp.update(evp.get_time_step()*count)
    # Record the spatial maxima for plotting after the run.
    time.append(met.get_current_time())
    snow_depth.append(sno.get_value('snowpack__depth').max())
    air_temp.append(met.get_value('atmosphere_bottom_air__temperature').max())
    evap_flux.append(evp.get_value('land_surface_water__evaporation_volume_flux').max())
    count += 1
print time
print snow_depth
print air_temp
print evap_flux
"""
Explanation: Run the coupled models to completion. In each time step, perform the following actions:
Get variables from Meteorology; set into SnowDegreeDay
Advance SnowDegreeDay
Get variables from SnowDegreeDay; set into Meteorology
Advance Meteorology
Get variables from Meteorology and SnowDegreeDay; set into EvapEnergyBalance
Advance EvapEnergyBalance
End of explanation
"""
evp.finalize(), met.finalize(), sno.finalize()
"""
Explanation: Finalize the components:
End of explanation
"""
%matplotlib inline
from matplotlib import pyplot as plt
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(12, 5))
snow_depth_plot = axes[0].plot(time[1:], snow_depth[1:], 'b')
axes[0].set_title('Snow depth versus time')
axes[0].set_xlabel('Time [s]')
axes[0].set_ylabel('Snow depth [m]')
evap_flux_plot = axes[1].plot(time[1:], evap_flux[1:], 'r')
axes[1].set_title('Evaporative flux versus time')
axes[1].set_xlabel('Time [s]')
axes[1].set_ylabel('Evaporative flux [m s-1]')
"""
Explanation: Plot snow depth versus time.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/test-institute-3/cmip6/models/sandbox-3/atmoschem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-3', 'sandbox-3', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: TEST-INSTITUTE-3
Source ID: SANDBOX-3
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:46
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmopsheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmospheric chemistry grid match the atmosphere grid?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogeneous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
phoebe-project/phoebe2-docs | 2.3/examples/extinction_eclipse_depth_v_teff.ipynb | gpl-3.0 | #!pip install -I "phoebe>=2.3,<2.4"
"""
Explanation: Extinction: Eclipse Depth Difference as Function of Temperature
In this example, we'll reproduce Figure 3 in the extinction release paper (Jones et al. 2020).
NOTE: this script takes a long time to run.
<img src="jones+20_fig3.png" alt="Figure 3" width="800px"/>
Setup
Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
End of explanation
"""
import matplotlib
matplotlib.rcParams['text.usetex'] = True
matplotlib.rcParams['pdf.fonttype'] = 42
matplotlib.rcParams['ps.fonttype'] = 42
matplotlib.rcParams['mathtext.fontset'] = 'stix'
matplotlib.rcParams['font.family'] = 'STIXGeneral'
from matplotlib import gridspec
%matplotlib inline
from astropy.table import Table
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger('error')
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new bundle.
End of explanation
"""
b['period@orbit']=10*u.d
b['teff@secondary']=5780.*u.K
b['requiv@secondary']=1.0*u.solRad
b.flip_constraint('mass@primary', solve_for='sma@binary')
b.flip_constraint('mass@secondary', solve_for='q')
"""
Explanation: First we'll define the system parameters
End of explanation
"""
times = phoebe.linspace(0, 10, 301)
b.add_dataset('lc', times=times, dataset='B', passband="Johnson:B")
b.add_dataset('lc', times=times, dataset='R', passband="Cousins:R")
"""
Explanation: And then create three light curve datasets at the same times, but in different passbands
End of explanation
"""
b.set_value_all('gravb_bol', 0.0)
b.set_value_all('ld_mode', 'manual')
b.set_value_all('ld_func', 'linear')
b.set_value_all('ld_coeffs', [0.0])
"""
Explanation: Now we'll set some atmosphere and limb-darkening options
End of explanation
"""
b.flip_constraint('ebv', solve_for='Av')
masses=np.array([ 0.6 , 0.7 , 0.8 , 0.9 , 1. , 1.1 , 1.2 , 1.3 , 1.4 ,
1.5 , 1.6 , 1.7 , 1.8 , 1.9 , 1.95, 2. , 2.1 , 2.2 ,
2.3 , 2.5 , 3. , 3.5 , 4. , 4.5 , 5. , 6. , 7. ,
8. , 10. , 12. , 15. , 20. ])
temps=np.array([ 4285., 4471., 4828., 5242.,
5616., 5942., 6237., 6508.,
6796., 7121., 7543., 7968.,
8377., 8759., 8947., 9130.,
9538., 9883., 10155., 10801.,
12251., 13598., 14852., 16151.,
17092., 19199., 21013., 22526.,
25438., 27861., 30860., 34753.])
radii=np.array([0.51, 0.63, 0.72, 0.80, 0.90,
1.01, 1.13, 1.26, 1.36, 1.44,
1.48, 1.51, 1.54, 1.57, 1.59,
1.61, 1.65, 1.69, 1.71, 1.79,
1.97, 2.14, 2.30, 2.48, 2.59,
2.90, 3.17, 3.39, 3.87, 4.29,
4.85, 5.69])
t=Table(names=('Mass','Tdiff','B1','B2','R1','R2'), dtype=('f4', 'f4', 'f8', 'f8', 'f8', 'f8'))
def binmodel(teff,requiv,mass):
    """
    Compute B- and R-band eclipse-depth differences (extincted minus
    unextincted) for one primary-star configuration.

    Mutates module-level state: the PHOEBE bundle ``b`` (primary/secondary
    parameters and the 'noext'/'ext' models) and appends one row to the
    global astropy Table ``t``.

    teff   -- primary effective temperature [K]
    requiv -- primary equivalent radius [solar radii]
    mass   -- primary mass [solar masses]
    """
    # Configure the primary; the secondary is pinned to a 1 Msun star.
    b.set_value('teff', component='primary', value=teff*u.K)
    b.set_value('requiv', component='primary', value=requiv*u.solRad)
    b.set_value('mass', component='primary', value=mass*u.solMass)
    b.set_value('mass', component='secondary', value=1.0*u.solMass)
    # Reference run with no reddening, E(B-V) = 0.
    b.set_value('ebv', value=0.0)
    b.run_compute(distortion_method='rotstar', irrad_method='none', model='noext', overwrite=True)
    # Second run with E(B-V) = 1 to measure the extinction effect.
    b.set_value('ebv', value=1.0)
    b.run_compute(distortion_method='rotstar', irrad_method='none', model='ext', overwrite=True)
    # Convert fluxes to magnitudes, zero each curve at its own minimum, and
    # difference the two curves (extincted vs. not) per band.
    Bextmags=-2.5*np.log10(b['value@fluxes@B@ext@model'])
    Bnoextmags=-2.5*np.log10(b['value@fluxes@B@noext@model'])
    Bdiff=(Bextmags-Bextmags.min())-(Bnoextmags-Bnoextmags.min())
    Rextmags=-2.5*np.log10(b['value@fluxes@R@ext@model'])
    Rnoextmags=-2.5*np.log10(b['value@fluxes@R@noext@model'])
    Rdiff=(Rextmags-Rextmags.min())-(Rnoextmags-Rnoextmags.min())
    # Temperature difference relative to a solar (G2V, 5780 K) star.
    tdiff=teff-5780
    # Indices 0 and 150 of the 301-point, 10-day light curve presumably
    # sample the two eclipses (t=0 and t=5 d) — TODO confirm against the
    # dataset phasing.
    t.add_row((mass, tdiff, Bdiff[0],Bdiff[150],Rdiff[0],Rdiff[150]))
def binmodel_teff(teff):
    """
    Same as ``binmodel`` but varies only the primary's effective
    temperature, leaving mass and radius at whatever the bundle currently
    holds (set to solar values by the surrounding notebook cells).

    Mutates the global PHOEBE bundle ``b`` and appends one row to the
    global Table ``t_teff`` (NOTE: ``t_teff`` is created in a later
    notebook cell, so this function must not be called before that cell).

    teff -- primary effective temperature [K]
    """
    b.set_value('teff', component='primary', value=teff*u.K)
    # Run without and then with extinction (E(B-V) = 0 vs. 1).
    b.set_value('ebv', value=0.0)
    b.run_compute(distortion_method='rotstar', irrad_method='none', model='noext', overwrite=True)
    b.set_value('ebv', value=1.0)
    b.run_compute(distortion_method='rotstar', irrad_method='none', model='ext', overwrite=True)
    # Magnitude curves zeroed at their minima, then differenced per band.
    Bextmags=-2.5*np.log10(b['value@fluxes@B@ext@model'])
    Bnoextmags=-2.5*np.log10(b['value@fluxes@B@noext@model'])
    Bdiff=(Bextmags-Bextmags.min())-(Bnoextmags-Bnoextmags.min())
    Rextmags=-2.5*np.log10(b['value@fluxes@R@ext@model'])
    Rnoextmags=-2.5*np.log10(b['value@fluxes@R@noext@model'])
    Rdiff=(Rextmags-Rextmags.min())-(Rnoextmags-Rnoextmags.min())
    tdiff=teff-5780
    # Indices 0 and 150 presumably sample the two eclipses — TODO confirm.
    t_teff.add_row((tdiff, Bdiff[0],Bdiff[150],Rdiff[0],Rdiff[150]))
# NOTE: this loop takes a long time to run
for i in range(0,len(masses)):
binmodel(temps[i], radii[i], masses[i])
#t.write("Extinction_G2V_ZAMS.dat", format='ascii', overwrite=True)
#t=Table.read("Extinction_G2V_ZAMS.dat", format='ascii')
plt.clf()
plt.plot(t['Tdiff'],t['B1'],color="b",ls="-", label="G2V eclipsed")
plt.plot(t['Tdiff'],t['B2'],color="b",ls="--", label="Secondary eclipsed")
plt.plot(t['Tdiff'],t['R1'],color="r",ls="-", label="")
plt.plot(t['Tdiff'],t['R2'],color="r",ls="--", label="")
plt.ylabel(r'$\Delta m$ ')
plt.xlabel(r'$T_\mathrm{secondary} - T_\mathrm{G2V}$')
plt.legend()
plt.xlim([-1450,25000])
t_teff=Table(names=('Tdiff','B1','B2','R1','R2'), dtype=('f4', 'f8', 'f8', 'f8', 'f8'))
b.set_value('requiv', component='primary', value=1.0*u.solRad)
b.set_value('mass', component='primary', value=1.0*u.solMass)
b.set_value('mass', component='secondary', value=1.0*u.solMass)
# NOTE: this loop takes a long time to run
for i in range(0,len(temps)):
binmodel_teff(temps[i])
#t_teff.write("Extinction_Solar_exceptTeff_test.dat", format='ascii', overwrite=True)
#t_teff=Table.read("Extinction_Solar_exceptTeff_test.dat", format='ascii')
plt.clf()
plt.plot(t_teff['Tdiff'],t_teff['B1'],color="b",ls="-", label="G2V eclipsed")
plt.plot(t_teff['Tdiff'],t_teff['B2'],color="b",ls="--", label="Secondary eclipsed")
plt.plot(t_teff['Tdiff'],t_teff['R1'],color="r",ls="-", label="")
plt.plot(t_teff['Tdiff'],t_teff['R2'],color="r",ls="--", label="")
plt.ylabel(r'$\Delta m$ ')
plt.xlabel(r'$T_\mathrm{secondary} - T_\mathrm{G2V}$')
plt.legend()
plt.xlim([-1450,25000])
"""
Explanation: And flip the extinction constraint so we can provide E(B-V).
End of explanation
"""
|
crcresearch/GOS | examples/multiscale-migration/GOS+Multiscale+Migration+Model.ipynb | apache-2.0 | import os
import sys
import subprocess
working_directory = os.path.abspath('')
sys.path.append(os.path.normpath(os.path.join(working_directory, "..", "..")))
# These libraries are used later to supply mathematical calculations.
import numpy as np
import pandas as pd
from math import e
from haversine import haversine
import ipywidgets as widgets
from ipywidgets import *
# Visualization
import matplotlib
import matplotlib.pyplot as plt
import gos
from gos.visualization import map_plot
%matplotlib inline
# Reduce some visual clutter by only printing ten rows at a time.
# This can be adjusted to match personal preferences.
pd.set_option("display.max_rows", 500)
plt.style.use('ggplot')
"""
Explanation: Multiscale Migration Model
Scroll down to "Editing Variable Values" after running all cells
This notebook implements our model using numpy, haversine, and pandas (with xlrd). It has been tested to run on Python 3.6. To start, import the required libraries.
End of explanation
"""
print(subprocess.run(['ls', 'data'], capture_output=True, check=True).stdout.decode('utf-8'))
"""
Explanation: The datasets used in the model are found in the /data subdirectory. Many are formatted as either CSV files or XLSX files.
End of explanation
"""
skill = IntSlider(min=0, max=100, value=90)
display(skill)
"""
Explanation: Use this tool to change the skill level at which migration is studied
End of explanation
"""
def file_path(name):
    """Return the relative path of *name* inside the local ``./data`` directory."""
    return "./data/{}".format(name)
"""
Explanation: The following shortcut function helps locate these data files easily.
End of explanation
"""
column_names = ["Name", "Code"]
def country_codes():
    """
    Load country rows (name and numeric country code) keyed by their
    ISO 3166 alpha-3 code.
    """
    path = file_path("Country_List_ISO_3166_Codes_Latitude_Longitude.csv")
    # usecols selects the name, alpha-3, and numeric-code columns;
    # index_col=1 is the alpha-3 column *within the selected columns*.
    # keep_default_na=False prevents values such as "NA" from being
    # parsed as missing data.
    codes = pd.read_csv(
        path,
        usecols=[0, 2, 3],
        index_col=1,
        keep_default_na=False,
    )
    codes.columns = column_names
    return codes
def other_codes():
    """
    Load supplementary country rows (entities outside the main ISO list)
    from ``other.csv``, indexed by their code column.
    """
    extra = pd.read_csv(file_path("other.csv"), index_col=1)
    # column_names[0:1] == ["Name"]; the file is assumed to carry exactly
    # one data column besides the index — TODO confirm against other.csv.
    extra.columns = column_names[0:1]
    return extra
world = gos.World(index=set(country_codes().index) | set(other_codes().index))
gos.Neighborhood.update(country_codes().groupby("Alpha-3 code")["Name"].apply(list).to_dict())
gos.Neighborhood.update(other_codes().groupby('ISO')["Name"].apply(list).to_dict())
gos.Neighborhood.update(country_codes().groupby("Alpha-3 code")["Code"].apply(list).to_dict())
"""
Explanation: Cleaning the data.
In this step, we define some helper functions that will help all of our datasets talk to each other.
End of explanation
"""
def freedom_index():
    """
    Read data from the Freedom Index workbook.

    Returns the second sheet as a DataFrame with "Country" left as a
    regular column; the caller sets the index itself
    (``freedom_index().set_index("Country")``).
    """
    # TODO: Add xlrd to requirements.
    xl = pd.ExcelFile(file_path("Freedom_index.xlsx"))
    df = xl.parse(1)
    # NOTE: the original called df.set_index("Country") here and discarded
    # the result (set_index returns a new frame and is not in-place), so
    # that no-op line has been removed; the caller re-applies it anyway.
    return df
fi = freedom_index().set_index("Country")
fi.columns = ["Freedom Index"]
fi.plot.hist(bins=10)
#print(len(fi))
"""
Explanation: <font color=red>
Importing Data
Freedom Index
The Freedom Index comes from Freedom House.
A high freedom index score indicates a politically free country
</font>
End of explanation
"""
def ab_values():
    """Load the generated per-country A/B return-to-skill coefficients."""
    workbook = file_path("A&B values for RTS.xlsx")
    return pd.read_excel(workbook)
ab = ab_values()
ab = ab.set_index("Country")
#ab.info()
#print(ab)
ab.plot.hist(subplots=True, sharex=False)
"""
Explanation: <font color=red>
A/B Values
These values are used in the return to skill function. These values are based on each country's income distribution.
</font>
End of explanation
"""
def passport_index():
    """
    Load the Passport Index table, indexed by country name, with its
    single data column labelled "Passport Index".
    """
    table = pd.read_excel(file_path("PassportIndex.xlsx"))
    table = table.set_index("Country")
    table.columns = ["Passport Index"]
    return table
pi = passport_index()
pi.plot.hist()
"""
Explanation: <font color=red>
Passport Index
The Passport Index comes from Arton Capital.
A low passport index indicates a welcoming country.
</font>
End of explanation
"""
unemployment_data = pd.read_csv(file_path("CIA_Unemployment.csv"), index_col=0, usecols=[1, 2])
unemployment_data["Unemployment"] /= 100
unemployment_data.plot.hist()
"""
Explanation: <font color=red>
Unemployment
</font>
End of explanation
"""
# Population
population = pd.read_csv(file_path("newPOP.csv"))
population = population.set_index("Country")
population
world.update_neighborhoods(ab)
world.update_neighborhoods(pi)
world.update_neighborhoods(unemployment_data)
world.update_neighborhoods(population)
world.update_neighborhoods(fi)
"""
Explanation: <font color=red>
Population
</font>
End of explanation
"""
lang_csv = pd.read_csv(file_path("languages.csv"), index_col=0)
lang_sets = [set([str(y).strip() for y in x[1] if y != ' ']) for x in lang_csv.iterrows()]
overlap = []
for s in lang_sets:
o = []
for i in range(len(lang_sets)):
o.append(len(lang_sets[i].intersection(s)) >= 1)
overlap.append(o)
lang_data = pd.DataFrame(overlap, index=lang_csv.index, columns=lang_csv.index)
print(len(lang_data))
world.add_matrix("language", 1-lang_data)
"""
Explanation: <font color=red>
Shared Language
Agents are assigned proficiency in languages spoken in their origin country. Moving to a country with entirely new languages presents a higher migration cost.
\begin{equation}
L_{O\leftrightarrow D}\ =
\begin{cases}
0 & \text{if origin and destination share a spoken language} \
1 & \text{otherwise} \
\end{cases}
\end{equation}
</font>
End of explanation
"""
un_pd = pd.read_excel(
file_path(
"UN_MigrantStockByOriginAndDestination_2015.xlsx"
),
skiprows=15
)
un_pd = un_pd.set_index('Unnamed: 1')
un_pd = un_pd.iloc[0:275,7:250]
# TODO: Should we be using the UN numbers for this?
un_pd = un_pd.sort_index().fillna(1)
world.add_matrix("un", un_pd)
"""
Explanation: <font color=red>
UN Migration History
</font>
End of explanation
"""
distance_frame = pd.read_csv(
file_path("Country_List_ISO_3166_Codes_Latitude_Longitude.csv"),
usecols=[2,4,5],
index_col=0,
keep_default_na=False)
locations = [(x[1][0], x[1][1]) for x in distance_frame.iterrows()]
rows = []
for i in range(len(locations)):
row = []
for loc in locations:
row.append(haversine(loc, locations[i]))
rows.append(row)
distance = pd.DataFrame(rows, distance_frame.index, distance_frame.index)
world.add_matrix("distance", distance / distance.max().max())
"""
Explanation: <font color=blue>
Calculations
</font>
<font color=blue>
Distance
The great circle distance between the average latitude and longitude of each country is used to determine distance between each pair of countries. A greater distance between countries corresponds to a greater cost of migration.
</font>
End of explanation
"""
pd.options.mode.chained_assignment = None # default='warn'
world.data["Freedom Index"]["AUS"]=50
#world.data["Passport Index"]["AUS"]=80
#world.data["Unemployment"]["AUS"]=.058
#world.data["Population"]["AUS"]=8000000
#world.matrices["un"]["AUS"]["USA"]=77845
"""
Explanation: <font color=blue>
Editing Variable Values
Remove the "#" and change the values to best simulate your scenario
To reset values, run all cells above
</font>
End of explanation
"""
world.update_neighborhoods(pd.Series(world.data["A"] * e ** (world.data["B"] * skill.value)), "rts")
"""
Explanation: <font color=blue>
Calculate "RTS".
This function measures income potential in a given country based on an agent's skill level (x).
It takes the form:
\begin{equation}
Ae^{Bx}
\end{equation}
Where A and B are constants unique to each country
</font>
End of explanation
"""
world.update_neighborhoods(pd.Series(world.data["A"] * e ** (world.data["B"] * 30)), "beta")
"""
Explanation: <font color=blue>
Country Beta Values
</font>
End of explanation
"""
# Build the pairwise freedom-index difference matrix: entry [i, j] is
# (FI_j - FI_i) / 100, so rows index the origin country and columns the
# destination.
rows=[]
for i in range(len(world.data["Freedom Index"])):
    row = []
    for freedom_index in world.data["Freedom Index"]:
        # NOTE(review): integer lookup ``[i]`` on a label-indexed Series
        # relies on positional fallback indexing (deprecated in newer
        # pandas) — confirm, or prefer .iloc[i].
        diff=(freedom_index-(world.data["Freedom Index"][i]))/100.0
        row.append(diff)
    rows.append(row)
# Square DataFrame indexed by country on both axes.
fi_diff = (pd.DataFrame(rows, world.data["Freedom Index"].index, world.data["Freedom Index"].index))
print(fi_diff)
"""
Explanation: <font color=blue>
Freedom Index difference
\begin{equation}
\frac{FI_D-FI_O}{100}
\end{equation}
This value weighs the origin and destination FI values against each other.
</font>
End of explanation
"""
delta1=.5
delta2=.25
political_barriers=delta1*world.data["Passport Index"]/100.0 + delta2*(1 - fi_diff)
print(political_barriers)
world.add_matrix("Political Barriers", political_barriers)
"""
Explanation: <font color=blue>
Political Barriers
$$ PB = \delta_1 \frac{PIR_{D}}{100} + \delta_2 (1-\frac{(FI_D-FI_O)}{100}) $$
Political barriers combines passport index rank (PIR) and freedom index scores (FI) for each country. The greater the value, the higher the cost of migration.
<font color=blue>
End of explanation
"""
gamma1=.5
gamma2=.5
OM=world.matrices['un'].sort_index(axis=1).sort_index(axis=0)/world.data['Population']
#transpose UN matrix for this calculation so that we are dividing by population of destination
EE=world.matrices['un'].T.sort_index(axis=1).sort_index(axis=0)/world.data['Population']
EE=EE.T
MH=gamma1*(OM)+gamma2*(EE)
#print(MH)
max_MH = MH.max().nlargest(10).mean()
#print(max_MH)
MH = 1 - (MH/max_MH)
MH[MH<0] = 0
print(MH)
world.add_matrix("Migration History", MH)
"""
Explanation: <font color=blue>
Migration History
Out Migration:
\begin{equation} OM_{O\rightarrow D}\ = \frac {\text {migrants from origin in destination}} {\text {population of origin}} \end{equation}
Ethnic Enclave:
\begin{equation} EE_{O\leftrightarrow D}\ = \frac {\text{migrants from origin in destination}} {\text {population of destination.}} \end{equation}
Migration History:
\begin{equation}
MH_{O\rightarrow D}\ = \gamma_1 (1-OM_{O\rightarrow D})\ + \gamma_2 (1-EE_{O\rightarrow D})
\end{equation}
</font>
End of explanation
"""
# Cost
alpha1=.35
alpha2=.35
alpha3=.15
alpha4=.15
c = (alpha1*world.matrices["distance"] +
alpha2*world.matrices["Migration History"] +
alpha3*world.matrices["language"] +
alpha4*world.matrices["Political Barriers"])
world.add_matrix("cost", c * world.data['beta'])
map_plot(world.matrices["cost"]["SYR"], title="Costs (SYR)")
map_plot(world.matrices["cost"]["FRA"], title="Costs (FRA)")
world.update_neighborhoods((1 - world.data["Unemployment"]) * world.data["rts"], "wages")
map_plot(world.data["wages"], title="Wages")
#beta = world.data.rts.mean()
#beta = RTS_list.median()
world.add_matrix("migration", (pd.DataFrame(
np.array(
[[x] * len(world.data) for x in world.data["wages"].values]
) - np.array(
[list(world.data["wages"].values)] * len(world.data)
),
world.data.index,
world.data.index
) - world.matrices["cost"]).clip(lower=0))
world.matrices["migration"] = world.matrices["migration"] / (world.matrices["migration"].sum() + 1)
world.matrices["migration"] = world.matrices["migration"] / world.matrices["migration"].sum(axis=1).max()
# TODO: Why does this require being transposed?
world.matrices["migration"] = (0.15 * world.matrices["migration"].transpose() * world.data["Population"]).transpose()
world.matrices["migration"]
map_plot(
world.matrices["migration"].sum(axis=1)+1,
title="Immigration Estimations (x={})".format(skill.value),
normc=matplotlib.colors.LogNorm
)
map_plot(
world.matrices["migration"].sum(),
title="Estimated Number of Emigrants (x={})".format(skill.value),
normc=matplotlib.colors.Normalize
)
map_plot(
world.matrices["migration"].sum(axis=1) - world.matrices["migration"].sum(),
title="Net Migration (x={})".format(skill.value),
normc=gos.visualization.MidPointNorm
)
"""
Explanation: <font color=blue>
Cost
</font>
<font color=blue>
The cost of migration between an origin and destination is the weighted average of the distance, migration history, shared language and political barriers between the two countries.
\begin{equation}
C = { \alpha_1 \frac{D_{O\leftrightarrow D}} {D_{Max}} + \alpha_2 MH_{O\rightarrow D} + \alpha_3 L_{O\leftrightarrow D} + \alpha_4 PB}
\end{equation}
</font>
End of explanation
"""
immigration = world.matrices["migration"].sum(axis=1)+1
emigration = world.matrices["migration"].sum()
net_migration = world.matrices["migration"].sum(axis=1) - world.matrices["migration"].sum()
"""
Explanation: <font color=purple>
Exporting Data
</font>
<font color=purple>
Defining Vectors
</font>
End of explanation
"""
immigration.to_excel("data/immigration.xlsx")
emigration.to_excel("data/emigration.xlsx")
net_migration.to_excel("data/net_migration.xlsx")
"""
Explanation: <font color=purple>
To export the following data, remove the "#" and run the cell.
The excel files will appear in the data folder under multiscale-migration.
Change the file names (in red) everytime you want to create a spreadsheet with new results. Otherwise new changes will override the original spreadsheets.
</font>
End of explanation
"""
|
dataDogma/Computer-Science | Courses/DAT-208x/DAT208x - Week 3 - Section 1 - Functions.ipynb | gpl-3.0 | # use python help() on max()
help(max)
# use help() on round()
help(round)
# example on max
height = [ 4.5, 5.2, 6.7, 4.8, 5.6 ]
print("The tallest one is : " + str( max( height ) ) + " feets" )
# exmple on round
some_number = 5.63
# round() with two arguments, "number" and "decimal place significance"
print("The number is rounded to: " + str( round( some_number, 1 ) ) + " with 1 decimal place of significance" )
# next, round() with only one argument
print("\nThe number is rounded to: " + str( round( some_number ) ) + " by default" )
"""
Explanation: Introductory functions for data science
Lecture Objective:
Learning max() function and it's usage.
Learning round() function and it's usage.
End of explanation
"""
"""
Instructionns:
+ Use print() in combination with type() to print out the type of
var1.
+ Use len() to get the length of the list var1.
Wrap it in a print() call to directly print it out.
+ Use int() to convert var2 to an integer. Store the output as out2.
"""
# Create variables var1 and var2
var1 = [1, 2, 3, 4]
var2 = True
# Print out type of var1
print( type( var1 ) )
# Print out length of var1
print( len( var1 ))
# Convert var2 to an integer: out2
out2 = int( var2 )
print(out2)
"""
Explanation: What if while using max()there were multiple items?
In this case, the function returns the first one encountered. This remains consistent with other sort-stability preserving tools, such as sorted() and heapq().
Exercise:
1) RQ1: What is a Python function?
Ans: A piece of reusable Python code, that solves a particular problem.
2) RQ2: You have a list named x. To calculate the minimum value in this list, you use the min() function.
Which Python command should you use?
Ans: min(x)
3) RQ3: What Python command opens up the documentation from inside the IPython Shell for the min fucntion?
Ans: help(min)
4) RQ4: The function round has two arguments. Select the two correct statements about these arguments.
Ans: Number is a required argument, and ndigits is an optional argument.
Lab: Functions
Objective:
Use functions in different ways.
Experiment with different ways of specifying arguments.
How default arguments work.
1 Familiar functions - 100xp, status: Earned
The genereal recepie for calling functions is:
`output = function_name(input)`
or
`variable = function_name( required arg, [ optional arg ] )`
End of explanation
"""
?sorted
help(sorted)
"""
Problem Definition:
In this exercise, you'll only have to specify "iterable" and "reverse", not key.
The first input you pass to "sorted()" will obviously be matched to the iterable argument,
but what about the second input?
To tell Python you want to specify reverse without changing anything about key,
you can use "=` :
sorted(___, reverese = ___)
Two lists have been created,
+ paste them together and
+ sort them in "descending order".
Instructions:
+ Use "+" to merge the contents of "first" and "second" into a new list: "full".
+ Call "sorted()" on "full" and specify the "reverse" argument to be "True".
- Save the sorted list as "full_sorted".
+ Finish off by printing out "full_sorted".
"""
# Create lists first and second
first = [11.25, 18.0, 20.0]
second = [10.75, 9.50]
# Paste together first and second: full
full = list(first + second)
print("Modified list: " + str( full ) )
# Sort full in descending order: full_sorted
full_sorted = sorted( full, key = None, reverse = True )
# Print out full_sorted
print("\nThe sorted list is decending order is: " + str( full_sorted ) )
"""
Explanation: 2. Help1 - 50xp, status: Earned
To get the help on the any function, use the following two syntax or function:
help( function_name )
?function_name
Use the shell to open up the documentation on complex().
Which of the following statements is true?:
Ans: complex() takes two arguments, required: real number and optional: imaginary number. If only requried argument is inserted, by default the value of optional,is 0.
3. Multiple arguments -- 100xp, status: Earned
[] barackets around a function argument, represent an "optional argument"
Python also uses different way's to notify user about arg's being optional.
E.g. documentation of sorted() takes three arguments:
Iterable.
key, where key = None, if arg non-specified, key will be "None".
reverese, where reverse = False, if arg non-specified, argument by default will be "False".
End of explanation
"""
|
AtmaMani/pyChakras | udemy_ml_bootcamp/Machine Learning Sections/Logistic-Regression/Logistic Regression Project - Solutions.ipynb | mit | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
"""
Explanation: <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
Logistic Regression Project - Solutions
In this project we will be working with a fake advertising data set, indicating whether or not a particular internet user clicked on an Advertisement on a company website. We will try to create a model that will predict whether or not they will click on an ad based off the features of that user.
This data set contains the following features:
'Daily Time Spent on Site': consumer time on site in minutes
'Age': cutomer age in years
'Area Income': Avg. Income of geographical area of consumer
'Daily Internet Usage': Avg. minutes a day consumer is on the internet
'Ad Topic Line': Headline of the advertisement
'City': City of consumer
'Male': Whether or not consumer was male
'Country': Country of consumer
'Timestamp': Time at which consumer clicked on Ad or closed window
'Clicked on Ad': 0 or 1 indicated clicking on Ad
Import Libraries
Import a few libraries you think you'll need (Or just import them as you go along!)
End of explanation
"""
ad_data = pd.read_csv('advertising.csv')
"""
Explanation: Get the Data
Read in the advertising.csv file and set it to a data frame called ad_data.
End of explanation
"""
ad_data.head()
"""
Explanation: Check the head of ad_data
End of explanation
"""
ad_data.info()
ad_data.describe()
"""
Explanation: Use info and describe() on ad_data
End of explanation
"""
sns.set_style('whitegrid')
ad_data['Age'].hist(bins=30)
plt.xlabel('Age')
"""
Explanation: Exploratory Data Analysis
Let's use seaborn to explore the data!
Try recreating the plots shown below!
Create a histogram of the Age
End of explanation
"""
sns.jointplot(x='Age',y='Area Income',data=ad_data)
"""
Explanation: Create a jointplot showing Area Income versus Age.
End of explanation
"""
sns.jointplot(x='Age',y='Daily Time Spent on Site',data=ad_data,color='red',kind='kde');
"""
Explanation: Create a jointplot showing the kde distributions of Daily Time spent on site vs. Age.
End of explanation
"""
sns.jointplot(x='Daily Time Spent on Site',y='Daily Internet Usage',data=ad_data,color='green')
"""
Explanation: Create a jointplot of 'Daily Time Spent on Site' vs. 'Daily Internet Usage'
End of explanation
"""
sns.pairplot(ad_data,hue='Clicked on Ad',palette='bwr')
"""
Explanation: Finally, create a pairplot with the hue defined by the 'Clicked on Ad' column feature.
End of explanation
"""
from sklearn.model_selection import train_test_split
X = ad_data[['Daily Time Spent on Site', 'Age', 'Area Income','Daily Internet Usage', 'Male']]
y = ad_data['Clicked on Ad']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
"""
Explanation: Logistic Regression
Now it's time to do a train test split, and train our model!
You'll have the freedom here to choose columns that you want to train on!
Split the data into training set and testing set using train_test_split
End of explanation
"""
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
"""
Explanation: Train and fit a logistic regression model on the training set.
End of explanation
"""
predictions = logmodel.predict(X_test)
"""
Explanation: Predictions and Evaluations
Now predict values for the testing data.
End of explanation
"""
from sklearn.metrics import classification_report
print(classification_report(y_test,predictions))
"""
Explanation: Create a classification report for the model.
End of explanation
"""
|
ceos-seo/Data_Cube_v2 | agdc-v2/contrib/notebooks/zonal-stats-example.ipynb | apache-2.0 | dc = datacube.api.API()
"""
Explanation: Query the datacube
End of explanation
"""
vfname = '/g/data2/v10/public/water-example/sample-water-bodies.shp'
src = fiona.open(vfname, 'r')
xidx = (src.bounds[0], src.bounds[2])
yidx = (src.bounds[-1], src.bounds[1])
gdf = geopandas.read_file(vfname)
gdf.plot()
"""
Explanation: Open the vector file and get the bounding box
End of explanation
"""
nbar = dc.get_dataset(product='nbar', platform='LANDSAT_8',
y=yidx, x=xidx)
nbar
"""
Explanation: Using the co-ordinates of the vector file's bunding box, query the datacube.
End of explanation
"""
sr = osr.SpatialReference()
sr.ImportFromWkt(nbar.crs.spatial_ref)
crs = from_string(sr.ExportToProj4())
print crs
"""
Explanation: Extract and convert the co-ordinate reference system into rasterio's preferred format
End of explanation
"""
pix_x = nbar.x.values[1] - nbar.x.values[0]
pix_y = nbar.y.values[1] - nbar.y.values[0]
ulx = nbar.x.values[0] - pix_x / 2.0
uly = nbar.y.values[0] - pix_y /2.0
transform = Affine.from_gdal(*[ulx, pix_x, 0, uly, 0, pix_y])
print transform
"""
Explanation: Create an affine/geotransformtion matrix.
We make an assumption that the spatial array is uniformally spaced in order for this to work.
End of explanation
"""
ha = pix_x **2 / 10000.0
print "Pixel area in hectares: {}".format(ha)
dims = nbar.band_4.shape
img = numpy.zeros((3, dims[1], dims[2]), dtype='float32')
img[0] = nbar.band_5[28]
img[1] = nbar.band_4[28]
img[2] = nbar.band_3[28]
img[img == -999] = numpy.nan
img /= 10000
scl = exposure.equalize_hist(img, mask=numpy.isfinite(img))
plt.imshow(scl.transpose(1, 2, 0))
"""
Explanation: Determine the pixel size in hectares. We'll use this later on.
End of explanation
"""
ras = rasterise_vector(vfname, shape=dims[1:], crs=crs, transform=transform)
seg = Segments(ras)
print "Number of segments: {}".format(seg.n_segments)
"""
Explanation: Segmentation
Rasterise all the geometry contained within the vector file, and create a Segments object.
End of explanation
"""
dat = nbar.band_5[28].values.astype('float32')
dat[dat == -999] = numpy.nan
nir_stats = seg.basic_statistics(dat, nan=True, scale_factor=ha)
dat = nbar.band_4[28].values.astype('float32')
dat[dat == -999] = numpy.nan
red_stats = seg.basic_statistics(dat, nan=True, scale_factor=ha)
dat = nbar.band_3[28].values.astype('float32')
dat[dat == -999] = numpy.nan
green_stats = seg.basic_statistics(dat, nan=True, scale_factor=ha)
nir_stats.head(10)
"""
Explanation: For the NIR, Green, & Red bands, calculate statistics for every segment.
End of explanation
"""
bboxes = seg.bounding_box()
sid = 185
window = bboxes[sid]
ys, ye = window[0]
xs, xe = window[1]
print window
subs = img[:, ys:ye, xs:xe]
scl_subs = exposure.equalize_hist(subs, mask=numpy.isfinite(subs))
plt.title(src[sid - 1]['properties']['FEATURETYP'] + ' (???)')
plt.imshow(scl_subs.transpose(1, 2, 0))
"""
Explanation: Viewing segments
Get the bounding boxes for every segment
End of explanation
"""
nir_stats[nir_stats['Mean'] < 500]
sid = 357
window = bboxes[sid]
ys, ye = window[0]
xs, xe = window[1]
print window
subs = img[:, ys:ye, xs:xe]
scl_subs = exposure.equalize_hist(subs, mask=numpy.isfinite(subs))
plt.title(src[sid - 1]['properties']['FEATURETYP'])
plt.imshow(scl_subs.transpose(1, 2, 0))
sid = 262
window = bboxes[sid]
ys, ye = window[0]
xs, xe = window[1]
print window
subs = img[:, ys:ye, xs:xe]
scl_subs = exposure.equalize_hist(subs, mask=numpy.isfinite(subs))
plt.title(src[sid - 1]['properties']['NAME'])
plt.imshow(scl_subs.transpose(1, 2, 0))
"""
Explanation: Find a segment with a low mean value.
End of explanation
"""
dat = nbar.band_6[0].values.astype('float32')
dat[dat == -999] = numpy.nan
swir_stats = seg.basic_statistics(dat, nan=True, scale_factor=ha)
swir_stats['timestamp'] = nbar.time[0].values
for i in range(1, dims[0]):
dat = nbar.band_6[i].values.astype('float32')
dat[dat == -999] = numpy.nan
stats = seg.basic_statistics(dat, nan=True, scale_factor=ha)
stats['timestamp'] = nbar.time[i].values
swir_stats = swir_stats.append(stats)
swir_stats.set_index('timestamp', inplace=True)
print "Number of records: {}".format(swir_stats.shape[0])
swir_stats.head(10)
"""
Explanation: Timeseries of the SWIR band for every segment
End of explanation
"""
sid = 262
roi = swir_stats[swir_stats['Segment_IDs'] == sid]
roi = roi[numpy.isfinite(roi['Mean'])]
roi['Mean'].plot(title=src[sid - 1]['properties']['NAME'])
sid = 357
roi = swir_stats[swir_stats['Segment_IDs'] == sid]
roi = roi[numpy.isfinite(roi['Mean'])]
roi['Mean'].plot(title=src[sid - 1]['properties']['FEATURETYP'])
sid = 185
roi = swir_stats[swir_stats['Segment_IDs'] == sid]
roi = roi[numpy.isfinite(roi['Mean'])]
roi['Mean'].plot(title=src[sid - 1]['properties']['FEATURETYP'])
"""
Explanation: Segment (polygon) timeseries profiles
End of explanation
"""
sid = 262
window = bboxes[sid]
ys, ye = window[0]
xs, xe = window[1]
roi = swir_stats[swir_stats['Segment_IDs'] == sid]
roi = roi[numpy.isfinite(roi['Mean'])]
wh1 = (roi['Mean'] < 1100) & (roi['Total'] > 110000000)
roi[wh1]
wh2 = (roi['Mean'] > 2900) & (roi['StdDev'] < 400)
roi[wh2]
fig, axes = plt.subplots(ncols=2)
timestamp = roi[wh1].index[0]
subs[0] = nbar.band_5.loc[timestamp][ys:ye, xs:xe]
subs[1] = nbar.band_4.loc[timestamp][ys:ye, xs:xe]
subs[1] = nbar.band_3.loc[timestamp][ys:ye, xs:xe]
scl_subs = exposure.equalize_hist(subs, mask=subs != -999)
axes[0].set_title(timestamp)
axes[0].imshow(scl_subs.transpose(1, 2, 0))
timestamp = roi[wh2].index[0]
subs[0] = nbar.band_5.loc[timestamp][ys:ye, xs:xe]
subs[1] = nbar.band_4.loc[timestamp][ys:ye, xs:xe]
subs[1] = nbar.band_3.loc[timestamp][ys:ye, xs:xe]
scl_subs = exposure.equalize_hist(subs, mask=subs != -999)
axes[1].set_title(timestamp)
axes[1].imshow(scl_subs.transpose(1, 2, 0))
"""
Explanation: Investigating a segment/polygon
End of explanation
"""
|
jamesjia94/BIDMach | tutorials/NVIDIA/BIDMach_basic_classification.ipynb | bsd-3-clause | import BIDMat.{CMat,CSMat,DMat,Dict,IDict,FMat,FND,GDMat,GMat,GIMat,GSDMat,GSMat,HMat,Image,IMat,Mat,SMat,SBMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import BIDMat.Solvers._
import BIDMat.JPlotting._
import BIDMach.Learner
import BIDMach.models.{FM,GLM,KMeans,KMeansw,ICA,LDA,LDAgibbs,NMF,RandomForest,SFA}
import BIDMach.datasources.{MatSource,FileSource,SFileSource}
import BIDMach.mixins.{CosineSim,Perplexity,Top,L1Regularizer,L2Regularizer}
import BIDMach.updaters.{ADAGrad,Batch,BatchNorm,IncMult,IncNorm,Telescoping}
import BIDMach.causal.{IPTW}
// Probe the runtime environment: MKL (CPU BLAS) and CUDA (GPU) availability.
Mat.checkMKL
Mat.checkCUDA
Mat.setInline
// Report remaining GPU memory when at least one CUDA device was detected.
if (Mat.hasCUDA > 0) GPUmem
"""
Explanation: BIDMach: basic classification
For this tutorial, we'll BIDMach's GLM (Generalized Linear Model) package. It includes linear regression, logistic regression, and support vector machines (SVMs). The imports below include both BIDMat's matrix classes, and BIDMach machine learning classes.
End of explanation
"""
var dir = "../data/rcv1/"  // Assumes bidmach is run from BIDMach/tutorials. Adjust to point to the BIDMach/data/rcv1 directory
// Load the RCV1 train/test documents (sparse term matrices) and their
// category label matrices, timing the load with tic/toc.
tic
val train = loadSMat(dir+"docs.smat.lz4")
val cats = loadFMat(dir+"cats.fmat.lz4")
val test = loadSMat(dir+"testdocs.smat.lz4")
val tcats = loadFMat(dir+"testcats.fmat.lz4")
toc
"""
Explanation: Dataset: Reuters RCV1 V2
The dataset is the widely used Reuters news article dataset RCV1 V2. This dataset and several others are loaded by running the script <code>getdata.sh</code> from the BIDMach/scripts directory. The data include both train and test subsets, and train and test labels (cats).
End of explanation
"""
// Build a logistic-regression learner (model + optimizer + options object)
// over the training documents and their category labels.
val (mm, opts) = GLM.learner(train, cats, GLM.logistic)
"""
Explanation: BIDMach's basic classifiers can invoked like this on data that fits in memory:
End of explanation
"""
// List the tunable options, then raise the learning rate from its default.
opts.what
opts.lrate=0.3f
"""
Explanation: The last option specifies the type of model, linear, logistic or SVM. The syntax is a little unusual. There are two values returned. The first <code>mm</code> is a "learner" which includes model, optimizer, and mixin classes. The second <code>opts</code> is an options object specialized to that combination of learner components. This design facilitates rapid iteration over model parameters from the command line or notebook.
The parameters of the model can be viewed and modified by doing <code>opts.what</code>
End of explanation
"""
// Two passes over the dataset, then run training.
opts.npasses=2
mm.train
"""
Explanation: Most of these will work well with their default values. On the other hand, a few have a strong effect on performance. Those include:
<pre>
lrate: the learning rate
batchSize: the minibatch size
npasses: the number of passes over the dataset
</pre>
We will talk about tuning those in a moment. For now lets train the model:
End of explanation
"""
// Wrap the trained model in a predictor bound to the test documents.
val (pp, popts) = GLM.predictor(mm.model, test)
"""
Explanation: The output includes important information about the training cycle:
* Percentage of dataset processed
* Cross-validated log likelihood (or negative loss)
* Overall throughput in gigaflops
* Elapsed time in seconds
* Total Gigabytes processed
* I/O throughput in MB/s
* GPU memory remaining (if using a GPU)
The likelihood is calculated on a set of minibatches that are held out from training on every cycle. So this is a cross-validated likelihood estimate. Cross-validated likelihood will increase initially, but will then flatten and may decrease. There is random variation in the likelihood estimates because we are using SGD. Determining the best point to stop is tricky to do automatically, and is instead left to the analyst.
To evaluate the model, we build a classifier from it:
End of explanation
"""
// Score the test set with the trained model.
pp.predict
"""
Explanation: And invoke the predict method on the predictor:
End of explanation
"""
val preds = FMat(pp.preds(0))
// Mean per-sample logistic log likelihood against the true test labels;
// the 1e-7f term guards the log against exactly-0/1 predictions.
val lls = mean(ln(1e-7f + tcats ∘ preds + (1-tcats) ∘ (1-preds)),2)  // actual logistic likelihood
mean(lls)
"""
Explanation: Although ll values are printed above, they are not meaningful (there is no target to compare the prediction with).
We can now compare the accuracy of predictions (preds matrix) with ground truth (the tcats matrix).
End of explanation
"""
val rocs = roc2(preds, tcats, 1-tcats, 100)   // Compute ROC curves for all categories
plot(rocs(?,6))
plot(rocs(?,0->5))
// Mean of each ROC curve gives the area under the curve (AUC) per category.
val aucs = mean(rocs)
aucs(6)
"""
Explanation: A more thorough measure is ROC area:
End of explanation
"""
|
davidsanfal/iPython-Notebook | intro_to_py3/Python3.ipynb | mit | print("hello world")
"""
Explanation: <p style="text-align: center; font-size: 200%"><a href="http://davidsanfal.github.io/">David Sánchez Falero</a></p>
<p style="text-align: center; font-size: 200%">david.sanchez.falero@gmail.com</p>
<p style="text-align: center; font-size: 200%">@David_SanFal</p>
Introducción a Python 3:
¿Por qué Python?
Fácil de aprender.
Fácil de leer.
Lenguaje de propósito general.
Una librería estándar muy completa.
Es multiplataforma.
Rápido de desarrollar.
Tiene una comunidad muy activa, de la cual aprender.
Es interpretado, no tienes que pelearte con el compilador.
¿Por qué Python 3?
Se resume en la frase que aparece en la wiki de python:
Short version: Python 2.x is legacy, Python 3.x is the present and future of the language
Index
"Hello World" en Python
Variables
Basic types (int, float and string)
Arithmetic operators
Logical operators
Conditional structures
Functions
List, Tuples and Strings
Dictionaries
I/O
Exceptions
Classes
Hello World
End of explanation
"""
# A variable is just a name bound to a value; rebinding may change its type.
age = 10
print(age)
# Rebind the same name to a string ("diez" is Spanish for ten).
age = "diez"
print(age)
"""
Explanation: Variables
Las variables actúan como contenedores para los datos. Puedes poner el dato que quieras almacenar dentro y usar el nombre de la variable siempre que quieras saber el contenido.
Las variables se definen asignándoles un nombre, el cual suele dar información del dato que contiene o que va a contener. Por ejemplo, si queremos almacenar un numero que corresponde a la edad de una persona que tiene 10 años, podemos utilizar la variable age para almacenar el numero 10.
Como contenedores, su contenido puede cambiar con solo reasignar su contenido.
End of explanation
"""
# int
a = 2
b = 4
print(a + b)
print(type(a))
"""
Explanation: Basic types
Los datos que almacena una variable pueden ser de diferentes tipos. Vamos a ver 3 tipos básicos, los números enteros(int), con decimales(float) y las cadenas de texto(string).
Como ejemplo, un int podría ser 12 o -345, un float podría ser 12.43 o -345.00 y string podría ser "soy un string" o 'Otro string'.
int
End of explanation
"""
# float
a = 2
b = 4.52
print(a + b)
print(type(b))
#from float to int
a = 2
b = 4.52
print(a + int(b))
"""
Explanation: float
End of explanation
"""
#string
a = "2"
b = "4"
print(a + b)
print(type(b))
"""
Explanation: string
End of explanation
"""
a = "2"
b = "4"
c = 3
a + b + c
a = "2"
b = "4"
c = 3
print(a + b + str(c))
print(int(a) + float(b) + c)
a = "2"
b = 3
a * b
"""
Explanation: Play with types
End of explanation
"""
print (3 + 4)
print (3 - 4)
print (3 * 4)
print (3 / 4)
print (3 % 2)
print (3 ** 2) # 3^2
print (3 // 4) # floor division
a = 0
a += 2 #a = a + 2
print(a)
a //= 4 #a = a // 4
print(a)
"""
Explanation: Arithmetic operators
Como hemos visto antes, se puede hacer operaciones con diferentes tipos de datos. Vamos a ver los diferentes operadores aritméticos que existen en Python3.
End of explanation
"""
print(1 == 0)
print(1 > 0)
print(1 < 0)
print(1 <= 0)
print(1 >= 0)
"""
Explanation: Logical operators
Además de los operadores aritméticos, existen operadores lógicos. Vamos a ver los diferentes operadores lógicos que existen en Python3.
End of explanation
"""
print(1 == 3.4 and "a" == 32)
print(1 == 3.4 or 1)
print(False == True)
print(23 == True)
print((1 == 3.4) == False)
print(not (1 == 3.4))
"""
Explanation: También podemos hacer operaciones lógicas con los operadores and, or, not o utilizando True o False para comparar.
End of explanation
"""
a = 3
if a > 2:
print("a > 2")
else:
print("a <= 2")
a = 1
if a > 2:
print("a > 2")
else:
print("a <= 2")
a = -3
if a >= 2:
print("a => 2")
elif a <= 0:
print("a <= 0")
else:
print("0 < a < 2")
a = 1
if a >= 2:
print("a => 2")
elif a <= 0:
print("a <= 0")
else:
print("0 < a < 2")
"""
Explanation: conditional structures
If, elif and else
End of explanation
"""
for a in range(4):
    print(a)
#range(4)=0,1,2,3
for a in range(1, 4):
    print(a)
# range(1, 4) = 1, 2, 3  (comment fixed: the start value 1 is included,
# 0 is not, and the stop value 4 is excluded)
"""
Explanation: for
End of explanation
"""
a = 0
while a < 4:
    print (a)
    a += 4  # NOTE(review): steps by 4, so the loop prints only 0 once; a += 1 may have been intended — confirm
"""
Explanation: while
End of explanation
"""
def hello():
    """Print a fixed greeting (a function with no parameters)."""
    print("hello world")
hello()
def printer(a):
    """Print whatever argument it receives."""
    print(a)
a = "hello world"
printer(a)
printer(4)
def add(a, b):
    """Return the sum of a and b, computed directly in the return."""
    return a + b
c = add(1, 3)
print(c)
def add(a, b):
    """Same sum, stored in a local first (redefines the add() above)."""
    result = a + b
    return result
d = add(1, 3)
print(d)
# 'result' only exists inside add(); this line raises NameError on purpose.
print(result) #Scope error
def suma(a, b):
    """Return two values (sum and difference) as a tuple."""
    c = a + b
    d = a - b
    return c, d
e, f = suma(2, 4)
print(e, f)
"""
Explanation: functions
End of explanation
"""
sampleList = [1,2,3,4,5,6,7,8]
print (sampleList[1])
sampleList = [1,2,3,4,5,6,7,8]
for a in sampleList:
print(a)
"""
Explanation: lists, tuples and strings
Las listas y las tuplas son vectores iterables de elementos. Esto quiere decir que puede contener de 0 a n elementos y que se puede recorrer el contenido de la lista o la tupla preguntando que hay dentro o modificando su contenido. Se pueden almacenar como una variable más.
Un string una secuencia ordenada de longitud arbitraria de caracteres. Funciona igual que una tupla a efectos de iterar sobre sus elementos.
list
End of explanation
"""
sampleTuple = (1,2,3,4,5,6,7,8)
for a in sampleList:
print(a)
"""
Explanation: .append(value) - añade value al final
.count('x') - número de apariciones de X
.index('x') - posición de X
.insert('y','x') - Inserta X en la posición Y
.pop() - Retorna y elimina el último elemento
.remove('x') - Elimina X
.reverse() - Da la vuelta a la lista
.sort() - Ordena la lista alfabéticamente en orden ascendente, o numéricamente en orden ascendente.
tuple
End of explanation
"""
myList = [1,2,3]
myList.append(4)
myTuple = (1,2,3)
myTuple.append(4)
"""
Explanation: list vs tuple
End of explanation
"""
a = "Monty Python"
print(a[6:10])
print(a[-12:-7])
print(a[2:])
print(a[:])
print(a)
"""
Explanation: string
End of explanation
"""
print('Num %s' % 123.44)
print('Num %i' % 123.44)
print('Num %f' % 123.44)
print("Num %.2f" % 123.444)
a = "abcdefghijklmnopqrstu"
print('%.10s' % a)
a = "strings".replace("s", "S")
print(a)
print(a.startswith("St"))
print(a.endswith("ngs"))
"""
Explanation:
End of explanation
"""
myDict = {"foo": 2,
"bar": 20}
print(myDict ["bar"])
myDict = {"foo": 2,
"bar": 20}
for a in myDict:
print (a, myDict[a])
for a, b in myDict.items():
print (a, b)
for a in myDict.values():
print (a)
myDict = {"David": {'edad': 26,
'sexo': 'H'},
"Rita": {'edad': 24,
'sexo': 'M'}}
for a in myDict:
print (a)
for b in myDict[a]:
print (b, myDict[a][b])
"""
Explanation: dictionaries
Un diccionario es una colección de pares de datos con una relación de clave y valor.
Cada dato dentro de un diccionario tiene una clave asociada, con la que puede ser recuperado.
End of explanation
"""
f = open('hello.txt', 'r')
print(f.read(1))
print('-' * 10)
print(f.read())
f.close()
f = open('hello.txt', 'r')
print(f.readline())
print('-' * 10)
print(f.readline())
f.close()
f = open('hello.txt', 'r')
myList = []
for line in f:
myList.append(line)
print(myList)
f.close()
with open('hello.txt', 'r') as f:
myList = []
for line in f:
myList.append(line)
print(myList)
name = input("What is your name? ")
age = int(input("How old are you? "))
print ("%s, %s " % (name, age))
"""
Explanation: I/O
hello.txt
Hola a todos,
soy un texto de prueba
para aprender como Python 3
manipula ficheros.
End of explanation
"""
var = '1'
try:
var = var + 1
except:
print("ERROR")
print(var)
var = '1'
try:
var = var + 1
except:
var = int(var) + 1
print(var)
var = '1'
try:
var = var + 1
except TypeError as e:
# e.message in python 2
print("ERROR: %s" % e)
finally:
print(var)
def add_one(var):
try:
var += 1
except TypeError as e:
print("ERROR: %s" % e)
raise TypeError("TypeError: %s" % e)
finally:
print("var value: %s" % var)
print("All correct")
add_one(1)
try:
add_one("1")
except TypeError as e:
print(e)
"""
Explanation: exceptions
Interrupciones del flujo normal de ejecución de cualquier programa debido a un error.
End of explanation
"""
class Counter(object):
    """Minimal counter: starts at 0 and can only add."""
    def __init__(self):
        self.current = 0
    def add(self, amount):
        self.current += amount
    def getCurrent(self):
        return self.current
myCounter = Counter()
myCounter.add(7)
a = myCounter.getCurrent()
print(a)
# Attributes are public in Python, so direct access works too.
print(myCounter.current)
class Counter(object):
    """Counter whose starting value is a required constructor argument."""
    def __init__(self, num):
        self.current = num
    def add(self, amount):
        self.current += amount
    def getCurrent(self):
        return self.current
myCounter = Counter(5)
print(myCounter.current)
# Calling Counter() without the now-required argument raises a TypeError,
# which this try/except demonstrates by printing the error.
try:
    myCounter = Counter()
except Exception as e:
    print(e)
class Counter(object):
    """Counter with a default starting value, so both call forms work."""
    def __init__(self, num=0):
        self.current = num
    def add(self, amount):
        self.current += amount
    def getCurrent(self):
        return self.current
myCounter = Counter()
print(myCounter.current)
myCounter = Counter(5)
print(myCounter.current)
class Counter(object):
    """Counter with a default starting value (same as the version above)."""
    def __init__(self, num=0):
        self.current = num
    def add(self, amount):
        self.current += amount
    def getCurrent(self):
        return self.current
class FullCounter(Counter):
    """Counter extended through inheritance with a remove() method."""
    def remove(self, amount):
        self.current -= amount
myFullCounter = FullCounter(1)
myFullCounter.add(7)
a = myFullCounter.getCurrent()
print(a)
myFullCounter.remove(9)
# Bug fix: the original read myCounter (an earlier, unrelated instance),
# printing its stale value instead of this counter's 8 - 9 = -1.
a = myFullCounter.getCurrent()
print(a)
# Rough value of pi used by the area computations below.
PI = 3.1416
class Figure(object):
    """Base class: subclasses are expected to override calculate_area()."""
    def __init__(self, name):
        self.name = name
        self.area = 0
    def print_area(self):
        print(self.area)
    def calculate_area(self):
        # Poor man's abstract method: fails unless a subclass overrides it.
        raise Exception("Define me!")
class Circle(Figure):
    """First attempt at a subclass: forgets to override calculate_area()."""
    def __init__(self, radio, name='Circle'):
        self.radio = radio
        super(Circle, self).__init__(name)
    def print_name(self):
        print(self.name)
a = Circle(2)
a.print_name()
# Deliberately raises Exception("Define me!") because this Circle has not
# overridden calculate_area() yet.
a.calculate_area()
a.print_area()
class Circle(Figure):
    """Circle that properly overrides calculate_area(): area = PI * r**2."""
    def __init__(self, radio, name='Circle'):
        self.radio = radio
        super(Circle, self).__init__(name)
    def print_name(self):
        print(self.name)
    def calculate_area(self):
        self.area = PI * (self.radio ** 2)
a = Circle(2)
a.print_name()
a.calculate_area()
a.print_area()
class Square(Figure):
    """Rectangle, really: area = side1 * side2."""
    def __init__(self, side1, side2, name='Square'):
        self.side1 = side1
        self.side2 = side2
        super(Square, self).__init__(name)
    def print_name(self):
        print(self.name)
    def calculate_area(self):
        self.area = self.side1 * self.side2
a = Square(2, 8)
a.print_name()
a.calculate_area()
a.print_area()
"""
Explanation: class
Una clase es una plantilla para la creación de objetos de datos según un modelo predefinido. Las clases se utilizan para representar entidades o conceptos.
End of explanation
"""
|
jonathanmorgan/msu_phd_work | methods/reliability/prelim_month-reliability.ipynb | lgpl-3.0 | import datetime
import six
# Record (and show) when the imports finished — useful in long notebook runs.
print( "packages imported at " + str( datetime.datetime.now() ) )
"""
Explanation: prelim_month - reliability
original title: 2017.10.25 - work log - prelim_month - Reliability_Names reliability
original file name: 2017.10.25-work_log-prelim_month-Reliability_Names_reliability.ipynb
Run the reliability calculations for prelim_month just to get lookup assessment (since it is not classification, precision and recall make no sense).
<h1>Table of Contents<span class="tocSkip"></span></h1>
<div class="toc"><ul class="toc-item"><li><span><a href="#Setup" data-toc-modified-id="Setup-1"><span class="toc-item-num">1 </span>Setup</a></span><ul class="toc-item"><li><span><a href="#Setup---Imports" data-toc-modified-id="Setup---Imports-1.1"><span class="toc-item-num">1.1 </span>Setup - Imports</a></span></li><li><span><a href="#Setup---virtualenv-jupyter-kernel" data-toc-modified-id="Setup---virtualenv-jupyter-kernel-1.2"><span class="toc-item-num">1.2 </span>Setup - virtualenv jupyter kernel</a></span></li><li><span><a href="#Setup---Initialize-Django" data-toc-modified-id="Setup---Initialize-Django-1.3"><span class="toc-item-num">1.3 </span>Setup - Initialize Django</a></span></li><li><span><a href="#Setup-R" data-toc-modified-id="Setup-R-1.4"><span class="toc-item-num">1.4 </span>Setup R</a></span></li><li><span><a href="#Setup-database" data-toc-modified-id="Setup-database-1.5"><span class="toc-item-num">1.5 </span>Setup database</a></span></li></ul></li><li><span><a href="#Reliability-data-assessment---prelim_month" data-toc-modified-id="Reliability-data-assessment---prelim_month-2"><span class="toc-item-num">2 </span>Reliability data assessment - <code>prelim_month</code></a></span></li></ul></div>
Setup
Back to Table of Contents
Setup - Imports
Back to Table of Contents
End of explanation
"""
%pwd
"""
Explanation: Setup - virtualenv jupyter kernel
Back to Table of Contents
If you are using a virtualenv, make sure that you:
have installed your virtualenv as a kernel.
choose the kernel for your virtualenv as the kernel for your notebook (Kernel --> Change kernel).
Since I use a virtualenv, need to get that activated somehow inside this notebook. One option is to run ../dev/wsgi.py in this notebook, to configure the python environment manually as if you had activated the sourcenet virtualenv. To do this, you'd make a code cell that contains:
%run ../dev/wsgi.py
This is sketchy, however, because of the changes it makes to your Python environment within the context of whatever your current kernel is. I'd worry about collisions with the actual Python 3 kernel. Better, one can install their virtualenv as a separate kernel. Steps:
activate your virtualenv:
workon sourcenet
in your virtualenv, install the package ipykernel.
pip install ipykernel
use the ipykernel python program to install the current environment as a kernel:
python -m ipykernel install --user --name <env_name> --display-name "<display_name>"
sourcenet example:
python -m ipykernel install --user --name sourcenet --display-name "sourcenet (Python 3)"
More details: http://ipython.readthedocs.io/en/stable/install/kernel_install.html
End of explanation
"""
%run ../django_init.py
"""
Explanation: Setup - Initialize Django
Back to Table of Contents
First, initialize my dev django project, so I can run code in this notebook that references my django models and can talk to the database using my project's settings.
End of explanation
"""
# Driver script: run ReliabilityNamesAnalyzer over the "prelim_month" label,
# comparing the first two coder index columns, and report the result status.
# start to support python 3:
from __future__ import unicode_literals
from __future__ import division
#==============================================================================#
# ! imports
#==============================================================================#
# grouped by functional area, then alphabetical order by package, then
# alphabetical order by name of thing being imported.
# context_analysis imports
from context_analysis.reliability.reliability_names_analyzer import ReliabilityNamesAnalyzer
#==============================================================================#
# ! logic
#==============================================================================#
# declare variables
my_analysis_instance = None
label = ""
indices_to_process = -1
result_status = ""
# make reliability instance
my_analysis_instance = ReliabilityNamesAnalyzer()
# database connection information - 2 options... Enter it here:
#my_analysis_instance.db_username = ""
#my_analysis_instance.db_password = ""
#my_analysis_instance.db_host = "localhost"
#my_analysis_instance.db_name = "sourcenet"
# Or set up the following properties in Django_Config, inside the django admins.
# All have application of: "sourcenet-db-admin":
# - db_username
# - db_password
# - db_host
# - db_port
# - db_name
# run the analyze method, see what happens.
# (Earlier runs, kept for reference; uncomment a pair to re-run one.)
#label = "prelim_reliability_test"
#indices_to_process = 3
#label = "prelim_reliability_combined_human"
#indices_to_process = 3
#label = "name_data_test_combined_human"
#indices_to_process = 3
#label = "prelim_reliability_combined_human_final"
#indices_to_process = 3
#label = "prelim_reliability_combined_all"
#indices_to_process = 4
#label = "prelim_reliability_combined_all_final"
#indices_to_process = 4
#label = "prelim_reliability_test_human"
#indices_to_process = 3
#label = "prelim_reliability_test_all"
#indices_to_process = 4
# active configuration: the "prelim_month" label with 2 coder indices
label = "prelim_month"
indices_to_process = 2
# run the analysis; returns a status string, printed below
result_status = my_analysis_instance.analyze_reliability_names( label, indices_to_process )
print( "result status: {status_string}".format( status_string = result_status ) )
"""
Explanation: Setup R
Back to Table of Contents
To allow Python to talk to R, at R prompt:
/* install packages */
install.packages( "Rserve" )
install.packages( "irr" )
/* load Rserve */
library( Rserve )
/* start server */
Rserve( args="--no-save" )
Setup database
Back to Table of Contents
Also need to either pass database connection information to names analyzer below, or store database configuration in Django_Config:
# database connection information - 2 options... Enter it here:
#my_analysis_instance.db_username = ""
#my_analysis_instance.db_password = ""
#my_analysis_instance.db_host = "localhost"
#my_analysis_instance.db_name = "sourcenet"
# Or set up the following properties in Django_Config, inside the django admins.
# All have application of: "sourcenet-db-admin":
# - db_username
# - db_password
# - db_host
# - db_port
# - db_name
Reliability data assessment - prelim_month
Back to Table of Contents
Generate reliability analysis for label "prelim_month".
End of explanation
"""
|
sgrindy/Bayesian-estimation-of-relaxation-spectra | Double_Maxwell_Uniform_prior.ipynb | mit | def H(tau):
g1 = 1; tau1 = 0.03; sd1 = 0.5;
g2 = 7; tau2 = 10; sd2 = 0.5;
term1 = g1/np.sqrt(2*sd1**2*np.pi) * np.exp(-(np.log10(tau/tau1)**2)/(2*sd1**2))
term2 = g2/np.sqrt(2*sd2**2*np.pi) * np.exp(-(np.log10(tau/tau2)**2)/(2*sd2**2))
return term1 + term2
# Number of probe frequencies and of discrete relaxation modes.
Nfreq = 50
Nmodes = 30
# Frequencies log-spaced over 10^-4..10^4 (row vector); relaxation times span
# the reciprocal range (column vector), so tau covers 1/w_max .. 1/w_min.
w = np.logspace(-4,4,Nfreq).reshape((1,Nfreq))
tau = np.logspace(-np.log10(w.max()),-np.log10(w.min()),Nmodes).reshape((Nmodes,1))
# get equivalent discrete spectrum: g_i = H(tau_i) * (log spacing of the modes)
delta_log_tau = np.log10(tau[1]/tau[0])
g_true = (H(tau) * delta_log_tau).reshape((1,Nmodes))
plt.loglog(tau,H(tau), label='Continuous spectrum')
plt.plot(tau.ravel(),g_true.ravel(), 'or', label='Equivalent discrete spectrum')
plt.legend(loc=4)
plt.xlabel(r'$\tau$')
plt.ylabel(r'$H(\tau)$ or $g$')
"""
Explanation: First, we need to set up our test data. We'll use two relaxation modes that are themselves log-normally distributed.
End of explanation
"""
# Maxwell kernels: with wt = tau*omega, Kp and Kpp map mode strengths to the
# storage (G') and loss (G'') moduli respectively.
wt = tau*w
Kp = wt**2/(1+wt**2)
Kpp = wt/(1+wt**2)
# 2% multiplicative Gaussian noise simulates experimental variance.
noise_level = 0.02
Gp_true = np.dot(g_true,Kp)
Gp_noise = Gp_true + Gp_true*noise_level*np.random.randn(Nfreq)
Gpp_true = np.dot(g_true,Kpp)
Gpp_noise = Gpp_true + Gpp_true*noise_level*np.random.randn(Nfreq)
plt.loglog(w.ravel(),Gp_true.ravel(),label="True G'")
plt.plot(w.ravel(),Gpp_true.ravel(), label='True G"')
plt.plot(w.ravel(),Gp_noise.ravel(),'xr',label="Noisy G'")
plt.plot(w.ravel(),Gpp_noise.ravel(),'+r',label='Noisy G"')
plt.xlabel(r'$\omega$')
plt.ylabel("Moduli")
plt.legend(loc=4)
"""
Explanation: Now, let's construct the moduli. We'll have both a true version and a noisy version with some random noise added to simulate experimental variance.
End of explanation
"""
# Two identical PyMC3 models, one observing the noisy moduli, one the true
# moduli, so the recovered spectra can be compared side by side later.
noisyModel = pm.Model()
with noisyModel:
    # Uniform prior on each mode strength, bounded by the data's magnitude.
    g = pm.Uniform('g', lower=Gp_noise.min()/1e4, upper=Gp_noise.max(),
                   shape=g_true.shape)
    # Separate observation-noise scales for G' and G''.
    sd1 = pm.HalfNormal('sd1',tau=1)
    sd2 = pm.HalfNormal('sd2',tau=1)
    # we'll log-weight the moduli as in other fitting methods
    logGp = pm.Normal('logGp',mu=np.log(tt.dot(g,Kp)),
                      sd=sd1, observed=np.log(Gp_noise))
    logGpp = pm.Normal('logGpp',mu=np.log(tt.dot(g,Kpp)),
                       sd=sd2, observed=np.log(Gpp_noise))
trueModel = pm.Model()
with trueModel:
    g = pm.Uniform('g', lower=Gp_true.min()/1e4, upper=Gp_true.max(),
                   shape=g_true.shape)
    sd1 = pm.HalfNormal('sd1',tau=1)
    sd2 = pm.HalfNormal('sd2',tau=1)
    # we'll log-weight the moduli as in other fitting methods
    logGp = pm.Normal('logGp',mu=np.log(tt.dot(g,Kp)),
                      sd=sd1, observed=np.log(Gp_true))
    logGpp = pm.Normal('logGpp',mu=np.log(tt.dot(g,Kpp)),
                       sd=sd2, observed=np.log(Gpp_true))
"""
Explanation: Now, we can build the model with PyMC3. I'll make 2: one with noise, and one without.
End of explanation
"""
Nsamples = 5000
# Use the MAP estimate as the starting point for sampling, for each model.
trueMapEstimate = pm.find_MAP(model=trueModel)
with trueModel:
    trueTrace = pm.sample(Nsamples, start=trueMapEstimate)
# Persist the traces as text so the sampling step can be skipped on re-runs.
pm.backends.text.dump('./Double_Maxwell_true', trueTrace)
noisyMapEstimate = pm.find_MAP(model=noisyModel)
with noisyModel:
    noisyTrace = pm.sample(Nsamples, start=noisyMapEstimate)
pm.backends.text.dump('./Double_Maxwell_noisy', noisyTrace)
"""
Explanation: Now we can sample the models to get our parameter distributions:
End of explanation
"""
# Reload the saved traces, discard the first `burn` samples, and compute
# posterior quantiles of the mode strengths for both models.
noisyTrace = pm.backends.text.load('./Double_Maxwell_noisy',model=noisyModel)
trueTrace = pm.backends.text.load('./Double_Maxwell_true', model=trueModel)
burn = 500
trueQ = pm.quantiles(trueTrace[burn:])
noisyQ = pm.quantiles(noisyTrace[burn:])
def plot_quantiles(Q,ax):
    """Plot the posterior of g on ax: 95% and 50% bands plus the median.

    Uses the module-level tau and w arrays; Q is a pm.quantiles() dict.
    """
    ax.fill_between(tau.ravel(), y1=Q['g'][2.5], y2=Q['g'][97.5], color='c',
                    alpha=0.25)
    ax.fill_between(tau.ravel(), y1=Q['g'][25], y2=Q['g'][75], color='c',
                    alpha=0.5)
    ax.plot(tau.ravel(), Q['g'][50], 'b-')
    # sampling localization lines:
    ax.axvline(x=np.exp(np.pi/2)/w.max(), color='k', linestyle='--')
    ax.axvline(x=(np.exp(np.pi/2)*w.min())**-1, color='k', linestyle='--')
fig,ax = plt.subplots(nrows=2, sharex=True,
                      subplot_kw={'xscale':'log','yscale':'log',
                                  'ylabel':'$g_i$'})
plot_quantiles(trueQ,ax[0])
plot_quantiles(noisyQ,ax[1])
# true spectrum, overlaid on both panels for reference
trueSpectrumline0 = ax[0].plot(tau.ravel(), g_true.ravel(),'xr',
                               label='True Spectrum')
trueSpectrumline1 = ax[1].plot(tau.ravel(), g_true.ravel(),'xr',
                               label='True Spectrum')
ax[0].legend(loc=4)
ax[0].set_title('Using True Moduli')
ax[1].set_xlabel(r'$\tau$')
ax[1].legend(loc=4)
ax[1].set_title('Using Noisy Moduli')
fig.set_size_inches(5,8)
fig.savefig('True,Noisy_moduli_uniform_prior.png',dpi=500)
"""
Explanation: Load trace:
End of explanation
"""
|
hvillanua/deep-learning | tensorboard/Anna_KaRNNa_Summaries.ipynb | mit | import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
"""
Explanation: Anna KaRNNa
In this notebook, I'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
This network is based off of Andrej Karpathy's post on RNNs and implementation in Torch. Also, some information here at r2rt and from Sherjil Ozair on GitHub. Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
End of explanation
"""
# Read the whole corpus and build character <-> integer lookup tables.
with open('anna.txt', 'r') as f:
    text=f.read()
vocab = set(text)
# NOTE(review): set iteration order is not fixed across interpreter runs, so
# this char->int mapping can differ between sessions; any saved checkpoint
# implicitly assumes one particular mapping — confirm before reloading.
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
# Encode the full text as an int32 array of character ids.
chars = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
text[:100]
chars[:100]
"""
Explanation: First we'll load the text file and convert it into integers for our network to use.
End of explanation
"""
def split_data(chars, batch_size, num_steps, split_frac=0.9):
    """ 
    Split character data into training and validation sets, inputs and targets for each set.
    
    Arguments
    ---------
    chars: character array
    batch_size: Size of examples in each of batch
    num_steps: Number of sequence steps to keep in the input and pass to the network
    split_frac: Fraction of batches to keep in the training set
    
    Returns train_x, train_y, val_x, val_y
    """
    slice_size = batch_size * num_steps
    n_batches = int(len(chars) / slice_size)
    
    # Drop the last few characters to make only full batches
    end = n_batches * slice_size
    x = chars[: end]
    # Targets are the inputs shifted one character over. When len(chars) is an
    # exact multiple of slice_size there is no extra character to shift into,
    # so wrap the first character around to the end (the original slicing
    # produced a short array and crashed in np.split in that case).
    if len(chars) > end:
        y = chars[1: end + 1]
    else:
        y = np.append(chars[1: end], chars[0])
    
    # Split the data into batch_size slices, then stack them into a 2D matrix 
    x = np.stack(np.split(x, batch_size))
    y = np.stack(np.split(y, batch_size))
    
    # Now x and y are arrays with dimensions batch_size x n_batches*num_steps
    
    # Split into training and validation sets, keep the first split_frac batches for training
    split_idx = int(n_batches * split_frac)
    train_x, train_y = x[:, :split_idx * num_steps], y[:, :split_idx * num_steps]
    val_x, val_y = x[:, split_idx * num_steps:], y[:, split_idx * num_steps:]
    
    return train_x, train_y, val_x, val_y
train_x, train_y, val_x, val_y = split_data(chars, 10, 200)
train_x.shape
train_x[:,:10]
"""
Explanation: Now I need to split up the data into batches, and into training and validation sets. I should be making a test set here, but I'm not going to worry about that. My test will be if the network can generate new text.
Here I'll make both input and target arrays. The targets are the same as the inputs, except shifted one character over. I'll also drop the last bit of data so that I'll only have completely full batches.
The idea here is to make a 2D matrix where the number of rows is equal to the number of batches. Each row will be one long concatenated string from the character data. We'll split this data into a training set and validation set using the split_frac keyword. This will keep 90% of the batches in the training set, the other 10% in the validation set.
End of explanation
"""
def get_batch(arrs, num_steps):
    """Yield successive windows of num_steps columns from each array in arrs.

    Every array is assumed to share the first array's shape; any leftover
    columns that do not fill a whole window are dropped.
    """
    _, total_cols = arrs[0].shape
    n_windows = total_cols // num_steps
    for start in range(0, n_windows * num_steps, num_steps):
        stop = start + num_steps
        yield [arr[:, start:stop] for arr in arrs]
def build_rnn(num_classes, batch_size=50, num_steps=50, lstm_size=128, num_layers=2,
              learning_rate=0.001, grad_clip=5, sampling=False):
    """Build the character-RNN TF graph and return its key nodes.

    num_classes: size of the character vocabulary (one-hot width).
    sampling: when True, force batch_size = num_steps = 1 so the same graph
        can be used to generate one character at a time.
    Returns a namedtuple exposing the placeholders, states, cost, predictions,
    optimizer op, and the merged TensorBoard summary node.
    """
    if sampling == True:
        batch_size, num_steps = 1, 1
    tf.reset_default_graph()
    
    # Declare placeholders we'll feed into the graph
    with tf.name_scope('inputs'):
        inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
        x_one_hot = tf.one_hot(inputs, num_classes, name='x_one_hot')
    with tf.name_scope('targets'):
        targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
        y_one_hot = tf.one_hot(targets, num_classes, name='y_one_hot')
        y_reshaped = tf.reshape(y_one_hot, [-1, num_classes])
    
    # Dropout keep probability (fed as 1.0 at validation/sampling time).
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    
    # Build the RNN layers: LSTM cells wrapped with dropout, stacked num_layers deep
    with tf.name_scope("RNN_cells"):
        lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
        drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
        cell = tf.contrib.rnn.MultiRNNCell([drop] * num_layers)
    
    with tf.name_scope("RNN_init_state"):
        initial_state = cell.zero_state(batch_size, tf.float32)
    # Run the data through the RNN layers
    with tf.name_scope("RNN_forward"):
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)
    
    final_state = state
    
    # Reshape output so it's a bunch of rows, one row for each cell output
    with tf.name_scope('sequence_reshape'):
        seq_output = tf.concat(outputs, axis=1,name='seq_output')
        output = tf.reshape(seq_output, [-1, lstm_size], name='graph_output')
    
    # Now connect the RNN outputs to a softmax layer and calculate the cost
    with tf.name_scope('logits'):
        softmax_w = tf.Variable(tf.truncated_normal((lstm_size, num_classes), stddev=0.1),
                               name='softmax_w')
        softmax_b = tf.Variable(tf.zeros(num_classes), name='softmax_b')
        logits = tf.matmul(output, softmax_w) + softmax_b
        tf.summary.histogram('softmax_w', softmax_w)
        tf.summary.histogram('softmax_b', softmax_b)
    with tf.name_scope('predictions'):
        preds = tf.nn.softmax(logits, name='predictions')
        tf.summary.histogram('predictions', preds)
    
    with tf.name_scope('cost'):
        loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped, name='loss')
        cost = tf.reduce_mean(loss, name='cost')
        tf.summary.scalar('cost', cost)
    # Optimizer for training, using gradient clipping to control exploding gradients
    with tf.name_scope('train'):
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars), grad_clip)
        train_op = tf.train.AdamOptimizer(learning_rate)
        optimizer = train_op.apply_gradients(zip(grads, tvars))
    merged = tf.summary.merge_all()
    
    # Export the nodes as one namedtuple so callers can do model.inputs etc.
    export_nodes = ['inputs', 'targets', 'initial_state', 'final_state',
                    'keep_prob', 'cost', 'preds', 'optimizer', 'merged']
    Graph = namedtuple('Graph', export_nodes)
    local_dict = locals()
    graph = Graph(*[local_dict[each] for each in export_nodes])
    
    return graph
"""
Explanation: I'll write another function to grab batches out of the arrays made by split data. Here each batch will be a sliding window on these arrays with size batch_size X num_steps. For example, if we want our network to train on a sequence of 100 characters, num_steps = 100. For the next batch, we'll shift this window the next sequence of num_steps characters. In this way we can feed batches to the network and the cell states will continue through on each batch.
End of explanation
"""
# Training hyperparameters; lstm_size and num_layers set model capacity,
# learning_rate the Adam step size used in build_rnn.
batch_size = 100
num_steps = 100
lstm_size = 512
num_layers = 2
learning_rate = 0.001
"""
Explanation: Hyperparameters
Here I'm defining the hyperparameters for the network. The two you probably haven't seen before are lstm_size and num_layers. These set the number of hidden units in the LSTM layers and the number of LSTM layers, respectively. Of course, making these bigger will improve the network's performance but you'll have to watch out for overfitting. If your validation loss is much larger than the training loss, you're probably overfitting. Decrease the size of the network or decrease the dropout keep probability.
End of explanation
"""
!mkdir -p checkpoints/anna
epochs = 10
save_every_n = 100
train_x, train_y, val_x, val_y = split_data(chars, batch_size, num_steps)
model = build_rnn(len(vocab),
                  batch_size=batch_size,
                  num_steps=num_steps,
                  learning_rate=learning_rate,
                  lstm_size=lstm_size,
                  num_layers=num_layers)
saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Separate writers so TensorBoard shows train vs. validation curves.
    train_writer = tf.summary.FileWriter('./logs/2/train', sess.graph)
    test_writer = tf.summary.FileWriter('./logs/2/test')
    
    # Use the line below to load a checkpoint and resume training
    #saver.restore(sess, 'checkpoints/anna20.ckpt')
    
    n_batches = int(train_x.shape[1]/num_steps)
    iterations = n_batches * epochs
    for e in range(epochs):
        
        # Train network; the LSTM state is carried across batches in an epoch
        new_state = sess.run(model.initial_state)
        loss = 0
        for b, (x, y) in enumerate(get_batch([train_x, train_y], num_steps), 1):
            iteration = e*n_batches + b
            start = time.time()
            feed = {model.inputs: x,
                    model.targets: y,
                    model.keep_prob: 0.5,
                    model.initial_state: new_state}
            summary, batch_loss, new_state, _ = sess.run([model.merged, model.cost, 
                                                          model.final_state, model.optimizer], 
                                                          feed_dict=feed)
            loss += batch_loss
            end = time.time()
            # loss/b is the running average training loss over this epoch
            print('Epoch {}/{} '.format(e+1, epochs),
                  'Iteration {}/{}'.format(iteration, iterations),
                  'Training loss: {:.4f}'.format(loss/b),
                  '{:.4f} sec/batch'.format((end-start)))
        
            train_writer.add_summary(summary, iteration)
            
            if (iteration%save_every_n == 0) or (iteration == iterations):
                
                # Check performance, notice dropout has been set to 1
                val_loss = []
                # NOTE(review): new_state is reset and then holds the
                # validation-pass state; training resumes from it rather than
                # the pre-validation training state — confirm this is intended.
                new_state = sess.run(model.initial_state)
                for x, y in get_batch([val_x, val_y], num_steps):
                    feed = {model.inputs: x,
                            model.targets: y,
                            model.keep_prob: 1.,
                            model.initial_state: new_state}
                    summary, batch_loss, new_state = sess.run([model.merged, model.cost, 
                                                               model.final_state], feed_dict=feed)
                    val_loss.append(batch_loss)
                
                test_writer.add_summary(summary, iteration)
                print('Validation loss:', np.mean(val_loss),
                      'Saving checkpoint!')
                #saver.save(sess, "checkpoints/anna/i{}_l{}_{:.3f}.ckpt".format(iteration, lstm_size, np.mean(val_loss)))
tf.train.get_checkpoint_state('checkpoints/anna')
"""
Explanation: Training
Time for training, which is pretty straightforward. Here I pass in some data, and get an LSTM state back. Then I pass that state back in to the network so the next batch can continue the state from the previous batch. And every so often (set by save_every_n) I calculate the validation loss and save a checkpoint.
End of explanation
"""
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample one character index from the `top_n` most likely predictions.

    All but the `top_n` largest probabilities are zeroed out, the survivors
    are renormalized, and a single index is drawn from that distribution.
    """
    probs = np.squeeze(preds)
    below_cutoff = np.argsort(probs)[:-top_n]
    probs[below_cutoff] = 0
    probs = probs / np.sum(probs)
    return np.random.choice(vocab_size, 1, p=probs)[0]
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
    """Generate text from a trained checkpoint.

    The network is first primed with the characters of `prime` to build up
    an initial LSTM state, then repeatedly fed its own sampled output.

    Args:
        checkpoint: path of the checkpoint to restore.
        n_samples: number of characters to generate after the prime.
        lstm_size: hidden size of the trained LSTM (must match training).
        vocab_size: number of characters in the vocabulary.
        prime: seed text used to warm up the network state.

    Returns:
        The prime text followed by the generated characters, as one string.
    """
    # Bug fix: the original unconditionally overwrote `prime` with "Far",
    # which made the parameter useless.
    samples = [c for c in prime]
    model = build_rnn(vocab_size, lstm_size=lstm_size, sampling=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)
        for c in prime:
            x = np.zeros((1, 1))
            x[0, 0] = vocab_to_int[c]
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.preds, model.final_state],
                                        feed_dict=feed)

        # Consistency fix: use the `vocab_size` parameter rather than the
        # global `len(vocab)`.
        c = pick_top_n(preds, vocab_size)
        samples.append(int_to_vocab[c])

        for i in range(n_samples):
            x[0, 0] = c
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: new_state}
            preds, new_state = sess.run([model.preds, model.final_state],
                                        feed_dict=feed)
            c = pick_top_n(preds, vocab_size)
            samples.append(int_to_vocab[c])
    return ''.join(samples)
checkpoint = "checkpoints/anna/i3560_l512_1.122.ckpt"
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i200_l512_2.432.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i600_l512_1.750.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = "checkpoints/anna/i1000_l512_1.484.ckpt"
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
"""
Explanation: Sampling
Now that the network is trained, we'll can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one, to predict the next one. And we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
End of explanation
"""
|
tensorflow/docs-l10n | site/ko/tutorials/structured_data/imbalanced_data.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2019 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
from tensorflow import keras
import os
import tempfile
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
mpl.rcParams['figure.figsize'] = (12, 10)
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
"""
Explanation: 불균형 데이터 분류
<table class="tfo-notebook-buttons" align="left">
<td> <a target="_blank" href="https://www.tensorflow.org/tutorials/structured_data/imbalanced_data"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a> </td>
<td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/tutorials/structured_data/imbalanced_data.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행하기</a> </td>
<td> <a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/tutorials/structured_data/imbalanced_data.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub에서 소스 </a> </td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/tutorials/structured_data/imbalanced_data.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드</a></td>
</table>
이 튜토리얼에서는 한 클래스의 예시의 수가 다른 클래스보다 훨씬 많은 매우 불균형적인 데이터세트를 분류하는 방법을 소개합니다. Kaggle에서 호스팅되는 신용 카드 부정 행위 탐지 데이터세트를 사용하여 작업해 보겠습니다. 총 284,807건의 거래에서 492건의 부정 거래를 탐지하는 것을 목표로 합니다. Keras를 사용하여 모델 및 클래스 가중치를 정의하여 불균형 데이터에서 모델을 학습시켜 보겠습니다.
이 튜토리얼에는 다음을 수행하기 위한 완전한 코드가 포함되어 있습니다.
Pandas를 사용하여 CSV 파일 로드.
학습, 검증 및 테스트세트 작성.
Keras를 사용하여 모델을 정의하고 학습(클래스 가중치 설정 포함)
다양한 측정 기준(정밀도 및 재현율 포함)을 사용하여 모델 평가
다음과 같은 불균형 데이터를 처리하기 위한 일반적인 기술 사용
클래스 가중치
오버샘플링
설정
End of explanation
"""
file = tf.keras.utils
raw_df = pd.read_csv('https://storage.googleapis.com/download.tensorflow.org/data/creditcard.csv')
raw_df.head()
raw_df[['Time', 'V1', 'V2', 'V3', 'V4', 'V5', 'V26', 'V27', 'V28', 'Amount', 'Class']].describe()
"""
Explanation: 데이터 처리 및 탐색
Kaggle 신용 카드 부정 행위 데이터 세트
Pandas는 구조적 데이터를 로드하고 처리하는 데 유용한 여러 유틸리티가 포함된 Python 라이브러리입니다. CSV를 Pandas 데이터 프레임으로 다운로드하는 데 사용할 수 있습니다.
참고: 이 데이터세트는 빅데이터 마이닝 및 부정 행위 감지에 대한 Worldline과 ULB(Université Libre de Bruxelles) Machine Learning Group의 연구 협업을 통해 수집 및 분석되었습니다. 관련 주제에 관한 현재 및 과거 프로젝트에 대한 자세한 내용은 여기를 참조하거나 DefeatFraud 프로젝트 페이지에서 확인할 수 있습니다.
End of explanation
"""
neg, pos = np.bincount(raw_df['Class'])
total = neg + pos
print('Examples:\n Total: {}\n Positive: {} ({:.2f}% of total)\n'.format(
total, pos, 100 * pos / total))
"""
Explanation: 클래스 레이블 불균형 검사
데이터세트 불균형을 살펴보겠습니다:
End of explanation
"""
cleaned_df = raw_df.copy()
# You don't want the `Time` column.
cleaned_df.pop('Time')
# The `Amount` column covers a huge range. Convert to log-space.
eps = 0.001 # 0 => 0.1¢
cleaned_df['Log Ammount'] = np.log(cleaned_df.pop('Amount')+eps)
"""
Explanation: 이를 통해 양성 샘플 일부를 확인할 수 있습니다.
데이터 정리, 분할 및 정규화
원시 데이터에는 몇 가지 문제가 있습니다. 먼저 Time 및 Amount 열이 매우 가변적이므로 직접 사용할 수 없습니다. (의미가 명확하지 않으므로) Time 열을 삭제하고 Amount 열의 로그를 가져와 범위를 줄입니다.
End of explanation
"""
# Use a utility from sklearn to split and shuffle your dataset.
train_df, test_df = train_test_split(cleaned_df, test_size=0.2)
train_df, val_df = train_test_split(train_df, test_size=0.2)
# Form np arrays of labels and features.
train_labels = np.array(train_df.pop('Class'))
bool_train_labels = train_labels != 0
val_labels = np.array(val_df.pop('Class'))
test_labels = np.array(test_df.pop('Class'))
train_features = np.array(train_df)
val_features = np.array(val_df)
test_features = np.array(test_df)
"""
Explanation: 데이터세트를 학습, 검증 및 테스트 세트로 분할합니다. 검증 세트는 모델 피팅 중에 사용되어 손실 및 메트릭을 평가하지만 해당 모델은 이 데이터에 적합하지 않습니다. 테스트 세트는 훈련 단계에서는 전혀 사용되지 않으며 마지막에만 사용되어 모델이 새 데이터로 일반화되는 정도를 평가합니다. 이는 훈련 데이터가 부족하여 과대적합이 크게 문제가 되는 불균형 데이터세트에서 특히 중요합니다.
End of explanation
"""
scaler = StandardScaler()
train_features = scaler.fit_transform(train_features)
val_features = scaler.transform(val_features)
test_features = scaler.transform(test_features)
train_features = np.clip(train_features, -5, 5)
val_features = np.clip(val_features, -5, 5)
test_features = np.clip(test_features, -5, 5)
print('Training labels shape:', train_labels.shape)
print('Validation labels shape:', val_labels.shape)
print('Test labels shape:', test_labels.shape)
print('Training features shape:', train_features.shape)
print('Validation features shape:', val_features.shape)
print('Test features shape:', test_features.shape)
"""
Explanation: sklearn StandardScaler를 사용하여 입력 특성을 정규화하면 평균은 0으로, 표준 편차는 1로 설정됩니다.
참고: StandardScaler는 모델이 유효성 검사 또는 테스트 세트를 참고하는지 여부를 확인하기 위해 train_features를 사용하는 경우에만 적합합니다.
End of explanation
"""
pos_df = pd.DataFrame(train_features[ bool_train_labels], columns=train_df.columns)
neg_df = pd.DataFrame(train_features[~bool_train_labels], columns=train_df.columns)
sns.jointplot(pos_df['V5'], pos_df['V6'],
kind='hex', xlim=(-5,5), ylim=(-5,5))
plt.suptitle("Positive distribution")
sns.jointplot(neg_df['V5'], neg_df['V6'],
kind='hex', xlim=(-5,5), ylim=(-5,5))
_ = plt.suptitle("Negative distribution")
"""
Explanation: 주의: 모델을 배포하려면 전처리 계산을 유지하는 것이 중요합니다. 따라서 레이어로 구현하고 내보내기 전에 모델에 연결하는 것이 가장 쉬운 방법입니다.
데이터 분포 살펴보기
다음으로 몇 가지 특성에 대한 양 및 음의 예시 분포를 비교해 보겠습니다. 이 때 스스로 검토할 사항은 다음과 같습니다.
이와 같은 분포가 합리적인가?
예, 이미 입력을 정규화했으며 대부분 +/- 2 범위에 집중되어 있습니다.
분포 간 차이를 알 수 있습니까?
예, 양의 예에는 극단적 값의 비율이 훨씬 높습니다.
End of explanation
"""
# Metrics tracked during training and evaluation. The `name=` strings are the
# keys used later (e.g. history.history['auc'], early stopping on 'val_auc').
METRICS = [
      keras.metrics.TruePositives(name='tp'),
      keras.metrics.FalsePositives(name='fp'),
      keras.metrics.TrueNegatives(name='tn'),
      keras.metrics.FalseNegatives(name='fn'),
      keras.metrics.BinaryAccuracy(name='accuracy'),
      keras.metrics.Precision(name='precision'),
      keras.metrics.Recall(name='recall'),
      keras.metrics.AUC(name='auc'),
      keras.metrics.AUC(name='prc', curve='PR'),  # precision-recall curve
]
def make_model(metrics=METRICS, output_bias=None):
    """Build and compile the baseline binary classifier.

    One 16-unit ReLU hidden layer with dropout against overfitting, followed
    by a sigmoid output whose bias can be pre-seeded — useful for imbalanced
    data so the initial predictions match the class prior.
    """
    bias_initializer = None
    if output_bias is not None:
        bias_initializer = tf.keras.initializers.Constant(output_bias)

    layers = [
        keras.layers.Dense(
            16, activation='relu',
            input_shape=(train_features.shape[-1],)),
        keras.layers.Dropout(0.5),
        keras.layers.Dense(
            1, activation='sigmoid',
            bias_initializer=bias_initializer),
    ]
    model = keras.Sequential(layers)
    model.compile(
        optimizer=keras.optimizers.Adam(learning_rate=1e-3),
        loss=keras.losses.BinaryCrossentropy(),
        metrics=metrics)
    return model
"""
Explanation: 모델 및 메트릭 정의
조밀하게 연결된 숨겨진 레이어, 과대적합을 줄이기 위한 드롭아웃 레이어, 거래 사기 가능성을 반환하는 시그모이드 출력 레이어로 간단한 신경망을 생성하는 함수를 정의합니다.
End of explanation
"""
EPOCHS = 100
BATCH_SIZE = 2048
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_auc',
verbose=1,
patience=10,
mode='max',
restore_best_weights=True)
model = make_model()
model.summary()
"""
Explanation: 유용한 메트릭 이해하기
위에서 정의한 몇 가지 메트릭은 모델을 통해 계산할 수 있으며 성능을 평가할 때 유용합니다.
허위 음성과 허위 양성은 잘못 분류된 샘플입니다.
실제 음성과 실제 양성은 올바로 분류된 샘플입니다.
정확도는 올바로 분류된 예의 비율입니다.
$\frac{\text{true samples}}{\text{total samples}}$
정밀도는 올바르게 분류된 예측 양성의 비율입니다.
$\frac{\text{true positives}}{\text{true positives + false positives}}$
재현율은 올바르게 분류된 실제 양성의 비율입니다.
$\frac{\text{true positives}}{\text{true positives + false negatives}}$
AUC는 ROC-AUC(Area Under the Curve of a Receiver Operating Characteristic) 곡선을 의미합니다. 이 메트릭은 분류자가 임의의 양성 샘플 순위를 임의의 음성 샘플 순위보다 높게 지정할 확률과 같습니다.
AUPRC는 PR curve AUC를 의미합니다. 이 메트릭은 다양한 확률 임계값에 대한 정밀도-재현율 쌍을 계산합니다.
참고: 정확도는 이 작업에 유용한 측정 항목이 아닙니다. 항상 False를 예측해야 이 작업에서 99.8% 이상의 정확도를 얻을 수 있습니다.
더 읽어보기:
참 vs. 거짓, 양성 vs. 음성
정확성
정밀도와 재현율
ROC-AUC
Precision-Recall과 ROC 곡선의 관계
기준 모델
모델 구축
이제 앞서 정의한 함수를 사용하여 모델을 만들고 학습해 보겠습니다. 모델은 기본 배치 크기인 2048보다 큰 배치 크기를 사용하는 것이 좋습니다. 각 배치에서 양성 샘플을 일부 포함시켜 적절한 기회를 얻는 것이 중요합니다. 배치 크기가 너무 작으면 부정 거래 예시를 제대로 학습할 수 없습니다.
참고: 이 모델은 클래스의 불균형을 잘 다루지 못합니다. 이를 이 튜토리얼의 뒷부분에서 개선하게 될 겁니다.
End of explanation
"""
model.predict(train_features[:10])
"""
Explanation: 모델을 실행하여 테스트해보겠습니다.
End of explanation
"""
results = model.evaluate(train_features, train_labels, batch_size=BATCH_SIZE, verbose=0)
print("Loss: {:0.4f}".format(results[0]))
"""
Explanation: 선택사항: 초기 바이어스를 올바로 설정합니다.
이와 같은 초기 추측은 적절하지 않습니다. 데이터세트가 불균형하다는 것을 알고 있으니까요. 출력 레이어의 바이어스를 설정하여 해당 데이터세트를 반영하면(참조: 신경망 훈련 방법: "init well") 초기 수렴에 유용할 수 있습니다.
기본 바이어스 초기화를 사용하면 손실은 약 math.log(2) = 0.69314
End of explanation
"""
initial_bias = np.log([pos/neg])
initial_bias
"""
Explanation: 올바른 바이어스 설정은 다음에서 가능합니다.
$$ p_0 = pos/(pos + neg) = 1/(1+e^{-b_0}) $$ $$ b_0 = -log_e(1/p_0 - 1) $$ $$ b_0 = log_e(pos/neg)$$
End of explanation
"""
model = make_model(output_bias=initial_bias)
model.predict(train_features[:10])
"""
Explanation: 이를 초기 바이어스로 설정하면 모델은 훨씬 더 합리적으로 초기 추측을 할 수 있습니다.
pos/total = 0.0018에 가까울 것입니다.
End of explanation
"""
results = model.evaluate(train_features, train_labels, batch_size=BATCH_SIZE, verbose=0)
print("Loss: {:0.4f}".format(results[0]))
"""
Explanation: 이 초기화를 통해서 초기 손실은 대략 다음과 같아야합니다.:
$$-p_0log(p_0)-(1-p_0)log(1-p_0) = 0.01317$$
End of explanation
"""
initial_weights = os.path.join(tempfile.mkdtemp(), 'initial_weights')
model.save_weights(initial_weights)
"""
Explanation: 이 초기 손실은 단순한 상태의 초기화에서 발생했을 때 보다 약 50배 적습니다.
이런 식으로 모델은 처음 몇 epoch를 쓰며 양성 예시가 거의 없다는 것을 학습할 필요는 없습니다. 이렇게 하면 학습을 하면서 손실된 플롯을 더 쉽게 파악할 수 있습니다.
초기 가중치 체크 포인트
다양한 학습 과정을 비교하려면 이 초기 모델의 가중치를 체크포인트 파일에 보관하고 학습 전에 각 모델에 로드합니다.
End of explanation
"""
model = make_model()
model.load_weights(initial_weights)
model.layers[-1].bias.assign([0.0])
zero_bias_history = model.fit(
train_features,
train_labels,
batch_size=BATCH_SIZE,
epochs=20,
validation_data=(val_features, val_labels),
verbose=0)
model = make_model()
model.load_weights(initial_weights)
careful_bias_history = model.fit(
train_features,
train_labels,
batch_size=BATCH_SIZE,
epochs=20,
validation_data=(val_features, val_labels),
verbose=0)
def plot_loss(history, label, n):
    """Plot train and validation loss on a log scale.

    The n-th color of the default cycle is used so several runs can be
    overlaid on one figure; validation curves are drawn dashed.
    """
    color = colors[n]
    epochs_seen = history.epoch
    plt.semilogy(epochs_seen, history.history['loss'],
                 color=color, label='Train '+label)
    plt.semilogy(epochs_seen, history.history['val_loss'],
                 color=color, label='Val '+label,
                 linestyle="--")
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
plot_loss(zero_bias_history, "Zero Bias", 0)
plot_loss(careful_bias_history, "Careful Bias", 1)
"""
Explanation: 바이어스 수정이 도움이 되는지 확인하기
계속 진행하기 전에 조심스러운 바이어스 초기화가 실제로 도움이 되었는지 빠르게 확인하십시오
정교한 초기화를 한 모델과 하지 않은 모델을 20 epoch 학습시키고 손실을 비교합니다.
End of explanation
"""
model = make_model()
model.load_weights(initial_weights)
baseline_history = model.fit(
train_features,
train_labels,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
callbacks=[early_stopping],
validation_data=(val_features, val_labels))
"""
Explanation: 위의 그림에서 명확히 알 수 있듯이, 검증 손실 측면에서 이와 같은 정교한 초기화에는 분명한 이점이 있습니다.
모델 학습
End of explanation
"""
def plot_metrics(history):
    """Plot loss/AUC/precision/recall for train and validation in a 2x2 grid."""
    for plot_idx, metric in enumerate(['loss', 'auc', 'precision', 'recall'], start=1):
        plt.subplot(2, 2, plot_idx)
        axis_label = metric.replace("_", " ").capitalize()
        plt.plot(history.epoch, history.history[metric],
                 color=colors[0], label='Train')
        plt.plot(history.epoch, history.history['val_'+metric],
                 color=colors[0], linestyle="--", label='Val')
        plt.xlabel('Epoch')
        plt.ylabel(axis_label)
        # Per-metric y-limits: loss starts at 0, AUC is zoomed into [0.8, 1],
        # precision/recall span the full [0, 1] range.
        if metric == 'loss':
            plt.ylim([0, plt.ylim()[1]])
        elif metric == 'auc':
            plt.ylim([0.8, 1])
        else:
            plt.ylim([0, 1])
        plt.legend()
plot_metrics(baseline_history)
"""
Explanation: 학습 이력 확인
이 섹션에서는 훈련 및 검증 세트에서 모델의 정확도 및 손실에 대한 플롯을 생성합니다. 이는 과대적합 확인에 유용하며 과대적합 및 과소적합 튜토리얼에서 자세히 알아볼 수 있습니다.
또한, 위에서 만든 모든 메트릭에 대해 다음과 같은 플롯을 생성할 수 있습니다. 거짓 음성이 예시에 포함되어 있습니다.
End of explanation
"""
train_predictions_baseline = model.predict(train_features, batch_size=BATCH_SIZE)
test_predictions_baseline = model.predict(test_features, batch_size=BATCH_SIZE)
def plot_cm(labels, predictions, p=0.5):
    """Draw a confusion-matrix heatmap at threshold `p` and print the counts."""
    cm = confusion_matrix(labels, predictions > p)
    plt.figure(figsize=(5, 5))
    sns.heatmap(cm, annot=True, fmt="d")
    plt.title('Confusion matrix @{:.2f}'.format(p))
    plt.ylabel('Actual label')
    plt.xlabel('Predicted label')

    # cm rows are actual classes, columns are predicted classes.
    tn, fp = cm[0][0], cm[0][1]
    fn, tp = cm[1][0], cm[1][1]
    print('Legitimate Transactions Detected (True Negatives): ', tn)
    print('Legitimate Transactions Incorrectly Detected (False Positives): ', fp)
    print('Fraudulent Transactions Missed (False Negatives): ', fn)
    print('Fraudulent Transactions Detected (True Positives): ', tp)
    print('Total Fraudulent Transactions: ', np.sum(cm[1]))
"""
Explanation: 참고: 검증 곡선은 일반적으로 훈련 곡선보다 성능이 좋습니다. 이는 주로 모델을 평가할 때 drop out 레이어가 활성화 되지 않았기 때문에 발생합니다.
메트릭 평가
혼동 행렬을 사용하여 실제 레이블과 예측 레이블을 요약할 수 있습니다. 여기서 X축은 예측 레이블이고 Y축은 실제 레이블입니다.
End of explanation
"""
baseline_results = model.evaluate(test_features, test_labels,
batch_size=BATCH_SIZE, verbose=0)
for name, value in zip(model.metrics_names, baseline_results):
print(name, ': ', value)
print()
plot_cm(test_labels, test_predictions_baseline)
"""
Explanation: 테스트 데이터세트에서 모델을 평가하고 위에서 생성한 메트릭 결과를 표시합니다.
End of explanation
"""
def plot_roc(name, labels, predictions, **kwargs):
    """Plot a ROC curve (in percent), zoomed into the low false-positive region."""
    false_pos, true_pos, _ = sklearn.metrics.roc_curve(labels, predictions)
    plt.plot(100*false_pos, 100*true_pos, label=name, linewidth=2, **kwargs)
    plt.xlabel('False positives [%]')
    plt.ylabel('True positives [%]')
    # Zoom into the interesting corner of the curve.
    plt.xlim([-0.5, 20])
    plt.ylim([80, 100.5])
    plt.grid(True)
    plt.gca().set_aspect('equal')
plot_roc("Train Baseline", train_labels, train_predictions_baseline, color=colors[0])
plot_roc("Test Baseline", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')
plt.legend(loc='lower right')
"""
Explanation: 만약 모델이 모두 완벽하게 예측했다면 대각행렬이 되어 예측 오류를 보여주며 대각선 값은 0이 됩니다. 이와 같은 경우, 매트릭에 거짓 양성이 상대적으로 낮음을 확인할 수 있으며 이를 통해 플래그가 잘못 지정된 합법적인 거래가 상대적으로 적다는 것을 알 수 있습니다. 그러나 거짓 양성 수를 늘리더라도 거짓 음성을 더 낮추고 싶을 수 있습니다. 거짓 음성은 부정 거래가 발생할 수 있지만, 거짓 양성은 고객에게 이메일을 보내 카드 활동 확인을 요청할 수 있기 때문에 거짓 음성을 낮추는 것이 더 바람직할 수 있기 때문입니다.
ROC 플로팅
이제 ROC을 플로팅 하십시오. 이 그래프는 출력 임계값을 조정하기만 해도 모델이 도달할 수 있는 성능 범위를 한눈에 보여주기 때문에 유용합니다.
End of explanation
"""
def plot_prc(name, labels, predictions, **kwargs):
    """Plot a precision-recall curve for the given labels and scores.

    Bug fix: the original plotted `(precision, recall)` while labelling the
    x-axis 'Recall' and the y-axis 'Precision', so the data and the axis
    labels were swapped. Recall now goes on the x-axis to match the labels
    (the conventional PR-curve orientation).
    """
    precision, recall, _ = sklearn.metrics.precision_recall_curve(labels, predictions)

    plt.plot(recall, precision, label=name, linewidth=2, **kwargs)
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.grid(True)
    ax = plt.gca()
    ax.set_aspect('equal')
plot_prc("Train Baseline", train_labels, train_predictions_baseline, color=colors[0])
plot_prc("Test Baseline", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')
plt.legend(loc='lower right')
"""
Explanation: AUPRC 플로팅
Now plot the AUPRC. Area under the interpolated precision-recall curve, obtained by plotting (recall, precision) points for different values of the classification threshold. Depending on how it's calculated, PR AUC may be equivalent to the average precision of the model.
End of explanation
"""
# Scaling by total/2 helps keep the loss to a similar magnitude.
# The sum of the weights of all examples stays the same.
weight_for_0 = (1 / neg) * (total / 2.0)
weight_for_1 = (1 / pos) * (total / 2.0)
class_weight = {0: weight_for_0, 1: weight_for_1}
print('Weight for class 0: {:.2f}'.format(weight_for_0))
print('Weight for class 1: {:.2f}'.format(weight_for_1))
"""
Explanation: 정밀도가 비교적 높은 것 같지만 재현율과 ROC 곡선(AUC) 아래 면적이 높지 않습니다. 분류자가 정밀도와 재현율 모두를 최대화하려고 하면 종종 어려움에 직면하는데, 불균형 데이터세트로 작업할 때 특히 그렇습니다. 관심있는 문제의 맥락에서 다른 유형의 오류 비용을 고려하는 것이 중요합니다. 이 예시에서 거짓 음성(부정 거래를 놓친 경우)은 금전적 비용이 들 수 있지만 , 거짓 양성(거래가 사기 행위로 잘못 표시됨)은 사용자 만족도를 감소시킬 수 있습니다.
클래스 가중치
클래스 가중치 계산
목표는 부정 거래를 식별하는 것이지만, 작업할 수 있는 양성 샘플이 많지 않지 않기 때문에 분류자는 이용 가능한 몇 가지 예에 가중치를 두고자 할 것입니다. 매개 변수를 통해 각 클래스에 대한 Keras 가중치를 전달한다면 이 작업을 수행할 수 있습니다. 이로 인해 모델은 더 적은 클래스 예시에 "더 많은 관심을 기울일" 수 있습니다.
End of explanation
"""
weighted_model = make_model()
weighted_model.load_weights(initial_weights)
weighted_history = weighted_model.fit(
train_features,
train_labels,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
callbacks=[early_stopping],
validation_data=(val_features, val_labels),
# The class weights go here
class_weight=class_weight)
"""
Explanation: 클래스 가중치로 모델 교육
이제 해당 모델이 예측에 어떤 영향을 미치는지 확인하기 위하여 클래스 가중치로 모델을 재 교육하고 평가해 보십시오.
참고: class_weights를 사용하면 손실 범위가 변경됩니다. 이는 옵티마이저에 따라 훈련의 안정성에 영향을 미칠 수 있습니다. tf.keras.optimizers.SGD와 같이 단계 크기가 그래디언트의 크기에 따라 달라지는 옵티마이저는 실패할 수 있습니다. 여기서 사용된 옵티마이저인 tf.keras.optimizers.Adam은 스케일링 변경의 영향을 받지 않습니다. 또한, 가중치로 인해 총 손실은 두 모델 간에 비교할 수 없습니다.
End of explanation
"""
plot_metrics(weighted_history)
"""
Explanation: 학습 이력 조회
End of explanation
"""
train_predictions_weighted = weighted_model.predict(train_features, batch_size=BATCH_SIZE)
test_predictions_weighted = weighted_model.predict(test_features, batch_size=BATCH_SIZE)
weighted_results = weighted_model.evaluate(test_features, test_labels,
batch_size=BATCH_SIZE, verbose=0)
for name, value in zip(weighted_model.metrics_names, weighted_results):
print(name, ': ', value)
print()
plot_cm(test_labels, test_predictions_weighted)
"""
Explanation: 매트릭 평가
End of explanation
"""
plot_roc("Train Baseline", train_labels, train_predictions_baseline, color=colors[0])
plot_roc("Test Baseline", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')
plot_roc("Train Weighted", train_labels, train_predictions_weighted, color=colors[1])
plot_roc("Test Weighted", test_labels, test_predictions_weighted, color=colors[1], linestyle='--')
plt.legend(loc='lower right')
"""
Explanation: 여기서 클래스 가중치를 사용하면 거짓 양성이 더 많기 때문에 정확도와 정밀도는 더 낮지만, 반대로 참 양성이 많으므로 재현율과 AUC는 더 높다는 것을 알 수 있습니다. 정확도가 낮음에도 불구하고 이 모델은 재현율이 더 높습니다(더 많은 부정 거래 식별). 물론 두 가지 유형의 오류 모두 비용이 발생합니다(많은 합법 거래를 사기로 표시하여 사용자를 번거롭게 하는 것은 바람직하지 않으므로). 따라서, 여러 유형 오류 간 절충 사항을 신중하게 고려해야 합니다.
ROC 플로팅
End of explanation
"""
plot_prc("Train Baseline", train_labels, train_predictions_baseline, color=colors[0])
plot_prc("Test Baseline", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')
plot_prc("Train Weighted", train_labels, train_predictions_weighted, color=colors[1])
plot_prc("Test Weighted", test_labels, test_predictions_weighted, color=colors[1], linestyle='--')
plt.legend(loc='lower right')
"""
Explanation: AUPRC 플로팅
End of explanation
"""
pos_features = train_features[bool_train_labels]
neg_features = train_features[~bool_train_labels]
pos_labels = train_labels[bool_train_labels]
neg_labels = train_labels[~bool_train_labels]
"""
Explanation: 오버샘플링
소수 계급 과대 표본
관련된 접근 방식은 소수 클래스를 오버 샘플링 하여 데이터 세트를 리 샘플링 하는 것입니다.
End of explanation
"""
ids = np.arange(len(pos_features))
choices = np.random.choice(ids, len(neg_features))
res_pos_features = pos_features[choices]
res_pos_labels = pos_labels[choices]
res_pos_features.shape
resampled_features = np.concatenate([res_pos_features, neg_features], axis=0)
resampled_labels = np.concatenate([res_pos_labels, neg_labels], axis=0)
order = np.arange(len(resampled_labels))
np.random.shuffle(order)
resampled_features = resampled_features[order]
resampled_labels = resampled_labels[order]
resampled_features.shape
"""
Explanation: NumPy 사용
긍정적인 예에서 적절한 수의 임의 인덱스를 선택하여 데이터 세트의 균형을 수동으로 조정할 수 있습니다.:
End of explanation
"""
BUFFER_SIZE = 100000
def make_ds(features, labels):
    """Build an endlessly repeating, shuffled tf.data pipeline over (features, labels)."""
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    return dataset.shuffle(BUFFER_SIZE).repeat()
pos_ds = make_ds(pos_features, pos_labels)
neg_ds = make_ds(neg_features, neg_labels)
"""
Explanation: tf.data 사용
tf.data를 사용하는 경우 균형있는 예를 생성하는 가장 쉬운 방법은 positive와 negative 데이터세트로 시작하여 이들을 병합하는 것입니다. tf.data guide에서 더 많은 예를 참조하시기 바랍니다.
End of explanation
"""
for features, label in pos_ds.take(1):
print("Features:\n", features.numpy())
print()
print("Label: ", label.numpy())
"""
Explanation: 각 데이터 세트는 (feature, label) 쌍으로 되어 있습니다.
End of explanation
"""
resampled_ds = tf.data.experimental.sample_from_datasets([pos_ds, neg_ds], weights=[0.5, 0.5])
resampled_ds = resampled_ds.batch(BATCH_SIZE).prefetch(2)
for features, label in resampled_ds.take(1):
print(label.numpy().mean())
"""
Explanation: experimental.sample_from_datasets 를 사용하여 두 가지를 병합합니다.:
End of explanation
"""
resampled_steps_per_epoch = np.ceil(2.0*neg/BATCH_SIZE)
resampled_steps_per_epoch
"""
Explanation: 이 데이터 세트를 사용하려면 epoch당 스텝 수가 필요합니다.
이 경우 "epoch"의 정의는 명확하지 않습니다. 각 음성 예시를 한 번 볼 때 필요한 배치 수라고 해봅시다.
End of explanation
"""
resampled_model = make_model()
resampled_model.load_weights(initial_weights)
# Reset the bias to zero, since this dataset is balanced.
output_layer = resampled_model.layers[-1]
output_layer.bias.assign([0])
val_ds = tf.data.Dataset.from_tensor_slices((val_features, val_labels)).cache()
val_ds = val_ds.batch(BATCH_SIZE).prefetch(2)
resampled_history = resampled_model.fit(
resampled_ds,
epochs=EPOCHS,
steps_per_epoch=resampled_steps_per_epoch,
callbacks=[early_stopping],
validation_data=val_ds)
"""
Explanation: 오버 샘플링 된 데이터에 대한 학습
이제 클래스 가중치를 사용하는 대신 리 샘플링 된 데이터 세트로 모델을 학습하여 이러한 방법이 어떻게 비교되는지 확인하십시오.
참고: 긍정적인 예를 복제하여 데이터가 균형을 이루었기 때문에 총 데이터 세트 크기가 더 크고 각 세대가 더 많은 학습 단계를 위해 실행됩니다.
End of explanation
"""
plot_metrics(resampled_history)
"""
Explanation: 만약 훈련 프로세스가 각 기울기 업데이트에서 전체 데이터 세트를 고려하는 경우, 이 오버 샘플링은 기본적으로 클래스 가중치와 동일합니다.
그러나 여기에서와 같이, 모델을 배치별로 훈련할 때 오버샘플링된 데이터는 더 부드러운 그래디언트 신호를 제공합니다. 각 양성 예시가 하나의 배치에서 큰 가중치를 가지기보다, 매번 여러 배치에서 작은 가중치를 갖기 때문입니다.
이 부드러운 기울기 신호는 모델을 더 쉽게 훈련 할 수 있습니다.
교육 이력 확인
학습 데이터의 분포가 검증 및 테스트 데이터와 완전히 다르기 때문에 여기서 측정 항목의 분포가 다를 수 있습니다.
End of explanation
"""
resampled_model = make_model()
resampled_model.load_weights(initial_weights)
# Reset the bias to zero, since this dataset is balanced.
output_layer = resampled_model.layers[-1]
output_layer.bias.assign([0])
resampled_history = resampled_model.fit(
resampled_ds,
# These are not real epochs
steps_per_epoch=20,
epochs=10*EPOCHS,
callbacks=[early_stopping],
validation_data=(val_ds))
"""
Explanation: 재교육
균형 잡힌 데이터에 대한 훈련이 더 쉽기 때문에 위의 훈련 절차가 빠르게 과적합 될 수 있습니다.
epoch를 나누어 tf.keras.callbacks.EarlyStopping를 보다 세밀하게 제어하여 훈련 중단 시점을 정합니다.
End of explanation
"""
plot_metrics(resampled_history)
"""
Explanation: 훈련 이력 재확인
End of explanation
"""
train_predictions_resampled = resampled_model.predict(train_features, batch_size=BATCH_SIZE)
test_predictions_resampled = resampled_model.predict(test_features, batch_size=BATCH_SIZE)
resampled_results = resampled_model.evaluate(test_features, test_labels,
batch_size=BATCH_SIZE, verbose=0)
for name, value in zip(resampled_model.metrics_names, resampled_results):
print(name, ': ', value)
print()
plot_cm(test_labels, test_predictions_resampled)
"""
Explanation: 메트릭 평가
End of explanation
"""
plot_roc("Train Baseline", train_labels, train_predictions_baseline, color=colors[0])
plot_roc("Test Baseline", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')
plot_roc("Train Weighted", train_labels, train_predictions_weighted, color=colors[1])
plot_roc("Test Weighted", test_labels, test_predictions_weighted, color=colors[1], linestyle='--')
plot_roc("Train Resampled", train_labels, train_predictions_resampled, color=colors[2])
plot_roc("Test Resampled", test_labels, test_predictions_resampled, color=colors[2], linestyle='--')
plt.legend(loc='lower right')
"""
Explanation: ROC 플로팅
End of explanation
"""
plot_prc("Train Baseline", train_labels, train_predictions_baseline, color=colors[0])
plot_prc("Test Baseline", test_labels, test_predictions_baseline, color=colors[0], linestyle='--')
plot_prc("Train Weighted", train_labels, train_predictions_weighted, color=colors[1])
plot_prc("Test Weighted", test_labels, test_predictions_weighted, color=colors[1], linestyle='--')
plot_prc("Train Resampled", train_labels, train_predictions_resampled, color=colors[2])
plot_prc("Test Resampled", test_labels, test_predictions_resampled, color=colors[2], linestyle='--')
plt.legend(loc='lower right')
"""
Explanation: AUPRC 플로팅
End of explanation
"""
|
mediagit2016/workcamp-maschinelles-lernen-grundlagen | 17-12-11-workcamp-ml/2017-12-11-arbeiten-mit-listen-10.ipynb | gpl-3.0 | x = [4,2,6,3] #Erzeugt eine Liste mit Werten
x1 = [4,2,6,3] #Erzeugt eine Liste mit den gleichen Werten
y = list() # Erzeugt eine leere Liste
y = [] #Erzeugt eine leere Liste
z = ["11","22","33","a","b","c","d"] #erzeugt eine Liste mit strg Werten
print(x)
print(id(x))
print(x1)
print(id(x1))
print(y)
print(id(y))
print(z)
print(id(z))
"""
Explanation: <h1>Listen</h1>
<li>Listen sind eine sequentielle, geordnete Sammlung von Werten, Zahlen oder strg oder boolean oder hashes etc.
['spass',[1,2,4], 3.14, [{1],[2],[3]] in eckigen Klammern
<h2>Listen erzeugen</h2>
End of explanation
"""
x=list()
print(x)
x.append('One') #Adds 'One' to the back of the empty list
print(x)
x.append('Two') #Adds 'Two' to the back of the list ['One']
print(x)
x.insert(0,'Half') #Inserts 'Half' at location 0. Items will shift to make roomw
print(x)
x=list()
x.extend([1,2,3]) #Unpacks the list and adds each item to the back of the list
print(x)
"""
Explanation: <h3>Hinzufügen von Objekten in Listen</h3>
End of explanation
"""
x=[1,7,2,5,3,5,67,32]
print(len(x))
print(x[3])
print(x[2:5])
print(x[-1])
print(x[::-1])
"""
Explanation: <h3>Index und Teilstücke</h3>
End of explanation
"""
x=[1,7,2,5,3,5,67,32]
x.pop() #Entfernt das letzte Element aus der Liste
print(x)
x.pop(3) #Removes element at item 3 from a list
print(x)
x.remove(7) #Removes the first 7 from the list
print(x)
"""
Explanation: <h3>Entfernen von Objekten aus Listen</h3>
End of explanation
"""
x.remove(20)
"""
Explanation: <h3>Anything you want to remove must be in the list or the location must be inside the list</h3>
End of explanation
"""
y=['a','b']
x = [1,y,3]
print(x)
print(y)
y[1] = 4
print(y)
print(x)
x="Hello"
print(x,id(x))
x+=" You!"
print(x,id(x)) #x is not the same object it was
y=["Hello"]
print(y,id(y))
y+=["You!"]
print(y,id(y)) #y is still the same object. Lists are mutable. Strings are immutable
def eggs(item, total=0):
    """Return `total + item`.

    The immutable int default is rebound on every call, so — unlike `spam`
    below — successive calls do not accumulate state.
    """
    return total + item
def spam(elem, some_list=[]):
    # NOTE: this deliberately demonstrates the mutable-default-argument
    # pitfall: the SAME list object is reused across calls, so results
    # accumulate (spam(1) -> [1], then spam(2) -> [1, 2]).
    # It is teaching material — do not "fix" it with `some_list=None`.
    some_list.append(elem)
    return some_list
print(eggs(1))
print(eggs(2))
print(spam(1))
print(spam(2))
"""
Explanation: <h2>Listen sind veränderbar [mutable]</h2>
End of explanation
"""
#The for loop creates a new variable (e.g., index below)
#range(len(x)) generates values from 0 to len(x)
x=[1,7,2,5,3,5,67,32]
for index in range(len(x)):
print(x[index])
list(range(len(x)))
"""
Explanation: <h1>Iteration</h1>
<h2>Range iteration</h2>
End of explanation
"""
x=[1,7,2,5,3,5,67,32]
for element in x: #The for draws elements - sequentially - from the list x and uses the variable "element" to store values
print(element)
"""
Explanation: <h3>List element iteration</h3>
End of explanation
"""
def search_list(list_of_tuples, value):
    """Return the value paired with `value` in a list of (key, value) tuples.

    Scans `list_of_tuples` in order and returns the second element of the
    first tuple whose first element equals `value`; returns None when no
    tuple matches. (The original printed the match and always returned 0,
    contradicting the exercise statement "returns the value".)
    """
    for element in list_of_tuples:
        if element[0] == value:
            return element[1]
    return None
prices = [('AAPL',96.43),('IONS',39.28),('GS',159.53),('AA',160.45)]
ticker = 'AA'
print(search_list(prices,ticker))
"""
Explanation: <h3>Practice problem</h3>
Write a function search_list that searches a list of tuple pairs and returns the value associated with the first element of the pair
End of explanation
"""
import hashlib
m=list()
x=[1,7,2,5,3,5,67,32,32,1,10,11,12,13,14,15,16] #Lists may contain duplicate values
for element in x: #The for draws elements - sequentially - from the list x and uses the variable "element" to store values
    y=str(element) # convert the element to its string representation
    z=hashlib.sha256(y) # hash the string (Python 2 only; Python 3 would require y.encode())
    print(z.hexdigest()) # print the digest in hexadecimal
    m.append(z.hexdigest()) # append the digest to the list m
print("Wir haben die Daten in die Liste m gelegt:")
print(m) # print the collected digests
"""
Explanation: <h3>Hashes in Listen ablegen</h3>
End of explanation
"""
mktcaps = {'AAPL':538.7,'GOOG':68.7,'IONS':4.6}# Initialize the dictionary (ticker -> market cap)
print(type(mktcaps))
print(mktcaps)
print(mktcaps.values())
print(mktcaps.keys())
print(mktcaps.items())
c=mktcaps.items()
# Python-2 print statement; indexing items() also only works on Python 2,
# where it returns a list (Python 3 returns a non-indexable view).
print c[0]
mktcaps['AAPL'] #Returns the value associated with the key "AAPL"
# Deliberate teaching example: the next line raises KeyError.
mktcaps['GS'] #Error because GS is not in mktcaps
mktcaps.get('GS') #Returns None because GS is not in mktcaps
mktcaps['GS'] = 88.65 #Adds GS to the dictionary
print(mktcaps)
del(mktcaps['GOOG']) #Removes GOOG from mktcaps
print(mktcaps)
mktcaps.keys() #Returns all the keys
mktcaps.values() #Returns all the values
import hashlib
l=('AAA','BBB','CCC','DDD','EEE')
print(l)
print(len(l))
# NOTE(review): the hashed text 'AAA)' contains a stray ')' -- probably a
# typo for 'AAA'; the dict key 'AAA' itself is unaffected. Python 2 only
# (Python 3 sha256 requires bytes).
hshdict={'AAA':hashlib.sha256('AAA)')}
hshdict.values()
v=hshdict['AAA']
m=v.hexdigest()
print(m)
"""
Explanation: <h1>Dictionaries</h1>
<li>d={}
<li>d.values()
<li>d.keys()
<li>d.items()
<li>d.clear()
<li>d.copy()
<li>d.get(k,x)
<li>k in d
<li>d.setdefault(k[ ,x])
<li>d1.update(d2)
End of explanation
"""
# Create a small name -> age dictionary.
alter = {'Peter': 45, 'Julia': 23, 'Mathias': 36}
print(alter)
# update() both overwrites Julia's age and inserts the new key Monika;
# insertion order of keys is irrelevant for lookups.
alter.update({'Julia': 27, 'Monika': 33})
print(alter)
# Guard the lookup with a membership test so a missing key cannot raise.
if 'Monika' in alter:
    print(alter['Monika'])
"""
Explanation: <h3>Beispiel: Alter</h3>
End of explanation
"""
temperatur={'stuttgart':32.9,'muenchen':29.8,'hamburg':24.4}# temperatures (deg C) keyed by city name
temperatur['koeln']=29.7 #add the temperature for Cologne
print(temperatur) #show all temperatures
for stadt in temperatur:
    print('Die Temperatur in %s ist %g °C' % (stadt,temperatur[stadt]))
if 'Berlin' in temperatur:
    print ('Berlin:', temperatur['Berlin'])
else:
    print ('Keine Daten für Berlin gefunden')
'stuttgart' in temperatur #check whether the key exists in temperatur
temperatur.keys() #the dictionary's keys
temperatur.values()#the dictionary's values
# sorted() yields the keys in alphabetical order.
for stadt in sorted(temperatur):
    print(stadt)
temperatur_kopie=temperatur.copy() #make a (shallow) copy of the dictionary
print (temperatur_kopie)
temperatur2={'stuttgart':22.9,'muenchen':23.8,'hamburg':21.4} #a second dictionary
# update() overwrites the three shared keys in place.
temperatur.update(temperatur2)
for stadt in temperatur:
    print('Die Temperatur in %s ist %g °C' % (stadt,temperatur[stadt]))
print('Anzahl enthaltene Staedte: %g'% len(temperatur))
temperatur2={'stuttgart':22.9,'muenchen':23.8,'hamburg':21.4,'koeln':18.6,'frankfurt':20.6, 'weimar':18.8} #a second, larger dictionary
# This update overwrites existing cities AND inserts frankfurt/weimar.
temperatur.update(temperatur2)
for stadt in temperatur:
    print('Die Temperatur in %s ist %g °C' % (stadt,temperatur[stadt]))
print('Anzahl enthaltene Staedte: %g'% len(temperatur))
"""
Explanation: <h3>Beispiel: Temperaturen in Staedten</h3>
End of explanation
"""
# Student directory demo: matriculation number -> dict of grades.
st={}#create the empty dictionary
st['100100'] = {'Mathe':1.0, 'Bwl':2.5}
st['100200'] = {'Mathe':2.3, 'Bwl':1.8}
print(st.items())
print(type(st))
print(st.values())
print(st.keys())
for k in st.keys():
    # Fixed: the original read `st.['k']`, which is a syntax error; we
    # want the value stored under the loop variable k, not the string 'k'.
    print(st[k])
"""
Explanation: <h2>Beispiel Studenten - mit dictionary</h2>
End of explanation
"""
def stud_verz():
    """Interactively build {matric_no: {subject: grade}} until an empty
    entry is given.

    NOTE(review): on Python 2, input() *evaluates* the typed text, which
    is presumably why the prompt asks for the number "als string".
    """
    stud={}#create an empty dictionary
    student=input('Matrikel-Nr als string eingeben:')
    while student:
        Mathe = input('Mathe Note eingeben:')
        Bwl = input('Bwl Note eingeben:')
        stud[student]={"Mathematik":Mathe,"BWL":Bwl}
        # Ask again; a falsy answer (e.g. an empty string) ends the loop.
        student=input('Matrikel-Nr als string eingeben:')
    return stud
print (stud_verz())
"""
Explanation: <h2>Schrittweiser Aufbau eines Studentenverezichnisses</h2>
End of explanation
"""
# Merge d2 into d1 in place; on the duplicate key 'rainer', d2 wins.
d1 = {'hans': 1.8, 'peter': 1.73, 'rainer': 1.74}
d2 = {'petra': 1.8, 'hannes': 1.73, 'rainer': 1.78}
for name, height in d2.items():
    d1[name] = height
print(d1)
"""
Explanation: <h2>Ein Dictionary aus anderen zusammensetzen
<li>d2.update(d1)
End of explanation
"""
# A tiny English -> German dictionary; each entry maps to a list of translations.
deutsch = {'key':['Schluessel','Taste'],'slice':['Scheibe','Schnitte','Stueck'],'value':['Wert']}
print(deutsch)
# Safe lookup: unknown words yield a fallback value instead of a KeyError.
def uebersetze(wort,d):
    """Return the translation list for *wort*, or 'unbekannt' if absent."""
    return d.get(wort, 'unbekannt')
print(uebersetze('slice',deutsch))
uebersetze('search',deutsch)
"""
Explanation: <h2>Datenzugriff in einem dictionary
End of explanation
"""
#Vokabeltrainer entwickeln
import random
#Definition der Funktionen
def dict_laden(pfad):
    """Load a vocabulary file into a dict.

    Each line of the file is expected to look like
    ``word translation1 translation2 ...``; the first token becomes the
    key and the remaining tokens the value list.

    Returns an empty (or partial) dict when the file cannot be read, so
    callers can always iterate the result -- the original best-effort
    contract is kept.
    """
    d = {}
    try:
        # `with` guarantees the file handle is closed even on errors,
        # which the original open()/close() pair did not.
        with open(pfad) as datei:
            for eintrag in datei:
                l_eintrag = eintrag.split()
                if not l_eintrag:
                    # Skip blank lines instead of aborting the whole load
                    # (the original's bare except stopped at the first one).
                    continue
                d[l_eintrag[0]] = l_eintrag[1:]
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any I/O problem yields the dict built so far.
        pass
    return d
# NOTE(review): the two lines below reference `d` before it is assigned
# further down, so running this cell top-to-bottom raises a NameError --
# they look like leftovers from the commented-out aufgabe() function.
#def aufgabe(d):
zufall = random.randint(0, len(d.keys())-1)
vokabel = list(d.keys())[zufall]
#print(vokabel +'?')
# The data file lives at the path below; each line holds whitespace-
# separated entries ("word translation ...").
#c:\\Benutzer\\ramon\\Dokumente\\Python Scripts\\python-edx-07-07-17\\woerterbuch.txt'
#woerterbuch liste von einträgen mit leerzeichen getrennt
d={}
datei=open('woerterbuch.txt')
liste = datei.readlines()
print(liste)
for eintrag in liste:
    l_eintrag = eintrag.split()#split at whitespace
    #print(l_eintrag[0])
    #print(l_eintrag[1])
    d[l_eintrag[0]]=l_eintrag[1:]
datei.close()
print(d)
# Pick a random vocabulary word and quiz the user interactively.
zufall = random.randint(0, len(d.keys())-1)
vokabel = list(d.keys())[zufall]
print(vokabel+' ?')
antwort=input()
"""
Explanation: <h1>Vokabeltrainer entwickeln
End of explanation
"""
|
net-titech/CREST-Deep-M | notebooks/00-classification.ipynb | mit | # set up Python environment: numpy for numerical routines, and matplotlib for plotting
import numpy as np
import matplotlib.pyplot as plt
# display plots in this notebook
%matplotlib inline
# set display defaults
plt.rcParams['figure.figsize'] = (10, 10) # large images
plt.rcParams['image.interpolation'] = 'nearest' # don't interpolate: show square pixels
plt.rcParams['image.cmap'] = 'gray' # use grayscale output rather than a (potentially misleading) color heatmap
"""
Explanation: Classification: Instant Recognition with Caffe
In this example we'll classify an image with the bundled CaffeNet model (which is based on the network architecture of Krizhevsky et al. for ImageNet).
We'll compare CPU and GPU modes and then dig into the model to inspect features and the output.
1. Setup
First, set up Python, numpy, and matplotlib.
End of explanation
"""
# The caffe module needs to be on the Python path;
# we'll add it here explicitly.
import sys
caffe_root = '/opt/caffe/' # this file should be run from {caffe_root}/examples (otherwise change this line)
sys.path.insert(0, caffe_root + 'python')
import caffe
# If you get "No module named _caffe", either you have not built pycaffe or you have the wrong path.
"""
Explanation: Load caffe.
End of explanation
"""
import os
if os.path.isfile(caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'):
print 'CaffeNet found.'
else:
print 'Downloading pre-trained CaffeNet model...'
!/opt/caffe/scripts/download_model_binary.py /opt/caffe/models/bvlc_reference_caffenet
"""
Explanation: If needed, download the reference model ("CaffeNet", a variant of AlexNet).
End of explanation
"""
# Run on the CPU; the prototxt gives the architecture, the caffemodel the
# learned weights.
caffe.set_mode_cpu()
model_def = caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt'
model_weights = caffe_root + 'models/bvlc_reference_caffenet/bvlc_reference_caffenet.caffemodel'
net = caffe.Net(model_def, # defines the structure of the model
                model_weights, # contains the trained weights
                caffe.TEST) # use test mode (e.g., don't perform dropout)
"""
Explanation: 2. Load net and set up input preprocessing
Set Caffe to CPU mode and load the net from disk.
End of explanation
"""
# load the mean ImageNet image (as distributed with Caffe) for subtraction
mu = np.load(caffe_root + 'python/caffe/imagenet/ilsvrc_2012_mean.npy')
# Collapse the per-pixel mean image to one mean value per (BGR) channel.
mu = mu.mean(1).mean(1) # average over pixels to obtain the mean (BGR) pixel values
print 'mean-subtracted values:', zip('BGR', mu)
# create transformer for the input called 'data'
# (converts matplotlib's RGB [0,1] HxWxC images into Caffe's
#  mean-subtracted BGR [0,255] CxHxW layout)
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2,0,1)) # move image channels to outermost dimension
transformer.set_mean('data', mu) # subtract the dataset-mean value in each channel
transformer.set_raw_scale('data', 255) # rescale from [0, 1] to [0, 255]
transformer.set_channel_swap('data', (2,1,0)) # swap channels from RGB to BGR
"""
Explanation: Set up input preprocessing. (We'll use Caffe's caffe.io.Transformer to do this, but this step is independent of other parts of Caffe, so any custom preprocessing code may be used).
Our default CaffeNet is configured to take images in BGR format. Values are expected to start in the range [0, 255] and then have the mean ImageNet pixel value subtracted from them. In addition, the channel dimension is expected as the first (outermost) dimension.
As matplotlib will load images with values in the range [0, 1] in RGB format with the channel as the innermost dimension, we are arranging for the needed transformations here.
End of explanation
"""
# set the size of the input (we can skip this if we're happy
# with the default; we can also change it later, e.g., for different batch sizes)
net.blobs['data'].reshape(50, # batch size
3, # 3-channel (BGR) images
227, 227) # image size is 227x227
"""
Explanation: 3. CPU classification
Now we're ready to perform classification. Even though we'll only classify one image, we'll set a batch size of 50 to demonstrate batching.
End of explanation
"""
image = caffe.io.load_image(caffe_root + 'examples/images/cat.jpg')
transformed_image = transformer.preprocess('data', image)
plt.imshow(image)
"""
Explanation: Load an image (that comes with Caffe) and perform the preprocessing we've set up.
End of explanation
"""
# copy the image data into the memory allocated for the net
# (the single image broadcasts across the batch dimension of 50)
net.blobs['data'].data[...] = transformed_image
### perform classification
output = net.forward()
output_prob = output['prob'][0] # the output probability vector for the first image in the batch
print 'predicted class is:', output_prob.argmax()
"""
Explanation: Adorable! Let's classify it!
End of explanation
"""
# load ImageNet labels
labels_file = caffe_root + 'data/ilsvrc12/synset_words.txt'
if not os.path.exists(labels_file):
!/opt/caffe/data/ilsvrc12/get_ilsvrc_aux.sh
labels = np.loadtxt(labels_file, str, delimiter='\t')
print 'output label:', labels[output_prob.argmax()]
"""
Explanation: The net gives us a vector of probabilities; the most probable class was the 281st one. But is that correct? Let's check the ImageNet labels...
End of explanation
"""
# sort top five predictions from softmax output
top_inds = output_prob.argsort()[::-1][:5] # reverse sort and take five largest items
print 'probabilities and labels:'
zip(output_prob[top_inds], labels[top_inds])
"""
Explanation: "Tabby cat" is correct! But let's also look at other top (but less confident predictions).
End of explanation
"""
%timeit net.forward()
"""
Explanation: We see that less confident predictions are sensible.
4. Switching to GPU mode
Let's see how long classification took, and compare it to GPU mode.
End of explanation
"""
caffe.set_device(0) # if we have multiple GPUs, pick the first one
caffe.set_mode_gpu()
net.forward() # run once before timing to set up memory
%timeit net.forward()
"""
Explanation: That's a while, even for a batch of 50 images. Let's switch to GPU mode.
End of explanation
"""
# for each layer, show the output shape
for layer_name, blob in net.blobs.iteritems():
print layer_name + '\t' + str(blob.data.shape)
"""
Explanation: That should be much faster!
5. Examining intermediate output
A net is not just a black box; let's take a look at some of the parameters and intermediate activations.
First we'll see how to read out the structure of the net in terms of activation and parameter shapes.
For each layer, let's look at the activation shapes, which typically have the form (batch_size, channel_dim, height, width).
The activations are exposed as an OrderedDict, net.blobs.
End of explanation
"""
for layer_name, param in net.params.iteritems():
print layer_name + '\t' + str(param[0].data.shape), str(param[1].data.shape)
"""
Explanation: Now look at the parameter shapes. The parameters are exposed as another OrderedDict, net.params. We need to index the resulting values with either [0] for weights or [1] for biases.
The param shapes typically have the form (output_channels, input_channels, filter_height, filter_width) (for the weights) and the 1-dimensional shape (output_channels,) (for the biases).
End of explanation
"""
def vis_square(data):
    """Take an array of shape (n, height, width) or (n, height, width, 3)
    and visualize each (height, width) thing in a grid of size approx. sqrt(n) by sqrt(n)"""
    # normalize data for display
    # (min-max scale the whole batch jointly into [0, 1])
    data = (data - data.min()) / (data.max() - data.min())
    # force the number of filters to be square
    # n is the grid side length, so n*n >= number of tiles
    n = int(np.ceil(np.sqrt(data.shape[0])))
    # Pad: fill the grid up to n*n tiles, plus a 1-pixel white border on
    # the bottom/right of every tile.
    padding = (((0, n ** 2 - data.shape[0]),
               (0, 1), (0, 1)) # add some space between filters
               + ((0, 0),) * (data.ndim - 3)) # don't pad the last dimension (if there is one)
    data = np.pad(data, padding, mode='constant', constant_values=1) # pad with ones (white)
    # tile the filters into an image
    # (interleave grid rows with tile rows via transpose, then collapse)
    data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
    data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
    plt.imshow(data); plt.axis('off')
"""
Explanation: Since we're dealing with four-dimensional data here, we'll define a helper function for visualizing sets of rectangular heatmaps.
End of explanation
"""
# the parameters are a list of [weights, biases]
filters = net.params['conv1'][0].data
vis_square(filters.transpose(0, 2, 3, 1))
"""
Explanation: First we'll look at the first layer filters, conv1
End of explanation
"""
feat = net.blobs['conv1'].data[0, :36]
vis_square(feat)
"""
Explanation: The first layer output, conv1 (rectified responses of the filters above, first 36 only)
End of explanation
"""
feat = net.blobs['pool5'].data[0]
vis_square(feat)
"""
Explanation: The fifth layer after pooling, pool5
End of explanation
"""
feat = net.blobs['fc6'].data[0]
plt.subplot(2, 1, 1)
plt.plot(feat.flat)
plt.subplot(2, 1, 2)
_ = plt.hist(feat.flat[feat.flat > 0], bins=100)
"""
Explanation: The first fully connected layer, fc6 (rectified)
We show the output values and the histogram of the positive values
End of explanation
"""
feat = net.blobs['prob'].data[0]
plt.figure(figsize=(15, 3))
plt.plot(feat.flat)
"""
Explanation: The final probability output, prob
End of explanation
"""
# download an image
#my_image_url = "..." # paste your URL here
# for example:
my_image_url = "http://upload.wikimedia.org/wikipedia/commons/b/be/Orang_Utan%2C_Semenggok_Forest_Reserve%2C_Sarawak%2C_Borneo%2C_Malaysia.JPG"
!wget --no-check-certificate -O image.jpg $my_image_url
# transform it and copy it into the net
image = caffe.io.load_image('image.jpg')
net.blobs['data'].data[...] = transformer.preprocess('data', image)
# perform classification
net.forward()
# obtain the output probabilities
output_prob = net.blobs['prob'].data[0]
# sort top five predictions from softmax output
top_inds = output_prob.argsort()[::-1][:5]
plt.imshow(image)
print 'probabilities and labels:'
zip(output_prob[top_inds], labels[top_inds])
"""
Explanation: Note the cluster of strong predictions; the labels are sorted semantically. The top peaks correspond to the top predicted labels, as shown above.
6. Try your own image
Now we'll grab an image from the web and classify it using the steps above.
Try setting my_image_url to any JPEG image URL.
End of explanation
"""
from scipy.misc import imread
import numpy as np
import glob
npy_path = '../compressed-models/alexnet/npy/'
jpg_path = '../compressed-models/alexnet/jpegs/'
gif_path = '../compressed-models/alexnet/gifs/'
png_path = '../compressed-models/alexnet/pngs/'
dic = {
'conv1': [96, 3, 11, 11],
'conv2': [256, 48, 5, 5],
'conv3': [384, 256, 3, 3],
'conv4': [384, 192, 3, 3],
'conv5': [256, 192, 3, 3],
'fc6': [4096, 9216],
'fc7': [4096, 4096],
'fc8': [1000, 4096]
}
min_max = np.load(png_path + 'range.npy').item()
#convert(106, (0.,255.), (f_conv1.min(), f_conv1.max()))
def convert(val, old_range, new_range):
    """Linearly rescale *val* from *old_range* (lo, hi) into *new_range*.

    Works element-wise on numpy arrays as well as on scalars; the
    arithmetic order matches the original formula exactly.
    """
    old_lo, old_hi = old_range
    new_lo, new_hi = new_range
    scaled = (val - old_lo) * (new_hi - new_lo)
    return scaled / (old_hi - old_lo) + new_lo
# Python-2 cell (print statements, dict.iteritems). Restores each layer's
# weights from a quantized 2-D PNG back into the live network.
for layer_name, param in net.params.iteritems():
    # param 0 - weights, param 1 - bias
    #f = imread(jpg_path + layer_name + '.jpg')
    f = imread(png_path + layer_name + '.png')
    # Undo the [0, 255] image quantization back to the layer's own range.
    f_original = convert(f, (0., 255.), min_max[layer_name])
    print layer_name, np.min(f_original), np.max(f_original)
    #print np.min(f), np.max(f)
    #f_original = (f / 255. * 2.) - 1
    print f.shape
    # Restore the flat image to the layer's original weight-tensor shape
    # and write it into the network's weight blob.
    f_reshape = f_original.reshape(dic[layer_name])
    #print f_reshape.shape
    param[0].data[...] = f_reshape
from scipy.misc import imread
import numpy as np
import glob
npy_path = '../compressed-models/alexnet/npy/'
jpg_path = '../compressed-models/alexnet/jpegs/'
dic = {
'conv1': [96, 3, 11, 11],
'conv2': [256, 48, 5, 5],
'conv3': [384, 256, 3, 3],
'conv4': [384, 192, 3, 3],
'conv5': [256, 192, 3, 3],
'fc6': [4096, 9216],
'fc7': [4096, 4096],
'fc8': [1000, 4096]
}
for layer_name, param in net.params.iteritems():
# param 0 - weights, param 1 - bias
f = np.load(npy_path + layer_name + '.npy')
param[0].data[...] = f
print layer_name, np.min(f), np.max(f)
#layer_name = 'fc6'
#f = imread(jpg_path + layer_name + '.jpg', True)
#f_original = (f / 255. * 2.) - 1
#f_reshape = f_original.reshape(dic[layer_name])
#net.params[layer_name][0].data[...] = f_reshape
#print np.min(net.params[layer_name][0].data)
image = caffe.io.load_image(caffe_root + 'examples/images/cat.jpg')
transformed_image = transformer.preprocess('data', image)
plt.imshow(image)
# copy the image data into the memory allocated for the net
net.blobs['data'].data[...] = transformed_image
### perform classification
output = net.forward()
output_prob = output['prob'][0] # the output probability vector for the first image in the batch
print 'predicted class is:', output_prob.argmax()
# load ImageNet labels
labels_file = caffe_root + 'data/ilsvrc12/synset_words.txt'
if not os.path.exists(labels_file):
!/opt/caffe/data/ilsvrc12/get_ilsvrc_aux.sh
labels = np.loadtxt(labels_file, str, delimiter='\t')
print 'output label:', labels[output_prob.argmax()]
# sort top five predictions from softmax output
top_inds = output_prob.argsort()[::-1][:5] # reverse sort and take five largest items
print 'probabilities and labels:'
zip(output_prob[top_inds], labels[top_inds])
"""
Explanation: 7. Using Compressed Images
End of explanation
"""
caffe.set_mode_cpu()
#model_def = caffe_root + 'models/bvlc_reference_caffenet/deploy.prototxt'
model_def = '/home/choong/work/Deep-Compression-AlexNet/bvlc_alexnet_deploy.prototxt'
model_weights = '/home/choong/work/caffe/alexnet.caffemodel'
net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST) # use test mode (e.g., don't perform dropout)
"""
Explanation: 8. Using Han's Model
End of explanation
"""
|
xaratustrah/iq_suite | doc/quick_introduction_iqtools.ipynb | gpl-2.0 | # In your new notebook, first import the library, this automaticall imports IQBase as well
from iqtools import *
%matplotlib inline
"""
Explanation: Quick introduction to iqtools
General information
iqtools is a collection consisting of a library, command line tools. The best way to use the library is inside a jupyter notebook. After checking the concept, the code inside can be put into a separate python script for routine analysis.
The advantage is that you have the full arsenal of python libraries that you can use and mix.
iqtools comes itself with a variety of tools in the IQBase class, such as averaging ffts and spectrograms.
A related project is the iqgui which is a GUI viewer using the iqtools library and can be found in the repository.
Using the CLI (command line interface)
The command line interface has a help which is also relatively self explanatory. It can be used to extract ready pictures from the code.
./iqtools --help
brings out the help page. You can see what is inside a file by:
./iqtools -v -d FILE.TIQ
Plot the spectrogram of a data file, reading 100 frames each 512 samples long starting from frame 56:
./iqtools.py -v --spec -l 512 -n 100 -s 56 FILE.IQT
Using the library
Start by cloning the repository into a local directory:
mkdir git
cd git
git clone https://github.com/xaratustrah/iqtools
cd iqtools
then start the jupyter-notebook. Then start a browser, go to the following address:
localhost:8888
Then navigate to the iqtools directory, click
File --> New Notebook --> Python 3
Then you are all set up.
End of explanation
"""
filename = 'FILENAME.tiq'
"""
Explanation: Reading IQ files
What is the filename? For TCAP files you need to specify a header file as well. For all other data formats (TIQ, IQT, TDMS, WAV, RAW, TXT) you don't need to do that.
End of explanation
"""
iq_data = TIQData(filename)
"""
Explanation: create an object of that filetype. E.g. for TCAP data you create an instance of the class TCAPData. For TIQ you use the instance of class TIQData and so on...
End of explanation
"""
iq_data.read(nframes= 100, lframes=2048, sframes=500)
"""
Explanation: Read data, how many frames, how long each, starting from which frame. Note that 10 frames each 100 samples long is the same as 100 frames each 10 samples long.
End of explanation
"""
# Choose the window function and the spectral-estimation method
# ('mtm' = multitaper, per the notebook text) before computing.
iq_data.window='hamming'
iq_data.method='mtm'
# xx, yy, zz: presumably the time, frequency and power grids -- matches
# their use in the plotting helpers. TODO confirm against iqtools docs.
xx, yy, zz = iq_data.get_spectrogram(nframes= 100, lframes=2048)
"""
Explanation: create a spectrogram. You can set a window or choose a method. in this case we choose multitaper. You can also choose a nice colormap:
End of explanation
"""
plot_spectrogram_dbm(xx, yy, zz)
"""
Explanation: now plot it
End of explanation
"""
# Time-averaged power spectrum, then automatic peak detection on it.
ff, pp = iq_data.get_time_average_vs_frequency(xx, yy, zz)
fpeak, ppeak = iq_data.get_narrow_peaks_dbm(ff, pp, accuracy=100)
plot_dbm_per_hz(ff, pp, iq_data.center)
# Overlay the detected peaks as red triangle markers.
plt.plot(fpeak, IQBase.get_dbm(ppeak), 'rv')
"""
Explanation: make a 1D time average plot, then find its peaks:
End of explanation
"""
iq_data.window='bartlett'
ff, pp,_ = iq_data.get_fft()
plot_dbm_per_hz(ff, pp, iq_data.center)
"""
Explanation: make a single FFT over the whole range, set the window before if you like:
End of explanation
"""
# Synthesize a 400 Hz test tone sampled at 22.05 kHz with two harmonics.
fs = 22050
f = 400
center = 133
t, x = make_test_signal(400, 22050, noise=False, nharm=2)
plt.plot(t[:100], x[:100])
# Analytic (complex) signal; insph is presumably the instantaneous
# phase -- verify against the iqtools API.
xbar , insph = make_analytical(x)
plot_hilbert(xbar)
# Persist the synthetic IQ data in both ASCII and binary form.
write_signal_as_ascii('results.txt', xbar, fs, center)
write_signal_as_binary('results.bin', xbar, fs, center)
"""
Explanation: Synthesis of signals
Here you can create a synthetic signal for test purposes:
End of explanation
"""
|
liganega/Gongsu-DataSci | previous/notes2017/W04/GongSu09_Dictionary.ipynb | gpl-3.0 | record_f = open("Sample_Data/Swim_Records/record_list.txt")
record = record_f.read().decode('utf-8').split('\n')
record_f.close()
for line in record:
print(line)
"""
Explanation: 사전 활용
주요 내용
파이썬에 내장되어 있는 컬렉션 자료형 중 사전에 대해 알아 본다.
사전(dictionaries): 키(keys)와 값(values)으로 이루어진 쌍(pairs)들의 집합
사용 형태: 집합기호 사용
eng_math = {'year': 2017, 'semester' : 2, 'subject': 'Data Science'}
특징
키로 사용될 수 있는 자료형: 문자열 등 불변 자료형 값
값으로 사용될 수 있는 자료형: 임의의 값
사전은 가변 자료형이다.
사전이름[키이름] = 값 을 이용해 특정 항목의 키에 해당하는 값을 변경할 수 있다.
update() 메소드: 항목 추가
del 함수 또는 pop() 메소드: 특정 항목 삭제
items, keys, values 등의 메소드를 이용하여 사전의 항목 확인 가능
오늘의 주요 예제
record_list.txt 파일은 여덟 명의 수영 선수의 50m 기록을 담고 있다.
txt
player1 21.09
player2 20.32
player3 21.81
player4 22.97
player5 23.29
player6 22.09
player7 21.20
player8 22.16
목표: 위 파일로부터 1~3등 선수의 이름과 기록을 아래와 같이 확인하기
txt
1등 player2 20.32
2등 player1 21.09
3등 player7 21.20
주의: 이전에는 1~3등의 점수만 확인하였다.
하지만 이제는 선수 이름까지 함께 확인해야 한다.
참조: Head First Programming(한빛미디어) 5장
사전 활용
저장된 파일에서 데이터를 불러와서 한 줄씩 확인하는 방법은 다음과 같다.
End of explanation
"""
# NOTE: Python-2 style I/O -- str.decode('utf8') does not exist on
# Python 3 text streams.
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
time_only = []
for line in record:
    # Each line looks like "player1 21.09"; keep only the time.
    (player, p_record) = line.split()
    time_only.append(float(p_record))
record_f.close()
# Ascending sort: fastest (smallest) time first.
time_only.sort()
for i in range(3):
    # "등" means rank (1등 = 1st place); player names were discarded.
    print(str(i+1) + "등", time_only[i])
"""
Explanation: 복습
앞 장에서 1~3등의 50m 기록을 확인하였다.
End of explanation
"""
# Python-2 cell. Build two *parallel* lists: entry i of name_only
# corresponds to entry i of time_only. Sorting either list on its own
# would break that correspondence -- the motivation for using a dict.
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
time_only = []
name_only = []
for line in record:
    (p_name, p_record) = line.split()
    time_only.append(float(p_record))
    name_only.append(p_name)
record_f.close()
print(name_only)
print(time_only)
"""
Explanation: 이제 위 코드를 수정하여 아래 결과를 얻고자 한다.
txt
1등 player2 20.32
2등 player1 21.09
3등 player7 21.20
즉, 각 등수의 선수 이름까지 필요하다.
어떻게 하면 선수이름과 점수를 동시에 움직이게 할 수 있을까?
마이크로소프트의 엑셀 프로그램을 활용하면 매우 간단하다.
<p>
<table cellspacing="20">
<tr>
<td align="center">
<img src="../../images/excel/excel1.png" style="width:100%">
</td>
<td align="center">
<img src="../../images/excel/excel1a.png" style="width:100%">
</td>
<td align="center">
<img src="../../images/excel/excel2.png" style="width:100%">
</td>
</tr>
<tr>
<td align="center">
기존 기록표
</td>
<td align="center">
점수 기준으로 정렬하기
</td>
<td align="center">
정렬 후 기록표
</td>
</tr>
</table>
</p>
두 개의 리스트로 쪼개기
먼저 앞서 사용한 방식을 약간 수정해서 기록들의 리스트와 선수이름들의 리스트를 생성해보자.
End of explanation
"""
city_temperature = {}
"""
Explanation: 현재 두 개의 리스트는 기존 테이블의 리스트의 순서와 동일한 순서대로 항목을 갖고 있다.
예를 들어, name_only 리스트의 첫 째 선수의 기록은 time_only 리스트의 첫 째 항목 값이다.
그런데 1~3등의 점수를 얻기 위해 time_only 리스트를 정렬하면 상위 세 명의 점수는 확인할 수 있었지만 어떤 선수가 수상을 해야 할지는 알 수 없었다.
어떻게 해야 할까? name_only 리스트도 정렬할까? 그런데 어떤 기준으로 정렬하나? 이름순으로? 그러면 A 또는 Z로 시작하는 선수가 항상 1등 아니면 꼴등이 되어 버리는 문제가 발생한다.
이런 문제는 두 개의 리스트를 다룰 때 항상 발생한다. 그리고 일반적으로 두 개의 리스트를 엑셀의 경우처럼 한 가지 기준으로 연동해서 정렬할 수는 없다.
따라서 다른 접근방식이 요구된다.
여기서는 사전 자료형을 이용하여 문제를 해결하고자 한다.
하지만 해결법을 설명하기 전에 사전 자료형을 간단한 예제를 통해 공부하고자 한다.
사전 자료형 예제
사전 자료형에 대한 이해는 어학공부에 사용하는 사전을 떠올리면 쉽다.
영어 사전의 경우 '단어 와 뜻'으로 이루어진 쌍들의 집합이라고 생각할 수 있다.
사전 자료형도 동일하게 작동한다.
예를 들어, 평택, 수원, 제주의 현재 온도에 대한 정보가 아래와 같다고 하자.
Pyongtaek 22
Suwon 18
Jeju 25
이제 사전 자료형을 이용하여 위 정보를 저장하고 활용하는 방법은 다음과 같다.
먼저 빈 사전을 선언한다.
End of explanation
"""
city_temperature['Pyongtaek'] = 22
"""
Explanation: 이제 원하는 자료들의 쌍을 입력한다.
예를 들어 '평택 온도는 22도' 라는 정보를 추가하고자 하면 아래와 같이 하면 된다.
End of explanation
"""
city_temperature
"""
Explanation: 이제 평택의 정보가 추가되었음을 확인할 수 있다.
End of explanation
"""
city_temperature['Suwon'] = 18
city_temperature['Jeju'] = 25
city_temperature
"""
Explanation: 이제 수원과 제주의 정보를 추가하고 확인해보자.
End of explanation
"""
city_temperature['Pyongtaek']
city_temperature['Jeju']
"""
Explanation: 주의: 사전 자료형에서 각 항목의 순서는 전혀 의미가 없다.
키(key) 와 키값(value)
앞서 살펴보았듯 사전자료형의 항목들은 콜론(:)으로 구분된 두 개의 값들의 쌍으로 이루어진다.
왼쪽에 있는 값을 키(key), 오른쪽에 위치하는 값은 키값(value)라 부른다.
예를 들어 city_temperature에 사용된 키들은 Pyeongtaek, Suwon, Jeju 등이고 각 키들에 대응하는 키값은 각각 22, 18, 25이다.
키에 해당하는 키값을 확인하고자 하면 아래와 같이 명령하면 된다.
End of explanation
"""
key_list = city_temperature.keys()
key_list
"""
Explanation: 키만 모아 놓은 리스트
사전에 사용된 키들만 따로 모아놓은 리스트를 만들어주는 사전 자료형 메소드가 있다.
End of explanation
"""
value_list = city_temperature.values()
value_list
"""
Explanation: 주의: 도시명들의 순서 전혀 중요하지 않다.
키값만 모아 놓은 리스트
사전에 사용된 키값들만 따로 모아놓은 리스트를 만들어주는 사전 자료형 메소드가 있다.
End of explanation
"""
item_list = city_temperature.items()
item_list
"""
Explanation: 각각의 항목을 리스트의 항목으로 묶는 방식
사전에 사용된 항목들을 튜플로 묶어 리스트를 만들 수 있다.
End of explanation
"""
for key in city_temperature.keys():
print(key,"의 온도는", city_temperature[key], "도 이다.")
"""
Explanation: 사전 자료형 반복문
사전자료형을 반복문에 활용할 수 있다.
이를 위해 keys 메소드를 사용한다.
예를 들어, 도시와 온도를 동시에 추출하여 모두 보여주고자 할 경우 아래와 같이 하면 된다.
End of explanation
"""
for key in city_temperature:
print(key,"의 온도는", city_temperature[key], "도 이다.")
"""
Explanation: 사실 keys 메소드를 굳이 사용하지 않아도 된다.
End of explanation
"""
dir(city_temperature)
"""
Explanation: 사전 자료형의 메소드는 그리 많지 않다.
특정 자료형의 메소드를 확인하고자 하면 dir() 함수를 활용한다.
End of explanation
"""
city_temperature.pop("Suwon")
print(city_temperature)
"""
Explanation: 이중에서 pop와 has_key에 대해서는 기본적으로 알고 있는 것이 좋다.
pop() 메소드는 키에 해당하는 항목을 삭제한다.
End of explanation
"""
city_temperature.has_key("Suwon")
city_temperature.has_key("Jeju")
"""
Explanation: has_key() 메소드는 특정 키의 존재 여부를 확인해준다.
End of explanation
"""
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
record_dict = {}
for line in record:
    (player, p_record) = line.split()
    # Key by the time string so sorting the keys orders the players.
    # NOTE(review): two players with an identical time would collide
    # here -- the later one silently overwrites the earlier one.
    record_dict[p_record] = player
record_f.close()
for item_key in record_dict:
    print(item_key, ":", record_dict[item_key])
"""
Explanation: 선수이름과 기록 연동하기
이제 선수이름과 기록을 연동하여 기록순으로 정렬하는 방법을 다루고자 하며,
이를 위해 사전 자료형을 활용한다.
방식은 앞서 언급한 아래의 코드를 약간 수정하면 된다.
End of explanation
"""
sorted(record_dict.keys())
for each_record in sorted(record_dict.keys()):
print(each_record, record_dict[each_record])
"""
Explanation: 이제 record_dict를 기록 기준으로 오름차순으로 정렬하면 된다.
하지만 사전 자료형에는 sort() 메소드가 없다.
대신에 sorted() 함수를 적용할 수 있다.
즉, sorted() 함수를 이용하여 기록을 정렬한 후에 그 순서대로 키값을 읽으면 된다.
End of explanation
"""
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
record_dict = {}
for line in record:
    (player, p_record) = line.split()
    record_dict[p_record] = player   # time string -> player name
record_f.close()
ranking = 1
# sorted() orders the time *strings* lexicographically; that matches
# numeric order here because all times share the same "NN.NN" width.
for each_record in sorted(record_dict.keys()):
    print(str(ranking) + "등", record_dict[each_record], each_record)
    ranking += 1
"""
Explanation: 이제 코드를 정리하면 다음과 같다.
End of explanation
"""
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
record_dict = {}
for line in record:
    (player, p_record) = line.split()
    record_dict[p_record] = player   # time string -> player name
record_f.close()
ranking = 1
for each_record in sorted(record_dict.keys()):
    print(str(ranking) + "등", record_dict[each_record], each_record)
    if ranking < 3:
        ranking += 1
    else:
        # Stop after printing the third place.
        break
"""
Explanation: 연습
위 코드를 수정하여 3등까지만 출력되도록 하라.
힌트: break 활용
견본답안 1: sorted() 함수의 활용에 주의할 것.
End of explanation
"""
from __future__ import print_function
record_f = open("Sample_Data/Swim_Records/record_list.txt", 'r')
record = record_f.read().decode('utf8').split('\n')
record_dict = {}
for line in record:
    (player, p_record) = line.split()
    record_dict[p_record] = player   # time string -> player name
record_f.close()
# Materialize and sort the time keys (lexicographic, which matches
# numeric order for same-width "NN.NN" strings), then take the first 3.
record_list = record_dict.keys()
record_list.sort()
for i in range(3):
    item = record_list[i]
    print(str(i+1) + "등", record_dict[item], item)
"""
Explanation: 주의: break 명령어가 실행되는 순간 현재 실행되고 있는 반복문이 멈추고 다음 과정으로 넘어간다.
견본답안 2: 아래와 같이 range() 함수를 활용할 수도 있다.
End of explanation
"""
|
jlaura/camera_model | python/notebooks/Image2Ground Testing.ipynb | unlicense | # 512, 512 are the focal width/height in pixels divided by 2
def create_intrinsic_matrix(focal_length, image_width, sensor_width=14.4, skew=0, pixel_aspect=1):
focal_pixels = (focal_length / sensor_width) * image_width # From the IK - how do we get 14.4 automatically
print( 'These should be equal.', focal_pixels * sensor_width / 1024, focal_length)
intrinsic_matrix = np.zeros((3,3))
intrinsic_matrix[0,0] = focal_pixels
intrinsic_matrix[1,1] = focal_pixels
intrinsic_matrix[:,2] = [512.5, 512.5, 1]
return intrinsic_matrix
# Build the intrinsic matrix from the ISD metadata; `isd` is loaded in an
# earlier notebook cell (focal length in mm, nsamples = image width in pixels).
K = create_intrinsic_matrix(isd['focal_length'], isd['nsamples'])
print(K)
"""
Explanation: For a framing camera the interior orientation (intrinsic matrix) requires (at a minimum):
a distortion model
focal point
principal point offset
The example that we have been working on looks like a pinhole ground to image projection, defined as:
$$\begin{bmatrix}
w \cdot u \\
w \cdot v \\
w
\end{bmatrix} = \mathbf{K}
\begin{bmatrix}
\mathbf{Rt}
\end{bmatrix}
\begin{bmatrix}
X\\
Y\\
Z\\
1
\end{bmatrix}
$$
or
$$\begin{bmatrix}
w \cdot u \\
w \cdot v \\
w
\end{bmatrix} =
\begin{bmatrix}
f & s & u_{0} \\
0 & \alpha f & v_{0} \\
0 & 0 & 1
\end{bmatrix}
\begin{bmatrix}
r_{11} & r_{12} & r_{13} & t_{x} \\
r_{21} & r_{22} & r_{23} & t_{y} \\
r_{31} & r_{32} & r_{33} & t_{z} \\
\end{bmatrix}
\begin{bmatrix}
X\\
Y\\
Z\\
1
\end{bmatrix}
$$
K is the intrinsic matrix (interior orientation), R is the extrinsic matrix (exterior orientation), and t is the translation. In the intrinsic matrix $\alpha$ (pixel aspect ratio) and $s$ (skew) are often assumed to be unity and zero, respectively. $f$ is the focal length (in pixels) and ($u_{0}, v_{0}$) are the optical center (principal point).
The second resource below suggests that t can be thought of as the world origin in camera coordinates.
Focal Length Conversion from mm to pixels
If the sensor's physical width is known: $focal_{pixel} = (focal_{mm} / sensor_{width}) * imagewidth_{pixels}$
If the horizontal FoV is known: $focal_{pixel} = (imagewidth_{pixels} * 0.5) / \tan(FoV * 0.5)$
Resources:
http://ksimek.github.io/2013/08/13/intrinsic/
http://ksimek.github.io/2012/08/22/extrinsic/
http://slazebni.cs.illinois.edu/spring16/3dscene_book_svg.pdf
End of explanation
"""
# Exposure-station (sensor) position L = (XL, YL, ZL) from the ISD metadata;
# presumably body-fixed coordinates in meters -- TODO confirm the frame.
L = np.array([isd['x_sensor_origin'],
              isd['y_sensor_origin'],
              isd['z_sensor_origin']])
L  # notebook display of the vector
"""
Explanation: Here we define:
$$L = \begin{bmatrix}
X_{L}\\
Y_{L}\\
Z_{L}
\end{bmatrix}
$$
End of explanation
"""
# A sample ground point (meters); not used below -- kept for reference.
object_point = np.array([1116890, -1604470, 1459570])
# Discard scale momentarily.
k = 1
# Compute M
# NOTE(review): `k` is immediately overwritten with kappa below, so the
# scale factor k = 1 above is never actually used.
o = isd['omega']
p = isd['phi']
k = isd['kappa']
M = opk_to_rotation(o, p, k)
# Rotate the sensor position into the camera frame...
xyz = M.dot(L)
# And now reverse because M is orthogonal
L0 = (M.T).dot(xyz)
print(L, L0) # These should be equal.
"""
Explanation: $$\begin{bmatrix}
x\
y\
z \end{bmatrix} = k\mathbf{M} \begin{bmatrix}
X - X_{L}\
Y - Y_{L}\
Z - Z_{L}
\end{bmatrix}$$, where $(x, y, -f)$ are in image coordinates, $k$ is a scale factor, $\mathbf{M}$ is a 3x3 rotation matrix, and $(X,Y,Z)$ represent the object point.
End of explanation
"""
def opk_to_rotation(o, p, k):
    """Convert omega, phi, kappa Euler angles to a 3x3 rotation matrix.

    Parameters
    ----------
    o : float
        Omega in radians.
    p : float
        Phi in radians.
    k : float
        Kappa in radians.

    Returns
    -------
    : ndarray
        (3, 3) rotation matrix, composed as kappa . phi . omega.
    """
    co, so = cos(o), sin(o)
    cp, sp = cos(p), sin(p)
    ck, sk = cos(k), sin(k)
    # Elemental rotations written row-wise (equivalent to the usual
    # column-by-column construction).
    omega = np.array([[1.0, 0.0, 0.0],
                      [0.0,  co,  so],
                      [0.0, -so,  co]])
    phi = np.array([[ cp, 0.0, -sp],
                    [0.0, 1.0, 0.0],
                    [ sp, 0.0,  cp]])
    kappa = np.array([[ ck,  sk, 0.0],
                      [-sk,  ck, 0.0],
                      [0.0, 0.0, 1.0]])
    return kappa.dot(phi).dot(omega)
def collinearity(f, M, camera_position, ground_position, principal_point=(0,0)):
    """Project a ground point into focal-plane coordinates.

    Applies the photogrammetric collinearity equations: rotate the
    ground-minus-camera vector by M, then scale by -f over the depth term.
    Returns (x, y, -f) in the same linear units as f and the positions.
    """
    XL, YL, ZL = camera_position
    X, Y, Z = ground_position
    x0, y0 = principal_point
    # Ground point relative to the exposure station.
    dX = X - XL
    dY = Y - YL
    dZ = Z - ZL
    # Rotated components: u, v are the image-plane numerators, w the
    # shared depth denominator.
    u = M[0,0] * dX + M[0,1] * dY + M[0,2] * dZ
    v = M[1,0] * dX + M[1,1] * dY + M[1,2] * dZ
    w = M[2,0] * dX + M[2,1] * dY + M[2,2] * dZ
    x = (-f * (u / w)) + x0
    y = (-f * (v / w)) + y0
    return x, y, -f
def collinearity_inv(f, M, camera_position, pixel_position, elevation, principal_point=(0,0)):
    """Inverse collinearity: image coordinates plus a known elevation
    give the ground (X, Y) position.

    The elevation must be supplied externally (e.g. from a DEM or an
    intersection iteration); only X and Y are solved for.
    """
    XL, YL, ZL = camera_position
    x, y = pixel_position
    x0, y0 = principal_point
    # Offsets from the principal point.
    dx = x - x0
    dy = y - y0
    # Shared denominator of both inverse equations.
    den = M[0,2] * dx + M[1,2] * dy + M[2,2] * (-f)
    X = (elevation - ZL) * ((M[0,0] * dx + M[1,0] * dy + M[2,0] * (-f)) / den) + XL
    Y = (elevation - ZL) * ((M[0,1] * dx + M[1,1] * dy + M[2,1] * (-f)) / den) + YL
    return X, Y
# Worked example from Mikhail: exterior orientation angles and camera
# position, then a forward projection and its inverse.
o = radians(2)
p = radians(5)
k = radians(15)
# Exposure-station position (same linear units as the ground points).
XL = 5000
YL = 10000
ZL = 2000
# Interior Orientation
x0 = 0.015 # mm
y0 = -0.02 # mm
f = 152.4 # mm
# Ground Points
X = 5100
Y = 9800
Z = 100
M = opk_to_rotation(o,p,k) # Distortion model here?
# This is correct as per Mikhail
x, y, _ = collinearity(f, M, [XL, YL, ZL], [X, Y, Z], [0,0])
print(x, y)
# Same projection, now with the principal-point offset applied.
x, y, _ = collinearity(f, M, [XL, YL, ZL], [X, Y, Z], [x0,y0])
print(x,y)
# And now the inverse, find X, Y
Z = 500 # Provided by Mikhail - his random number
print(collinearity_inv(f, M, [XL, YL, ZL], [x, y], Z, (x0, y0)))
"""
Explanation: Example from Mikhail
End of explanation
"""
# First from pixel to ground:
f = isd['focal_length']
XL = isd['x_sensor_origin']
YL = isd['y_sensor_origin']
ZL = isd['z_sensor_origin']
# We know that the pixel size is 0.014^2 mm per pixel (14.4mm / 1024 pixels)
pixel_size = 0.014
x0 = 512 * pixel_size # Convert from pixel based principal point to metric principal point
y0 = 512 * pixel_size
f = isd['focal_length']
M = opk_to_rotation(o,p,k)
# This is image to ground
X, Y = collinearity_inv(f, M, [XL, YL, ZL], [10.2,5.1], 1000, (x0, y0))
print('Ground: ', X, Y, 1000) # Arbitrary 1000m elevation - here is where iteration with intersection is needed.
# Now reverse! This is ground to image
# These are in mm and need to convert to pixels
x, y, f = collinearity(f, M, [XL, YL, ZL], [X, Y, 1000], [x0,y0])
print(x,y)
def opk_to_rotation(o, p, k):
    """
    Convert from Omega, Phi, Kappa (radians) to a 3x3 rotation matrix,
    composed as kappa . phi . omega.
    """
    # Build each elemental rotation as a row-wise array literal.
    rx = np.array([[1.0, 0.0, 0.0],
                   [0.0, cos(o), sin(o)],
                   [0.0, -sin(o), cos(o)]])
    ry = np.array([[cos(p), 0.0, -sin(p)],
                   [0.0, 1.0, 0.0],
                   [sin(p), 0.0, cos(p)]])
    rz = np.array([[cos(k), sin(k), 0.0],
                   [-sin(k), cos(k), 0.0],
                   [0.0, 0.0, 1.0]])
    return rz.dot(ry).dot(rx)
# This makes a great test case (Mikhail p.95 has the rotation matrix.)
o = isd['omega']
p = isd['phi']
k = isd['kappa']
# This is R, but we need t to have a proper augmented matrix
# NOTE(review): R's last column is never filled in (np.empty leaves garbage)
# and R itself is unused below -- only RC is.
R = np.empty((3,4))
R[:,:3] = opk_to_rotation(o, p, k)
# Camera-to-world 4x4: rotation block plus the sensor position as translation.
RC = np.empty((4,4))
RC[:3,:3] = opk_to_rotation(o, p, k)
RC[:3,-1] = [isd['x_sensor_origin'],
             isd['y_sensor_origin'],
             isd['z_sensor_origin']]
RC[-1] = [0,0,0,1]
# Invert to get world-to-camera, and keep the top 3x4 [R|t] block.
invRC = np.linalg.inv(RC)[:3, :]
print(invRC)
def setfocalrot(x, y, z):
    """Build a 3x4 focal-plane matrix [diag(1, -1, -1) | (x, y, z)].

    The negated y/z diagonal flips the camera axes; the last column holds
    the supplied spacecraft position.
    """
    cam = np.zeros((3, 4))
    cam[:, :3] = np.diag([1.0, -1.0, -1.0])
    cam[:, 3] = [x, y, z]
    return cam
# Arguments are spacecraft position: x, y, z
# NOTE(review): `c` is not used by the pixelloc call below (which uses
# invRC instead) -- presumably an abandoned experiment; verify before removal.
c = setfocalrot(isd['x_sensor_origin'],
                isd['y_sensor_origin'],
                isd['z_sensor_origin'])
def pixelloc(K, R, t, tx, ty):
    """Project homogeneous point *t* through extrinsics *R* and intrinsics
    *K*, returning the dehomogenized 2-vector.

    Parameters
    ----------
    K : ndarray
        (3, 3) intrinsic matrix.
    R : ndarray
        (3, 4) extrinsic [R|t] matrix.
    t : ndarray
        Length-4 homogeneous world point.
    tx, ty :
        Unused; kept for interface compatibility.  The original
        focal-plane-to-pixel mapping was never implemented -- its two
        statements were bare no-op expressions.
    """
    res = K.dot(R).dot(t)
    # Dehomogenize the first two components.  The original also did
    # res[2] /= res[-1], which only sets res[2] to 1 and is discarded by
    # the slice below.
    res[:2] /= res[2]
    return res[:2]
# pixel position on the surface: x,y,z,1
position = np.array([1116890,
-1604470,
1459570,
1])
# The above should be (ballpark) 90 and 110 I believe
"""position = np.array([1131980,
-1597990,
1455060,
1])"""
ploc = pixelloc(K, invRC, position, isd['transx'][1], isd['transy'][2])
ploc
"""
Explanation: Now with our Messenger Camera
End of explanation
"""
def ground_to_image(ground, precision):
    """Iterate g2i until the achieved precision is good enough (max 10 tries).

    NOTE(review): sketch code.  The original read `current_precision`
    before ever assigning it (UnboundLocalError) and called g2i with two
    arguments although g2i accepts one; both are fixed here.  g2i is
    assumed to return the achieved precision -- TODO confirm once g2i is
    actually implemented.
    """
    current_precision = float('inf')
    i = 0
    while current_precision > precision:
        current_precision = g2i(ground)
        i += 1
        # Safety valve: never iterate more than ~10 times.
        if i > 10:
            break
def calc_rotation_matrix(o, p, k):
    """Return a 3x4 augmented matrix [R | (0, 0, 1)] from omega/phi/kappa.

    Fixes from the original sketch: the rotation helper defined in this
    notebook is `opk_to_rotation` (`rotation_from_opk` does not exist),
    and the translation lives in the *last* column, so the assignment
    must be R[:, -1] (the original R[:, :-1] = [0, 0, 1] raised a shape
    error).  The (0, 0, 1) column looks like a placeholder translation
    -- TODO confirm the intended value.
    """
    R = np.empty((3, 4))
    R[:, :3] = opk_to_rotation(o, p, k)
    R[:, -1] = [0, 0, 1]
    return R
def g2i(ground):
    """Ground-to-image projection (unfinished sketch).

    The original cell contained incomplete assignments (``lnum =``,
    ``snum =``, ``denom =``) which were a SyntaxError and prevented the
    whole module from even importing.  The sketch is preserved below as
    comments until the collinearity terms are filled in.
    """
    # gx = ground[0]
    # gy = ground[1]
    # gz = ground[2]
    # r = calc_rotation_matrix(o, p, k)
    # This does not account for adjustments - how
    # lnum = ...
    # snum = ...
    # denom = ...
    raise NotImplementedError("g2i: projection terms are not implemented yet")
"""
Explanation: Trying the collinearity version
End of explanation
"""
|
sympy/scipy-2017-codegen-tutorial | notebooks/_35-chemical-kinetics-lambdify-deserialize.ipynb | bsd-3-clause | reactions = [
('k1', {'A': 1}, {'B': 1, 'A': -1}),
('k2', {'B': 1, 'C': 1}, {'A': 1, 'B': -1}),
('k3', {'B': 2}, {'B': -1, 'C': 1})
]
names, params = 'A B C'.split(), 'k1 k2 k3'.split()
tex_names = ['[%s]' % n for n in names]
"""
Explanation: Generating symbolic expressions
For larger reaction systems it is preferable to generate the system of ordinary differential equations from some serialized format and then generate the callback using code generation.
In this notebook we will define such a serialized format, and use it load a larger set of reactions. We represent a reaction as length 3 tuple of: (rate_const, coeff_powers, net_effect). Representing Robertson's system this way looks like this:
End of explanation
"""
# %load ../scipy2017codegen/chem.py
from operator import mul
from functools import reduce
import sympy as sym
def prod(seq):
    """Multiply the elements of *seq* together; a falsy (empty) input yields 1."""
    if not seq:
        return 1
    return reduce(mul, seq)
def mk_exprs_symbs(rxns, names):
    """Build symbolic rate expressions from serialized reactions.

    For each reaction (coeff, reactant-orders, net-stoichiometry) the
    mass-action rate coeff * prod(conc**power) is added to every affected
    species, weighted by its net stoichiometric change.

    Returns ([dy/dt expression per name], concentration symbols).
    """
    concs = sym.symbols(names, real=True, nonnegative=True)
    conc_of = dict(zip(names, concs))
    rates = dict.fromkeys(names, 0)
    for coeff, r_stoich, net_stoich in rxns:
        factors = [conc_of[species] ** power for species, power in r_stoich.items()]
        rate = sym.S(coeff) * prod(factors)
        for species, net in net_stoich.items():
            rates[species] = rates[species] + net * rate
    return [rates[n] for n in names], concs
def mk_rsys(ODEcls, reactions, names, params=(), **kwargs):
    """Instantiate *ODEcls* from the serialized reaction description."""
    exprs, symbols = mk_exprs_symbs(reactions, names)
    sympified_params = map(sym.S, params)
    return ODEcls(exprs, symbols, params=sympified_params, **kwargs)
# Enable pretty-printing of sympy expressions in the notebook, then build
# the symbolic rhs and symbols for the reaction set defined above.
sym.init_printing()
f, symbs = mk_exprs_symbs(reactions, names)
f, symbs  # notebook display
"""
Explanation: the reaction system is still defined as:
$$
A \overset{k_1}{\rightarrow} B \\
B + C \overset{k_2}{\rightarrow} A + C \\
2 B \overset{k_3}{\rightarrow} B + C
$$
We will now write a small convenience function which takes the above representation and creates symbolic expressions for the ODE system:
End of explanation
"""
# %load ../scipy2017codegen/odesys.py
from itertools import chain # Py 2.7 does not support func(*args1, *args2)
import sympy as sym
from scipy.integrate import odeint
class ODEsys(object):
    """Wrap a symbolic ODE system: lambdify the rhs and its Jacobian,
    then integrate with scipy's odeint.

    Parameters
    ----------
    f : sequence of sympy expressions
        Right-hand sides dy/dt, one per dependent variable.
    y : sequence of sympy symbols
        Dependent variables, same order as *f*.
    t : optional
        Independent variable symbol (unused by the autonomous systems here).
    params : sequence
        Free parameter symbols appearing in *f*.
    tex_names : sequence of str, optional
        LaTeX labels used by plot_result.
    lambdify : callable, optional
        Alternative lambdify implementation (defaults to sym.lambdify).
    """
    default_integrator = 'odeint'
    def __init__(self, f, y, t=None, params=(), tex_names=None, lambdify=None):
        assert len(f) == len(y), 'f is dy/dt'
        self.f = tuple(f)
        self.y = tuple(y)
        self.t = t
        self.p = tuple(params)
        self.tex_names = tex_names
        # Symbolic Jacobian df/dy, passed to odeint as Dfun.
        self.j = sym.Matrix(self.ny, 1, f).jacobian(y)
        self.lambdify = lambdify or sym.lambdify
        self.setup()
    @property
    def ny(self):
        # Number of dependent variables.
        return len(self.y)
    def setup(self):
        # Compile the symbolic rhs and Jacobian into numeric callbacks;
        # both take (y..., params...) as positional arguments.
        self.lambdified_f = self.lambdify(self.y + self.p, self.f)
        self.lambdified_j = self.lambdify(self.y + self.p, self.j)
    def f_eval(self, y, t, *params):
        # odeint-style rhs callback f(y, t, *params).
        return self.lambdified_f(*chain(y, params))
    def j_eval(self, y, t, *params):
        # odeint-style Jacobian callback.
        return self.lambdified_j(*chain(y, params))
    def integrate(self, *args, **kwargs):
        # Dispatch to integrate_<integrator>; default is odeint.
        integrator = kwargs.pop('integrator', self.default_integrator)
        return getattr(self, 'integrate_%s' % integrator)(*args, **kwargs)
    def integrate_odeint(self, tout, y0, params=(), rtol=1e-8, atol=1e-8, **kwargs):
        # Returns (yout, info) because full_output=True.
        return odeint(self.f_eval, y0, tout, args=tuple(params), full_output=True,
                      Dfun=self.j_eval, rtol=rtol, atol=atol, **kwargs)
    def print_info(self, info):
        # Report work counters from either odeint's infodict ('nfe'/'nje')
        # or another integrator's dict ('num_rhs'/'num_dls_jac_evals').
        if info is None:
            return
        nrhs = info.get('num_rhs')
        if not nrhs:
            nrhs = info['nfe'][-1]
        njac = info.get('num_dls_jac_evals')
        if not njac:
            njac = info['nje'][-1]
        print("The rhs was evaluated %d times and the Jacobian %d times" % (nrhs, njac))
    def plot_result(self, tout, yout, info=None, ax=None):
        # NOTE(review): relies on a notebook-global `plt` (matplotlib.pyplot)
        # imported by the caller; this module does not import it.
        ax = ax or plt.subplot(1, 1, 1)
        for i, label in enumerate(self.tex_names):
            ax.plot(tout, yout[:, i], label='$%s$' % label)
        ax.set_ylabel('$\mathrm{concentration\ /\ mol \cdot dm^{-3}}$')
        ax.set_xlabel('$\mathrm{time\ /\ s}$')
        ax.legend(loc='best')
        self.print_info(info)
# Integrate Robertson's system over 12 decades of time and plot on both
# linear and log axes.  Note: `%matplotlib inline` is an IPython magic and
# is not valid outside a notebook.
odesys = ODEsys(f, symbs, params=params, tex_names=tex_names)
import numpy as np
tout = np.logspace(-6, 6)
yout, info = odesys.integrate_odeint(tout, [1, 0, 0], [0.04, 1e4, 3e7], atol=1e-9, rtol=1e-9)
import matplotlib.pyplot as plt
%matplotlib inline
fig, axes = plt.subplots(1, 2, figsize=(14, 4))
odesys.plot_result(tout, yout, info, ax=axes[0])
odesys.plot_result(tout, yout, ax=axes[1])
axes[1].set_xscale('log')
axes[1].set_yscale('log')
"""
Explanation: We create a helper class to represent to ODE system.
End of explanation
"""
import json

# Load the serialized water-radiolysis reaction system (298 K, 300 Gy/s).
# The `with` block guarantees the file handle is closed; the original
# passed a bare open() to json.load and leaked it.
with open('../scipy2017codegen/data/radiolysis_300_Gy_s.json') as json_fh:
    watrad_data = json.load(json_fh)
watrad = mk_rsys(ODEsys, **watrad_data)
print(len(watrad.f), watrad.y[0], watrad.f[0])
"""
Explanation: The reason for why we went through this trouble is to be able to create a ODEsys instance from conveniently serialized data. Here is a much larger set of reactions, describing water radiolysis at 298 K and a doserate of 300 Gy/s (which is a doserate not far from that of a nuclear reactor):
End of explanation
"""
tout = np.logspace(-6, 3, 200) # close to one hour of operation
# Initial concentrations in mol/m^3: 55.4e3 for water; neutral water has
# [H+] = [OH-] = 1e-4.  Species absent from c0 start at 0.
c0 = {'H2O': 55.4e3, 'H+': 1e-4, 'OH-': 1e-4}
y0 = [c0.get(symb.name, 0) for symb in watrad.y]
# IPython magic: benchmark the integration (not valid plain Python).
%timeit watrad.integrate_odeint(tout, y0)
fig, ax = plt.subplots(1, 1, figsize=(14, 6))
# integrate_odeint returns (yout, info); star-unpack both into plot_result.
watrad.plot_result(tout, *watrad.integrate_odeint(tout, y0), ax=ax)
ax.set_xscale('log')
ax.set_yscale('log')
"""
Explanation: Values correspond to SI units, the concentration of water at 298 K is 55400 mol/m³. Neutral water contains [H+] = [HO-] = 10^-4 mol/m³:
End of explanation
"""
|
bryanfry/nyc-schools | nyc-schools_C.ipynb | gpl-3.0 | import pandas as pd
import numpy as np
import os
# Base path for the project's data files (machine-specific).
bp_data = '/Users/bryanfry/projects/proj_nyc-schools/data_files'
n_tracts = 10 # Number of closest census tracts averaged per school.
"""
Explanation: nyc-schools_C
This script averages the ACS variables for the N census tracts closest to each school, and combines these averaged variables with the school outcomes in a single dataframe (saved as a *.csv)
End of explanation
"""
# Compute average value for ACS var, given a list of geoid. Ideally perhaps the tracts should
# be weighted by population rather than using a simple mean, but probably results won't be
# much different since the census tracts are intended to have roughly equal populations.
# Compute an aggregated value for an ACS variable over several census tracts.
# Ideally the tracts would be population-weighted, but tracts are designed to
# have roughly equal populations, so a simple mean is close enough.
def calc_multitract_var(df_acs, var, geoid_list, mode='sum'):
    """Aggregate ACS variable *var* over the tracts in *geoid_list*.

    Parameters
    ----------
    df_acs : pandas.DataFrame
        Must contain a ``GEOID`` column plus the *var* column.
    var : str
        Name of the ACS variable column.
    geoid_list : sequence
        GEOID values identifying the tracts to combine.
    mode : str
        ``'sum'`` (default) returns the total; ``'avg'`` divides by
        ``len(geoid_list)`` -- including tracts with no match, preserving
        the original behavior.

    Returns
    -------
    float
        Aggregated value; tracts missing from *df_acs* contribute 0.
    """
    t = 0.0
    for g in geoid_list:
        try:
            # .loc + .iloc[0] instead of float(Series): float() on a
            # single-element Series is removed in modern pandas and would
            # have silently zeroed every value via the except clause.
            t += float(df_acs.loc[df_acs.GEOID == g, var].iloc[0])
        except (IndexError, KeyError, TypeError, ValueError):
            # Best-effort: skip missing or non-numeric tracts.  The
            # original bare `except:` also swallowed KeyboardInterrupt.
            pass
    if mode == 'avg':
        t = t / len(geoid_list)
    return t
"""
Explanation: Function to compute the average value of an ACS variable across several census tracts
End of explanation
"""
# Load school data (with 50 closest census tracts), and ACS variables for each tract
df_sch = pd.read_csv (os.path.join (bp_data, 'df_A_school_info.csv'))
df_acs = pd.read_csv (os.path.join (bp_data, 'df_B_acs_geoid.csv'))
# Drop first column of each imported dataframe (these are just redundent indices)
df_sch = df_sch.drop (df_sch.columns[0], axis = 1)
df_acs = df_acs.drop (df_acs.columns[0], axis = 1)
df_acs.head()
"""
Explanation: MAIN
End of explanation
"""
# Define a dictionary with the census variables to be added to the dataframe
dict_var = {}
acs_col_list = df_acs.columns[2:] # These are the census variables of interest
# Loop on the rows of the school file.
for c in acs_col_list:
dict_var [c] = [] # Make an empty list for each column.
# One element will be added to each list in
# the dictionary for each school# For variables which are either FRACTIONS or MEDIAN VALUES, we take the
# MEAN across the tracts. For other values (corresponging to actual number of
# respondants) we take the SUM.
for i in range (0, len (df_sch)):
geoid_list= df_sch.ix [i][9:9+n_tracts]
for i, c in enumerate (acs_col_list):
if i in [9, 10, 11, 18, 19, 20, 21, 22]: mode = 'avg'
else: mode = 'sum'
dict_var[c].append (calc_multitract_var (df_acs, var = c, geoid_list=geoid_list, mode = mode))
df_tract_avg = pd.DataFrame(data = dict_var)
df_tract_avg.head()
"""
Explanation: Now loop on the schools, and average ACS variables across census tracts
End of explanation
"""
# Side-by-side concatenation: school outcome rows plus their tract-averaged
# ACS columns (relies on both frames sharing the same row order/index).
df = pd.concat ([df_sch, df_tract_avg], axis = 1)
df.head()
"""
Explanation: Concatenate the tract-averaged data with the school outcome data
End of explanation
"""
# Build a 'concise' dataframe with only the columns of interest.
df_c = pd.DataFrame() # c -> 'concise'
# Build list of columns to copy: identifiers, the N nearest-tract geocodes,
# then the ACS demographic variables (column names copied verbatim --
# 'FRAC_FOREIN_BORN' is misspelled upstream and must stay as-is).
c_list = ['NAME','DBN','STREET','ZIPCODE','LAT','LON','COUNTY','HOOD','DISPLAY_NAME']
c_list = c_list + ['GEOCODE' + str (i).zfill(2) for i in range (0, n_tracts)]
c_list = c_list + ['2+_RACES','ASIAN','BLACK','DIFFERENT_HOUSE','DIFFERENT_HOUSE_ABROAD',\
                   'DIFFERENT_HOUSE_DIFFERENT_CITY_SAME_STATE','DIFFERENT_HOUSE_SAME_CITY',\
                   'DIFFERENT_HOUSE_US_DIFFERENT_STATE','FOREIGN_BORN_INCLUDING_NATURALIZED',\
                   'MEDIAN_AGE','MEDIAN_INCOME','MEDIAN_MONTHLY_HOUSING_COSTS','NATIVE_AMERICAN',\
                   'NATIVE_CITIZEN','NON_CITIZEN','SAME_HOUSE','TOTAL_POP?','WHITE','FRAC_MINORITY',\
                   'RENT_INCOME_RATIO','FRAC_MOVED','FRAC_NONCITIZEN','FRAC_FOREIN_BORN']
for c in c_list: df_c[c] = df[c]
# Copy and rename school outcome data
# Map verbose outcome column names ('Q_' prefix = quartile-coded variants)
# to compact upper-snake-case names; the two lists are parallel.
old_c_list = ['Total Cohort','Total Grads - % of cohort',\
              'Total Regents - % of cohort','Total Regents - % of grads','Advanced Regents - % of cohort',\
              'Advanced Regents - % of grads','Regents w/o Advanced - % of cohort',\
              'Regents w/o Advanced - % of grads','Local - % of cohort','Local - % of grads',\
              'Dropped Out - % of cohort','Q_Total Grads - % of cohort','Q_Total Regents - % of cohort',\
              'Q_Total Regents - % of grads','Q_Advanced Regents - % of cohort',\
              'Q_Advanced Regents - % of grads','Q_Regents w/o Advanced - % of cohort','Q_Local - % of cohort',\
              'Q_Local - % of grads','Q_Still Enrolled - % of cohort','Q_Dropped Out - % of cohort']
new_c_list = ['TOTAL_COHORT','GRADS_%','REGENTS_%_COHORT','REGENTS_%_GRADS'\
              ,'ADV_REGENTS_%_COHORT','ADV_REGENTS_%_GRADS','REG_REGENTS_%_COHORT','REG_REGENTS_%_GRADS'\
              ,'LOCAL_%_COHORT','LOCAL_%_GRADS','DROPPED_OUT_%','Q_GRADS_%',\
              'Q_REGENTS_%_COHORT','Q_REGENTS_%_GRADS','Q_ADV_REGENTS_%_COHORT',\
              'Q_ADV_REGENTS_%_GRADS','Q_REG_REGENTS_%_COHORT','Q_LOCAL_%_COHORT',\
              'Q_LOCAL_%_GRADS','Q_STILL_ENROLLED_%','Q_DROPPED_OUT_%']
for old_c, new_c in zip (old_c_list, new_c_list):
    df_c[new_c] = df[old_c]
# There are some empties -- drop any row containing NaN before saving.
df_c = df_c.dropna()
# Save the 'concise' dataframe; the filename encodes how many tracts
# were averaged (e.g. df_C_sch_acs_NTract=10.csv).
fp_out = os.path.join (bp_data, 'df_C_sch_acs_NTract=' + str (n_tracts).zfill(2) + '.csv')
df_c.to_csv (fp_out)
"""
Explanation: Finally clean up some of column names, and eliminate some that will not be used
End of explanation
"""
|
ESGF/esgf-pyclient | notebooks/examples/search.ipynb | bsd-3-clause | from pyesgf.search import SearchConnection
# Connect to the CEDA index node; distrib=True federates the query across
# all ESGF index nodes (see the facets=* caveat in the text below).
conn = SearchConnection('http://esgf-index1.ceda.ac.uk/esg-search',
                        distrib=True)
"""
Explanation: Examples of pyesgf.search usage
Prelude:
End of explanation
"""
# Explicit facet list -- avoids the unreliable facets='*' default for
# distributed searches.
facets='project,experiment_family'
"""
Explanation: Warning: don't use default search with facets=*.
This behavior is kept for backward-compatibility, but ESGF indexes might not
successfully perform a distributed search when this option is used, so some
results may be missing. For full results, it is recommended to pass a list of
facets of interest when instantiating a context object. For example,
ctx = conn.new_context(facets='project,experiment_id')
Only the facets that you specify will be present in the facets_counts dictionary.
This warning is displayed when a distributed search is performed while using the
facets=* default, a maximum of once per context object. To suppress this warning,
set the environment variable ESGF_PYCLIENT_NO_FACETS_STAR_WARNING to any value
or explicitly use conn.new_context(facets='*')
End of explanation
"""
# Free-text search for 'humidity' within CMIP5, then inspect how the hits
# distribute over the experiment_family facet.
ctx = conn.new_context(project='CMIP5', query='humidity', facets=facets)
ctx.hit_count
ctx.facet_counts['experiment_family']
"""
Explanation: Find how many datasets containing humidity in a given experiment family:
End of explanation
"""
# Local (non-distributed) search by a wildcarded dataset id, then take the
# first matching dataset's first file and print its download URL.
conn = SearchConnection('http://esgf-index1.ceda.ac.uk/esg-search', distrib=False)
ctx = conn.new_context(facets=facets)
dataset_id_pattern = "cmip5.output1.MOHC.HadGEM2-CC.historical.mon.atmos.Amon.*"
results = ctx.search(query="id:%s" % dataset_id_pattern)
len(results)
files = results[0].file_context().search()
len(files)
download_url = files[0].download_url
print(download_url)
"""
Explanation: Search using a partial ESGF dataset ID (and get first download URL):
End of explanation
"""
# Narrow a decadal-experiment search, then fetch the OpenDAP URL of the
# first aggregation of the first matching dataset.
conn = SearchConnection('http://esgf-data.dkrz.de/esg-search', distrib=False)
ctx = conn.new_context(project='CMIP5', model='MPI-ESM-LR', experiment='decadal2000', time_frequency='day')
print('Hits: {}, Realms: {}, Ensembles: {}'.format(
    ctx.hit_count,
    ctx.facet_counts['realm'],
    ctx.facet_counts['ensemble']))
# constrain() returns a new, further-restricted context.
ctx = ctx.constrain(realm='atmos', ensemble='r1i1p1')
ctx.hit_count
result = ctx.search()[0]
agg_ctx = result.aggregation_context()
agg = agg_ctx.search()[0]
print(agg.opendap_url)
"""
Explanation: Find the OpenDAP URL for an aggregated dataset:
End of explanation
"""
# List download URLs for every file of the first obs4MIPs dataset found.
conn = SearchConnection('http://esgf-data.dkrz.de/esg-search', distrib=False)
ctx = conn.new_context(project='obs4MIPs')
ctx.hit_count
ds = ctx.search()[0]
files = ds.file_context().search()
len(files)
for f in files:
    print(f.download_url)
"""
Explanation: Find download URLs for all files in a dataset:
End of explanation
"""
# Dataset search that includes a temporal range directly in the context.
conn = SearchConnection('http://esgf-index1.ceda.ac.uk/esg-search', distrib=False)
ctx = conn.new_context(
    project="CMIP5", model="HadGEM2-ES",
    time_frequency="mon", realm="atmos", ensemble="r1i1p1", latest=True,
    from_timestamp="2100-12-30T23:23:59Z", to_timestamp="2200-01-01T00:00:00Z")
ctx.hit_count
"""
Explanation: Define a search for datasets that includes a temporal range:
End of explanation
"""
# Equivalent approach: search without the time bounds, then apply them
# afterwards via constrain().
ctx = conn.new_context(
    project="CMIP5", model="HadGEM2-ES",
    time_frequency="mon", realm="atmos", ensemble="r1i1p1", latest=True)
ctx.hit_count
ctx = ctx.constrain(from_timestamp = "2100-12-30T23:23:59Z", to_timestamp = "2200-01-01T00:00:00Z")
ctx.hit_count
"""
Explanation: Or do the same thing by searching without temporal constraints and then applying the constraint:
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/cmcc/cmip6/models/cmcc-esm2-hr5/ocnbgchem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cmcc', 'cmcc-esm2-hr5', 'ocnbgchem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocnbgchem
MIP Era: CMIP6
Institute: CMCC
Source ID: CMCC-ESM2-HR5
Topic: Ocnbgchem
Sub-Topics: Tracers.
Properties: 65 (37 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:50
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
4. Key Properties --> Transport Scheme
5. Key Properties --> Boundary Forcing
6. Key Properties --> Gas Exchange
7. Key Properties --> Carbon Chemistry
8. Tracers
9. Tracers --> Ecosystem
10. Tracers --> Ecosystem --> Phytoplankton
11. Tracers --> Ecosystem --> Zooplankton
12. Tracers --> Disolved Organic Matter
13. Tracers --> Particules
14. Tracers --> Dic Alkalinity
1. Key Properties
Ocean Biogeochemistry key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean biogeochemistry model code (PISCES 2.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
"""
Explanation: 1.4. Elemental Stoichiometry
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe elemental stoichiometry (fixed, variable, mix of the two)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Elemental Stoichiometry Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe which elements have fixed/variable stoichiometry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all prognostic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all diagnotic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Damping
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any tracer damping used (such as artificial correction or relaxation to climatology,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Time stepping method for passive tracers transport in ocean biogeochemistry
2.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for passive tracers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for passive tracers (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Time stepping framework for biology sources and sinks in ocean biogeochemistry
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for biology sources and sinks
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for biology sources and sinks (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Transport Scheme
Transport scheme in ocean biogeochemistry
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transport scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Transport scheme used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Use Different Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Decribe transport scheme if different than that of ocean model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Boundary Forcing
Properties of biogeochemistry boundary forcing
5.1. Atmospheric Deposition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how atmospheric deposition is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
"""
Explanation: 5.2. River Input
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how river input is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Sediments From Boundary Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are speficied from boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Sediments From Explicit Model
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are speficied from explicit sediment model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Gas Exchange
*Properties of gas exchange in ocean biogeochemistry *
6.1. CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.2. CO2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe CO2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.3. O2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is O2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. O2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe O2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.5. DMS Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is DMS gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. DMS Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify DMS gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.7. N2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.8. N2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.9. N2O Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2O gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.10. N2O Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2O gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.11. CFC11 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC11 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.12. CFC11 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC11 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.13. CFC12 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC12 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.14. CFC12 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC12 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.15. SF6 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is SF6 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.16. SF6 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify SF6 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.17. 13CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 13CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.18. 13CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 13CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.19. 14CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 14CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.20. 14CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 14CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.21. Other Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any other gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Carbon Chemistry
Properties of carbon chemistry biogeochemistry
7.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how carbon chemistry is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.2. PH Scale
Is Required: FALSE Type: ENUM Cardinality: 0.1
If NOT OMIP protocol, describe pH scale.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Constants If Not OMIP
Is Required: FALSE Type: STRING Cardinality: 0.1
If NOT OMIP protocol, list carbon chemistry constants.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Tracers
Ocean biogeochemistry tracers
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of tracers in ocean biogeochemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Sulfur Cycle Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sulfur cycle modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Nutrients Present
Is Required: TRUE Type: ENUM Cardinality: 1.N
List nutrient species present in ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Nitrous Species If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous species.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.5. Nitrous Processes If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous processes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Tracers --> Ecosystem
Ecosystem properties in ocean biogeochemistry
9.1. Upper Trophic Levels Definition
Is Required: TRUE Type: STRING Cardinality: 1.1
Definition of upper trophic level (e.g. based on size) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Upper Trophic Levels Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Define how upper trophic level are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
"""
Explanation: 10. Tracers --> Ecosystem --> Phytoplankton
Phytoplankton properties in ocean biogeochemistry
10.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of phytoplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Pft
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton functional types (PFT) (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Tracers --> Ecosystem --> Zooplankton
Zooplankton properties in ocean biogeochemistry
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of zooplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Zooplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Tracers --> Disolved Organic Matter
Disolved organic matter properties in ocean biogeochemistry
12.1. Bacteria Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there bacteria representation ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Lability
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe treatment of lability in dissolved organic matter
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Tracers --> Particules
Particulate carbon properties in ocean biogeochemistry
13.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is particulate carbon represented in ocean biogeochemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, type(s) of particulate matter taken into account
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
"""
Explanation: 13.3. Size If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe if a particule size spectrum is used to represent distribution of particules in water volume
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.4. Size If Discrete
Is Required: FALSE Type: STRING Cardinality: 0.1
If prognostic and discrete size, describe which size classes are used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Sinking Speed If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, method for calculation of sinking speed of particules
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
"""
Explanation: 14. Tracers --> Dic Alkalinity
DIC and alkalinity properties in ocean biogeochemistry
14.1. Carbon Isotopes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which carbon isotopes are modelled (C13, C14)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.2. Abiotic Carbon
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is abiotic carbon modelled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
"""
Explanation: 14.3. Alkalinity
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is alkalinity modelled ?
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.24/_downloads/299b3deaa8eb66e88d34f06090d06628/evoked_ers_source_power.ipynb | bsd-3-clause | # Authors: Luke Bloy <luke.bloy@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD-3-Clause
import os.path as op
import numpy as np
import mne
from mne.cov import compute_covariance
from mne.datasets import somato
from mne.time_frequency import csd_morlet
from mne.beamformer import (make_dics, apply_dics_csd, make_lcmv,
apply_lcmv_cov)
from mne.minimum_norm import (make_inverse_operator, apply_inverse_cov)
print(__doc__)
"""
Explanation: Compute evoked ERS source power using DICS, LCMV beamformer, and dSPM
Here we examine 3 ways of localizing event-related synchronization (ERS) of
beta band activity in this dataset: somato-dataset using
:term:DICS, :term:LCMV beamformer, and :term:dSPM applied to active and
baseline covariance matrices.
End of explanation
"""
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
'sub-{}_task-{}_meg.fif'.format(subject, task))
# crop to 5 minutes to save memory
raw = mne.io.read_raw_fif(raw_fname).crop(0, 300)
# We are interested in the beta band (12-30 Hz)
raw.load_data().filter(12, 30)
# The DICS beamformer currently only supports a single sensor type.
# We'll use the gradiometers in this example.
picks = mne.pick_types(raw.info, meg='grad', exclude='bads')
# Read epochs
events = mne.find_events(raw)
epochs = mne.Epochs(raw, events, event_id=1, tmin=-1.5, tmax=2, picks=picks,
preload=True, decim=3)
# Read forward operator and point to freesurfer subject directory
fname_fwd = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
'sub-{}_task-{}-fwd.fif'.format(subject, task))
subjects_dir = op.join(data_path, 'derivatives', 'freesurfer', 'subjects')
fwd = mne.read_forward_solution(fname_fwd)
"""
Explanation: Reading the raw data and creating epochs:
End of explanation
"""
rank = mne.compute_rank(epochs, tol=1e-6, tol_kind='relative')
active_win = (0.5, 1.5)
baseline_win = (-1, 0)
baseline_cov = compute_covariance(epochs, tmin=baseline_win[0],
tmax=baseline_win[1], method='shrunk',
rank=rank, verbose=True)
active_cov = compute_covariance(epochs, tmin=active_win[0], tmax=active_win[1],
method='shrunk', rank=rank, verbose=True)
# Weighted averaging is already in the addition of covariance objects.
common_cov = baseline_cov + active_cov
mne.viz.plot_cov(baseline_cov, epochs.info)
"""
Explanation: Compute covariances
ERS activity starts at 0.5 seconds after stimulus onset. Because these
data have been processed by MaxFilter directly (rather than MNE-Python's
version), we have to be careful to compute the rank with a more conservative
threshold in order to get the correct data rank (64). Once this is used in
combination with an advanced covariance estimator like "shrunk", the rank
will be correctly preserved.
End of explanation
"""
def _gen_dics(active_win, baseline_win, epochs):
    """Localize beta-band ERS with a DICS beamformer.

    One set of DICS spatial filters is computed from the cross-spectral
    density (CSD) of the whole window, then applied to the baseline and
    active CSDs separately; the active/baseline source power ratio is
    returned as a source estimate.

    NOTE(review): reads ``fwd`` and ``rank`` from the enclosing module
    scope rather than taking them as parameters.
    """
    # 9 log-spaced frequencies spanning the beta band (12-30 Hz).
    freqs = np.logspace(np.log10(12), np.log10(30), 9)
    # CSD over the full analysis window, used only to build the filters.
    csd = csd_morlet(epochs, freqs, tmin=-1, tmax=1.5, decim=20)
    csd_baseline = csd_morlet(epochs, freqs, tmin=baseline_win[0],
                              tmax=baseline_win[1], decim=20)
    csd_ers = csd_morlet(epochs, freqs, tmin=active_win[0], tmax=active_win[1],
                         decim=20)
    filters = make_dics(epochs.info, fwd, csd.mean(), pick_ori='max-power',
                        reduce_rank=True, real_filter=True, rank=rank)
    # Apply the same filters to both conditions so the ratio is meaningful.
    stc_base, freqs = apply_dics_csd(csd_baseline.mean(), filters)
    stc_act, freqs = apply_dics_csd(csd_ers.mean(), filters)
    # In-place division: relative power (active / baseline).
    stc_act /= stc_base
    return stc_act
# generate lcmv source estimate
def _gen_lcmv(active_cov, baseline_cov, common_cov):
    """Localize beta-band ERS with an LCMV beamformer on covariances.

    A single spatial filter is built from the common covariance and then
    applied to the active and baseline covariance matrices; the ratio of
    the two source power estimates is returned.

    NOTE(review): reads ``epochs`` and ``fwd`` from the enclosing module
    scope rather than taking them as parameters.
    """
    spatial_filter = make_lcmv(epochs.info, fwd, common_cov, reg=0.05,
                               noise_cov=None, pick_ori='max-power')
    power_baseline = apply_lcmv_cov(baseline_cov, spatial_filter)
    power_active = apply_lcmv_cov(active_cov, spatial_filter)
    # Relative power: active divided by baseline, in place.
    power_active /= power_baseline
    return power_active
# generate mne/dSPM source estimate
def _gen_mne(active_cov, baseline_cov, common_cov, fwd, info, method='dSPM'):
    """Localize beta-band ERS with a minimum-norm method (dSPM by default).

    A single inverse operator is assembled from the common covariance and
    applied to the active and baseline covariances; the active/baseline
    source power ratio is returned.
    """
    inv_op = make_inverse_operator(info, fwd, common_cov)
    power_act = apply_inverse_cov(active_cov, info, inv_op,
                                  method=method, verbose=True)
    power_base = apply_inverse_cov(baseline_cov, info, inv_op,
                                   method=method, verbose=True)
    # Relative power: active divided by baseline, in place.
    power_act /= power_base
    return power_act
# Compute source estimates
stc_dics = _gen_dics(active_win, baseline_win, epochs)
stc_lcmv = _gen_lcmv(active_cov, baseline_cov, common_cov)
stc_dspm = _gen_mne(active_cov, baseline_cov, common_cov, fwd, epochs.info)
"""
Explanation: Compute some source estimates
Here we will use DICS, LCMV beamformer, and dSPM.
See ex-inverse-source-power for more information about DICS.
End of explanation
"""
brain_dics = stc_dics.plot(
hemi='rh', subjects_dir=subjects_dir, subject=subject,
time_label='DICS source power in the 12-30 Hz frequency band')
"""
Explanation: Plot source estimates
DICS:
End of explanation
"""
brain_lcmv = stc_lcmv.plot(
hemi='rh', subjects_dir=subjects_dir, subject=subject,
time_label='LCMV source power in the 12-30 Hz frequency band')
"""
Explanation: LCMV:
End of explanation
"""
brain_dspm = stc_dspm.plot(
hemi='rh', subjects_dir=subjects_dir, subject=subject,
time_label='dSPM source power in the 12-30 Hz frequency band')
"""
Explanation: dSPM:
End of explanation
"""
|
jimregan/tesseract-gle-uncial | Update_gle_uncial_traineddata_for_Tesseract_4.ipynb | apache-2.0 | !wget https://github.com/jimregan/tesseract-gle-uncial/releases/download/v0.1beta2/gle_uncial.traineddata
"""
Explanation: <a href="https://colab.research.google.com/github/jimregan/tesseract-gle-uncial/blob/master/Update_gle_uncial_traineddata_for_Tesseract_4.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Grab this for later
End of explanation
"""
!apt-get install libicu-dev libpango1.0-dev libcairo2-dev libleptonica-dev
"""
Explanation: Install dependencies
End of explanation
"""
!git clone https://github.com/tesseract-ocr/tesseract
import os
os.chdir('tesseract')
!sh autogen.sh
!./configure --disable-graphics
!make -j 8
!make install
!ldconfig
!make training
!make training-install
"""
Explanation: Clone, compile and set up Tesseract
End of explanation
"""
import os
os.chdir('/content')
!git clone https://github.com/jimregan/tesseract-gle-uncial/
!apt-get install lynx
"""
Explanation: Grab some things to scrape the RIA corpus
End of explanation
"""
! for i in A B C D E F G H I J K L M N O P Q R S T U V W X Y Z;do lynx -dump "http://corpas.ria.ie/index.php?fsg_function=1&fsg_page=$i" |grep http://corpas.ria.ie|awk '{print $NF}' >> list;done
!grep 'function=3' list |sort|uniq|grep corpas.ria|sed -e 's/function=3/function=5/' > input
!wget -x -c -i input
!mkdir text
!for i in corpas.ria.ie/*;do id=$(echo $i|awk -F'=' '{print $NF}');cat $i | perl /content/tesseract-gle-uncial/scripts/extract-ria.pl > text/$id.txt;done
"""
Explanation: Scrape the RIA corpus
End of explanation
"""
!cat text/*.txt|grep -v '^$' > ria-raw.txt
"""
Explanation: Get the raw corpus in a single text file
End of explanation
"""
!gzip ria-raw.txt
"""
Explanation: Compress the raw text; this can be downloaded through the file browser on the left, so the scraping steps can be skipped in future
End of explanation
"""
!gzip -d ria-raw.txt.gz
"""
Explanation: ...and can be re-added using the upload feature in the file browser
End of explanation
"""
import os
os.chdir('/content')
!git clone https://github.com/tesseract-ocr/langdata
!cat ria-raw.txt | perl /content/tesseract-gle-uncial/scripts/toponc.pl > ria-ponc.txt
!mkdir genwlout
!perl /content/tesseract-gle-uncial/scripts/genlangdata.pl -i ria-ponc.txt -d genwlout -p gle_uncial
import os
os.chdir('/content/genwlout')
#!for i in gle_uncial.word.bigrams gle_uncial.wordlist gle_uncial.numbers gle_uncial.punc; do cat $i.unsorted | awk -F'\t' '{print $1}' | sort | uniq > $i.sorted;done
!for i in gle_uncial.word.bigrams gle_uncial.wordlist gle_uncial.numbers gle_uncial.punc; do cat $i.sorted /content/langdata/gle_uncial/$i | sort | uniq > $i;done
!for i in gle_uncial.word.bigrams gle_uncial.wordlist gle_uncial.numbers gle_uncial.punc; do cp $i /content/langdata/gle_uncial/;done
Grab the fonts
import os
os.chdir('/content')
!mkdir fonts
os.chdir('fonts')
!wget -i /content/tesseract-gle-uncial/fonts.txt
!for i in *.zip; do unzip $i;done
"""
Explanation: This next part is so I can update the langdata files
End of explanation
"""
os.chdir('/content')
!mkdir unpack
!combine_tessdata -u /content/gle_uncial.traineddata unpack/gle_uncial.
os.chdir('unpack')
!for i in gle_uncial.word.bigrams gle_uncial.wordlist gle_uncial.numbers gle_uncial.punc; do cp /content/genwlout/$i .;done
!wordlist2dawg gle_uncial.numbers gle_uncial.lstm-number-dawg gle_uncial.lstm-unicharset
!wordlist2dawg gle_uncial.punc gle_uncial.lstm-punc-dawg gle_uncial.lstm-unicharset
!wordlist2dawg gle_uncial.wordlist gle_uncial.lstm-word-dawg gle_uncial.lstm-unicharset
!rm gle_uncial.numbers gle_uncial.word.bigrams gle_uncial.punc gle_uncial.wordlist
os.chdir('/content')
!mv gle_uncial.traineddata gle_uncial.traineddata.orig
!combine_tessdata unpack/gle_uncial.
os.chdir('/content')
!bash /content/tesseract/src/training/tesstrain.sh
!text2image --fonts_dir fonts --list_available_fonts
!cat genwlout/gle_uncial.wordlist.unsorted|awk -F'\t' '{print $2 "\t" $1'}|sort -nr > freqlist
!cat freqlist|awk -F'\t' '{print $2}'|grep -v '^$' > wordlist
!cat ria-ponc.txt|sort|uniq|head -n 400000 > gle_uncial.training_text
!cp unpack/gle_uncial.traineddata /usr/share/tesseract-ocr/4.00/tessdata
!cp gle_uncial.trainingtext langdata/gle_uncial/
!mkdir output
!bash tesseract/src/training/tesstrain.sh --fonts_dir fonts --lang gle_uncial --linedata_only --noextract_font_properties --langdata_dir langdata --tessdata_dir /usr/share/tesseract-ocr/4.00/tessdata --output_dir output
"""
Explanation: Generate
End of explanation
"""
|
DallasTrinkle/Onsager | examples/GF-RBC.ipynb | mit | import sys
sys.path.extend(['.','./Vacancy'])
import numpy as np
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
%matplotlib inline
import scipy.sparse
import itertools
from numba import jit, njit, prange, guvectorize # faster runtime with update routines
from scipy.misc import comb
# from sympy import *
import onsager.PowerExpansion as PE
import onsager.crystal as crystal
import onsager.crystalStars as stars
import onsager.GFcalc as GFcalc
from tqdm import tnrange, tqdm_notebook
# Turn off or on to run optional testing code in notebook:
# Also turns on / off progress bars
__TESTING__ = False
"""
Explanation: Residual bias correction of Mean-field GF
This is the full computation of the residual bias correction for our (mean-field) GF solution for the percolation problem (immobile "solute" with vacancy diffusion). It work through all of the matrix averages "analytically" (storing them as polynomials in the concentration of the immobile solute, $c_\text{B}$), and then brings everything together to express the residual bias correction as an analytic function with numerical coefficients for the square lattice.
End of explanation
"""
def calc_P(N):
    """
    Returns the probability matrix P[n,c] where the probability of seeing `n` atoms
    of type B in `N` sites is sum(c=0..N, x^c P[n,c]); i.e., row n holds the
    coefficients of the polynomial x^n (1-x)^(N-n) expanded in powers of x.

    :param N: total number of sites
    :returns P[n,c]: matrix of probabilities, n=0..N, c=0..N
    """
    # math.comb gives exact integer binomial coefficients; the original
    # scipy.misc.comb has been removed from modern SciPy releases.
    from math import comb
    P = np.zeros((N+1, N+1), dtype=int)
    for n in range(N+1):
        Nn = N-n
        # x^n (1-x)^Nn = sum_{j=0..Nn} (Nn choose j) (-1)^j x^(n+j)
        for j in range(Nn+1):
            P[n, n+j] = (-1)**j * comb(Nn, j)
    return P
if __TESTING__:
calc_P(4)
"""
Explanation: Now, we need to expand out our probability factors. Let $x$ be the concentration of solute B; imagine we have $N$ sites possible. Then, if there are $n$ B atoms, the probability factor is
$$P(n;N) = x^n (1-x)^{N-n} = x^n \sum_{j=0}^{N-n} \frac{(N-n)!}{j!(N-n-j)!} (-x)^j
= \sum_{j=0}^{N-n} \frac{(N-n)!}{j!(N-n-j)!} (-1)^j x^{n+j}$$
The factorial term is $N-n$ choose $j$, a binomial coefficient (available as scipy.special.comb, formerly scipy.misc.comb).
We want to construct a probability matrix P[n,c] such that $P(n;N)$ is written as a sum over $x^c$ terms; $c=0\ldots N$.
End of explanation
"""
# N: number of neighbor sites in the cluster around the vacancy; the
# vacancy's own site (index 0) is prepended explicitly below.
N = 24
# prob[n, c]: coefficient of x^c in the occupation probability of a
# configuration with exactly n B atoms (x = solute concentration).
prob = calc_P(N)
# All 2^N occupation states; each is a 0/1 vector over N+1 sites, with a
# leading 0 for the vacancy's initial site (always unoccupied).
states = np.array([(0,) + st for st in itertools.product((0,1), repeat=N)])
# nB[s]: number of B atoms in state s.
nB = np.sum(states, axis=1)
if __TESTING__:
    # normalization check: summing the probability polynomials over all
    # 2^N states should give exactly 1, i.e. [1, 0, 0, ..., 0].
    norm = np.zeros(N+1, dtype=int)
    for n in tqdm_notebook(nB):
        norm += prob[n]
    print(norm)
states.shape
# Pstates[s]: probability polynomial (coefficients in x) for state s.
Pstates = np.array([prob[n] for n in nB])
"""
Explanation: Normalization check: construct the $2^N$ states, and see if it averages to 1. Each state is a vector of length $N$, with entries that are 0 (A) or 1 (B). Here, we explicitly build our state space, and also do a quick count to determine $n_\text{B}$ for each state. Note: we prepend a value of 0, since this corresponds to the initial location of the vacancy.
New version: we now generate group operations for the square lattice, and take advantage of those to reduce the computational time.
End of explanation
"""
# Nearest-neighbor jump vectors on the square lattice.
dxlist = [np.array([1,0]), np.array([-1,0]), np.array([0,1]), np.array([0,-1])]
# Maximum number of vacancy jumps considered (powers of W up to W^Njump).
Njump = 3
# Breadth-first construction of all sites reachable in <= Njump jumps from
# the origin; sitedict maps a position tuple -> site index.
sites = [np.array([0,0])]
sitedict = {(0,0): 0}
lastsites = sites.copy()
for nj in range(Njump):
    newsites = []
    for dx in dxlist:
        for x in lastsites:
            y = x+dx
            yt = tuple(y)
            if yt not in sitedict:
                sitedict[yt] = len(sites)
                sites.append(y)
                newsites.append(y)
    lastsites = newsites
Nsite = len(sites)
# Nsite0: sites reachable in <= Njump-1 jumps; only these need jump tables,
# since anything they jump to is still inside `sites`.
Nsite0 = len(sites) - len(lastsites)
sites0 = sites[:Nsite0]
# jumplist[n]: site indices reached from site n by each jump in dxlist.
jumplist = []
for x in sites0:
    jumplist.append([sitedict[tuple(x+dx)] for dx in dxlist])
if __TESTING__:
    print(jumplist)
# Basis functions: all distinct difference vectors between pairs of sites;
# basisdict maps a difference tuple -> basis-function index.
basisfunc, basisdict = [], {}
for x in sites:
    for y in sites:
        d = x-y
        dt = tuple(d)
        if dt not in basisdict:
            basisdict[dt] = len(basisfunc)
            basisfunc.append(d)
Nbasis = len(basisfunc)
"""
Explanation: Now, we do some analysis by constructing up to 3 jumps (corresponding to third power of our transition rate matrix $W$). We do this analysis by setting up some bookkeeping:
We work with a list of displacement vectors [dx_0, dx_1, dx_2, dx_3]
We construct the list of positions for the vacancy
For each position, we identify the possible jumps (though we only need to do this
for positions that are reachable in 0-2 jumps.
We construct a list of possible basis functions: these are all possible
differences of vacancy positions
Finally, for each position, we identify which position corresponds to each possible
basis function, as well a list of all basis functions that are not in the state.
This is all sufficient to construct a sparse version of $W$ (and $\Gamma$) for a given state $\chi$.
End of explanation
"""
# For each vacancy position x, classify every basis function b by where the
# shifted point x+b lands:
#   chibasisfound[x]: b -> site index, when x+b is inside the explicit cluster
#   chibasismiss[x]:  list of b for which x+b is outside the cluster
#   chibasismissmatch[x]: b -> basis index of x+b, for b outside the cluster
#                         but with x+b still a valid basis vector
#   chibasisfar[x]:   b -> basis index of x+b whenever x+b is a basis vector
chibasisfound, chibasismiss, chibasismissmatch = [], [], []
chibasisfar = []
for x in sites:
    xbdict = {}
    xbmiss = []
    xbmissmatch = {}
    xbfar = {}
    for bindex, b in enumerate(basisfunc):
        bt = tuple(b)
        y = x+b
        yt = tuple(y)
        if yt in basisdict:
            xbfar[bindex] = basisdict[yt]
        if yt in sitedict:
            xbdict[bindex] = sitedict[yt]
        else:
            xbmiss.append(bindex)
        if bt not in sitedict and yt in basisdict:
            xbmissmatch[bindex] = basisdict[yt]
    chibasisfound.append(xbdict)
    chibasismiss.append(xbmiss)
    chibasismissmatch.append(xbmissmatch)
    chibasisfar.append(xbfar)
# make a set of "outside" and "inside" basis functions:
basisout = set([tuple(basisfunc[bindex]) for bindex in chibasismiss[0]])
basisin = set([tuple(bv) for bv in basisfunc if tuple(bv) not in basisout])
# converting chibasisfound and chibasismiss into matrices:
# (note: for this geometry N+1 happens to equal Nsite, so the first axis
# below indexes sites -- TODO confirm this is intentional rather than N+1)
chibasisfound_mat = np.zeros((N+1, Nbasis, N+1), dtype=int)
# chibasisfound_sparse = [scipy.sparse.csr_matrix((Nbasis, N+1), dtype=int)
#                         for n in range(N+1)]
chibasismiss_mat = np.zeros((N+1, Nbasis), dtype=int)
chibasismissmatch_mat = np.zeros((Nbasis, Nbasis, N+1), dtype=int)
chibasisfar_mat = np.zeros((Nbasis, Nbasis, N+1), dtype=int)
for n, cbf, cbm, cbmm, cbfar in zip(itertools.count(), chibasisfound,
                                    chibasismiss, chibasismissmatch, chibasisfar):
    for bindex in cbm:
        chibasismiss_mat[n, bindex] = 1
    for bindex, siteindex in cbf.items():
        chibasisfound_mat[n, bindex, siteindex] = 1
#         chibasisfound_sparse[n][bindex, siteindex] = 1
    for bindex, siteindex in cbmm.items():
        chibasismissmatch_mat[bindex, siteindex, n] = 1
    for bindex, siteindex in cbfar.items():
        chibasisfar_mat[bindex, siteindex, n] = 1
"""
Explanation: Some matrices and lists to manage conversion between sites and basis functions.
We also include a matrix that corresponds to "matching" basis functions as a function of endstate $x$. This is used to correct the outer product for "missing" basis functions, for when the missing basis functions map onto identical sites.
End of explanation
"""
# The 8 point-group operations of the square: identity, rotations by
# 90/180/270 degrees, and the four mirror planes.
groupops = [np.array([[1,0],[0,1]]), np.array([[0,-1],[1,0]]),
            np.array([[-1,0],[0,-1]]), np.array([[0,1],[-1,0]]),
            np.array([[-1,0],[0,1]]), np.array([[1,0],[0,-1]]),
            np.array([[0,-1],[-1,0]]), np.array([[0,1],[1,0]])]
# Permutation-matrix representations of each group op, acting on site
# indices (sitegroupops) and on basis-function indices (basisgroupops).
sitegroupops, basisgroupops = [], []
for g in groupops:
    sg = np.zeros([Nsite, Nsite], dtype=int)
    bg = np.zeros([Nbasis, Nbasis], dtype=int)
    for n, x in enumerate(sites):
        yt = tuple(np.dot(g, x))
        sg[sitedict[yt], n] = 1
    for n, x in enumerate(basisfunc):
        yt = tuple(np.dot(g, x))
        bg[basisdict[yt], n] = 1
    sitegroupops.append(sg)
    basisgroupops.append(bg)
foundstates = set([])
binary = np.array([2**n for n in range(Nsite)])
symmstateslist, symmPlist = [], []
for st, P in tqdm_notebook(zip(states, Pstates), total=(2**N), disable=not __TESTING__):
bc = np.dot(st, binary)
if bc not in foundstates:
symmstateslist.append(st)
equivset = set([np.dot(np.dot(g, st), binary) for g in sitegroupops])
foundstates.update(equivset)
symmPlist.append(len(equivset)*P)
symmstates = np.array(symmstateslist)
symmPstates = np.array(symmPlist)
symmstates.shape
if __TESTING__:
np.sum(symmPstates, axis=0)
"""
Explanation: Group operation simplification
For our 8 group operations, corresponding to the point group operations on a square, we're going to make a reduced state list that only contains one symmetry-unique representative. This requires mapping the group operations on Cartesian coordinates into corresponding group operations on our sites, and our basis functions.
End of explanation
"""
biasvec_mat = np.zeros((2, N+1), dtype=int)
for j, dx in enumerate(dxlist):
biasvec_mat[:, j+1] -= dx
if __TESTING__:
print(np.dot(biasvec_mat, states[8388608]), states[8388608])
def symmetrize(mat, groupops0, groupops1):
    """
    Symmetrize the first two axes of `mat` over a group.

    For each paired operation (g0, g1), the first axis of `mat` is
    transformed by g0 and the second by g1; the results are averaged
    over all operations.  Any trailing axes are carried along unchanged.
    """
    accum = np.zeros(mat.shape)
    for g0, g1 in zip(groupops0, groupops1):
        # out[i, j, ...] = sum_{a,b} g0[i,a] * g1[j,b] * mat[a, b, ...]
        accum += np.einsum('ia,jb,ab...->ij...', g0, g1, mat)
    return accum / len(groupops0)
"""
Explanation: Now, we need symmetrized versions of a lot of our information from above, in order to properly account for all of the symmetrized versions of our basis functions. This includes
Computation of bias function times a basis function
Computation of two basis functions
Inside/inside
Inside/outside
Outside/outside
Outside/outside matching
We can group these in terms of what factor of concentration goes in front.
End of explanation
"""
@njit(nogil=True, parallel=True)
def tripleouterupdate(summand, A, B, C):
    """Update summand[i,j,k] += A[i]*B[j]*C[k]

    In-place accumulation of the outer product of three 1-D vectors;
    numba-compiled with parallel loops since this is the inner kernel
    of the state-averaging loop.
    """
    I, = A.shape
    J, = B.shape
    K, = C.shape
    for i in prange(I):
        for j in prange(J):
            for k in prange(K):
                summand[i, j, k] += A[i]*B[j]*C[k]
@njit(nogil=True, parallel=True)
def matrixouterupdate(summand, A, B):
    """Update summand[i,j,k] += A[i, j]*B[k]

    In-place accumulation of the outer product of a matrix and a vector.
    """
    I,J = A.shape
    K, = B.shape
    for i in prange(I):
        for j in prange(J):
            for k in prange(K):
                summand[i, j, k] += A[i,j]*B[k]
"""
Explanation: Efficient matrix operations
Some jit functions via numba to make operations efficient:
End of explanation
"""
resbiasave = np.zeros(N+1, dtype=int)
for st, P in tqdm_notebook(zip(symmstates, symmPstates), total=symmstates.shape[0],
disable=not __TESTING__):
# bv = np.sum(dx for j, dx in enumerate(dxlist) if st[j+1] == 0)
bv = np.dot(biasvec_mat, st)
W = 4-np.sum(st[1:5])
if W>0:
resbiasave += P*(bv[0]*bv[0]+bv[1]*bv[1])*(12//W)
print(resbiasave/12)
"""
Explanation: Evaluation of averages
We have a state vector $\chi_i$ = 0 or 1, and for each end position $j$, we'll have the representation of $M_{\chi\chi'}$ as a vector $M_j$, we want the contribution to each basis function $b$.
Let's try some averages; first, without basis functions:
$\langle \tau_\chi \mathbf{b}\chi\cdot\mathbf{b}\chi\rangle_\chi$, the average residual bias.
End of explanation
"""
biasvecbar = np.zeros((2, Nbasis, N+1), dtype=int)
Pc = np.zeros(N+1, dtype=int)
for st, P in tqdm_notebook(zip(symmstates, symmPstates), total=symmstates.shape[0],
disable=not __TESTING__):
# bv = np.sum(dx for j, dx in enumerate(dxlist) if st[j+1] == 0)
W = 4-np.sum(st[1:5])
if W==0 or W==4: continue
bv = np.dot(biasvec_mat, st)
Pc[1:] = P[:-1]
tripleouterupdate(biasvecbar, bv, np.dot(chibasisfound_mat[0], st), P)
tripleouterupdate(biasvecbar, bv, chibasismiss_mat[0], Pc)
symmbiasvecbar = symmetrize(biasvecbar, groupops, basisgroupops)
"""
Explanation: Now, an average involving a single basis function: $\langle \mathbf{b}\chi \phi{\chi,\mathbf{x}}\rangle_\chi$.
End of explanation
"""
# @njit(nogil=True, parallel=True)
@jit
def matrixupdate(mat_bar, mat_vec, chibasis, chibasis_miss,
                 chibasismissmatch_mat, P, Pc, Pcc):
    """
    Accumulate one state's contribution to a basis/basis matrix average.

    :param mat_bar: accumulator, indexed [basis, basis, power of c]
    :param mat_vec: matrix row (W, WG, or WGG) for the origin site
    :param chibasis: basis-function indicators for sites found in the cluster
    :param chibasis_miss: indicators for basis functions outside the cluster
    :param chibasismissmatch_mat: correction for pairs of "missing" basis
        functions that reference the *same* site (see note below)
    :param P: probability polynomial of the state
    :param Pc, Pcc: P shifted up by one / two powers of c
    """
    chibasis0, chibasis1 = chibasis[0], chibasis_miss[0]
    chipbasis0, chipbasis1 = np.dot(mat_vec, chibasis), np.dot(mat_vec, chibasis_miss)
    # found/found, missing/found, and found/missing contributions:
    tripleouterupdate(mat_bar, chibasis0, chipbasis0, P)
    tripleouterupdate(mat_bar, chibasis1, chipbasis0, Pc)
    tripleouterupdate(mat_bar, chibasis0, chipbasis1, Pc)
    # note: this is a little confusing; if the two ("missing") basis functions are
    # referencing *different* sites, then we pick up a x^2 term; but if they
    # reference the same site, it is a factor of x.
    tripleouterupdate(mat_bar, chibasis1, chipbasis1, Pcc)
    matchouter = np.dot(chibasismissmatch_mat, mat_vec)
    matrixouterupdate(mat_bar, matchouter, Pc-Pcc)
# I'm not entirely sure how this is supposed to read; the matching seems to be the key?
# @njit(nogil=True, parallel=True)
@jit
def farmatrixupdate(mat_bar, mat_vec, chibasis_far, Pc, Pcc):
    """
    Far-field analogue of matrixupdate: accumulate only the "matching"
    (translationally invariant) contribution into mat_bar.

    :param mat_bar: accumulator, indexed [basis, basis, power of c]
    :param mat_vec: matrix row (W, WG, or WGG) for the origin site
    :param chibasis_far: matching matrix for far basis-function pairs
    :param Pc, Pcc: probability polynomial shifted by one / two powers of c
    """
    # note: this is a little confusing; if the two ("missing") basis functions are
    # referencing *different* sites, then we pick up a x^2 term; but if they
    # reference the same site, it is a factor of x.
    # tripleouterupdate(mat_bar, chibasis1, chipbasis1, Pcc)
    matchouter = np.dot(chibasis_far, mat_vec)
    matrixouterupdate(mat_bar, matchouter, Pc-Pcc)
# @njit(nogil=True, parallel=True)
@jit
def vectorupdate(vec_bar, bv, vec, chibasis, chibasis_miss, P, Pc):
    """
    Accumulate one state's bias-vector / basis-function products.

    :param vec_bar: accumulator, indexed [dimension, basis, power of c]
    :param bv: bias vector of the state (2-vector)
    :param vec: weight vector over sites (e.g. onevec, or a row of Gamma)
    :param chibasis: basis-function indicators for sites found in the cluster
    :param chibasis_miss: indicators for basis functions outside the cluster
    :param P: probability polynomial of the state
    :param Pc: P shifted up by one power of c
    """
    # chibasis0, chibasis1 = chibasis[0], chibasis_miss[0]
    chipbasis0, chipbasis1 = np.dot(vec, chibasis), np.dot(vec, chibasis_miss)
    tripleouterupdate(vec_bar, bv, chipbasis0, P)
    tripleouterupdate(vec_bar, bv, chipbasis1, Pc)
tauscale = 12
eye = tauscale*np.pad(np.eye(Nsite0, dtype=int), ((0,0), (0,Nsite-Nsite0)), 'constant')
onevec = np.array([1,] + [0,]*(Nsite-1))
# We don't expect to need c^N+1 or c^N+2 so we ignore those...
# Matrices: <sum_c' W_cc' chi chi'> and higher order (GG, and WGG terms...)
Wbar = np.zeros((Nbasis, Nbasis, N+1), dtype=int)
WGbar = np.zeros((Nbasis, Nbasis, N+1), dtype=int)
WGGbar = np.zeros((Nbasis, Nbasis, N+1), dtype=int)
# far-field versions of the same; the matched versions, followed by the "summed" (baseline) version:
Wbar_far = np.zeros((Nbasis, Nbasis, N+1), dtype=int)
WGbar_far = np.zeros((Nbasis, Nbasis, N+1), dtype=int)
WGGbar_far = np.zeros((Nbasis, Nbasis, N+1), dtype=int)
Wbar_far0 = np.zeros(N+1, dtype=int)
WGbar_far0 = np.zeros(N+1, dtype=int)
WGGbar_far0 = np.zeros(N+1, dtype=int)
# bias vector versions, including products with gamma:
biasvecbar = np.zeros((2, Nbasis, N+1), dtype=int)
biasGvecbar = np.zeros((2, Nbasis, N+1), dtype=int)
biasGGvecbar = np.zeros((2, Nbasis, N+1), dtype=int)
# residual bias vector versions:
resbiasave = np.zeros(N+1, dtype=int)
resbiasGave = np.zeros(N+1, dtype=int)
Pc, Pcc = np.zeros(N+1, dtype=int), np.zeros(N+1, dtype=int)
for st, P in tqdm_notebook(zip(symmstates, symmPstates), total=symmstates.shape[0],
disable=not __TESTING__):
Pc[1:] = P[:-1]
Pcc[2:] = P[:-2]
# basis0: those inside \chi, basis1: those outside \chi
chibasis = np.dot(chibasisfound_mat, st)
# chibasis0, chibasis1 = np.dot(chibasisfound_mat[0], st), chibasismiss_mat[0]
# construct our transition matrix:
W = np.zeros((Nsite0, Nsite), dtype=int)
for n, jumps in enumerate(jumplist):
if st[n] == 1: continue
for m in jumps:
if st[m] == 0:
W[n,n] -= 1
W[n,m] = 1
tau = -np.diag(W) # will be tau multiplied by tauscale = 12 (== -12//W[n,n])
Gam = W.copy() # Gamma matrix multiplied by tauscale = 12.
for n in range(Nsite0):
if tau[n] > 0:
tau[n] = tauscale//tau[n]
Gam[n,n] = 0
Gam[n] *= tau[n]
WG = -W[0,0]*np.dot(Gam[0,:Nsite0], Gam)+tauscale*tauscale*W[0,0]*onevec
WGG = np.dot(W[0,:Nsite0], np.dot(Gam[:,:Nsite0], Gam - 2*eye))
matrixupdate(Wbar, W[0], chibasis, chibasismiss_mat, chibasismissmatch_mat,
P, Pc, Pcc)
matrixupdate(WGbar, WG, chibasis, chibasismiss_mat, chibasismissmatch_mat,
P, Pc, Pcc)
matrixupdate(WGGbar, WGG, chibasis, chibasismiss_mat, chibasismissmatch_mat,
P, Pc, Pcc)
# far-field contributions of same:
farmatrixupdate(Wbar_far, W[0], chibasisfar_mat, Pc, Pcc)
farmatrixupdate(WGbar_far, WG, chibasisfar_mat, Pc, Pcc)
farmatrixupdate(WGGbar_far, WGG, chibasisfar_mat, Pc, Pcc)
Wbar_far0 += np.sum(W[0])*Pcc
WGbar_far0 += np.sum(WG)*Pcc
WGGbar_far0 += np.sum(WGG)*Pcc
# bias contributions (only bother if there's non-zero bias)
if tau[0]==0: continue
bv = np.sum(dx for j, dx in enumerate(dxlist) if st[j+1] == 0)
vectorupdate(biasvecbar, bv, onevec, chibasis, chibasismiss_mat, P, Pc)
vectorupdate(biasGvecbar, bv, Gam[0], chibasis, chibasismiss_mat, P, Pc)
vectorupdate(biasGGvecbar, bv, np.dot(Gam[0,:Nsite0],Gam-2*eye),
chibasis, chibasismiss_mat, P, Pc)
resbiasave += P*(bv[0]*bv[0]+bv[1]*bv[1])*tau[0]
bb = 0
for j, G in enumerate(Gam[0]):
if G>0:
bvp = np.array([0,0])
for k, dx in zip(jumplist[j], dxlist):
if st[k] == 0: bvp += dx
bb += G*np.dot(bv, bvp)*tau[j]
resbiasGave += P*bb
if __TESTING__:
print(Wbar_far0, WGbar_far0, WGGbar_far0)
# scaling and symmetrization
symmWbar = symmetrize(Wbar, basisgroupops, basisgroupops)
symmWGbar = symmetrize(WGbar, basisgroupops, basisgroupops)/(tauscale*tauscale)
symmWGGbar = symmetrize(WGGbar, basisgroupops, basisgroupops)/(tauscale*tauscale)
symmWbar_far = symmetrize(Wbar_far, basisgroupops, basisgroupops)
symmWGbar_far = symmetrize(WGbar_far, basisgroupops, basisgroupops)/(tauscale*tauscale)
symmWGGbar_far = symmetrize(WGGbar_far, basisgroupops, basisgroupops)/(tauscale*tauscale)
symmresbiasave = resbiasave/tauscale
symmresbiasGave = resbiasGave/(tauscale*tauscale)
symmbiasvecbar = symmetrize(biasvecbar, groupops, basisgroupops)
symmbiasGvecbar = symmetrize(biasGvecbar, groupops, basisgroupops)/tauscale
symmbiasGGvecbar = symmetrize(biasGGvecbar, groupops, basisgroupops)/(tauscale*tauscale)
symmresbiasave
symmresbiasGave
"""
Explanation: Now, let's try a basis / basis vector average: $\langle \sum_{\chi'} \phi_{\chi,\mathbf{x}} W_{\chi\chi'} \phi_{\chi',\mathbf{y}}\rangle_\chi$.
This gets a bit complicated with the "missing" basis functions for $\chi$, and especially when we consider those that are missing in both $\chi$ and $\chi'$. We also need to treat the "far" case, where both $\mathbf{x}$ and $\mathbf{y}$ are far away from the origin.
We ignore terms higher than $c^N$ ($N$=24); no contributions are found higher than 10.
End of explanation
"""
def truncate_vec(v):
    """Strip trailing zeros from a coefficient vector.

    Returns the shortest prefix of `v` containing every nonzero entry
    (v must have at least one nonzero entry).
    """
    last_nonzero = np.max(np.nonzero(v))
    return v[:last_nonzero + 1]
def printvecbasis(VB):
    """
    Print the non-zero components of a vector-basis matrix.

    :param VB: array indexed [dimension, basis function, power of c]; for
        each of the two Cartesian dimensions, prints the basis vector and
        the truncated coefficient polynomial of every non-zero entry.
    """
    for d in range(2):
        print("dim {}".format(d+1))
        for bv, v in zip(basisfunc, VB[d]):
            # original tested `np.any(v) != 0` (a bool compared to 0);
            # test the array contents directly instead
            if np.any(v != 0):
                print(bv, truncate_vec(v))
def printbasisbasis(BB, comp=None):
    """
    Print the non-zero components of a basis-basis matrix.

    :param BB: array indexed [basis function, basis function, power of c]
    :param comp: optional set of basis-vector tuples; when given, only rows
        whose first basis vector is in `comp` are printed.
    """
    for bv0, BB0 in zip(basisfunc, BB):
        if comp is not None and tuple(bv0) not in comp:
            continue
        for bv1, B in zip(basisfunc, BB0):
            # original tested `np.any(B) != 0` (a bool compared to 0);
            # test the array contents directly instead
            if np.any(B != 0):
                print(bv0, bv1, truncate_vec(B))
printbasisbasis(symmWbar_far, {(0,0)})
printbasisbasis(symmWbar-symmWbar_far)
printbasisbasis(symmWGbar_far, {(0,0)})
printbasisbasis(symmWGbar-symmWGbar_far)
printbasisbasis(symmWGGbar_far, {(0,0)})
printbasisbasis(symmWGGbar-symmWGGbar_far)
printvecbasis(symmbiasvecbar)
printvecbasis(symmbiasGvecbar)
printvecbasis(symmbiasGGvecbar)
"""
Explanation: Output of averages
Some helper functions to make the printing nicer, followed by direct output.
End of explanation
"""
import h5py
rewriteFile = False
printFile = False
if rewriteFile:
with h5py.File('Neighbor-averaging.hdf5', 'w') as f:
f['dxlist'] = np.array(dxlist)
f['sites'] = np.array(sites)
f['jumplist'] = np.array(jumplist)
f['basisfunc'] = np.array(basisfunc)
f['symmWbar'] = symmWbar
f['symmWGbar'] = symmWGbar
f['symmWGGbar'] = symmWGGbar
f['symmWbar_far'] = symmWbar_far
f['symmWGbar_far'] = symmWGbar_far
f['symmWGGbar_far'] = symmWGGbar_far
f['symmresbias'] = symmresbiasave
f['symmresbiasGave'] = symmresbiasGave
f['symmbiasvecbar'] = symmbiasvecbar
f['symmbiasGvecbar'] = symmbiasGvecbar
f['symmbiasGGvecbar'] = symmbiasGGvecbar
if printFile:
with h5py.File('Neighbor-averaging.hdf5', 'r') as f:
for k, c in f.items():
print(k)
print(c.value)
"""
Explanation: Write out to HDF5 file
We now store the output in an HDF5 file for later use and analysis.
End of explanation
"""
def mpmesh(Ndiv, pre=np.pi):
    """
    Generates a MP mesh for a square lattice, restricted to the
    irreducible wedge k_x >= k_y > 0 with symmetry weights.
    :param Ndiv: number of divisions
    :param pre: prefactor for edge of Brillouin zone (pi/a_0)
    :returns k[Nk,2]: k-points
    :returns w[Nk]: weight (sums to 1)
    """
    delta = pre/Ndiv
    w0 = 1./(Ndiv*Ndiv)
    Nk = (Ndiv*(Ndiv+1))//2
    kpt, w = np.zeros((Nk, 2)), np.zeros(Nk)
    # enumerate the wedge m <= n; off-diagonal points represent two
    # symmetry-equivalent points, hence double weight
    wedge = ((n, m) for n in range(Ndiv) for m in range(n+1))
    for i, (n, m) in enumerate(wedge):
        kpt[i, 0] = delta*(n + 0.5)
        kpt[i, 1] = delta*(m + 0.5)
        w[i] = w0 if n == m else 2*w0
    return kpt, w
square = crystal.Crystal(np.eye(2), [np.zeros(2)])
chem = 0
sitelist = square.sitelist(chem)
jumpnetwork = square.jumpnetwork(chem, 1.01) # [[((0,0), dx) for dx in dxlist]]
starset = stars.StarSet(jumpnetwork, square, chem, 3)
vecstarset = stars.VectorStarSet(starset)
if __TESTING__:
print(starset)
if __TESTING__:
for vR, vV in zip(vecstarset.vecpos, vecstarset.vecvec):
print('')
for R, v in zip(vR, vV):
print(starset.states[R] , v)
GF = GFcalc.GFCrystalcalc(square, chem, sitelist, jumpnetwork, kptwt = mpmesh(32))
GF.SetRates(np.ones(1), np.zeros(1), np.ones(1), np.zeros(1))
if __TESTING__:
print(GF)
GFmat, GFstarset = vecstarset.GFexpansion()
GF0array = np.array([GF(0,0,GFstarset.states[s[0]].R) for s in GFstarset.stars])
g0 = np.dot(GFmat, GF0array)
print(g0)
basis2state = [starset.stateindex(stars.PairState(0, 0, bv, bv)) for bv in basisfunc]
basis2star = [starset.starindex(stars.PairState(0, 0, bv, bv)) for bv in basisfunc]
if __TESTING__:
for bv, stateind, starind in zip(basisfunc, basis2state, basis2star):
print(bv, stateind, starind)
state2basis = [basis2state.index(n) for n in range(starset.Nstates)]
if __TESTING__:
print(state2basis)
"""
Explanation: Mapping onto vectorStars
We create the simplified symmetry basis functions using vectorStars, to folddown the full representation, and compute proper inverses. We also make our own Monkhorst-Pack mesh that is shifted off of the origin, and symmetrized, for simplicity.
End of explanation
"""
NVS = vecstarset.Nvstars
symmbiasvecVS = np.zeros((N+1, NVS))
symmbiasGvecVS = np.zeros((N+1, NVS))
symmbiasGGvecVS = np.zeros((N+1, NVS))
for i in range(vecstarset.Nvstars):
for Ri, vi in zip(vecstarset.vecpos[i], vecstarset.vecvec[i]):
bi = state2basis[Ri]
symmbiasvecVS[:, i] += np.dot(vi, symmbiasvecbar[:,bi,:])
symmbiasGvecVS[:, i] += np.dot(vi, symmbiasGvecbar[:,bi,:])
symmbiasGGvecVS[:, i] += np.dot(vi, symmbiasGGvecbar[:,bi,:])
stars.zeroclean(symmbiasvecVS);
stars.zeroclean(symmbiasGvecVS);
stars.zeroclean(symmbiasGGvecVS);
for nv in range(NVS):
if not np.allclose(symmbiasvecVS[:,nv], 0):
print(nv, truncate_vec(symmbiasvecVS[:,nv]))
for nv in range(NVS):
if not np.allclose(symmbiasGvecVS[:,nv], 0):
print(nv, truncate_vec(symmbiasGvecVS[:,nv]))
for nv in range(NVS):
if not np.allclose(symmbiasGGvecVS[:,nv], 0):
print(nv, truncate_vec(symmbiasGGvecVS[:,nv]))
symmWbarVS = np.zeros((N+1, NVS, NVS))
symmWGbarVS = np.zeros((N+1, NVS, NVS))
symmWGGbarVS = np.zeros((N+1, NVS, NVS))
for i in range(vecstarset.Nvstars):
for Ri, vi in zip(vecstarset.vecpos[i], vecstarset.vecvec[i]):
bi = state2basis[Ri]
for j in range(vecstarset.Nvstars):
for Rj, vj in zip(vecstarset.vecpos[j], vecstarset.vecvec[j]):
bj = state2basis[Rj]
vivj = np.dot(vi,vj)
symmWbarVS[:, i, j] += vivj*(symmWbar[bi,bj,:]-symmWbar_far[bi,bj,:])
symmWGbarVS[:, i, j] += vivj*(symmWGbar[bi,bj,:]-symmWGbar_far[bi,bj,:])
symmWGGbarVS[:, i, j] += vivj*(symmWGGbar[bi,bj,:]-symmWGGbar_far[bi,bj,:])
stars.zeroclean(symmWbarVS);
stars.zeroclean(symmWGbarVS);
stars.zeroclean(symmWGGbarVS);
for nv,mv in itertools.product(range(NVS), repeat=2):
if not np.allclose(symmWbarVS[:,nv,mv], 0):
print(nv, mv, truncate_vec(symmWbarVS[:,nv,mv]))
for nv,mv in itertools.product(range(NVS), repeat=2):
if not np.allclose(symmWGbarVS[:,nv,mv], 0):
print(nv, mv, truncate_vec(symmWGbarVS[:,nv,mv]))
for nv,mv in itertools.product(range(NVS), repeat=2):
if not np.allclose(symmWGGbarVS[:,nv,mv], 0):
print(nv, mv, truncate_vec(symmWGGbarVS[:,nv,mv]))
"""
Explanation: Now the real conversion begins! We start by mapping all of the bias vectors and local functions onto our vectorBasis.
End of explanation
"""
def FT(mat, kptwt):
    """
    (real) Fourier transform of translationally invariant function.
    :param mat[Nbasis, N+1]: far-field version of matrix;
        each Nbasis is relative to 0
    :param kptwt: tuple of (kpt[Nkpt, 2], wt[Nkpt])
    :returns matFT[Nkpt, N+1]: FT of matrix
    """
    kpt = kptwt[0]
    matFT = np.zeros((kpt.shape[0], N+1))
    for bv, matv in zip(basisfunc, mat):
        # cosine transform: every basis vector appears with its negative
        # (basisfunc holds all site differences), so sine terms cancel and
        # the transform is purely real
        matFT += np.outer(np.cos(np.dot(kpt, bv)), matv)
    return matFT
PE.Taylor2D(Lmax=6); # initialize
def Taylor(mat):
    """
    (real) Taylor expansion of Fourier transform of translationally invariant function.
    :param mat[Nbasis, N+1]: far-field version of matrix;
        each Nbasis is relative to 0
    :returns matTaylor: T2D version of FT Taylor expansion matrix
    """
    # Taylor coefficients of cos(k.x) up to 4th order: 1 - (k.x)^2/2 + (k.x)^4/24
    pre = np.array([1., 0., -1/2, 0., 1/24]) # Taylor coefficients for cos()
    matTaylor = PE.Taylor2D()
    for bv, matv in zip(basisfunc, mat):
        for ve in PE.Taylor2D.constructexpansion([(matv, bv)], pre=pre):
            matTaylor += ve
    # collect and simplify terms before returning
    matTaylor.reduce()
    return matTaylor
if __TESTING__:
print(FT(symmWbar_far[0], mpmesh(4)))
g0Taylor = (Taylor(symmWbar_far[0])[1]).inv() # extract out the "constant" term
print(g0Taylor)
g0WGbarTaylor = ( (g0Taylor*g0Taylor)*Taylor(symmWGbar_far[0])).reduce().truncate(0)
g0WGGbarTaylor = ( (g0Taylor*g0Taylor)*Taylor(symmWGGbar_far[0])).reduce().truncate(0)
print(g0WGbarTaylor)
print(g0WGGbarTaylor)
kpt, wt = mpmesh(32)
g0FT = 1./FT(symmWbar_far[0], (kpt, wt))[:,1]
WGbarFT = FT(symmWGbar_far[0], (kpt, wt))
WGGbarFT = FT(symmWGGbar_far[0], (kpt, wt))
if __TESTING__:
print(g0FT)
pmax = np.sqrt(min([np.dot(G, G) for G in square.BZG]) / -np.log(1e-11))
prefactor = square.volume
g0Taylor_fnlp = {(n, l): GFcalc.Fnl_p(n, pmax) for (n, l) in g0Taylor.nl()}
g0Taylor_fnlu = {(n, l): GFcalc.Fnl_u(n, l, pmax, prefactor, d=2)
for (n, l) in g0Taylor.nl()}
if __TESTING__:
print(pmax)
if __TESTING__:
print(g0Taylor.nl(), g0WGbarTaylor.nl(), g0WGGbarTaylor.nl())
g0WGbarsc = np.zeros_like(g0WGbarFT)
g0WGGbarsc = np.zeros_like(g0WGGbarFT)
for i, k in enumerate(kpt):
g0WGbarsc[i] = (g0FT[i]**2)*g0WGbarFT[i] - g0WGbarTaylor(k, g0Taylor_fnlp).real
g0WGGbarsc[i] = (g0FT[i]**2)*g0WGGbarFT[i] - g0WGGbarTaylor(k, g0Taylor_fnlp).real
if __TESTING__:
print(truncate_vec(np.dot(wt, g0WGGbarsc)))
"""
Explanation: Fourier transformation of translationally invariant contributions
Our "far" functions represent the translationally invariant contributions, and this requires Fourier transforms, and Taylor expansions to then be made into local contributions.
Mathematically, we're attempting to compute $\eta_i\cdot M_{ij}\cdot\eta_j$; the issue is that $\eta_i$ does not go to zero in the far-field (it's not local), and $M$ can be written as a local function plus a translationally invariant function $M^0$. Only the latter is problematic. However, as $\eta_i$ comes from a Green function solution (using the Dyson equation), if we multiply by the $w^0$, we produce a local function. Hence, we can rewrite that matrix equation as $(w^0\eta)i\cdot (g^0M^0g^0){ij}\cdot (w^0\eta_j)$. Now, then we "simply" need to evaluate $g^0M^0g^0$, which can be done using Fourier transforms, as it is the product of three translationally invariant functions.
End of explanation
"""
# this list is a bit of overkill, but...
veclist = [GFstarset.states[s[0]].dx for s in GFstarset.stars]
g0WGbar, g0WGGbar = [], []
for x in veclist:
coskx = np.sum(np.cos(np.tensordot(kpt, np.dot(g, x), axes=(1, 0)))
for g in groupops) / 8
g0WGbar.append(np.dot(wt*coskx,g0WGbarsc) + g0WGbarTaylor(x, g0Taylor_fnlu).real)
g0WGGbar.append(np.dot(wt*coskx,g0WGGbarsc) + g0WGGbarTaylor(x, g0Taylor_fnlu).real)
for v, g in zip(veclist, g0WGbar):
print(v, truncate_vec(g))
for v, g in zip(veclist, g0WGGbar):
print(v, truncate_vec(g))
"""
Explanation: inverse Fourier transformation
Now we go from the Fourier transformed version to the inverse Fourier transformed version (the final product version).
End of explanation
"""
@njit(nogil=True, parallel=True)
def polymult(p, q):
    """
    Multiplication of two polynomial coefficients, where
    p(x) = sum_n p[n] * x^n
    :param p: polynomial coefficients for p
    :param q: polynomial coefficients for q
    :returns pq: polynomial coefficients for pq (length len(p)+len(q)-1)
    """
    P = p.shape[0]-1  # degree of p
    Q = q.shape[0]-1  # degree of q
    pq = np.zeros(P+Q+1)
    for n in range(P+Q+1):
        # discrete convolution: pq[n] = sum over i+j == n of p[i]*q[j]
        for i in range(max(0,n-Q), min(n,P)+1):
            pq[n] += p[i]*q[n-i]
    return pq
@njit(nogil=True, parallel=True)
def polydiv(p, a):
    """
    Division of polynomial p(x) by the monomial (x-a), via synthetic
    division, so that p(x) = (x-a)*d(x) + r.
    :param p: polynomial coefficients for p
    :param a: root term in the monomial (x-a)
    :returns d, r: quotient polynomial d(x), and scalar remainder r = p(a)
    """
    P = p.shape[0]-1  # degree of p
    d = np.zeros(P)
    d[P-1] = p[P]
    # Horner-style back-substitution for the quotient coefficients
    for n in range(P-2,-1,-1):
        d[n] = p[n+1] + a*d[n+1]
    return d, p[0] + a*d[0]
divpoly = np.zeros(N+1)
divpoly[0], divpoly[1] = 1+g0[0,0], -(1+3*g0[0,0])
etabar_div = -2*g0[0] # this is etabar*div, so that etabar = etabar_div/div
etaW0_div = np.zeros(N+1)
etaW0_div[0] = -2 # this is W0*etabar*div (for the translational invariant terms)
# unbiased:
L0 = np.zeros(N+1)
L0[0], L0[1] = 1., -1.
# Note: vecstarset.outer[i,j, v1, v2] = 1/2 delta_ij delta_v1v2,
# so we can use dot-products throughout
# SCGF:
L1 = 0.5*np.dot(symmbiasvecVS, etabar_div)
L_SCGF = polymult(L0, divpoly)[:N+1] + L1
polydiv(L_SCGF, 1)
# print(np.dot(GFmat[0,0], g0WGGbar))
PsiB = polymult(polymult(divpoly, divpoly), symmresbiasave)[:N+1] + \
-2*polymult(divpoly, np.dot(symmbiasGvecVS, etabar_div))[:N+1] + \
np.dot(np.dot(symmWGbarVS, etabar_div), etabar_div) + \
4*np.dot(GFmat[0,0], g0WGbar) # far-field; note: etaW0_div == 2, so factor of 4
print(PsiB)
WR = polymult(polymult(divpoly, divpoly), symmresbiasGave)[:N+1] - \
polymult(polymult(divpoly, divpoly), symmresbiasave)[:N+1] + \
-2*polymult(divpoly, L1)[:N+1] + \
-2*polymult(divpoly, np.dot(symmbiasGGvecVS, etabar_div))[:N+1] + \
np.dot(np.dot(symmWGGbarVS, etabar_div), etabar_div) + \
4*np.dot(GFmat[0,0], g0WGGbar)
print(WR)
# Now, to put it together, and do the division...
cBv = np.linspace(0.01,1,num=99,endpoint=False)
D1, D2 = [], []
for cB in cBv:
# print(cB)
cA = 1-cB
cpow = np.array([cB**n for n in range(N+1)])
L0c, divc, L1c = np.dot(cpow, L0), np.dot(cpow, divpoly), np.dot(cpow, L_SCGF)
L1c /= divc
PsiBc, WRc = np.dot(cpow, PsiB)/(divc*divc), np.dot(cpow, WR)/(divc*divc)
L2c = L1c + 0.5*PsiBc*PsiBc/WRc
D0c = L0c/cA
D1c = L1c/cA
D2c = L2c/cA
D1.append(D1c)
D2.append(D2c)
print(cB, D1c, D2c, D2c/D1c) #, PsiBc)
D1v, D2v = np.array(D1), np.array(D2)
plt.rcParams['figure.figsize'] = (8,8)
fig, ax = plt.subplots()
ax.plot(cBv, D1, 'b', label='GF')
ax.plot(cBv, D2, 'r', label='GF+resbias')
ax.set_ylabel('$D^{\\rm A}$', fontsize='x-large')
ax.set_xlabel('$c_{\\rm B}$', fontsize='x-large')
ax.legend(bbox_to_anchor=(0.5,0.5,0.5,0.3), ncol=1, shadow=True,
frameon=True, fontsize='x-large', framealpha=1.)
plt.tight_layout()
plt.show()
"""
Explanation: Putting it all together
All of the pieces are in place; we can now compute:
Transport coefficients using the SCGF approach
Residual bias correction to the latter
Quantities are expressed as polynomials in $c_\text{B}$, the concentration of the immobile species.
The Green function, and the correction $\eta$, end up having particularly simple expressions, that we will compute directly (it requires some simplification of the polynomial expressions which are more difficult to directly express here. It, unfortunately, also introduces a denominator polynomial which makes some of our expressions more complicated.
We have
$$\eta_i = -2\frac{g^0_{i0}}{1+g^0_{i0} - (1+3g^0_{i0})c_\text{B}}$$
End of explanation
"""
num_SCGF, denom_SCGF = truncate_vec(-polydiv(L_SCGF,1)[0]), truncate_vec(divpoly)
num_SCGFbc, denom_SCGFbc = \
truncate_vec(-polydiv(0.5*polymult(PsiB,PsiB),1)[0]), \
truncate_vec(polymult(polymult(divpoly, divpoly), WR))
# check remainders (should be 0 for both)
if __TESTING__:
print(polydiv(L_SCGF,1)[1], polydiv(0.5*polymult(PsiB,PsiB),1)[1])
def print_fraction(numer, denom, powstring='**'):
    """
    Returns a string representation of our polynomial ratio numer(c)/denom(c).

    :param numer: polynomial coefficients of the numerator (index = power of c)
    :param denom: polynomial coefficients of the denominator
    :param powstring: exponentiation operator in the output ('**' by default)
    :returns: string '(a0+a1*c+...)/(b0+b1*c+...)', normalized so the
        lowest-order nonzero denominator coefficient is 1

    Works on copies: the original version applied `/=` to slices of the
    caller's arrays, silently rescaling the underlying polynomial data.
    """
    # float copies, so the in-place scaling below cannot touch caller data
    numer = np.array(numer, dtype=float)
    denom = np.array(denom, dtype=float)

    def format_pow(n):
        """String for the monomial c^n (empty for the constant term)."""
        if n == 0:
            return ''
        if n == 1:
            return '*c'
        return '*c' + powstring + '{}'.format(n)
    # first, "divide" through by c until lowest order is constant on both:
    while np.isclose(numer[0], 0) and np.isclose(denom[0], 0):
        numer, denom = numer[1:], denom[1:]
    # second, scale everything by lowest-order nonzero term in denominator
    scale = denom[np.min(np.nonzero(denom))]
    numer /= scale
    denom /= scale
    s = '('
    for n, coeff in enumerate(numer):
        if not np.isclose(coeff, 0):
            s += '{:+.10g}'.format(coeff) + format_pow(n)
    s += ')/('
    for n, coeff in enumerate(denom):
        if not np.isclose(coeff, 0):
            s += '{:+.10g}'.format(coeff) + format_pow(n)
    s += ')'
    return s
print(print_fraction(num_SCGF, denom_SCGF))
print(print_fraction(num_SCGF, denom_SCGF) + ' + ' +\
print_fraction(num_SCGFbc, denom_SCGFbc))
"""
Explanation: Final "analytic" versions
We now produce the analytic (with numerical coefficients) version of our transport coefficients.
End of explanation
"""
polydiv(polydiv(polydiv(num_SCGFbc,1)[0],1)[0],1)
polydiv(polydiv(denom_SCGFbc,1)[0],1)
SCGFbc_func = print_fraction(num_SCGF, denom_SCGF) + ' + ' +\
print_fraction(polydiv(polydiv(num_SCGFbc,1)[0],1)[0],
polydiv(polydiv(denom_SCGFbc,1)[0],1)[0])
print(SCGFbc_func)
"""
Explanation: Note: both of these polynomials have two factors of $(1-c)$ in them; so we can simplify further...
End of explanation
"""
|
Leguark/pynoddy | docs/notebooks/.ipynb_checkpoints/2-Adjust-input-checkpoint.ipynb | gpl-2.0 | from IPython.core.display import HTML
# Load this notebook's custom CSS (rendered when run under IPython/Jupyter)
css_file = 'pynoddy.css'
HTML(open(css_file, "r").read())
# IPython magics: change into the notebook directory and enable inline
# plots -- these two lines are only valid inside an IPython session
cd ../docs/notebooks/
%matplotlib inline
import sys, os
import matplotlib.pyplot as plt
import numpy as np
# adjust some settings for matplotlib
from matplotlib import rcParams
# print rcParams
rcParams['font.size'] = 15
# determine path of repository to set paths correctly below
repo_path = os.path.realpath('../..')
import pynoddy
import pynoddy.history
import pynoddy.output
"""
Explanation: Change Noddy input file and recompute model
In this section, we will briefly present possibilities to access the properties defined in the Noddy history input file and show how simple adjustments can be performed, for example changing the cube size to obtain a model with a higher resolution.
Also outlined here is the way that events are stored in the history file as single objects. For more information on accessing and changing the events themselves, please be patient until we get to the next section.
End of explanation
"""
# Change to sandbox directory to store results
os.chdir(os.path.join(repo_path, 'sandbox'))
# Path to example directory in this repository
example_directory = os.path.join(repo_path,'examples')
# Compute noddy model for history file
history_file = 'simple_two_faults.his'
history = os.path.join(example_directory, history_file)
output_name = 'noddy_out'
# Parse the .his input file into a NoddyHistory object
H1 = pynoddy.history.NoddyHistory(history)
"""
Explanation: First step: load the history file into a Python object:
End of explanation
"""
print("The history contains %d events" % H1.n_events)
"""
Explanation: Technical note: the NoddyHistory class can be accessed on the level of pynoddy (as it is imported in the __init__.py module) with the shortcut:
H1 = pynoddy.NoddyHistory(history)
I am using the long version pynoddy.history.NoddyHistory here to ensure that the correct package is loaded with the reload() function. If you don't make changes to any of the pynoddy files, this is not required. So for any practical cases, the shortcuts are absolutely fine!
Get basic information on the model
The history file contains the entire information on the Noddy model. Some information can be accessed through the NoddyHistory object (and more will be added soon!), for example the total number of events:
End of explanation
"""
# The parsed event objects (displayed by the notebook)
H1.events
"""
Explanation: Events are implemented as objects, the classes are defined in H1.events. All events are accessible in a list on the level of the history object:
End of explanation
"""
# Properties parsed for event 2 (only a subset of the .his data is parsed)
H1.events[2].properties
# print H1.events[5].properties.keys()
"""
Explanation: The properties of an event are stored in the event objects themselves. To date, only a subset of the properties (deemed as relevant for the purpose of pynoddy so far) are parsed. The .his file contains a lot more information! If access to this information is required, adjustments in pynoddy.events have to be made.
For example, the properties of a fault object are:
End of explanation
"""
# We will first recompute the model and store results in an output file for comparison
NH1 = pynoddy.history.NoddyHistory(history)
pynoddy.compute_model(history, output_name)
NO1 = pynoddy.output.NoddyOutput(output_name)
# Now: change cube size to 50 m, write to new file and recompute
NH1.change_cube_size(50)
# Save model to a new history file and recompute (Note: may take a while to compute now)
new_history = "fault_model_changed_cubesize.his"
new_output_name = "noddy_out_changed_cube"
NH1.write_history(new_history)
pynoddy.compute_model(new_history, new_output_name)
NO2 = pynoddy.output.NoddyOutput(new_output_name)
"""
Explanation: Change model cube size and recompute model
The Noddy model itself is, once computed, a continuous model in 3-D space. However, for most visualisations and further calculations (e.g. geophysics), a discretised version is suitable. The discretisation (or block size) can be adapted in the history file. The according pynoddy function is change_cube_size.
A simple example to change the cube size and write a new history file:
End of explanation
"""
# Compare total cell counts and block sizes of the two discretisations
print("Model 1 contains a total of %7d cells with a blocksize %.0f m" %
      (NO1.n_total, NO1.delx))
print("Model 2 contains a total of %7d cells with a blocksize %.0f m" %
      (NO2.n_total, NO2.delx))
"""
Explanation: The different cell sizes are also represented in the output files:
End of explanation
"""
# create basic figure layout
fig = plt.figure(figsize = (15,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# NOTE(review): position presumably indexes cells along y, so cell 0
# (low res) and cell 1 (high res) lie near the same physical section --
# confirm against NoddyOutput.plot_section
NO1.plot_section('y', position=0, ax = ax1, colorbar=False, title="Low resolution")
NO2.plot_section('y', position=1, ax = ax2, colorbar=False, title="High resolution")
plt.show()
"""
Explanation: We can compare the effect of the different model discretisations in section plots, created with the plot_section method described before. Let's get a bit more fancy here and use the functionality to pass axes to the plot_section method, and to create one figure as direct comparison:
End of explanation
"""
# We use here simply the time() function to evaluate the simulation time.
# This is not the best possible way to do it, but probably the simplest.
import time
start_time = time.time()
pynoddy.compute_model(history, output_name)
end_time = time.time()
print("Simulation time for low-resolution model: %5.2f seconds" % (end_time - start_time))
start_time = time.time()
pynoddy.compute_model(new_history, new_output_name)
end_time = time.time()
print("Simulation time for high-resolution model: %5.2f seconds" % (end_time - start_time))
"""
Explanation: Note: the following two subsections contain some slightly advanced examples on how to use the possibility to adjust cell sizes through scripts directly to automate processes that are infeasible using the GUI version of Noddy - as a 'peek preview' of the automation for uncertainty estimation that follows in a later section. Feel free to skip those two sections if you are only interested in the basic features so far.
Estimating computation time for a high-resolution model
You surely realised (if you ran these examples in an actual interactive ipython notebook) that the computation of the high-resolution model takes significantly longer than the low-resolution model. In a practical case, this can be very important.
End of explanation
"""
# perform computation for a range of cube sizes
cube_sizes = np.arange(200,49,-5)  # 200 m down to 50 m in 5 m steps
times = []
NH1 = pynoddy.history.NoddyHistory(history)
tmp_history = "tmp_history"
tmp_output = "tmp_output"
for cube_size in cube_sizes:
    # rewrite the history with the new block size and time one full run
    NH1.change_cube_size(cube_size)
    NH1.write_history(tmp_history)
    start_time = time.time()
    pynoddy.compute_model(tmp_history, tmp_output)
    end_time = time.time()
    times.append(end_time - start_time)
times = np.array(times)
# create plot: linear, cube-root and log-scaled views of runtime vs. block size
fig = plt.figure(figsize=(18,4))
ax1 = fig.add_subplot(131)
ax2 = fig.add_subplot(132)
ax3 = fig.add_subplot(133)
ax1.plot(cube_sizes, np.array(times), 'ro-')
ax1.set_xlabel('cubesize [m]')
ax1.set_ylabel('time [s]')
ax1.set_title('Computation time')
ax1.set_xlim(ax1.get_xlim()[::-1])  # reversed: resolution increases to the right
ax2.plot(cube_sizes, times**(1/3.), 'bo-')
ax2.set_xlabel('cubesize [m]')
ax2.set_ylabel('(time [s])**(1/3)')
ax2.set_title('Computation time (cuberoot)')
ax2.set_xlim(ax2.get_xlim()[::-1])
ax3.semilogy(cube_sizes, times, 'go-')
ax3.set_xlabel('cubesize [m]')
ax3.set_ylabel('time [s]')
ax3.set_title('Computation time (y-log)')
ax3.set_xlim(ax3.get_xlim()[::-1])
"""
Explanation: For an estimation of required computing time for a given discretisation, let's evaluate the time for a couple of steps, plot, and extrapolate:
End of explanation
"""
# perform curve fitting with scipy.optimize
import scipy.optimize
# define function to be fit
def func(x, a, b, c):
    """Empirical timing model to fit: a + (b * log10(x)) ** (-c)."""
    log_x = np.log10(x)
    return a + (b * log_x) ** (-c)
# Fit the timing model to the measured runtimes; p0 is the initial guess
popt, pcov = scipy.optimize.curve_fit(func, cube_sizes, np.array(times), p0 = [-1, 0.5, 2])
# Best-fit parameters (a, b, c); displayed by the notebook
popt
"""
Explanation: It is actually quite interesting that the computation time does not scale with cubesize to the power of three (as could be expected, given that we have a mesh in three dimensions). Or am I missing something?
Anyway, just because we can: let's assume that the scaling is somehow exponential and try to fit a model for a time prediction. Given the last plot, it looks like we could fit a logarithmic model with probably an additional exponent (as the line is obviously not straight), so something like:
$$ f(x) = a + \left( b \log_{10}(x) \right)^{-c} $$
Let's try to fit the curve with scipy.optimize.curve_fit:
End of explanation
"""
# Evaluate the fitted timing model on a finer range of cube sizes
a,b,c = popt
cube_range = np.arange(200,20,-1)
times_eval = func(cube_range, a, b, c)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogy(cube_range, times_eval, '-')  # model prediction
ax.semilogy(cube_sizes, times, 'ko')      # measured runtimes
# reverse x-axis
ax.set_xlim(ax.get_xlim()[::-1])
"""
Explanation: Interesting, it looks like Noody scales with something like:
$$ f(x) = \left( 0.5 \log_{10}(x) \right)^{-12} $$
Note: if you understand more about computational complexity than me, it might not be that interesting to you at all - if this is the case, please contact me and tell me why this result could be expected...
End of explanation
"""
# Predict the runtime for a 40 m discretisation from the fitted model
cube_size = 40 # m
time_est = func(cube_size, a, b, c)
print("Estimated time for a cube size of %d m: %.1f seconds" % (cube_size, time_est))
"""
Explanation: Not too bad... let's evaluate the time for a cube size of 40 m:
End of explanation
"""
# Run the simulation at 40 m and time it, to compare against the estimate
NH1.change_cube_size(cube_size)
NH1.write_history(tmp_history)
start_time = time.time()
pynoddy.compute_model(tmp_history, tmp_output)
end_time = time.time()
time_comp = end_time - start_time
print("Actual computation time for a cube size of %d m: %.1f seconds" % (cube_size, time_comp))
"""
Explanation: Now let's check the actual simulation time:
End of explanation
"""
# Add the measured 40 m runtime (red marker) to the model-vs-data plot
fig = plt.figure()
ax = fig.add_subplot(111)
ax.semilogy(cube_range, times_eval, '-')
ax.semilogy(cube_sizes, times, 'ko')
ax.semilogy(cube_size, time_comp, 'ro')
# reverse x-axis
ax.set_xlim(ax.get_xlim()[::-1])
"""
Explanation: Not too bad, probably in the range of the inherent variability... and if we check it in the plot:
End of explanation
"""
# perform computation for a range of cube sizes
# NOTE: bare `reload` is a builtin only in Python 2; under Python 3 the
# original `reload(...)` call raises NameError, so import it explicitly.
try:
    from importlib import reload  # Python 3
except ImportError:
    pass  # Python 2: reload is a builtin
reload(pynoddy.output)
cube_sizes = np.arange(200,49,-5)  # 200 m down to 50 m in 5 m steps
all_volumes = []
N_tmp = pynoddy.history.NoddyHistory(history)
tmp_history = "tmp_history"
tmp_output = "tmp_output"
for cube_size in cube_sizes:
    # adjust cube size
    N_tmp.change_cube_size(cube_size)
    N_tmp.write_history(tmp_history)
    pynoddy.compute_model(tmp_history, tmp_output)
    # open simulated model and determine volumes
    O_tmp = pynoddy.output.NoddyOutput(tmp_output)
    O_tmp.determine_unit_volumes()
    all_volumes.append(O_tmp.unit_volumes)
# stack per-run unit volumes into shape (n_cube_sizes, n_units)
all_volumes = np.array(all_volumes)
# Plot each unit's total volume against block size (units 0-3 left, rest right)
fig = plt.figure(figsize=(16,4))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
# separate into two plots for better visibility:
for i in range(np.shape(all_volumes)[1]):
    if i < 4:
        ax1.plot(cube_sizes, all_volumes[:,i], 'o-', label='unit %d' %i)
    else:
        ax2.plot(cube_sizes, all_volumes[:,i], 'o-', label='unit %d' %i)
ax1.legend(loc=2)
ax2.legend(loc=2)
# reverse axes
ax1.set_xlim(ax1.get_xlim()[::-1])
ax2.set_xlim(ax2.get_xlim()[::-1])
ax1.set_xlabel("Block size [m]")
ax1.set_ylabel("Total unit volume [m**3]")
ax2.set_xlabel("Block size [m]")
ax2.set_ylabel("Total unit volume [m**3]")
"""
Explanation: Anyway, the point of this exercise was not a precise evaluation of Noddy's computational complexity, but to provide a simple means of evaluating computation time for a high resolution model, using the flexibility of writing simple scripts using pynoddy, and a couple of additional python modules.
For a realistic case, it should, of course, be sufficient to determine the time based on a lot less computed points. If you like, test it with your favourite model and tell me if it proved useful (or not)!
Simple convergence study
So: why would we want to run a high-resolution model, anyway? Well, of course, it produces nicer pictures - but on a scientific level, that's completely irrelevant (haha, not true - so nice if it would be...).
Anyway, if we want to use the model in a scientific study, for example to evaluate volume of specific units, or to estimate the geological topology (Mark is working on this topic with some cool ideas - example to be implemented here, "soon"), we want to know if the resolution of the model is actually high enough to produce meaningful results.
As a simple example of the evaluation of model resolution, we will here include a volume convergence study, i.e. we will estimate at which level of increasing model resolution the estimated block volumes do not change anymore.
The entire procedure is very similar to the computational time evaluation above, only that we now also analyse the output and determine the rock volumes of each defined geological unit:
End of explanation
"""
|
transcranial/keras-js | notebooks/layers/embeddings/Embedding.ipynb | mit | input_dim = 5
output_dim = 3
input_length = 7
data_in_shape = (input_length,)
# Layer under test: 5-token vocabulary -> 3-dim vectors, no masking
emb = Embedding(input_dim, output_dim, input_length=input_length, mask_zero=False)
layer_0 = Input(shape=data_in_shape)
layer_1 = emb(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(1200 + i)
    weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
# random token ids in [0, input_dim - 2] (randint's high bound is exclusive)
arr_in = np.random.randint(0, input_dim - 1, data_in_shape)
data_in = arr_in.ravel().tolist()
print('')
print('in shape:', data_in_shape)
print('in:', data_in)
result = model.predict(np.array([arr_in]))
data_out_shape = result[0].shape
data_out = format_decimal(result[0].ravel().tolist())
print('out shape:', data_out_shape)
print('out:', data_out)
# record the fixture (input, weights, expected output) for the JS test suite
DATA['embeddings.Embedding.0'] = {
    'input': {'data': data_in, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out, 'shape': data_out_shape}
}
"""
Explanation: Embedding
[embeddings.Embedding.0] input_dim 5, output_dim 3, input_length=7, mask_zero=False
End of explanation
"""
input_dim = 20
output_dim = 5
input_length = 10
data_in_shape = (input_length,)
# Same fixture generation as fixture 0, but with zero-padding masking enabled
emb = Embedding(input_dim, output_dim, input_length=input_length, mask_zero=True)
layer_0 = Input(shape=data_in_shape)
layer_1 = emb(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(1210 + i)
    weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
arr_in = np.random.randint(0, input_dim - 1, data_in_shape)
data_in = arr_in.ravel().tolist()
print('')
print('in shape:', data_in_shape)
print('in:', data_in)
result = model.predict(np.array([arr_in]))
data_out_shape = result[0].shape
data_out = format_decimal(result[0].ravel().tolist())
print('out shape:', data_out_shape)
print('out:', data_out)
DATA['embeddings.Embedding.1'] = {
    'input': {'data': data_in, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out, 'shape': data_out_shape}
}
"""
Explanation: [embeddings.Embedding.1] input_dim 20, output_dim 5, input_length=10, mask_zero=True
End of explanation
"""
input_dim = 33
output_dim = 2
input_length = 5
data_in_shape = (input_length,)
# Third fixture: larger vocabulary (33), 2-dim embeddings, no masking
emb = Embedding(input_dim, output_dim, input_length=input_length, mask_zero=False)
layer_0 = Input(shape=data_in_shape)
layer_1 = emb(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
weights = []
for i, w in enumerate(model.get_weights()):
    np.random.seed(1220 + i)
    weights.append(2 * np.random.random(w.shape) - 1)
model.set_weights(weights)
print('W shape:', weights[0].shape)
print('W:', format_decimal(weights[0].ravel().tolist()))
arr_in = np.random.randint(0, input_dim - 1, data_in_shape)
data_in = arr_in.ravel().tolist()
print('')
print('in shape:', data_in_shape)
print('in:', data_in)
result = model.predict(np.array([arr_in]))
data_out_shape = result[0].shape
data_out = format_decimal(result[0].ravel().tolist())
print('out shape:', data_out_shape)
print('out:', data_out)
DATA['embeddings.Embedding.2'] = {
    'input': {'data': data_in, 'shape': data_in_shape},
    'weights': [{'data': format_decimal(w.ravel().tolist()), 'shape': w.shape} for w in weights],
    'expected': {'data': data_out, 'shape': data_out_shape}
}
"""
Explanation: [embeddings.Embedding.2] input_dim 33, output_dim 2, input_length=5, mask_zero=False
End of explanation
"""
import os
# Write the collected fixtures to the Keras.js test-data directory.
filename = '../../../test/data/layers/embeddings/Embedding.json'
# exist_ok avoids the check-then-create race of the original
# `if not os.path.exists(...): os.makedirs(...)` pattern
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
    json.dump(DATA, f)
print(json.dumps(DATA))
"""
Explanation: export for Keras.js tests
End of explanation
"""
|
quantumlib/Cirq | docs/protocols.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2020 The Cirq Developers
End of explanation
"""
# Install Cirq on the fly (e.g. on Colab) if it is not already available;
# the `!pip` line is an IPython shell escape, valid only in a notebook
try:
    import cirq
except ImportError:
    print("installing cirq...")
    !pip install --quiet cirq
    print("installed cirq.")
import cirq
"""
Explanation: Protocols
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://quantumai.google/cirq/protocols"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/protocols.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/protocols.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/protocols.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
</td>
</table>
End of explanation
"""
# Both a single gate and a whole circuit expose their unitary matrix
# through the cirq.unitary protocol
print(cirq.X)
print("cirq.X unitary:\n", cirq.unitary(cirq.X))
a, b = cirq.LineQubit.range(2)
circuit = cirq.Circuit(cirq.X(a), cirq.Y(b))
print(circuit)
print("circuit unitary:\n", cirq.unitary(circuit))
"""
Explanation: Introduction
Cirq's protocols are very similar concept to Python's built-in protocols that were introduced in PEP 544.
Python's built-in protocols are extremely convenient. For example, behind all the for loops and list comprehensions you can find the Iterator protocol.
As long as an object has the __iter__() magic method that returns an iterator object, it has iterator support.
An iterator object has to define __iter__() and __next__() magic methods, that defines the iterator protocol.
The iter(val) builtin function returns an iterator for val if it defines the above methods, otherwise throws a TypeError. Cirq protocols work similarly.
A canonical Cirq protocol example is the unitary protocol that allows to check the unitary matrix of values that support the protocol by calling cirq.unitary(val).
End of explanation
"""
# A qubit has no unitary representation, so cirq.unitary raises
try:
    print(cirq.unitary(a)) ## error!
except Exception as e:
    print("As expected, a qubit does not have a unitary. The error: ")
    print(e)
"""
Explanation: When an object does not support a given protocol, an error is thrown.
End of explanation
"""
print(cirq.unitary(cirq.Y))
"""
Explanation: What is a protocol?
A protocol is a combination of the following two items:
- a SupportsXYZ class, which defines and documents all the magic functions that need to be implemented in order to support that given protocol
- the entrypoint function(s), which are exposed to the main cirq namespace as cirq.xyz()
Note: While the protocol is technically both of these things, we refer to the public utility functions interchangeably as protocols. See the list of them below.
Cirq's protocols
For a complete list of Cirq protocols, refer to the cirq.protocols package.
Here we provide a list of frequently used protocols for debugging, simulation and testing.
| Protocol | Description |
|----------|-------|
|cirq.act_on| Allows an object (operations or gates) to act on a state, particularly within simulators. |
|cirq.apply_channel| High performance evolution under a channel evolution. |
|cirq.apply_mixture| High performance evolution under a mixture of unitaries evolution. |
|cirq.apply_unitaries| Apply a series of unitaries onto a state tensor. |
|cirq.apply_unitary| High performance left-multiplication of a unitary effect onto a tensor. |
|cirq.approx_eq| Approximately compares two objects. |
|cirq.circuit_diagram_info| Retrieves information for drawing operations within circuit diagrams. |
|cirq.commutes| Determines whether two values commute. |
|cirq.control_keys| Gets the keys that the value is classically controlled by. |
|cirq.definitely_commutes| Determines whether two values definitely commute. |
|cirq.decompose| Recursively decomposes a value into cirq.Operations meeting a criteria. |
|cirq.decompose_once| Decomposes a value into operations, if possible. |
|cirq.decompose_once_with_qubits| Decomposes a value into operations on the given qubits. |
|cirq.equal_up_to_global_phase| Determine whether two objects are equal up to global phase. |
|cirq.has_kraus| Returns whether the value has a Kraus representation. |
|cirq.has_mixture| Returns whether the value has a mixture representation. |
|cirq.has_stabilizer_effect| Returns whether the input has a stabilizer effect. |
|cirq.has_unitary| Determines whether the value has a unitary effect. |
|cirq.inverse| Returns the inverse val**-1 of the given value, if defined. |
|cirq.is_measurement| Determines whether or not the given value is a measurement. |
|cirq.is_parameterized| Returns whether the object is parameterized with any Symbols. |
|cirq.kraus| Returns a Kraus representation of the given channel. |
|cirq.measurement_key| Get the single measurement key for the given value. |
|cirq.measurement_keys| Gets the measurement keys of measurements within the given value. |
|cirq.mixture| Return a sequence of tuples representing a probabilistic unitary. |
|cirq.num_qubits| Returns the number of qubits, qudits, or qids val operates on. |
|cirq.parameter_names| Returns parameter names for this object. |
|cirq.parameter_symbols| Returns parameter symbols for this object. |
|cirq.pauli_expansion| Returns coefficients of the expansion of val in the Pauli basis. |
|cirq.phase_by| Returns a phased version of the effect. |
|cirq.pow| Returns val**factor of the given value, if defined. |
|cirq.qasm| Returns QASM code for the given value, if possible. |
|cirq.qid_shape| Returns a tuple describing the number of quantum levels of each |
|cirq.quil| Returns the QUIL code for the given value. |
|cirq.read_json| Read a JSON file that optionally contains cirq objects. |
|cirq.resolve_parameters| Resolves symbol parameters in the effect using the param resolver. |
|cirq.to_json| Write a JSON file containing a representation of obj. |
|cirq.trace_distance_bound| Returns a maximum on the trace distance between this effect's input |
|cirq.trace_distance_from_angle_list| Given a list of arguments of the eigenvalues of a unitary matrix, |
|cirq.unitary| Returns a unitary matrix describing the given value. |
|cirq.validate_mixture| Validates that the mixture's tuple are valid probabilities. |
Quantum operator representation protocols
The following family of protocols is an important and frequently used set of features of Cirq and it is worthwhile mentioning them and and how they interact with each other. They are, in the order of increasing generality:
*unitary
*kraus
*mixture
All these protocols make it easier to work with different representations of quantum operators, namely:
- finding that representation (unitary, kraus, mixture),
- determining whether the operator has that representation (has_*)
- and applying them (apply_*) on a state vector.
Unitary
The *unitary protocol is the least generic, as only unitary operators should implement it. The cirq.unitary function returns the matrix representation of the operator in the computational basis. We saw an example of the unitary protocol above, but let's see the unitary matrix of the Pauli-Y operator as well:
End of explanation
"""
# A gate applied with probability 0.3 is a unitary mixture;
# cirq.mixture yields (probability, unitary) pairs
probabilistic_x = cirq.X.with_probability(.3)
for p, op in cirq.mixture(probabilistic_x):
    print(f"probability: {p}")
    print("operator:")
    print(op)
"""
Explanation: Mixture
The *mixture protocol should be implemented by operators that are unitary-mixtures. These probabilistic operators are represented by a list of tuples ($p_i$, $U_i$), where each unitary effect $U_i$ occurs with a certain probability $p_i$, and $\sum p_i = 1$. Probabilities are a Python float between 0.0 and 1.0, and the unitary matrices are numpy arrays.
Constructing simple probabilistic gates in Cirq is easiest with the with_probability method.
End of explanation
"""
# cirq.Y has a unitary effect but does not implement SupportsMixture
# thus mixture protocols will return ((1, cirq.unitary(Y)))
print(cirq.mixture(cirq.Y))
print(cirq.has_mixture(cirq.Y))
"""
Explanation: In case an operator does not implement SupportsMixture, but does implement SupportsUnitary, *mixture functions fall back to the *unitary methods. It is easy to see that a unitary operator $U$ is just a "mixture" of a single unitary with probability $p=1$.
End of explanation
"""
# Kraus (operator-sum) representation of a depolarizing channel
cirq.kraus(cirq.DepolarizingChannel(p=0.3))
"""
Explanation: Channel
The kraus representation is the operator sum representation of a quantum operator (a channel):
$$
\rho \rightarrow \sum_{k=0}^{r-1} A_k \rho A_k^\dagger
$$
These matrices are required to satisfy the trace preserving condition
$$
\sum_{k=0}^{r-1} A_k^\dagger A_k = I
$$
where $I$ is the identity matrix. The matrices $A_k$ are sometimes called Kraus or noise operators.
The cirq.kraus returns a tuple of numpy arrays, one for each of the Kraus operators:
End of explanation
"""
cirq.kraus(cirq.X.with_probability(0.25))
"""
Explanation: In case the operator does not implement SupportsKraus, but it does implement SupportsMixture, the *kraus protocol will generate the Kraus operators based on the *mixture representation.
$$
((p_0, U_0),(p_1, U_1),\ldots,(p_n, U_n)) \rightarrow (\sqrt{p_0}U_0, \sqrt{p_1}U_1, \ldots, \sqrt{p_n}U_n)
$$
Thus for example ((0.25, X), (0.75, I)) -> (0.5 X, sqrt(0.75) I):
End of explanation
"""
# For a unitary operator, the Kraus representation is a one-element tuple
# containing the same matrix returned by cirq.unitary
print(cirq.kraus(cirq.Y))
print(cirq.unitary(cirq.Y))
print(cirq.has_kraus(cirq.Y))
"""
Explanation: In the simplest case of a unitary operator, cirq.kraus returns a one-element tuple with the same unitary as returned by cirq.unitary:
End of explanation
"""
|
scotthuang1989/Python-3-Module-of-the-Week | algorithm/functools.ipynb | apache-2.0 | import functools
def myfunc(a, b=2):
    "Docstring for myfunc()."
    # (docstring kept verbatim: the surrounding demos introspect __doc__)
    arg_pair = (a, b)
    print('  called myfunc with:', arg_pair)
def show_details(name, f, is_partial=False):
    "Show details of a callable object."
    print('{}:'.format(name))
    print('  object:', f)
    if is_partial:
        # partial objects carry func/args/keywords instead of __name__
        print('  func:', f.func)
        print('  args:', f.args)
        print('  keywords:', f.keywords)
    else:
        print('  __name__:', f.__name__)
    return
# Introspect and call the plain function first
show_details('myfunc', myfunc)
myfunc('a', 3)
print()
# Set a different default value for 'b', but require
# the caller to provide 'a'.
p1 = functools.partial(myfunc, b=4)
show_details('partial with named default', p1, True)
p1('passing a')
p1('override b', b=5)
print()
# Set default values for both 'a' and 'b'.
p2 = functools.partial(myfunc, 'default a', b=99)
show_details('partial with defaults', p2, True)
p2()
p2(b='override b')
print()
print('Insufficient arguments:')
# intentionally raises TypeError: p1 pre-fills only b, so 'a' is required
p1()
"""
Explanation: The functools module provides tools for adapting or extending functions and other callable objects, without completely rewriting them.
Decorators
The primary tool supplied by the functools module is the class partial, which can be used to “wrap” a callable object with default arguments. The resulting object is itself callable and can be treated as though it is the original function. It takes all of the same arguments as the original, and can be invoked with extra positional or named arguments as well. A partial can be used instead of a lambda to provide default arguments to a function, while leaving some arguments unspecified.
End of explanation
"""
import functools
def myfunc(a, b=2):
    "Docstring for myfunc()."
    print('  called myfunc with:', (a, b))
def show_details(name, f):
    "Show details of a callable object."
    print('{}:'.format(name))
    print('  object:', f)
    print('  __name__:', end=' ')
    try:
        print(f.__name__)
    except AttributeError:
        # partial objects have no __name__ until update_wrapper copies one
        print('(no __name__)')
    print('  __doc__', repr(f.__doc__))
    print()
show_details('myfunc', myfunc)
# A freshly created partial exposes neither __name__ nor the original __doc__
p1 = functools.partial(myfunc, b=4)
show_details('raw wrapper', p1)
# Attributes copied (WRAPPER_ASSIGNMENTS) and merged (WRAPPER_UPDATES)
print('Updating wrapper:')
print('  assign:', functools.WRAPPER_ASSIGNMENTS)
print('  update:', functools.WRAPPER_UPDATES)
print()
# After update_wrapper, the partial reports myfunc's name and docstring
functools.update_wrapper(p1, myfunc)
show_details('updated wrapper', p1)
"""
Explanation: Acquiring Function Properties
The partial object does not have name or doc attributes by default, and without those attributes, decorated functions are more difficult to debug. Using update_wrapper(), copies or adds attributes from the original function to the partial object.
End of explanation
"""
import functools
class MyClass:
    "Demonstration class for functools"
    def __call__(self, e, f=6):
        "Docstring for MyClass.__call__"
        print('  called object with:', (self, e, f))
def show_details(name, f):
    "Show details of a callable object."
    print('{}:'.format(name))
    print('  object:', f)
    print('  __name__:', end=' ')
    try:
        print(f.__name__)
    except AttributeError:
        print('(no __name__)')
    print('  __doc__', repr(f.__doc__))
    return
# partial() wraps any callable, including a class instance with __call__
o = MyClass()
show_details('instance', o)
o('e goes here')
print()
# pre-fill both keyword arguments of the instance's __call__
p = functools.partial(o, e='default for e', f=8)
functools.update_wrapper(p, o)
show_details('instance wrapper', p)
p()
"""
Explanation: Other Callables
Partials work with any callable object, not just with standalone functions.
End of explanation
"""
import functools
def standalone(self, a=1, b=2):
    "Standalone function"
    print('  called standalone with:', (self, a, b))
    if self is not None:
        print('  self.attr =', self.attr)
class MyClass:
    "Demonstration class for functools"
    def __init__(self):
        self.attr = 'instance attribute'
    # partialmethod binds like a normal method when accessed; partial does not
    method1 = functools.partialmethod(standalone)
    method2 = functools.partial(standalone)
o = MyClass()
print('standalone')
standalone(None)
print()
print('method1 as partialmethod')
# self is supplied automatically, so self.attr is printed
o.method1()
print()
print('method2 as partial')
try:
    # fails: the partial is not bound, so 'self' is never supplied
    o.method2()
except TypeError as err:
    print('ERROR: {}'.format(err))
"""
Explanation: Methods and Functions
While partial() returns a callable ready to be used directly, partialmethod() returns a callable ready to be used as an unbound method of an object. In the following example, the same standalone function is added as an attribute of MyClass twice, once using partialmethod() as method1() and again using partial() as method2().
End of explanation
"""
import functools
# Helper: report a callable's identity (name and docstring)
def show_details(name, f):
    "Show details of a callable object."
    print('{}:'.format(name))
    print('  object:', f)
    print('  __name__:', end=' ')
    try:
        print(f.__name__)
    except AttributeError:
        print('(no __name__)')
    print('  __doc__', repr(f.__doc__))
    print()
def simple_decorator(f):
    """Wrap f with demo defaults, echoing each call before delegating."""
    def decorated(a='decorated defaults', b=1):
        print('  decorated:', (a, b))
        print('  ', end=' ')
        return f(a, b=b)
    # equivalent to decorating `decorated` with @functools.wraps(f)
    functools.update_wrapper(decorated, f)
    return decorated
def myfunc(a, b=2):
    "myfunc() is not complicated"
    # Print-only demo target; its __name__ and __doc__ are what show_details
    # inspects before and after wrapping.
    print('  myfunc:', (a, b))
    return
# The raw function
show_details('myfunc', myfunc)
myfunc('unwrapped, default b')
myfunc('unwrapped, passing b', 3)
print()
# Wrap explicitly
wrapped_myfunc = simple_decorator(myfunc)
show_details('wrapped_myfunc', wrapped_myfunc)
wrapped_myfunc()
wrapped_myfunc('args to wrapped', 4)
print()
# Wrap with decorator syntax
@simple_decorator
def decorated_myfunc(a, b):
    # Delegates to myfunc and returns None; the decorator adds logging and
    # default arguments around the call.
    myfunc(a, b)
    return
show_details('decorated_myfunc', decorated_myfunc)
decorated_myfunc()
decorated_myfunc('args to decorated', 4)
"""
Explanation: method1() can be called from an instance of MyClass, and the instance is passed as the first argument just as with methods defined normally. method2() is not set up as a bound method, and so the self argument must be passed explicitly, or the call will result in a TypeError.
Acquiring Function Properties for Decorators
Updating the properties of a wrapped callable is especially useful when used in a decorator, since the transformed function ends up with properties of the original “bare” function.
End of explanation
"""
import functools
import inspect
from pprint import pprint
@functools.total_ordering
class MyObject:
    """Orderable wrapper around a single value. total_ordering derives the
    remaining comparison operators from __eq__ and __gt__."""
    def __init__(self, val):
        self.val = val
    def __eq__(self, rhs):
        print('  testing __eq__({}, {})'.format(
            self.val, rhs.val))
        outcome = self.val == rhs.val
        return outcome
    def __gt__(self, rhs):
        print('  testing __gt__({}, {})'.format(
            self.val, rhs.val))
        outcome = self.val > rhs.val
        return outcome
print('Methods:\n')
pprint(inspect.getmembers(MyObject, inspect.isfunction))
a = MyObject(1)
b = MyObject(2)
print('\nComparisons:')
for expr in ['a < b', 'a <= b', 'a == b', 'a >= b', 'a > b']:
print('\n{:<6}:'.format(expr))
result = eval(expr)
print(' result of {}: {}'.format(expr, result))
"""
Explanation: Comparison
Under Python 2, classes could define a __cmp__() method that returns -1, 0, or 1 based on whether the object is less than, equal to, or greater than the item being compared. Python 2.1 introduced the rich comparison methods API (__lt__(), __le__(), __eq__(), __ne__(), __gt__(), and __ge__()), which perform a single comparison operation and return a boolean value. Python 3 deprecated __cmp__() in favor of these new methods and functools provides tools to make it easier to write classes that comply with the new comparison requirements in Python 3.
Rich Comparison
The rich comparison API is designed to allow classes with complex comparisons to implement each test in the most efficient way possible. However, for classes where comparison is relatively simple, there is no point in manually creating each of the rich comparison methods. The total_ordering() class decorator takes a class that provides some of the methods, and adds the rest of them.
End of explanation
"""
import functools
class MyObject:
    # Minimal value wrapper used to demonstrate cmp_to_key() below.
    def __init__(self, val):
        self.val = val
    def __str__(self):
        return 'MyObject({})'.format(self.val)
def compare_obj(a, b):
    """Old-style comparison function.
    """
    print('comparing {} and {}'.format(a, b))
    # Classic cmp() contract: negative, positive, or zero.
    if a.val < b.val:
        result = -1
    elif a.val > b.val:
        result = 1
    else:
        result = 0
    return result
# Make a key function using cmp_to_key()
get_key = functools.cmp_to_key(compare_obj)
def get_key_wrapper(o):
    "Wrapper function for get_key to allow for print statements."
    new_key = get_key(o)
    print('key_wrapper({}) -> {!r}'.format(o, new_key))
    return new_key
# Values 5..1 in descending order; sorted() with the comparison-derived key
# reorders them ascending.
objs = [MyObject(x) for x in range(5, 0, -1)]
for o in sorted(objs, key=get_key_wrapper):
    print(o)
"""
Explanation: Collation Order
Since old-style comparison functions are deprecated in Python 3, the cmp argument to functions like sort() are also no longer supported. Older programs that use comparison functions can use cmp_to_key() to convert them to a function that returns a collation key, which is used to determine the position in the final sequence.
End of explanation
"""
import functools
@functools.lru_cache()
def expensive(a, b):
    # The print fires only on a cache miss; repeat calls with the same
    # arguments are served from the LRU cache.
    print('expensive({}, {})'.format(a, b))
    product = a * b
    return product
MAX = 2
print('First set of calls:')
for i in range(MAX):
for j in range(MAX):
expensive(i, j)
print(expensive.cache_info())
print('\nSecond set of calls:')
for i in range(MAX + 1):
for j in range(MAX + 1):
expensive(i, j)
print(expensive.cache_info())
print('\nClearing cache:')
expensive.cache_clear()
print(expensive.cache_info())
print('\nThird set of calls:')
for i in range(MAX):
for j in range(MAX):
expensive(i, j)
print(expensive.cache_info())
"""
Explanation: Caching
The lru_cache() decorator wraps a function in a least-recently-used cache. Arguments to the function are used to build a hash key, which is then mapped to the result. Subsequent calls with the same arguments will fetch the value from the cache instead of calling the function. The decorator also adds methods to the function to examine the state of the cache (cache_info()) and empty the cache (cache_clear()).
End of explanation
"""
import functools
def do_reduce(a, b):
    # Accumulator step for functools.reduce: running total plus the next item.
    print('do_reduce({}, {})'.format(a, b))
    total = a + b
    return total
data = range(1, 5)
print(data)
result = functools.reduce(do_reduce, data)
print('result: {}'.format(result))
"""
Explanation: Reducing a Data Set
The reduce() function takes a callable and a sequence of data as input and produces a single value as output based on invoking the callable with the values from the sequence and accumulating the resulting output.
End of explanation
"""
import functools
@functools.singledispatch
def myfunc(arg):
    # Default implementation, used when arg's type has no registered handler.
    print('default myfunc({!r})'.format(arg))
@myfunc.register(int)
def myfunc_int(arg):
    # Handler selected when the first argument is an int.
    print('myfunc_int({})'.format(arg))
@myfunc.register(list)
def myfunc_list(arg):
    # Handler selected for lists; prints one line per element.
    print('myfunc_list()')
    for item in arg:
        print('  {}'.format(item))
myfunc('string argument')
myfunc(1)
myfunc(2.3)
myfunc(['a', 'b', 'c'])
"""
Explanation: Generic Functions
In a dynamically typed language like Python it is common to need to perform slightly different operation based on the type of an argument, especially when dealing with the difference between a list of items and a single item. It is simple enough to check the type of an argument directly, but in cases where the behavioral difference can be isolated into separate functions functools provides the singledispatch() decorator to register a set of generic functions for automatic switching based on the type of the first argument to a function.
End of explanation
"""
|
CopernicusMarineInsitu/INSTACTraining | PythonNotebooks/indexFileNavigation/index_file_download.ipynb | mit | user = '' #type CMEMS user name
password = '' #type CMEMS password
product_name = 'INSITU_BAL_NRT_OBSERVATIONS_013_032' #type aimed CMEMS in situ product
distribution_unit = 'cmems.smhi.se' #type aimed hosting institution
"""
Explanation: <h3> ABSTRACT </h3>
All CMEMS in situ data products can be found and downloaded after registration via CMEMS catalogue.
Such a channel is advisable only for sporadic netCDF downloading because, when operational, interaction with the web user interface is not practical. In this context, the use of scripts for FTP file transfer is a much more advisable approach.
As long as every line of such files contains information about the netCDFs contained within the different directories see at tips why, it is posible for users to loop over its lines to download only those that matches a number of specifications such as spatial coverage, time coverage, provider, data_mode, parameters or file_name related (region, data type, TS or PF, platform code, or/and platform category, timestamp).
<h3> PREREQUISITES </h3>
credentias
aimed in situ product name
aimed hosting distribution unit
i.e:
End of explanation
"""
import ftplib
"""
Explanation: <h3>DOWNLOAD</h3>
End of explanation
"""
# Download the history index file (available for NRT & REP products).
ftp = ftplib.FTP(distribution_unit, user, password)
try:
    ftp.cwd("Core")
    ftp.cwd(product_name)
    aimedFileName = 'index_history.txt'
    local_filename = aimedFileName
    # The with-statement closes the local file even if the transfer fails.
    with open(local_filename, 'wb') as local_file:
        ftp.retrbinary('RETR ' + aimedFileName, local_file.write)
finally:
    # Always close the FTP control connection (the original leaked it on error).
    ftp.quit()
#ready when 221 Goodbye.!
"""
Explanation: 1. index history example (NRT & REP products)
End of explanation
"""
# Download the monthly index file (available for NRT products).
ftp = ftplib.FTP(distribution_unit, user, password)
try:
    ftp.cwd("Core")
    ftp.cwd(product_name)
    aimedFileName = 'index_monthly.txt'
    local_filename = aimedFileName
    # The with-statement closes the local file even if the transfer fails.
    with open(local_filename, 'wb') as local_file:
        ftp.retrbinary('RETR ' + aimedFileName, local_file.write)
finally:
    # Always close the FTP control connection (the original leaked it on error).
    ftp.quit()
#ready when 221 Goodbye.!
"""
Explanation: 2. index monthly example (NRT products)
End of explanation
"""
# Download the latest index file (available for NRT products).
ftp = ftplib.FTP(distribution_unit, user, password)
try:
    ftp.cwd("Core")
    ftp.cwd(product_name)
    aimedFileName = 'index_latest.txt'
    local_filename = aimedFileName
    # The with-statement closes the local file even if the transfer fails.
    with open(local_filename, 'wb') as local_file:
        ftp.retrbinary('RETR ' + aimedFileName, local_file.write)
finally:
    # Always close the FTP control connection (the original leaked it on error).
    ftp.quit()
#ready when 221 Goodbye.!
"""
Explanation: 3. index latest example (NRT products)
End of explanation
"""
import numpy as np
import pandas as pd
from random import randint
index_file = 'index_history.txt' #choose index file to look at a random line
# encoding='utf-8' makes genfromtxt return str fields on Python 3; the default
# ('bytes') would yield bytes objects, breaking the string concatenation below.
index = np.genfromtxt(index_file, skip_header=6, unpack=False, delimiter=',', dtype=None,
                      encoding='utf-8',
                      names=['catalog_id', 'file_name', 'geospatial_lat_min', 'geospatial_lat_max',
                             'geospatial_lon_min', 'geospatial_lon_max',
                             'time_coverage_start', 'time_coverage_end',
                             'provider', 'date_update', 'data_mode', 'parameters'])
# random.randint is inclusive on BOTH ends, so the upper bound must be
# len(index) - 1 (the original randint(0, len(index)) could raise IndexError).
dataset = randint(0, len(index) - 1) #random line of the index file
headers = ['catalog_id', 'file_name', 'geospatial_lat_min', 'geospatial_lat_max',
           'geospatial_lon_min', 'geospatial_lon_max',
           'time_coverage_start', 'time_coverage_end',
           'provider', 'date_update', 'data_mode', 'parameters']
# Render the file_name column as a clickable HTML link in the notebook.
values = [('<a href=' + index[dataset]['file_name'] + '>' + index[dataset]['file_name'] + '</a>')
          if h == 'file_name' else index[dataset][h]
          for h in headers]
df = pd.DataFrame(values, index=headers, columns=[dataset])
df.style
"""
Explanation: <h3>QUICK VIEW</h3>
End of explanation
"""
|
intel-analytics/BigDL | apps/image-augmentation/image-augmentation.ipynb | apache-2.0 | from bigdl.dllib.nncontext import init_nncontext
from bigdl.dllib.feature.image import *
import cv2
import numpy as np
from IPython.display import Image, display
sc = init_nncontext("Image Augmentation Example")
"""
Explanation: Image Augmentation
Image Augmentation augments datasets (especially small datasets) to train model. The way to do image augmentation is to transform images by different ways. In this notebook we demonstrate how to do image augmentation using Analytics ZOO APIs.
End of explanation
"""
# create LocalImageSet from an image
local_image_set = ImageSet.read("image/test.jpg")
# create LocalImageSet from an image folder
local_image_set = ImageSet.read("image/")
# create LocalImageSet from list of images
image = cv2.imread("image/test.jpg")
local_image_set = LocalImageSet([image])
print(local_image_set.get_image())
print('isDistributed: ', local_image_set.is_distributed(), ', isLocal: ', local_image_set.is_local())
"""
Explanation: Create LocalImageSet
End of explanation
"""
# create DistributedImageSet from an image
distributed_image_set = ImageSet.read("image/test.jpg", sc, 2)
# create DistributedImageSet from an image folder
distributed_image_set = ImageSet.read("image/", sc, 2)
# create LocalImageSet from image rdd
image = cv2.imread("image/test.jpg")
image_rdd = sc.parallelize([image], 2)
label_rdd = sc.parallelize([np.array([1.0])], 2)
distributed_image_set = DistributedImageSet(image_rdd, label_rdd)
images_rdd = distributed_image_set.get_image()
label_rdd = distributed_image_set.get_label()
print(images_rdd)
print(label_rdd)
print('isDistributed: ', distributed_image_set.is_distributed(), ', isLocal: ', distributed_image_set.is_local())
print('total images:', images_rdd.count())
"""
Explanation: Create DistributedImageSet
End of explanation
"""
path = "image/test.jpg"
def transform_display(transformer, image_set):
out = transformer(image_set)
cv2.imwrite('/tmp/tmp.jpg', out.get_image(to_chw=False)[0])
display(Image(filename='/tmp/tmp.jpg'))
"""
Explanation: Transform images
End of explanation
"""
brightness = ImageBrightness(0.0, 32.0)
image_set = ImageSet.read(path)
transform_display(brightness, image_set)
"""
Explanation: Brightness
Adjust the image brightness
End of explanation
"""
transformer = ImageHue(-18.0, 18.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: Hue
Adjust image hue
End of explanation
"""
transformer = ImageSaturation(10.0, 20.0)
image_set= ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: Saturation
Adjust image saturation
End of explanation
"""
transformer = ImageChannelOrder()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: ChannelOrder
Random change the channel of an image
End of explanation
"""
transformer = ImageColorJitter()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: ColorJitter
Random adjust brightness, contrast, hue, saturation
End of explanation
"""
transformer = ImageResize(300, 300)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: Resize
Resize the roi(region of interest) according to scale
End of explanation
"""
transformer = ImageAspectScale(200, max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: AspectScale
Resize the image, keep the aspect ratio. scale according to the short edge
End of explanation
"""
transformer = ImageRandomAspectScale([100, 300], max_size = 3000)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: RandomAspectScale
Resize the image by randomly choosing a scale
End of explanation
"""
transformer = ImageChannelNormalize(20.0, 30.0, 40.0, 2.0, 3.0, 4.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: ChannelNormalize
Image channel normalize
End of explanation
"""
%%time
print("PixelNormalize takes nearly one and a half minutes. Please wait a moment.")
means = [2.0] * 3 * 500 * 375
transformer = ImagePixelNormalize(means)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: PixelNormalize
Pixel level normalizer, data(Pixel) = data(Pixel) - mean(Pixels)
End of explanation
"""
transformer = ImageCenterCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: CenterCrop
Crop a cropWidth x cropHeight patch from center of image.
End of explanation
"""
transformer = ImageRandomCrop(200, 200)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: RandomCrop
Random crop a cropWidth x cropHeight patch from an image.
End of explanation
"""
transformer = ImageFixedCrop(0.0, 0.0, 200.0, 200.0, False)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: FixedCrop
Crop a fixed area of image
End of explanation
"""
transformer = ImageFiller(0.0, 0.0, 0.5, 0.5, 255)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: Filler
Fill part of image with certain pixel value
End of explanation
"""
transformer = ImageExpand(means_r=123, means_g=117, means_b=104,
max_expand_ratio=2.0)
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: Expand
Expand image, fill the blank part with the meanR, meanG, meanB
End of explanation
"""
transformer = ImageHFlip()
image_set = ImageSet.read(path)
transform_display(transformer, image_set)
"""
Explanation: HFlip
Flip the image horizontally
End of explanation
"""
|
UChicagoPhysics/SampleExercises | exercises/electricityAndMagnetism/Electric Field of a Moving Charge.ipynb | gpl-2.0 | import numpy as np
import matplotlib.pylab as plt
#Import 3-dimensional plotting package.
from mpl_toolkits.mplot3d import axes3d
"""
Explanation: Electric Field of a Moving Charge
PROGRAM: Electric field of a moving charge
CREATED: 5/30/2018
In this problem, I plot the electric field of a moving charge for different speeds $\beta = v/c$. The charge is moving along the x-axis.
- In step 1, I import a package for plotting in 3 dimensions.
- In step 2, I define the contants in the problem (in compatible units, $m$, $s$, $kg$, $C$).
- For the charge $q$, I use the charge of an electron.
- $\epsilon_{0}$ is the permittivity of free space.
- The speed of the charge $\beta$ is a value between 0 and 1, and can be changed to see what happens to the electric field.
- $c$ is the speed of light.
- $v$ is velocity of the charge in $\frac{m}{s}$, calculated by $v = \beta c$.
- In step 3, I define a function to calculate the magnitude of the electric field. The electric field vector is
$\vec{E}(\vec{r}, t) = \frac{q}{4 \pi \epsilon_{0}} \frac{1 - \beta^{2}}{(1 - \beta^{2}sin^2(\theta))^{3/2}} \frac{\hat{R}}{R^{2}}$, so this function calculates $E(\vec{r}, t) = \frac{q}{4 \pi \epsilon_{0}} \frac{1 - \beta^{2}}{R^{2} (1 - \beta^{2}sin^2(\theta))^{3/2}}$.
- In step 4, having calculated the direction vector $\hat{R}$ by hand, by drawing pictures, I define a function to calculate the magnitude of the x-component, y-component, and z-component of the electric field. Since $\hat{R} = (\frac{x - v_{x}t}{ \sqrt{(x - v_{x}t)^{2} + y^{2} + z^{2}} }, \frac{y}{\sqrt{ (x - v_{x}t)^2 + y^{2} + z^{2}} }, \frac{z}{ \sqrt{(x - v_{x}t)^2 + y^{2} + z^{2}} })$, the electric field components are
- $E_{x} = E(\vec{r}, t) \frac{x - v_{x}t}{ \sqrt{(x - v_{x}t)^{2} + y^{2} + z^{2}} } = (\frac{q}{4 \pi \epsilon_{0}} \frac{1 - \beta^{2}}{R^{2} (1 - \beta^{2}sin^2(\theta))^{3/2}}) \frac{x - v_{x}t}{ \sqrt{(x - v_{x}t)^{2} + y^{2} + z^{2}} }$
- $E_{y} = E(\vec{r}, t) \frac{y}{ \sqrt{(x - v_{x}t)^{2} + y^{2} + z^{2}} } = (\frac{q}{4 \pi \epsilon_{0}} \frac{1 - \beta^{2}}{R^{2} (1 - \beta^{2}sin^2(\theta))^{3/2}}) \frac{y}{ \sqrt{(x - v_{x}t)^{2} + y^{2} + z^{2}} }$
- $E_{z} = E(\vec{r}, t) \frac{z}{ \sqrt{(x - v_{x}t)^{2} + y^{2} + z^{2}} } = (\frac{q}{4 \pi \epsilon_{0}} \frac{1 - \beta^{2}}{R^{2} (1 - \beta^{2}sin^2(\theta))^{3/2}}) \frac{z}{ \sqrt{(x - v_{x}t)^{2} + y^{2} + z^{2}} }$
- In step 5, I plot the electric field at time $t = 0.000000005$ seconds for the moving charge. The time was chosen so that the charge, which is moving close to the speed of light, has not moved very far from the origin outside my chosen plot range. The magnitude of the electric field is highly exaggerated, so that the vectors are visible. (Each component is multiplied by $10^{11}$ $\frac{N}{C}$.)
1 - Import Packages
End of explanation
"""
#Define constants - charge of an electron, permittivity of free space, velocity relative to speed of light.
q = 1.602 * 10**(-19)  # elementary charge [C]
e_0 = 8.854 * 10**(-12)  # vacuum permittivity [C^2 N^-1 m^-2]
beta = 0.95  # charge speed as a fraction of c (change this to explore the field)
c = 2.997925 * 10**8  # speed of light [m/s]
v = beta * c  # charge speed [m/s]
"""
Explanation: 2 - Define Constants
To see what happens when the speed $\beta$ of the charge changes, modify the value of beta below.
End of explanation
"""
#Define magnitude of electric field as a function.
def E(x, y, z, t):
    """Magnitude of the E-field at field point (x, y, z) at time t, for a
    charge moving along +x at speed v (charge at the origin at t = 0)."""
    # Distance from the origin to the field point.
    r = np.sqrt(x**2 + y**2 + z**2)
    # Law of cosines: R is the distance from the charge's *present* position
    # (v*t, 0, 0) to the field point; x/r is the cosine of the angle between
    # the position vector and the x-axis.
    R = np.sqrt(r**2 + (v*t)**2 - 2 * r * (v*t) * x/np.sqrt(x**2 + y**2 + z**2))
    # sin(theta), with theta measured from the direction of motion (x-axis).
    sin_theta = np.sqrt(y**2 + z**2) / R
    return q/(4*np.pi*e_0) * ((1 - beta**2)/(R**2 * (1 - beta**2 * sin_theta**2)**(3/2)))
"""
Explanation: 3 - Calculate Total Electric Field Magnitude
By drawing the vectors in the problem, and relevant triangles, calculate the magnitude of the electric field $E(x, y, z, t)$ at a point $(x, y, z)$ for a certain time $t$. Define a function that will do this calculation for any point in space and at any time.
End of explanation
"""
#Define magnitude of electric field in x, y, and z directions.
def E_x(x, y, z, t):
    # x-component: total magnitude times the x part of the unit vector R-hat,
    # which points from the charge's present position (v*t, 0, 0) to (x, y, z).
    return E(x, y, z, t) * (x - v*t)/np.sqrt((x - v*t)**2 + y**2 + z**2)
def E_y(x, y, z, t):
    # y-component of E along R-hat.
    return E(x, y, z, t) * (y)/np.sqrt((x - v*t)**2 + y**2 + z**2)
def E_z(x, y, z, t):
    # z-component of E along R-hat.
    return E(x, y, z, t) * (z)/np.sqrt((x - v*t)**2 + y**2 + z**2)
"""
Explanation: 4 - Calculate Electric Field Components' Magnitude
End of explanation
"""
#Make a three-dimensional plot of the electric field.
fig = plt.figure(figsize = (8, 8))
ax = fig.gca(projection = '3d')
#Make a grid of points where vectors of the vector field are placed.
x_lim = 20
y_lim = 20
z_lim = 20
n = 5
X, Y, Z = np.meshgrid(np.arange(-x_lim, x_lim, n),
np.arange(-y_lim, y_lim, n),
np.arange(-z_lim, z_lim, n))
#Choose a time (in seconds) to plot the electric field of the charge, where the charge is at the origin for t = 0.
t = 0.000000005
#Write the vector components. Multiply by 10^11 so that the vectors are visible.
U = E_x(X, Y, Z, t) * 10**11
V = E_y(X, Y, Z, t) * 10**11
W = E_z(X, Y, Z, t) * 10**11
#Plot the vector field.
ax.quiver(X, Y, Z, U, V, W)
#Plot the x-axis, y-axis, and z-axis
X_0 = 1000*[0]
Y_0 = 1000*[0]
Z_0 = 1000*[0]
X_axis = np.linspace(-x_lim, x_lim, 1000)
ax.plot(X_axis, Y_0, Z_0, color = 'k', linewidth = 1, alpha = 0.5)
Y_axis = np.linspace(-y_lim, y_lim, 1000)
ax.plot(X_0, Y_axis, Z_0, color = 'k', linewidth = 1, alpha = 0.5)
Z_axis = np.linspace(-z_lim, z_lim, 1000)
ax.plot(X_0, Y_0, Z_axis, color = 'k', linewidth = 1, alpha = 0.5)
#Plot the charge, moving along the x-axis.
ax.plot([v*t], [0], [0], marker = 'o', markerfacecolor = 'k', markeredgecolor = 'None', alpha = 0.8)
#Adjust the viewing angle of the plot.
ax.view_init(elev = 20, azim = 275)
#Label the plot.
ax.set_xlabel('x (meters)')
ax.set_ylabel('y (meters)')
ax.set_zlabel('z (meters)')
ax.set_title('Electric Field of a Charge Moving at Constant Velocity, $\\beta = 0.95$')
#plt.savefig('Electric Field of a Charge Moving at Constant Velocity, B = 0.95.png')
plt.show()
"""
Explanation: 5 - Plot Electric Field in Three Dimensions
The magnitude of the electric field is exaggerated so that it is visible.
End of explanation
"""
|
joshnsolomon/phys202-2015-work | assignments/assignment04/MatplotlibEx02.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
"""
Explanation: Matplotlib Exercise 2
Imports
End of explanation
"""
!head -n 30 open_exoplanet_catalogue.txt
"""
Explanation: Exoplanet properties
Over the past few decades, astronomers have discovered thousands of extrasolar planets. The following paper describes the properties of some of these planets.
http://iopscience.iop.org/1402-4896/2008/T130/014001
Your job is to reproduce Figures 2 and 4 from this paper using an up-to-date dataset of extrasolar planets found on this GitHub repo:
https://github.com/OpenExoplanetCatalogue/open_exoplanet_catalogue
A text version of the dataset has already been put into this directory. The top of the file has documentation about each column of data:
End of explanation
"""
data = np.genfromtxt('open_exoplanet_catalogue.txt',delimiter=',')
assert data.shape==(1993,24)
"""
Explanation: Use np.genfromtxt with a delimiter of ',' to read the data into a NumPy array called data:
End of explanation
"""
#referenced matpltlib.org to learn how to create a histogram
x = data[:, 2]  # planetary masses in Jupiter masses; NaN where unknown
# Keep only finite, positive masses. The original sorted the column and took a
# hard-coded slice [0:1069] to skip the NaN entries, which breaks as soon as
# the catalogue changes size; filtering also guards log10 against zeros.
x = x[np.isfinite(x) & (x > 0)]
b = np.log10(x)
plt.hist(b, bins=50)
plt.xlabel('Mass in terms of Jupiter masses (logarithmic scale)')
plt.ylabel('Frequency')
plt.title('Mass Distribution of Extrasolar Planets')
assert True # leave for grading
"""
Explanation: Make a histogram of the distribution of planetary masses. This will reproduce Figure 2 in the original paper.
Customize your plot to follow Tufte's principles of visualizations.
Customize the box, grid, spines and ticks to match the requirements of this data.
Pick the number of bins for the histogram appropriately.
End of explanation
"""
plt.figure(figsize=(9,6))
a = plt.scatter(data[:,5],data[:,6],marker='.')
plt.semilogx()
plt.xlim(10**-3, 10**3)
plt.xlabel('Semi-major axis (Astronomical Units)')
plt.ylabel('Eccentricity')
plt.title('Eccentricty versus Semi-major axis')
assert True # leave for grading
"""
Explanation: Make a scatter plot of the orbital eccentricity (y) versus the semimajor axis. This will reproduce Figure 4 of the original paper. Use a log scale on the x axis.
Customize your plot to follow Tufte's principles of visualizations.
Customize the box, grid, spines and ticks to match the requirements of this data.
End of explanation
"""
|
JackDi/phys202-2015-work | assignments/assignment03/NumpyEx03.ipynb | mit | import numpy as np
%matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
import antipackage
import github.ellisonbg.misc.vizarray as va
"""
Explanation: Numpy Exercise 3
Imports
End of explanation
"""
def brownian(maxt, n):
    """Return one realization of a Brownian (Wiener) process with n steps
    over the interval [0, maxt], as a pair of arrays (times, values)."""
    # Uniform time grid and its spacing h.
    t = np.linspace(0.0, maxt, n)
    step = t[1] - t[0]
    # i.i.d. increments dW ~ N(0, h), i.e. sqrt(h) times standard normals.
    increments = np.sqrt(step) * np.random.normal(0.0, 1.0, n - 1)
    # The walk starts at zero; later points are running sums of the increments.
    walk = np.zeros(n)
    walk[1:] = np.cumsum(increments)
    return t, walk
"""
Explanation: Geometric Brownian motion
Here is a function that produces standard Brownian motion using NumPy. This is also known as a Wiener Process.
End of explanation
"""
# YOUR CODE HERE
# One realization of the Wiener process: 1000 steps over [0, 1.0].
# (Removed the commented-out scratch code that was left behind here.)
t, W = brownian(1.0, 1000)
assert isinstance(t, np.ndarray)
assert isinstance(W, np.ndarray)
assert t.dtype==np.dtype(float)
assert W.dtype==np.dtype(float)
assert len(t)==len(W)==1000
"""
Explanation: Call the brownian function to simulate a Wiener process with 1000 steps and max time of 1.0. Save the results as two arrays t and W.
End of explanation
"""
# YOUR CODE HERE
plt.plot(t,W)
plt.title ("Brownian Function")
plt.xlabel("Time (seconds)")
plt.ylabel("Brownian Value")
assert True # this is for grading
"""
Explanation: Visualize the process using plt.plot with t on the x-axis and W(t) on the y-axis. Label your x and y axes.
End of explanation
"""
# YOUR CODE HERE
dW=np.diff(W,n=1)    # step-to-step increments W[i+1] - W[i]
mean=dW.mean()       # expected to be near 0 for Wiener increments
std=dW.std()         # expected to be near sqrt(h) = sqrt(1/999) for this grid
assert len(dW)==len(W)-1
assert dW.dtype==np.dtype(float)
"""
Explanation: Use np.diff to compute the changes at each step of the motion, dW, and then compute the mean and standard deviation of those differences.
End of explanation
"""
def geo_brownian(t, W, X0, mu, sigma):
    """Return X(t) for geometric Brownian motion with drift mu and volatility sigma.

    Implements X(t) = X0 * exp((mu - sigma**2/2)*t + sigma*W(t)), where W is a
    standard Wiener process sampled at the times in t (arrays broadcast
    elementwise).
    """
    # Bug fix: the stochastic term is sigma*W(t); the original multiplied in an
    # extra factor of t (sigma*W*t), which does not match the formula above.
    return X0 * np.exp((mu - sigma**2 / 2) * t + sigma * W)
assert True # leave this for grading
"""
Explanation: Write a function that takes $W(t)$ and converts it to geometric Brownian motion using the equation:
$$
X(t) = X_0 e^{((\mu - \sigma^2/2)t + \sigma W(t))}
$$
Use Numpy ufuncs and no loops in your function.
End of explanation
"""
# YOUR CODE HERE
a=geo_brownian(t,W,1.0,0.5,.3)
plt.plot(t,a)
plt.xlabel("Time")
plt.ylabel('X(t)')
plt.title("Geo Brownian Plot over Time")
assert True # leave this for grading
"""
Explanation: Use your function to simulate geometric brownian motion, $X(t)$ for $X_0=1.0$, $\mu=0.5$ and $\sigma=0.3$ with the Wiener process you computed above.
Visualize the process using plt.plot with t on the x-axis and X(t) on the y-axis. Label your x and y axes.
End of explanation
"""
|
datapolitan/lede_algorithms | class5_2/kmeans.ipynb | gpl-2.0 | !curl -O http://www.cs.cornell.edu/home/llee/data/convote/convote_v1.1.tar.gz
!tar -zxvf convote_v1.1.tar.gz
paths = glob.glob("convote_v1.1/data_stage_one/development_set/*")
speeches = []
for path in paths:
speech = {}
filename = path[-26:]
speech['filename'] = filename
speech['bill_no'] = filename[:3]
speech['speaker_no'] = filename[4:10]
speech['bill_vote'] = filename[-5]
speech['party'] = filename[-7]
# Open the file
speech_file = open(path, 'r')
# Read the stuff out of it
speech['contents'] = speech_file.read()
cleaned_contents = re.sub(r"[^ \w]",'', speech['contents'])
cleaned_contents = re.sub(r" +",' ', cleaned_contents)
cleaned_contents = cleaned_contents.strip()
words = cleaned_contents.split(' ')
speech['word_count'] = len(words)
speeches.append(speech)
speeches[:5]
speeches_df = pd.DataFrame(speeches)
speeches_df.head()
speeches_df["word_count"].describe()
"""
Explanation: We'll be using data from http://www.cs.cornell.edu/home/llee/data/convote.html to explore k-means clustering
End of explanation
"""
# NOTE(review): this cell uses Python 2 print syntax ("print ''" and trailing
# commas to suppress newlines); it needs porting to run under Python 3.
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(max_features=10000, stop_words='english')
longer_speeches = speeches_df[speeches_df["word_count"] > 92]
#filtering for word counts greater than 92 (our median length)
X = vectorizer.fit_transform(longer_speeches['contents'])
from sklearn.cluster import KMeans
number_of_clusters = 7
km = KMeans(n_clusters=number_of_clusters)
km.fit(X)
print("Top terms per cluster:")
# argsort ascending, then reverse each row so highest-weight terms come first.
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(number_of_clusters):
    print("Cluster %d:" % i),
    # Print the 15 terms closest to this cluster's centroid.
    for ind in order_centroids[i, :15]:
        print(' %s' % terms[ind]),
    print ''
additional_stopwords = ['mr','congress','chairman','madam','amendment','legislation','speaker']
import nltk
english_stopwords = nltk.corpus.stopwords.words('english')
new_stopwords = additional_stopwords + english_stopwords
vectorizer = TfidfVectorizer(max_features=10000, stop_words=new_stopwords)
longer_speeches = speeches_df[speeches_df["word_count"] > 92]
X = vectorizer.fit_transform(longer_speeches['contents'])
number_of_clusters = 7
km = KMeans(n_clusters=number_of_clusters)
km.fit(X)
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(number_of_clusters):
print("Cluster %d:" % i),
for ind in order_centroids[i, :15]:
print(' %s' % terms[ind]),
print ''
longer_speeches["k-means label"] = km.labels_
longer_speeches.head()
china_speeches = longer_speeches[longer_speeches["k-means label"] == 1]
china_speeches.head()
vectorizer = TfidfVectorizer(max_features=10000, stop_words=new_stopwords)
X = vectorizer.fit_transform(china_speeches['contents'])
number_of_clusters = 5
km = KMeans(n_clusters=number_of_clusters)
km.fit(X)
print("Top terms per cluster:")
order_centroids = km.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names()
for i in range(number_of_clusters):
print("Cluster %d:" % i),
for ind in order_centroids[i, :10]:
print(' %s' % terms[ind]),
print ''
km.get_params()
km.score(X)
"""
Explanation: Notice that we have a lot of speeches that are relatively short. They probably aren't the best for clustering because of their brevity
Time to bring the TF-IDF vectorizer
End of explanation
"""
|
phoebe-project/phoebe2-docs | 2.3/examples/contact_spots.ipynb | gpl-3.0 | #!pip install -I "phoebe>=2.3,<2.4"
"""
Explanation: Contact Binary with Spots
Setup
Let's first make sure we have the latest version of PHOEBE 2.3 installed (uncomment this line if running in an online notebook session such as colab).
End of explanation
"""
import phoebe
from phoebe import u # units
logger = phoebe.logger()
b = phoebe.default_binary(contact_binary=True)
"""
Explanation: As always, let's do imports and initialize a logger and a new bundle.
End of explanation
"""
b.add_dataset('lc', times=phoebe.linspace(0,0.5,101))
b.run_compute(irrad_method='none', model='no_spot')
"""
Explanation: Model without Spots
End of explanation
"""
b.add_feature('spot', component='primary', feature='spot01', relteff=0.9, radius=20, colat=90, long=-45)
b.run_compute(irrad_method='none', model='with_spot')
"""
Explanation: Adding Spots
Let's add a spot to the primary component in our binary. Note that if you attempt to attach to the 'contact_envelope' component, an error will be raised. Spots can only be attached to star components.
The 'colat' parameter defines the latitude on the star measured from its North (spin) Pole. The 'long' parameter measures the longitude of the spot - with longitude = 0 being defined as pointing towards the other star at t0. See the spots tutorial for more details.
End of explanation
"""
afig, mplfig = b.plot(show=True, legend=True)
"""
Explanation: Comparing Light Curves
End of explanation
"""
b.remove_dataset(kind='lc')
b.remove_model(model=['with_spot', 'no_spot'])
b.add_dataset('mesh', compute_times=b.to_time(0.25), columns='teffs')
b.run_compute(irrad_method='none')
afig, mplfig = b.plot(fc='teffs', ec='face', fcmap='plasma', show=True)
"""
Explanation: Spots near the "neck"
Since the spots are still defined with the coordinate system of the individual star components, this can result in spots that are distorted and even "cropped" at the neck. Furthermore, spots with long=0 could be completely "hidden" by the neck or result in a ring around the neck.
To see this, let's plot our mesh with teff as the facecolor.
End of explanation
"""
b.set_value('long', value=-30)
b.run_compute(irrad_method='none')
afig, mplfig = b.plot(fc='teffs', ec='face', fcmap='plasma', show=True)
"""
Explanation: Now if we set the long closer to the neck, we'll see it get cropped by the boundary between the two components. If we need a spot that crosses between the two "halves" of the contact, we'd have to add separate spots to each component, with each getting cropped at the boundary.
End of explanation
"""
b.set_value('long', value=0.0)
b.run_compute(irrad_method='none')
afig, mplfig = b.plot(fc='teffs', ec='face', fcmap='plasma', show=True)
"""
Explanation: If we set long to zero, the spot completely disappears (as there is nowhere in the neck that is still on the surface).
End of explanation
"""
b.set_value('radius', value=40)
b.run_compute(irrad_method='none')
afig, mplfig = b.plot(fc='teffs', ec='face', fcmap='plasma', show=True)
"""
Explanation: But if we increase the radius large enough, we'll get a ring.
End of explanation
"""
|
mdeff/ntds_2016 | toolkit/02_sol_exploitation.ipynb | mit | import pandas as pd
import numpy as np
from IPython.display import display
import os.path
folder = os.path.join('..', 'data', 'social_media')
fb = pd.read_sql('facebook', 'sqlite:///' + os.path.join(folder, 'facebook.sqlite'), index_col='index')
tw = pd.read_sql('twitter', 'sqlite:///' + os.path.join(folder, 'twitter.sqlite'), index_col='index')
display(fb[:5])
display(tw[:5])
"""
Explanation: A Python Tour of Data Science: Data Exploitation
Michaël Defferrard, PhD student, EPFL LTS2
Exercise: problem definition
Theme of the exercise: understand the impact of your communication on social networks. A real life situation: the marketing team needs help in identifying which were the most engaging posts they made on social platforms to prepare their next AdWords campaign.
This notebook is the second part of the exercise. Given the data we collected from Facebook an Twitter in the last exercise, we will construct an ML model and evaluate how good it is to predict the number of likes of a post / tweet given the content.
1 Data importation
Use pandas to import the facebook.sqlite and twitter.sqlite databases.
Print the 5 first rows of both tables.
The facebook.sqlite and twitter.sqlite SQLite databases can be created by running the data acquisition and exploration exercise.
End of explanation
"""
from sklearn.feature_extraction.text import CountVectorizer
nwords = 200 # 100
def compute_bag_of_words(text, nwords):
    """Vectorize *text* into a bag-of-words count matrix.

    The vocabulary is capped at the *nwords* most frequent tokens.
    Returns the (documents x words) sparse count matrix together with
    the fitted vocabulary (list of words, one per column).
    """
    counter = CountVectorizer(max_features=nwords)
    word_counts = counter.fit_transform(text)
    return word_counts, counter.get_feature_names()
fb_bow, fb_vocab = compute_bag_of_words(fb.text, nwords)
#fb_p = pd.Panel({'orig': fb, 'bow': fb_bow})
display(fb_bow)
display(fb_vocab[100:110])
tw_bow, tw_vocab = compute_bag_of_words(tw.text, nwords)
display(tw_bow)
"""
Explanation: 2 Vectorization
First step: transform the data into a format understandable by the machine. What to do with text ? A common choice is the so-called bag-of-word model, where we represent each word a an integer and simply count the number of appearances of a word into a document.
Example
Let's say we have a vocabulary represented by the following correspondance table.
| Integer | Word |
|:-------:|---------|
| 0 | unknown |
| 1 | dog |
| 2 | school |
| 3 | cat |
| 4 | house |
| 5 | work |
| 6 | animal |
Then we can represent the following document
I have a cat. Cats are my preferred animals.
by the vector $x = [6, 0, 0, 2, 0, 0, 1]^T$.
Tasks
Construct a vocabulary of the 100 most occuring words in your dataset.
Build a vector $x \in \mathbb{R}^{100}$ for each document (post or tweet).
Tip: the natural language modeling libraries nltk and gensim are useful for advanced operations. You don't need them here.
This raises a first data cleaning question: we may have some text in French and some in English. What do we do?
End of explanation
"""
def print_most_frequent(bow, vocab, n=10):
    """Print the *n* most frequent words of a bag-of-words matrix.

    Parameters
    ----------
    bow : (documents x words) count matrix whose ``sum(axis=0)`` yields a
        1 x words matrix (e.g. a scipy sparse matrix).
    vocab : sequence mapping column index -> word.
    n : number of top words to print, most frequent first.
    """
    # Column sums give per-word totals; argsort is ascending, so the most
    # frequent words sit at the *end* of `idx`.
    idx = np.argsort(bow.sum(axis=0))
    for i in range(n):
        # -1 - i walks from the most frequent word downwards. The original
        # code used `range(10)` (ignoring `n`) and `idx[0, -i]`, which for
        # i == 0 picked index 0 -- the *least* frequent word.
        j = idx[0, -1 - i]
        print(vocab[j])
print_most_frequent(tw_bow, tw_vocab)
print('---')
print_most_frequent(fb_bow, fb_vocab)
"""
Explanation: Exploration question: what are the 5 most used words ? Exploring your data while playing with it is a useful sanity check.
End of explanation
"""
# Design matrix: bag-of-words counts; target: number of likes per tweet.
X = tw_bow
y = tw['likes'].values
n, d = X.shape
assert n == y.size
print(X.shape)
print(y.shape)
# Training and testing sets: half the samples each.
test_size = n // 2
print('Split: {} testing and {} training samples'.format(test_size, y.size - test_size))
# Random permutation so the split is not biased by tweet ordering.
perm = np.random.permutation(y.size)
X_test = X[perm[:test_size]]
X_train = X[perm[test_size:]]
y_test = y[perm[:test_size]]
y_train = y[perm[test_size:]]
"""
Explanation: 3 Pre-processing
The independant variables $X$ are the bags of words.
The target $y$ is the number of likes.
Split in half for training and testing sets.
End of explanation
"""
import scipy.sparse
class LinearRegression(object):
    """Ordinary least-squares linear model, fitted in closed form."""

    def predict(self, X):
        """Return the predicted target values given the features."""
        return X.dot(self.w) + self.b

    def fit(self, X, y):
        """Learn the model's parameters from the training data.

        Uses the centering trick: the intercept is the target mean, and
        the weights solve the normal equations on the centered targets.
        """
        n_samples, n_features = X.shape
        self.b = y.mean()
        gram = X.T.dot(X)
        rhs = X.T.dot(y - self.b)
        self.w = scipy.sparse.linalg.spsolve(gram, rhs)
def evaluate(y_pred, y_true):
    """Mean squared error between predictions and ground truth."""
    return np.mean((y_pred - y_true) ** 2)
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mse = evaluate(y_pred, y_test)
print('mse: {:.4f}'.format(mse))
"""
Explanation: 4 Linear regression
Using numpy, fit and evaluate the linear model $$\hat{w}, \hat{b} = \operatorname*{arg min}_{w,b} \| Xw + b - y \|_2^2.$$
Please define a class LinearRegression with two methods:
1. fit learn the parameters $w$ and $b$ of the model given the training examples.
2. predict gives the estimated number of likes of a post / tweet. That will be used to evaluate the model on the testing set.
To evaluate the classifier, create an accuracy(y_pred, y_true) function which computes the mean squared error $\frac1n \| \hat{y} - y \|_2^2$.
Hint: you may want to use the function scipy.sparse.linalg.spsolve().
If solve and spsolve tells you that your matrix is singular, please read this good comment. Potential solutions:
1. Is there any post / tweet without any word from the vocabulary ? I.e. a row of $X$ made only of zeroes. If yes, remove this row or enlarge the vocabulary.
2. Identify and remove redundant features, i.e. words, who are linear combinations of others.
3. What else could we do ?
End of explanation
"""
idx = np.argsort(abs(model.w))
for i in range(20):
j = idx[-1-i]
print('weight: {:5.2f}, word: {}'.format(model.w[j], tw_vocab[j]))
"""
Explanation: Interpretation: what are the most important words a post / tweet should include ?
End of explanation
"""
import ipywidgets
from IPython.display import clear_output
slider = ipywidgets.widgets.IntSlider(
value=1,
min=1,
max=nwords,
step=1,
description='nwords',
)
def handle(change):
    """Observer callback: refit the linear model on the first `nwords`
    bag-of-words features and report the test mean squared error.

    Relies on the notebook globals X_train, X_test, y_train, y_test,
    LinearRegression, evaluate and clear_output.
    """
    nwords = change['new']
    clear_output()
    print('nwords = {}'.format(nwords))
    regressor = LinearRegression()
    regressor.fit(X_train[:, :nwords], y_train)
    predictions = regressor.predict(X_test[:, :nwords])
    print('mse: {:.4f}'.format(evaluate(predictions, y_test)))
slider.observe(handle, names='value')
display(slider)
slider.value = nwords # As if someone moved the slider.
"""
Explanation: 5 Interactivity
Create a slider for the number of words, i.e. the dimensionality of the samples $x$.
Print the accuracy for each change on the slider.
End of explanation
"""
from sklearn import linear_model, metrics
model = linear_model.LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mse = metrics.mean_squared_error(y_test, y_pred)
assert np.allclose(evaluate(y_pred, y_test), mse)
print('mse: {:.4f}'.format(mse))
"""
Explanation: 6 Scikit learn
Fit and evaluate the linear regression model using sklearn.
Evaluate the model with the mean squared error metric provided by sklearn.
Compare with your implementation.
End of explanation
"""
import os
os.environ['KERAS_BACKEND'] = 'theano' # tensorflow
import keras
model = keras.models.Sequential()
model.add(keras.layers.Dense(output_dim=50, input_dim=nwords, activation='relu'))
model.add(keras.layers.Dense(output_dim=20, activation='relu'))
model.add(keras.layers.Dense(output_dim=1, activation='relu'))
model.compile(loss='mse', optimizer='sgd')
model.fit(X_train.toarray(), y_train, nb_epoch=20, batch_size=100)
y_pred = model.predict(X_test.toarray(), batch_size=32)
mse = evaluate(y_test, y_pred.squeeze())
print('mse: {:.4f}'.format(mse))
"""
Explanation: 7 Deep Learning
Try a simple deep learning model !
Another modeling choice would be to use a Recurrent Neural Network (RNN) and feed it the sentence words after words.
End of explanation
"""
from matplotlib import pyplot as plt
plt.style.use('ggplot')
%matplotlib inline
n = 100
plt.figure(figsize=(15, 5))
plt.plot(y_test[:n], '.', alpha=.7, markersize=10, label='ground truth')
plt.plot(y_pred[:n], '.', alpha=.7, markersize=10, label='prediction')
plt.legend()
plt.show()
"""
Explanation: 8 Evaluation
Use matplotlib to plot a performance visualization. E.g. the true number of likes and the real number of likes for all posts / tweets.
What do you observe ? What are your suggestions to improve the performance ?
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.16/_downloads/plot_decoding_unsupervised_spatial_filter.ipynb | bsd-3-clause | # Authors: Jean-Remi King <jeanremi.king@gmail.com>
# Asish Panda <asishrocks95@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.datasets import sample
from mne.decoding import UnsupervisedSpatialFilter
from sklearn.decomposition import PCA, FastICA
print(__doc__)
# Preprocess data
data_path = sample.data_path()
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
tmin, tmax = -0.1, 0.3
event_id = dict(aud_l=1, aud_r=2, vis_l=3, vis_r=4)
raw = mne.io.read_raw_fif(raw_fname, preload=True)
raw.filter(1, 20, fir_design='firwin')
events = mne.read_events(event_fname)
picks = mne.pick_types(raw.info, meg=False, eeg=True, stim=False, eog=False,
exclude='bads')
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=False,
picks=picks, baseline=None, preload=True,
verbose=False)
X = epochs.get_data()
"""
Explanation: Analysis of evoked response using ICA and PCA reduction techniques
This example computes PCA and ICA of evoked or epochs data. Then the
PCA / ICA components, a.k.a. spatial filters, are used to transform
the channel data to new sources / virtual channels. The output is
visualized on the average of all the epochs.
End of explanation
"""
pca = UnsupervisedSpatialFilter(PCA(30), average=False)
pca_data = pca.fit_transform(X)
ev = mne.EvokedArray(np.mean(pca_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev.plot(show=False, window_title="PCA", time_unit='s')
"""
Explanation: Transform data with PCA computed on the average ie evoked response
End of explanation
"""
ica = UnsupervisedSpatialFilter(FastICA(30), average=False)
ica_data = ica.fit_transform(X)
ev1 = mne.EvokedArray(np.mean(ica_data, axis=0),
mne.create_info(30, epochs.info['sfreq'],
ch_types='eeg'), tmin=tmin)
ev1.plot(show=False, window_title='ICA', time_unit='s')
plt.show()
"""
Explanation: Transform data with ICA computed on the raw epochs (no averaging)
End of explanation
"""
|
ozorich/phys202-2015-work | assignments/assignment05/MatplotlibEx03.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
"""
Explanation: Matplotlib Exercise 3
Imports
End of explanation
"""
def well2d(x, y, nx, ny, L=1.0):
    """Compute the 2d quantum well wave function.

    psi(x, y) = (2/L) * sin(nx*pi*x/L) * sin(ny*pi*y/L)

    x, y may be NumPy arrays; nx, ny are the quantum numbers and L is
    the well size.
    """
    return (2 / L) * np.sin(nx * np.pi * x / L) * np.sin(ny * np.pi * y / L)
psi = well2d(np.linspace(0,1,10), np.linspace(0,1,10), 1, 1)
assert len(psi)==10
assert psi.shape==(10,)
"""
Explanation: Contour plots of 2d wavefunctions
The wavefunction of a 2d quantum well is:
$$ \psi_{n_x,n_y}(x,y) = \frac{2}{L}
\sin{\left( \frac{n_x \pi x}{L} \right)}
\sin{\left( \frac{n_y \pi y}{L} \right)} $$
This is a scalar field and $n_x$ and $n_y$ are quantum numbers that measure the level of excitation in the x and y directions. $L$ is the size of the well.
Define a function well2d that computes this wavefunction for values of x and y that are NumPy arrays.
End of explanation
"""
x=np.linspace(0,1,100)
y=np.linspace(0,1,100)
psi1=well2d(x,y,3,2,1.0)
psi2=well2d(x,y,3,2,1.0)
X,Y=np.meshgrid(psi1,psi2) #help from Making Contour Plots wiht Python on http://bulldog2.redlands.edu/facultyfolder/deweerd/tutorials/Tutorial-ContourPlot.pdf
plt.contour(Y)
assert True # use this cell for grading the contour plot
"""
Explanation: The contour, contourf, pcolor and pcolormesh functions of Matplotlib can be used for effective visualizations of 2d scalar fields. Use the Matplotlib documentation to learn how to use these functions along with the numpy.meshgrid function to visualize the above wavefunction:
Use $n_x=3$, $n_y=2$ and $L=1$.
Use the limits $[0,1]$ for the x and y axis.
Customize your plot to make it effective and beautiful.
Use a non-default colormap.
Add a colorbar to you visualization.
First make a plot using one of the contour functions:
End of explanation
"""
# YOUR CODE HERE
plt.pcolor(Y) #worked with Jack Porter
assert True # use this cell for grading the pcolor plot
"""
Explanation: Next make a visualization using one of the pcolor functions:
End of explanation
"""
|
RTHMaK/RPGOne | scipy-2017-sklearn-master/notebooks/11 Text Feature Extraction.ipynb | apache-2.0 | X = ["Some say the world will end in fire,",
"Some say in ice."]
len(X)
from sklearn.feature_extraction.text import CountVectorizer
vectorizer = CountVectorizer()
vectorizer.fit(X)
vectorizer.vocabulary_
X_bag_of_words = vectorizer.transform(X)
X_bag_of_words.shape
X_bag_of_words
X_bag_of_words.toarray()
vectorizer.get_feature_names()
vectorizer.inverse_transform(X_bag_of_words)
"""
Explanation: SciPy 2016 Scikit-learn Tutorial
Methods - Text Feature Extraction with Bag-of-Words
In many tasks, like in the classical spam detection, your input data is text.
Free text with variables length is very far from the fixed length numeric representation that we need to do machine learning with scikit-learn.
However, there is an easy and effective way to go from text data to a numeric representation using the so-called bag-of-words model, which provides a data structure that is compatible with the machine learning aglorithms in scikit-learn.
<img src="figures/bag_of_words.svg" width="100%">
Let's assume that each sample in your dataset is represented as one string, which could be just a sentence, an email, or a whole news article or book. To represent the sample, we first split the string into a list of tokens, which correspond to (somewhat normalized) words. A simple way to do this is to just split by whitespace, and then lowercase the word.
Then, we build a vocabulary of all tokens (lowercased words) that appear in our whole dataset. This is usually a very large vocabulary.
Finally, looking at our single sample, we could show how often each word in the vocabulary appears.
We represent our string by a vector, where each entry is how often a given word in the vocabulary appears in the string.
As each sample will only contain very few words, most entries will be zero, leading to a very high-dimensional but sparse representation.
The method is called "bag-of-words," as the order of the words is lost entirely.
End of explanation
"""
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vectorizer = TfidfVectorizer()
tfidf_vectorizer.fit(X)
import numpy as np
np.set_printoptions(precision=2)
print(tfidf_vectorizer.transform(X).toarray())
"""
Explanation: tf-idf Encoding
A useful transformation that is often applied to the bag-of-word encoding is the so-called term-frequency inverse-document-frequency (tf-idf) scaling, which is a non-linear transformation of the word counts.
The tf-idf encoding rescales words that are common to have less weight:
End of explanation
"""
# look at sequences of tokens of minimum length 2 and maximum length 2
bigram_vectorizer = CountVectorizer(ngram_range=(2, 2))
bigram_vectorizer.fit(X)
bigram_vectorizer.get_feature_names()
bigram_vectorizer.transform(X).toarray()
"""
Explanation: tf-idfs are a way to represent documents as feature vectors. tf-idfs can be understood as a modification of the raw term frequencies (tf); the tf is the count of how often a particular word occurs in a given document. The concept behind the tf-idf is to downweight terms proportionally to the number of documents in which they occur. Here, the idea is that terms that occur in many different documents are likely unimportant or don't contain any useful information for Natural Language Processing tasks such as document classification. If you are interested in the mathematical details and equations, we have compiled an external IPython Notebook that walks you through the computation.
Bigrams and N-Grams
In the example illustrated in the figure at the beginning of this notebook, we used the so-called 1-gram (unigram) tokenization: Each token represents a single element with regard to the splittling criterion.
Entirely discarding word order is not always a good idea, as composite phrases often have specific meaning, and modifiers like "not" can invert the meaning of words.
A simple way to include some word order is n-grams, which don't only look at a single token, but at all pairs of neighboring tokens. For example, in 2-gram (bigram) tokenization, we would group words together with an overlap of one word; in 3-gram (trigram) splits we would create an overlap of two words, and so forth:
original text: "this is how you get ants"
1-gram: "this", "is", "how", "you", "get", "ants"
2-gram: "this is", "is how", "how you", "you get", "get ants"
3-gram: "this is how", "is how you", "how you get", "you get ants"
Which "n" we choose for "n-gram" tokenization to obtain the optimal performance in our predictive model depends on the learning algorithm, dataset, and task. Or in other words, we have consider "n" in "n-grams" as a tuning parameters, and in later notebooks, we will see how we deal with these.
Now, let's create a bag of words model of bigrams using scikit-learn's CountVectorizer:
End of explanation
"""
gram_vectorizer = CountVectorizer(ngram_range=(1, 2))
gram_vectorizer.fit(X)
gram_vectorizer.get_feature_names()
gram_vectorizer.transform(X).toarray()
"""
Explanation: Often we want to include unigrams (single tokens) AND bigrams, which we can do by passing the following tuple as an argument to the ngram_range parameter of the CountVectorizer function:
End of explanation
"""
X
char_vectorizer = CountVectorizer(ngram_range=(2, 2), analyzer="char")
char_vectorizer.fit(X)
print(char_vectorizer.get_feature_names())
"""
Explanation: Character n-grams
Sometimes it is also helpful not only to look at words, but to consider single characters instead.
That is particularly useful if we have very noisy data and want to identify the language, or if we want to predict something about a single word.
We can simply look at characters instead of words by setting analyzer="char".
Looking at single characters is usually not very informative, but looking at longer n-grams of characters could be:
End of explanation
"""
zen = """Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it may be a good idea.
Namespaces are one honking great idea -- let's do more of those!"""
#%load solutions/11_ngrams.py
"""
Explanation: Exercise
Compute the bigrams from "zen of python" as given below (or by import this), and find the most common trigram.
We want to treat each line as a separate document. You can achieve this by splitting the string by newlines (\n).
Compute the Tf-idf encoding of the data. Which words have the highest tf-idf score? Why?
What changes if you use TfidfVectorizer(norm="none")?
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.20/_downloads/4a39dd4a31cad8a0e098b02526b9c3d3/plot_covariance_whitening_dspm.ipynb | bsd-3-clause | # Author: Denis A. Engemann <denis.engemann@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import spm_face
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.cov import compute_covariance
print(__doc__)
"""
Explanation: Demonstrate impact of whitening on source estimates
This example demonstrates the relationship between the noise covariance
estimate and the MNE / dSPM source amplitudes. It computes source estimates for
the SPM faces data and compares proper regularization with insufficient
regularization based on the methods described in [1]. The example demonstrates
that improper regularization can lead to overestimation of source amplitudes.
This example makes use of the previous, non-optimized code path that was used
before implementing the suggestions presented in [1].
This example does quite a bit of processing, so even on a
fast machine it can take a couple of minutes to complete.
<div class="alert alert-danger"><h4>Warning</h4><p>Please do not copy the patterns presented here for your own
analysis, this is example is purely illustrative.</p></div>
References
.. [1] Engemann D. and Gramfort A. (2015) Automated model selection in
covariance estimation and spatial whitening of MEG and EEG signals,
vol. 108, 328-342, NeuroImage.
End of explanation
"""
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D.ds'
raw = io.read_raw_ctf(raw_fname % 1) # Take first run
# To save time and memory for this demo, we'll just use the first
# 2.5 minutes (all we need to get 30 total events) and heavily
# resample 480->60 Hz (usually you wouldn't do either of these!)
raw = raw.crop(0, 150.).load_data()
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
raw.filter(None, 20.)
events = mne.find_events(raw, stim_channel='UPPT001')
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.5
baseline = (None, 0)
reject = dict(mag=3e-12)
# Make forward
trans = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces1_3D_raw-trans.fif'
src = data_path + '/subjects/spm/bem/spm-oct-6-src.fif'
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(raw.info, trans, src, bem)
del src
# inverse parameters
conditions = 'faces', 'scrambled'
snr = 3.0
lambda2 = 1.0 / snr ** 2
clim = dict(kind='value', lims=[0, 2.5, 5])
"""
Explanation: Get data
End of explanation
"""
samples_epochs = 5, 15,
method = 'empirical', 'shrunk'
colors = 'steelblue', 'red'
evokeds = list()
stcs = list()
methods_ordered = list()
for n_train in samples_epochs:
# estimate covs based on a subset of samples
# make sure we have the same number of conditions.
events_ = np.concatenate([events[events[:, 2] == id_][:n_train]
for id_ in [event_ids[k] for k in conditions]])
events_ = events_[np.argsort(events_[:, 0])]
epochs_train = mne.Epochs(raw, events_, event_ids, tmin, tmax, picks=picks,
baseline=baseline, preload=True, reject=reject,
decim=8)
epochs_train.equalize_event_counts(event_ids)
assert len(epochs_train) == 2 * n_train
# We know some of these have too few samples, so suppress warning
# with verbose='error'
noise_covs = compute_covariance(
epochs_train, method=method, tmin=None, tmax=0, # baseline only
return_estimators=True, rank=None, verbose='error') # returns list
# prepare contrast
evokeds = [epochs_train[k].average() for k in conditions]
del epochs_train, events_
# do contrast
# We skip empirical rank estimation that we introduced in response to
# the findings in reference [1] to use the naive code path that
# triggered the behavior described in [1]. The expected true rank is
# 274 for this dataset. Please do not do this with your data but
# rely on the default rank estimator that helps regularizing the
# covariance.
stcs.append(list())
methods_ordered.append(list())
for cov in noise_covs:
inverse_operator = make_inverse_operator(evokeds[0].info, forward,
cov, loose=0.2, depth=0.8)
assert len(inverse_operator['sing']) == 274 # sanity check
stc_a, stc_b = (apply_inverse(e, inverse_operator, lambda2, "dSPM",
pick_ori=None) for e in evokeds)
stc = stc_a - stc_b
methods_ordered[-1].append(cov['method'])
stcs[-1].append(stc)
del inverse_operator, evokeds, cov, noise_covs, stc, stc_a, stc_b
del raw, forward # save some memory
"""
Explanation: Estimate covariances
End of explanation
"""
fig, (axes1, axes2) = plt.subplots(2, 3, figsize=(9.5, 5))
for ni, (n_train, axes) in enumerate(zip(samples_epochs, (axes1, axes2))):
# compute stc based on worst and best
ax_dynamics = axes[1]
for stc, ax, method, kind, color in zip(stcs[ni],
axes[::2],
methods_ordered[ni],
['best', 'worst'],
colors):
brain = stc.plot(subjects_dir=subjects_dir, hemi='both', clim=clim,
initial_time=0.175, background='w', foreground='k')
brain.show_view('ven')
im = brain.screenshot()
brain.close()
ax.axis('off')
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.imshow(im)
ax.set_title('{0} ({1} epochs)'.format(kind, n_train * 2))
# plot spatial mean
stc_mean = stc.data.mean(0)
ax_dynamics.plot(stc.times * 1e3, stc_mean,
label='{0} ({1})'.format(method, kind),
color=color)
# plot spatial std
stc_var = stc.data.std(0)
ax_dynamics.fill_between(stc.times * 1e3, stc_mean - stc_var,
stc_mean + stc_var, alpha=0.2, color=color)
# signal dynamics worst and best
ax_dynamics.set(title='{0} epochs'.format(n_train * 2),
xlabel='Time (ms)', ylabel='Source Activation (dSPM)',
xlim=(tmin * 1e3, tmax * 1e3), ylim=(-3, 3))
ax_dynamics.legend(loc='upper left', fontsize=10)
fig.subplots_adjust(hspace=0.2, left=0.01, right=0.99, wspace=0.03)
"""
Explanation: Show the resulting source estimates
End of explanation
"""
|
RyanAlberts/Springbaord-Capstone-Project | Statistics_Exercises/Mini_Project_Naive_Bayes.ipynb | mit | %matplotlib inline
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from six.moves import range
# Setup Pandas
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
# Setup Seaborn
sns.set_style("whitegrid")
sns.set_context("poster")
"""
Explanation: Basic Text Classification with Naive Bayes
In the mini-project, you'll learn the basics of text analysis using a subset of movie reviews from the rotten tomatoes database. You'll also use a fundamental technique in Bayesian inference, called Naive Bayes. This mini-project is based on Lab 10 of Harvard's CS109 class. Please free to go to the original lab for additional exercises and solutions.
End of explanation
"""
critics = pd.read_csv('./critics.csv')
#let's drop rows with missing quotes
critics = critics[~critics.quote.isnull()]
critics.head()
"""
Explanation: Table of Contents
Rotten Tomatoes Dataset
Explore
The Vector Space Model and a Search Engine
In Code
Naive Bayes
Multinomial Naive Bayes and Other Likelihood Functions
Picking Hyperparameters for Naive Bayes and Text Maintenance
Interpretation
Rotten Tomatoes Dataset
End of explanation
"""
n_reviews = len(critics)
n_movies = critics.rtid.unique().size
n_critics = critics.critic.unique().size
print("Number of reviews: {:d}".format(n_reviews))
print("Number of critics: {:d}".format(n_critics))
print("Number of movies: {:d}".format(n_movies))
df = critics.copy()
df['fresh'] = df.fresh == 'fresh'
grp = df.groupby('critic')
counts = grp.critic.count() # number of reviews by each critic
means = grp.fresh.mean() # average freshness for each critic
means[counts > 100].hist(bins=10, edgecolor='w', lw=1)
plt.xlabel("Average Rating per critic")
plt.ylabel("Number of Critics")
plt.yticks([0, 2, 4, 6, 8, 10]);
"""
Explanation: Explore
End of explanation
"""
from sklearn.feature_extraction.text import CountVectorizer
text = ['Hop on pop', 'Hop off pop', 'Hop Hop hop']
print("Original text is\n{}".format('\n'.join(text)))
vectorizer = CountVectorizer(min_df=0)
# call `fit` to build the vocabulary
vectorizer.fit(text)
# call `transform` to convert text to a bag of words
x = vectorizer.transform(text)
# CountVectorizer uses a sparse array to save memory, but it's easier in this assignment to
# convert back to a "normal" numpy array
x = x.toarray()
print("")
print("Transformed text vector is \n{}".format(x))
# `get_feature_names` tracks which word is associated with each column of the transformed x
print("")
print("Words for each feature:")
print(vectorizer.get_feature_names())
# Notice that the bag of words treatment doesn't preserve information about the *order* of words,
# just their frequency
def make_xy(critics, vectorizer=None):
    """Build the design matrix and labels from the critics DataFrame.

    Parameters
    ----------
    critics : DataFrame with a `quote` (review text) column and a `fresh`
        column whose value is 'fresh' for positive reviews.
    vectorizer : optional text vectorizer; defaults to a plain
        CountVectorizer fitted on the quotes.

    Returns
    -------
    X : sparse CSC matrix of token counts (documents x vocabulary).
    y : int array, 1 where the review is 'fresh', 0 otherwise.
    """
    if vectorizer is None:
        vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(critics.quote)
    X = X.tocsc()  # some versions of sklearn return COO format
    # `np.int` (a plain alias of builtin int) was removed in NumPy 1.24;
    # the builtin `int` is the documented replacement and yields the
    # same dtype here.
    y = (critics.fresh == 'fresh').values.astype(int)
    return X, y
X, y = make_xy(critics)  # design matrix / labels for the whole corpus
"""
Explanation: <div class="span5 alert alert-info">
<h3>Exercise Set I</h3>
<br/>
<b>Exercise:</b> Look at the histogram above. Tell a story about the average ratings per critic. What shape does the distribution look like? What is interesting about the distribution? What might explain these interesting things?
</div>
The Vector Space Model and a Search Engine
All the diagrams here are snipped from Introduction to Information Retrieval by Manning et. al. which is a great resource on text processing. For additional information on text mining and natural language processing, see Foundations of Statistical Natural Language Processing by Manning and Schutze.
Also check out Python packages nltk, spaCy, pattern, and their associated resources. Also see word2vec.
Let us define the vector derived from document $d$ by $\bar V(d)$. What does this mean? Each document is treated as a vector containing information about the words contained in it. Each vector has the same length and each entry "slot" in the vector contains some kind of data about the words that appear in the document such as presence/absence (1/0), count (an integer) or some other statistic. Each vector has the same length because each document shared the same vocabulary across the full collection of documents -- this collection is called a corpus.
To define the vocabulary, we take a union of all words we have seen in all documents. We then just associate an array index with them. So "hello" may be at index 5 and "world" at index 99.
Suppose we have the following corpus:
A Fox one day spied a beautiful bunch of ripe grapes hanging from a vine trained along the branches of a tree. The grapes seemed ready to burst with juice, and the Fox's mouth watered as he gazed longingly at them.
Suppose we treat each sentence as a document $d$. The vocabulary (often called the lexicon) is the following:
$V = \left{\right.$ a, along, and, as, at, beautiful, branches, bunch, burst, day, fox, fox's, from, gazed, grapes, hanging, he, juice, longingly, mouth, of, one, ready, ripe, seemed, spied, the, them, to, trained, tree, vine, watered, with$\left.\right}$
Then the document
A Fox one day spied a beautiful bunch of ripe grapes hanging from a vine trained along the branches of a tree
may be represented as the following sparse vector of word counts:
$$\bar V(d) = \left( 4,1,0,0,0,1,1,1,0,1,1,0,1,0,1,1,0,0,0,0,2,1,0,1,0,0,1,0,0,0,1,1,0,0 \right)$$
or more succinctly as
[(0, 4), (1, 1), (5, 1), (6, 1), (7, 1), (9, 1), (10, 1), (12, 1), (14, 1), (15, 1), (20, 2), (21, 1), (23, 1),
(26, 1), (30, 1), (31, 1)]
along with a dictionary
{
0: a, 1: along, 5: beautiful, 6: branches, 7: bunch, 9: day, 10: fox, 12: from, 14: grapes,
15: hanging, 19: mouth, 20: of, 21: one, 23: ripe, 24: seemed, 25: spied, 26: the,
30: tree, 31: vine,
}
Then, a set of documents becomes, in the usual sklearn style, a sparse matrix with rows being sparse arrays representing documents and columns representing the features/words in the vocabulary.
Notice that this representation loses the relative ordering of the terms in the document. That is "cat ate rat" and "rat ate cat" are the same. Thus, this representation is also known as the Bag-Of-Words representation.
Here is another example, from the book quoted above, although the matrix is transposed here so that documents are columns:
Such a matrix is also called a Term-Document Matrix. Here, the terms being indexed could be stemmed before indexing; for instance, jealous and jealousy after stemming are the same feature. One could also make use of other "Natural Language Processing" transformations in constructing the vocabulary. We could use Lemmatization, which reduces words to lemmas: work, working, worked would all reduce to work. We could remove "stopwords" from our vocabulary, such as common words like "the". We could look for particular parts of speech, such as adjectives. This is often done in Sentiment Analysis. And so on. It all depends on our application.
From the book:
The standard way of quantifying the similarity between two documents $d_1$ and $d_2$ is to compute the cosine similarity of their vector representations $\bar V(d_1)$ and $\bar V(d_2)$:
$$S_{12} = \frac{\bar V(d_1) \cdot \bar V(d_2)}{|\bar V(d_1)| \times |\bar V(d_2)|}$$
There is a far more compelling reason to represent documents as vectors: we can also view a query as a vector. Consider the query q = jealous gossip. This query turns into the unit vector $\bar V(q)$ = (0, 0.707, 0.707) on the three coordinates below.
The key idea now: to assign to each document d a score equal to the dot product:
$$\bar V(q) \cdot \bar V(d)$$
Then we can use this simple Vector Model as a Search engine.
In Code
End of explanation
"""
#your turn
"""
Explanation: Naive Bayes
From Bayes' Theorem, we have that
$$P(c \vert f) = \frac{P(c \cap f)}{P(f)}$$
where $c$ represents a class or category, and $f$ represents a feature vector, such as $\bar V(d)$ as above. We are computing the probability that a document (or whatever we are classifying) belongs to category c given the features in the document. $P(f)$ is really just a normalization constant, so the literature usually writes Bayes' Theorem in context of Naive Bayes as
$$P(c \vert f) \propto P(f \vert c) P(c) $$
$P(c)$ is called the prior and is simply the probability of seeing class $c$. But what is $P(f \vert c)$? This is the probability that we see feature set $f$ given that this document is actually in class $c$. This is called the likelihood and comes from the data. One of the major assumptions of the Naive Bayes model is that the features are conditionally independent given the class. While the presence of a particular discriminative word may uniquely identify the document as being part of class $c$ and thus violate general feature independence, conditional independence means that the presence of that term is independent of all the other words that appear within that class. This is a very important distinction. Recall that if two events are independent, then:
$$P(A \cap B) = P(A) \cdot P(B)$$
Thus, conditional independence implies
$$P(f \vert c) = \prod_i P(f_i | c) $$
where $f_i$ is an individual feature (a word in this example).
To make a classification, we then choose the class $c$ such that $P(c \vert f)$ is maximal.
There is a small caveat when computing these probabilities. For floating point underflow we change the product into a sum by going into log space. This is called the LogSumExp trick. So:
$$\log P(f \vert c) = \sum_i \log P(f_i \vert c) $$
There is another caveat. What if we see a term that didn't exist in the training data? This means that $P(f_i \vert c) = 0$ for that term, and thus $P(f \vert c) = \prod_i P(f_i | c) = 0$, which doesn't help us at all. Instead of using zeros, we add a small negligible value called $\alpha$ to each count. This is called Laplace Smoothing.
$$P(f_i \vert c) = \frac{N_{ic}+\alpha}{N_c + \alpha N_i}$$
where $N_{ic}$ is the number of times feature $i$ was seen in class $c$, $N_c$ is the number of times class $c$ was seen and $N_i$ is the number of times feature $i$ was seen globally. $\alpha$ is sometimes called a regularization parameter.
Multinomial Naive Bayes and Other Likelihood Functions
Since we are modeling word counts, we are using variation of Naive Bayes called Multinomial Naive Bayes. This is because the likelihood function actually takes the form of the multinomial distribution.
$$P(f \vert c) = \frac{\left( \sum_i f_i \right)!}{\prod_i f_i!} \prod_{f_i} P(f_i \vert c)^{f_i} \propto \prod_{i} P(f_i \vert c)$$
where the nasty term out front is absorbed as a normalization constant such that probabilities sum to 1.
There are many other variations of Naive Bayes, all of which depend on what type of value $f_i$ takes. If $f_i$ is continuous, we may be able to use Gaussian Naive Bayes. First compute the mean and variance for each class $c$. Then the likelihood, $P(f \vert c)$ is given as follows
$$P(f_i = v \vert c) = \frac{1}{\sqrt{2\pi \sigma^2_c}} e^{- \frac{\left( v - \mu_c \right)^2}{2 \sigma^2_c}}$$
<div class="span5 alert alert-info">
<h3>Exercise Set II</h3>
<p><b>Exercise:</b> Implement a simple Naive Bayes classifier:</p>
<ol>
<li> split the data set into a training and test set
<li> Use `scikit-learn`'s `MultinomialNB()` classifier with default parameters.
<li> train the classifier over the training set and test on the test set
<li> print the accuracy scores for both the training and the test sets
</ol>
What do you notice? Is this a good classifier? If not, why not?
</div>
End of explanation
"""
# Your turn.
"""
Explanation: Picking Hyperparameters for Naive Bayes and Text Maintenance
We need to know what value to use for $\alpha$, and we also need to know which words to include in the vocabulary. As mentioned earlier, some words are obvious stopwords. Other words appear so infrequently that they serve as noise, and other words in addition to stopwords appear so frequently that they may also serve as noise.
First, let's find an appropriate value for min_df for the CountVectorizer. min_df can be either an integer or a float/decimal. If it is an integer, min_df represents the minimum number of documents a word must appear in for it to be included in the vocabulary. If it is a float, it represents the minimum percentage of documents a word must appear in to be included in the vocabulary. From the documentation:
min_df: When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float, the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None.
<div class="span5 alert alert-info">
<h3>Exercise Set III</h3>
<p><b>Exercise:</b> Construct the cumulative distribution of document frequencies (df). The $x$-axis is a document count $x_i$ and the $y$-axis is the percentage of words that appear less than $x_i$ times. For example, at $x=5$, plot a point representing the percentage or number of words that appear in 5 or fewer documents.</p>
<p><b>Exercise:</b> Look for the point at which the curve begins climbing steeply. This may be a good value for `min_df`. If we were interested in also picking `max_df`, we would likely pick the value where the curve starts to plateau. What value did you choose?</p>
</div>
End of explanation
"""
from sklearn.model_selection import KFold
def cv_score(clf, X, y, scorefunc):
    """Average `scorefunc` over a 5-fold cross-validation of (X, y).

    On each fold the classifier is refit on the training split and
    `scorefunc(clf, X_test, y_test)` is evaluated on the held-out split.
    """
    nfold = 5
    total = 0.
    # KFold yields (train_indices, test_indices) pairs, one per fold.
    for train_idx, test_idx in KFold(nfold).split(X):
        clf.fit(X[train_idx], y[train_idx])
        total += scorefunc(clf, X[test_idx], y[test_idx])
    return total / nfold
"""
Explanation: The parameter $\alpha$ is chosen to be a small value that simply avoids having zeros in the probability computations. This value can sometimes be chosen arbitrarily with domain expertise, but we will use K-fold cross validation. In K-fold cross-validation, we divide the data into $K$ non-overlapping parts. We train on $K-1$ of the folds and test on the remaining fold. We then iterate, so that each fold serves as the test fold exactly once. The function cv_score performs the K-fold cross-validation algorithm for us, but we need to pass a function that measures the performance of the algorithm on each fold.
End of explanation
"""
def log_likelihood(clf, x, y):
    """Total log-likelihood of the true labels under the classifier.

    Sums log P(rotten | doc) over rotten documents (y == 0) and
    log P(fresh | doc) over fresh documents (y == 1), using column 0 of
    `predict_log_proba` for rotten and column 1 for fresh.
    """
    logprob = clf.predict_log_proba(x)
    is_fresh = y == 1
    return logprob[~is_fresh, 0].sum() + logprob[is_fresh, 1].sum()
"""
Explanation: We use the log-likelihood as the score here in scorefunc. The higher the log-likelihood, the better. Indeed, what we do in cv_score above is to implement the cross-validation part of GridSearchCV.
The custom scoring function scorefunc allows us to use different metrics depending on the decision risk we care about (precision, accuracy, profit etc.) directly on the validation set. You will often find people using roc_auc, precision, recall, or F1-score as the scoring function.
End of explanation
"""
from sklearn.model_selection import train_test_split
# Split row indices 70/30; `itest` is the 30% hold-out from train_test_split.
_, itest = train_test_split(range(critics.shape[0]), train_size=0.7)
# Bug fix: `np.bool` was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin `bool` is the supported dtype spelling.
# NOTE(review): `mask` is True on the 30% split, yet later cells index the
# *training* data with `mask` -- confirm which split was intended.
mask = np.zeros(critics.shape[0], dtype=bool)
mask[itest] = True
"""
Explanation: We'll cross-validate over the regularization parameter $\alpha$.
Let's set up the train and test masks first, and then we can run the cross-validation procedure.
End of explanation
"""
from sklearn.naive_bayes import MultinomialNB
#the grid of parameters to search over
alphas = [.1, 1, 5, 10, 50]
best_min_df = None # YOUR TURN: put your value of min_df here.
#Find the best value for alpha and min_df, and the best classifier
best_alpha = None
maxscore=-np.inf
# NOTE(review): this loop is intentionally incomplete skeleton code -- the
# cross-validation call that should update `best_alpha` and `maxscore` is
# left as an exercise ("your turn").
for alpha in alphas:
    vectorizer = CountVectorizer(min_df=best_min_df)
    Xthis, ythis = make_xy(critics, vectorizer)
    Xtrainthis = Xthis[mask]
    ytrainthis = ythis[mask]
    # your turn
print("alpha: {}".format(best_alpha))
"""
Explanation: <div class="span5 alert alert-info">
<h3>Exercise Set IV</h3>
<p><b>Exercise:</b> What does using the function `log_likelihood` as the score mean? What are we trying to optimize for?</p>
<p><b>Exercise:</b> Without writing any code, what do you think would happen if you choose a value of $\alpha$ that is too high?</p>
<p><b>Exercise:</b> Using the skeleton code below, find the best values of the parameter `alpha`, and use the value of `min_df` you chose in the previous exercise set. Use the `cv_score` function above with the `log_likelihood` function for scoring.</p>
</div>
End of explanation
"""
# Rebuild the design matrix with the tuned min_df and split it with `mask`.
vectorizer = CountVectorizer(min_df=best_min_df)
X, y = make_xy(critics, vectorizer)
xtrain = X[mask]
ytrain = y[mask]
xtest = X[~mask]
ytest = y[~mask]
# Fit a multinomial Naive Bayes model with the tuned smoothing parameter.
clf = MultinomialNB(alpha=best_alpha).fit(xtrain, ytrain)
# Accuracy on both splits.  Bug fix: the original "{:2f}" format spec is a
# field *width* of 2 with default precision; "{:.2f}" rounds to two
# decimal places as intended.
training_accuracy = clf.score(xtrain, ytrain)
test_accuracy = clf.score(xtest, ytest)
print("Accuracy on training data: {:.2f}".format(training_accuracy))
print("Accuracy on test data: {:.2f}".format(test_accuracy))
from sklearn.metrics import confusion_matrix
print(confusion_matrix(ytest, clf.predict(xtest)))
"""
Explanation: <div class="span5 alert alert-info">
<h3>Exercise Set V: Working with the Best Parameters</h3>
<p><b>Exercise:</b> Using the best value of `alpha` you just found, calculate the accuracy on the training and test sets. Is this classifier better? Why (not)?</p>
</div>
End of explanation
"""
# Identify the most predictive words: feed the classifier an identity
# matrix (each row is a "document" containing exactly one vocabulary word)
# and rank the words by predicted P(rotten | word) (column 0).
words = np.array(vectorizer.get_feature_names())
x = np.eye(xtest.shape[1])
probs = clf.predict_log_proba(x)[:, 0]
ind = np.argsort(probs)
# Lowest P(rotten) => strongest "fresh" predictors; highest => "rotten".
good_words = words[ind[:10]]
bad_words = words[ind[-10:]]
good_prob = probs[ind[:10]]
bad_prob = probs[ind[-10:]]
print("Good words\t P(fresh | word)")
for w, p in zip(good_words, good_prob):
    # 1 - exp(log P(rotten)) = P(fresh)
    print("{:>20}".format(w), "{:.2f}".format(1 - np.exp(p)))
print("Bad words\t P(fresh | word)")
for w, p in zip(bad_words, bad_prob):
    print("{:>20}".format(w), "{:.2f}".format(1 - np.exp(p)))
"""
Explanation: Interpretation
What are the strongly predictive features?
We use a neat trick to identify strongly predictive features (i.e. words).
first, create a data set such that each row has exactly one feature. This is represented by the identity matrix.
use the trained classifier to make predictions on this matrix
sort the rows by predicted probabilities, and pick the top and bottom $K$ rows
End of explanation
"""
# Show the worst mis-predictions: documents whose predicted P(rotten)
# disagrees most strongly with their true label.
x, y = make_xy(critics, vectorizer)
prob = clf.predict_proba(x)[:, 0]  # P(rotten | document)
predict = clf.predict(x)
# Among truly rotten reviews, the lowest P(rotten) are the worst misses;
# among truly fresh reviews, the highest P(rotten) are the worst misses.
bad_rotten = np.argsort(prob[y == 0])[:5]
bad_fresh = np.argsort(prob[y == 1])[-5:]
print("Mis-predicted Rotten quotes")
print('---------------------------')
for row in bad_rotten:
    print(critics[y == 0].quote.iloc[row])
    print("")
print("Mis-predicted Fresh quotes")
print('--------------------------')
for row in bad_fresh:
    print(critics[y == 1].quote.iloc[row])
    print("")
"""
Explanation: <div class="span5 alert alert-info">
<h3>Exercise Set VI</h3>
<p><b>Exercise:</b> Why does this method work? What does the probability for each row in the identity matrix represent</p>
</div>
The above exercise is an example of feature selection. There are many other feature selection methods. A list of feature selection methods available in sklearn is here. The most common feature selection technique for text mining is the chi-squared $\left( \chi^2 \right)$ method.
Prediction Errors
We can see mis-predictions as well.
End of explanation
"""
#your turn
"""
Explanation: <div class="span5 alert alert-info">
<h3>Exercise Set VII: Predicting the Freshness for a New Review</h3>
<br/>
<div>
<b>Exercise:</b>
<ul>
<li> Using your best trained classifier, predict the freshness of the following sentence: *'This movie is not remarkable, touching, or superb in any way'*
<li> Is the result what you'd expect? Why (not)?
</ul>
</div>
</div>
End of explanation
"""
# http://scikit-learn.org/dev/modules/feature_extraction.html#text-feature-extraction
# http://scikit-learn.org/dev/modules/classes.html#text-feature-extraction-ref
from sklearn.feature_extraction.text import TfidfVectorizer
# TF-IDF-weighted features: down-weight words that appear in many quotes.
tfidfvectorizer = TfidfVectorizer(min_df=1, stop_words='english')
Xtfidf=tfidfvectorizer.fit_transform(critics.quote)
"""
Explanation: Aside: TF-IDF Weighting for Term Importance
TF-IDF stands for
Term-Frequency X Inverse Document Frequency.
In the standard CountVectorizer model above, we used just the term frequency in a document of words in our vocabulary. In TF-IDF, we weight this term frequency by the inverse of its popularity in all documents. For example, if the word "movie" showed up in all the documents, it would not have much predictive value. It could actually be considered a stopword. By weighing its counts by 1 divided by its overall frequency, we downweight it. We can then use this TF-IDF weighted features as inputs to any classifier. TF-IDF is essentially a measure of term importance, and of how discriminative a word is in a corpus. There are a variety of nuances involved in computing TF-IDF, mainly involving where to add the smoothing term to avoid division by 0, or log of 0 errors. The formula for TF-IDF in scikit-learn differs from that of most textbooks:
$$\mbox{TF-IDF}(t, d) = \mbox{TF}(t, d)\times \mbox{IDF}(t) = n_{td} \log{\left( \frac{\vert D \vert}{\vert d : t \in d \vert} + 1 \right)}$$
where $n_{td}$ is the number of times term $t$ occurs in document $d$, $\vert D \vert$ is the number of documents, and $\vert d : t \in d \vert$ is the number of documents that contain $t$
End of explanation
"""
# Your turn
"""
Explanation: <div class="span5 alert alert-info">
<h3>Exercise Set VIII: Enrichment</h3>
<p>
There are several additional things we could try. Try some of these as exercises:
<ol>
<li> Build a Naive Bayes model where the features are n-grams instead of words. N-grams are phrases containing n words next to each other: a bigram contains 2 words, a trigram contains 3 words, and 6-gram contains 6 words. This is useful because "not good" and "so good" mean very different things. On the other hand, as n increases, the model does not scale well since the feature set becomes more sparse.
<li> Try a model besides Naive Bayes, one that would allow for interactions between words -- for example, a Random Forest classifier.
<li> Try adding supplemental features -- information about genre, director, cast, etc.
<li> Use word2vec or [Latent Dirichlet Allocation](https://en.wikipedia.org/wiki/Latent_Dirichlet_allocation) to group words into topics and use those topics for prediction.
<li> Use TF-IDF weighting instead of word counts.
</ol>
</p>
<b>Exercise:</b> Try a few of these ideas to improve the model (or any other ideas of your own). Implement here and report on the result.
</div>
End of explanation
"""
|
kscottz/PythonFromSpace | OpenStreetMapsExample.ipynb | bsd-3-clause | # See requirements.txt to set up your dev environment.
import os
import sys
import utm
import json
import scipy
import overpy
import urllib
import datetime
import urllib3
import rasterio
import subprocess
import numpy as np
import pandas as pd
import seaborn as sns
from osgeo import gdal
from planet import api
from planet.api import filters
from traitlets import link
import rasterio.tools.mask as rio_mask
from shapely.geometry import mapping, shape
from IPython.display import display, Image, HTML
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#from scipy import ndimage
import warnings
from osgeo import gdal
from osmapi import OsmApi
from geopy.geocoders import Nominatim
urllib3.disable_warnings()
from ipyleaflet import (
Map,
Marker,
TileLayer, ImageOverlay,
Polyline, Polygon, Rectangle, Circle, CircleMarker,
GeoJSON,
DrawControl
)
%matplotlib inline
# will pick up api_key via environment variable PL_API_KEY
# but can be specified using `api_key` named argument
api_keys = json.load(open("apikeys.json",'r'))
client = api.ClientV1(api_key=api_keys["PLANET_API_KEY"])
gdal.UseExceptions()
# NOTE(review): this rebinds `api` from the planet module to an Overpass
# client; the Planet client object is already captured in `client` above.
api = overpy.Overpass()
"""
Explanation: Let's start with our crazy stock list of imports and setup our environment
End of explanation
"""
# Basemap Mosaic (v1 API)
mosaicsSeries = 'global_quarterly_2017q1_mosaic'
# Planet tile server base URL (Planet Explorer Mosaics Tiles)
mosaicsTilesURL_base = 'https://tiles0.planet.com/experimental/mosaics/planet-tiles/' + mosaicsSeries + '/gmap/{z}/{x}/{y}.png'
# Planet tile server url
# NOTE(review): the API key is embedded in the tile URL; avoid sharing a
# rendered notebook that exposes it.
mosaicsTilesURL = mosaicsTilesURL_base + '?api_key=' + api_keys["PLANET_API_KEY"]
# Map Settings
# Define colors
colors = {'blue': "#009da5"}
# Define initial map center lat/long (Portland, OR)
center = [45.5231, -122.6765]
# Define initial map zoom level
zoom = 13
# Set Map Tiles URL
planetMapTiles = TileLayer(url= mosaicsTilesURL)
# Create the map
m = Map(
    center=center,
    zoom=zoom,
    default_tiles = planetMapTiles # Uncomment to use Planet.com basemap
)
# Define the draw tool type options
polygon = {'shapeOptions': {'color': colors['blue']}}
rectangle = {'shapeOptions': {'color': colors['blue']}}
# Create the draw controls
# @see https://github.com/ellisonbg/ipyleaflet/blob/master/ipyleaflet/leaflet.py#L293
dc = DrawControl(
    polygon = polygon,
    rectangle = rectangle
)
# Initialize an action counter variable
actionCount = 0
# Drawn areas of interest, keyed by draw-action number (filled by handle_draw).
AOIs = {}
# Callback invoked by ipyleaflet each time a shape is drawn on the map.
def handle_draw(self, action, geo_json):
    """Store each drawn shape as a cleaned GeoJSON dict in AOIs."""
    global actionCount
    actionCount += 1
    # Strip the leaflet styling so only the raw geometry remains.
    geo_json['properties'] = {}
    # Round-trip through a JSON string (normalizing single quotes to
    # double quotes) and keep the parsed dict keyed by action number.
    AOIs[actionCount] = json.loads(json.dumps(geo_json, indent=2).replace("'", '"'))
# Attach the draw handler to the draw controls `on_draw` event
dc.on_draw(handle_draw)
m.add_control(dc)
m  # display the interactive map widget
"""
Explanation: Let's bring up our slippy map once again.
End of explanation
"""
# Use the first AOI drawn on the map above.  (Parenthesized single-argument
# print works identically on Python 2 and 3.)
print(AOIs[1])
myAOI = AOIs[1]["geometry"]
# build a query using the AOI and
# a cloud_cover filter that excludes cloudy scenes (< 5% cover),
# restricted to imagery acquired after Jan 1 2017
old = datetime.datetime(year=2017, month=1, day=1)
query = filters.and_filter(
    filters.geom_filter(myAOI),
    filters.range_filter('cloud_cover', lt=5),
    filters.date_range('acquired', gt=old)
)
# build a request for only PlanetScope imagery
request = filters.build_search_request(
    query, item_types=['PSScene3Band']
)
# if you don't have an API key configured, this will raise an exception
result = client.quick_search(request)
scenes = []
planet_map = {}
# Flatten each search result into a row of scene properties.
for item in result.items_iter(limit=500):
    planet_map[item['id']] = item
    props = item['properties']
    props["id"] = item['id']
    props["geometry"] = item["geometry"]
    props["thumbnail"] = item["_links"]["thumbnail"]
    scenes.append(props)
scenes = pd.DataFrame(data=scenes)
# make a shapely shape from our AOI
portland = shape(myAOI)
footprints = []
overlaps = []
# Convert each scene footprint to a shape and compute what percentage of
# the AOI it covers; keep the shapes for later use.
for footprint in scenes["geometry"].tolist():
    s = shape(footprint)
    footprints.append(s)
    overlap = 100.0 * (portland.intersection(s).area / portland.area)
    overlaps.append(overlap)
# take our lists and add them back to our dataframe
scenes['overlap'] = pd.Series(overlaps, index=scenes.index)
scenes['footprint'] = pd.Series(footprints, index=scenes.index)
# now make sure pandas knows about our date/time columns.
scenes["acquired"] = pd.to_datetime(scenes["acquired"])
scenes["published"] = pd.to_datetime(scenes["published"])
scenes["updated"] = pd.to_datetime(scenes["updated"])
# Keep only scenes above the overlap threshold.
# NOTE(review): `overlap` is a percentage (0-100), so 0.9 here means 0.9%
# coverage -- confirm that 90 was not intended.
scenes = scenes[scenes['overlap'] > 0.9]
print(len(scenes))
# NOTE(review): the original cell recomputed `overlap`, `footprint`, and
# the datetime columns a second time after filtering; row filtering
# preserves those columns, so the duplicated pass was removed as redundant.
# first create a list of colors
colors = ["#ff0000","#00ff00","#0000ff","#ffff00","#ff00ff","#00ffff"]
# grab our scenes from the geometry/footprint geojson
footprints = scenes["geometry"].tolist()
# for each footprint/color combo
# NOTE(review): zip stops at the shorter sequence, so at most six
# footprints are drawn even if more scenes passed the overlap filter.
for footprint, color in zip(footprints, colors):
    # create the leaflet object
    feat = {'geometry': footprint, "properties": {
            'style': {'color': color, 'fillColor': color, 'fillOpacity': 0.1, 'weight': 1}},
            'type': u"Feature"}
    # convert to geojson
    gjson = GeoJSON(data=feat)
    # add it our map
    m.add_layer(gjson)
# now we will draw our original AOI on top (white outline)
feat = {'geometry': myAOI, "properties": {
        'style': {'color': "#FFFFFF", 'fillColor': "#FFFFFF", 'fillOpacity': 0.1, 'weight': 2}},
        'type': u"Feature"}
gjson = GeoJSON(data=feat)
m.add_layer(gjson)
m
"""
Explanation: Let's review from last time.
We'll query the Planet API and get a list of scenes.
We'll then use pandas and shapely to clean up and filter the results
We'll then render the footprints of the good scenes over our AOI
End of explanation
"""
def get_products(client, scene_id, asset_type='PSScene3Band'):
    """
    Ask the client to return the available products for a
    given scene and asset type. Returns a list of product
    strings.
    """
    assets = client.get_assets_by_id(asset_type, scene_id).get()
    # Materialize the keys as a list so the return value matches the
    # docstring on Python 3 too (dict.keys() is a view there, not a list).
    return list(assets.keys())
def activate_product(client, scene_id, asset_type="PSScene3Band", product="analytic"):
    """
    Activate a product given a scene, an asset type, and a product.
    On success return the return value of the API call and an activation
    object; return None when the requested product is unavailable.
    """
    assets = client.get_assets_by_id(asset_type, scene_id).get()
    if product not in assets.keys():
        return None
    # Kick off activation and hand back both the API response and the
    # activation object for this product.
    return client.activate(assets[product]), assets[product]
def download_and_save(client, product):
    """
    Given a client and a product activation object download the asset.
    This will save the tiff file in the local directory and return its
    file name.
    """
    # Fetch the response body and stream it to disk, then report the
    # name of the file that was written.
    body = client.download(product).get_body()
    body.write()
    return body.name
def scenes_are_active(scene_list):
    """
    Check if all of the resources in a given list of
    scene activation objects are ready for downloading.

    Returns True only when every scene's "status" is "active"; prints the
    first scene that is not ready and returns False otherwise.
    """
    # (The original kept an unused `retVal` accumulator; an early return
    # is sufficient.  Parenthesized print works on both Python 2 and 3.)
    for scene in scene_list:
        if scene["status"] != "active":
            print("{} is not ready.".format(scene))
            return False
    return True
"""
Explanation: Now we'll add in our boiler plate activation code for reference.
End of explanation
"""
# Activate the "visual" product for the first seven scenes found above.
to_get = scenes["id"][0:7].tolist()
activated = []
# for each scene to get
for scene in to_get:
    # get the available product types
    product_types = get_products(client, scene)
    for p in product_types:
        # if there is a visual product
        if p == "visual":  # p == "basic_analytic_dn"
            # Parenthesized print works on both Python 2 and 3.
            print("Activating {0} for scene {1}".format(p, scene))
            # activate the product and remember its activation object
            _, product = activate_product(client, scene, product=p)
            activated.append(product)
"""
Explanation: Now we'll activate our scenes
End of explanation
"""
tiff_files = []
asset_type = "_3B_Visual"
# check if our scenes have been activated
# (the check is short-circuited with `if True:` in the original notebook)
if True:  # scenes_are_active(activated):
    for to_download, name in zip(activated, to_get):
        # create the product file name
        name = name + asset_type + ".tif"
        # if the product exists locally
        if os.path.isfile(name):
            # do nothing -- already downloaded
            print("We have scene {0} already, skipping...".format(name))
            tiff_files.append(name)
        elif to_download["status"] == "active":
            # otherwise download the product
            print("Downloading {0}....".format(name))
            fname = download_and_save(client, to_download)
            tiff_files.append(fname)
            print("Download done.")
        else:
            print("Could not download, still activating")
else:
    print("Scenes aren't ready yet")
# Bug fix: the original bare `sorted(tiff_files)` built a sorted copy and
# threw it away; assign the result so the file list is actually sorted.
tiff_files = sorted(tiff_files)
print(tiff_files)
"""
Explanation: And then download them.
End of explanation
"""
# Open the first downloaded GeoTIFF and pull out its georeferencing info.
infile = tiff_files[0]
# Open the file
gtif = gdal.Open(infile)
# Get the projection reference; this WKT string names the UTM zone
reff = gtif.GetProjectionRef()
# arr is the actual image data.
arr = gtif.ReadAsArray()
# trans is our geo transform array (affine pixel->georeference map).
trans = gtif.GetGeoTransform()
# print the ref object (parenthesized print works on Python 2 and 3)
print(reff)
# find our UTM zone by searching the WKT projection string
i = reff.find("UTM")
print(reff[i:i+12])
"""
Explanation: Let's get going with Open Street Maps.
Open Street Maps is a huge and open collection of data about the Earth.
OSM is free to query. The interfaces are powerful, but hella cryptic.
Let's say we had a pixel in an image and we wanted to know what in the world was at that pixel.
We can use the Open Street Maps Nominatim function to look up what is there, like Google maps.
We can also use the OSM interface to find the 'nodes' near our pixel.
OSM Nominatim works through Lat Long values. To get these lat long values we are going to go through UTM coordinates.
To get the correct UTM values we'll need to ask GDAL what our UTM zone is.
End of explanation
"""
def pixel2utm(ds, x, y):
    """
    Returns utm coordinates from pixel x, y coords
    """
    # GDAL's geotransform is an affine map:
    # (origin_x, pixel_width, row_rotation, origin_y, col_rotation, pixel_height)
    origin_x, px_w, row_rot, origin_y, col_rot, px_h = ds.GetGeoTransform()
    easting = origin_x + px_w * x + row_rot * y
    northing = origin_y + col_rot * x + px_h * y
    return (easting, northing)
def draw_point(x, y, img, t=20):
    """Paint a filled red square centered at pixel (x, y) into `img`.

    `img` is an H x W x 3 array modified in place; `t` is the half-width
    of the square in pixels (previously hard-coded to 20, now a
    backward-compatible parameter).  The original stray copy-pasted
    comment ("a cloud_cover filter...") was removed.
    """
    img[y-t:y+t, x-t:x+t, :] = [255, 0, 0]
"""
Explanation: Now we are going to write a function to convert pixels to UTM
Also a quick function to plot a point
End of explanation
"""
pos = [3000, 1400]  # this is the pixel we want info about
ds = gdal.Open(infile)
# take the GDAL info and make it into UTM
my_utm = pixel2utm(ds, pos[0], pos[1])
# convert UTM into Lat Long
# NOTE(review): UTM zone 10 "N" is hard-coded here; it should really come
# from the zone parsed out of the projection string in the cell above.
my_lla = utm.to_latlon(my_utm[0], my_utm[1], 10, "N")
# do the lat long look up from OSM
geolocator = Nominatim()
# reverse look up the area based on lat/lon
location = geolocator.reverse("{0},{1}".format(my_lla[0], my_lla[1]))
# print location info (parenthesized print works on Python 2 and 3)
print(location.address)
print(location.raw)
# get the OSM place ID
osm_id = int(location.raw["place_id"])
print(osm_id)
# create an interface to the OSM API
MyApi = OsmApi()
# Look up our position
print(MyApi.NodeGet(osm_id))
"""
Explanation: Now let's query a point on our scene and see what OSM tells us.
First we'll define a pixel position
We'll use GDAL to open the scene and then map a pixel to UTM
We'll then convert the UTM value to Lat / Lon using the UTM region we found before.
Then we'll instantiate a Nominatim object and perform a revers lookup and print the results.
We'll then use the OSM Api to get node at this place.
End of explanation
"""
from matplotlib.patches import Circle
fig,ax = plt.subplots(1)
# draw the scene; bands are (channel, row, col) so transpose to (row, col, channel)
plt.imshow(arr[:3,:,:].transpose((1, 2, 0)))#, extent=extent)
fig = plt.gcf()
# annotate the queried pixel with the reverse-geocoded address
plt.annotate(location.address, xy=pos, xycoords='data',
             xytext=(0.25, 0.5), textcoords='figure fraction',color="red",
             arrowprops=dict(arrowstyle="->"))
ax.set_aspect('equal')
# mark the queried point with a red circle
circ = Circle((pos[0],pos[1]),60,color="red")
ax.add_patch(circ)
fig.set_size_inches(18.5, 10.5)
plt.show()
"""
Explanation: Now for completeness we'll plot our scene and add the annotation about the spot we found.
End of explanation
"""
import geopandas as gpd
fname = "./portland_parks_small.geojson"
# load once as a GeoDataFrame for analysis...
park_df = gpd.read_file(fname)
# ...and once as raw JSON, because the raw geojson works better with GDAL
portland_parks = json.load(open(fname,'r'))
geojson = [p for p in portland_parks["features"]]
# GeoPandas does not expose an area column out of the box, so compute one
# (units depend on the file's CRS -- useful only as a relative proxy)
p = [p.area for p in park_df["geometry"].tolist()]
park_df["area"] = pd.Series(p)
park_df["geojson"] = pd.Series(geojson)
# NOTE(review): sort_values returns a new frame; this result is discarded,
# so park_df itself is NOT sorted here -- assign it back if ordering matters.
park_df.sort_values(['area',], ascending=[1])
park_df.head()
#len(park_df)
#print park_df["wikipedia"].dropna()
"""
Explanation: Now, well, OSM is hard.
This is where I wanted to show how to programmatically query OSM for all sorts of data.
Turns out that it is a lot harder than it should be, especially if you want to work with GeoJson.
Out of scope for this talk, but let's punt.
OSM has a feature called Overpass. It is like the most convoluted Google maps ever using a very complex query language that I still don't grok.
We're going to use it to get all of the parks in Portland as GeoJSON using the web interface called Overpass Turbo.
Let's take a look at that.
Here's the query to run. Then export as GeoJSON
[bbox:{{bbox}}][timeout:1800];
way["leisure"="park"];map_to_area->.a;
foreach(
(._;>;);
is_in;
way(pivot)["leisure"="park"];
out geom;
);
Let's load up our park data
Load the file using GeoPandas (some syntactic sugar on Pandas).
Also load the raw json, and chunk out each park.
Update the area value because there is no value, not really useful except as a proxy measurement.
Update and sort our data frame.
End of explanation
"""
# overlay every park outline on the slippy map as a transparent green layer
for p in portland_parks["features"]:
    feat = {'geometry':p["geometry"],"properties":{
            'style':{'color': "#00FF00",'fillColor': "#00FF00",'fillOpacity': 0.0,'weight': 1}},
            'type':u"Feature"}
    # wrap the feature for ipyleaflet
    gjson = GeoJSON(data=feat)
    # add it to our map
    m.add_layer(gjson)
m
"""
Explanation: Now we'll update our slippy map.
Just toss the aois in, just like our scene footprints.
End of explanation
"""
# a park can appear as multiple rows (disjoint polygons); sum the areas per name
park_sz = park_df.groupby("name").sum()
# biggest parks first
park_sz = park_sz.sort_values(by='area',ascending=[0])
display(park_sz)
"""
Explanation: Now let's find the big parks.
The pandas dataframe can have multiple entries per park.
We can use the group by command to sum up these disparate areas.
Finally we'll output the results
End of explanation
"""
def write_geojson_by_name(df, name, outfile):
    """Write the GeoJSON features of every row of `df` matching `name`.

    `df` must carry a "name" column and a "geojson" column of Feature dicts;
    the matching features are saved to `outfile` as one FeatureCollection.
    """
    matching = df.loc[df["name"] == name, "geojson"]
    collection = {"type": "FeatureCollection",
                  "features": list(matching)}
    with open(outfile, 'w') as fp:
        json.dump(collection, fp)
def build_warp_command(geojson, scenes, out_name):
    """Build the gdalwarp argv that crops `scenes` to the `geojson` cutline.

    Returns a list suitable for subprocess.call: the fixed warp options,
    then the input scene paths in order, then the output file name.
    """
    commands = ["gdalwarp",
                "-t_srs", "EPSG:3857",   # reproject everything to web mercator
                "-cutline", geojson,     # clip to the park outline
                "-crop_to_cutline",
                "-tap",                  # align pixels to the target resolution grid
                "-tr", "3", "3",         # 3m x 3m output pixels (fixed: a missing
                "-overwrite"]            # comma used to fuse "3" and "-overwrite")
    commands.extend(scenes)
    commands.append(out_name)
    return commands

def crop_scenes_to_geojson(geojson, scenes, out_name):
    """
    Take in a geojson file, a list of scenes, and an output name.
    Call gdalwarp to crop the scenes to the geojson cutline and save the
    result to out_name. Returns gdalwarp's exit code.
    """
    commands = build_warp_command(geojson, scenes, out_name)
    # echo the exact command for debugging / reproducibility
    print(" ".join(commands))
    return subprocess.call(commands)
"""
Explanation: Now to the meat of the problem.
Our goal is to get each park as a small image so we can analyze it.
We'll write a function to create a geojson file from our big geojson file
We'll also write a function that takes in our scene list, an input geojson file, and calls gdal warp to generate our small park image.
End of explanation
"""
geo_json_files = []
tif_file_names = []
# de-duplicate park names (a park can span several rows of the dataframe)
unique_park_names = list(set(park_df["name"].tolist()))
for name in list(unique_park_names):
    # derive per-park file names from the park name
    geojson_name = "./parks/"+name.replace(" ","_")+".geojson"
    tif_name = "./parks/"+name.replace(" ","_")+".tif"
    # write this park's outline as its own geojson file
    write_geojson_by_name(park_df,name,geojson_name)
    # warp the downloaded scenes down to an image of just this park
    crop_scenes_to_geojson(geojson_name,tiff_files,tif_name)
    # keep the generated paths for the analysis steps below
    geo_json_files.append(geojson_name)
    tif_file_names.append(tif_name)
"""
Explanation: Let's put it all together
We're going to use the scenes we downloaded earlier as our input and build a little image for every park in Portland!
We just have to make a few file names and call the functions above.
If we really wanted to get fancy we could do this for every image that has our park and make a sick movie or lots of different types of a analysis.
End of explanation
"""
# convert the GeoTIFFs to jpg copies for quick display (never let
# ImageMagick touch the originals -- it strips the geo metadata)
# NOTE(review): no shell=True here, so the *.tif glob relies on
# ImageMagick's own wildcard expansion -- confirm on your platform.
magic = ["mogrify","-format", "jpg", "./parks/*.tif"]
subprocess.call(magic)
for p in tif_file_names[0:20]:
    print p
    display(Image(p.replace('tif','jpg')))
"""
Explanation: Let's take a look at the first few parks!
matplotlib and tifs can be a bit heavy handed.
Here's a pro tip: use ImageMagick and the built-in image display instead.
Use subprocess to tell imagemagick to convert tifs to jpg.
Then load and display the images.
WARNING: do not use imagemagick to modify geotiffs!
End of explanation
"""
def load_image3(filename):
    """Return a 3D (r, g, b) numpy array with the data in the specified TIFF filename.

    Implicitly returns None when the file does not exist -- callers must
    handle that (get_avg_greeness does).
    """
    path = os.path.abspath(os.path.join('./', filename))
    if os.path.exists(path):
        with rasterio.open(path) as src:
            # NOTE(review): the four bands are unpacked as b, g, r, mask and
            # restacked as [b, g, r]; the actual channel order depends on the
            # product's band order -- verify against the imagery asset spec,
            # since the docstring claims (r, g, b).
            b,g,r,mask = src.read()
            return np.dstack([b, g, r])
def get_avg_greeness(filename):
    """Score how "green" an image is.

    Computes the total excess of the green channel over the average of the
    red and blue channels, divided by the number of non-black pixels.
    Returns -1.0 when the image cannot be read or yields no usable signal.
    """
    retVal = -1.0
    try:
        # load the image (None when the file is missing)
        img = load_image3(filename)
        if img is not None:
            # Sum the channels; truly black pixels stay zero. Not a perfect
            # mask, but genuinely black spots are rare on earth imagery.
            brightness = np.add(np.add(img[:,:,0],img[:,:,1]),img[:,:,2])
            # count pixels that carry any data at all
            not_black = np.count_nonzero(brightness)
            # widen to int16 so green - (red+blue)/2 can go negative
            img = np.array(img,dtype='int16')
            total_green = np.sum(img[:,:,1]-((np.add(img[:,:,0],img[:,:,2])/2)))
            # average the green excess over the non-black pixels
            if total_green != 0 and not_black > 0:
                retVal = total_green / float(not_black)
        return retVal
    except Exception as e:
        # best effort: report the failure and fall back to the sentinel.
        # (Fixed: `print e` was Python-2-only syntax.)
        print(e)
        return -1.0
# score every park image (failed loads score -1.0)
greens = [get_avg_greeness(f) for f in tif_file_names]
print greens
# rank parks by greeness, greenest first
# NOTE(review): zip(...).sort only works on Python 2 where zip returns a
# list; on Python 3 wrap it as list(zip(...)) first.
paired = zip(tif_file_names,greens)
paired.sort(key=(lambda tup: tup[1]))
paired.reverse()
# strip "./parks/" prefix and ".tif" suffix to recover the park name
labels = [p[0][8:-4].replace("_"," ") for p in paired]
data = [p[1] for p in paired]
plt.figure(figsize=(20,6))
xlocations = np.array(range(len(paired)))+0.5
width = 1
plt.bar(xlocations, data, width=width)
plt.yticks(range(-1,25,1))
plt.xticks(xlocations+ width/2, labels)
plt.xlim(0, xlocations[-1]+width*2)
plt.ylim(-2,np.max(data)+1)
plt.title("Greeness over Average Red and Blue Per Park")
plt.gca().get_xaxis().tick_bottom()
plt.gca().get_yaxis().tick_left()
# rotate the park names so they stay readable
xa = plt.gca()
xa.set_xticklabels(xa.xaxis.get_majorticklabels(), rotation=90)
plt.show()
"""
Explanation: Now let's do some quick analytics -- your code goes here.
For completeness let's do some basic image processing.
For each of parks we are going to calculate the average "greeness" per pixel over the other two channels.
We do this as it controls for white pixels, like clouds.
Since there are black pixels, we'll have to control for that by using only the non-black pixels.
We'll use numpy here, but scikit image and OpenCV have many more features.
It is also worth noting that the visual product is probably only useful for calculating areas. If you want to do real science use the Analytics products.
The real way to do this is to calculate a Normalized Difference Vegetation Index (NDVI) using the analytic product.
Here is an example of NDVI calculations.
End of explanation
"""
# show the parks in greeness order (paired is sorted greenest-first above)
imgs = [p[0] for p in paired]
for p in imgs[0:35]:
    # strip "./parks/" and ".tif" to print the park name
    print p[8:-4].replace("_"," ")
    display(Image(p.replace('tif','jpg')))
"""
Explanation: Let's take a look at what this looks like in terms of images.
End of explanation
"""
# map each park tif to a fill opacity derived from its greeness score
opacity_map = {}
gmax = np.max(greens)
gmin = np.min(greens)
# nonlinear mapping: squaring g exaggerates the difference between
# the greenest parks and the rest; clipped into [0, 1]
opacity = [np.clip((float(g**2)-gmin)/float(gmax-gmin),0,1) for g in greens]
for op,name in zip(opacity,imgs):
    opacity_map[name]=op
# rebuild the slippy map
# NOTE(review): center, zoom, planetMapTiles, polygon and rectangle are
# defined in an earlier notebook cell not shown here.
m = Map(
    center=center,
    zoom=zoom,
    default_tiles = planetMapTiles # Uncomment to use Planet.com basemap
)
dc = DrawControl(
    polygon = polygon,
    rectangle = rectangle
)
# Initialize an action counter variable
actionCount = 0
AOIs = {}
# Register the draw controls handler
def handle_draw(self, action, geo_json):
    # Increment the action counter (shared notebook-level state)
    global actionCount
    actionCount += 1
    # Remove the `style` property from the GeoJSON
    geo_json['properties'] = {}
    # Convert geo_json output to a string and prettify (indent & replace ' with ")
    geojsonStr = json.dumps(geo_json, indent=2).replace("'", '"')
    AOIs[actionCount] = json.loads(geojsonStr)
# Attach the draw handler to the draw controls `on_draw` event
dc.on_draw(handle_draw)
m.add_control(dc)
m
# overlay every park, filled green with opacity proportional to greeness
for p in portland_parks["features"]:
    # reconstruct the tif path used as the key into opacity_map
    t = "./parks/"+p["properties"]["name"].replace(" ","_") + ".tif"
    feat = {'geometry':p["geometry"],"properties":{
            'style':{'color': "#00FF00",'fillColor': "#00FF00",'fillOpacity': opacity_map[t],'weight': 1}},
            'type':u"Feature"}
    # convert to geojson
    gjson = GeoJSON(data=feat)
    # add it to our map
    m.add_layer(gjson)
m
"""
Explanation: Now let's plot over our slippy map.
We'll calculate a non-linear opacity per park and then use that for plotting.
End of explanation
"""
|
Bedrock-py/bedrock-core | examples/RAND2011study/RewireAnalysis.ipynb | lgpl-3.0 | from bedrock.client.client import BedrockAPI
import requests
import pandas
import pprint
# Bedrock server endpoint; adjust host/port to match your deployment
SERVER = "http://localhost:81/"
# client handle used by every API call below
api = BedrockAPI(SERVER)
"""
Explanation: Rand 2011 Cooperation Study
This notebook outlines how to recreate the analysis of the Rand et al. 2011 study "Dynamic social networks promote cooperation in experiments with humans" Link to Paper
This workbook focuses on the re-wire analysis workflow portion of the study
Run the cooperation analysis first for a step by step description of interacting with Bedrock, this workflow uses those concepts to complete the rewire study
End of explanation
"""
# rewire-phase data from Rand et al. 2011; must be a comma-delimited CSV
# with a header row
filepath = 'Rand2011PNAS_rewire_data.csv'
# quick local sanity check before uploading to Bedrock
datafile = pandas.read_csv('Rand2011PNAS_rewire_data.csv')
datafile.head(10)
# rows with any missing values -- these matter later because the
# clustered-standard-error fits fail on missing factors
null_data = datafile[datafile.isnull().any(axis=1)]
null_data
"""
Explanation: Check for csv file locally
The following code opens the file and prints out the first part. The file must be a csv file with a header that has labels for each column. The file is comma delimited csv.
End of explanation
"""
# upload the CSV to Bedrock via the Spreadsheet ingest module
# (copies the file to the server; does not yet build a matrix)
ingest_id = 'opals.spreadsheet.Spreadsheet.Spreadsheet'
resp = api.put_source('Rand2011_rewire', ingest_id, 'default', {'file': open(filepath, "rb")})
if resp.status_code == 201:
    source_id = resp.json()['src_id']
    print('Source {0} successfully uploaded'.format(filepath))
else:
    # Non-201 usually means the source name already exists. Best effort:
    # show the server's error message if there is one...
    try:
        print("Error in Upload: {}".format(resp.json()['msg']))
    except Exception:
        pass
    # ...then fall back to reusing the existing source id, if provided
    try:
        source_id = resp.json()['src_id']
        print("Using existing source. If this is not the desired behavior, upload with a different name.")
    except Exception:
        print("No existing source id provided")
"""
Explanation: Now Upload the source file to the Bedrock Server
This code block uses the Spreadsheet ingest module to upload the source file to Bedrock. Note: This simply copies the file to the server, but does not create a Bedrock Matrix format
If the following fails to upload. Check that the csv file is in the correct comma delimited format with headers.
End of explanation
"""
# convert the uploaded CSV source into a Bedrock matrix so analytics can run
resp = api.create_matrix(source_id, 'rand_mtx')
base_mtx = resp[0]
matrix_id = base_mtx['id']
base_mtx
"""
Explanation: Create a Bedrock Matrix from the CSV Source
In order to use the data, the data source must be converted to a Bedrock matrix. The following code steps through that process. Here we are doing a simple transform of csv to matrix. There are options to apply filters (like renaming columns or excluding columns) during the conversion.
End of explanation
"""
# run the built-in Summarize analytic for per-column statistics
# (quartiles/mean/std for numeric columns, label counts for categorical)
analytic_id = "opals.summarize.Summarize.Summarize"
inputData = {
    'matrix.csv': base_mtx,
    'features.txt': base_mtx
}
paramsData = []
summary_mtx = api.run_analytic(analytic_id, base_mtx, 'rand_mtx_summary', input_data=inputData, parameter_data=paramsData)
output = api.download_results_matrix(summary_mtx['src_id'], summary_mtx['id'], 'matrix.csv')
output
"""
Explanation: Look at basic statistics on the source data
Here we can see that Bedrock has computed some basic statistics on the source data.
For numeric data
The quartiles, max, mean, min, and standard deviation are provided
For non-numeric data
The label values and counts for each label are provided.
For both types
The proposed tags and data type that Bedrock is suggesting are provided
End of explanation
"""
# keep only rows where the pair had no prior tie (previouslytie == 0)
analytic_id = "opals.select-from-dataframe.SelectByCondition.SelectByCondition"
inputData = {
    'matrix.csv': base_mtx,
    'features.txt': base_mtx
}
paramsData = [
    {"attrname":"colname","value":"previouslytie"},
    {"attrname":"comparator","value":"=="},
    {"attrname":"value","value":"0"}
]
filtered_mtx = api.run_analytic(analytic_id, base_mtx, 'prevtie0', input_data=inputData, parameter_data=paramsData)
# pull the filtered matrix back for a local sanity check
f = api.download_results_matrix(filtered_mtx['src_id'], filtered_mtx['id'], 'matrix.csv', remote_header_file='features.txt')
f.head(10)
"""
Explanation: Create a filtered matrix where previouslytie==0
End of explanation
"""
# drop rows with missing otherD -- clustered standard errors fail on NaNs
analytic_id = "opals.select-from-dataframe.SelectByCondition.SelectByCondition"
inputData = {
    'matrix.csv': filtered_mtx,
    'features.txt': filtered_mtx
}
paramsData = [
    {"attrname":"colname","value":"otherD"},
    {"attrname":"comparator","value":"notnull"},
    {"attrname":"value","value":""}
]
otherd_mtx = api.run_analytic(analytic_id, filtered_mtx, 'otherD', input_data=inputData, parameter_data=paramsData)
# binomial logit of nowtie on otherD, SEs clustered on session and player
analytic_id = "opals.logit2.Logit2.Logit2"
inputData = {
    'matrix.csv': otherd_mtx,
    'features.txt': otherd_mtx
}
paramsData = [
    {"attrname":"formula","value":"C(nowtie) ~ C(otherD)"},
    {"attrname":"family","value":"binomial"},
    {"attrname":"clustered_rse","value":"sessionnum,playerid"}
]
result_mtx = api.run_analytic(analytic_id, otherd_mtx, 'rewire_step1', input_data=inputData, parameter_data=paramsData)
# coefficient table and fit summary
coef_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'matrix.csv')
coef_table
summary_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'summary.csv')
summary_table
"""
Explanation: Look at otherd effect on nowtie
Note we have to remove rows that contain missing values for either our exogenous or endogenous factors or else clustered standard errors will fail
End of explanation
"""
# same model as above but also controlling for round number
analytic_id = "opals.logit2.Logit2.Logit2"
inputData = {
    'matrix.csv': otherd_mtx,
    'features.txt': otherd_mtx
}
paramsData = [
    {"attrname":"formula","value":"C(nowtie) ~ C(otherD) + C(roundnum)"},
    {"attrname":"family","value":"binomial"},
    {"attrname":"clustered_rse","value":"sessionnum,playerid"}
]
result_mtx = api.run_analytic(analytic_id, otherd_mtx, 'rewire_step1', input_data=inputData, parameter_data=paramsData)
coef_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'matrix.csv')
coef_table
summary_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'summary.csv')
summary_table
"""
Explanation: Look at otherd and roundnum effect on nowtie
Note we have to remove rows that contain missing values for either our exogenous or endogenous factors or else clustered standard errors will fail
End of explanation
"""
# now the complementary subset: pairs that DID have a prior tie
analytic_id = "opals.select-from-dataframe.SelectByCondition.SelectByCondition"
inputData = {
    'matrix.csv': base_mtx,
    'features.txt': base_mtx
}
paramsData = [
    {"attrname":"colname","value":"previouslytie"},
    {"attrname":"comparator","value":"=="},
    {"attrname":"value","value":"1"}
]
filtered_mtx = api.run_analytic(analytic_id, base_mtx, 'prevtie1', input_data=inputData, parameter_data=paramsData)
f = api.download_results_matrix(filtered_mtx['src_id'], filtered_mtx['id'], 'matrix.csv', remote_header_file='features.txt')
f.head(10)
"""
Explanation: Previouslytie == 1
End of explanation
"""
# as before: drop missing otherD, then fit the clustered logit,
# this time on the previouslytie == 1 subset
analytic_id = "opals.select-from-dataframe.SelectByCondition.SelectByCondition"
inputData = {
    'matrix.csv': filtered_mtx,
    'features.txt': filtered_mtx
}
paramsData = [
    {"attrname":"colname","value":"otherD"},
    {"attrname":"comparator","value":"notnull"},
    {"attrname":"value","value":""}
]
otherd_mtx = api.run_analytic(analytic_id, filtered_mtx, 'otherD', input_data=inputData, parameter_data=paramsData)
analytic_id = "opals.logit2.Logit2.Logit2"
inputData = {
    'matrix.csv': otherd_mtx,
    'features.txt': otherd_mtx
}
paramsData = [
    {"attrname":"formula","value":"C(nowtie) ~ C(otherD)"},
    {"attrname":"family","value":"binomial"},
    {"attrname":"clustered_rse","value":"sessionnum,playerid"}
]
result_mtx = api.run_analytic(analytic_id, otherd_mtx, 'rewire_step1', input_data=inputData, parameter_data=paramsData)
coef_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'matrix.csv')
coef_table
summary_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'summary.csv')
summary_table
"""
Explanation: Look at otherd effect on nowtie
Note we have to remove rows that contain missing values for either our exogenous or endogenous factors or else clustered standard errors will fail
End of explanation
"""
# same model, adding round number as a control
analytic_id = "opals.logit2.Logit2.Logit2"
inputData = {
    'matrix.csv': otherd_mtx,
    'features.txt': otherd_mtx
}
paramsData = [
    {"attrname":"formula","value":"C(nowtie) ~ C(otherD) + C(roundnum)"},
    {"attrname":"family","value":"binomial"},
    {"attrname":"clustered_rse","value":"sessionnum,playerid"}
]
result_mtx = api.run_analytic(analytic_id, otherd_mtx, 'rewire_step1', input_data=inputData, parameter_data=paramsData)
coef_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'matrix.csv')
coef_table
summary_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'summary.csv')
summary_table
"""
Explanation: Look at otherd and roundnum effect on nowtie
Note we have to remove rows that contain missing values for either our exogenous or endogenous factors or else clustered standard errors will fail
End of explanation
"""
# select rows where previouslytie equals otherD (complex, multi-column
# condition, so use SelectByComplexCondition instead of SelectByCondition)
analytic_id = "opals.select-from-dataframe.SelectByComplexCondition.SelectByComplexCondition"
inputData = {
    'matrix.csv': base_mtx,
    'features.txt': base_mtx
}
paramsData = [
    {"attrname":"condition","value":"(previouslytie == otherD)"}
]
filtered_mtx = api.run_analytic(analytic_id, base_mtx, 'prevtie1', input_data=inputData, parameter_data=paramsData)
f = api.download_results_matrix(filtered_mtx['src_id'], filtered_mtx['id'], 'matrix.csv', remote_header_file='features.txt')
f.head(10)
"""
Explanation: Filter where previouslytie == otherD
End of explanation
"""
# drop missing otherD, then fit act ~ otherD on this subset
# (note clustered_rse is empty here: plain, unclustered standard errors)
analytic_id = "opals.select-from-dataframe.SelectByCondition.SelectByCondition"
inputData = {
    'matrix.csv': filtered_mtx,
    'features.txt': filtered_mtx
}
paramsData = [
    {"attrname":"colname","value":"otherD"},
    {"attrname":"comparator","value":"notnull"},
    {"attrname":"value","value":""}
]
otherd_mtx = api.run_analytic(analytic_id, filtered_mtx, 'otherD', input_data=inputData, parameter_data=paramsData)
analytic_id = "opals.logit2.Logit2.Logit2"
inputData = {
    'matrix.csv': otherd_mtx,
    'features.txt': otherd_mtx
}
paramsData = [
    {"attrname":"formula","value":"C(act) ~ C(otherD)"},
    {"attrname":"family","value":"binomial"},
    {"attrname":"clustered_rse","value":""}
]
result_mtx = api.run_analytic(analytic_id, otherd_mtx, 'rewire_step1', input_data=inputData, parameter_data=paramsData)
coef_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'matrix.csv')
coef_table
summary_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'summary.csv')
summary_table
"""
Explanation: Look at otherd effect on act when prevtie == otherD
End of explanation
"""
# same act ~ otherD model with round number added as a control
analytic_id = "opals.logit2.Logit2.Logit2"
inputData = {
    'matrix.csv': otherd_mtx,
    'features.txt': otherd_mtx
}
paramsData = [
    {"attrname":"formula","value":"C(act) ~ C(otherD) + C(roundnum)"},
    {"attrname":"family","value":"binomial"},
    {"attrname":"clustered_rse","value":""}
]
result_mtx = api.run_analytic(analytic_id, otherd_mtx, 'rewire_step1', input_data=inputData, parameter_data=paramsData)
coef_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'matrix.csv')
coef_table
summary_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'summary.csv')
summary_table
"""
Explanation: Look at otherD and roundnum effect on act when prevtie == otherD
End of explanation
"""
# existing ties whose last interaction state was CC, CD or DC,
# then model link breaking as a function of mutual cooperation (CC)
analytic_id = "opals.select-from-dataframe.SelectByComplexCondition.SelectByComplexCondition"
inputData = {
    'matrix.csv': base_mtx,
    'features.txt': base_mtx
}
paramsData = [
    {"attrname":"condition","value":'(previouslytie == 1) & ((state=="CC") | (state=="CD") | (state=="DC"))'}
]
filtered_mtx = api.run_analytic(analytic_id, base_mtx, 'prevtie1', input_data=inputData, parameter_data=paramsData)
f = api.download_results_matrix(filtered_mtx['src_id'], filtered_mtx['id'], 'matrix.csv', remote_header_file='features.txt')
f.head(10)
analytic_id = "opals.logit2.Logit2.Logit2"
inputData = {
    'matrix.csv': filtered_mtx,
    'features.txt': filtered_mtx
}
paramsData = [
    {"attrname":"formula","value":"C(break_) ~ C(CC)"},
    {"attrname":"family","value":"binomial"},
    {"attrname":"clustered_rse","value":"sessionnum,playerid"}
]
result_mtx = api.run_analytic(analytic_id, filtered_mtx, 'rewire_step1', input_data=inputData, parameter_data=paramsData)
coef_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'matrix.csv')
coef_table
summary_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'summary.csv')
summary_table
"""
Explanation: Look at CC v CD/DC
End of explanation
"""
# same break analysis, contrasting CC against DD states only
analytic_id = "opals.select-from-dataframe.SelectByComplexCondition.SelectByComplexCondition"
inputData = {
    'matrix.csv': base_mtx,
    'features.txt': base_mtx
}
paramsData = [
    {"attrname":"condition","value":'(previouslytie == 1) & ((state=="CC") | (state=="DD"))'}
]
filtered_mtx = api.run_analytic(analytic_id, base_mtx, 'prevtie1', input_data=inputData, parameter_data=paramsData)
f = api.download_results_matrix(filtered_mtx['src_id'], filtered_mtx['id'], 'matrix.csv', remote_header_file='features.txt')
f.head(10)
analytic_id = "opals.logit2.Logit2.Logit2"
inputData = {
    'matrix.csv': filtered_mtx,
    'features.txt': filtered_mtx
}
paramsData = [
    {"attrname":"formula","value":"C(break_) ~ C(CC)"},
    {"attrname":"family","value":"binomial"},
    {"attrname":"clustered_rse","value":"sessionnum,playerid"}
]
result_mtx = api.run_analytic(analytic_id, filtered_mtx, 'rewire_step1', input_data=inputData, parameter_data=paramsData)
coef_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'matrix.csv')
coef_table
summary_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'summary.csv')
summary_table
"""
Explanation: Look at CC v DD
End of explanation
"""
# final contrast: DD against the mixed CD/DC states
analytic_id = "opals.select-from-dataframe.SelectByComplexCondition.SelectByComplexCondition"
inputData = {
    'matrix.csv': base_mtx,
    'features.txt': base_mtx
}
paramsData = [
    {"attrname":"condition","value":'(previouslytie == 1) & ((state=="DD") | (state=="CD") | (state=="DC"))'}
]
filtered_mtx = api.run_analytic(analytic_id, base_mtx, 'prevtie1', input_data=inputData, parameter_data=paramsData)
f = api.download_results_matrix(filtered_mtx['src_id'], filtered_mtx['id'], 'matrix.csv', remote_header_file='features.txt')
f.head(10)
analytic_id = "opals.logit2.Logit2.Logit2"
inputData = {
    'matrix.csv': filtered_mtx,
    'features.txt': filtered_mtx
}
paramsData = [
    {"attrname":"formula","value":"C(break_) ~ C(DD)"},
    {"attrname":"family","value":"binomial"},
    {"attrname":"clustered_rse","value":"sessionnum,playerid"}
]
result_mtx = api.run_analytic(analytic_id, filtered_mtx, 'rewire_step1', input_data=inputData, parameter_data=paramsData)
coef_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'matrix.csv')
coef_table
summary_table = api.download_results_matrix(result_mtx['src_id'], result_mtx['id'], 'summary.csv')
summary_table
"""
Explanation: Look at DD v CD/DC
End of explanation
"""
|
tensorflow/docs-l10n | site/ko/hub/tutorials/image_feature_vector.ipynb | apache-2.0 | # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Explanation: Copyright 2018 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
import collections
import io
import math
import os
import random
from six.moves import urllib
from IPython.display import clear_output, Image, display, HTML
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_hub as hub
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn.metrics as sk_metrics
import time
"""
Explanation: 전이 학습으로 꽃 분류하기
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/hub/tutorials/image_feature_vector"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org에서 보기</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/hub/tutorials/image_feature_vector.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab에서 실행</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/hub/tutorials/image_feature_vector.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png"> GitHub에서 소스 보기</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/hub/tutorials/image_feature_vector.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">노트북 다운로드</a></td>
<td><a href="https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/2"><img src="https://www.tensorflow.org/images/hub_logo_32px.png">TF Hub 모델보기</a></td>
</table>
아름다운 꽃을 보고 어떤 꽃인지 궁금한 적이 있지 않으셨나요? 여러분이 처음은 아닐 테니, 사진에서 꽃의 유형을 식별하는 방법을 만들어보겠습니다!
이미지를 분류하기 위해서는 컨볼루셔널 신경망이라고 하는 특정 유형의 심층 신경망이 특히 강력한 힘을 발휘하는 것으로 입증되었습니다. 그러나 현대의 컨볼루셔널 신경망에는 수백만 개의 매개변수가 있습니다. 처음부터 훈련하려면 레이블이 지정된 많은 훈련 데이터와 많은 컴퓨팅 성능(수백 시간 이상의 GPU 시간)이 필요합니다. 레이블이 붙은 사진이 약 3천장 밖에 없고 훨씬 적은 시간만 소비하기를 원하므로 더 현명하게 판단해야 합니다.
우리는 전이 학습(transfer learning)이라는 기술을 사용하여 사전 훈련된 네트워크(약 백만 개의 일반 이미지에 대해 훈련됨)를 사용하여 특성을 추출하고 꽃 이미지를 분류하는 고유한 작업을 위해 맨 상층에서 새 레이어를 훈련합니다.
설정
End of explanation
"""
# where the extracted flower photos live
FLOWERS_DIR = './flower_photos'
# fraction of each class used for training; the rest is the test set
TRAIN_FRACTION = 0.8
# fixed seed so the train/test split is reproducible across runs
RANDOM_SEED = 2018
def download_images():
  """If the images aren't already downloaded, save them to FLOWERS_DIR."""
  if not os.path.exists(FLOWERS_DIR):
    DOWNLOAD_URL = 'http://download.tensorflow.org/example_images/flower_photos.tgz'
    print('Downloading flower images from %s...' % DOWNLOAD_URL)
    urllib.request.urlretrieve(DOWNLOAD_URL, 'flower_photos.tgz')
    # IPython shell magic -- this line only works inside a notebook/Colab
    !tar xfz flower_photos.tgz
  print('Flower photos are located in %s' % FLOWERS_DIR)
def make_train_and_test_sets():
  """Split the data into train and test sets and get the label classes.

  Walks FLOWERS_DIR, using the top-level subdirectory names as the class
  labels. Returns (train_examples, test_examples, classes) where each
  example is an (image_path, label_class) pair and classes maps label
  index -> class name. All shuffles use the seeded RANDOM_SEED generator,
  so the split is deterministic.
  """
  train_examples, test_examples = [], []
  shuffler = random.Random(RANDOM_SEED)
  is_root = True
  for (dirname, subdirs, filenames) in tf.gfile.Walk(FLOWERS_DIR):
    # The root directory gives us the classes
    if is_root:
      subdirs = sorted(subdirs)
      classes = collections.OrderedDict(enumerate(subdirs))
      label_to_class = dict([(x, i) for i, x in enumerate(subdirs)])
      is_root = False
    # The sub directories give us the image files for training.
    else:
      # sort first so the seeded shuffle is reproducible regardless of
      # filesystem ordering
      filenames.sort()
      shuffler.shuffle(filenames)
      full_filenames = [os.path.join(dirname, f) for f in filenames]
      # NOTE(review): assumes '/' separators in the walked paths -- verify
      # on Windows-style local paths.
      label = dirname.split('/')[-1]
      label_class = label_to_class[label]
      # An example is the image file and it's label class.
      examples = list(zip(full_filenames, [label_class] * len(filenames)))
      # per-class split keeps the class balance in both sets
      num_train = int(len(filenames) * TRAIN_FRACTION)
      train_examples.extend(examples[:num_train])
      test_examples.extend(examples[num_train:])
  shuffler.shuffle(train_examples)
  shuffler.shuffle(test_examples)
  return train_examples, test_examples, classes
# Download the images and split the images into train and test sets.
download_images()
TRAIN_EXAMPLES, TEST_EXAMPLES, CLASSES = make_train_and_test_sets()
NUM_CLASSES = len(CLASSES)
# report dataset shape so a bad download/split is obvious immediately
print('\nThe dataset has %d label classes: %s' % (NUM_CLASSES, CLASSES.values()))
print('There are %d training images' % len(TRAIN_EXAMPLES))
print('there are %d test images' % len(TEST_EXAMPLES))
"""
Explanation: 꽃 데이터세트
꽃 데이터세트는 5개의 가능한 클래스 레이블이 있는 꽃 이미지로 구성됩니다.
머신러닝 모델을 훈련할 때 데이터를 훈련 및 테스트 데이터세트로 분할합니다. 훈련 데이터에서 모델을 훈련한 다음 모델이 본 적이 없는 데이터(테스트 세트)에서 모델이 얼마나 잘 동작하는지 평가합니다.
훈련 및 테스트 예제를 다운로드하고(시간이 걸릴 수 있음) 훈련 및 테스트 세트로 나눕니다.
다음 두 개의 셀을 실행합니다.
End of explanation
"""
#@title Show some labeled images
def get_label(example):
  """Return the numeric class label of an (image_path, label) example."""
  label = example[1]
  return label
def get_class(example):
  """Resolve an example's numeric label to its flower-class name."""
  label = get_label(example)
  return CLASSES[label]
def get_encoded_image(example):
  """Get the image data (encoded jpg) of given example.

  Returns the raw JPEG bytes; decoding happens later (get_image or the
  TF graph's decode_jpeg).
  """
  image_path = example[0]
  # tf.gfile handles both local and remote (e.g. GCS) paths
  return tf.gfile.GFile(image_path, 'rb').read()
def get_image(example):
  """Get image as np.array of pixels for given example.

  Decodes the example's JPEG bytes in memory with matplotlib's imread.
  """
  return plt.imread(io.BytesIO(get_encoded_image(example)), format='jpg')
def display_images(images_and_classes, cols=5):
  """Show (image, class-name) pairs in a grid that is `cols` images wide."""
  n_images = len(images_and_classes)
  rows = int(math.ceil(n_images / cols))
  figure = plt.figure()
  figure.set_size_inches(cols * 3, rows * 3)
  # subplot cells are numbered from 1, left-to-right, top-to-bottom
  for cell, (image, flower_class) in enumerate(images_and_classes, start=1):
    plt.subplot(rows, cols, cell)
    plt.axis('off')
    plt.imshow(image)
    plt.title(flower_class)
# show the first NUM_IMAGES shuffled training examples with their labels
NUM_IMAGES = 15 #@param {type: 'integer'}
display_images([(get_image(example), get_class(example))
                for example in TRAIN_EXAMPLES[:NUM_IMAGES]])
"""
Explanation: 데이터 탐색하기
꽃 데이터세트는 꽃 이미지로 레이블이 지정된 예들로 구성됩니다. 각 예에는 JPEG 꽃 이미지와 클래스 레이블(꽃의 종류)이 포함되어 있습니다. 레이블과 함께 몇 개의 이미지를 표시해 보겠습니다.
End of explanation
"""
LEARNING_RATE = 0.01
tf.reset_default_graph()
# Load a pre-trained TF-Hub module for extracting features from images. We've
# chosen this particular module for speed, but many other choices are available.
image_module = hub.Module('https://tfhub.dev/google/imagenet/mobilenet_v2_035_128/feature_vector/2')
# Preprocessing images into tensors with size expected by the image module.
encoded_images = tf.placeholder(tf.string, shape=[None])
image_size = hub.get_expected_image_size(image_module)
def decode_and_resize_image(encoded):
decoded = tf.image.decode_jpeg(encoded, channels=3)
decoded = tf.image.convert_image_dtype(decoded, tf.float32)
return tf.image.resize_images(decoded, image_size)
batch_images = tf.map_fn(decode_and_resize_image, encoded_images, dtype=tf.float32)
# The image module can be applied as a function to extract feature vectors for a
# batch of images.
features = image_module(batch_images)
def create_model(features):
    """Build a classification head on top of the extracted image features.

    Currently just a single linear layer producing one logit per class.
    You can try adding another layer, but beware: two linear layers (when
    activation=None) collapse to a single linear layer.  A nonlinear layer
    would look like:
      layer = tf.layers.dense(inputs=..., units=..., activation=tf.nn.relu)
    """
    return tf.layers.dense(inputs=features, units=NUM_CLASSES, activation=None)
# For each class (kind of flower), the model outputs some real number as a score
# how much the input resembles this class. This vector of numbers is often
# called the "logits".
logits = create_model(features)
labels = tf.placeholder(tf.float32, [None, NUM_CLASSES])
# Mathematically, a good way to measure how much the predicted probabilities
# diverge from the truth is the "cross-entropy" between the two probability
# distributions. For numerical stability, this is best done directly from the
# logits, not the probabilities extracted from them.
cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy)
# Let's add an optimizer so we can train the network.
optimizer = tf.train.GradientDescentOptimizer(learning_rate=LEARNING_RATE)
train_op = optimizer.minimize(loss=cross_entropy_mean)
# The "softmax" function transforms the logits vector into a vector of
# probabilities: non-negative numbers that sum up to one, and the i-th number
# says how likely the input comes from class i.
probabilities = tf.nn.softmax(logits)
# We choose the highest one as the predicted class.
prediction = tf.argmax(probabilities, 1)
correct_prediction = tf.equal(prediction, tf.argmax(labels, 1))
# The accuracy will allow us to eval on our test set.
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
"""
Explanation: 모델 빌드하기
TF-Hub 이미지 특성 벡터 모듈을 로드하고, 여기에 선형 분류자 스택을 놓고 훈련 및 평가 연산을 추가하겠습니다. 다음 셀은 모델과 훈련을 설명하는 TF 그래프를 빌드하지만 훈련을 실행하지는 않습니다(다음 단계에서 실행할 예정).
End of explanation
"""
# How long will we train the network (number of batches).
NUM_TRAIN_STEPS = 100 #@param {type: 'integer'}
# How many training examples we use in each step.
TRAIN_BATCH_SIZE = 10 #@param {type: 'integer'}
# How often to evaluate the model performance.
EVAL_EVERY = 10 #@param {type: 'integer'}
def get_batch(batch_size=None, test=False):
    """Get a random batch of examples.

    With batch_size=None the full example list is returned unchanged;
    otherwise *batch_size* examples are sampled without replacement.
    """
    pool = TEST_EXAMPLES if test else TRAIN_EXAMPLES
    if batch_size:
        return random.sample(pool, batch_size)
    return pool
def get_images_and_labels(batch_examples):
    """Return parallel lists of encoded image bytes and one-hot labels."""
    images = []
    one_hot_labels = []
    for example in batch_examples:
        images.append(get_encoded_image(example))
        one_hot_labels.append(get_label_one_hot(example))
    return images, one_hot_labels
def get_label_one_hot(example):
    """Get the one hot encoding vector for the example."""
    one_hot = np.zeros(NUM_CLASSES)
    # Set the position of the example's label to 1 (equivalent to np.put
    # on a 1-D vector).
    one_hot[get_label(example)] = 1
    return one_hot
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for i in range(NUM_TRAIN_STEPS):
# Get a random batch of training examples.
train_batch = get_batch(batch_size=TRAIN_BATCH_SIZE)
batch_images, batch_labels = get_images_and_labels(train_batch)
# Run the train_op to train the model.
train_loss, _, train_accuracy = sess.run(
[cross_entropy_mean, train_op, accuracy],
feed_dict={encoded_images: batch_images, labels: batch_labels})
is_final_step = (i == (NUM_TRAIN_STEPS - 1))
if i % EVAL_EVERY == 0 or is_final_step:
# Get a batch of test examples.
test_batch = get_batch(batch_size=None, test=True)
batch_images, batch_labels = get_images_and_labels(test_batch)
# Evaluate how well our model performs on the test set.
test_loss, test_accuracy, test_prediction, correct_predicate = sess.run(
[cross_entropy_mean, accuracy, prediction, correct_prediction],
feed_dict={encoded_images: batch_images, labels: batch_labels})
print('Test accuracy at step %s: %.2f%%' % (i, (test_accuracy * 100)))
def show_confusion_matrix(test_labels, predictions):
    """Plot a row-normalized confusion matrix as a heatmap.

    Args:
      test_labels: one-hot ground-truth labels, shape (n_examples, n_classes).
      predictions: predicted class indices, shape (n_examples,).
    """
    confusion = sk_metrics.confusion_matrix(
        np.argmax(test_labels, axis=1), predictions)
    # Normalize each row so it sums to 1 (per-true-class proportions).
    # keepdims=True is required: dividing by a plain (n,) vector would
    # broadcast along the last axis and divide each cell by the wrong
    # row's total.
    confusion_normalized = (
        confusion.astype("float") / confusion.sum(axis=1, keepdims=True))
    axis_labels = list(CLASSES.values())
    ax = sns.heatmap(
        confusion_normalized, xticklabels=axis_labels, yticklabels=axis_labels,
        cmap='Blues', annot=True, fmt='.2f', square=True)
    plt.title("Confusion matrix")
    plt.ylabel("True label")
    plt.xlabel("Predicted label")
show_confusion_matrix(batch_labels, test_prediction)
"""
Explanation: 네트워크 훈련하기
이제 모델을 빌드했으므로 모델을 훈련하고 테스트세트에서 어떻게 동작하는지 살펴보겠습니다.
End of explanation
"""
incorrect = [
(example, CLASSES[prediction])
for example, prediction, is_correct in zip(test_batch, test_prediction, correct_predicate)
if not is_correct
]
display_images(
[(get_image(example), "prediction: {0}\nlabel:{1}".format(incorrect_prediction, get_class(example)))
for (example, incorrect_prediction) in incorrect[:20]])
"""
Explanation: 잘못된 예측
여기서 빌드한 모델이 잘못 동작하는 테스트 예를 자세히 살펴보겠습니다.
테스트 세트에 레이블이 잘못 지정된 예가 있습니까?
테스트 세트에 잘못된 데이터가 있습니까? 실제로 꽃 사진이 아닌 이미지가 있습니까?
모델이 실수한 이유를 이해할 수 있는 이미지가 있습니까?
End of explanation
"""
|
cathalmccabe/PYNQ | boards/Pynq-Z1/base/notebooks/pmod/pmod_grove_tmp.ipynb | bsd-3-clause | from pynq.overlays.base import BaseOverlay
base = BaseOverlay("base.bit")
"""
Explanation: Grove Temperature Sensor 1.2
This example shows how to use the Grove Temperature Sensor v1.2. You will also see how to plot a graph using matplotlib. The Grove Temperature sensor produces an analog signal, and requires an ADC.
A Grove Temperature sensor and Pynq Grove Adapter, or Pynq Shield is required. The Grove Temperature Sensor, Pynq Grove Adapter, and Grove I2C ADC are used for this example.
You can read a single value of temperature or read multiple values at regular intervals for a desired duration.
At the end of this notebook, a Python only solution with single-sample read functionality is provided.
1. Load overlay
End of explanation
"""
import math
from pynq.lib.pmod import Grove_TMP
from pynq.lib.pmod import PMOD_GROVE_G4
tmp = Grove_TMP(base.PMODB,PMOD_GROVE_G4)
temperature = tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
"""
Explanation: 2. Read single temperature
This example shows on how to get a single temperature sample from the Grove TMP sensor.
The Grove ADC is assumed to be attached to the GR4 connector of the StickIt. The StickIt module is assumed to be plugged in the 1st PMOD labeled JB. The Grove TMP sensor is connected to the other connector of the Grove ADC.
Grove ADC provides a raw sample which is converted into resistance first and then converted into temperature.
End of explanation
"""
import time
%matplotlib inline
import matplotlib.pyplot as plt
tmp.set_log_interval_ms(100)
tmp.start_log()
# Change input during this time
time.sleep(10)
tmp_log = tmp.get_log()
plt.plot(range(len(tmp_log)), tmp_log, 'ro')
plt.title('Grove Temperature Plot')
min_tmp_log = min(tmp_log)
max_tmp_log = max(tmp_log)
plt.axis([0, len(tmp_log), min_tmp_log, max_tmp_log])
plt.show()
"""
Explanation: 3. Start logging once every 100ms for 10 seconds
Executing the next cell will start logging the temperature sensor values every 100ms, and will run for 10s. You can try touch/hold the temperature sensor to vary the measured temperature.
You can vary the logging interval and the duration by changing the values 100 and 10 in the cell below. The raw samples are stored in the internal memory, and converted into temperature values.
End of explanation
"""
from time import sleep
from math import log
from pynq.lib.pmod import PMOD_GROVE_G3
from pynq.lib.pmod import PMOD_GROVE_G4
from pynq.lib import Pmod_IIC
class Python_Grove_TMP(Pmod_IIC):
    """This class controls the grove temperature sensor.
    This class inherits from the PMODIIC class.
    Attributes
    ----------
    iop : _IOP
        The _IOP object returned from the DevMode.
    scl_pin : int
        The SCL pin number.
    sda_pin : int
        The SDA pin number.
    iic_addr : int
        The IIC device address.
    """
    def __init__(self, pmod_id, gr_pins, model = 'v1.2'):
        """Return a new instance of a grove temperature sensor object.
        Parameters
        ----------
        pmod_id : int
            The PMOD ID (1, 2) corresponding to (PMODA, PMODB).
        gr_pins: list
            The group pins on Grove Adapter. G3 or G4 is valid.
        model : string
            Temperature sensor model (can be found on the device).
        """
        # Only groups G3 and G4 expose the SCL/SDA pair needed for I2C.
        if gr_pins in [PMOD_GROVE_G3, PMOD_GROVE_G4]:
            [scl_pin,sda_pin] = gr_pins
        else:
            raise ValueError("Valid group numbers are G3 and G4.")
        # Each revision has its own B value (thermistor beta coefficient
        # used in the Steinhart-Hart conversion in read()).
        if model == 'v1.2':
            # v1.2 uses thermistor NCP18WF104F03RC
            self.bValue = 4250
        elif model == 'v1.1':
            # v1.1 uses thermistor NCP18WF104F03RC
            self.bValue = 4250
        else:
            # v1.0 uses thermistor TTC3A103*39H
            self.bValue = 3975
        # 0x50 is the I2C address of the Grove ADC the sensor sits behind.
        super().__init__(pmod_id, scl_pin, sda_pin, 0x50)
        # Initialize the Grove ADC
        # (writes 0x20 to configuration register 0x2 -- presumably the
        # ADC's power-up/default config; confirm against the ADC datasheet)
        self.send([0x2,0x20]);
    def read(self):
        """Read temperature in Celsius from grove temperature sensor.
        Parameters
        ----------
        None
        Returns
        -------
        float
            Temperature reading in Celsius.
        """
        val = self._read_grove_adc()
        # Convert the 12-bit ADC reading into a resistance ratio, then
        # apply the B-parameter (simplified Steinhart-Hart) equation;
        # 298.15 K is the 25 degC reference point of the thermistor.
        R = 4095.0/val - 1.0
        temp = 1.0/(log(R)/self.bValue + 1/298.15)-273.15
        return temp
    def _read_grove_adc(self):
        # Select register 0, then read the two-byte conversion result
        # (upper 4 bits masked off; doubled per the Grove ADC scaling).
        self.send([0])
        bytes = self.receive(2)
        return 2*(((bytes[0] & 0x0f) << 8) | bytes[1])
from pynq import PL
# Flush IOP state
PL.reset()
py_tmp = Python_Grove_TMP(base.PMODB, PMOD_GROVE_G4)
temperature = py_tmp.read()
print(float("{0:.2f}".format(temperature)),'degree Celsius')
"""
Explanation: 4. A Pure Python class to exercise the AXI IIC Controller inheriting from PMOD_IIC
This class is ported from http://wiki.seeedstudio.com/Grove-Temperature_Sensor/
End of explanation
"""
|
texib/deeplearning_homework | Keras_LSTM2.ipynb | mit | sql = """
SELECT
date,count(distinct cookie_pta) as uv
from
TABLE_DATE_RANGE(pixinsight.article_visitor_log_1_100_, TIMESTAMP('2017-01-01'), CURRENT_TIMESTAMP())
where venue = 'pixnet'
group by date
order by date
"""
from os import environ
# load and plot dataset
import pandas as pd
from pandas import read_csv
from pandas import datetime
from matplotlib import pyplot
import matplotlib.dates as mdates
%matplotlib notebook
# %matplotlib inline
# load dataset
def parser(date_string):
    """Parse a 'YYYYMMDD' string into a datetime object."""
    return datetime.strptime(date_string, '%Y%m%d')
series = pd.read_gbq(sql,project_id=environ['PROJECT_ID'], verbose=False, private_key=environ['GOOGLE_KEY'])#,header=0, parse_dates=[0], index_col='date', squeeze=True, date_parser=parser)
series['date'] = pd.to_datetime(series['date'],format='%Y%m%d')
series.index = series['date']
del series['date']
# summarize first few rows
print(series.head())
"""
Explanation: 原始資料來源的 SQL,這是抽樣過的資料,當中也有一筆資料是修改過的,因為當天 Server 似乎出了一些問題,導至流量大幅下降
End of explanation
"""
from sklearn.preprocessing import scale,MinMaxScaler
scaler = MinMaxScaler()
x = series.values
x = x.reshape([x.shape[0],1])
scaler.fit(x)
x_scaled = scaler.transform(x)
pyplot.figure()
pyplot.plot(x_scaled)
pyplot.show()
"""
Explanation: 進行 scale to 0-1 ,方便作為 input 及 output (因為 sigmoid 介於 0~1 之間)
End of explanation
"""
#往回看 30 天前的每一筆資料
step_size = 15
print("原始資料長度:{}".format(x_scaled.shape))
def window_stack(a, stepsize=1, width=3):
    """Stack sliding windows of *a* side by side.

    Row i of the result is a[i:i+width] laid out across *width* column
    blocks, turning a (n, 1) time series into (n-width+1, width) training
    windows.  Column block j holds a[j : n-width+1+j : stepsize].

    Note: ``np.hstack`` requires an actual sequence of arrays — passing a
    generator is an error in modern NumPy — so the slices are collected
    into a list first.
    """
    # `1 + i - width or None` maps the i == width-1 case (slice end 0) to
    # None, i.e. "through the end of the array".
    slices = [a[i:1 + i - width or None:stepsize] for i in range(width)]
    return np.hstack(slices)
import numpy as np
train_x = window_stack(x_scaled, stepsize=1, width=step_size)
# 最後一筆資料要放棄,因為沒有未來的答案作驗證
train_x = train_x[:-1]
train_x.shape
# 請注意千萬不將每一筆(Row) 當中的最後一天資料作為 Training Data 中的 Input Data
train_y = np.array([i for i in x_scaled[step_size:]])
"""
Explanation: 產生 x,y pair
舉列來說假設將 Step Size 設為 4 天,故一筆 Training Data ,為連續 4 天的流量。再來利用這4天的資料來預測第 5 天的流量
綠色的部是 Training Data(前4天的資料),藍色的部份是需要被預測的部份。示意如下圖
<img align="left" width="50%" src="./imgs/sequence_uv.png" />
End of explanation
"""
train_y.shape
train_x[0]
train_x[1]
train_y[0]
"""
Explanation: 確認產出來的 Training Data 沒有包含到 Testing Data
End of explanation
"""
# reshape input to be [samples, time steps, features]
trainX = np.reshape(train_x, (train_x.shape[0], step_size, 1))
from keras import Sequential
from keras.layers import LSTM,Dense
# create and fit the LSTM network
model = Sequential()
# input_shape(step_size,feature_dim)
model.add(LSTM(4, input_shape=(step_size,1), unroll=True))
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam',metrics=['accuracy'])
model.summary()
"""
Explanation: Design Graph
End of explanation
"""
validation_size = 60
val_loss = []
loss = []
for _ in range(400):
history = model.fit(trainX[:-1*validation_size],
train_y[:-1*validation_size],
epochs=1,shuffle=False,
validation_data=(trainX[-1*validation_size:],
train_y[-1*validation_size:]))
loss.append(history.history['loss'])
val_loss.append(history.history['val_loss'])
model.reset_states()
"""
Explanation: 最後30 筆資料不要看
End of explanation
"""
pyplot.figure()
pyplot.plot(loss)
pyplot.plot(val_loss)
pyplot.show()
"""
Explanation: 看一下 Error Rate 曲線
End of explanation
"""
predict_y = model.predict(trainX)
train_y.shape
pyplot.figure()
pyplot.plot(scaler.inverse_transform(predict_y))
pyplot.plot(scaler.inverse_transform(train_y))
pyplot.show()
"""
Explanation: 看一下曲線擬合效果
End of explanation
"""
predict_y = model.predict(trainX[-1*validation_size:])
predict_y = scaler.inverse_transform(predict_y)
predict_y.shape
pyplot.figure()
pyplot.plot(x[-1*(validation_size+1):-1])
pyplot.plot(predict_y)
pyplot.show()
"""
Explanation: 來預測最後 60 天資料預出來的結果
End of explanation
"""
|
QuantScientist/Deep-Learning-Boot-Camp | day03/Advanced_Keras_Tutorial/5.0 Custom Layers.ipynb | mit | from keras.models import Sequential
from keras.layers import Dense, Dropout, Layer, Activation
from keras.datasets import mnist
from keras import backend as K
from keras.utils import np_utils
"""
Explanation: Custom Keras Layer
Idea:
We build a custom activation layer called Antirectifier,
which modifies the shape of the tensor that passes through it.
We need to specify two methods: get_output_shape_for and call.
Note that the same result can also be achieved via a Lambda layer (keras.layer.core.Lambda).
```python
keras.layers.core.Lambda(function, output_shape=None, arguments=None)
```
Because our custom layer is written with primitives from the Keras backend (K), our code can run both on TensorFlow and Theano.
End of explanation
"""
class Antirectifier(Layer):
    '''This is the combination of a sample-wise
    L2 normalization with the concatenation of the
    positive part of the input with the negative part
    of the input. The result is a tensor of samples that are
    twice as large as the input samples.
    It can be used in place of a ReLU.
    # Input shape
        2D tensor of shape (samples, n)
    # Output shape
        2D tensor of shape (samples, 2*n)
    # Theoretical justification
        When applying ReLU, assuming that the distribution
        of the previous output is approximately centered around 0.,
        you are discarding half of your input. This is inefficient.
        Antirectifier allows to return all-positive outputs like ReLU,
        without discarding any data.
        Tests on MNIST show that Antirectifier allows to train networks
        with twice less parameters yet with comparable
        classification accuracy as an equivalent ReLU-based network.
    '''
    def compute_output_shape(self, input_shape):
        # The feature dimension doubles because call() concatenates the
        # positive and negative halves along axis 1.
        shape = list(input_shape)
        assert len(shape) == 2  # only valid for 2D tensors
        shape[-1] *= 2
        return tuple(shape)
    def call(self, inputs):
        # Center each sample, then L2-normalize it so the positive and
        # negative halves live on a common scale.
        inputs -= K.mean(inputs, axis=1, keepdims=True)
        inputs = K.l2_normalize(inputs, axis=1)
        # Keep both the positive part and the (rectified) negative part
        # instead of discarding the negative half as ReLU would.
        pos = K.relu(inputs)
        neg = K.relu(-inputs)
        return K.concatenate([pos, neg], axis=1)
"""
Explanation: AntiRectifier Layer
End of explanation
"""
# global parameters
batch_size = 128
nb_classes = 10
nb_epoch = 10
"""
Explanation: Parametrs and Settings
End of explanation
"""
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(60000, 784)
X_test = X_test.reshape(10000, 784)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
"""
Explanation: Data Preparation
End of explanation
"""
# build the model
model = Sequential()
model.add(Dense(256, input_shape=(784,)))
model.add(Antirectifier())
model.add(Dropout(0.1))
model.add(Dense(256))
model.add(Antirectifier())
model.add(Dropout(0.1))
model.add(Dense(10))
model.add(Activation('softmax'))
# compile the model
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
# train the model
model.fit(X_train, Y_train,
batch_size=batch_size, epochs=nb_epoch,
verbose=1, validation_data=(X_test, Y_test))
"""
Explanation: Model with Custom Layer
End of explanation
"""
## your code here
"""
Explanation: Excercise
Compare with an equivalent network that is 2x bigger (in terms of Dense layers) + ReLU)
End of explanation
"""
|
molgor/spystats | notebooks/.ipynb_checkpoints/Analysis of spatial models using systematic and random samples-checkpoint.ipynb | bsd-2-clause | new_data = prepareDataFrame("/RawDataCSV/idiv_share/plotsClimateData_11092017.csv")
## En Hec
#new_data = prepareDataFrame("/home/hpc/28/escamill/csv_data/idiv/plotsClimateData_11092017.csv")
"""
Explanation: new_data['residuals1'] = results.resid
End of explanation
"""
def systSelection(dataframe, k):
    """Return a systematic sample: every k-th row of *dataframe*."""
    total_rows = len(dataframe)
    chosen_positions = list(range(0, total_rows, k))
    return dataframe.iloc[chosen_positions]
##################
k = 10 # The k-th element to take as a sample
systematic_sample = systSelection(new_data,k)
ax= systematic_sample.plot(column='logBiomass',figsize=(16,10),cmap=plt.cm.Blues,edgecolors='')
"""
Explanation: Subseting the data
Three different methods for subsetting the data.
1. Using a systematic selection by index modulus
2. Using a random uniform selection by indices.
2. A geographic subselection (Clip)
Systematic selection
End of explanation
"""
def randomSelection(dataframe, p):
    """Return *p* rows of *dataframe* drawn uniformly without replacement."""
    total_rows = len(dataframe)
    chosen_positions = np.random.choice(total_rows, p, replace=False)
    return dataframe.iloc[chosen_positions]
#################
n = len(new_data)
p = 3000  # The amount of samples taken (let's do it without replacement)
# Pass the dataframe itself, not its length: randomSelection expects
# (dataframe, sample_size).  The original call randomSelection(n, p)
# would fail because an int has no .iloc.
random_sample = randomSelection(new_data, p)
ax = random_sample.plot(column='logBiomass', figsize=(16, 10), cmap=plt.cm.Blues, edgecolors='')
"""
Explanation: Random (Uniform) selection
End of explanation
"""
def subselectDataFrameByCoordinates(dataframe, namecolumnx, namecolumny, minx, maxx, miny, maxy):
    """Clip *dataframe* to the open bounding box (minx, maxx) x (miny, maxy).

    namecolumnx / namecolumny name the x (longitude) and y (latitude)
    columns.  Rows exactly on a boundary are excluded (strict inequalities).
    """
    minx, maxx = float(minx), float(maxx)
    miny, maxy = float(miny), float(maxy)
    xs = dataframe[namecolumnx]
    ys = dataframe[namecolumny]
    inside_box = (xs > minx) & (xs < maxx) & (ys > miny) & (ys < maxy)
    return dataframe[inside_box]
# Consider the following subregion
minx = -100
maxx = -85
miny = 30
maxy = 35
section = subselectDataFrameByCoordinates(new_data,'LON','LAT',minx,maxx,miny,maxy)
#section = new_data[lambda x: (x.LON > minx) & (x.LON < maxx) & (x.LAT > miny) & (x.LAT < maxy) ]
section.plot(column='logBiomass')
"""
Explanation: Geographic subselection
End of explanation
"""
gvg,tt = createVariogram("/apps/external_plugins/spystats/HEC_runs/results/logbiomas_logsppn_res.csv",new_data)
#For HEC
#gvg,tt = createVariogram("/home/hpc/28/escamill/spystats/HEC_runs/results/logbiomas_logsppn_res.csv",new_data)
import numpy as np
xx = np.linspace(0,1000000,1000)
gvg.plot(refresh=False)
plt.plot(xx,gvg.model.f(xx),lw=2.0,c='k')
plt.title("Empirical Variogram with fitted Whittle Model")
gvg.model
%time n_obs,rsq,params,pvals,conf_int = bundleToGLS(systematic_sample,gvg.model)
samples = map(lambda i : systSelection(new_data,i), range(20,2,-1))
samples = map(lambda i : randomSelection(new_data,3000),range(100))
plt.plot(map(lambda s : s.shape[0],samples))
"""
Explanation: Model Analysis with the empirical variogram
End of explanation
"""
### read csv files
conf_ints = pd.read_csv("/outputs/gls_confidence_int.csv")
params = pd.read_csv("/outputs/params_gls.csv")
params2 = pd.read_csv("/outputs/params2_gls.csv")
pvals = pd.read_csv("/outputs/pvalues_gls.csv")
pnobs = pd.read_csv("/outputs/n_obs.csv")
prsqs = pd.read_csv("/outputs/rsqs.csv")
params
conf_ints
pvals
plt.plot(pnobs.n_obs,prsqs.rsq)
plt.title("$R^2$ statistic for GLS on logBiomass ~ logSppn using Sp.autocor")
plt.xlabel("Number of observations")
tt = params.transpose()
tt.columns = tt.iloc[0]
tt = tt.drop(tt.index[0])
plt.plot(pnobs.n_obs,tt.Intercept)
plt.title("Intercept parameter")
plt.plot(pnobs.n_obs,tt.logSppN)
plt.title("logSppn parameter")
"""
Explanation: Analysis and Results for the systematic sample
End of explanation
"""
ccs = map(lambda s : bundleToGLS(s,gvg.model),samples)
#bundleToGLS(samples[22],gvg.model)
covMat = buildSpatialStructure(samples[8],gvg.model)
#np.linalg.pinv(covMat)
calculateGLS(samples[8],covMat)
#tt = covMat.flatten()
secvg = tools.Variogram(samples[8],'logBiomass',model=gvg.model)
DM = secvg.distance_coordinates
dm = DM.flatten()
dm.sort()
pdm = pd.DataFrame(dm)
xxx = pdm.loc[pdm[0] > 0].sort()
xxx.shape
8996780 + 3000 - (3000 * 3000)
pdm.shape
dd = samples[22].drop_duplicates(subset=['newLon','newLat'])
secvg2 = tools.Variogram(dd,'logBiomass',model=gvg.model)
covMat = buildSpatialStructure(dd,gvg.model)
calculateGLS(dd,covMat)
samples[22].shape
gvg.model.corr_f(xxx.values())
kk
gvg.model.corr_f([100])
gvg.model.corr_f([10])
"""
Explanation: Test for analysis
End of explanation
"""
|
dereneaton/ipyrad | tests/cookbook-structure-pedicularis.ipynb | gpl-3.0 | ## conda install ipyrad -c ipyrad
## conda install structure -c ipyrad
## conda install clumpp -c ipyrad
## conda install toytree -c eaton-lab
"""
Explanation: Cookbook: Parallelized STRUCTURE analyses on unlinked SNPs
As part of the ipyrad.analysis toolkit we've created convenience functions for easily distributing STRUCTURE analysis jobs on an HPC cluster, and for doing so in a programmatic and reproducible way. Importantly, our workflow allows you to easily sample different distributions of unlinked SNPs among replicate analyses, with the final inferred population structure summarized from a distribution of replicates. We also provide some simple examples of interactive plotting functions to make barplots.
Why structure?
Although there are many newer and faster implementations of STRUCTURE, such as faststructure or admixture, the original structure works much better with missing data, which is of course a common feature of RAD-seq data sets.
A note on Jupyter/IPython
This is a Jupyter notebook, a reproducible and executable document. The code in this notebook is Python (2.7), and should be executed either in a jupyter-notebook, like this one, or in an IPython terminal. Execute each cell in order to reproduce our entire analysis. We make use of the ipyparallel Python library to distribute STRUCTURE jobs across processers in parallel (more can be found about that here). The example data set used in this analysis is from the empirical example ipyrad tutorial.
Required software
You can easily install the required software for this notebook with a locally installed conda environment. Just run the commented code below in a terminal. If you are working on an HPC cluster you do not need administrator privileges to install the software in this way, since it is only installed locally.
End of explanation
"""
import ipyrad.analysis as ipa ## ipyrad analysis toolkit
import ipyparallel as ipp ## parallel processing
import toyplot ## plotting library
"""
Explanation: Import Python libraries
End of explanation
"""
##
## ipcluster start --n=4
##
## get parallel client
ipyclient = ipp.Client()
print "Connected to {} cores".format(len(ipyclient))
"""
Explanation: Parallel cluster setup
Start an ipcluster instance in a separate terminal. An easy way to do this in a jupyter-notebook running on an HPC cluster is to go to your Jupyter dashboard, and click [new], and then [terminal], and run 'ipcluster start' in that terminal. This will start a local cluster on the compute node you are connected to. See our [ipyparallel tutorial] (coming soon) for further details.
End of explanation
"""
## set N values of K to test across
kvalues = [2, 3, 4, 5, 6]
## init an analysis object
s = ipa.structure(
name="quick",
workdir="./analysis-structure",
data="./analysis-ipyrad/pedic-full_outfiles/pedic-full.ustr",
)
## set main params (use much larger values in a real analysis)
s.mainparams.burnin = 1000
s.mainparams.numreps = 5000
## submit N replicates of each test to run on parallel client
for kpop in kvalues:
s.run(kpop=kpop, nreps=4, ipyclient=ipyclient)
## wait for parallel jobs to finish
ipyclient.wait()
## return the evanno table (deltaK) for best K
etable = s.get_evanno_table(kvalues)
etable
## get admixture proportion tables avg'd across reps
tables = s.get_clumpp_table(kvalues, quiet=True)
## plot bars for a k-test in tables w/ hover labels
table = tables[3].sort_values(by=[0, 1, 2])
toyplot.bars(
table,
width=500,
height=200,
title=[[i] for i in table.index.tolist()],
xshow=False,
);
"""
Explanation: Quick guide (tldr;)
The following cell shows the quickest way to results. Further explanation of all of the features and options is provided further below.
End of explanation
"""
## the structure formatted file
strfile = "./analysis-ipyrad/pedic-full_outfiles/pedic-full.str"
## an optional mapfile, to sample unlinked SNPs
mapfile = "./analysis-ipyrad/pedic-full_outfiles/pedic-full.snps.map"
## the directory where outfiles should be written
workdir = "./analysis-structure/"
"""
Explanation: Full guide
Enter input and output file locations
The .str file is a structure formatted file output by ipyrad. It includes all SNPs present in the data set. The .snps.map file is an optional file that maps which loci each SNP is from. If this file is used then each replicate analysis will randomly sample a single SNP from each locus in reach rep. The results from many reps therefore will represent variation across unlinked SNP data sets, as well as variation caused by uncertainty. The workdir is the location where you want output files to be written and will be created if it does not already exist.
End of explanation
"""
## create a Structure object
struct = ipa.structure(name="structure-test",
data=strfile,
mapfile=mapfile,
workdir=workdir)
"""
Explanation: Create a Structure Class object
Structure is kind of an old fashioned program that requires creating quite a few input files to run, which makes it not very convenient to use in a programmatic and reproducible way. To work around this we've created a convenience wrapper object to make it easy to submit Structure jobs and to summarize their results.
End of explanation
"""
## set mainparams for object
struct.mainparams.burnin = 10000
struct.mainparams.numreps = 100000
## see all mainparams
print struct.mainparams
## see or set extraparams
print struct.extraparams
"""
Explanation: Set parameter options for this object
Our Structure object will be used to submit jobs to the cluster. It has associated with it a name, a set of input files, and a large number of parameter settings. You can modify the parameters by setting them like below. You can also use tab-completion to see all of the available options, or print them like below. See the full structure docs here for further details on the function of each parameter. In support of reproducibility, it is good practice to print both the mainparams and extraparams so it is clear which options you used.
End of explanation
"""
## a range of K-values to test
tests = [3, 4, 5, 6]
## submit batches of 20 replicate jobs for each value of K
for kpop in tests:
struct.run(
kpop=kpop,
nreps=20,
seed=12345,
ipyclient=ipyclient,
)
"""
Explanation: Submit jobs to run on the cluster
The function run() distributes jobs to run on the cluster and load-balances the parallel workload. It takes a number of arguments. The first, kpop, is the number of populations. The second, nreps, is the number of replicated runs to perform. Each rep has a different random seed, and if you entered a mapfile for your Structure object then it will subsample unlinked snps independently in each replicate. The seed argument can be used to make the replicate analyses reproducible. The extraparams.seed parameter will be generated from this for each replicate. And finally, provide it the ipyclient object that we created above. The structure object will store an asynchronous results object for each job that is submitted so that we can query whether the jobs are finished yet or not. Using a simple for-loop we'll submit 20 replicate jobs to run at four different values of K.
End of explanation
"""
## see submitted jobs (we query first 10 here)
struct.asyncs[:10]
## query a specific job result by index
if struct.asyncs[0].ready():
print struct.asyncs[0].result()
## block/wait until all jobs finished
ipyclient.wait()
"""
Explanation: Track progress until finished
You can check for finished results by using the get_clumpp_table() function, which tries to summarize the finished results files. If no results are ready it will simply print a warning message telling you to wait. If you want the notebook to block/wait until all jobs are finished then execute the wait() function of the ipyclient object, like below.
End of explanation
"""
## set some clumpp params
struct.clumppparams.m = 3 ## use largegreedy algorithm
struct.clumppparams.greedy_option = 2 ## test nrepeat possible orders
struct.clumppparams.repeats = 10000 ## number of repeats
struct.clumppparams
## run clumpp for each value of K
tables = struct.get_clumpp_table(tests)
## return the evanno table w/ deltaK
struct.get_evanno_table(tests)
"""
Explanation: Summarize replicates with CLUMPP
We ran 20 replicates per K-value hypothesis. We now need to concatenate and purmute those results so they can be summarized. For this we use the software clumpp. The default arguments to clumpp are generally good, but you can modify them the same as structure params, by accessing the .clumppparams attribute of your structure object. See the clumpp documentation for more details. If you have a large number of samples (>50) you may wish to use the largeKgreedy algorithm (m=3) for faster runtimes. Below we run clumpp for each value of K that we ran structure on. You only need to tell the get_clumpp_table() function the value of K and it will find all of the result files given the Structure object's name and workdir.
End of explanation
"""
## custom sorting order
myorder = [
"32082_przewalskii",
"33588_przewalskii",
"41478_cyathophylloides",
"41954_cyathophylloides",
"29154_superba",
"30686_cyathophylla",
"33413_thamno",
"30556_thamno",
"35236_rex",
"40578_rex",
"35855_rex",
"39618_rex",
"38362_rex",
]
print "custom ordering"
print tables[4].ix[myorder]
"""
Explanation: Sort the table order how you like it
This can be useful if, for example, you want to order the names to be in the same order as tips on your phylogeny.
End of explanation
"""
def hover(table):
    """Build per-cell hover labels for a structure barplot.

    Returns a list (one entry per row) of lists (one per column) of
    strings describing the sample name, ancestry group, and admixture
    proportion -- the nested-list format expected by toyplot's ``title``
    argument.
    """
    hover = []
    for row in range(table.shape[0]):
        stack = []
        for col in range(table.shape[1]):
            # .iloc replaces DataFrame.ix, which was removed from pandas.
            label = "Name: {}\nGroup: {}\nProp: {}"\
                    .format(table.index[row],
                            table.columns[col],
                            table.iloc[row, col])
            stack.append(label)
        hover.append(stack)
    return list(hover)
"""
Explanation: A function for adding an interactive hover to our plots
The function automatically parses the table above for you. It can reorder the individuals based on their membership in each group, or based on an input list of ordered names. It returns the table of data as well as a list with information for making interactive hover boxes, which you can see below by hovering over the plots.
End of explanation
"""
for kpop in tests:
## parse outfile to a table and re-order it
table = tables[kpop]
table = table.ix[myorder]
## plot barplot w/ hover
canvas, axes, mark = toyplot.bars(
table,
title=hover(table),
width=400,
height=200,
xshow=False,
style={"stroke": toyplot.color.near_black},
)
"""
Explanation: Visualize population structure in barplots
Hover over the plot to see sample names and info in the hover box.
End of explanation
"""
## save plots for your favorite value of K
table = struct.get_clumpp_table(kpop=3)
# .loc replaces the removed DataFrame.ix accessor (label-based reorder).
table = table.loc[myorder]
## further styling of plot with css
style = {"stroke":toyplot.color.near_black,
         "stroke-width": 2}
## build barplot
canvas = toyplot.Canvas(width=600, height=250)
axes = canvas.cartesian(bounds=("5%", "95%", "5%", "45%"))
axes.bars(table, title=hover(table), style=style)
## add names to x-axis
ticklabels = table.index.tolist()  # already a list; no comprehension copy needed
axes.x.ticks.locator = toyplot.locator.Explicit(labels=ticklabels)
axes.x.ticks.labels.angle = -60
axes.x.ticks.show = True
axes.x.ticks.labels.offset = 10
axes.x.ticks.labels.style = {"font-size": "12px"}
axes.x.spine.style = style
axes.y.show = False
## options: uncomment to save plots. Only html retains hover.
import toyplot.svg
import toyplot.pdf
import toyplot.html
toyplot.svg.render(canvas, "struct.svg")
toyplot.pdf.render(canvas, "struct.pdf")
toyplot.html.render(canvas, "struct.html")
## show in notebook
canvas
"""
Explanation: Make a slightly fancier plot and save to file
End of explanation
"""
struct.get_evanno_table([3, 4, 5, 6])
"""
Explanation: Calculating the best K
Use the .get_evanno_table() function.
End of explanation
"""
struct.get_evanno_table([3, 4, 5, 6], max_var_multiple=50.)
"""
Explanation: Testing for convergence
The .get_evanno_table() and .get_clumpp_table() functions each take an optional argument called max_var_multiple, which is the max multiple by which you'll allow the variance in a 'replicate' run to exceed the minimum variance among replicates for a specific test. In the example below you can see that many reps were excluded for the higher values of K, such that fewer reps were analyzed for the final results. By excluding the reps that had much higher variance than other (one criterion for asking if they converged) this can increase the support for higher K values. If you apply this method take care to think about what it is doing and how to interpret the K values. Also take care to consider whether your replicates are using the same input SNP data but just different random seeds, or if you used a map file, in which case your replicates represent different sampled SNPs and different random seeds. I'm of the mind that there is no true K value, and sampling across a distribution of SNPs across many replicates gives you a better idea of the variance in population structure in your data.
End of explanation
"""
|
kazukiotsuka/mongobase | tutorial/MongoBase_starting_guide.ipynb | mit | %load_ext autoreload
%autoreload 2
%matplotlib inline
import sys
import time
import threading
import multiprocessing
import datetime as dt
from mongobase.mongobase import MongoBase, db_context
from bson import ObjectId
"""
Explanation: MongoBase starting guide
End of explanation
"""
# ObjectIds embed their creation timestamp, so ids created later compare
# greater; sleep between constructions to guarantee distinct timestamps.
x = ObjectId()
time.sleep(1)
y = ObjectId()
time.sleep(1)
z = ObjectId()
x
str(x)  # 24-character hex string representation
x.generation_time  # creation time decoded from the id
y.generation_time
x < y and y < z  # ids sort by creation order
"""
Explanation: ObjectId
First, let's talk about ObjectId.
End of explanation
"""
class Bird(MongoBase):
    """Demo document model mapped to the ``birds`` MongoDB collection."""
    __collection__ = 'birds'
    # Field schema: document key -> expected Python type.
    __structure__ = {
        '_id': ObjectId,
        'name': str,
        'age': int,
        'is_able_to_fly': bool,
        'created': dt.datetime,
        'updated': dt.datetime
    }
    # Fields that must be present when saving a document.
    __required_fields__ = ['_id', 'name']
    # NOTE(review): these default expressions are evaluated ONCE at class
    # definition time, so every instance that relies on them would share the
    # same ObjectId and timestamps -- verify whether MongoBase re-evaluates
    # defaults per instance; otherwise pass fresh values explicitly (as the
    # cells below do for '_id').
    __default_values__ = {
        '_id': ObjectId(),
        'is_able_to_fly': False,
        'created': dt.datetime.now(dt.timezone.utc),
        'updated': dt.datetime.now(dt.timezone.utc)
    }
    # No custom validators or secondary indexes for this demo model.
    __validators__ = {}
    __indexed_keys__ = {}
"""
Explanation: Actually, ObjectId is usuful. It is unique, sortable and memory efficient.
http://api.mongodb.com/python/current/api/bson/objectid.html
An ObjectId is a 12-byte unique identifier consisting of:
a 4-byte value representing the seconds since the Unix epoch,
a 3-byte machine identifier,
a 2-byte process id, and
a 3-byte counter, starting with a random value.
And also ObjectId is fast for inserting or indexing. The index size is small.
https://github.com/Restuta/mongo.Guid-vs-ObjectId-performance
Define a database model
So now, we create a simple test collection with MongoBase.
End of explanation
"""
chicken = Bird({'_id': ObjectId(), 'name': 'chicken', 'age': 3})
chicken
chicken._id.generation_time
"""
Explanation: The __structure__ part represents the definition of the model.
Basic instractions. (insert, update, find, remove)
Let's try basic instractions like inserts, updates, find and remove.
Firstly, let's begin with creating an instance to be stored.
End of explanation
"""
chicken.save()
"""
Explanation: Good chicken. Let's save while it is fresh.
End of explanation
"""
chicken.is_able_to_fly
chicken.is_able_to_fly = True
chicken.update()
"""
Explanation: Chickens are considered to be unable to fly by default. We can let it be enable by updating.
End of explanation
"""
chicken.age = 5
chicken = chicken.update()
assert chicken.age == 5, 'something wrong on update()'
chicken = Bird.findAndUpdateById(chicken._id, {'age': 6})
assert chicken.age == 6, 'something wrong on findAndUpdateById()'
"""
Explanation: You would be able to see 'is_able_to_fly': True.
Chickens grow up in several ways.
End of explanation
"""
mother_chicken = Bird({'_id': ObjectId(), 'name': 'mother chicken', 'age': 63})
mother_chicken.save()
"""
Explanation: Next let's try find methods.
End of explanation
"""
Bird.findOne({'name': 'mother chicken'})
"""
Explanation: Now we can retrieve the same document from database.
End of explanation
"""
mother_chicken.remove()
if not Bird.findOne({'_id': mother_chicken._id}):
print('Yes. The mother chicken not found. Someone might ate it.')
"""
Explanation: It is the same chicken, isn't it? great. Let's clear (eat) it.
End of explanation
"""
all_chickens = Bird.find({'name': 'chicken'}, sort=[('_id', 1)])
len(all_chickens)
"""
Explanation: Now we get all chickens which we stored so far.
End of explanation
"""
Bird.count({'name': 'chicken'})
"""
Explanation: Or we can count with count() method directly.
End of explanation
"""
all_chickens[-1]._id.generation_time == chicken._id.generation_time
"""
Explanation: Let's check if the latest chicken is equal to the one which we just saved.
End of explanation
"""
# Inside db_context, operations that receive db= target the 'test'
# database instead of the model's default connection.
with db_context(db_uri='localhost', db_name='test') as db:
    print(db)
    flamingo = Bird({'_id': ObjectId(), 'name': 'flamingo', 'age': 20})
    flamingo.save(db=db)
    flamingo.age = 23
    flamingo = flamingo.update(db=db)
    assert flamingo.age == 23, 'something wrong on update()'
    flamingo = Bird.findAndUpdateById(flamingo._id, {'age': 24}, db=db)
    assert flamingo.age == 24, 'something wrong on findAndUpdateById()'
    n_flamingo = Bird.count({'name': 'flamingo'}, db=db)
    print(f'{n_flamingo} flamingo found in the test database.')
# Outside the context the default database is used again, so the flamingo
# written to 'test' must not be visible here.
n_flamingo = Bird.count({'name': 'flamingo'})
print(f'{n_flamingo} flamingo found in the default database.')
assert n_flamingo == 0
"""
Explanation: That is True, right?
Contextual database
MongoBase automatically creates mongodb client for each process.
But in some cases, some instances must be written or read for a different client or db.
If you use db context, it uses a designated database within the context.
Let's get try on it.
End of explanation
"""
many_pigeon = []
for i in range(10000):
many_pigeon += [Bird({'_id': ObjectId(), 'name': f'pigeon', 'age': i})]
print(many_pigeon[1])
%%time
Bird.bulk_insert(many_pigeon)
Bird.count({'name': 'pigeon'})
"""
Explanation: Bulk Operation
Many insert operations takes a large computing cost. Fortunately, MongoDB provides an operation named "bulk write".
It enables to insert many documents in one operation.
Bulk Insert
End of explanation
"""
updates = []
for pigeon in many_pigeon:
pigeon.age *= 3
updates += [pigeon]
%%time
print(len(updates))
Bird.bulk_update(updates)
"""
Explanation: Bulk Update
End of explanation
"""
%%time
# Read every pigeon back individually and confirm the bulk update tripled
# each age (documents were inserted with age == i, so it is now i*3).
for i, pigeon in enumerate(many_pigeon):
    check = Bird.findOne({'_id': pigeon._id})
    assert check.age == i*3
"""
Explanation: Check if all ages are updated
End of explanation
"""
Bird.delete({'name': 'pigeon'})
"""
Explanation: No error? Cool.
End of explanation
"""
def breed(i):
    """Thread worker: insert one sparrow, then bump its age via update().

    Prints success or any exception together with the current thread so the
    interleaving of concurrent writers is visible in the notebook output.
    """
    try:
        # 'sparrow' had a needless f-prefix (no placeholder) in the original.
        sparrow = Bird({'_id': ObjectId(), 'name': 'sparrow', 'age': 0})
        sparrow.save()
        sparrow.age += 1
        sparrow.update()
    except Exception as e:
        print(f'Exception occured. {e} in thread {threading.current_thread()}')
    else:
        print(f'{i} saved in thread {threading.current_thread()}.')
"""
Explanation: Multi Threading and Processing
End of explanation
"""
%%time
for i in range(1000):
t = threading.Thread(target=breed, name=f'breed sparrow {i}', args=(i,))
t.start()
Bird.delete({'name':'sparrow'})
"""
Explanation: Threading (using the same memory space)
The threading module uses threads, the multiprocessing module uses processes. The difference is that threads run in the same memory space, while processes have separate memory. This makes it a bit harder to share objects between processes with multiprocessing. Since threads use the same memory, precautions have to be taken or two threads will write to the same memory at the same time. This is what the global interpreter lock is for.
https://stackoverflow.com/questions/3044580/multiprocessing-vs-threading-python
End of explanation
"""
def breed2(tasks):
    """Process worker: insert and update one sparrow per entry in *tasks*.

    Each forked process must create its own MongoDB client -- PyMongo
    clients are not fork-safe, so the parent's client cannot be reused.
    """
    db = Bird._db() # create a MongoDB Client for the forked process
    try:
        # The loop index was unused; iterate a plain counter instead.
        for _ in range(len(tasks)):
            sparrow = Bird({'_id': ObjectId(), 'name': 'sparrow', 'age': 0})
            sparrow.save(db=db)
            sparrow.age += 1
            sparrow.update(db=db)
    except Exception as e:
        print(f'Exception occured. {e} in process {multiprocessing.current_process()}')
    else:
        print(f'{len(tasks)} sparrow saved in process {multiprocessing.current_process()}.')
%%time
print(f'{multiprocessing.cpu_count()} cpu resources found.')
tasks = [[f'sparrow {i}' for i in range(250)] for j in range(4)]
process_pool = multiprocessing.Pool(4)
process_pool.map(breed2, tasks)
"""
Explanation: Multiprocessing (using the separated memory for each process)
PyMongo is not fork-safe. Care must be taken when using instances of MongoClient with fork(). Specifically, instances of MongoClient must not be copied from a parent process to a child process. Instead, the parent process and each child process must create their own instances of MongoClient. Instances of MongoClient copied from the parent process have a high probability of deadlock in the child process due to the inherent incompatibilities between fork(), threads, and locks described below. PyMongo will attempt to issue a warning if there is a chance of this deadlock occurring.
http://api.mongodb.com/python/current/faq.html#pymongo-fork-safe%3E
End of explanation
"""
|
pligor/predicting-future-product-prices | 02_preprocessing/exploration08-price_history_gaussian_process_regressor_memory_errors.ipynb | agpl-3.0 | from __future__ import division
import numpy as np
import pandas as pd
import sys
import math
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
import re
import os
import csv
from helpers.outliers import MyOutliers
from skroutz_mobile import SkroutzMobile
from sklearn.ensemble import IsolationForest
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, r2_score
from skroutz_mobile import SkroutzMobile
from sklearn.model_selection import StratifiedShuffleSplit
from helpers.my_train_test_split import MySplitTrainTest
from sklearn.preprocessing import StandardScaler
from preprocess_price_history import PreprocessPriceHistory
from price_history import PriceHistory
from dfa import dfa
import scipy.signal as ss
from scipy.spatial.distance import euclidean
from fastdtw import fastdtw
from sklearn.cluster import KMeans
from sklearn.preprocessing import MinMaxScaler
import random
from sklearn.metrics import silhouette_score
from os.path import isfile
from preprocess_price_history import PreprocessPriceHistory
from os.path import isfile
from sklearn.gaussian_process import GaussianProcessRegressor
from mobattrs_price_history_merger import MobAttrsPriceHistoryMerger
from george import kernels
import george
from sklearn.manifold import TSNE
import matplotlib as mpl
random_state = np.random.RandomState(seed=16011984)
%matplotlib inline
mpl.rc('figure', figsize=(17,7)) #setting the default value of figsize for our plots
#https://matplotlib.org/users/customizing.html
maxlag = 4
data_path = '../../../../Dropbox/data'
mobattrs_ph_path = data_path + '/mobattrs_price_history'
mobattrs_ph_norm_path = mobattrs_ph_path + '/mobattrs_ph_norm.npy'
"""
Explanation: http://nbviewer.jupyter.org/github/alexminnaar/time-series-classification-and-clustering/blob/master/Time%20Series%20Classification%20and%20Clustering.ipynb
End of explanation
"""
csv_in = "../price_history_03_seq_start_suddens_trimmed.csv"
#csv_out = "../price_history_for_sfa.csv"
#df_fixed_width.to_csv(csv_path, encoding='utf-8', quoting=csv.QUOTE_ALL)
ph = PriceHistory(csv_in)
seq = ph.extractSequenceByLocation(0)
print type(seq)
seq.shape, seq.name
#seq
#plt.figure(figsize=(16,6))
seq.plot()
plt.show()
plt.figure(figsize=(16,6))
seq.hist()
plt.show()
"""
Explanation: Some processing
End of explanation
"""
from statsmodels.tsa.stattools import adfuller
def dickey_fuller_print(sequence):
    """Run the Augmented Dickey-Fuller test on *sequence* and print the
    test statistic, p-value and critical values."""
    stat, pvalue, _lags, _nobs, crit = adfuller(sequence)[:5]
    print('ADF Statistic: %f' % stat)
    print('p-value: %f' % pvalue)
    print('Critical Values:')
    for level, threshold in crit.items():
        print('\t%s: %.3f' % (level, threshold))
def dickey_fuller_test(sequence, threshold=0.05):
    """Return True if the ADF test rejects the unit-root null at *threshold*.

    Uses the notebook-global ``maxlag`` when fitting the test regression.
    """
    pvalue = adfuller(sequence, maxlag=maxlag)[1]
    return pvalue < threshold
dickey_fuller_print(seq)
"""
Explanation: Dickey Fuller Test
http://machinelearningmastery.com/time-series-data-stationary-python/
End of explanation
"""
gp = GaussianProcessRegressor()
xx = 10 * np.sort(np.random.rand(15))
xx.shape
yerr = 0.2 * np.ones_like(xx)
yerr
yy = np.sin(xx) + yerr * np.random.randn(len(xx))
yy.shape
plt.errorbar(xx, yy, yerr=yerr, fmt='.k', capsize=0)
plt.show()
"""
Explanation: Quick Gaussian Process Regressor test
End of explanation
"""
kernel = np.var(yy) * kernels.ExpSquaredKernel(0.5)
kernel
gp = george.GP(kernel)
gp
gp.compute(xx, yerr=yerr)
xx_pred = np.linspace(0, 15, 500)
xx_pred.shape
pred, pred_var = gp.predict(yy, xx_pred, return_var=True)
plt.figure(figsize=(17,7))
plt.fill_between(xx_pred, pred - np.sqrt(pred_var), pred + np.sqrt(pred_var),
color="k", alpha=0.2)
plt.plot(xx_pred, pred, "k", lw=1.5, alpha=0.5)
plt.errorbar(xx, yy, yerr=yerr, fmt=".k", capsize=0)
plt.plot(xx_pred, np.sin(xx_pred), "--g")
plt.xlim(0, 15)
plt.ylim(-1.45, 1.45)
plt.xlabel("x")
plt.ylabel("y");
"""
Explanation: Gaussian Process George Library
End of explanation
"""
mobiles_path = data_path + '/mobiles'
mobs_norm_path = mobiles_path + '/mobiles_norm.csv'
assert isfile(mobs_norm_path)
df = pd.read_csv(mobs_norm_path, index_col=0, encoding='utf-8', quoting=csv.QUOTE_ALL)
df.shape
seqs = ph.extractAllSequences()
len(seqs)
"""
Explanation: Loading data
End of explanation
"""
obj = MobAttrsPriceHistoryMerger(mobs_norm_path=mobs_norm_path, price_history_csv=csv_in)
%%time
arr = obj.get_table()
arr.shape
mobattrs_ph_raw_path = mobattrs_ph_path + '/mobattrs_ph_raw.npy'
mobattrs_ph_raw_path
np.save(mobattrs_ph_raw_path, arr)
assert isfile(mobattrs_ph_raw_path)
"""
Explanation: merging
End of explanation
"""
obj.get_mob_attrs_indices()
obj.get_date_indices()
arr_norm = obj.normalize_date_columns(arr)
for cur_date_ind in obj.get_date_indices():
cur_col = arr_norm[:, cur_date_ind]
cur_mean = np.mean(cur_col)
cur_std = np.std(cur_col)
assert np.allclose(arr_norm[:, cur_date_ind], (cur_col - cur_mean) / cur_std)
#Let's also normalize the price, the targets
price_col = arr_norm[:, MobAttrsPriceHistoryMerger.PRICE_IND]
price_mean = np.mean(price_col)
price_std = np.std(price_col)
arr_norm[:, MobAttrsPriceHistoryMerger.PRICE_IND] = (price_col - price_mean) / price_std
np.save(mobattrs_ph_norm_path, arr_norm)
assert isfile(mobattrs_ph_norm_path)
arr_norm.shape
"""
Explanation: normalizing price history info
actually normalizing the columns related to dates and the targets, the price itself
End of explanation
"""
arr_norm = np.load(mobattrs_ph_norm_path)
arr_norm.shape
XX = arr_norm[:, :MobAttrsPriceHistoryMerger.PRICE_IND]
XX.shape
yy = arr_norm[:, MobAttrsPriceHistoryMerger.PRICE_IND]
yy.shape
# gp = GaussianProcessRegressor(copy_X_train=False)
# gp.fit(XX, yy)
"""
Explanation: Gaussian Process Regressor
End of explanation
"""
from sklearn.decomposition import PCA
# Full PCA (all components kept) to inspect the explained-variance spectrum.
pca = PCA()
%%time
pca.fit(XX)
# Scree plot: variance ratio captured by each successive component.
plt.figure(1, figsize=(12, 5))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(pca.explained_variance_ratio_, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('explained_variance_')
plt.show()
# Cumulative explained variance per number of retained components --
# np.cumsum performs the same element-by-element running sum as the loop.
explainedVars = list(np.cumsum(pca.explained_variance_ratio_))
# The running total ends at the grand total (kept for parity with the
# original loop variable).
explainedVar = explainedVars[-1] if explainedVars else 0
plt.figure(1, figsize=(16, 5))
plt.clf()
plt.axes([.2, .2, .7, .7])
plt.plot(explainedVars, linewidth=2)
plt.axis('tight')
plt.xlabel('n_components')
plt.ylabel('cumulative explained variance')
plt.show()
# Number of components needed to reach 95% cumulative explained variance.
# NOTE: the original initialized the counter at -1, which under-counted the
# required components by one (PCA was then fit with too few components to
# actually reach 95%); starting at 0 makes the final value the true count.
explainedVariancePercentage = 0
pointWherVarExplainedOver95percent = 0
ii = 0
while explainedVariancePercentage < 0.95:
    explainedVariancePercentage += pca.explained_variance_ratio_[ii]
    pointWherVarExplainedOver95percent += 1
    ii += 1
pointWherVarExplainedOver95percent
pca_reduced = PCA(n_components=pointWherVarExplainedOver95percent)
%%time
XX_reduced = pca_reduced.fit_transform(XX)
XX_reduced.shape
# gp = GaussianProcessRegressor(copy_X_train=False)
# gp.fit(XX_reduced, yy)
sum(explainedVars[:2])
pca_2d = PCA(n_components=2, random_state=random_state)
pca_2d
%time
XX_pca2d = pca_2d.fit_transform(XX)
XX_pca2d.shape
plt.scatter(XX_pca2d[:, 0], XX_pca2d[:, 1], marker ='.')
plt.show()
#memory error
# gp = GaussianProcessRegressor(copy_X_train=False)
# gp.fit(XX_pca2d, yy)
XX_1 = PCA(n_components=1, random_state=random_state).fit_transform(XX)
XX_1.shape
#memory error
# gp = GaussianProcessRegressor(copy_X_train=False)
# gp.fit(XX_1, yy)
"""
Explanation: Dimensionality reduction
End of explanation
"""
from time import sleep
# Probe for the largest training-set size the GP regressor can fit without
# exhausting memory: grow the size by 1.5x after each successful fit, halve
# it on MemoryError, and stop once the size no longer changes between
# iterations. (Python 2 print syntax, matching the rest of this notebook.)
count_prev = None
count = len(XX)
while count_prev != count:
    count_prev = count
    try:
        gp = GaussianProcessRegressor(copy_X_train=False)
        gp.fit(XX[:count], yy[:count])
        count = int(count * 1.5)
    except MemoryError:
        print "memory error"
        count //= 2
    print count
    sleep(1)
"""
Explanation: Conclusion
is that the issue is with the large number of rows / instances that bring the gaussian process regressor to memory error even if we use only the single first principal component. There is nothing more of a dimensionality reduction we could do
Let's see how much instances GP regressor can handle without hitting out of memory error
End of explanation
"""
tsne = TSNE(n_components=2, perplexity=2)
#memory error
# XX_tsne2 = tsne.fit_transform(XX)
# XX_tsne2.shape
#memory error
# %%time
# XX_tsne2 = tsne.fit_transform(XX_reduced)
"""
Explanation: t-SNE dimensionality reduction
let's bring on the big guns
End of explanation
"""
from sklearn.manifold import Isomap
isomap = Isomap(n_components=2, n_neighbors=2)
#Memory Error
# %%time
# XX_isomap = isomap.fit_transform(XX)
%%time
XX_isomap = isomap.fit_transform(XX_reduced)
XX_isomap.shape
plt.scatter(XX[:, 0], XX[:, 1])
plt.show()
"""
Explanation: Isomap Dimensionality Reduction
End of explanation
"""
from sklearn.manifold import MDS
mds = MDS(n_components=2, max_iter=10, n_init=1, random_state=random_state)
#memory error
# %%time
# XX_mds = mds.fit_transform(XX)
#memory error
# %%time
# XX_mds = mds.fit_transform(XX_reduced)
"""
Explanation: MDS dimensionality reduction
End of explanation
"""
from sklearn.decomposition import KernelPCA
rbf_pca = KernelPCA(n_components=2, random_state=random_state, kernel='rbf')
rbf_pca
%%time
XX_rbf = rbf_pca.fit_transform(XX)
"""
Explanation: Kernel PCA
End of explanation
"""
# Random symmetric positive semi-definite metric for the squared-exponential
# kernel (M @ M.T is always PSD).
metric = random_state.randn(XX.shape[1], XX.shape[1])
metric = metric.dot(metric.T)
metric.shape
kernel = kernels.ExpSquaredKernel(metric=metric, ndim=XX.shape[1])
gp = george.GP(kernel)
gp
gp.compute(XX)
xx_pred = np.linspace(0, 15, 500)
xx_pred.shape
pred, pred_var = gp.predict(yy, xx_pred, return_var=True)
plt.figure(figsize=(16,8))
seq.plot()
# Plot the GP mean prediction; the original referenced an undefined name
# `preds` (the predict() call above binds `pred`), raising a NameError.
plt.plot(pred, 'o')
plt.show()
#pred - seq.values
"""
Explanation: George Approach
End of explanation
"""
|
NYUDataBootcamp/Materials | Code/notebooks/bootcamp_adv_scraping.ipynb | mit | import pandas as pd # data package
import matplotlib.pyplot as plt # graphics
import datetime as dt # date tools, used to note current date
%matplotlib inline
"""
Explanation: Web scraping
Date: 28 March 2017
@author: Daniel Csaba
Preliminaries
Import usual packages.
End of explanation
"""
import requests # you might have to install this
url = 'https://newyork.craigslist.org/search/roo?query=east+village&availabilityMode=0'
cl = requests.get(url)
cl
"""
Explanation: We have seen how to input data from csv and xls files -- either online or from our computer and through APIs. Sometimes the data is only available as specific part of a website.
We want to access the source code of the website and systematically extract the relevant information.
Again, use Google fu to find useful links. Here are a couple:
* link 1
* link 2
* link 3
Structure of web pages (very simplistic)
Hypertext Markup Language (HTML) specifies the structure and main content of the site -- tells the browser how to layout content. Think of Markdown.
It is structured using tags.
html
<html>
<head>
(Meta) Information about the page.
</head>
<body>
<p>
This is a paragraph.
</p>
<table>
This is a table
</table>
</body>
</html>
Tags determine the content and layout depending on their relation to other tags. Useful terminology:
child -- a child is a tag inside another tag. The p tag above is a child of the body tag.
parent -- a parent is the tag another tag is inside. The body tag above is a parent of the p tag.
sibling -- a sibling is a tag that is nested inside the same parent as another tag. The head and body tags above are siblings.
There are many different tags -- take a look at a reference list. You won't and shouldn't remember all of them but it's useful to have a rough idea about them.
And take a look at a real example -- open page, then right click: "View Page Source"
In the real example you will see that there is more information after the tag, most commanly a class and an id. Something similar to the following:
html
<html>
<head class='main-head'>
(Meta) Information about the page.
</head>
<body>
<p class='inner-paragraph' id='001'>
This is a paragraph.
<a href="https://www.dataquest.io">Learn Data Science Online</a>
</p>
<table class='inner-table' id='002'>
This is a table
</table>
</body>
</html>
The class and id information will help us in locating the information we are looking for in a systematic way. (Originally, classes and ids are used by CSS to determine which HTML elements to apply certain styles to)
Useful way to explore the html and the corresponding website is right clicking on the web page and then clicking on Inspect element -- interpretation of the html by the browser
Suppose we want to check prices for renting a room in Manhattan in Craigslist. Let's check for example the rooms & shares section for the East Village.
Accessing web pages
We have to download the content of the webpage -- i.e. get the contents structured by the HTML. This we can do with the requests library, which is a human readable HTTP (HyperText Transfer Protocol) library for Python. You can find the Quickstart Documentation here.
End of explanation
"""
cl.status_code
"""
Explanation: After running our request, we get a Response object. This object has a status_code property, which indicates if the page was downloaded successfully.
A status_code of 200 means that the page downloaded successfully.
* status code starting with a 2 generally indicates success
* a code starting with a 4 or a 5 indicates an error.
End of explanation
"""
url = 'https://newyork.craigslist.org/search/roo'
keys = {'query' : 'east village', 'availabilityMode' : '0'}
cl_extra = requests.get(url, params=keys)
# see if the URL was specified successfully
cl_extra.url
"""
Explanation: You might want to query for different things and download information for all of them.
* You can pass this as extra information (defined as a dictionary with keys and values).
* Best way to learn about the available keys is by "changing" things on the site and see how the url changes
End of explanation
"""
cl.url
"""
Explanation: Check tab completion
End of explanation
"""
cl.text[:300]
cl.content[:500] # this works also for information which is not purely text
"""
Explanation: To print out the content of the html file, use the content or text properties
This is going to be ugly and unreadable
End of explanation
"""
from bs4 import BeautifulSoup
BeautifulSoup?
cl_soup = BeautifulSoup(cl.content, 'html.parser')
"""
Explanation: Extracting information from a web page
Now that we have the content of the web page we want to extraxt certain information. BeautifulSoup is a Python package which helps us in doing that. See the documentation for more information.
We first have to import the library, and create an instance of the BeautifulSoup class to parse our document:
End of explanation
"""
#print(cl_soup.prettify())
print('Type:', type(cl_soup))
# we can access a tag
print('Title: ', cl_soup.title)
# or only the text content
print('Title: ', cl_soup.title.text) # or
print('Title: ', cl_soup.title.get_text())
"""
Explanation: Print this out in a prettier way.
End of explanation
"""
cl_soup.find_all?
"""
Explanation: We can find all tags of certain type with the find_all method. This returns a list.
End of explanation
"""
cl_soup.find_all('p')[0]
"""
Explanation: To get the first paragraph in the html write
End of explanation
"""
cl_soup.find_all('p')[0].get_text()
"""
Explanation: This is a lot of information and we want to extract some part of it. Use the text or get_text() method to get the text content.
End of explanation
"""
list(cl_soup.find_all('p')[0].children)
"""
Explanation: This is still messy. We will need a smarter search.
As all the tags are nested, we can move through the structure one level at a time. We can first select all the elements at the top level of the page using the children property of soup. For example here are the children of the first paragraph tag.
Note: children returns a list iterator, so we need to call the list function on it.
End of explanation
"""
cl_soup.find_all('span', class_='result-price')[0].get_text()
cl_soup.find_all('span', class_='result-price')[:10]
prices = cl_soup.find_all('span', class_='result-price')
price_data = [price.get_text() for price in prices]
price_data[:10]
len(price_data)
"""
Explanation: Look for tags based on their class. This is extremely useful for efficiently locating information.
End of explanation
"""
# Each listing is an <li class="result-row"> element.
cl_soup.find_all('li', class_='result-row')[0]
ads = cl_soup.find_all('li', class_='result-row')
# we can access values of the keys by using a dictionary like syntax
ads[5].find('a', class_='result-title hdrlnk')
ads[5].find('a', class_='result-title hdrlnk')['href']
# NOTE: this raises AttributeError for ads without a listed price --
# find() returns None there and None has no get_text(); the narrative
# below fixes it by filtering on the price tag's presence.
data = [[ad.find('a', class_='result-title hdrlnk').get_text(),
         ad.find('a', class_='result-title hdrlnk')['data-id'],
         ad.find('span', class_='result-price').get_text()] for ad in ads ]
"""
Explanation: We are getting more cells than we want -- there were only 120 listings on the page. Check the ads with "Inspect Element". There are duplicates. We need a different tag level (<li>)
End of explanation
"""
# if it exists then the type is
type(ads[0].find('span', class_='result-price'))
"""
Explanation: What's going wrong? Some ads don't have a price listed, so we can't retrieve it.
End of explanation
"""
import bs4
# Keep only ads that list a price: find() returns None when the tag is
# missing, so checking `is not None` is the idiomatic filter (and avoids
# the `type(x) == SomeClass` anti-pattern -- prefer None checks or
# isinstance for type tests).
data = [[ad.find('a', class_='result-title hdrlnk').get_text(),
         ad.find('a', class_='result-title hdrlnk')['data-id'],
         ad.find('span', class_='result-price').get_text()] for ad in ads
        if ad.find('span', class_='result-price') is not None]
data[:10]
df = pd.DataFrame(data)
df.head(10)
df.shape
"""
Explanation: If it does not find the price, it returns a NoneType. We might exploit this fact to select only the valid links.
End of explanation
"""
df.columns = ['Title', 'ID', 'Price']
df.head()
"""
Explanation: We only have 118 listings because 2 listings did not have a price.
End of explanation
"""
cl_soup.find('span', class_='totalcount')
"""
Explanation: We could do text analysis and see which words are common in ads that have a relatively higher price.
This approach is not really efficient because it only gets the first page of the search results. We see on the top of the CL page the total number of listings. In the Inspection mode we can pick an element from the page and check how it is defined in the html -- this is useful to get tags and classes efficiently.
For example, the total number of ads is a span tag with a 'totalcount' class.
End of explanation
"""
# First we get the total number of listings in real time
url = 'https://newyork.craigslist.org/search/roo?query=east+village&availabilityMode=0'
cl = requests.get(url)
cl_soup = BeautifulSoup(cl.content, 'html.parser')
total_count = int(cl_soup.find('span', class_='totalcount').get_text())
print(total_count)
"""
Explanation: We can see if we start clicking on the 2nd nd 3rd pages of the results that there is a structure in how they are defined
First page:
https://newyork.craigslist.org/search/roo?query=east+village&availabilityMode=0
Second page:
https://newyork.craigslist.org/search/roo?s=120&availabilityMode=0&query=east%20village
Third page:
https://newyork.craigslist.org/search/roo?s=240&availabilityMode=0&query=east%20village
The number after roo?s= in the domain specifies where the listings are starting from (not inclusive). In fact, if we modify it ourselves we can fine-tune the page starting from the corresponding listing and then showing 120 listings. Try it!
We can also define the first page by puttig s=0& after roo? like this:
https://newyork.craigslist.org/search/roo?s=0&availabilityMode=0&query=east%20village
End of explanation
"""
# 1) Specify the url
for page in range(0, total_count, 120):
print('https://newyork.craigslist.org/search/roo?s={}&availabilityMode=0&query=east%20village'.format(page))
# Next we write a loop to scrape all pages
# Collect one DataFrame per page and concatenate once at the end:
# DataFrame.append was deprecated and removed in pandas 2.0, and repeated
# appends copied the growing frame on every iteration anyway.
frames = []
for page in range(0, total_count, 120):
    # s=<offset> selects which page of 120 listings to fetch.
    url = 'https://newyork.craigslist.org/search/roo?s={}&availabilityMode=0&query=east%20village'.format(page)
    cl = requests.get(url)
    cl_soup = BeautifulSoup(cl.content, 'html.parser')
    ads = cl_soup.find_all('li', class_='result-row')
    # Skip ads with no listed price (find() returns None for those).
    data = pd.DataFrame([[ad.find('a', class_='result-title hdrlnk').get_text(),
                          ad.find('a', class_='result-title hdrlnk')['data-id'],
                          ad.find('span', class_='result-price').get_text()]
                         for ad in ads
                         if ad.find('span', class_='result-price') is not None],
                        columns=['Title', 'ID', 'Price'])
    frames.append(data)
df = pd.concat(frames, ignore_index=True)
df.head()
# Do the same using the `extend` method
# Accumulate rows as plain lists and build the DataFrame once at the end --
# cheaper than growing a DataFrame inside the loop.
data = []
for page in range(0, total_count, 120):
    # s=<offset> selects which page of 120 listings to fetch.
    url = 'https://newyork.craigslist.org/search/roo?s={}&availabilityMode=0&query=east%20village'.format(page)
    cl = requests.get(url)
    cl_soup = BeautifulSoup(cl.content, 'html.parser')
    ads = cl_soup.find_all('li', class_='result-row')
    # Skip ads without a listed price (find() returns None there).
    data_page = [[ad.find('a', class_='result-title hdrlnk').get_text(),
             ad.find('a', class_='result-title hdrlnk')['data-id'],
             ad.find('span', class_='result-price').get_text()] for ad in ads
            if type(ad.find('span', class_='result-price'))==bs4.element.Tag]
    data.extend(data_page)
df = pd.DataFrame(data, columns=['Title', 'ID', 'Price'])
df.head()
df.shape
df.tail()
"""
Explanation: We have the total number of listings with the given search specification. Breaking down the steps:
1) Specify the url of each page we want to scrape
2) For each page scrape the data -- we will reuse the code what we already have for one page
3) Save the data into one dataframe -- we can use the append method for DataFrames or the extend method for lists
End of explanation
"""
from pygeocoder import Geocoder
# check for one of the locations how it's working
# some addresses might not be valid -- it goes through Google's API
loc = Geocoder.geocode('Bozeman, Montana')
loc.coordinates
Geocoder.geocode('Stanford, California').coordinates
"""
Explanation: We have scraped all the listings from CL in section "Rooms and Shares" for the East Village.
Exercise
Suppose you have a couple of destinations in mind and you want to check the weather for each of them for this Friday. You want to get it from the National Weather Service.
These are the places I want to check (suppose there are many more and you want to automate it):
python
locations = ['Bozeman, Montana', 'White Sands National Monument', 'Stanford University, California']
It seems that the NWS is using latitude and longitude coordinates in its search.
i.e. for White Sands
http://forecast.weather.gov/MapClick.php?lat=32.38092788700044&lon=-106.4794398029997
Would be cool to pass these on as arguments.
After some Google fu (i.e. "latitude and longitude of location python") find a post by Chris Albon which describes exactly what we want.
"Geocoding (converting a phyiscal address or location into latitude/longitude) and reverse geocoding (converting a lat/long to a phyiscal address or location)[...] Python offers a number of packages to make the task incredibly easy [...] use pygeocoder, a wrapper for Google's geo-API, to both geocode and reverse geocode.
Install pygeocoder through pip install pygeocoder (from conda only the OSX version is available).
End of explanation
"""
locations = ['Bozeman, Montana', 'White Sands National Monument', 'Stanford University, California']
coordinates = [Geocoder.geocode(location).coordinates for location in locations]
coordinates
for location, coordinate in zip(locations, coordinates):
print('The coordinates of {} are:'.format(location), coordinate)
"""
Explanation: We can check whether it's working fine at http://www.latlong.net/
End of explanation
"""
keys = {}
for location, coordinate in zip(locations, coordinates):
keys[location] = {'lat' : coordinate[0], 'lon' : coordinate[1]}
keys
"""
Explanation: Define a dictionary for the parameters we want to pass to the GET request for NWS server.
End of explanation
"""
keys[locations[0]]
url = 'http://forecast.weather.gov/MapClick.php'
nws = requests.get(url, params=keys[locations[0]])
nws.status_code
nws.url
nws.content[:300]
nws_soup = BeautifulSoup(nws.content, 'html.parser')
seven = nws_soup.find('div', id='seven-day-forecast-container')
seven.find(text='Friday')
seven.find(text='Friday').parent
seven.find(text='Friday').parent.parent
seven.find(text='Friday').parent.parent.find('p', class_='temp temp-high').get_text()
data = []
for location in locations:
nws = requests.get(url, params=keys[location])
nws_soup = BeautifulSoup(nws.content, 'html.parser')
seven = nws_soup.find('div', id='seven-day-forecast-container')
temp = seven.find(text='Friday').parent.parent.find('p', class_='temp temp-high').get_text()
data.append([location, temp])
df_weather = pd.DataFrame(data, columns=['Location', 'Friday weather'])
df_weather
df_weather['high_temp'] = df_weather['Friday weather'].str.rsplit().str.get(1).astype(float)
df_weather['high_temp'].std()
"""
Explanation: Recall the format of the url associated with a particular location
http://forecast.weather.gov/MapClick.php?lat=32.38092788700044&lon=-106.4794398029997
End of explanation
"""
|
statkraft/shyft-doc | notebooks/api/api-intro.ipynb | lgpl-3.0 | %pylab inline
import os
import sys
import datetime as dt
import numpy as np
from matplotlib import pyplot as plt
from netCDF4 import Dataset
# try to auto-configure the path. This will work in the case
# that you have checked out the doc and data repositories
# at same level. Make sure this is done **before** importing shyft
shyft_data_path = os.path.abspath("../../../shyft-data")
if os.path.exists(shyft_data_path) and 'SHYFT_DATA' not in os.environ:
os.environ['SHYFT_DATA']=shyft_data_path
# shyft should be available either by it's install in python
# or by PYTHONPATH set by user prior to starting notebook.
# If you have cloned the repositories according to guidelines:
# shyft_path=os.path.abspath('../../../shyft')
# sys.path.insert(0,shyft_path)
from shyft import api
import shyft.api.pt_gs_k
from shyft.api import shyftdata_dir
"""
Explanation: The Shyft api
Introduction
At its core, Shyft provides functionality through an API (Application Programming Interface). Core functionality of Shyft is available through this API.
We begin the tutorials by introducing the API as it provides the building blocks for the framework. We recommend at least working through the first of these examples. Once you have an understanding of the basic api features, you can move toward configured runs that make use of orchestation. To make use of configured runs, you need to understand how we 'serialize' configurations and input data through repositories.
In a separate of the simulation tutorials, we cover conducting a very simple simulation of an example catchment using configuration files. This is a typical use case, but assumes that you have a model well configured and ready for simulation. In practice, one is interested in working with the model, testing different configurations, and evaluating different data sources. This is in fact a key idea of Shyft -- to make it simple to evaluate the impact of the selection of model routine on the performance of the simulation.
In this notebook we walk through a very simple lower level paradigm of using the Shyft api directly to conduct an individual simulation.
1. Loading required python modules and setting path to SHyFT installation
For the notebook tutorials we require several imports. In addition, be sure your shyft environment is correctly configured. This is required before importing shyft itself. Lastly, import the shyft classes and modules.
End of explanation
"""
# load the data from the example datasets
cell_data = Dataset( os.path.join(shyftdata_dir, 'netcdf/orchestration-testdata/cell_data.nc'))
# plot the coordinates of the cell data provided
# fetch the x- and y-location of the cells
x = cell_data.variables['x'][:]
y = cell_data.variables['y'][:]
z = cell_data.variables['z'][:]
cid = cell_data.variables['catchment_id'][:]
# and make a quick catchment map...
# using a scatter plot of the cells
fig, ax = plt.subplots(figsize=(15,5))
cm = plt.cm.get_cmap('rainbow')
elv_col = ax.scatter(x, y, c=z, marker='.', s=40, lw=0, cmap=cm)
# cm = plt.cm.get_cmap('gist_gray')
# cid_col = ax.scatter(x, y, c=cid, marker='.', s=40, lw=0, alpha=0.4, cmap=cm)
plt.colorbar(elv_col).set_label('catchment elevation [m]')
# plt.colorbar(cid_col).set_label('catchment indices [id]')
plt.title('Nea Nidelva Catchment')
# print(set(cid))
"""
Explanation: 2. Build a Shyft model
The first point of simulation is to define the model that you will create. In this example, we will use Shyft's pure api approach to create a model from scratch.
The simulation domain
What is required to set up a simulation? At the most basic level of Shyft, we need to define the simulation domain / geometry. Shyft does not care about the specific shape of the cells. Shyft just needs a 'geocentroid location' and an area. We will create a container of this information as a first step to provide to one of Shyft's model types later.
We are going to be working with the data from the Nea-Nidelva catchment, example dataset. This is available in the shyft-data repository. Above, you should have set your SHYFT_DATA environment variable to point to this directory so that we can easily read the data.
The first thing to do is to take a look at the geography of our cells.
End of explanation
"""
print(cell_data.variables.keys())
"""
Explanation: Create a collection of simulation cells
In Shyft we work with 'cells', which is the basic simulation unit. In the example netcdf file, we provide the attributes for the cells we are going to plot. But you made need to extract this information from your own GIS, or other data. The essential variables that are minimally required include:
x, generally an easting coordinate in UTM space, [meters]
y, generally a northing coordinate in UTM space, [meters]
z, elevation [meters]
area, the area of the cell, [square meters]
land cover type fractions (these are float values that sum to 1):
glacier
lake
reservoir
forest
unspecified
catchment_id, an integer to associate the cell with a catchment
a radiation factor (set to 0.9 by default)
If you look at the netcdf file, you'll see these are included:
End of explanation
"""
# Let's first create a 'container' that will hold all of our model domains cells:
cell_data_vector = api.GeoCellDataVector()
# help(cell_data_vector)
#help(api.GeoPoint)
# from the netcdf file dimensions
num_cells = cell_data.dimensions['cell'].size
for i in range(num_cells):
gp = api.GeoPoint(x[i], y[i], z[i]) # recall, we extracted x,y,z above
cid = cell_data.variables['catchment_id'][i]
cell_area = cell_data.variables['area'][i]
# land fractions:
glac = cell_data.variables['glacier-fraction'][i]
lake = cell_data.variables['lake-fraction'][i]
rsvr = cell_data.variables['reservoir-fraction'][i]
frst = cell_data.variables['forest-fraction'][i]
unsp = 1 - (glac + lake + rsvr + frst)
land_cover_frac = api.LandTypeFractions(float(glac), float(lake), float(rsvr), float(frst), float(unsp))
rad_fx = 0.9
# note, for now we need to make sure we cast some types to pure python, not numpy
geo_cell_data = api.GeoCellData(gp, float(cell_area), int(cid), rad_fx, land_cover_frac)
cell_data_vector.append(geo_cell_data)
# put it all together to initialize a model, we'll use PTGSK
params = api.pt_gs_k.PTGSKParameter()
model = api.pt_gs_k.PTGSKModel(cell_data_vector, params)
"""
Explanation: So the first step is to extract these from the netcdf file, and get them into the model. Note that extracting these values is a pre-processing step that you will likely use a GIS system for. We do not cover that in this notebook.
GeoCellDataVector
In Shyft, there are many custom classes used to optimize the shuffling of data. Frequently you will encounter some type of XxxVector classes. These are essentially lists, and behave as normal python lists. In the following steps we are going to populate this vector with a collection of type api.GeoCellData, which is a custom Shyft type that contains information about the model cells.
Next, we'll need two other Shyft api classes, the GeoCellData class and the GeoPoint class. These are specialized objects that hold information about each cell. We create these for every cell in the netcdf file. Notice that our netcdf file is quite simple, with one single dimension: cell. So to fill our cell_data_vector, we'll simply iterate over the cell values in the netcdf file.
End of explanation
"""
re = api.ARegionEnvironment()
# map the variable names in the netcdf file to the source types
source_map = {'precipitation' : ('precipitation.nc', api.PrecipitationSource, re.precipitation),
'global_radiation' : ('radiation.nc', api.RadiationSource, re.radiation),
'temperature' : ('temperature.nc', api.TemperatureSource, re.temperature),
'wind_speed' : ('wind_speed.nc', api.WindSpeedSource, re.wind_speed),
'relative_humidity' : ('relative_humidity.nc', api.RelHumSource, re.rel_hum) }
for var, (file_name, source_type, source_vec) in source_map.items():
nci = Dataset( os.path.join(shyftdata_dir, 'netcdf/orchestration-testdata/' + file_name))
time = api.UtcTimeVector([int(t) for t in nci.variables['time'][:]])
delta_t = time[1] - time[0] if len(time) > 1 else api.deltahours(1)
for i in range(nci.dimensions['station'].size):
x = nci.variables['x'][i]
y = nci.variables['y'][i]
z = nci.variables['z'][i]
gp = api.GeoPoint(float(x),float( y), float(z))
data = nci.variables[var][:, i]
time_axis = api.TimeAxis(int(time[0]), delta_t, len(time))
dts = api.TsFactory().create_time_point_ts(time_axis.total_period(),
time, data, api.POINT_AVERAGE_VALUE)
# add it to the variable source vector
source_vec.append(source_type(gp, dts))
nci.close()
# let's take a look at the 'source' data.
# this is exactly the data that will be 'fed' to the interpolation routines
region_environment = re
def plot_station_data(region_environment):
    """Plot every station time series held in the `ARegionEnvironment`.

    One figure is created per forcing variable; each station in the
    corresponding source vector is drawn as a separate labelled line.
    """
    for var_name, source_vector in region_environment.variables:
        fig, ax = plt.subplots(figsize=(15, 5))
        for idx in range(len(source_vector)):
            series = source_vector[idx].ts
            times = [dt.datetime.utcfromtimestamp(p.start) for p in series.time_axis]
            ax.plot(times, series.values, label=idx)
        plt.title(var_name)
        plt.legend()
plot_station_data(region_environment)
"""
Explanation: The model class
In the case of this tutorial, the model class has been instantiated as a api.pt_gs_k.PTGSKModel. This means that it is using the "PTGSK" model stack. You can instantiate models of type: api.pt_gs_k.PTGSKModel, api.pt_hs_k.PTHSKModel, api.pt_ss_k.PTSSKModel, and api.hbv_stack.HbvModel.
These are central structures in Shyft. All the simulation information is contained within this class.
3. Read forcing data
The next step in the simulation is to get the forcing data. For the tutorials we have provided the data in a series of netcdf files located in the shyft-data repository. Recall that when you set up shyft, you are instructed to clone this repository in parallel to the shyft-doc repository. If you have not done this, then you will have to be sure you have set the SHYFT_DATA environment variable correctly.
Below, we are going to use the shyft.api.ARegionEnvironment class to containerize the forcing data. In this tutorial, this data is station based, but it could be gridded forcing data as well. Once we read the data from the netcdf files, we'll inject it into the appropriate SourceVectors which are then used for the interpolation step.
End of explanation
"""
from shyft.time_series import deltahours,TimeAxisFixedDeltaT,Calendar
# next step, distribute the data to the cells
# we need to prepare for the interpolation
# and define the length of the simulation (e.g. time_axis)
# 1. define the time_axis of the simulation:
cal = Calendar()
simulation_ta = api.TimeAxisFixedDeltaT(cal.time(2013, 9, 1, 0, 0, 0), deltahours(24), 365)
# 2. interpolate the data from the region_environment to the model cells:
model_interpolation_parameter = api.InterpolationParameter()
model.run_interpolation(model_interpolation_parameter, simulation_ta, region_environment)
# at this point we could look at the time series for every cell. Or plot a spatial map...
# TODO: https://data-dive.com/cologne-bike-rentals-interactive-map-bokeh-dynamic-choropleth
from ipywidgets import interact
import numpy as np
from bokeh.io import push_notebook, show, output_notebook
from bokeh.plotting import figure
from bokeh.palettes import viridis
forcing_variables = [fv for fv, sv in region_environment.variables]
output_notebook()
p = figure(title='Forcing Variables', plot_height=300, plot_width=800)
pallette = viridis(100)
cells = model.get_cells()
x = np.array([cell.geo.mid_point().x for cell in cells])
y = np.array([cell.geo.mid_point().y for cell in cells])
def plot_cell_data(model, fv, time_step):
    """Color the cell scatter by the value of forcing variable `fv` at `time_step`.

    Note
    -----
    This is for demonstration only. Normally one would plot the input data from the
    netcdf file directly, but the purpose is to show one how the data is containerized
    within Shyft.
    """
    # Pick a sensible color range per variable; fall back to [-1, 1] for
    # anything unrecognized.  BUG FIX: the original used three independent
    # `if` statements with an `else` attached only to the last one, so the
    # ranges for 'temperature' and 'precipitation' were always overwritten
    # by the (-1, 1) fallback.  An elif chain keeps each range in effect.
    if fv == 'temperature':
        minv, maxv = -40, 40
    elif fv == 'precipitation':
        minv, maxv = 0, 400
    elif fv == 'wind_speed':
        minv, maxv = 0, 10
    else:
        minv, maxv = -1, 1
    def rgb(minv, maxv, val, n, pallette):
        # map val linearly onto the nearest of n palette entries
        dd = np.linspace(minv, maxv, n)
        idx = (np.abs(dd - val)).argmin()
        return pallette[idx]
    # Once we have the cells, we can get their coordinate information
    # and fetch the x- and y-location of the cells
    data = np.array([getattr(c.env_ts,fv).values[time_step] for c in cells])
    colors = [rgb(minv, maxv, d, 100, pallette) for d in data]
    r = p.scatter(x, y, radius=300, fill_color=colors, fill_alpha=1, line_color=None)
    return p, r
p, r = plot_cell_data(model, 'temperature', 0)
def update(fv='temperature', time_step=0):
    """Callback for `ipywidgets.interact`: redraw the cell scatter for the
    chosen forcing variable `fv` at `time_step`, then refresh the bokeh plot."""
    plot_cell_data(model, fv, time_step)
    push_notebook()
show(p, notebook_handle=True)
interact(update, fv=forcing_variables, time_step=(0,model.time_axis.n))
"""
Explanation: Note that we are simply using a constant relative humidity and simple radiation model. Again, depending on what data you are using, you'll have different input datasets.
4. Interpolate forcing data to the cells
We are now ready to set up the model interpolation process. In this step, the data is distributed to the cells within your model. Recall that each cell maintains spatial awareness. The interpolation routines within Shyft utilize this information to conduct either Bayesian kriging (temperature) or Inverse Distance Weighting interpolation. See the api documentation, as new methods are being added.
End of explanation
"""
# #
# Build an initial state vector for the PTGSK model: one state object per
# model cell, each with the Kirchner routing storage `q` set to 0.2.
# NOTE(review): 0.2 is an arbitrary warm-up value -- confirm units/magnitude.
s0 = api.pt_gs_k.PTGSKStateVector()
for i in range(model.size()):
    si = api.pt_gs_k.PTGSKState()
    si.kirchner.q = 0.2
    s0.append(si)
model.set_states(s0)
# enable state collection for all catchments (-1 == all) during the run
model.set_state_collection(-1, True)
"""
Explanation: 5. Conduct the simulation
We now have a model that is ready for simulation. All the data from our point observations is interpolated to the cells, and we have the env_ts of each cell populated.
The next step is simply to run the simulation.
End of explanation
"""
def runnable(reg_mod):
    """Return True when the region model is properly configured for a run.

    Checks that an initial state and a simulation time axis are present, and
    that every required forcing-source vector is non-empty.
    **note** the set of required sources depends on your model's input data
    requirements.
    """
    required_sources = ("temperature", "wind_speed", "precipitation", "rel_hum", "radiation")
    has_state = reg_mod.initial_state.size() > 0
    has_time_axis = reg_mod.time_axis.size() > 0
    has_sources = all(len(getattr(reg_mod.region_env, name)) > 0
                      for name in required_sources)
    return has_state and has_time_axis and has_sources
# run the model, e.g. as you may configure it in a script:
%time
if True: #runnable(model):
model.revert_to_initial_state()
model.run_cells()
else:
print('Something wrong with model configuration.')
"""
Explanation: As a habit, we have a quick "sanity check" function to see if the model is runnable. It is recommended to have this function when you create 'run scripts'.
End of explanation
"""
# We can make a quick plot of the data of each sub-catchment
fig, ax = plt.subplots(figsize=(20,15))
# plot each catchment discharge in the catchment_ids
for i,cid in enumerate(model.catchment_ids):
# a ts.time_axis can be enumerated to it's UtcPeriod,
# that will have a .start and .end of type utctimestamp
# to use matplotlib support for datetime-axis, we convert it to datetime (as above)
ts_timestamps = [dt.datetime.utcfromtimestamp(p.start) for p in model.time_axis]
data = model.statistics.discharge([int(cid)]).values
ax.plot(ts_timestamps,data, label = "{}".format(model.catchment_ids[i]))
fig.autofmt_xdate()
ax.legend(title="Catch. ID")
ax.set_ylabel("discharge [m3 s-1]")
"""
Explanation: Okay, so the simulation was run. Now we may be interested in looking at some of the output. We'll take a brief summary glance in the next section, and save a deeper dive into the simulation results for another notebook.
6. Take a look at simulation results
The first step will be simply to look at the discharge results for each subcatchment within our simulation domain.
End of explanation
"""
|
AllenDowney/ThinkStats2 | examples/central_limit_theorem.ipynb | gpl-3.0 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def decorate(**options):
    """Decorate the current axes.

    Call decorate with keyword arguments like

    decorate(title='Title',
             xlabel='x',
             ylabel='y')

    The keyword arguments can be any of the axis properties

    https://matplotlib.org/api/axes_api.html
    """
    current_axes = plt.gca()
    current_axes.set(**options)
    # only draw a legend when at least one artist has a label
    legend_handles, legend_labels = current_axes.get_legend_handles_labels()
    if legend_handles:
        current_axes.legend(legend_handles, legend_labels)
    plt.tight_layout()
def normal_probability_plot(sample, fit_color='0.8', **options):
    """Make a normal probability plot with a fitted line.

    sample: sequence of numbers
    fit_color: color string for the fitted line
    options: passed along to plt.plot for the sample curve
    """
    # sorted standard-normal quantiles for the x axis
    quantiles = np.random.normal(0, 1, len(sample))
    quantiles.sort()
    # fitted line: sample mean/std mapped onto the quantiles
    mu, sigma = np.mean(sample), np.std(sample)
    plt.plot(quantiles, mu + sigma * quantiles, color=fit_color, label='model')
    # sorted sample values against the same quantiles
    sorted_sample = np.array(sample, copy=True)
    sorted_sample.sort()
    plt.plot(quantiles, sorted_sample, **options)
"""
Explanation: Example from Think Stats
http://thinkstats2.com
Copyright 2019 Allen B. Downey
MIT License: https://opensource.org/licenses/MIT
End of explanation
"""
def make_expo_samples(beta=2.0, iters=1000, sizes=(1, 10, 100)):
    """Generates samples of sums of exponential variates.

    beta: scale parameter of the exponential distribution
    iters: number of sums to generate for each sample size
    sizes: iterable of sample sizes to sum over; the default matches the
        previously hard-coded sizes, so existing callers are unaffected

    returns: map from sample size to a list of `iters` sums
    """
    samples = {}
    for n in sizes:
        samples[n] = [np.sum(np.random.exponential(beta, n))
                      for _ in range(iters)]
    return samples
"""
Explanation: Central Limit Theorem
If you add up independent variates from a distribution with finite mean and variance, the sum converges on a normal distribution.
The following function generates samples with difference sizes from an exponential distribution.
End of explanation
"""
def normal_plot_samples(samples, ylabel=''):
    """Makes normal probability plots for samples.

    samples: map from sample size to sample
    ylabel: label for the y axis

    The subplot grid is sized from the number of entries in `samples`, so
    this works for any number of samples (the original hard-coded 3 columns
    and silently broke with a different count).
    """
    plt.figure(figsize=(8, 3))
    plot = 1
    for n, sample in samples.items():
        plt.subplot(1, len(samples), plot)
        plot += 1
        normal_probability_plot(sample)
        decorate(title='n=%d' % n,
                 xticks=[],
                 yticks=[],
                 xlabel='Random normal variate',
                 ylabel=ylabel)
"""
Explanation: This function generates normal probability plots for samples with various sizes.
End of explanation
"""
samples = make_expo_samples()
normal_plot_samples(samples, ylabel='Sum of expo values')
"""
Explanation: The following plot shows how the sum of exponential variates converges to normal as sample size increases.
End of explanation
"""
def make_lognormal_samples(mu=1.0, sigma=1.0, iters=1000):
    """Generate sums of lognormal variates for several sample sizes.

    mu: mean of the underlying normal distribution
    sigma: std of the underlying normal distribution
    iters: number of sums to generate for each sample size

    returns: map from sample size to a list of `iters` sums
    """
    return {n: [np.sum(np.random.lognormal(mu, sigma, n))
                for _ in range(iters)]
            for n in [1, 10, 100]}
samples = make_lognormal_samples()
normal_plot_samples(samples, ylabel='sum of lognormal values')
"""
Explanation: The lognormal distribution has higher variance, so it requires a larger sample size before it converges to normal.
End of explanation
"""
def make_pareto_samples(alpha=1.0, iters=1000):
    """Generate sums of Pareto variates for several sample sizes.

    alpha: shape parameter of the Pareto distribution
    iters: number of sums to generate for each sample size

    returns: map from sample size to a list of `iters` sums
    """
    return {n: [np.sum(np.random.pareto(alpha, n))
                for _ in range(iters)]
            for n in [1, 10, 100]}
samples = make_pareto_samples()
normal_plot_samples(samples, ylabel='sum of Pareto values')
"""
Explanation: The Pareto distribution has infinite variance, and sometimes infinite mean, depending on the parameters. It violates the requirements of the CLT and does not generally converge to normal.
End of explanation
"""
def generate_correlated(rho, n):
    """Yield a length-n sequence of serially correlated standard-normal values.

    Each value is drawn as N(previous * rho, sqrt(1 - rho**2)), which keeps
    the marginal distribution standard normal.

    rho: coefficient of correlation
    n: length of sequence

    returns: generator of floats
    """
    current = np.random.normal(0, 1)
    yield current
    noise_sd = np.sqrt(1 - rho**2)
    remaining = n - 1
    while remaining > 0:
        current = np.random.normal(current * rho, noise_sd)
        yield current
        remaining -= 1
from scipy.stats import norm
from scipy.stats import expon
def generate_expo_correlated(rho, n):
    """Generate a sequence of correlated values from an exponential dist.

    Correlated standard-normal values are mapped through the normal CDF to
    correlated uniforms, then through the exponential quantile function.

    rho: coefficient of correlation
    n: length of sequence

    returns: NumPy array
    """
    gaussian_seq = list(generate_correlated(rho, n))
    uniform_seq = norm.cdf(gaussian_seq)
    return expon.ppf(uniform_seq)
def make_correlated_samples(rho=0.9, iters=1000):
    """Generate sums of correlated exponential variates for several sizes.

    rho: correlation of the underlying sequence
    iters: number of sums to generate for each sample size

    returns: map from sample size to a list of `iters` sums
    """
    return {n: [np.sum(generate_expo_correlated(rho, n))
                for _ in range(iters)]
            for n in [1, 10, 100]}
samples = make_correlated_samples()
normal_plot_samples(samples,
ylabel='Sum of correlated exponential values')
"""
Explanation: If the random variates are correlated, that also violates the CLT, so the sums don't generally converge.
To generate correlated values, we generate correlated normal values and then transform to whatever distribution we want.
End of explanation
"""
# Solution goes here
# Solution goes here
# Solution goes here
# Solution goes here
"""
Explanation: Exercises
Exercise: In Section 5.4, we saw that the distribution of adult weights is approximately lognormal. One possible explanation is that the weight a person gains each year is proportional to their current weight. In that case, adult weight is the product of a large number of multiplicative factors:
w = w0 f1 f2 ... fn
where w is adult weight, w0 is birth weight, and fi is the weight gain factor for year i.
The log of a product is the sum of the logs of the factors:
logw = log w0 + log f1 + log f2 + ... + log fn
So by the Central Limit Theorem, the distribution of logw is approximately normal for large n, which implies that the distribution of w is lognormal.
To model this phenomenon, choose a distribution for f that seems reasonable, then generate a sample of adult weights by choosing a random value from the distribution of birth weights, choosing a sequence of factors from the distribution of f, and computing the product. What value of n is needed to converge to a lognormal distribution?
End of explanation
"""
|
tclaudioe/Scientific-Computing | SC1/03_floating_point_arithmetic.ipynb | bsd-3-clause | import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: <center>
<h1> INF-285 - Computación Científica / ILI-285 - Computación Científica I</h1>
<h2> Floating Point Arithmetic </h2>
<h2> <a href="#acknowledgements"> [S]cientific [C]omputing [T]eam </a> </h2>
<h2> Version: 1.17</h2>
</center>
Table of Contents
Introduction
The nature of floating point numbers
Visualization of floating point numbers
What is the first integer that is not representable in double precision?
Loss of significance
Loss of significance in funcion evaluation
Another analysis (example from textbook)
Acknowledgements
End of explanation
"""
int('0b11', 2)
bin(9)
bin(2**53)
"""
Explanation: <div id='intro' />
Introduction
Hello! This notebook is an introduction to how our computers handle the representation of real numbers using double-precision floating-point format. To understand the contents of this notebook you should at least have a basic notion of how binary numbers work.
The aforementioned format occupies 64 bits which are divided as follows:
1 bit for the sign
11 bits for the exponent
52 bits for the mantissa
This means that the very next representable number after $1$ is $1 + 2^{-52}$, and their difference, $2^{-52}$, is $\epsilon_{mach}$.
Additionally, if you'd like to quickly go from a base-2 integer to a base-10 integer and viceversa, Python has some functions that can help you.
End of explanation
"""
import bitstring as bs
"""
Explanation: <div id='nature' />
The nature of floating point numbers
As we know, float representations of real numbers are just a finite and bounded representation of them. Interestingly, these floating numbers are distributed across the real number line.
To see this, it's important to keep in mind the following property:
\begin{equation} \left|\frac{\text{fl}(x)-x}{x}\right| \leq \frac{1}{2} \epsilon_{\text{mach}} \end{equation}
where $\text{fl}(x)$ is the float representation of $x \in R$. What it says is that the relative error in representing any non-zero real number x, is bounded by a quantity that depends on the system representation ($\epsilon_{\text{mach}}$).
Maybe now you're thinking: what relationship does this have with the distribution of floating point numbers? You see, if we rewrite the previous property as follows:
\begin{equation} |\text{fl}(x)-x| \leq \frac{1}{2} \epsilon_{\text{mach}} |x| \end{equation}
it becomes clear: The absolute error (distance) between a real number and its floating point representation is proportional to the real number's magnitude.
Intuitively speaking, if the representation error of a number increases as its magnitude increases, then it's quite natural that the distance between a floating point number and the next representable floating point number will increase as the magnitude of such number increases (and conversely). Can you prove it? For now we will prove it experimentally.
We will use a library named bitstring to handle different number representations. You can install it with:
pip install bitstring
End of explanation
"""
def next_float(f):
    """Return the next representable double-precision float after `f`.

    Works by reinterpreting `f` as its 64-bit IEEE-754 pattern and adding
    one to the 52-bit mantissa field.
    NOTE(review): assumes the mantissa is not all ones; a carry into the
    exponent field is not handled here -- confirm before relying on this
    near power-of-two boundaries.
    """
    # pack f as a big-endian ('>d') double-precision bitstring
    b = bs.pack('>d', f)
    # extract the mantissa (bits 12..63) as an unsigned int
    # and add 1 to step to the next representable value
    m = b[12:].uint
    m += 1
    # write the incremented mantissa back into its field
    b[12:] = m
    return b.float
def epsilon(f):
    """Return the gap between `f` and the next representable double."""
    return next_float(f) - f
"""
Explanation: The next two functions are self-explanatory:
next_float(f) computes the next representable float number.
epsilon(f) computes the difference between f and the next representable float number
End of explanation
"""
epsilon(1)
"""
Explanation: So if we compute epsilon(1) we should get the epsilon machine number. Let's try it:
End of explanation
"""
#values between 10**-32 and 10**+32
values = np.array([10**i for i in range(-32,32)]).astype(float)
#corresponding epsilons
vepsilon = np.vectorize(epsilon)
eps = vepsilon(values)
"""
Explanation: In order to prove our hypotesis, we will create an array of values: [1e-32, 1e-31, ..., 1e31, 1e32] and compute their corresponding epsilon.
End of explanation
"""
fig = plt.figure(figsize=(10,5))
plt.subplot(121)
plt.plot(values, eps,'.',markersize=20)
plt.xlabel('Values')
plt.ylabel('Corresponding Epsilons')
plt.title('Epsilons v/s Values')
plt.grid(True)
plt.subplot(122)
plt.loglog(values, eps,'.')
plt.xlabel('Values')
plt.ylabel('Corresponding Epsilons')
plt.title('Epsilons v/s Values')
plt.grid(True)
fig.tight_layout()
plt.show()
"""
Explanation: We now include a comparison between a linear scale plot and a loglog scale plot. Which one is more useful here?
End of explanation
"""
def to_binary(f):
    """Print the IEEE-754 double-precision bit pattern of `f`,
    grouped as sign (1 bit), exponent (11 bits), mantissa (52 bits)."""
    bits = bs.pack('>d', f).bin
    sign, exponent, mantissa = bits[0], bits[1:12], bits[12:]
    print(sign + ' ' + exponent + ' ' + mantissa)
"""
Explanation: As you can see, the hypothesis was right. In other words: Floating point numbers are not linearly distributed across the real numbers, and the distance between them is proportional to their magnitude. Tiny numbers (~ 0) are closer to each other than big numbers are.
<div id='visualization' />
Visualization of floating point numbers
With the help of bitstring library we could write a function to visualize floating point numbers in their binary representation
End of explanation
"""
to_binary(1.)
int('0b01111111111', 2)
to_binary(1.+epsilon(1.))
to_binary(+0.)
to_binary(-0.)
to_binary(np.inf)
to_binary(-np.inf)
to_binary(np.nan)
to_binary(-np.nan)
to_binary(2.**-1074)
print(2.**-1074)
to_binary(2.**-1075)
print(2.**-1075)
to_binary(9.4)
"""
Explanation: Let's see some interesting examples
End of explanation
"""
to_binary(1)
to_binary(1+2**-52)
"""
Explanation: <div id='firstinteger' />
What is the first integer that is not representable in double precision?
Recall that $\epsilon_{\text{mach}}=2^{-52}$ in double precision.
End of explanation
"""
for i in np.arange(11):
to_binary(1+i*2**-55)
"""
Explanation: This means that if we want to store any number in the interval $[1,1+\epsilon_{\text{mach}}]$, only the numbers $1$ and $1+\epsilon_{\text{mach}}$ will be stored. For example, compare the exponent and the mantissa in the previous cell with the following outputs:
End of explanation
"""
for i in np.arange(11):
to_binary((1+i*2**-55)*2**52)
"""
Explanation: We can now scale this difference such that the scaling factor multiplied by $\epsilon_{\text{mach}}$ is one. The factor will be $2^{52}$. This means $2^{52}\,\epsilon_{\text{mach}}=1$. Repeating the same example as before, but with the scaling factor, we obtain:
End of explanation
"""
to_binary(2**52)
to_binary(2**52+1)
"""
Explanation: Which means we can only exactly store the numbers:
End of explanation
"""
to_binary(2**53)
to_binary(2**53+1)
"""
Explanation: The distance from $2^{52}$ and the following number representable is $1$ !!!! So, what would happen if we were to store $2^{53}+1$?
End of explanation
"""
a = 1.
b = 2.**(-52) #emach
result_1 = a + b # arithmetic result is 1.0000000000000002220446049250313080847263336181640625
result_1b = result_1-1.0
print("{0:.1000}".format(result_1))
print(result_1b)
print(b)
c = 2.**(-53)
result_2 = a + c # arithmetic result is 1.00000000000000011102230246251565404236316680908203125
np.set_printoptions(precision=16)
print("{0:.1000}".format(result_2))
print(result_2-a)
to_binary(result_2)
to_binary(result_2-a)
d = 2.**(-53) + 2.**(-54)
result_3 = a + d # arithmetic result is 1.000000000000000166533453693773481063544750213623046875
print("{0:.1000}".format(result_3))
to_binary(result_3)
to_binary(d)
"""
Explanation: We can't store the Integer $2^{53}+1$! Thus, the first integer not representable is $2^{53}+1$.
<div id='loss' />
Loss of significance
As we mentioned, there's a small leap between 1 and the next representable number, which means that if you want to represent a number between those two, you won't be able to do so; that number is nonexistent as it is for the computer, so it'll have to round it to a representable number before storing it in memory.
End of explanation
"""
e = 2.**(-1)
f = b/2. # emach/2
result_4 = e + f # 0.50000000000000011102230246251565404236316680908203125
print("{0:.1000}".format(result_4))
result_5 = e + b # 0.5000000000000002220446049250313080847263336181640625
print("{0:.1000}".format(result_5))
g = b/4.
result_5 = e + g # 0.500000000000000055511151231257827021181583404541015625
print("{0:.1000}".format(result_5))
"""
Explanation: As you can see, if you try to save a number between $1$ and $1 + \epsilon _{mach}$, it will have to be rounded (according to some criteria) to a representable number before being stored, thus creating a difference between the <i>real</i> number and the <i>stored</i> number. This is an example of loss of significance.
Does that mean that the "leap" between representable numbers is <i>always</i> going to be $\epsilon _{mach}$? Of course not! Some numbers will require smaller leaps, and some others will require bigger leaps.
In any interval of the form $[2^n,2^{n+1}]$ for a representable $n\in \mathbb{Z}$, the leap is constant. For example, all the numbers between $2^{-1}$ and $2^0$ (but excluding $2^0$) have a distance of $\epsilon_{\text{mach}}/2$ between them. All the numbers between $2^0$ and $2^1$ (excluding $2^1$) have a distance of $\epsilon_{\text{mach}}$ between them. Those between $2^1$ and $2^2$ (not including $2^2$) have a distance of $2\,\epsilon_{\text{mach}}$ between them, and so on and so forth.
End of explanation
"""
num_1 = a
num_2 = b
result = a + b
print("{0:.1000}".format(result))
"""
Explanation: We'll let the students find some representable numbers and some non-representable numbers. It's important to note that loss of significance can occur in many operations and functions other than the simple addition of two numbers.
End of explanation
"""
x = np.arange(-10,10,0.1)
y = (1-np.cos(x))/(np.sin(x)**2)
plt.figure()
plt.plot(x,y,'.')
plt.grid(True)
plt.show()
x = np.arange(-10,10,0.1)
y = 1/(1+np.cos(x))
plt.figure()
plt.plot(x,y,'.')
plt.grid(True)
plt.show()
x = np.arange(-1,1,0.01)
y = (1-np.cos(x))/np.sin(x)**2
plt.figure()
plt.plot(x,y,'.',markersize=10)
plt.grid(True)
plt.show()
y = 1/(1+np.cos(x))
plt.figure()
plt.plot(x,y,'.',markersize=10)
plt.grid(True)
plt.show()
"""
Explanation: <div id='func' />
Loss of significance in function evaluation
Loss of Significance is present too in the representation of functions. A classical example (which you can see in the guide book), is the next function:
\begin{equation}f(x)= \frac{1 - \cos x}{\sin^{2}x} \end{equation}
Applying trigonometric identities, we can obtain the 'equivalent' function:
\begin{equation}f(x)= \frac{1}{1 + \cos x} \end{equation}
Both of these functions are apparently equal. Nevertheless, their graphs tell a different story when $x$ is equal to zero.
End of explanation
"""
# f1: the numerically unstable form; f2: its algebraically equivalent,
# stable rewrite; f3/f4: numerator and denominator of f1 on their own.
def f1(x):
    return (1. - np.cos(x)) / (np.sin(x) ** 2)

def f2(x):
    return 1. / (1 + np.cos(x))

def f3(x):
    return 1. - np.cos(x)

def f4(x):
    return np.sin(x) ** 2

# 20 log-spaced points in [1e-19, 1], walked from 1 down toward 0
x = np.logspace(-19, 0, 20)[-1:0:-1]
o1, o2, o3, o4 = f1(x), f2(x), f3(x), f4(x)
print("x, f1(x), f2(x), f3(x), f4(x)")
for xi, a, b, c, d in zip(x, o1, o2, o3, o4):
    print("%1.10f, %1.10f, %1.10f, %1.25f, %1.25f" % (xi, a, b, c, d))
"""
Explanation: When $x$ is equal to zero, the first function has an indetermination, but previously, the computer subtracted numbers that were almost equal. This leads to a loss of significance, turning the expression close to this point to zero. However, modifying this expression towards the second function eliminates this subtraction, fixing the error in its calculation when $x=0$.
In conclusion, two representations of a function can be equal to us, but different for the computer!
<div id='another' />
Another analysis (example from textbook)
End of explanation
"""
|
gfeiden/Notebook | Projects/ngc2516_spots/.ipynb_checkpoints/bolometric_corrections-checkpoint.ipynb | mit | # change directory
%cd ../../../Projects/starspot/starspot/
from color import bolcor as bc
"""
Explanation: Bolometric Corrections
Details about the bolometric correction package can be found in the GitHub repository starspot.
End of explanation
"""
bc.utils.log_init('table_limits.log') # initialize bolometric correction log file
"""
Explanation: Before requesting bolometric corrections, we need to first initialize the package, which loads the appropriate bolometric corrections tables into memory to permit faster computation of corrections hereafter. The procedure for initializing the tables can be found in the README file in the starspot.color repository. First we need to initialize a log file, where various procedural steps in the code are tracked.
End of explanation
"""
FeH = 0.0 # dex; atmospheric [Fe/H]
aFe = 0.0 # dex; atmospheric [alpha/Fe]
brand = 'marcs' # use theoretical corrections from MARCS atmospheres
phot_filters = ['U', 'B', 'V', 'R', 'I', 'J', 'H', 'K'] # select a subset of filters
bc.bolcorrection.bc_init(FeH, aFe, brand, phot_filters) # initialize tables
"""
Explanation: Next, we need to load the appropriate tables.
End of explanation
"""
bc.bolcorrection.bc_eval(5300.0, 4.61, -0.353, len(phot_filters)) # approx. 0.9 Msun star at 120 Myr.
bc.bolcorrection.bc_eval(3000.0, 4.94, -2.65, len(phot_filters)) # approx. 0.1 Msun star at 120 Myr.
"""
Explanation: Now that we have established which set of bolometric correction tables we wish to use, it's possible to compute bolometric correction using either the Isochrone.colorize feature, or by submitting individual requests. First, let's take a look at a couple valid queries. Note that the query is submitted as bc_eval(Teff, logg, logL/Lsun, N_filers)
End of explanation
"""
bc.bolcorrection.bc_eval(2204.0, 4.83, -3.47, len(phot_filters)) # outside of grid -- should return garbage.
"""
Explanation: The extremely large (or small) magnitudes for the 5300 K star is very strange. These issues do not occur for the same command execution in the terminal shell.
Now, what happens when we happen to request a temperature for a value outside the grid?
End of explanation
"""
bc.bolcorrection.bc_clean()
bc.utils.log_close()
"""
Explanation: Curiously, it returns values that do not appear to be out of line. It's quite possible that the code is somehow attempting to extrapolate since we use a 4-point Lagrange interpolation, which may unknowingly provide an extrapolation beyond the defined grid space. Comparing to Phoenix model atmospheres with the Caffau et al. (2011) solar abundances for the Johnson-Cousins and 2MASS systems, our optical $BV(RI)_C$ magnitudes are systematically 1.0 mag fainter than Phoenix at 120 Myr, and our $JHK$ magnitudes are fainter by approximately 0.04 mag at the same age.
Finally, safely deallocate memory and close the log file.
End of explanation
"""
|
kitefu/Testing | data_statistic.ipynb | mit | data_rang = 9
pr_type = ['a', 'b', 'c', 'd']
p_type = [ np.random.choice(pr_type) for i in range(data_rang) ]
data = {'product_name' : ['x0', 'x1', 'x3', 'x2', 'x4', 'x5', 'x6', 'x7', 'x8'],
'T1': np.random.randint(100, size = [data_rang]),
'T2': np.random.randint(100, size = [data_rang]),
'T3': np.random.randint(100, size = [data_rang]),
'T4': np.random.randint(100, size = [data_rang]),
'T5': np.random.randint(100, size = [data_rang]),
'T6': np.random.randint(100, size = [data_rang]),
'T7': np.random.randint(100, size = [data_rang]),
'product_type': p_type}
test_data = pd.DataFrame(data, columns = ['product_name', 'T1', 'T2', 'T3', 'T4', 'T5', 'T6', 'T7', 'product_type'])
print test_data
"""
Explanation: construct a test dataframe
这里构造一个测试数据, T1~T7的列代表某个时间下的产品销售量
product_type列代表产品所属的类型,预设4个类型;
product_name代表产品名
End of explanation
"""
def dealing_data(data, start_time, end_time):
    """Rank products within each product type by total sales in a window.

    For every product type in *data*, sums the sales columns between
    *start_time* and *end_time* (both inclusive) into a 'statistic'
    column and assigns a 1-based 'rank' (1 = best seller of its type).

    Parameters
    ----------
    data : pd.DataFrame
        Must contain 'product_name', 'product_type' and one numeric
        column per time point (e.g. 'T1'..'T7').
    start_time, end_time :
        Labels of the first/last sales column to include.

    Returns
    -------
    pd.DataFrame with columns product_name, product_type, statistic, rank.

    Fixes vs. original: Python-2 ``print`` statement replaced by the
    print() function; a redundant zero-initialisation of 'statistic'
    (immediately overwritten) removed; deprecated DataFrame.append
    replaced by a single pd.concat.
    """
    ranked_groups = []
    for item in set(data['product_type']):
        group = data[data['product_type'] == item]
        sales = slicing_data(group, start_time, end_time)
        ranked = group.loc[:, ['product_name', 'product_type']]
        # total sales per product over the requested window
        ranked['statistic'] = np.sum(sales, axis=1)
        ranked = ranked.sort_values('statistic', ascending=False)
        # 1-based rank within the product type
        ranked['rank'] = np.arange(1, len(ranked) + 1)
        ranked_groups.append(ranked)
    result_df = pd.concat(ranked_groups) if ranked_groups else pd.DataFrame()
    print(result_df)
    return result_df
def slicing_data(data, start_time, end_time):
    """Return the sales columns of *data* from *start_time* to *end_time*.

    Uses pandas label-based slicing, so both endpoints are included.
    (Dead commented-out code from earlier experiments removed.)
    """
    return data.loc[:, start_time : end_time]
"""
Explanation: main function
dealing_data 把传入的数据根据产品类型做数据截取,截取后的数据为在某个指定时间段内,每个产品类的所有产品在这个时间段内的销售数据
然后再统计该时间段内每个类产品的销售总量,并排序
End of explanation
"""
def query_rank(data, query_product_name, start_time, end_time):
    """Return the within-type sales rank of *query_product_name*
    over the window [start_time, end_time]."""
    ranked = dealing_data(data, start_time, end_time)
    is_match = ranked['product_name'] == query_product_name
    return ranked.loc[is_match, 'rank'].values[0]
#查询 产品名为 x6 在 T1到T4这段时间内在同类产品中的销售排名
result_rank = query_rank(test_data, 'x6', 'T1', 'T3')
print "query result , the rank is %d"%result_rank
# 返回总的销售排名表
"""
Explanation: 查询函数, 通过输入指定产品名和起止时间参数, 返回该产品在该类中的销售排名
End of explanation
"""
def dealing_data_b(data, start_time, end_time):
    """Rank products within each product type by total sales in a window.

    Same algorithm as ``dealing_data`` but for DataFrames whose sales
    columns are pandas Timestamps (daily data); the window is selected
    via ``slicing_data_b``.

    Returns a DataFrame with columns product_name, product_type,
    statistic and a 1-based rank within each product type.

    Fixes vs. original: Python-2 ``print`` statement replaced by the
    print() function; a redundant zero-initialisation of 'statistic'
    (immediately overwritten) removed; deprecated DataFrame.append
    replaced by a single pd.concat.
    """
    ranked_groups = []
    for item in set(data['product_type']):
        group = data[data['product_type'] == item]
        sales = slicing_data_b(group, start_time, end_time)
        ranked = group.loc[:, ['product_name', 'product_type']]
        # total sales per product over the requested date window
        ranked['statistic'] = np.sum(sales, axis=1)
        ranked = ranked.sort_values('statistic', ascending=False)
        # 1-based rank within the product type
        ranked['rank'] = np.arange(1, len(ranked) + 1)
        ranked_groups.append(ranked)
    result_df = pd.concat(ranked_groups) if ranked_groups else pd.DataFrame()
    print(result_df)
    return result_df
def slicing_data_b(data, start_time, end_time):
    """Return the daily sales columns of *data* between two dates.

    The columns of *data* are expected to be pandas Timestamps at daily
    frequency; both endpoints are included.
    (Dead commented-out code removed; needless list comprehension over
    ``pd.date_range`` replaced by ``list(...)``.)
    """
    select_columns = list(pd.date_range(start_time, end_time))
    return data[select_columns]
def query_rank_b(data, query_product_name, start_time, end_time):
    """Return the within-type sales rank of *query_product_name*
    over the date window [start_time, end_time]."""
    ranked = dealing_data_b(data, start_time, end_time)
    is_match = ranked['product_name'] == query_product_name
    return ranked.loc[is_match, 'rank'].values[0]
# construct dataframe
sale_value_date = { el: np.random.randint(100, size = [data_rang]) for el in pd.date_range('20100101', '20100109')}
sale_value_date_df = pd.DataFrame(sale_value_date)
print sale_value_date_df
pr_type = ['a', 'b', 'c', 'd']
p_name = [ "x" + str(i) for i in range(data_rang) ]
p_type = [ np.random.choice(pr_type) for i in range(data_rang) ]
product_data = {'product_name': p_name,
'product_type': p_type}
product_data_df = pd.DataFrame(product_data, columns = ['product_name', 'product_type'])
print product_data_df
df = pd.concat([sale_value_date_df, product_data_df], axis = 1)
print df
result_rank_b = query_rank_b(df, 'x4', '2010-01-01', '2010-01-05')
print "query result , the rank is %d"%result_rank_b
"""
Explanation: another form of datafram
End of explanation
"""
|
WMD-group/MacroDensity | tutorials/Slab/SlabCalculation.ipynb | mit | %matplotlib inline
import sys
import macrodensity as md
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import os
"""
Explanation: Ionisation potential of a bulk material
In this example we use MacroDensity with VASP to align the energy levels of a simple bulk material.
The procedure involves two DFT calculations, yielding different important values
A bulk calculation, this provides us with the VBM eigenvalue under the assumption of zero potential ($\epsilon_{vbm}$)
A slab calculation, this provides us with the vacuum level with resepct to the potential inside the material, the difference between these values is the surface dipole ($D_s$).
The ionisation potential ($IP$) is then obtained from:
$IP = D_s - \epsilon_{vbm}$
End of explanation
"""
extrema = md.vasp_tools.get_band_extrema('OUTCAR_MoO3_bulk')
print(extrema)
"""
Explanation: Bulk calculation
In this calculation we calculate the eigenvalues of the bulk material, under the assumption of zero average potential.
You find the eigenvalues printed after the line "band No. band energies occupation" in the OUTCAR
I have written a small script to do this witin MacroDensity - vasp_tools.get_band_extrema
Let's try it out on the OUTCAR_bulk file in this directory
End of explanation
"""
# Decompress the LOCPOT file once; later runs reuse the extracted copy.
# NOTE(review): the original existence check and bunzip2 target used
# 'LOCPOT.MoO3.vasp' while input_file was 'LOCPOT_MoO3.vasp' (underscore),
# so a fresh run would decompress one name and try to read the other.
# Unified on the dotted name that bunzip2 actually produces -- confirm
# against the archive name shipped with the tutorial.
locpot_name = 'LOCPOT.MoO3.vasp'
if os.path.isfile(locpot_name):
    print('LOCPOT already exists')
else:
    os.system('bunzip2 ' + locpot_name + '.bz2')
input_file = locpot_name
lattice_vector = 7.43  # slab periodicity (Angstrom) normal to the surface
output_file = 'planar.dat'
"""
Explanation: Slab calculation
Now we do a calculation of the slab to get the potential profile. Important settings for the INCAR file:
LVHAR = .TRUE. # This generates a LOCPOT file with the potential
In your example directory there should already be a LOCPOT_MoO3.vasp file. This is the one we will use to analyse the potential and extract the vacuum level and the surface dipole.
In the sample PlanarAverage.py file, all we have to edit are the top three lines. Of these the only one that is not obvious is the lattice_vector parameter. This is just the periodicity of the slab in the direction normal to the surface. In the picture below, this is just the distance between the layers of SnO$_2$.
<img src="MoO3.png">
End of explanation
"""
# Read the charge/potential grid and the lattice from the LOCPOT file.
vasp_pot, NGX, NGY, NGZ, Lattice = md.read_vasp_density(input_file)
# Lattice-vector magnitudes and unit vectors from the 3x3 lattice matrix.
vector_a,vector_b,vector_c,av,bv,cv = md.matrix_2_abc(Lattice)
# Real-space grid spacing along each lattice direction.
resolution_x = vector_a/NGX
resolution_y = vector_b/NGY
resolution_z = vector_c/NGZ
grid_pot, electrons = md.density_2_grid(vasp_pot,NGX,NGY,NGZ)
"""
Explanation: The code below is set in the PlanarAverage.py file; you don't need to edit it
End of explanation
"""
## POTENTIAL
planar = md.planar_average(grid_pot,NGX,NGY,NGZ)
## MACROSCOPIC AVERAGE
macro = md.macroscopic_average(planar,lattice_vector,resolution_z)
"""
Explanation: The code below will prompt you to say which axis you want to average along
End of explanation
"""
fig, ax1 = plt.subplots(1, 1, sharex=True)
textsize = 22
mpl.rcParams['xtick.labelsize'] = textsize
mpl.rcParams['ytick.labelsize'] = textsize
mpl.rcParams['figure.figsize'] = (10, 6)
ax1.plot(planar,label="Planar",lw=3)
ax1.plot(macro,label="Macroscopic",lw=3)
ax1.set_xlim(0,len(planar))
ax1.set_facecolor((0.95,0.95,0.95))
ax1.grid(True)
ax1.legend(fontsize=22)
plt.show()
np.savetxt(output_file,macro)
"""
Explanation: Now we can plot the results
End of explanation
"""
print"IP: %10.4f eV"%(10.51 - 1.9483)
"""
Explanation: PROTIP
Sometimes the macroscopic average potential in the slab still looks very wavey. In this case it is sometimes required to increase the lattice_vector setting from earlier, since there are sometimes numerical effects.
Get the surface dipole ($D_s$)
From inspection of the macroscopic average, saved in planar.dat we can find that the value of $V$ at each plateau is 6.58 V and -3.93 V. So the step $D_s$ is 10.51 V
The offset
Applying the equation from earlier, the IP is
End of explanation
"""
|
BinRoot/TensorFlow-Book | ch06_hmm/Concept02_hmm.ipynb | mit | import numpy as np
import tensorflow as tf
"""
Explanation: Ch 06: Concept 02
Viterbi parse of a Hidden Markov model
Import TensorFlow and Numpy
End of explanation
"""
# initial parameters can be learned on training data
# theory reference https://web.stanford.edu/~jurafsky/slp3/8.pdf
# code reference https://phvu.net/2013/12/06/sweet-implementation-of-viterbi-in-python/
class HMM(object):
    """Hidden Markov model whose forward/Viterbi steps are TensorFlow 1.x
    graph operations.

    N hidden states; an observation is an integer index into the columns
    of the emission-probability matrix. The recursions are driven from
    Python: each step feeds the previous message back in through a
    placeholder (self.fwd / self.viterbi).
    """
    def __init__(self, initial_prob, trans_prob, obs_prob):
        # initial_prob: (N, 1) column vector of start probabilities
        # trans_prob:   (N, N) state-transition matrix
        # obs_prob:     (N, M) emission matrix, M = number of observation symbols
        self.N = np.size(initial_prob)
        self.initial_prob = initial_prob
        self.trans_prob = trans_prob
        self.obs_prob = obs_prob
        self.emission = tf.constant(obs_prob)
        assert self.initial_prob.shape == (self.N, 1)
        assert self.trans_prob.shape == (self.N, self.N)
        assert self.obs_prob.shape[0] == self.N
        # placeholders fed on every step of the forward/Viterbi recursions
        self.obs = tf.placeholder(tf.int32)
        self.fwd = tf.placeholder(tf.float64)
        self.viterbi = tf.placeholder(tf.float64)
    def get_emission(self, obs_idx):
        # Return column obs_idx of the emission matrix as an (N, 1) slice.
        slice_location = [0, obs_idx]
        num_rows = tf.shape(self.emission)[0]
        slice_shape = [num_rows, 1]
        return tf.slice(self.emission, slice_location, slice_shape)
    def forward_init_op(self):
        # Initial forward message: start probs weighted by the first emission.
        obs_prob = self.get_emission(self.obs)
        fwd = tf.multiply(self.initial_prob, obs_prob)
        return fwd
    def forward_op(self):
        # One forward-algorithm step: propagate self.fwd through transitions,
        # weight by the current emission, then sum over previous states.
        transitions = tf.matmul(self.fwd, tf.transpose(self.get_emission(self.obs)))
        weighted_transitions = transitions * self.trans_prob
        fwd = tf.reduce_sum(weighted_transitions, 0)
        return tf.reshape(fwd, tf.shape(self.fwd))
    def decode_op(self):
        # One Viterbi step: like forward_op but taking the max instead of
        # the sum over previous states.
        transitions = tf.matmul(self.viterbi, tf.transpose(self.get_emission(self.obs)))
        weighted_transitions = transitions * self.trans_prob
        viterbi = tf.reduce_max(weighted_transitions, 0)
        return tf.reshape(viterbi, tf.shape(self.viterbi))
    def backpt_op(self):
        # Back-pointers: the argmax previous state for each current state.
        back_transitions = tf.matmul(self.viterbi, np.ones((1, self.N)))
        weighted_back_transitions = back_transitions * self.trans_prob
        return tf.argmax(weighted_back_transitions, 0)
"""
Explanation: Create the same HMM model as before. This time, we'll include a couple additional functions.
End of explanation
"""
def forward_algorithm(sess, hmm, observations):
    """Run the forward algorithm and return P(observations | hmm)."""
    # seed the recursion with the first observation ...
    fwd = sess.run(hmm.forward_init_op(), feed_dict={hmm.obs: observations[0]})
    # ... then fold in the remaining observations one at a time
    for obs in observations[1:]:
        fwd = sess.run(hmm.forward_op(), feed_dict={hmm.obs: obs, hmm.fwd: fwd})
    # total probability is the sum of the final forward message
    return sess.run(tf.reduce_sum(fwd))
"""
Explanation: Define the forward algorithm from Concept01.
End of explanation
"""
def viterbi_decode(sess, hmm, observations):
    """Return the most likely hidden-state sequence for *observations*.

    Runs the Viterbi recursion forward while recording back-pointers,
    then walks the back-pointers in reverse to recover the state path.
    """
    viterbi = sess.run(hmm.forward_init_op(), feed_dict={hmm.obs: observations[0]})
    # backpts[s, t] = best previous state given we are in state s at time t
    backpts = np.ones((hmm.N, len(observations)), 'int32') * -1
    for t in range(1, len(observations)):
        viterbi, backpt = sess.run([hmm.decode_op(), hmm.backpt_op()],
                                   feed_dict={hmm.obs: observations[t],
                                              hmm.viterbi: viterbi})
        backpts[:, t] = backpt
    # start from the best final state and follow the back-pointers
    tokens = [viterbi[:, -1].argmax()]
    for i in range(len(observations) - 1, 0, -1):
        tokens.append(backpts[tokens[-1], i])
    return tokens[::-1]
"""
Explanation: Now, let's compute the Viterbi likelihood of the observed sequence:
End of explanation
"""
if __name__ == '__main__':
    # Two hidden states; the commented dictionaries below restate the
    # model in the notation of the classic "Healthy/Fever" example.
    states = ('Healthy', 'Fever')
    # observations = ('normal', 'cold', 'dizzy')
    # start_probability = {'Healthy': 0.6, 'Fever': 0.4}
    # transition_probability = {
    #     'Healthy': {'Healthy': 0.7, 'Fever': 0.3},
    #     'Fever': {'Healthy': 0.4, 'Fever': 0.6}
    # }
    # emission_probability = {
    #     'Healthy': {'normal': 0.5, 'cold': 0.4, 'dizzy': 0.1},
    #     'Fever': {'normal': 0.1, 'cold': 0.3, 'dizzy': 0.6}
    # }
    initial_prob = np.array([[0.6], [0.4]])
    trans_prob = np.array([[0.7, 0.3], [0.4, 0.6]])
    obs_prob = np.array([[0.5, 0.4, 0.1], [0.1, 0.3, 0.6]])
    hmm = HMM(initial_prob=initial_prob, trans_prob=trans_prob, obs_prob=obs_prob)
    # observation indices into obs_prob columns: 0=normal, 1=cold, 2=dizzy
    observations = [0, 1, 1, 2, 1]
    with tf.Session() as sess:
        prob = forward_algorithm(sess, hmm, observations)
        print('Probability of observing {} is {}'.format(observations, prob))
        seq = viterbi_decode(sess, hmm, observations)
        print('Most likely hidden states are {}'.format(seq))
"""
Explanation: Let's try it out on some example data:
End of explanation
"""
|
mne-tools/mne-tools.github.io | stable/_downloads/b99fcf919e5d2f612fcfee22adcfc330/40_autogenerate_metadata.ipynb | bsd-3-clause | from pathlib import Path
import matplotlib.pyplot as plt
import mne
data_dir = Path(mne.datasets.erp_core.data_path())
infile = data_dir / 'ERP-CORE_Subject-001_Task-Flankers_eeg.fif'
raw = mne.io.read_raw(infile, preload=True)
raw.filter(l_freq=0.1, h_freq=40)
raw.plot(start=60)
# extract events
all_events, all_event_id = mne.events_from_annotations(raw)
"""
Explanation: Auto-generating Epochs metadata
This tutorial shows how to auto-generate metadata for ~mne.Epochs, based on
events via mne.epochs.make_metadata.
We are going to use data from the erp-core-dataset (derived from
:footcite:Kappenman2021). This is EEG data from a single participant
performing an active visual task (Eriksen flanker task).
<div class="alert alert-info"><h4>Note</h4><p>If you wish to skip the introductory parts of this tutorial, you may jump
straight to `tut-autogenerate-metadata-ern` after completing the data
import and event creation in the
`tut-autogenerate-metadata-preparation` section.</p></div>
This tutorial is loosely divided into two parts:
We will first focus on producing ERP time-locked to the visual
stimulation, conditional on response correctness and response time in
order to familiarize ourselves with the ~mne.epochs.make_metadata
function.
After that, we will calculate ERPs time-locked to the responses – again,
conditional on response correctness – to visualize the error-related
negativity (ERN), i.e. the ERP component associated with incorrect
behavioral responses.
Preparation
Let's start by reading, filtering, and producing a simple visualization of the
raw data. The data is pretty clean and contains very few blinks, so there's no
need to apply sophisticated preprocessing and data cleaning procedures.
We will also convert the ~mne.Annotations contained in this dataset to events
by calling mne.events_from_annotations.
End of explanation
"""
# metadata for each epoch shall include events from the range: [0.0, 1.5] s,
# i.e. starting with stimulus onset and expanding beyond the end of the epoch
metadata_tmin, metadata_tmax = 0.0, 1.5
# auto-create metadata
# this also returns a new events array and an event_id dictionary. we'll see
# later why this is important
metadata, events, event_id = mne.epochs.make_metadata(
events=all_events, event_id=all_event_id,
tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'])
# let's look at what we got!
metadata
"""
Explanation: Creating metadata from events
The basics of make_metadata
Now it's time to think about the time windows to use for epoching and
metadata generation. It is important to understand that these time windows
need not be the same! That is, the automatically generated metadata might
include information about events from only a fraction of the epochs duration;
or it might include events that occurred well outside a given epoch.
Let us look at a concrete example. In the Flankers task of the ERP CORE
dataset, participants were required to respond to visual stimuli by pressing
a button. We're interested in looking at the visual evoked responses (ERPs)
of trials with correct responses. Assume that based on literature
studies, we decide that responses later than 1500 ms after stimulus onset are
to be considered invalid, because they don't capture the neuronal processes
of interest here. We can approach this in the following way with the help of
mne.epochs.make_metadata:
End of explanation
"""
row_events = ['stimulus/compatible/target_left',
'stimulus/compatible/target_right',
'stimulus/incompatible/target_left',
'stimulus/incompatible/target_right']
metadata, events, event_id = mne.epochs.make_metadata(
events=all_events, event_id=all_event_id,
tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],
row_events=row_events)
metadata
"""
Explanation: Specifying time-locked events
We can see that the generated table has 802 rows, each one corresponding to
an individual event in all_events. The first column, event_name,
contains the name of the respective event around which the metadata of that
specific column was generated – we'll call that the "time-locked event",
because we'll assign it time point zero.
The names of the remaining columns correspond to the event names specified in
the all_event_id dictionary. These columns contain floats; the values
represent the latency of that specific event in seconds, relative to
the time-locked event (the one mentioned in the event_name column).
For events that didn't occur within the given time window, you'll see
a value of NaN, simply indicating that no event latency could be
extracted.
Now, there's a problem here. We want investigate the visual ERPs only,
conditional on responses. But the metadata that was just created contains
one row for every event, including responses. While we could create
epochs for all events, allowing us to pass those metadata, and later subset
the created events, there's a more elegant way to handle things:
~mne.epochs.make_metadata has a row_events parameter that
allows us to specify for which events to create metadata rows, while
still creating columns for all events in the event_id dictionary.
Because the metadata, then, only pertains to a subset of our original events,
it's important to keep the returned events and event_id around for
later use when we're actually going to create our epochs, to ensure that
metadata, events, and event descriptions stay in sync.
End of explanation
"""
keep_first = 'response'
metadata, events, event_id = mne.epochs.make_metadata(
events=all_events, event_id=all_event_id,
tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],
row_events=row_events,
keep_first=keep_first)
# visualize response times regardless of side
metadata['response'].plot.hist(bins=50, title='Response Times')
# the "first_response" column contains only "left" and "right" entries, derived
# from the initial event named "response/left" and "response/right"
print(metadata['first_response'])
"""
Explanation: Keeping only the first events of a group
The metadata now contains 400 rows – one per stimulation – and the same
number of columns as before. Great!
We have two types of responses in our data: response/left and
response/right. We would like to map those to "correct" and "incorrect".
To make this easier, we can ask ~mne.epochs.make_metadata to generate an
entirely new column that refers to the first response observed during the
given time interval. This works by passing a subset of the
:term:hierarchical event descriptors (HEDs, inspired by
:footcite:BigdelyShamloEtAl2013) used to name events via the keep_first
parameter. For example, in the case of the HEDs response/left and
response/right, we could pass keep_first='response' to generate a new
column, response, containing the latency of the respective event. This
value pertains only the first (or, in this specific example: the only)
response, regardless of side (left or right). To indicate which event
type (here: response side) was matched, a second column is added:
first_response. The values in this column are the event types without the
string used for matching, as it is already encoded as the column name, i.e.
in our example, we expect it to only contain 'left' and 'right'.
End of explanation
"""
# Show rows in which BOTH a left-target and a right-target compatible
# stimulus fell inside the metadata time window, i.e. rows with more than
# one stimulus event.
metadata.loc[metadata['stimulus/compatible/target_left'].notna() &
             metadata['stimulus/compatible/target_right'].notna(),
             :]
"""
Explanation: We're facing a similar issue with the stimulus events, and now there are not
only two, but four different types: stimulus/compatible/target_left,
stimulus/compatible/target_right, stimulus/incompatible/target_left,
and stimulus/incompatible/target_right. Even more, because in the present
paradigm stimuli were presented in rapid succession, sometimes multiple
stimulus events occurred within the 1.5 second time window we're using to
generate our metadata. See for example:
End of explanation
"""
# keep_first accepts a list: create "stimulus"/"first_stimulus" and
# "response"/"first_response" column pairs in one call.
keep_first = ['stimulus', 'response']
metadata, events, event_id = mne.epochs.make_metadata(
    events=all_events, event_id=all_event_id,
    tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],
    row_events=row_events,
    keep_first=keep_first)
# all times of the time-locked events should be zero
assert all(metadata['stimulus'] == 0)
# the values in the new "first_stimulus" and "first_response" columns indicate
# which events were selected via "keep_first"
metadata[['first_stimulus', 'first_response']]
"""
Explanation: This can easily lead to confusion during later stages of processing, so let's
create a column for the first stimulus – which will always be the time-locked
stimulus, as our time interval starts at 0 seconds. We can pass a list of
strings to keep_first.
End of explanation
"""
# Derive the side of stimulation from the first stimulus seen in each row,
# then flag a response as correct when it matches that side.
for side in ('left', 'right'):
    stimulus_names = ['compatible/target_' + side,
                      'incompatible/target_' + side]
    side_rows = metadata['first_stimulus'].isin(stimulus_names)
    metadata.loc[side_rows, 'stimulus_side'] = side

# Start from "all incorrect", then flip the rows whose response side matches
# the stimulation side.
metadata['response_correct'] = False
matching = metadata['stimulus_side'] == metadata['first_response']
metadata.loc[matching, 'response_correct'] = True

correct_response_count = metadata['response_correct'].sum()
print(f'Correct responses: {correct_response_count}\n'
      f'Incorrect responses: {len(metadata) - correct_response_count}')
"""
Explanation: Adding new columns to describe stimulation side and response correctness
Perfect! Now it's time to define which responses were correct and incorrect.
We first add a column encoding the side of stimulation, and then simply
check whether the response matches the stimulation side, and add this result
to another column.
End of explanation
"""
epochs_tmin, epochs_tmax = -0.1, 0.4  # epochs range: [-0.1, 0.4] s
reject = {'eeg': 250e-6}  # exclude epochs with strong artifacts
# Pass events/event_id/metadata as returned by make_metadata so that epochs
# and metadata rows stay in sync (row_events restricted the metadata).
epochs = mne.Epochs(raw=raw, tmin=epochs_tmin, tmax=epochs_tmax,
                    events=events, event_id=event_id, metadata=metadata,
                    reject=reject, preload=True)
"""
Explanation: Creating Epochs with metadata, and visualizing ERPs
It's finally time to create our epochs! We set the metadata directly on
instantiation via the metadata parameter. Also it is important to
remember to pass events and event_id as returned from
~mne.epochs.make_metadata, as we only created metadata for a subset of
our original events by passing row_events. Otherwise, the length
of the metadata and the number of epochs would not match and MNE-Python
would raise an error.
End of explanation
"""
# ERPs for all correct-response trials vs. the subset of correct-response
# trials with a slow reaction time (> 0.5 s), as described in the text.
vis_erp = epochs['response_correct'].average()
# BUG FIX: the query previously read '(not response_correct) & (response > 0.3)',
# i.e. it selected *incorrect* responses, contradicting both the plot title
# below ('Slow Correct Responses') and the surrounding text (> 0.5 s).
vis_erp_slow = epochs['response_correct & '
                      '(response > 0.5)'].average()
fig, ax = plt.subplots(2, figsize=(6, 6))
vis_erp.plot(gfp=True, spatial_colors=True, axes=ax[0])
vis_erp_slow.plot(gfp=True, spatial_colors=True, axes=ax[1])
ax[0].set_title('Visual ERPs – All Correct Responses')
ax[1].set_title('Visual ERPs – Slow Correct Responses')
fig.tight_layout()
fig
"""
Explanation: Lastly, let's visualize the ERPs evoked by the visual stimulation, once for
all trials with correct responses, and once for all trials with correct
responses and a response time greater than 0.5 seconds
(i.e., slow responses).
End of explanation
"""
# Response-locked metadata: look back 1.5 s from each response and keep the
# *last* stimulus/response in the window (keep_last mirrors keep_first but
# produces last_* columns).
metadata_tmin, metadata_tmax = -1.5, 0
row_events = ['response/left', 'response/right']
keep_last = ['stimulus', 'response']
metadata, events, event_id = mne.epochs.make_metadata(
    events=all_events, event_id=all_event_id,
    tmin=metadata_tmin, tmax=metadata_tmax, sfreq=raw.info['sfreq'],
    row_events=row_events,
    keep_last=keep_last)
"""
Explanation: Aside from the fact that the data for the (much fewer) slow responses looks
noisier – which is entirely to be expected – not much of an ERP difference
can be seen.
Applying the knowledge: visualizing the ERN component
In the following analysis, we will use the same dataset as above, but
we'll time-lock our epochs to the response events, not to the stimulus
onset. Comparing ERPs associated with correct and incorrect behavioral
responses, we should be able to see the error-related negativity (ERN) in
the difference wave.
Since we want to time-lock our analysis to responses, for the automated
metadata generation we'll consider events occurring up to 1500 ms before
the response trigger.
We only wish to consider the last stimulus and response in each time
window: Remember that we're dealing with rapid stimulus presentations in
this paradigm; taking the last response – at time point zero – and the last
stimulus – the one closest to the response – ensures we actually create
the right stimulus-response pairings. We can achieve this by passing the
keep_last parameter, which works exactly like keep_first we got to
know above, only that it keeps the last occurrences of the specified
events and stores them in columns whose names start with last_.
End of explanation
"""
# Determine the stimulation side from the *last* stimulus preceding the
# response, then flag responses matching that side as correct.
for side in ('left', 'right'):
    stimulus_names = ['compatible/target_' + side,
                      'incompatible/target_' + side]
    side_rows = metadata['last_stimulus'].isin(stimulus_names)
    metadata.loc[side_rows, 'stimulus_side'] = side

# Default everything to incorrect, then flip the matching rows to True.
metadata['response_correct'] = False
matching = metadata['stimulus_side'] == metadata['last_response']
metadata.loc[matching, 'response_correct'] = True
metadata
"""
Explanation: Exactly like in the previous example, create new columns stimulus_side
and response_correct.
End of explanation
"""
epochs_tmin, epochs_tmax = -0.6, 0.4
# Baseline ends well before t=0: motor/muscle activity precedes the button
# trigger, so the window next to the response must not be used for baseline.
baseline = (-0.4, -0.2)
reject = {'eeg': 250e-6}
epochs = mne.Epochs(raw=raw, tmin=epochs_tmin, tmax=epochs_tmax,
                    baseline=baseline, reject=reject,
                    events=events, event_id=event_id, metadata=metadata,
                    preload=True)
"""
Explanation: Now it's already time to epoch the data! When deciding upon the epochs
duration for this specific analysis, we need to ensure we see quite a bit of
signal from before and after the motor response. We also must be aware of
the fact that motor-/muscle-related signals will most likely be present
before the response button trigger pulse appears in our data, so the time
period close to the response event should not be used for baseline
correction. But at the same time, we don't want to use a baseline
period that extends too far away from the button event. The following values
seem to work quite well.
End of explanation
"""
# Sanity check: list epochs whose metadata row has no preceding stimulus.
missing_stimulus = epochs.metadata['last_stimulus'].isna()
epochs.metadata[missing_stimulus]
"""
Explanation: Let's do a final sanity check: we want to make sure that in every row, we
actually have a stimulus. We use epochs.metadata (and not metadata)
because when creating the epochs, we passed the reject parameter, and
MNE-Python always ensures that epochs.metadata stays in sync with the
available epochs.
End of explanation
"""
# Keep only epochs that actually have a preceding stimulus (drops rows whose
# 'last_stimulus' is None/NaN, found in the sanity check above).
epochs = epochs['last_stimulus.notna()']
"""
Explanation: Bummer! It seems the very first two responses were recorded before the
first stimulus appeared: the values in the stimulus column are None.
There is a very simple way to select only those epochs that do have a
stimulus (i.e., are not None):
End of explanation
"""
# Response-locked ERPs split by correctness; FCz is the canonical ERN site.
resp_erp_correct = epochs['response_correct'].average()
resp_erp_incorrect = epochs['not response_correct'].average()
mne.viz.plot_compare_evokeds({'Correct Response': resp_erp_correct,
                              'Incorrect Response': resp_erp_incorrect},
                             picks='FCz', show_sensors=True,
                             title='ERPs at FCz, time-locked to response')
# topoplot of average field from time 0.0-0.1 s
# NOTE(review): the 'title' parameter of plot_topomap was removed in recent
# MNE releases — confirm against the installed version.
resp_erp_incorrect.plot_topomap(times=0.05, average=0.05, size=3,
                                title='Avg. topography 0–100 ms after '
                                      'incorrect responses')
"""
Explanation: Time to calculate the ERPs for correct and incorrect responses.
For visualization, we'll only look at sensor FCz, which is known to show
the ERN nicely in the given paradigm. We'll also create a topoplot to get an
impression of the average scalp potentials measured in the first 100 ms after
an incorrect response.
End of explanation
"""
# difference wave: incorrect minus correct responses
resp_erp_diff = mne.combine_evoked([resp_erp_incorrect, resp_erp_correct],
                                   weights=[1, -1])
fig, ax = plt.subplots()
resp_erp_diff.plot(picks='FCz', axes=ax, selectable=False, show=False)
# make ERP trace bolder
ax.lines[0].set_linewidth(1.5)
# add lines through origin
ax.axhline(0, ls='dotted', lw=0.75, color='gray')
ax.axvline(0, ls=(0, (10, 10)), lw=0.75, color='gray',
           label='response trigger')
# mark trough (sample index of the most negative FCz value, converted to s)
trough_time_idx = resp_erp_diff.copy().pick('FCz').data.argmin()
trough_time = resp_erp_diff.times[trough_time_idx]
ax.axvline(trough_time, ls=(0, (10, 10)), lw=0.75, color='red',
           label='max. negativity')
# legend, axis labels, title
ax.legend(loc='lower left')
ax.set_xlabel('Time (s)', fontweight='bold')
ax.set_ylabel('Amplitude (µV)', fontweight='bold')
ax.set_title('Channel: FCz')
fig.suptitle('ERN (Difference Wave)', fontweight='bold')
fig
"""
Explanation: We can see a strong negative deflection immediately after incorrect
responses, compared to correct responses. The topoplot, too, leaves no doubt:
what we're looking at is, in fact, the ERN.
Some researchers suggest to construct the difference wave between ERPs for
correct and incorrect responses, as it more clearly reveals signal
differences, while ideally also improving the signal-to-noise ratio (under
the assumption that the noise level in "correct" and "incorrect" trials is
similar). Let's do just that and put it into a publication-ready
visualization.
End of explanation
"""
|
jaduimstra/nilmtk | docs/manual/user_guide/disaggregation_and_metrics.ipynb | apache-2.0 | import time
from matplotlib import rcParams
import matplotlib.pyplot as plt
%matplotlib inline
rcParams['figure.figsize'] = (13, 6)
plt.style.use('ggplot')
from nilmtk import DataSet, TimeFrame, MeterGroup, HDFDataStore
from nilmtk.disaggregate import CombinatorialOptimisation
"""
Explanation: Disaggregation and Metrics
End of explanation
"""
# Two independent DataSet handles on the same file so that train and test
# can be given different (non-overlapping) time windows below.
train = DataSet('/data/REDD/redd.h5')
test = DataSet('/data/REDD/redd.h5')
"""
Explanation: Dividing data into train and test set
End of explanation
"""
# Use building 1 throughout; plot its aggregate (mains) power.
building = 1
train.buildings[building].elec.mains().plot()
"""
Explanation: Let us use building 1 for demo purposes
End of explanation
"""
# Split the recording at April 30th: everything before is training data,
# everything after is test data.
train.set_window(end="30-4-2011")
test.set_window(start="30-4-2011")

# Use the 'building' variable (defined above) instead of a hard-coded 1,
# keeping this cell consistent with the rest of the notebook.
train_elec = train.buildings[building].elec
test_elec = test.buildings[building].elec

train_elec.mains().plot()
test_elec.mains().plot()
"""
Explanation: Let's split data at April 30th
End of explanation
"""
# Load one chunk each of fridge (submeter) and mains data to inspect their
# sample rates.  next(iterator) replaces iterator.next(), which is
# Python-2-only; next() works on both Python 2 and 3.
fridge_meter = train_elec['fridge']
fridge_df = next(fridge_meter.load())
fridge_df.head()

mains = train_elec.mains()
mains_df = next(mains.load())
mains_df.head()
"""
Explanation: The REDD data set has appliance-level data sampled every 3 or 4 seconds and mains data sampled every 1 second. Let us verify this.
To allow disaggregation to be done on any arbitrarily large dataset, disaggregation output is dumped to disk chunk-by-chunk:
End of explanation
"""
# Keep only the five submeters with the highest energy consumption; these
# are used to train the FHMM and CO models below.
top_5_train_elec = train_elec.submeters().select_top_k(k=5)
top_5_train_elec
"""
Explanation: Since, both of these are sampled at different frequencies, we will downsample both to 1 minute resolution. We will also select the top-5 appliances in terms of energy consumption and use them for training our FHMM and CO models.
Selecting top-5 appliances
End of explanation
"""
# ---- FHMM: train on the top-5 submeters, disaggregate the test mains ----
# print(...) is used throughout instead of the Python-2-only 'print x'
# statement form; the parenthesised call works on both Python 2 and 3.
start = time.time()
from nilmtk.disaggregate import fhmm_exact
fhmm = fhmm_exact.FHMM()
# Note that we have given the sample period to downsample the data to 1 minute
fhmm.train(top_5_train_elec, sample_period=60)
end = time.time()
print(end - start)  # training time in seconds

disag_filename = '/data/REDD/redd-disag-fhmm.h5'
output = HDFDataStore(disag_filename, 'w')
# Note that we have mentioned to disaggregate after converting to a sample period of 60 seconds
fhmm.disaggregate(test_elec.mains(), output, sample_period=60)
output.close()

disag_fhmm = DataSet(disag_filename)
disag_fhmm_elec = disag_fhmm.buildings[building].elec

from nilmtk.metrics import f1_score
f1_fhmm = f1_score(disag_fhmm_elec, test_elec)
f1_fhmm.index = disag_fhmm_elec.get_labels(f1_fhmm.index)
f1_fhmm.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("FHMM");

# ---- Combinatorial Optimisation: same train/disaggregate/score cycle ----
start = time.time()
from nilmtk.disaggregate import CombinatorialOptimisation
co = CombinatorialOptimisation()
# Note that we have given the sample period to downsample the data to 1 minute
co.train(top_5_train_elec, sample_period=60)
end = time.time()
print(end - start)  # training time in seconds

disag_filename = '/data/REDD/redd-disag-co.h5'
output = HDFDataStore(disag_filename, 'w')
# Note that we have mentioned to disaggregate after converting to a sample period of 60 seconds
co.disaggregate(test_elec.mains(), output, sample_period=60)
output.close()

disag_co = DataSet(disag_filename)
disag_co_elec = disag_co.buildings[building].elec

from nilmtk.metrics import f1_score
f1_co = f1_score(disag_co_elec, test_elec)
f1_co.index = disag_co_elec.get_labels(f1_co.index)
f1_co.plot(kind='barh')
plt.ylabel('appliance');
plt.xlabel('f-score');
plt.title("CO");
"""
Explanation: Training and disaggregation
FHMM
End of explanation
"""
|
pablosv/dynamic_multifarious | analysis.ipynb | gpl-3.0 | # General libraries
import os
import pickle
import numpy as np
# Plot, in nb, only when .show() is called
import matplotlib.pyplot as plt
%matplotlib notebook
plt.ioff()
# Personal libraries
import tools.evaluation as ev
import tools.plot as pt
"""
Explanation: Script that analyzes the results from multifarious assembly
Load data
We start by loading the stored files. For this, we load the evaluation library
End of explanation
"""
# Collect the simulation output directories for this run family.
path = '/data/sartori/Data/multifarious'
# str.startswith is clearer and less fragile than the length-dependent slice
# x[0:15] == 'Small_OscEa5_Eb' (equivalent here: the prefix is 15 chars).
dir_list = [path + '/' + x for x in os.listdir(path)
            if x.startswith('Small_OscEa5_Eb')]
dir_list.sort()
"""
Explanation: Before evaluating the simulation results, we scan the folders and create a directory list. Note that each type of simulation has a five-letter heading, MfErr.
End of explanation
"""
# Collect, per (Ea, Eb/mu) parameter pair, the per-run error traces,
# assembly sequences, durations, and transition matrices.
# print(...) replaces the Python-2-only 'print x' statement form and works
# on both Python 2 and 3.
errors_data = {}
sequence_data = {}
duration_data = {}
transition_data = {}
for d in dir_list:
    #if os.path.isfile(d + '/errors.npy'): # errors.npy
    try:
        print(d)
        # Load errors-data
        #errors = ev.errors_from_files(d)
        errors = np.load(d + '/errors.npy')
        # Create data-key: parameters encoded in the directory name,
        # e.g. ..._Ea5_Eb-9 -> (5.0, -9.0)
        key = (float(d.split('_')[2][2:]), float(d.split('_')[3][3:]))
        # Post-process data
        sequence = ev.sequence_from_files(path=d, to_file=True)
        duration = ev.durations_from_files(path=d, to_file=True)
        transition = ev.transition_matrix_from_files(path=d, m=3, to_file=False)
        # Append one entry per run under the shared parameter key
        errors_data.setdefault(key, []).append(errors)
        sequence_data.setdefault(key, []).append(sequence)
        duration_data.setdefault(key, []).append(duration)
        transition_data.setdefault(key, []).append(transition)
    except IOError:
        print('error')

np.save('./tmp/Small_OscEa5_EbMu_errors.npy', errors_data)
np.save('./tmp/Small_OscEa5_EbMu_sequence.npy', sequence_data)
np.save('./tmp/Small_OscEa5_EbMu_duration.npy', duration_data)
np.save('./tmp/Small_OscEa5_EbMu_transition.npy', transition_data)
"""
Explanation: We can now read the files and store the errors over time. We store them in a dictionary which assigns to the tuple (m,M) a list with all the errors over time
End of explanation
"""
#with open(path +'/data.npy', 'wb') as f: np.save(f, data)
data_e = np.load('/Users/admin/Repos/dynamic_multifarious/tmp/Small_Ea5_EbMu_errors.npy')[()]
data_s = np.load('/Users/admin/Repos/dynamic_multifarious/tmp/Small_Ea5_EbMu_sequence.npy')[()]
#data_d = np.load('/Users/admin/Repos/dynamic_multifarious/tmp/Small_Ea5_EbMu_duration.npy')[()]
data_t = np.load('/Users/admin/Repos/dynamic_multifarious/tmp/Small_Ea5_EbMu_transition.npy')[()]
"""
Explanation: Alternatively, we can just load the file that we may have saved before
End of explanation
"""
# Choose data-set: one run (index n) at one parameter key (Ea, mu)
key = (5., -9.)
n = 2
test_errors = data_e[key][n]
print(data_s[key][n])
print(data_t[key][n][1])
#print(data_d[key][n])
# Filter errors: smooth each error trace with an exponential kernel of decay
# rate k, then rescale so the smoothed curves keep the original maxima.
k = .1
time = np.arange(np.size(test_errors[:,0]))
kernel = np.exp(-k*time)
convolved = np.array([np.convolve(x, kernel) for x in test_errors.transpose()])
convolved = convolved.transpose()[:np.size(test_errors[:,0]),:]
norms = test_errors.max(0)/convolved.max(0)
# Generate plots
# NOTE(review): Axes.set_color_cycle was removed in matplotlib 2.x; use
# set_prop_cycle(color=[...]) on newer versions.
plt.gca().set_color_cycle(['r', 'g', 'b'])
#plt.plot( (convolved*norms), linewidth=2)
plt.plot(test_errors, lw=2)
#plt.xlim(0, 3000)
plt.savefig('./tmp/trace.pdf')
plt.show()
"""
Explanation: We can plot the error over time for one example, together with its filtered counter-part
End of explanation
"""
from numpy import linalg as LA

# Accumulate, per parameter key, the summed 3x3 transition-count matrices
# over all runs that recorded at least one transition (NvecPmat[0].sum() > 1).
# Iterating the dict directly replaces the Python-2-only .iterkeys().
histograms = {}
for key in data_t:
    count = np.zeros((3, 3))
    for NvecPmat in data_t[key]:
        if NvecPmat[0].sum() > 1:
            count += NvecPmat[1]
    histograms[key] = count
"""
Explanation: Analyze the retrievability of the dynamics
Create transition histograms
End of explanation
"""
# The goal is the normalized allosteric metric: the target transition matrix
# is a cyclic permutation 1 -> 2 -> 3 -> 1, normalized to sum to 1.
A = np.array([[0,1,0],[0,0,1],[1,0,0]]) / 3.
# We run over all histograms; score = |1 - distance| so that a histogram
# matching A perfectly scores 1 and an empty histogram scores 0.
xyz = []
for key in histograms.keys():
    if np.sum(histograms[key]) == 0:
        distance = 1.
    else:
        distance = LA.norm(histograms[key]/np.sum(histograms[key]) - A)#/np.sum(histograms[key])
    xyz.append( (key[0], key[1], np.abs(1-distance)) )
XYZ = np.asarray(xyz)
"""
Explanation: We can now create a metric with the distance between the obtained histogram and the goal histogram
End of explanation
"""
import matplotlib.cm as cm
# Phase-space heatmap of the retrieval score over (mu, Eb); note mu1 = -key[1]
# flips the sign of the stored chemical potential.
Eb, mu1, score = XYZ[:,0], -XYZ[:,1], XYZ[:,2]
energies = np.arange(0, 30, .1)
fig = plt.figure(1)
ax = fig.add_subplot(111)
heatmap = plt.tricontourf(mu1, Eb, score, 10, norm=plt.Normalize(vmax=1., vmin=0.), cmap=cm.inferno,alpha=.5)
ax.scatter(mu1, Eb, c=score,cmap=cm.inferno, s=30, lw = .5)
# Theoretical phase boundaries: Eb = mu/2 and Eb = 5 (dashed lines).
bound1 = ax.plot(energies, energies/2, 'k--',lw=2)
bound2 = ax.plot(energies, energies*0+5., 'k--',lw=2)
plt.colorbar(heatmap,alpha=1.)
plt.xlim(0, 20)
plt.ylim(0, 11)
plt.xlabel('Chemical potential, $\mu$',fontsize=16)
plt.ylabel('Binding energy, $E_B$',fontsize=16)
plt.savefig('./tmp/space_EB_MU_EA5.pdf')
plt.show()
"""
Explanation: We can now create a metric with the distance between the obtained histogram and the goal histogram
End of explanation
"""
from numpy import linalg as LA

# Gather, per parameter key, the array of assembly durations pooled over all
# runs of that key.
durations = {}
for key in data_d:  # direct dict iteration replaces Python-2-only .iterkeys()
    # BUG FIX: 'count' was never initialised here, so it silently reused
    # whatever array was left over from a previous cell AND kept accumulating
    # across keys; reset it per key.
    count = np.array([])
    for wow in data_d[key]:
        count = np.append(count, wow[0])
    durations[key] = count

#[(7.0, -11.9), (7.0, -12.25), (7.0, -11.2), (7.0, -11.55)]
# NOTE(review): 'normed' was removed in matplotlib 3.x; use density=1 on
# newer versions.
plt.hist(durations[(7, -12.25)], 30, normed=1, facecolor='orange')
plt.savefig('./tmp/Osc_peak.pdf')
plt.show()
"""
Explanation: Analyze the duration distribution // ADD ATP!!!
End of explanation
"""
# NOTE(review): this "dragons" section is Python-2-only (builtin reload,
# dict.iterkeys) and references a 'data' dict presumably produced by an
# earlier, differently-named cell — confirm before re-running.
[x+1 for x in [10., 15., 20., 25., 30., 35., 40.]]
reload(ev)
# Average, per key, the final error (last row, column 0) over all runs.
xyz = []
for key in data.iterkeys():
    my_error = 0.
    runs = len(data[key])
    for err_time in data[key]: my_error += err_time[-1,0] / runs
    xyz.append( (key[0], key[1], my_error) )
XYZ = np.asarray(xyz)
# Group rows by the second column's value (number of species): sort by it,
# then split wherever consecutive values differ.
structures = np.split(XYZ[np.argsort(XYZ[:,1]),0], np.where(np.diff(XYZ[np.argsort(XYZ[:,1]),1]) != 0)[0]+1)
species = np.split(XYZ[np.argsort(XYZ[:,1]),1], np.where(np.diff(XYZ[np.argsort(XYZ[:,1]),1]) != 0)[0]+1)
errors = np.split(XYZ[np.argsort(XYZ[:,1]),2], np.where(np.diff(XYZ[np.argsort(XYZ[:,1]),1]) != 0)[0]+1)
"""
Explanation: BELOW BE DRAGONS!!!
Analysis of error dynamics
End of explanation
"""
# Retrieval error vs. number of stored structures, one grey level per species
# count M (darker = larger M).
fig = plt.figure()
ax1 = fig.add_subplot(111)
for sp, st, er in zip(species, structures, errors)[4::2]:
    ax1.scatter(st , er, s=35, c=str(min(1.,40./sp[0])), marker="o", label='$M=$'+str(int(sp[0])))
plt.xlabel('stored structures, $m$',fontdict={'fontsize':20})
plt.ylabel('retrieval error',fontdict={'fontsize':20})
plt.axis([0, 30, 0, 1])
plt.legend(loc='lower right');
plt.show()

# Minimal error (single stored structure) as a function of M.
species_single = [min(s) for s in species]
error_min = [min(e) for e in errors]
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(species_single , error_min, s=35, c='k', marker="o")
plt.xlabel('species, $M$',fontdict={'fontsize':20})
plt.ylabel('minimal retrieval error ($m=1$)',fontdict={'fontsize':20})
plt.axis([0, 300, 0, 1])
plt.show()

# Capacity m_1/2: number of structures at which the error crosses 0.5.
x = [min(s) for s in species]
m_half = [st[np.argmin(np.abs(e-.5))] for e, st in zip(errors, structures)]
e_half = [e[np.argmin(np.abs(e-.5))] for e, st in zip(errors, structures)]
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(x, m_half, s=35, c='k', marker="o")
plt.xlabel('species, $M$',fontdict={'fontsize':20})
plt.ylabel('retrievable structures, $m_{1/2}$',fontdict={'fontsize':20})
plt.axis([0, 305, 0, 30])
plt.show()
"""
Explanation: We now generate a plot
End of explanation
"""
|
gchrupala/reimaginet | notes.ipynb | mit | %pylab inline
from ggplot import *
import pandas as pd
data = pd.DataFrame(
dict(epoch=range(1,11)+range(1,11)+range(1,11)+range(1,8)+range(1,11)+range(1,11),
model=hstack([repeat("char-3-grow", 10),
repeat("char-1", 10),
repeat("char-3", 10),
repeat("visual", 7),
repeat("multitask",10),
repeat("sum", 10)]),
recall=[#char-3-grow lw0222.uvt.nl:/home/gchrupala/reimaginet/run-110-phon
0.097281087565,
0.140863654538,
0.161015593762,
0.173410635746,
0.176969212315,
0.175529788085,
0.175089964014,
0.174010395842,
0.173370651739,
0.173050779688,
# char-1 yellow.uvt.nl:/home/gchrupala/repos/reimagine/run-200-phon
0.100919632147,
0.127588964414,
0.140583766493,
0.148300679728,
0.150739704118,
0.153338664534,
0.156657337065,
0.159016393443,
0.159056377449,
0.160655737705,
# char-3 yellow.uvt.nl:/home/gchrupala/repos/reimagine/run-201-phon
0.078368652539,
0.125789684126,
0.148140743703,
0.158216713315,
0.163694522191,
0.168612554978,
0.172570971611,
0.17181127549,
0.171531387445,
0.170611755298,
# visual
0.160015993603,
0.184406237505,
0.193202718912,
0.19956017593,
0.201079568173,
0.201719312275,
0.19944022391,
# multitask
0.16093562575,
0.185525789684,
0.194482207117,
0.202758896441,
0.203558576569,
0.20243902439,
0.199240303878,
0.195361855258,
0.193242702919,
0.189924030388,
# sum
0.137984806078,
0.145581767293,
0.149340263894,
0.151819272291,
0.152898840464,
0.154218312675,
0.155257896841,
0.155697720912,
0.15637744902,
0.156657337065
]))
def standardize(x, ddof=0):
    """Return *x* as z-scores: (x - mean(x)) / std(x).

    Parameters
    ----------
    x : array_like
        Values to standardize (converted with numpy.asarray).
    ddof : int, optional
        Delta degrees of freedom forwarded to numpy.std.  The default 0
        keeps the original behaviour (population standard deviation);
        pass 1 for the sample standard deviation.

    Returns
    -------
    numpy.ndarray
        Standardized values with zero mean.
    """
    x = numpy.asarray(x)
    return (x - numpy.mean(x)) / numpy.std(x, ddof=ddof)
"""
Explanation: Notes
Development and evaluation of imaginet and related models.
End of explanation
"""
# Learning curves: textual/compositional variants vs. the visual/multitask ones.
ggplot(data.loc[data['model'].isin(['sum','char-1','char-3','char-3-grow','multitask'])],
       aes(x='epoch', y='recall', color='model')) + geom_line(size=3) + theme()
ggplot(data.loc[data['model'].isin(['visual','multitask','sum'])],
       aes(x='epoch', y='recall', color='model')) + geom_line(size=3) + theme()
# Recall per epoch for the two models trained on the extended (restval) data.
data_grow = pd.DataFrame(dict(epoch=range(1,11)+range(1,11),
                              model=hstack([repeat("gru-2-grow", 10),repeat("gru-1", 10)]),
                              recall=[#gru-1
                                      0.170971611355,
                                      0.192163134746,
                                      0.206797281088,
                                      0.211355457817,
                                      0.21331467413,
                                      0.218992403039,
                                      0.214674130348,
                                      0.214634146341,
                                      0.214434226309,
                                      0.212115153938,
                                      # gru-2-grow
                                      0.173730507797,
                                      0.198320671731,
                                      0.206117552979,
                                      0.211715313874,
                                      0.212914834066,
                                      0.211915233906,
                                      0.209956017593,
                                      0.210795681727,
                                      0.209076369452,
                                      0.208996401439
                                      ]))
"""
Explanation: Image retrieval evaluation
Models:
- Sum - additively composed word embeddings (1024 dimensions)
- Visual - Imaginet with disabled textual pathway (1024 embeddings + 1 x 1024 hidden
- Multitask - Full Imaginet model (1024 embeddings + 1 x 1024 hidden)
- Char-1 - Model similar to imaginet, but trained on character-level. Captions are lowecases, with spaces removed. The model has 256 character embeddings + 3 layers of 1024 recurrent hidden layers.
- Char-3 - Like above, but 3 GRU layers
- Char-3-grow. Like above, but layers >1 initialized to pre-trained approximate identity
Remarks:
- Models NOT trained on extra train data (restval)
End of explanation
"""
# Learning curves for the models trained with the extra (restval) data.
ggplot(data_grow, aes(x='epoch', y='recall', color='model')) + geom_line(size=3) + theme()
"""
Explanation: Models:
- GRU-1 - Imaginet (1024 emb + 1 x 1024 hidden)
- GRU-2 grow - Imaginet (1024 emb + 2 x 1024 hidden)
Remarks:
- Models trained on extra train data (restval)
- Layers >1 initialized to pre-trained approximate identity
End of explanation
"""
|
kubeflow/pipelines | components/gcp/ml_engine/batch_predict/sample.ipynb | apache-2.0 | %%capture --no-stderr
!pip3 install kfp --upgrade
"""
Explanation: Name
Batch prediction using Cloud Machine Learning Engine
Label
Cloud Storage, Cloud ML Engine, Kubeflow, Pipeline, Component
Summary
A Kubeflow Pipeline component to submit a batch prediction job against a deployed model on Cloud ML Engine.
Details
Intended use
Use the component to run a batch prediction job against a deployed model on Cloud ML Engine. The prediction output is stored in a Cloud Storage bucket.
Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|--------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------|--------------|-----------------|---------|
| project_id | The ID of the Google Cloud Platform (GCP) project of the job. | No | GCPProjectID | | |
| model_path | The path to the model. It can be one of the following:<br/> <ul> <li>projects/[PROJECT_ID]/models/[MODEL_ID]</li> <li>projects/[PROJECT_ID]/models/[MODEL_ID]/versions/[VERSION_ID]</li> <li>The path to a Cloud Storage location containing a model file.</li> </ul> | No | GCSPath | | |
| input_paths | The path to the Cloud Storage location containing the input data files. It can contain wildcards, for example, gs://foo/*.csv | No | List | GCSPath | |
| input_data_format | The format of the input data files. See REST Resource: projects.jobs for more details. | No | String | DataFormat | |
| output_path | The path to the Cloud Storage location for the output data. | No | GCSPath | | |
| region | The Compute Engine region where the prediction job is run. | No | GCPRegion | | |
| output_data_format | The format of the output data files. See REST Resource: projects.jobs for more details. | Yes | String | DataFormat | JSON |
| prediction_input | The JSON input parameters to create a prediction job. See PredictionInput for more information. | Yes | Dict | | None |
| job_id_prefix | The prefix of the generated job id. | Yes | String | | None |
| wait_interval | The number of seconds to wait in case the operation has a long run time. | Yes | | | 30 |
Input data schema
The component accepts the following as input:
A trained model: It can be a model file in Cloud Storage, a deployed model, or a version in Cloud ML Engine. Specify the path to the model in the model_pathruntime argument.
Input data: The data used to make predictions against the trained model. The data can be in multiple formats. The data path is specified by input_paths and the format is specified by input_data_format.
Output
Name | Description | Type
:--- | :---------- | :---
job_id | The ID of the created batch job. | String
output_path | The output path of the batch prediction job | GCSPath
Cautions & requirements
To use the component, you must:
Set up a cloud environment by following this guide.
The component can authenticate to GCP. Refer to Authenticating Pipelines to GCP for details.
Grant the following types of access to the Kubeflow user service account:
Read access to the Cloud Storage buckets which contains the input data.
Write access to the Cloud Storage bucket of the output directory.
Detailed description
Follow these steps to use the component in a pipeline:
Install the Kubeflow Pipeline SDK:
End of explanation
"""
import kfp.components as comp

# Load the ML Engine batch-predict component (pinned to release 1.7.0-rc.3)
# and show its generated signature/docstring.
mlengine_batch_predict_op = comp.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/ml_engine/batch_predict/component.yaml')
help(mlengine_batch_predict_op)
"""
Explanation: Load the component using KFP SDK
End of explanation
"""
# Preview the JSON test data that the batch prediction job will consume.
!gsutil cat gs://ml-pipeline-playground/samples/ml_engine/census/test.json
"""
Explanation: Sample Code
Note: The following sample code works in an IPython notebook or directly in Python code.
In this sample, you batch predict against a pre-built trained model from gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/ and use the test data from gs://ml-pipeline-playground/samples/ml_engine/census/test.json.
Inspect the test data
End of explanation
"""
# Required Parameters
PROJECT_ID = '<Please put your project ID here>'
GCS_WORKING_DIR = 'gs://<Please put your GCS path here>' # No ending slash

# Optional Parameters
EXPERIMENT_NAME = 'CLOUDML - Batch Predict'
# Prediction output is written under the working directory.
OUTPUT_GCS_PATH = GCS_WORKING_DIR + '/batch_predict/output/'
"""
Explanation: Set sample parameters
End of explanation
"""
import kfp.dsl as dsl
import json
@dsl.pipeline(
    name='CloudML batch predict pipeline',
    description='CloudML batch predict pipeline'
)
def pipeline(
    project_id = PROJECT_ID,
    model_path = 'gs://ml-pipeline-playground/samples/ml_engine/census/trained_model/',
    input_paths = '["gs://ml-pipeline-playground/samples/ml_engine/census/test.json"]',
    input_data_format = 'JSON',
    output_path = OUTPUT_GCS_PATH,
    region = 'us-central1',
    output_data_format='',
    prediction_input = json.dumps({
        'runtimeVersion': '1.10'
    }),
    job_id_prefix='',
    wait_interval='30'):
    """Run a Cloud ML Engine batch prediction job on the census sample model.

    All arguments map 1:1 onto the loaded batch-predict component; defaults
    point at the pre-trained census model and its JSON test data.
    """
    mlengine_batch_predict_op(
        project_id=project_id,
        model_path=model_path,
        input_paths=input_paths,
        input_data_format=input_data_format,
        output_path=output_path,
        region=region,
        output_data_format=output_data_format,
        prediction_input=prediction_input,
        job_id_prefix=job_id_prefix,
        wait_interval=wait_interval)
"""
Explanation: Example pipeline that uses the component
End of explanation
"""
# Compile the pipeline function into a deployable archive named after it.
pipeline_func = pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
"""
Explanation: Compile the pipeline
End of explanation
"""
#Specify pipeline argument values
arguments = {}

#Get or create an experiment and submit a pipeline run
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)

#Submit a pipeline run
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
"""
Explanation: Submit the pipeline for execution
End of explanation
"""
OUTPUT_FILES_PATTERN = OUTPUT_GCS_PATH + '*'
!gsutil cat OUTPUT_FILES_PATTERN
"""
Explanation: Inspect prediction results
End of explanation
"""
|
setiQuest/ML4SETI | tutorials/Removing_noise_from_a_spectrogram.ipynb | apache-2.0 | import requests
import ibmseti
import os
import zipfile
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
# Create team folder (please replace my_team_name_data_folder with your team name)
mydatafolder = os.environ['PWD'] + '/' + 'my_team_name_data_folder'
if os.path.exists(mydatafolder) is False:
os.makedirs(mydatafolder)
# Download data (if you have not already downloaded the primary small dataset)
base_url = 'https://dal.objectstorage.open.softlayer.com/v1/AUTH_cdbef52bdf7a449c96936e1071f0a46b'
filename = 'primary_small.zip'
primary_small_url = '{}/simsignals_v2_zipped/{}'.format(base_url, filename)
os.system('curl {} > {}'.format(primary_small_url, mydatafolder +'/'+filename))
filename = 'public_list_primary_v2_small_1june_2017.csv'
primary_small_csv_url = '{}/simsignals_files/{}'.format(base_url, filename)
os.system('curl {} > {}'.format(primary_small_csv_url, mydatafolder +'/'+filename))
"""
Explanation: Create Noise Spectrogram and remove from Target
Setup
End of explanation
"""
zz = zipfile.ZipFile(mydatafolder + '/' + 'primary_small.zip')
csv_data = zz.namelist()[1:]  # skip the first archive entry (presumably the top-level directory entry -- confirm)
# Compute the average "noise" spectrogram: accumulate the element-wise sum of
# every noise-class spectrogram, then divide by the number of noise files.
data = []
N = 0  # count of noise spectrograms accumulated
       # (was initialized to 1 before, so the sum of k spectrograms was
       # divided by k+1 -- an off-by-one bias in the average)
for i, v in enumerate(csv_data):
    d = zz.open(v).read()
    aca = ibmseti.compamp.SimCompamp(d)
    if aca.header()['signal_classification'] == 'noise':
        spec = aca.get_spectrogram()
        if len(data) == 0:
            data = spec
        else:
            data = data + spec
        N += 1
data = data / N
print("done")
# This is what the average noise plot looks like
fig, ax = plt.subplots(figsize=(10, 5))
ax.imshow(np.log(data), aspect = 0.5*float(data.shape[1]) / data.shape[0])
# This is the power spectrum of the image above.
plt.plot(np.sum(data, axis=0))
"""
Explanation: Calculate the Average Noise
Note: this will compute the average spectrogram using all of the "noise" spectrograms in the primary_small data. This will take a bit of time.
End of explanation
"""
# Grab some random data file
zz = zipfile.ZipFile(mydatafolder + '/' + 'primary_small.zip')
csv_data = zz.namelist()[1:]
d = zz.open(csv_data[4]).read()
aca = ibmseti.compamp.SimCompamp(d)
print aca.header()
spec = aca.get_spectrogram()
fig, ax = plt.subplots(figsize=(10, 5))
ax.imshow(np.log(spec), aspect = 0.5*float(spec.shape[1]) / spec.shape[0], cmap="gray")
plt.plot(np.sum(spec, axis=0))
# Remove the noise, then plot the same two charts over again
spec = spec - data
spec[spec < 0] = 0.00001
fig, ax = plt.subplots(figsize=(10, 5))
ax.imshow(np.log(spec), aspect = 0.5*float(spec.shape[1]) / spec.shape[0], cmap="gray")
plt.plot(np.sum(spec, axis=0))
# Make sure that if you save this data, you save it to your team folder
"""
Explanation: Extract the noise from an image
End of explanation
"""
|
paultheastronomer/OAD-Data-Science-Toolkit | Teaching Materials/Machine Learning/ml-quickstart/tutorial.ipynb | gpl-3.0 | from __future__ import division, print_function
from sklearn.datasets import make_circles
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, roc_auc_score
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeRegressor
import numpy as np
import time
import matplotlib.pyplot as plt
%matplotlib nbagg
def plot_roc(fpr, tpr):
    """Draw a single ROC curve on the current matplotlib axes.

    Parameters
    ----------
    fpr : array
        False positive rates.
    tpr : array
        True positive rates.
    """
    axis_range = [-0.05, 1.05]
    plt.plot(fpr, tpr, lw=1.5)
    plt.xlim(axis_range)
    plt.ylim(axis_range)
    plt.xlabel('False positive rate')
    plt.ylabel('True positive rate')
"""
Explanation: An Introduction to Machine Learning with Scikit-learn
By Michelle Lochner
First we need to import some tools from scikit learn and the classifiers we're going to use
End of explanation
"""
# X is the array of features, y is the array of corresponding class labels
X, y = make_circles(n_samples=1000, noise=0.1, random_state=0)
"""
Explanation: Generating data
Scikit learn has several built in datasets as well as functions to generate random data. We'll use one of these for this example but it's straightforward to put in your own data.
End of explanation
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7, random_state=0)
"""
Explanation: To avoid overfitting, we split the data into a training set, used to train the algorithm, and a test set, used to evaluate its performance. There's no hard and fast rule about how big your training set should be, as this is highly problem-dependent. Here, we'll use 70% of the data as training data.
End of explanation
"""
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
"""
Explanation: You should always rescale your features as many algorithms (including SVM and many neural network implementations) assume the features have zero mean and unit variance. They will likely underperform without scaling. In this example, the generated data are already scaled so it's unnecessary, but I leave this in to show you how it's done.
End of explanation
"""
plt.figure()
plt.plot(X_train[y_train==0,0], X_train[y_train==0,1],'.')
plt.plot(X_train[y_train==1,0], X_train[y_train==1,1],'.')
plt.legend(('Class 0', 'Class 1'))
"""
Explanation: Now we can have a look at our training data, where I've coloured the points by the class they belong to.
End of explanation
"""
clf = KNeighborsClassifier()
print(clf)
"""
Explanation: Classification
Let's start classifying! Scikit-learn is fully object-oriented so each classifier is an object. While there are dozens of different algorithms available, they all behave in the same way, implementing the same functions, meaning it's very easy to swap classifiers in and out.
Here we create a K-nearest neighbours classifier object, which has several hyperparameters that are set to default values.
End of explanation
"""
clf.fit(X_train, y_train)
"""
Explanation: This is the function that actually trains the classifier with our training data.
End of explanation
"""
y_pred = clf.predict(X_test)
print(accuracy_score(y_test, y_pred))
"""
Explanation: Now that the classifier is trained, we can use it to predect the classes of our test data and have a look at the accuracy.
End of explanation
"""
probs = clf.predict_proba(X_test)
fpr,tpr, thresh = roc_curve(y_test, probs[:,1], pos_label=1)
auc = roc_auc_score(y_test, probs[:,1])
print('Area under curve', auc)
plt.figure()
plot_roc(fpr, tpr)
"""
Explanation: But since the accuracy is only part of the story, let's get out the probability of belonging to each class so that we can generate the ROC curve.
End of explanation
"""
t1 = time.time()
clf = KNeighborsClassifier()
# Define a grid of parameters over which to search, as a dictionary
params = {'n_neighbors':np.arange(1, 30, 1), 'weights':['distance', 'uniform']}
# cv=5 means we're doing 5-fold cross validation.
clf = GridSearchCV(clf, params, cv=5)
clf.fit(X_train, y_train)
print('Time taken',time.time()-t1,'seconds')
# We can see what were the best combination of parameters
print(clf.best_params_)
"""
Explanation: Optimising hyperparameters
The K-nearest neighbours classifier has several hyperparameters that we've just left as default. If we optimise these instead, we get a better result. The most robust way to do this is with one of scikit-learn's cross validation methods. Here we'll use GridSearchCV. Naturally now the algorithm will take much longer to train as it has to train and evaluate performance several times.
End of explanation
"""
y_pred = clf.predict(X_test)
print(accuracy_score(y_test, y_pred))
"""
Explanation: Let's see if the accuracy has improved
End of explanation
"""
probs = clf.predict_proba(X_test)
fpr_knn, tpr_knn, thresh = roc_curve(y_test, probs[:,1], pos_label=1)
auc_knn = roc_auc_score(y_test, probs[:,1])
print('Area under curve', auc_knn)
plt.figure()
plot_roc(fpr_knn, tpr_knn)
"""
Explanation: The accuracy is more or less unchanged, but we do get a slightly better ROC curve
End of explanation
"""
clf = SVC(kernel='rbf', probability=True)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print(accuracy_score(y_test, y_pred))
probs = clf.predict_proba(X_test)
fpr_svm, tpr_svm, thresh = roc_curve(y_test, probs[:,1], pos_label=1)
auc_svm = roc_auc_score(y_test, probs[:,1])
print('Area under curve', auc_svm)
plt.figure()
plot_roc(fpr_knn, tpr_knn)
plot_roc(fpr_svm, tpr_svm)
plt.legend(('KNN (%.3f)' %auc_knn, 'SVM (%.3f)' %auc_svm), loc='lower right')
"""
Explanation: Using a different algorithm
Scikit-learn is designed to let you easily swap algorithms out
End of explanation
"""
np.random.seed(42)
x = np.linspace(-3,3, 100)
y = np.sin(x) + np.random.randn(len(x))*0.05
N = 25
outlier_ints = np.random.randint(0, len(x), N)
y[outlier_ints] += np.random.randn(N)*1
plt.figure()
plt.plot(x,y,'.')
plt.xlabel('x');
plt.ylabel('y');
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.25, random_state=42)
y_train = y_train[np.argsort(X_train)]
X_train.sort()
y_test = y_test[np.argsort(X_test)]
X_test.sort()
X_train = X_train[:, None] # sklearn doesn't like 1d X arrays
X_test = X_test[:, None]
dt1 = DecisionTreeRegressor(max_depth=10) # An overly complicated classifier
dt2 = DecisionTreeRegressor(max_depth=3) # A simpler classifier
dt1.fit(X_train, y_train)
dt2.fit(X_train, y_train)
y_train_1 = dt1.predict(X_train)
y_train_2 = dt2.predict(X_train)
y_test_1 = dt1.predict(X_test)
y_test_2 = dt2.predict(X_test)
plt.figure()
plt.plot(x,y,'.')
plt.plot(X_test, y_test_1, lw=1.5, alpha=0.5)
plt.plot(X_test,y_test_2, lw=1.5, alpha=0.5)
plt.xlabel('x')
plt.ylabel('y')
plt.legend(('Data', 'Max depth 10', 'Max depth 3'));
"""
Explanation: You can do cross validation to tune the hyperparameters for SVM, but it takes a bit longer than for KNN.
Over-fitting
This is a somewhat contrived example to demonstrate over-fitting.
First we make some fake data with a few outliers in it.
End of explanation
"""
mse_train = np.mean((y_train-y_train_1)**2)
mse_test = np.mean((y_test-y_test_1)**2)
mse_train, mse_test
mse_train = np.mean((y_train-y_train_2)**2)
mse_test = np.mean((y_test-y_test_2)**2)
mse_train, mse_test
"""
Explanation: You can see the more complicated decision tree learns the behaviour of the spurious outliers. It's easy to check for over-fitting, whatever metric you're using (in this case mean squared error) will show much higher performance on the training than on the test set.
End of explanation
"""
dt3 = GridSearchCV(DecisionTreeRegressor(), param_grid={'max_depth': np.arange(2,12)}, cv=5)
dt3.fit(X_train, y_train)
y_train_3 = dt3.predict(X_train)
y_test_3 = dt3.predict(X_test)
mse_train = np.mean((y_train-y_train_3)**2)
mse_test = np.mean((y_test-y_test_3)**2)
mse_train, mse_test
print(dt3.best_params_)
"""
Explanation: We'll now use cross validation to automatically choose the hyperparameters and avoid over-fitting
End of explanation
"""
|
slowvak/MachineLearningForMedicalImages | notebooks/Module 3.ipynb | mit | %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
import pandas as pd
from matplotlib.colors import ListedColormap
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import train_test_split
from sklearn.model_selection import ShuffleSplit
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
import sklearn.metrics as metrics
from sklearn import tree
from IPython.display import Image
from sklearn.externals.six import StringIO
import pydotplus
from matplotlib.colors import Normalize
from sklearn.learning_curve import learning_curve
from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler
# Utility function to move the midpoint of a colormap to be around
# the values of interest.
class MidpointNormalize(Normalize):
    """Colormap normalization whose center (0.5) maps to ``midpoint``.
    Values are mapped piecewise-linearly: vmin -> 0, midpoint -> 0.5,
    vmax -> 1, which concentrates colormap resolution around the values
    of interest (here, cross-validation scores near the best accuracy).
    """
    def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
        self.midpoint = midpoint
        Normalize.__init__(self, vmin, vmax, clip)
    def __call__(self, value, clip=None):
        # NOTE(review): the `clip` argument is accepted but ignored here,
        # matching the common matplotlib recipe this class is based on.
        x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
        return np.ma.masked_array(np.interp(value, x, y))
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
                        n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):
    """
    Generate a simple plot of the test and training learning curve.
    Parameters
    ----------
    estimator : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    title : string
        Title for the chart.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum yvalues plotted.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
        Refer :ref:`User Guide <cross_validation>` for the various
        cross-validators that can be used here.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    train_sizes : array-like, optional
        Fractions (or absolute counts) of the training set used for each
        point on the curve; passed straight through to ``learning_curve``.
    Returns
    -------
    plt : module
        The ``matplotlib.pyplot`` module, so callers can further customize,
        show, or save the figure.
    """
    plt.figure()
    plt.title(title)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    # Scores come back with one row per training size and one column per CV
    # fold; average across folds (axis=1) for plotting.
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands show +/- one standard deviation across the CV folds.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    return plt
"""
Explanation: Supervised Classification: SVM
Import Libraries
End of explanation
"""
Data=pd.read_csv ('DataExample.csv')
# if you need to print or have access to the data as numpy array you can execute the following commands
# print (Data)
# print(Data.as_matrix(columns=['NAWMpost']))
"""
Explanation: Read the dataset
In this case the training dataset is just a CSV file. For larger datasets, more advanced file formats like HDF5 are used.
Pandas is used to load the files.
End of explanation
"""
ClassBrainTissuepost=(Data['ClassTissuePost'].values)
ClassBrainTissuepost= (np.asarray(ClassBrainTissuepost))
ClassBrainTissuepost=ClassBrainTissuepost[~np.isnan(ClassBrainTissuepost)]
ClassBrainTissuepre=(Data[['ClassTissuePre']].values)
ClassBrainTissuepre= (np.asarray(ClassBrainTissuepre))
ClassBrainTissuepre=ClassBrainTissuepre[~np.isnan(ClassBrainTissuepre)]
ClassTUMORpost=(Data[['ClassTumorPost']].values)
ClassTUMORpost= (np.asarray(ClassTUMORpost))
ClassTUMORpost=ClassTUMORpost[~np.isnan(ClassTUMORpost)]
ClassTUMORpre=(Data[['ClassTumorPre']].values)
ClassTUMORpre= (np.asarray(ClassTUMORpre))
ClassTUMORpre=ClassTUMORpre[~np.isnan(ClassTUMORpre)]
X_1 = np.stack((ClassBrainTissuepost,ClassBrainTissuepre)) # we only take the first two features.
X_2 = np.stack((ClassTUMORpost,ClassTUMORpre))
X=np.concatenate((X_1.transpose(), X_2.transpose()),axis=0)
y =np.zeros((np.shape(X))[0])
y[np.shape(X_1)[1]:]=1
X= preprocessing.scale(X)
"""
Explanation: Creating training sets
Each class of tissue in our pandas framework has a pre-assigned label (Module 1).
These labels were:
- ClassTissuePost
- ClassTissuePre
- ClassTissueFlair
- ClassTumorPost
- ClassTumorPre
- ClassTumorFlair
- ClassEdemaPost
- ClassEdemaPre
- ClassEdemaFlair
For demonstration purposes we will create a feature vector that contains the intensities for the tumor and white matter areas from the T1w pre- and post-contrast images.
End of explanation
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
"""
Explanation: X is the feature vector
y are the labels
Split Training/Validation
End of explanation
"""
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.1, C=10).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['SVC with linear kernel',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Intensity post contrast')
plt.ylabel('Intensity pre contrast')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
# understanding margins
for C in [0.001,1000]:
    # Small C -> soft margin (many support vectors, wide margin);
    # large C -> hard margin (few support vectors, narrow margin).
    fig = plt.subplot()  # NOTE(review): return value unused below
    clf = svm.SVC(C,kernel='linear')
    clf.fit(X, y)
    # create a mesh to plot in
    # (y_min/y_max are computed but not used in this loop)
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx = np.linspace(x_min,x_max)
    # print (xx)
    xx=np.asarray(xx)
    # get the separating hyperplane: w.x + b = 0  =>  y = -(w0/w1)*x - b/w1
    w = clf.coef_[0]
    # print(w)
    a = -w[0] / w[1]  # slope of the decision boundary
    # print (a)
    yy = a * xx - (clf.intercept_[0]) / w[1]
    # print(yy)
    # plot the parallels to the separating hyperplane that pass through the
    # support vectors
    b = clf.support_vectors_[0]
    yy_down = a * xx + (b[1] - a * b[0])
    b = clf.support_vectors_[-1]
    yy_up = a * xx + (b[1] - a * b[0])
    # plot the line, the points, and the nearest vectors to the plane
    plt.plot(xx, yy, 'k-')
    plt.plot(xx, yy_down, 'k--')
    plt.plot(xx, yy_up, 'k--')
    plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
                s=80, facecolors='none')
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.axis('tight')
    plt.show()
"""
Explanation: Create the classifier
For the following example we will consider a SVM classifier.
The classifier is provided by the Scikit-Learn library
End of explanation
"""
print ('C=100')
model=svm.SVC(C=100,kernel='linear')
model.fit(X_train, y_train)
# make predictions
expected = y_test
predicted = model.predict(X_test)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
print (20*'---')
print ('C=0.0001')
model=svm.SVC(C=0.0001,kernel='linear')
model.fit(X_train, y_train)
# make predictions
expected = y_test
predicted = model.predict(X_test)
# summarize the fit of the model
print(metrics.classification_report(expected, predicted))
print(metrics.confusion_matrix(expected, predicted))
"""
Explanation: Run some basic analytics
Calculate some basic metrics.
End of explanation
"""
gamma_val =[0.01, .2,.3,.4,.9]
classifier = svm.SVC(kernel='rbf', C=10).fit(X, y)
classifier = GridSearchCV(estimator=classifier, cv=5, param_grid=dict(gamma=gamma_val))
classifier.fit(X_train, y_train)
"""
Explanation: Correct way
Fine tune hyperparameters
End of explanation
"""
title = 'Learning Curves (SVM, gamma=%.6f)' %classifier.best_estimator_.gamma
estimator = svm.SVC(kernel='rbf', C=10, gamma=classifier.best_estimator_.gamma)
plot_learning_curve(estimator, title, X_train, y_train, cv=4)
plt.show()
### Final evaluation on the test set
classifier.score(X_test, y_test)
"""
Explanation: Debug algorithm with learning curve
X_train is randomly split into a training and a test set 3 times (n_iter=3). Each point on the training-score curve is the average of 3 scores where the model was trained and evaluated on the first i training examples. Each point on the cross-validation score curve is the average of 3 scores where the model was trained on the first i training examples and evaluated on all examples of the test set.
End of explanation
"""
C_range = np.logspace(-2, 10, 13)
gamma_range = np.logspace(-9, 3, 13)
param_grid = dict(gamma=gamma_range, C=C_range)
cv = StratifiedShuffleSplit(n_splits=5, test_size=0.2, random_state=42)
grid_clf = GridSearchCV(SVC(), param_grid=param_grid, cv=cv)
grid_clf.fit(X, y)
print("The best parameters are %s with a score of %0.2f"
% (grid_clf.best_params_, grid_clf.best_score_))
plt.figure(figsize=(8, 6))
scores = grid_clf.cv_results_['mean_test_score'].reshape(len(C_range),
len(gamma_range))
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.jet,
norm=MidpointNormalize(vmin=0.2, midpoint=0.92))
plt.xlabel('gamma')
plt.ylabel('C')
plt.colorbar()
plt.xticks(np.arange(len(gamma_range)), gamma_range, rotation=45)
plt.yticks(np.arange(len(C_range)), C_range)
plt.title('Validation accuracy')
plt.show()
"""
Explanation: Heatmap
This will take some time...
End of explanation
"""
|
ssamot/ce888 | labs/lab3/facebook_classification.ipynb | gpl-3.0 | df = pd.read_csv("./dataset_Facebook.csv", delimiter = ";")
features = ["Category",
"Page total likes",
"Type",
"Post Month",
"Post Hour",
"Post Weekday",
"Paid"]
df[features].head()
outcomes= ["Lifetime Post Total Reach",
"Lifetime Post Total Impressions",
"Lifetime Engaged Users",
"Lifetime Post Consumers",
"Lifetime Post Consumptions",
"Lifetime Post Impressions by people who have liked your Page",
"Lifetime Post reach by people who like your Page",
"Lifetime People who have liked your Page and engaged with your post",
"comment",
"like",
"share",
"Total Interactions"]
df[outcomes].head()
print df[outcomes[-3:]].head().to_latex()
# convert a string variable to a categorical one
#types = list(set(df["Type"]))
#to_categorical = {types[i]:i for i in range(len(types))}
#df["Type"] = df["Type"].apply(lambda x: to_categorical[x])
df[["Type"]] = df[["Type"]].apply(LabelEncoder().fit_transform)
df.head()
"""
Explanation: We have loaded the necessary libraries above
Now let's load the data
End of explanation
"""
df = df.dropna()
outcomes_of_interest = ["Lifetime Post Consumers", "like"]
n_bins = 10
X_df = df[features].copy()
y_df = df[outcomes_of_interest].copy()
#print X_df.head().to_latex()
#print y_df.values
bins = pd.qcut(y_df[outcomes_of_interest[0]].values,n_bins)
y_df = df[outcomes_of_interest].copy()
y_df[outcomes_of_interest[0]] = bins
y_df[outcomes_of_interest] = y_df[outcomes_of_interest].apply(LabelEncoder().fit_transform)
print y_df.head()
X = X_df.values
y = y_df.values.T[0]
# # import seaborn as sns
y_df['id'] = range(1, len(df) + 1)
y_df.head()
# sns_plot = sns.lmplot(x="id", y= attribute, data=y_df, fit_reg=False, aspect = 2)
# sns_plot.savefig("scaterplot_lpc.png",bbox_inches='tight')
# sns_plot.savefig("scaterplot_lpc.pdf",bbox_inches='tight')
sns_plot = sns.jointplot(x="Lifetime Post Consumers", y="like", data=y_df, ratio = 2)
sns_plot.savefig("joint_plot.png",bbox_inches='tight')
sns_plot.savefig("joint_plot.pdf",bbox_inches='tight')
# sns.distplot(y, kde=False, rug=True)
sns_plot.savefig("histogram_lpc.png",bbox_inches='tight')
sns_plot.savefig("histogram_lpc.pdf",bbox_inches='tight')
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier

# Compare a tree ensemble (10-fold cross-validated accuracy) against a
# DummyClassifier baseline fit on the whole dataset.
clf = ExtraTreesClassifier(n_estimators = 2000,max_depth = 4)
dummy_clf = DummyClassifier()
scores = cross_val_score(clf, X, y, cv=10,scoring = make_scorer(acc))
dummy_clf.fit(X,y)
print("ACC: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std()))
# Format inside the call: the old `print("...") % (...)` form only works as a
# Python 2 print *statement* and raises TypeError under Python 3.
print("Dummy ACC: %0.2f" % (acc(y, dummy_clf.predict(X))))
"""
Explanation: Now let's prepare the data by cleaning it up and choosing the relevant column we would like to predict
We can now use the bootstrap to find an approximation of the bias and the variance
End of explanation
"""
clf = ExtraTreesClassifier(n_estimators = 20000,max_depth = 4)
clf.fit(X,y)
print acc(y,clf.predict(X))
importances = clf.feature_importances_
std = np.std([tree.feature_importances_ for tree in clf.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
print indices
# Print the feature ranking
print("Feature ranking:")
for f in range(X.shape[1]):
print("%d. %s (%f)" % (f + 1, features[indices[f]], importances[indices[f]]))
# Plot the feature importances of the forest
fig = plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), np.array(features)[indices])
plt.xlim([-1, X.shape[1]])
fig.set_size_inches(15,8)
axes = plt.gca()
axes.set_ylim([0,None])
plt.savefig("importances.png",bbox_inches='tight')
plt.savefig("importances.pdf",bbox_inches='tight')
from sklearn.metrics import confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.

    Parameters
    ----------
    cm : array, shape (n_classes, n_classes)
        Confusion matrix, e.g. from sklearn.metrics.confusion_matrix.
    classes : sequence
        Tick labels for the classes.
    normalize : bool
        If True, normalize each row to sum to 1 before plotting.
    title : string
        Plot title.
    cmap : matplotlib colormap
        Colormap for the heatmap.
    """
    if normalize:
        # Normalize BEFORE drawing: the original code normalized after
        # plt.imshow, so the displayed heatmap always showed raw counts
        # even when normalize=True.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # (The original also contained a dead loop that formatted each cell
    # value for plt.text annotations that were commented out; removed.)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
# Compute confusion matrix
y_pred = clf.predict(X)
cnf_matrix = confusion_matrix(y, y_pred)
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=range(len(set(y))), normalize = True,
title='Confusion matrix')
plt.savefig("confusion.png",bbox_inches='tight')
plt.savefig("confusion.pdf",bbox_inches='tight')
"""
Explanation: Now let's train the regressor on the whole dataset
End of explanation
"""
|
bhermanmit/openmc | docs/source/examples/mdgxs-part-ii.ipynb | mit | import math
import pickle
from IPython.display import Image
import matplotlib.pyplot as plt
import numpy as np
import openmc
import openmc.mgxs
%matplotlib inline
"""
Explanation: This IPython Notebook illustrates the use of the openmc.mgxs.Library class. The Library class is designed to automate the calculation of multi-group cross sections for use cases with one or more domains, cross section types, and/or nuclides. In particular, this Notebook illustrates the following features:
Calculation of multi-energy-group and multi-delayed-group cross sections for a fuel assembly
Automated creation, manipulation and storage of MGXS with openmc.mgxs.Library
Steady-state pin-by-pin delayed neutron fractions (beta) for each delayed group.
Generation of surface currents on the interfaces and surfaces of a Mesh.
Generate Input Files
End of explanation
"""
# Instantiate some Nuclides
h1 = openmc.Nuclide('H1')
b10 = openmc.Nuclide('B10')
o16 = openmc.Nuclide('O16')
u235 = openmc.Nuclide('U235')
u238 = openmc.Nuclide('U238')
zr90 = openmc.Nuclide('Zr90')
"""
Explanation: First we need to define materials that will be used in the problem. Before defining a material, we must create nuclides that are used in the material.
End of explanation
"""
# 1.6 enriched fuel
fuel = openmc.Material(name='1.6% Fuel')
fuel.set_density('g/cm3', 10.31341)
fuel.add_nuclide(u235, 3.7503e-4)
fuel.add_nuclide(u238, 2.2625e-2)
fuel.add_nuclide(o16, 4.6007e-2)
# borated water
water = openmc.Material(name='Borated Water')
water.set_density('g/cm3', 0.740582)
water.add_nuclide(h1, 4.9457e-2)
water.add_nuclide(o16, 2.4732e-2)
water.add_nuclide(b10, 8.0042e-6)
# zircaloy
zircaloy = openmc.Material(name='Zircaloy')
zircaloy.set_density('g/cm3', 6.55)
zircaloy.add_nuclide(zr90, 7.2758e-3)
"""
Explanation: With the nuclides we defined, we will now create three materials for the fuel, water, and cladding of the fuel pins.
End of explanation
"""
# Instantiate a Materials object
materials_file = openmc.Materials((fuel, water, zircaloy))
materials_file.default_xs = '71c'
# Export to "materials.xml"
materials_file.export_to_xml()
"""
Explanation: With our three materials, we can now create a Materials object that can be exported to an actual XML file.
End of explanation
"""
# Create cylinders for the fuel and clad
fuel_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, R=0.39218)
clad_outer_radius = openmc.ZCylinder(x0=0.0, y0=0.0, R=0.45720)
# Create boundary planes to surround the geometry
min_x = openmc.XPlane(x0=-10.71, boundary_type='reflective')
max_x = openmc.XPlane(x0=+10.71, boundary_type='reflective')
min_y = openmc.YPlane(y0=-10.71, boundary_type='reflective')
max_y = openmc.YPlane(y0=+10.71, boundary_type='reflective')
min_z = openmc.ZPlane(z0=-10., boundary_type='reflective')
max_z = openmc.ZPlane(z0=+10., boundary_type='reflective')
"""
Explanation: Now let's move on to the geometry. This problem will be a square array of fuel pins and control rod guide tubes for which we can use OpenMC's lattice/universe feature. The basic universe will have three regions for the fuel, the clad, and the surrounding coolant. The first step is to create the bounding surfaces for fuel and clad, as well as the outer bounding surfaces of the problem.
End of explanation
"""
# Create a Universe to encapsulate a fuel pin
fuel_pin_universe = openmc.Universe(name='1.6% Fuel Pin')
# Create fuel Cell
fuel_cell = openmc.Cell(name='1.6% Fuel')
fuel_cell.fill = fuel
fuel_cell.region = -fuel_outer_radius
fuel_pin_universe.add_cell(fuel_cell)
# Create a clad Cell
clad_cell = openmc.Cell(name='1.6% Clad')
clad_cell.fill = zircaloy
clad_cell.region = +fuel_outer_radius & -clad_outer_radius
fuel_pin_universe.add_cell(clad_cell)
# Create a moderator Cell
moderator_cell = openmc.Cell(name='1.6% Moderator')
moderator_cell.fill = water
moderator_cell.region = +clad_outer_radius
fuel_pin_universe.add_cell(moderator_cell)
"""
Explanation: With the surfaces defined, we can now construct a fuel pin cell from cells that are defined by intersections of half-spaces created by the surfaces.
End of explanation
"""
# Create a Universe to encapsulate a control rod guide tube
guide_tube_universe = openmc.Universe(name='Guide Tube')
# Create guide tube Cell
guide_tube_cell = openmc.Cell(name='Guide Tube Water')
guide_tube_cell.fill = water
guide_tube_cell.region = -fuel_outer_radius
guide_tube_universe.add_cell(guide_tube_cell)
# Create a clad Cell
clad_cell = openmc.Cell(name='Guide Clad')
clad_cell.fill = zircaloy
clad_cell.region = +fuel_outer_radius & -clad_outer_radius
guide_tube_universe.add_cell(clad_cell)
# Create a moderator Cell
moderator_cell = openmc.Cell(name='Guide Tube Moderator')
moderator_cell.fill = water
moderator_cell.region = +clad_outer_radius
guide_tube_universe.add_cell(moderator_cell)
"""
Explanation: Likewise, we can construct a control rod guide tube with the same surfaces.
End of explanation
"""
# Create fuel assembly Lattice
assembly = openmc.RectLattice(name='1.6% Fuel Assembly')
assembly.pitch = (1.26, 1.26)
assembly.lower_left = [-1.26 * 17. / 2.0] * 2
"""
Explanation: Using the pin cell universe, we can construct a 17x17 rectangular lattice with a 1.26 cm pitch.
End of explanation
"""
# Create array indices for guide tube locations in lattice:
# 25 (x, y) positions of the guide tubes within the 17x17 assembly
template_x = np.array([5, 8, 11, 3, 13, 2, 5, 8, 11, 14, 2, 5, 8,
                       11, 14, 2, 5, 8, 11, 14, 3, 13, 5, 8, 11])
template_y = np.array([2, 2, 2, 3, 3, 5, 5, 5, 5, 5, 8, 8, 8, 8,
                       8, 11, 11, 11, 11, 11, 13, 13, 14, 14, 14])

# Initialize an empty 17x17 array of the lattice universes
universes = np.empty((17, 17), dtype=openmc.Universe)

# Fill the array with the fuel pin and guide tube universes:
# fuel pins everywhere, then overwrite the 25 guide-tube positions
universes[:,:] = fuel_pin_universe
universes[template_x, template_y] = guide_tube_universe

# Store the array of universes in the lattice
assembly.universes = universes
"""
Explanation: Next, we create a NumPy array of fuel pin and guide tube universes for the lattice.
End of explanation
"""
# Create root Cell
root_cell = openmc.Cell(name='root cell')
root_cell.fill = assembly
# Add boundary planes
root_cell.region = +min_x & -max_x & +min_y & -max_y & +min_z & -max_z
# Create root Universe
root_universe = openmc.Universe(universe_id=0, name='root universe')
root_universe.add_cell(root_cell)
"""
Explanation: OpenMC requires that there is a "root" universe. Let us create a root cell that is filled by the pin cell universe and then assign it to the root universe.
End of explanation
"""
# Create Geometry and set root Universe
geometry = openmc.Geometry()
geometry.root_universe = root_universe
# Export to "geometry.xml"
geometry.export_to_xml()
"""
Explanation: We now must create a geometry that is assigned a root universe and export it to XML.
End of explanation
"""
# OpenMC simulation parameters
batches = 50      # total batches: 10 inactive + 40 active
inactive = 10     # batches discarded while the fission source converges
particles = 2500  # particles per batch

# Instantiate a Settings object
settings_file = openmc.Settings()
settings_file.batches = batches
settings_file.inactive = inactive
settings_file.particles = particles
settings_file.output = {'tallies': False}  # suppress printing tallies to the output file

# Create an initial uniform spatial source distribution over fissionable zones
# (bounds = [x_min, y_min, z_min, x_max, y_max, z_max], matching the assembly)
bounds = [-10.71, -10.71, -10, 10.71, 10.71, 10.]
uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)
settings_file.source = openmc.source.Source(space=uniform_dist)

# Export to "settings.xml"
settings_file.export_to_xml()
"""
Explanation: With the geometry and materials finished, we now just need to define simulation parameters. In this case, we will use 10 inactive batches and 40 active batches each with 2500 particles.
End of explanation
"""
# Instantiate a Plot
plot = openmc.Plot(plot_id=1)
plot.filename = 'materials-xy'
plot.origin = [0, 0, 0]
plot.pixels = [250, 250]
plot.width = [-10.71*2, -10.71*2]
plot.color = 'mat'
# Instantiate a Plots object, add Plot, and export to "plots.xml"
plot_file = openmc.Plots([plot])
plot_file.export_to_xml()
"""
Explanation: Let us also create a Plots file that we can use to verify that our fuel assembly geometry was created successfully.
End of explanation
"""
# Run openmc in plotting mode
openmc.plot_geometry(output=False)
# Convert OpenMC's funky ppm to png
!convert materials-xy.ppm materials-xy.png
# Display the materials plot inline
Image(filename='materials-xy.png')
"""
Explanation: With the plots.xml file, we can now generate and view the plot. OpenMC outputs plots in .ppm format, which can be converted into a compressed format like .png with the convert utility.
End of explanation
"""
# Instantiate a 20-group EnergyGroups object
energy_groups = openmc.mgxs.EnergyGroups()
energy_groups.group_edges = np.logspace(-3, 7.3, 21)
# Instantiate a 1-group EnergyGroups object
one_group = openmc.mgxs.EnergyGroups()
one_group.group_edges = np.array([energy_groups.group_edges[0], energy_groups.group_edges[-1]])
# Instantiate a 6-delayed-group list
delayed_groups = list(range(1,7))
"""
Explanation: As we can see from the plot, we have a nice array of fuel and guide tube pin cells with fuel, cladding, and water!
Create an MGXS Library
Now we are ready to generate multi-group cross sections! First, let's define 20-energy-group, 1-energy-group, and 6-delayed-group structures.
End of explanation
"""
# Instantiate a tally mesh: one 17x17 cell per fuel pin, with a z extent
# large enough to be effectively infinite axially
mesh = openmc.Mesh(mesh_id=1)
mesh.type = 'regular'
mesh.dimension = [17, 17, 1]
mesh.lower_left = [-10.71, -10.71, -10000.]
mesh.width = [1.26, 1.26, 20000.]

# Initialize a 20-energy-group and 6-delayed-group MGXS Library
mgxs_lib = openmc.mgxs.Library(geometry)
mgxs_lib.energy_groups = energy_groups
mgxs_lib.delayed_groups = delayed_groups

# Specify multi-group cross section types to compute
mgxs_lib.mgxs_types = ['total', 'transport', 'nu-scatter matrix', 'kappa-fission', 'inverse-velocity', 'chi-prompt',
                       'prompt-nu-fission', 'chi-delayed', 'delayed-nu-fission', 'beta']

# Specify a "mesh" domain type for the cross section tally filters
mgxs_lib.domain_type = 'mesh'

# Specify the mesh domain over which to compute multi-group cross sections
mgxs_lib.domains = [mesh]

# Construct all tallies needed for the multi-group cross section library
mgxs_lib.build_library()

# Create a "tallies.xml" file for the MGXS Library
tallies_file = openmc.Tallies()
mgxs_lib.add_to_tallies_file(tallies_file, merge=True)

# Instantiate a surface-current tally on the same mesh
mesh_filter = openmc.MeshFilter(mesh)
current_tally = openmc.Tally(name='current tally')
current_tally.scores = ['current']
current_tally.filters = [mesh_filter]

# Add current tally to the tallies file
tallies_file.append(current_tally)

# Export to "tallies.xml"
tallies_file.export_to_xml()
"""
Explanation: Next, we will instantiate an openmc.mgxs.Library for the energy and delayed groups with our the fuel assembly geometry.
End of explanation
"""
# Run OpenMC
openmc.run()
"""
Explanation: Now, we can run OpenMC to generate the cross sections.
End of explanation
"""
# Load the last statepoint file
sp = openmc.StatePoint('statepoint.50.h5')
"""
Explanation: Tally Data Processing
Our simulation ran successfully and created statepoint and summary output files. We begin our analysis by instantiating a StatePoint object.
End of explanation
"""
# Initialize MGXS Library with OpenMC statepoint data
mgxs_lib.load_from_statepoint(sp)

# Extract the current tally separately
current_tally = sp.get_tally(name='current tally')
"""
Explanation: The statepoint is now ready to be analyzed by the Library. We simply have to load the tallies from the statepoint into the Library and our MGXS objects will compute the cross sections for us under-the-hood.
End of explanation
"""
# Delayed-precursor half-lives in seconds; decay constants
# lambda = ln(2) / T_half are in seconds^-1
precursor_halflife = np.array([55.6, 24.5, 16.3, 2.37, 0.424, 0.195])
precursor_lambda = -np.log(0.5) / precursor_halflife

beta = mgxs_lib.get_mgxs(mesh, 'beta')

# Create a tally object with only the delayed group filter for the time constants:
# sum away every other filter, then zero the values and add 1 so the tally
# becomes a template whose means we can overwrite below
beta_filters = [f for f in beta.xs_tally.filters if type(f) is not openmc.DelayedGroupFilter]
lambda_tally = beta.xs_tally.summation(nuclides=beta.xs_tally.nuclides)
for f in beta_filters:
    lambda_tally = lambda_tally.summation(filter_type=type(f), remove_filter=True) * 0. + 1.

# Set the mean of the lambda tally and reshape to account for nuclides and scores
# (writes the private _mean directly so the derived tally carries lambda values)
lambda_tally._mean = precursor_lambda
lambda_tally._mean.shape = lambda_tally.std_dev.shape

# Set a total nuclide and lambda score
lambda_tally.nuclides = [openmc.Nuclide(name='total')]
lambda_tally.scores = ['lambda']

delayed_nu_fission = mgxs_lib.get_mgxs(mesh, 'delayed-nu-fission')

# Use tally arithmetic to compute the precursor concentrations:
# C_{k,d} = beta * (nu_d Sigma_f phi) / lambda_d, energy-condensed
precursor_conc = beta.xs_tally.summation(filter_type=openmc.EnergyFilter, remove_filter=True) * \
                 delayed_nu_fission.xs_tally.summation(filter_type=openmc.EnergyFilter, remove_filter=True) / lambda_tally

# The result is a derived tally which can generate Pandas DataFrames for inspection
precursor_conc.get_pandas_dataframe().head(10)
"""
Explanation: Using Tally Arithmetic to Compute the Delayed Neutron Precursor Concentrations
Finally, we illustrate how one can leverage OpenMC's tally arithmetic data processing feature with MGXS objects. The openmc.mgxs module uses tally arithmetic to compute multi-group cross sections with automated uncertainty propagation. Each MGXS object includes an xs_tally attribute which is a "derived" Tally based on the tallies needed to compute the cross section type of interest. These derived tallies can be used in subsequent tally arithmetic operations. For example, we can use tally arithmetic to compute the delayed neutron precursor concentrations using the Beta and DelayedNuFissionXS objects. The delayed neutron precursor concentrations are modeled using the following equations:
$$\frac{\partial}{\partial t} C_{k,d} (t) = \int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r} \beta_{k,d} (t) \nu_d \sigma_{f,x}(\mathbf{r},E',t)\Phi(\mathbf{r},E',t) - \lambda_{d} C_{k,d} (t) $$
$$C_{k,d} (t=0) = \frac{1}{\lambda_{d}} \int_{0}^{\infty}\mathrm{d}E'\int_{\mathbf{r} \in V_{k}}\mathrm{d}\mathbf{r} \beta_{k,d} (t=0) \nu_d \sigma_{f,x}(\mathbf{r},E',t=0)\Phi(\mathbf{r},E',t=0) $$
End of explanation
"""
current_tally.get_pandas_dataframe().head(10)
"""
Explanation: Another useful feature of the Python API is the ability to extract the surface currents for the interfaces and surfaces of a mesh. We can inspect the currents for the mesh by getting the pandas dataframe.
End of explanation
"""
# Extract the energy-condensed delayed neutron fraction tally
beta_by_group = beta.get_condensed_xs(one_group).xs_tally.summation(filter_type='energy', remove_filter=True)
beta_by_group.mean.shape = (17, 17, 6)
beta_by_group.mean[beta_by_group.mean == 0] = np.nan
# Plot the betas
plt.figure(figsize=(18,9))
fig = plt.subplot(231)
plt.imshow(beta_by_group.mean[:,:,0], interpolation='none', cmap='jet')
plt.colorbar()
plt.title('Beta - delayed group 1')
fig = plt.subplot(232)
plt.imshow(beta_by_group.mean[:,:,1], interpolation='none', cmap='jet')
plt.colorbar()
plt.title('Beta - delayed group 2')
fig = plt.subplot(233)
plt.imshow(beta_by_group.mean[:,:,2], interpolation='none', cmap='jet')
plt.colorbar()
plt.title('Beta - delayed group 3')
fig = plt.subplot(234)
plt.imshow(beta_by_group.mean[:,:,3], interpolation='none', cmap='jet')
plt.colorbar()
plt.title('Beta - delayed group 4')
fig = plt.subplot(235)
plt.imshow(beta_by_group.mean[:,:,4], interpolation='none', cmap='jet')
plt.colorbar()
plt.title('Beta - delayed group 5')
fig = plt.subplot(236)
plt.imshow(beta_by_group.mean[:,:,5], interpolation='none', cmap='jet')
plt.colorbar()
plt.title('Beta - delayed group 6')
"""
Explanation: Cross Section Visualizations
In addition to inspecting the data in the tallies by getting the pandas dataframe, we can also plot the tally data on the domain mesh. Below is the delayed neutron fraction tallied in each mesh cell for each delayed group.
End of explanation
"""
|
xpharry/Udacity-DLFoudation | tutorials/sentiment_network/.ipynb_checkpoints/Sentiment Classification - How to Best Frame a Problem for a Neural Network (Project 4)-checkpoint.ipynb | mit | def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
"""
Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network
by Andrew Trask
Twitter: @iamtrask
Blog: http://iamtrask.github.io
What You Should Already Know
neural networks, forward and back-propagation
stochastic gradient descent
mean squared error
and train/test splits
Where to Get Help if You Need it
Re-watch previous Udacity Lectures
Leverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17)
Shoot me a tweet @iamtrask
Tutorial Outline:
Intro: The Importance of "Framing a Problem"
Curate a Dataset
Developing a "Predictive Theory"
PROJECT 1: Quick Theory Validation
Transforming Text to Numbers
PROJECT 2: Creating the Input/Output Data
Putting it all together in a Neural Network
PROJECT 3: Building our Neural Network
Understanding Neural Noise
PROJECT 4: Making Learning Faster by Reducing Noise
Analyzing Inefficiencies in our Network
PROJECT 5: Making our Network Train and Run Faster
Further Noise Reduction
PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary
Analysis: What's going on in the weights?
Lesson: Curate a Dataset
End of explanation
"""
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
"""
Explanation: Lesson: Develop a Predictive Theory
End of explanation
"""
from collections import Counter
import numpy as np
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
if(labels[i] == 'POSITIVE'):
for word in reviews[i].split(" "):
positive_counts[word] += 1
total_counts[word] += 1
else:
for word in reviews[i].split(" "):
negative_counts[word] += 1
total_counts[word] += 1
positive_counts.most_common()
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
if(cnt > 100):
pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
pos_neg_ratios[term] = pos_neg_ratio
for word,ratio in pos_neg_ratios.most_common():
if(ratio > 1):
pos_neg_ratios[word] = np.log(ratio)
else:
pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
"""
Explanation: Project 1: Quick Theory Validation
End of explanation
"""
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
"""
Explanation: Transforming Text into Numbers
End of explanation
"""
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)
list(vocab)
import numpy as np
layer_0 = np.zeros((1,vocab_size))
layer_0
from IPython.display import Image
Image(filename='sentiment_network.png')
word2index = {}
for i,word in enumerate(vocab):
word2index[word] = i
word2index
def update_input_layer(review):
    """Fill the global input layer with per-word counts for *review*."""
    global layer_0
    # wipe whatever the previous review left behind
    layer_0 *= 0
    for token in review.split(" "):
        layer_0[0][word2index[token]] += 1
update_input_layer(reviews[0])
layer_0
def get_target_for_label(label):
    """Map a sentiment label string to a binary training target (1/0)."""
    return 1 if label == 'POSITIVE' else 0
labels[0]
get_target_for_label(labels[0])
labels[1]
get_target_for_label(labels[1])
"""
Explanation: Project 2: Creating the Input/Output Data
End of explanation
"""
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
    """Three-layer neural network for binary sentiment classification.

    Architecture: bag-of-words input layer -> linear hidden layer (no
    non-linearity) -> single sigmoid output unit.  Trained one review at
    a time with plain stochastic gradient descent.
    """

    def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
        """Build the vocabularies from the corpus and initialize weights."""
        # set our random number generator (fixed seed => reproducible runs)
        np.random.seed(1)

        self.pre_process_data(reviews, labels)

        self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Collect review/label vocabularies and word->index lookup tables."""
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                review_vocab.add(word)
        self.review_vocab = list(review_vocab)

        label_vocab = set()
        for label in labels:
            label_vocab.add(label)
        self.label_vocab = list(label_vocab)

        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # word -> input-layer column index
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i

        # label -> index (not referenced elsewhere in this class)
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate weight matrices and the reusable input-layer buffer."""
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Initialize weights: input->hidden starts at zero; hidden->output
        # is drawn from a normal distribution scaled by the output size
        self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))

        self.learning_rate = learning_rate

        # 1 x vocab_size buffer reused for every review
        self.layer_0 = np.zeros((1,input_nodes))

    def update_input_layer(self,review):
        """Encode *review* into layer_0 as per-word counts."""
        # clear out previous state, reset the layer to be all 0s
        self.layer_0 *= 0
        for word in review.split(" "):
            if(word in self.word2index.keys()):
                self.layer_0[0][self.word2index[word]] += 1

    def get_target_for_label(self,label):
        """Return the training target: 1 for 'POSITIVE', 0 otherwise."""
        if(label == 'POSITIVE'):
            return 1
        else:
            return 0

    def sigmoid(self,x):
        """Logistic activation function."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self,output):
        """Sigmoid derivative expressed in terms of the sigmoid's output."""
        return output * (1 - output)

    def train(self, training_reviews, training_labels):
        """One SGD pass over the corpus, printing running accuracy."""
        assert(len(training_reviews) == len(training_labels))

        correct_so_far = 0

        start = time.time()

        for i in range(len(training_reviews)):

            review = training_reviews[i]
            label = training_labels[i]

            #### Implement the forward pass here ####
            ### Forward pass ###

            # Input Layer
            self.update_input_layer(review)

            # Hidden layer (purely linear: no activation function)
            layer_1 = self.layer_0.dot(self.weights_0_1)

            # Output layer
            layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))

            #### Implement the backward pass here ####
            ### Backward pass ###

            # Output error
            layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)

            # Backpropagated error
            layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
            layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error

            # Update the weights
            self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
            self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step

            # a prediction counts as correct when it falls on the right
            # side of 0.5
            if(np.abs(layer_2_error) < 0.5):
                correct_so_far += 1

            reviews_per_second = i / float(time.time() - start)

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """Report accuracy of the trained network on held-out reviews."""
        correct = 0

        start = time.time()

        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1

            reviews_per_second = i / float(time.time() - start)

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """Classify a single review string as 'POSITIVE' or 'NEGATIVE'."""
        # Input Layer (lower-case so tokens match the training vocabulary)
        self.update_input_layer(review.lower())

        # Hidden layer
        layer_1 = self.layer_0.dot(self.weights_0_1)

        # Output layer
        layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))

        # threshold the sigmoid output at 0.5
        if(layer_2[0] > 0.5):
            return "POSITIVE"
        else:
            return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: Project 3: Building a Neural Network
Start with your neural network from the last chapter
3 layer neural network
no non-linearity in hidden layer
use our functions to create the training data
create a "pre_process_data" function to create vocabulary for our training data generating functions
modify "train" to train over the entire corpus
Where to Get Help if You Need it
Re-watch previous week's Udacity Lectures
Chapters 3-5 - Grokking Deep Learning - (40% Off: traskud17)
End of explanation
"""
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
global layer_0
# clear out previous state, reset the layer to be all 0s
layer_0 *= 0
for word in review.split(" "):
layer_0[0][word2index[word]] += 1
update_input_layer(reviews[0])
layer_0
review_counter = Counter()
for word in reviews[0].split(" "):
review_counter[word] += 1
review_counter.most_common()
"""
Explanation: Understanding Neural Noise
End of explanation
"""
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
    """Three-layer sentiment network, revised for noise reduction.

    Identical to the previous version except that update_input_layer()
    records binary word presence (0/1) instead of raw word counts.
    """

    def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
        """Build the vocabularies from the corpus and initialize weights."""
        # set our random number generator (fixed seed => reproducible runs)
        np.random.seed(1)

        self.pre_process_data(reviews, labels)

        self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)

    def pre_process_data(self, reviews, labels):
        """Collect review/label vocabularies and word->index lookup tables."""
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                review_vocab.add(word)
        self.review_vocab = list(review_vocab)

        label_vocab = set()
        for label in labels:
            label_vocab.add(label)
        self.label_vocab = list(label_vocab)

        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)

        # word -> input-layer column index
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i

        # label -> index (not referenced elsewhere in this class)
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i

    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate weight matrices and the reusable input-layer buffer."""
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes

        # Initialize weights: input->hidden starts at zero; hidden->output
        # is drawn from a normal distribution scaled by the output size
        self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))

        self.learning_rate = learning_rate

        # 1 x vocab_size buffer reused for every review
        self.layer_0 = np.zeros((1,input_nodes))

    def update_input_layer(self,review):
        """Encode *review* into layer_0 as binary word presence (0/1)."""
        # clear out previous state, reset the layer to be all 0s
        self.layer_0 *= 0
        for word in review.split(" "):
            if(word in self.word2index.keys()):
                # set to 1 (not +=) so repeated words do not add noise
                self.layer_0[0][self.word2index[word]] = 1

    def get_target_for_label(self,label):
        """Return the training target: 1 for 'POSITIVE', 0 otherwise."""
        if(label == 'POSITIVE'):
            return 1
        else:
            return 0

    def sigmoid(self,x):
        """Logistic activation function."""
        return 1 / (1 + np.exp(-x))

    def sigmoid_output_2_derivative(self,output):
        """Sigmoid derivative expressed in terms of the sigmoid's output."""
        return output * (1 - output)

    def train(self, training_reviews, training_labels):
        """One SGD pass over the corpus, printing running accuracy."""
        assert(len(training_reviews) == len(training_labels))

        correct_so_far = 0

        start = time.time()

        for i in range(len(training_reviews)):

            review = training_reviews[i]
            label = training_labels[i]

            #### Implement the forward pass here ####
            ### Forward pass ###

            # Input Layer
            self.update_input_layer(review)

            # Hidden layer (purely linear: no activation function)
            layer_1 = self.layer_0.dot(self.weights_0_1)

            # Output layer
            layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))

            #### Implement the backward pass here ####
            ### Backward pass ###

            # Output error
            layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)

            # Backpropagated error
            layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
            layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error

            # Update the weights
            self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
            self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step

            # a prediction counts as correct when it falls on the right
            # side of 0.5
            if(np.abs(layer_2_error) < 0.5):
                correct_so_far += 1

            reviews_per_second = i / float(time.time() - start)

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")

    def test(self, testing_reviews, testing_labels):
        """Report accuracy of the trained network on held-out reviews."""
        correct = 0

        start = time.time()

        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1

            reviews_per_second = i / float(time.time() - start)

            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")

    def run(self, review):
        """Classify a single review string as 'POSITIVE' or 'NEGATIVE'."""
        # Input Layer (lower-case so tokens match the training vocabulary)
        self.update_input_layer(review.lower())

        # Hidden layer
        layer_1 = self.layer_0.dot(self.weights_0_1)

        # Output layer
        layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))

        # threshold the sigmoid output at 0.5
        if(layer_2[0] > 0.5):
            return "POSITIVE"
        else:
            return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
mlp.train(reviews[:-1000],labels[:-1000])
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
"""
Explanation: Project 4: Reducing Noise in our Input Data
End of explanation
"""
|
franzpl/StableGrid | jupyter_notebooks/computation_schmitt_trigger.ipynb | mit | from IPython.display import Image
Image(filename='circuit.png')
# %matplotlib notebook
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from IPython.display import HTML, display
# For tables
def tableit(data):
    """Render *data* (an iterable of rows) as an inline HTML table."""
    rows = []
    for row in data:
        cells = '</td><td>'.join(str(item) for item in row)
        rows.append('<td>{}</td>'.format(cells))
    html = '<table><tr>{}</tr></table>'.format('</tr><tr>'.join(rows))
    display(HTML(html))
"""
Explanation: Computation of voltage divider
It can happen that the mains voltage fluctuates because of voltage dips. Nevertheless, the resulting signal has to be stable enough that these fluctuations do not influence the final result. A plain two-resistor voltage divider would not achieve that. Therefore, adding a source from the microcontroller together with an additional resistor makes the signal more resistant to mains-voltage fluctuations. Furthermore, the microcontroller can only "read" positive voltages, so the additional DC voltage source shifts the sine wave into a purely positive range.
End of explanation
"""
U0 = 17.1 # mains voltage in Volt (transformed from 230 V ~ to 17.1 V ~)
U_B = 5 # DC voltage from an external supply in Volts
R1 = 10000 # Ohm
R2 = 2400 # Ohm
R3 = 3000 # Ohm
f = 50 # Hz
fs = 44100 # Hz
duration = 0.06 # Duration of plots in seconds
t = np.arange(0, duration, 1 / fs)
"""
Explanation: Parameters
End of explanation
"""
# Branch currents from superposition of the AC mains source and the DC
# bias U_B, per the formulas above (I0 = I1 + I2 by Kirchhoff's current law)
I1 = (U0 * np.sin(2 * np.pi * f * t) * R2 + U_B * R1) / (R1 * (R2 + R3) + R2 * R3)
I2 = (U0 * np.sin(2 * np.pi * f * t) * R3 - U_B * (R1 + R3)) / (R1 * (R2 + R3) + R2 * R3)
I0 = I1 + I2

# Plot all three currents in mA over time in ms
plt.plot(t * 1000, I0 * 1000, label="I0")
plt.plot(t * 1000, I1 * 1000, label="I1")
plt.plot(t * 1000, I2 * 1000, label="I2")
plt.title("Currents")
plt.ylabel("I / mA")
plt.xlabel("t / ms")
plt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.6))
plt.grid()
plt.show()
"""
Explanation: Currents
$$I_1 = \frac{U_0R_2sin(2\pi f t) + U_BR_1}{R_1(R_2+R_3)+R_2R_3}$$
$$I_2 = \frac{U_0R_3sin(2\pi f t) - U_B(R_1+R_3)}{R_1(R_2+R_3)+R_2R_3}$$
$$I_0 = I_1 + I_2$$
End of explanation
"""
U_R1 = I0 * R1
U_R2 = I2 * R2
U_R3 = I1 * R3
signal = U_R3 # U_R3 == signal voltage
plt.plot(t * 1000, U_R1, label="U_R1")
plt.plot(t * 1000, U_R2, label="U_R2")
plt.plot(t * 1000, signal, label="signal")
plt.title("Voltages")
plt.ylabel("U / V")
plt.xlabel("t / ms")
plt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.6))
plt.grid()
plt.show()
"""
Explanation: Voltages
$$U_{R_1}=I_0R_1$$
$$U_{R_2}=I_2R_2$$
$$U_{R_3}=I_1R_3$$
End of explanation
"""
signal_max = np.max(signal)
signal_min = np.min(signal)
signal_pp = signal_max - signal_min
offset = (signal_max + signal_min) / 2
tableit([["signal_max / V", "signal_min / V","signal_pp / V", "Offset / V"],
[np.around(signal_max, 2), np.around(signal_min, 2), np.around(signal_pp, 2), np.around(offset, 2)],
])
plt.plot(t * 1000, signal, label="Signal Voltage")
plt.title("Signal Voltage")
plt.ylabel("U / V")
plt.xlabel("t / ms")
plt.axhline(y=offset, color='r', linestyle='-', label='Offset')
plt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.6))
plt.ylim(-0.1, signal_max + 0.2)
plt.grid()
plt.show()
"""
Explanation: Offset of signal voltage
End of explanation
"""
voltage_range = np.arange(U0 - 8, U0 + 8 ,0.1) # mains voltage fluctuations
I1_DC = (voltage_range * R2 + U_B * R1) / (R1 * (R3 + R2) + R3 * R2)
signal_DC = I1_DC * R3
plt.plot(voltage_range, signal_DC)
plt.title('Mains voltage fluctuations and the effect on the signal')
plt.xlabel('Mains Voltage / V')
plt.ylabel('Signal Voltage (DC) / V')
plt.grid()
plt.show()
"""
Explanation: Dependency of signal voltage on the mains voltage
End of explanation
"""
Image(filename='schmitt.png')
"""
Explanation: Evaluation
The computation shows a good stability of the signal voltage if the mains voltage fluctuates. A change of 1V in the mains voltage results just in an approx. 100mV deviation of the signal.
Schmitt Trigger
End of explanation
"""
Image(filename='schmitt_drawing.png')
R4 = 10000 # Ohm
R5 = 34800 # Ohm
R6 = 10000 # Ohm
R7 = 10000 # Ohm
U_aH = U_B
U_aL = 0
"""
Explanation: Source: http://www.mikrocontroller.net/articles/Schmitt-Trigger
Laws
1) If U_e (= input voltage) exceeds U_H, then U_a (= output voltage) = HIGH
2) If U_e comes below U_L, then U_a = LOW
3) If the range of Ue is between U_L and U_H, then U_a = const.
4) The transition from LOW to HIGH, or from HIGH to LOW, always has a steep edge
Computation of a non-inverting Schmitt Trigger
End of explanation
"""
U_ref = U_B * R6 / (R6 + R7)
"""
Explanation: $$U_{High} = \frac{\frac{R_4}{R_4 + R_5} U_{a_L} - U_{ref}} {\frac{R_4}{R_4 + R_5} -1} $$
$$U_{Low} = \frac{\frac{R_4}{R_4 + R_5}U_{a_H} - U_{ref}} {\frac{R_4}{R_4 + R_5} -1} $$
$$U_{ref} = U_B \frac{R_6}{R_6 + R_7}$$
Voltage Divider
End of explanation
"""
U_High = (R4 / (R4 + R5) * U_aL - U_ref) / (R4 / (R4 + R5) -1)
"""
Explanation: Turn-on Threshold
End of explanation
"""
U_Low = (R4 / (R4 + R5) * U_aH - U_ref) / (R4 / (R4 + R5) -1)
tableit([["U_ref / V", "U_Low / V","U_High / V"],
[U_ref, np.around(U_Low, 2), np.around(U_High, 2)],
])
"""
Explanation: Turn-off Threshold
End of explanation
"""
def hyst(x, th_lo, th_hi, initial = False):
    """Vectorised hysteresis (Schmitt-trigger) thresholding of *x*.

    The output switches to True once x rises to >= th_hi and back to
    False once x falls to <= th_lo; between the two thresholds the
    previous state is held.  *initial* is the state assumed before the
    first threshold crossing.
    """
    above_hi = x >= th_hi
    decided = above_hi | (x <= th_lo)      # samples that pin the state
    decided_idx = np.nonzero(decided)[0]
    if decided_idx.size == 0:
        # no sample ever crossed a threshold: the state never changes
        return np.full(x.shape, bool(initial))
    # for each sample, how many "decided" samples occur up to and
    # including it; the state is that of the most recent decided sample
    seen = np.cumsum(decided)
    return np.where(seen > 0, above_hi[decided_idx[seen - 1]], initial)
fig = plt.figure()
ax = fig.add_subplot(111, aspect='equal')
ax.add_patch(patches.Rectangle((U_Low, 0), U_High - U_Low, U_B, fill=False))
ax.set_title('Hysteresis')
ax.set_xlim([0,U_Low + 2]);
ax.set_ylim([0, U_B + 1]);
ax.set_xlabel('Ue / V')
ax.set_ylabel('Ua / V')
ax.arrow(U_High, U_B / 2 , 0, 0, head_width=0.2, head_length=0.3, fc='k', ec='k')
ax.arrow(U_Low, U_B / 2 , 0, -0.01, head_width=0.2, head_length=0.3, fc='k', ec='k')
ax.arrow((U_High + U_Low) / 2, U_B - 0.03 , -0.001, 0, head_width=0.2, head_length=0.3, fc='k', ec='k')
ax.arrow((U_High + U_Low) / 2, 0 , 0.001, 0, head_width=0.2, head_length=0.3, fc='k', ec='k')
plt.grid()
plt.show()
h1 = hyst(signal, U_Low, U_High)
plt.plot(t * 1000, signal, label='Signal Voltage')
plt.plot(t * 1000, U_B * h1, label='U_a')
plt.axhline(y=U_Low, color='k', linestyle='-', label='U_Low')
plt.axhline(y=U_High, color='r', linestyle='-', label='U_High')
plt.axhline(y=U_ref, color='y', linestyle='-', label='U_ref')
plt.title('Schmitt Trigger Result')
plt.xlabel('t / ms')
plt.ylabel('U / V')
plt.legend(loc='upper left', bbox_to_anchor=(1.1, 0.6))
plt.ylim([0, U_aH + 0.5])
plt.grid()
plt.show()
"""
Explanation: Hysteresis of a non-inverting Schmitt Trigger
End of explanation
"""
|
seg/2016-ml-contest | Facies_classification.ipynb | apache-2.0 | %matplotlib inline
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pandas import set_option
# Limit DataFrame display size and silence chained-assignment warnings
# for cleaner notebook output.
set_option("display.max_rows", 10)
pd.options.mode.chained_assignment = None

# Load the labeled training logs (half-foot samples with facies labels).
filename = 'training_data.csv'
training_data = pd.read_csv(filename)
training_data
"""
Explanation: Facies classification using Machine Learning
Brendon Hall, Enthought
This notebook demonstrates how to train a machine learning algorithm to predict facies from well log data. The dataset we will use comes from a class exercise from The University of Kansas on Neural Networks and Fuzzy Systems. This exercise is based on a consortium project to use machine learning techniques to create a reservoir model of the largest gas fields in North America, the Hugoton and Panoma Fields. For more info on the origin of the data, see Bohling and Dubois (2003) and Dubois et al. (2007).
The dataset we will use is log data from nine wells that have been labeled with a facies type based on observation of core. We will use this log data to train a support vector machine to classify facies types. Support vector machines (or SVMs) are a type of supervised learning model that can be trained on data to perform classification and regression tasks. The SVM algorithm uses the training data to fit an optimal hyperplane between the different classes (or facies, in our case). We will use the SVM implementation in scikit-learn.
First we will explore the dataset. We will load the training data from 9 wells, and take a look at what we have to work with. We will plot the data from a couple wells, and create cross plots to look at the variation within the data.
Next we will condition the data set. We will remove the entries that have incomplete data. The data will be scaled to have zero mean and unit variance. We will also split the data into training and test sets.
We will then be ready to build the SVM classifier. We will demonstrate how to use the cross validation set to do model parameter selection.
Finally, once we have a built and tuned the classifier, we can apply the trained model to classify facies in wells which do not already have labels. We will apply the classifier to two wells, but in principle you could apply the classifier to any number of wells that had the same log data.
Exploring the dataset
First, we will examine the data set we will use to train the classifier. The training data is contained in the file facies_vectors.csv. The dataset consists of 5 wireline log measurements, two indicator variables and a facies label at half foot intervals. In machine learning terminology, each log measurement is a feature vector that maps a set of 'features' (the log measurements) to a class (the facies type). We will use the pandas library to load the data into a dataframe, which provides a convenient data structure to work with well log data.
End of explanation
"""
# 'Well Name' and 'Formation' take a small fixed set of values, so a
# categorical dtype is more memory-efficient and self-documenting.
training_data['Well Name'] = training_data['Well Name'].astype('category')
training_data['Formation'] = training_data['Formation'].astype('category')
# Quick sanity checks: which wells are present, and summary statistics.
training_data['Well Name'].unique()
training_data.describe()
"""
Explanation: This data is from the Council Grove gas reservoir in Southwest Kansas. The Panoma Council Grove Field is predominantly a carbonate gas reservoir encompassing 2700 square miles in Southwestern Kansas. This dataset is from nine wells (with 4149 examples), consisting of a set of seven predictor variables and a rock facies (class) for each example vector and validation (test) data (830 examples from two wells) having the same seven predictor variables in the feature vector. Facies are based on examination of cores from nine wells taken vertically at half-foot intervals. Predictor variables include five from wireline log measurements and two geologic constraining variables that are derived from geologic knowledge. These are essentially continuous variables sampled at a half-foot sample rate.
The seven predictor variables are:
* Five wire line log curves include gamma ray (GR), resistivity logging (ILD_log10),
photoelectric effect (PE), neutron-density porosity difference and average neutron-density porosity (DeltaPHI and PHIND). Note, some wells do not have PE.
* Two geologic constraining variables: nonmarine-marine indicator (NM_M) and relative position (RELPOS)
The nine discrete facies (classes of rocks) are:
1. Nonmarine sandstone
2. Nonmarine coarse siltstone
3. Nonmarine fine siltstone
4. Marine siltstone and shale
5. Mudstone (limestone)
6. Wackestone (limestone)
7. Dolomite
8. Packstone-grainstone (limestone)
9. Phylloid-algal bafflestone (limestone)
These facies aren't discrete, and gradually blend into one another. Some have neighboring facies that are rather close. Mislabeling within these neighboring facies can be expected to occur. The following table lists the facies, their abbreviated labels and their approximate neighbors.
Facies |Label| Adjacent Facies
:---: | :---: |:--:
1 |SS| 2
2 |CSiS| 1,3
3 |FSiS| 2
4 |SiSh| 5
5 |MS| 4,6
6 |WS| 5,7
7 |D| 6,8
8 |PS| 6,7,9
9 |BS| 7,8
Let's clean up this dataset. The 'Well Name' and 'Formation' columns can be turned into a categorical data type.
End of explanation
"""
# Hold the SHANKLE well out entirely as a blind test set and train only
# on the remaining wells.
blind = training_data[training_data['Well Name'] == 'SHANKLE']
training_data = training_data[training_data['Well Name'] != 'SHANKLE']
"""
Explanation: This is a quick view of the statistical distribution of the input variables. Looking at the count values, there are 3232 feature vectors in the training set.
Remove a single well to use as a blind test later.
End of explanation
"""
# Facies legend:
# 1=sandstone 2=c_siltstone 3=f_siltstone
# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
# 8=packstone 9=bafflestone
facies_colors = ['#F4D03F', '#F5B041', '#DC7633', '#6E2C00',
                 '#1B4F72', '#2E86C1', '#AED6F1', '#A569BD', '#196F3D']

facies_labels = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS',
                 'WS', 'D', 'PS', 'BS']

# facies_color_map pairs each abbreviated facies label with its plot
# color so every figure in the notebook uses a consistent palette.
facies_color_map = {label: color
                    for label, color in zip(facies_labels, facies_colors)}
def label_facies(row, labels):
    """Return the abbreviated label for a row's 1-based 'Facies' number."""
    facies_number = row['Facies']
    return labels[facies_number - 1]
# Attach a human-readable label column derived from the numeric facies.
training_data.loc[:,'FaciesLabels'] = training_data.apply(lambda row: label_facies(row, facies_labels), axis=1)
"""
Explanation: These are the names of the 10 training wells in the Council Grove reservoir. Data has been recruited into pseudo-well 'Recruit F9' to better represent facies 9, the Phylloid-algal bafflestone.
Before we plot the well data, let's define a color map so the facies are represented by consistent color in all the plots in this tutorial. We also create the abbreviated facies labels, and add those to the facies_vectors dataframe.
End of explanation
"""
def make_facies_log_plot(logs, facies_colors):
    """Plot one well's five wireline logs next to its facies column.

    Parameters
    ----------
    logs : DataFrame for a single well with Depth, GR, ILD_log10,
        DeltaPHI, PHIND, PE, Facies and 'Well Name' columns.
    facies_colors : list of 9 color strings, one per facies (1..9).
    """
    #make sure logs are sorted by depth
    logs = logs.sort_values(by='Depth')
    cmap_facies = colors.ListedColormap(
            facies_colors[0:len(facies_colors)], 'indexed')

    ztop=logs.Depth.min(); zbot=logs.Depth.max()

    # Repeat the 1-D facies vector 100 times horizontally so imshow
    # renders it as a solid colored strip.
    cluster=np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)

    f, ax = plt.subplots(nrows=1, ncols=6, figsize=(8, 12))
    ax[0].plot(logs.GR, logs.Depth, '-g')
    ax[1].plot(logs.ILD_log10, logs.Depth, '-')
    ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
    ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
    ax[4].plot(logs.PE, logs.Depth, '-', color='black')
    im=ax[5].imshow(cluster, interpolation='none', aspect='auto',
                    cmap=cmap_facies,vmin=1,vmax=9)

    # Colorbar keyed to the facies colormap, labeled with facies names.
    divider = make_axes_locatable(ax[5])
    cax = divider.append_axes("right", size="20%", pad=0.05)
    cbar=plt.colorbar(im, cax=cax)
    cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
                                  'SiSh', ' MS ', ' WS ', ' D ',
                                  ' PS ', ' BS ']))
    cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')

    # Shared depth axis, inverted so depth increases downwards.
    for i in range(len(ax)-1):
        ax[i].set_ylim(ztop,zbot)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)

    ax[0].set_xlabel("GR")
    ax[0].set_xlim(logs.GR.min(),logs.GR.max())
    ax[1].set_xlabel("ILD_log10")
    ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
    ax[2].set_xlabel("DeltaPHI")
    ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
    ax[3].set_xlabel("PHIND")
    ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
    ax[4].set_xlabel("PE")
    ax[4].set_xlim(logs.PE.min(),logs.PE.max())
    ax[5].set_xlabel('Facies')

    # Only the leftmost panel keeps depth tick labels.
    ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
    ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
    ax[5].set_xticklabels([])
    f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)
"""
Explanation: Let's take a look at the data from individual wells in a more familiar log plot form. We will create plots for the five well log variables, as well as a log for facies labels. The plots are based on the those described in Alessandro Amato del Monte's excellent tutorial.
End of explanation
"""
# Show the log/facies panel for the SHRIMPLIN well.
make_facies_log_plot(
    training_data[training_data['Well Name'] == 'SHRIMPLIN'],
    facies_colors)
"""
Explanation: Placing the log plotting code in a function will make it easy to plot the logs from multiples wells, and can be reused later to view the results when we apply the facies classification model to other wells. The function was written to take a list of colors and facies labels as parameters.
We then show log plots for wells SHRIMPLIN.
End of explanation
"""
#count the number of unique entries for each facies, sort them by
#facies number (instead of by number of entries)
facies_counts = training_data['Facies'].value_counts().sort_index()
#use facies labels to index each count
facies_counts.index = facies_labels

# Bar chart of how many training samples each facies contributes.
facies_counts.plot(kind='bar',color=facies_colors,
                   title='Distribution of Training Data by Facies')
facies_counts
"""
Explanation: In addition to individual wells, we can look at how the various facies are represented by the entire training set. Let's plot a histogram of the number of training examples for each facies class.
End of explanation
"""
#save plot display settings to change back to when done plotting with seaborn
inline_rc = dict(mpl.rcParams)

import seaborn as sns
sns.set()

# Scatter matrix of the five wireline logs, points colored by facies.
sns.pairplot(training_data.drop(['Well Name','Facies','Formation','Depth','NM_M','RELPOS'],axis=1),
             hue='FaciesLabels', palette=facies_color_map,
             hue_order=list(reversed(facies_labels)))

#switch back to default matplotlib plot style
mpl.rcParams.update(inline_rc)
"""
Explanation: This shows the distribution of examples by facies for the examples in the training set. Dolomite (facies 7) has the fewest with 81 examples. Depending on the performance of the classifier we are going to train, we may consider getting more examples of these facies.
Crossplots are a familiar tool in the geosciences to visualize how two properties vary with rock type. This dataset contains 5 log variables, and scatter matrix can help to quickly visualize the variation between the all the variables in the dataset. We can employ the very useful Seaborn library to quickly create a nice looking scatter matrix. Each pane in the plot shows the relationship between two of the variables on the x and y axis, with each point is colored according to its facies. The same colormap is used to represent the 9 facies.
End of explanation
"""
# Target vector: the known facies number for every training sample.
correct_facies_labels = training_data['Facies'].values

# Feature matrix: drop identifier/label columns, keeping the five logs
# plus the two geologic indicator variables (NM_M, RELPOS).
feature_vectors = training_data.drop(['Formation', 'Well Name', 'Depth','Facies','FaciesLabels'], axis=1)
feature_vectors.describe()
"""
Explanation: Conditioning the data set
Now we extract just the feature variables we need to perform the classification. The predictor variables are the five wireline values and two geologic constraining variables. We also get a vector of the facies labels that correspond to each feature vector.
End of explanation
"""
from sklearn import preprocessing

# Standardize features to zero mean / unit variance.  Keep the fitted
# scaler so the identical transform can be applied to new wells later.
scaler = preprocessing.StandardScaler().fit(feature_vectors)
scaled_features = scaler.transform(feature_vectors)
feature_vectors
"""
Explanation: Scikit includes a preprocessing module that can 'standardize' the data (giving each variable zero mean and unit variance, also called whitening). Many machine learning algorithms assume features will be standard normally distributed data (ie: Gaussian with zero mean and unit variance). The factors used to standardize the training set must be applied to any subsequent feature set that will be input to the classifier. The StandardScalar class can be fit to the training set, and later used to standardize any training data.
End of explanation
"""
# sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split

# Randomly hold out 10% of the samples as a test set; the fixed
# random_state makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
        scaled_features, correct_facies_labels, test_size=0.1, random_state=42)
"""
Explanation: Scikit also includes a handy function to randomly split the training data into training and test sets. The test set contains a small subset of feature vectors that are not used to train the classifier. Because we know the true facies labels for these examples, we can compare the results of the classifier to the actual facies and determine the accuracy of the model. Let's use 10% of the data for the test set.
End of explanation
"""
from sklearn import svm

# Support vector classifier with default hyperparameters (RBF kernel);
# C and gamma are tuned via cross validation further below.
clf = svm.SVC()
"""
Explanation: Training the SVM classifier
Now we use the cleaned and conditioned training set to create a facies classifier. As mentioned above, we will use a type of machine learning model known as a support vector machine. The SVM is a map of the feature vectors as points in a multi dimensional space, mapped so that examples from different facies are divided by a clear gap that is as wide as possible.
The SVM implementation in scikit-learn takes a number of important parameters. First we create a classifier using the default settings.
End of explanation
"""
# Train the classifier on the scaled training features.
clf.fit(X_train,y_train)
"""
Explanation: Now we can train the classifier using the training set we created above.
End of explanation
"""
# Predict facies for the held-out test vectors.
predicted_labels = clf.predict(X_test)
"""
Explanation: Now that the model has been trained on our data, we can use it to predict the facies of the feature vectors in the test set. Because we know the true facies labels of the vectors in the test set, we can use the results to evaluate the accuracy of the classifier.
End of explanation
"""
from sklearn.metrics import confusion_matrix
from classification_utilities import display_cm, display_adj_cm

# Rows = true facies, columns = predicted facies; entry [i][j] counts
# samples of facies i that were classified as facies j.
conf = confusion_matrix(y_test, predicted_labels)
display_cm(conf, facies_labels, hide_zeros=True)
"""
Explanation: We need some metrics to evaluate how good our classifier is doing. A confusion matrix is a table that can be used to describe the performance of a classification model. Scikit-learn allows us to easily create a confusion matrix by supplying the actual and predicted facies labels.
The confusion matrix is simply a 2D array. The entries of confusion matrix C[i][j] are equal to the number of observations predicted to have facies j, but are known to have facies i.
To simplify reading the confusion matrix, a function has been written to display the matrix along with facies labels and various error metrics. See the file classification_utilities.py in this repo for the display_cm() function.
End of explanation
"""
def accuracy(conf):
    """Overall classification accuracy from a confusion matrix.

    The diagonal of ``conf`` holds the correctly classified counts, so
    the accuracy is the trace divided by the total number of samples.
    """
    correct = np.trace(conf)
    total = conf.sum()
    return correct / total
"""
Explanation: The rows of the confusion matrix correspond to the actual facies labels. The columns correspond to the labels assigned by the classifier. For example, consider the first row. For the feature vectors in the test set that actually have label SS, 23 were correctly identified as SS, 21 were classified as CSiS and 2 were classified as FSiS.
The entries along the diagonal are the facies that have been correctly classified. Below we define two functions that will give an overall value for how the algorithm is performing. The accuracy is defined as the number of correct classifications divided by the total number of classifications.
End of explanation
"""
# adjacent_facies[i] lists the 0-based indices of the facies that are
# geologically adjacent to facies i.  The sub-lists have different
# lengths, so the array must be built with dtype=object explicitly:
# NumPy >= 1.24 raises an error on implicitly ragged arrays.
adjacent_facies = np.array([[1], [0,2], [1], [4], [3,5], [4,6,7], [5,7], [5,6,8], [6,7]], dtype=object)
def accuracy_adjacent(conf, adjacent_facies):
    """Accuracy that also counts predictions of an adjacent facies as correct.

    For each true facies i, correct predictions are the diagonal entry
    plus the entries in the columns listed in ``adjacent_facies[i]``.
    """
    n_classes = conf.shape[0]
    correct = 0.
    for true_facies in range(n_classes):
        correct += conf[true_facies][true_facies]
        correct += sum(conf[true_facies][neighbor]
                       for neighbor in adjacent_facies[true_facies])
    return correct / conf.sum()
# Report plain and adjacent-facies accuracy on the test split.
print('Facies classification accuracy = %f' % accuracy(conf))
print('Adjacent facies classification accuracy = %f' % accuracy_adjacent(conf, adjacent_facies))
"""
Explanation: As noted above, the boundaries between the facies classes are not all sharp, and some of them blend into one another. The error within these 'adjacent facies' can also be calculated. We define an array to represent the facies adjacent to each other. For facies label i, adjacent_facies[i] is an array of the adjacent facies labels.
End of explanation
"""
#model selection takes a few minutes, change this variable
#to true to run the parameter loop
do_model_selection = True

if do_model_selection:
    # Grid of regularization strengths (C) and RBF kernel widths (gamma).
    C_range = np.array([.01, 1, 5, 10, 20, 50, 100, 1000, 5000, 10000])
    gamma_range = np.array([0.0001, 0.001, 0.01, 0.1, 1, 10])

    fig, axes = plt.subplots(3, 2,
                             sharex='col', sharey='row',figsize=(10,10))

    plot_number = 0
    for outer_ind, gamma_value in enumerate(gamma_range):
        # One subplot per gamma value, laid out on a 3x2 grid.
        row = int(plot_number / 2)
        column = int(plot_number % 2)
        cv_errors = np.zeros(C_range.shape)
        train_errors = np.zeros(C_range.shape)
        for index, c_value in enumerate(C_range):
            # Train a fresh classifier for each (C, gamma) pair and
            # record accuracy on both the training and the test split.
            clf = svm.SVC(C=c_value, gamma=gamma_value)
            clf.fit(X_train,y_train)

            train_conf = confusion_matrix(y_train, clf.predict(X_train))
            cv_conf = confusion_matrix(y_test, clf.predict(X_test))

            cv_errors[index] = accuracy(cv_conf)
            train_errors[index] = accuracy(train_conf)

        # NOTE: despite the 'error' labels these curves plot accuracy.
        ax = axes[row, column]
        ax.set_title('Gamma = %g'%gamma_value)
        ax.semilogx(C_range, cv_errors, label='CV error')
        ax.semilogx(C_range, train_errors, label='Train error')
        plot_number += 1
        ax.set_ylim([0.2,1])

    # Shared legend and axis labels for the whole figure.
    ax.legend(bbox_to_anchor=(1.05, 0), loc='lower left', borderaxespad=0.)
    fig.text(0.5, 0.03, 'C value', ha='center',
             fontsize=14)
    fig.text(0.04, 0.5, 'Classification Accuracy', va='center',
             rotation='vertical', fontsize=14)
"""
Explanation: Model parameter selection
The classifier so far has been built with the default parameters. However, we may be able to get improved classification results with optimal parameter choices.
We will consider two parameters. The parameter C is a regularization factor, and tells the classifier how much we want to avoid misclassifying training examples. A large value of C will try to correctly classify more examples from the training set, but if C is too large it may 'overfit' the data and fail to generalize when classifying new data. If C is too small then the model will not be good at fitting outliers and will have a large error on the training set.
The SVM learning algorithm uses a kernel function to compute the distance between feature vectors. Many kernel functions exist, but in this case we are using the radial basis function rbf kernel (the default). The gamma parameter describes the size of the radial basis functions, which is how far away two vectors in the feature space need to be to be considered close.
We will train a series of classifiers with different values for C and gamma. Two nested loops are used to train a classifier for every possible combination of values in the ranges specified. The classification accuracy is recorded for each combination of parameter values. The results are shown in a series of plots, so the parameter values that give the best classification accuracy on the test set can be selected.
This process is also known as 'cross validation'. Often a separate 'cross validation' dataset will be created in addition to the training and test sets to do model selection. For this tutorial we will just use the test set to choose model parameters.
End of explanation
"""
# Retrain with the best hyperparameters found in the grid search above.
clf = svm.SVC(C=10, gamma=1)
clf.fit(X_train, y_train)

cv_conf = confusion_matrix(y_test, clf.predict(X_test))

print('Optimized facies classification accuracy = %.2f' % accuracy(cv_conf))
print('Optimized adjacent facies classification accuracy = %.2f' % accuracy_adjacent(cv_conf, adjacent_facies))
"""
Explanation: The best accuracy on the cross validation error curve was achieved for gamma = 1, and C = 10. We can now create and train an optimized classifier based on these parameters:
End of explanation
"""
# Per-facies precision/recall/F1 derived from the confusion matrix.
display_cm(cv_conf, facies_labels,
           display_metrics=True, hide_zeros=True)
"""
Explanation: Precision and recall are metrics that give more insight into how the classifier performs for individual facies. Precision is the probability that given a classification result for a sample, the sample actually belongs to that class. Recall is the probability that a sample will be correctly classified for a given class.
Precision and recall can be computed easily using the confusion matrix. The code to do so has been added to the display_confusion_matrix() function:
End of explanation
"""
# Same metrics, but counting adjacent-facies predictions as correct.
display_adj_cm(cv_conf, facies_labels, adjacent_facies,
               display_metrics=True, hide_zeros=True)
"""
Explanation: To interpret these results, consider facies SS. In our test set, if a sample was labeled SS the probability the sample was correct is 0.8 (precision). If we know a sample has facies SS, then the probability it will be correctly labeled by the classifier is 0.78 (recall). It is desirable to have high values for both precision and recall, but often when an algorithm is tuned to increase one, the other decreases. The F1 score combines both to give a single measure of relevancy of the classifier results.
These results can help guide intuition for how to improve the classifier results. For example, for a sample with facies MS or mudstone, it is only classified correctly 57% of the time (recall). Perhaps this could be improved by introducing more training samples. Sample quality could also play a role. Facies BS or bafflestone has the best F1 score and relatively few training examples. But this data was handpicked from other wells to provide training examples to identify this facies.
We can also consider the classification metrics when we consider misclassifying an adjacent facies as correct:
End of explanation
"""
# Inspect the blind well that was held out before training.
blind
"""
Explanation: Considering adjacent facies, the F1 scores for all facies types are above 0.9, except when classifying SiSh or marine siltstone and shale. The classifier often misclassifies this facies (recall of 0.66), most often as wackestone.
These results are comparable to those reported in Dubois et al. (2007).
Applying the classification model to the blind data
We held a well back from the training, and stored it in a dataframe called blind:
End of explanation
"""
# True facies labels for the blind well.
y_blind = blind['Facies'].values
"""
Explanation: The label vector is just the Facies column:
End of explanation
"""
# Feature matrix for the blind well: drop the same identifier/label
# columns as were dropped for training.
well_features = blind.drop(['Facies', 'Formation', 'Well Name', 'Depth'], axis=1)
"""
Explanation: We can form the feature matrix by dropping some of the columns and making a new dataframe:
End of explanation
"""
# Apply the scaler fitted on the training data (never refit on new data).
X_blind = scaler.transform(well_features)
"""
Explanation: Now we can transform this with the scaler we made before:
End of explanation
"""
# Predict facies for the blind well and store the result next to the logs.
y_pred = clf.predict(X_blind)
blind['Prediction'] = y_pred
"""
Explanation: Now it's a simple matter of making a prediction and storing it back in the dataframe:
End of explanation
"""
# Score the blind-well predictions against the known facies.
cv_conf = confusion_matrix(y_blind, y_pred)

print('Optimized facies classification accuracy = %.2f' % accuracy(cv_conf))
print('Optimized adjacent facies classification accuracy = %.2f' % accuracy_adjacent(cv_conf, adjacent_facies))
"""
Explanation: Let's see how we did with the confusion matrix:
End of explanation
"""
# Detailed per-facies metrics on the blind well.
display_cm(cv_conf, facies_labels,
           display_metrics=True, hide_zeros=True)
"""
Explanation: We managed 0.71 using the test data, but it was from the same wells as the training data. This more reasonable test does not perform as well...
End of explanation
"""
# Blind-well metrics counting adjacent-facies predictions as correct.
display_adj_cm(cv_conf, facies_labels, adjacent_facies,
               display_metrics=True, hide_zeros=True)
def compare_facies_plot(logs, compadre, facies_colors):
    """Plot a well's logs with the true facies column next to a second
    facies column.

    Parameters
    ----------
    logs : DataFrame for a single well, including a 'Facies' column and
        the column named by ``compadre``.
    compadre : name of the column to compare against the true facies
        (e.g. the classifier's 'Prediction' column).
    facies_colors : list of 9 color strings, one per facies (1..9).
    """
    #make sure logs are sorted by depth
    logs = logs.sort_values(by='Depth')
    cmap_facies = colors.ListedColormap(
            facies_colors[0:len(facies_colors)], 'indexed')

    ztop=logs.Depth.min(); zbot=logs.Depth.max()

    # 100-column strips so imshow renders each facies vector as a band.
    cluster1 = np.repeat(np.expand_dims(logs['Facies'].values,1), 100, 1)
    cluster2 = np.repeat(np.expand_dims(logs[compadre].values,1), 100, 1)

    f, ax = plt.subplots(nrows=1, ncols=7, figsize=(9, 12))
    ax[0].plot(logs.GR, logs.Depth, '-g')
    ax[1].plot(logs.ILD_log10, logs.Depth, '-')
    ax[2].plot(logs.DeltaPHI, logs.Depth, '-', color='0.5')
    ax[3].plot(logs.PHIND, logs.Depth, '-', color='r')
    ax[4].plot(logs.PE, logs.Depth, '-', color='black')
    im1 = ax[5].imshow(cluster1, interpolation='none', aspect='auto',
                       cmap=cmap_facies,vmin=1,vmax=9)
    im2 = ax[6].imshow(cluster2, interpolation='none', aspect='auto',
                       cmap=cmap_facies,vmin=1,vmax=9)

    # Colorbar keyed to the facies colormap, labeled with facies names.
    divider = make_axes_locatable(ax[6])
    cax = divider.append_axes("right", size="20%", pad=0.05)
    cbar=plt.colorbar(im2, cax=cax)
    cbar.set_label((17*' ').join([' SS ', 'CSiS', 'FSiS',
                                  'SiSh', ' MS ', ' WS ', ' D ',
                                  ' PS ', ' BS ']))
    cbar.set_ticks(range(0,1)); cbar.set_ticklabels('')

    # Shared depth axis for the five log panels, inverted so depth
    # increases downwards.
    for i in range(len(ax)-2):
        ax[i].set_ylim(ztop,zbot)
        ax[i].invert_yaxis()
        ax[i].grid()
        ax[i].locator_params(axis='x', nbins=3)

    ax[0].set_xlabel("GR")
    ax[0].set_xlim(logs.GR.min(),logs.GR.max())
    ax[1].set_xlabel("ILD_log10")
    ax[1].set_xlim(logs.ILD_log10.min(),logs.ILD_log10.max())
    ax[2].set_xlabel("DeltaPHI")
    ax[2].set_xlim(logs.DeltaPHI.min(),logs.DeltaPHI.max())
    ax[3].set_xlabel("PHIND")
    ax[3].set_xlim(logs.PHIND.min(),logs.PHIND.max())
    ax[4].set_xlabel("PE")
    ax[4].set_xlim(logs.PE.min(),logs.PE.max())
    ax[5].set_xlabel('Facies')
    ax[6].set_xlabel(compadre)

    # Only the leftmost panel keeps depth tick labels.
    ax[1].set_yticklabels([]); ax[2].set_yticklabels([]); ax[3].set_yticklabels([])
    ax[4].set_yticklabels([]); ax[5].set_yticklabels([])
    ax[5].set_xticklabels([])
    ax[6].set_xticklabels([])
    f.suptitle('Well: %s'%logs.iloc[0]['Well Name'], fontsize=14,y=0.94)

# Compare the blind well's true facies against the model prediction.
compare_facies_plot(blind, 'Prediction', facies_colors)
"""
Explanation: ...but does remarkably well on the adjacent facies predictions.
End of explanation
"""
# Load the unlabeled wells and build their feature matrix with the
# same columns used for training.
well_data = pd.read_csv('validation_data_nofacies.csv')
well_data['Well Name'] = well_data['Well Name'].astype('category')
well_features = well_data.drop(['Formation', 'Well Name', 'Depth'], axis=1)
"""
Explanation: Applying the classification model to new data
Now that we have a trained facies classification model we can use it to identify facies in wells that do not have core data. In this case, we will apply the classifier to two wells, but we could use it on any number of wells for which we have the same set of well logs for input.
This dataset is similar to the training data except it does not have facies labels. It is loaded into a dataframe called test_data.
End of explanation
"""
# Scale with the scaler fitted on the training data.
X_unknown = scaler.transform(well_features)
"""
Explanation: The data needs to be scaled using the same constants we used for the training data.
End of explanation
"""
#predict facies of unclassified data
y_unknown = clf.predict(X_unknown)
well_data['Facies'] = y_unknown
well_data

# The validation set contains these wells.
well_data['Well Name'].unique()
"""
Explanation: Finally we predict facies labels for the unknown data, and store the results in a Facies column of the test_data dataframe.
End of explanation
"""
# Log/facies panels for the two newly classified wells.
make_facies_log_plot(
    well_data[well_data['Well Name'] == 'STUART'],
    facies_colors=facies_colors)

make_facies_log_plot(
    well_data[well_data['Well Name'] == 'CRAWFORD'],
    facies_colors=facies_colors)
"""
Explanation: We can use the well log plot to view the classification results along with the well logs.
End of explanation
"""
# Persist the well data together with the predicted facies column.
well_data.to_csv('well_data_with_facies.csv')
"""
Explanation: Finally we can write out a csv file with the well data along with the facies classification results.
End of explanation
"""
|
dtamayo/reboundx | ipython_examples/YarkovskyEffect.ipynb | gpl-3.0 | import rebound
import reboundx
import numpy as np
import astropy.units as u
import astropy.constants as constants
import matplotlib.pyplot as plt
%matplotlib inline
#Simulation begins here
sim = rebound.Simulation()
sim.units = ('yr', 'AU', 'Msun') #changes simulation and G to units of solar masses, years, and AU
sim.integrator = "whfast" #integrator for sim
sim.dt = .05 #timestep for sim
sim.add(m=1) #Adds Sun
sim.add(a=.5, f=0, Omega=0, omega=0, e=0, inc=0, m=0) #adds test particle

#Moves all particles to center of momentum frame
sim.move_to_com()

#Gives orbital information before the simulation begins
print("\n***INITIAL ORBITS:***")
for orbit in sim.calculate_orbits():
    print(orbit)
"""
Explanation: Yarkovsky Effect
This example shows how to add the Yarkovsky effect in a Rebound simulation. There are two versions, which we call the 'Full Version' and the 'Simple Version.' A special parameter called 'ye_flag' is used to switch between the two. The difference between the versions and what situations they're better suited for is discussed in more detail below.
For more information on this effect, please visit: (implementation paper in progress)
We'll start with the Full Version.
Full Version
This version of the effect is based off of the equations found in Veras et al. (2015). A link to the paper is provided below. The Full Version can be used to get detailed calculations of the Yarkovsky effect on a particular body. However, it requires a large amount of parameters that may be difficult to find for a particular object. It also takes more computational time due to the large amount of equations that must be calaculated between each time step of the simulation. This version of the effect can be used to get accurate calculations on how a body is perturbed by the Yarkovsky effect.
Link to paper: https://ui.adsabs.harvard.edu/abs/2015MNRAS.451.2814V/abstract
Below is a simple example to show how to add the effect to a simulation. First, we create a Rebound simulation with the Sun and a test particle (which will be considered an asteroid) at .5 AU.
End of explanation
"""
# Physical parameters for the Yarkovsky force, converted with astropy
# to the simulation's (Msun, AU, yr) unit system before passing the
# bare .value to REBOUNDx.
density = (3000.0*u.kg/u.m**3).to(u.Msun/u.AU**3)
c = (constants.c).to(u.AU/u.yr) #speed of light
lstar = (3.828e26*u.kg*u.m**2/u.s**3).to(u.Msun*u.AU**2/u.yr**3) #luminosity of star
radius = (1000*u.m).to(u.AU) #radius of object
albedo = .017 #albedo of object
stef_boltz = constants.sigma_sb.to(u.Msun/u.yr**3/u.K**4) #Stefan-Boltzmann constant
emissivity = .9 #emissivity of object
k = .25 #dimensionless constant of the Yarkovsky model (presumably bounded between 0 and 1/4 per Veras et al. 2015 -- TODO confirm)
Gamma = (310*u.kg/u.s**(5/2)).to(u.Msun/u.yr**(5/2)) #thermal inertia of object
rotation_period = (15470.9*u.s).to(u.yr) #rotation period of object
"""
Explanation: As with all REBOUNDx effects, the parameters must be inputed with the same units as the simulation (in this case it's AU/Msun/yr). We'll use the astropy units module to help avoid errors
End of explanation
"""
#Loads the effect into Rebound
rebx = reboundx.Extras(sim)
yark = rebx.load_force("yarkovsky_effect")

#Sets the parameters for the effect
# These three are simulation-wide constants, so they live on the force
# object rather than on any individual particle.
yark.params["ye_c"] = c.value #set on the sim and not a particular particle
yark.params["ye_lstar"] = lstar.value #set on the sim and not a particular particle
yark.params["ye_stef_boltz"] = stef_boltz.value #set on the sim and not a particular particle
"""
Explanation: We then add the Yarkovsky effect and the required parameters for this version. Importantly, we must set 'ye_flag' to 0 to get the Full Version. Physical constants and the stellar luminosity get added to the effect yark
End of explanation
"""
# Set the per-particle parameters needed by the Full Version of the effect.
ps = sim.particles
ps[1].r = radius.value  # radius is set on the REBOUND particle itself, not as a REBOUNDx parameter
ps[1].params["ye_flag"] = 0  # setting this flag to 0 selects the Full Version of the effect
ps[1].params["ye_body_density"] = density.value
ps[1].params["ye_albedo"] = albedo
ps[1].params["ye_emissivity"] = emissivity
ps[1].params["ye_k"] = k
ps[1].params["ye_thermal_inertia"] = Gamma.value
ps[1].params["ye_rotation_period"] = rotation_period.value
# For this example we assume the object has a spin axis perpendicular to the orbital plane: unit vector = (0,0,1)
ps[1].params["ye_spin_axis_x"] = 0
ps[1].params["ye_spin_axis_y"] = 0
ps[1].params["ye_spin_axis_z"] = 1
rebx.add_force(yark)  # adds the force to the simulation
"""
Explanation: Other parameters need to be added to each particle feeling the Yarkovsky effect
End of explanation
"""
%%time
tmax=100000 # total integration time, in yrs
Nout = 1000  # number of samples of the semi-major axis
times = np.linspace(0, tmax, Nout)
a_start = .5  # starting semi-major axis for the asteroid
a = np.zeros(Nout)
for i, time in enumerate(times):
    # Record a before integrating, so a[0] holds the initial value.
    a[i] = ps[1].a
    sim.integrate(time)
a_final = ps[1].a  # semi-major axis of asteroid after the sim
print("CHANGE IN SEMI-MAJOR AXIS:", a_final-a_start, "AU\n")  # prints difference between the initial and final semi-major axes of asteroid
fig, ax = plt.subplots()
ax.plot(times, a-a_start, '.')
ax.set_xlabel('Time (yrs)')
ax.set_ylabel('Change in semimajor axis (AU)')
"""
Explanation: We integrate this system for 100,000 years and print out the difference between the particle's semi-major axis before and after the simulation.
End of explanation
"""
# Fresh simulation for the Simple Version: Sun plus two massless test
# particles (the two asteroids).
sim = rebound.Simulation()
sim.units = ('yr', 'AU', 'Msun')  # sets simulation units (and G) to solar masses, years, and AU
sim.integrator = "whfast"  # integrator for sim
sim.dt = .05  # timestep for sim, in yrs
sim.add(m=1)  # adds Sun
sim.add(a=.5, f=0, Omega=0, omega=0, e=0, inc=0, m=0)  # adds test particle
sim.add(a=.75, f=0, Omega=0, omega=0, e=0, inc=0, m=0)  # adds a second test particle
# Moves all particles to center of momentum frame
sim.move_to_com()
# Gives orbital information before the simulation begins
print("\n***INITIAL ORBITS:***")
for orbit in sim.calculate_orbits():
    print(orbit)
"""
Explanation: Simple Version
This version of the effect is based off of equations from Veras et al. (2019). Once again, a link to this paper is provided below. This version simplifies the equations by placing constant values in a rotation matrix that in general is time-dependent. It requires fewer parameters than the full version and takes less computational time. However, it is mostly useful only to get a general idea on how much the effect can push bodies inwards or outwards. This version of the effect is better for simulating large groups of asteroids or trying to see general trends in the behavior of a body.
Link to paper: https://ui.adsabs.harvard.edu/abs/2019MNRAS.485..708V/abstract
We'll use the same setup as before, but we'll also add another asteroid at .75 AU with identical physical properties. Let's start by creating a Rebound simulation again.
End of explanation
"""
# Load REBOUNDx and attach the Yarkovsky-effect force (Simple Version).
rebx = reboundx.Extras(sim)
yark = rebx.load_force("yarkovsky_effect")
# Shared constants, set on the force itself.
yark.params["ye_c"] = c.value
yark.params["ye_lstar"] = lstar.value
ps = sim.particles  # shortcut for accessing the particles' parameters
ps[1].params["ye_flag"] = 1  # setting this flag to 1 selects the Simple Version pushing the body outward
ps[1].params["ye_body_density"] = density.value
ps[1].params["ye_albedo"] = albedo
ps[1].r = radius.value  # radius is set on the REBOUND particle itself, not as a REBOUNDx parameter
ps[2].params["ye_flag"] = -1  # setting this flag to -1 selects the Simple Version pushing the body inward
ps[2].params["ye_body_density"] = density.value
ps[2].params["ye_albedo"] = albedo
ps[2].r = radius.value
rebx.add_force(yark)  # adds the force to the simulation
"""
Explanation: We then add the Yarkovsky effect from REBOUNDx and the necessary parameters for this version. This time, we must make sure that 'ye_flag' is set to 1 or -1 to get the Simple Version of the effect. Setting it to 1 will push the asteroid outwards, while setting it to -1 will push it inwards. We'll push out our original asteroid and push in our new one. We use the same physical properties as in the example above:
End of explanation
"""
%%time
tmax=100000 # total integration time, in yrs
a_start_1 = .5  # starting semi-major axis for the 1st asteroid
a_start_2 = .75  # starting semi-major axis for the 2nd asteroid
# Nout and times are reused from the Full Version cell above.
a1, a2 = np.zeros(Nout), np.zeros(Nout)
for i, time in enumerate(times):
    # Record both semi-major axes before integrating to this output time.
    a1[i] = ps[1].a
    a2[i] = ps[2].a
    sim.integrate(time)
a_final_1 = ps[1].a  # semi-major axis of 1st asteroid after the sim
a_final_2 = ps[2].a  # semi-major axis of 2nd asteroid after the sim
print("CHANGE IN SEMI-MAJOR AXIS(Asteroid 1):", a_final_1-a_start_1, "AU\n")
print("CHANGE IN SEMI-MAJOR AXIS(Asteroid 2):", a_final_2-a_start_2, "AU\n")
fig, ax = plt.subplots()
ax.plot(times, a1-a_start_1, '.', label='Asteroid 1')
ax.plot(times, a2-a_start_2, '.', label='Asteroid 2')
ax.set_xlabel('Time (yrs)')
ax.set_ylabel('Change in semimajor axis (AU)')
ax.legend()
"""
Explanation: Now we run the sim for 100,000 years and print out the results for both asteroids. Note the difference in simulation times between the versions. Even with an extra particle, the simple version was faster than the full version.
End of explanation
"""
|
AssembleSoftware/IoTPy | examples/ExamplesOfMulticore.ipynb | bsd-3-clause | import sys
import time
import threading
sys.path.append("../")
from IoTPy.core.stream import Stream, StreamArray, run
from IoTPy.agent_types.op import map_element, map_list, map_window
from IoTPy.helper_functions.recent_values import recent_values
from IoTPy.helper_functions.print_stream import print_stream
from IoTPy.concurrency.multicore import get_processes, get_processes_and_procs
from IoTPy.concurrency.multicore import terminate_stream
from IoTPy.concurrency.multicore import get_proc_that_inputs_source
from IoTPy.concurrency.multicore import extend_stream
"""
Explanation: Examples of Multicore Applications:
Shared-Memory Multiprocess Applications
In this notebook we look at multiprocess applications in IoTPy. The processes share memory. Associated with each process is an agent. The application can also have:
<ol>
<li> source threads that acquire data from external sources and </li>
<li> actuator threads the get data from output queues. </li>
</ol>
<b> The central idea is that an application is specified by connecting outputs of processes to inputs of processes.</b>
The Agent associated with a Process
A process in a multicore application executes an agent with the following signature:
<br>
<br>
<b>f(in_streams, out_streams)</b>
<br>
<br>
where:
<ol>
<li> <i>f</i> is a function. </li>
<li> <i>in_streams</i> is a list of input streams. </li>
<li> <i>out_streams</i> is a list of output streams. </li>
</ol>
End of explanation
"""
def f(in_streams, out_streams):
    """Agent function: add 100 to each element of the input stream."""
    def add_hundred(value):
        return value + 100
    map_element(add_hundred, in_streams[0], out_streams[0])
def g(in_streams, out_streams):
    """Agent function: double each input element into a local stream 's'
    and print that stream."""
    doubled = Stream('s')
    map_element(lambda item: 2 * item, in_streams[0], doubled)
    print_stream(doubled, 's')
def h(in_streams, out_streams):
    """Agent function: double each element of the input stream onto the
    output stream."""
    def twice(value):
        return 2 * value
    map_element(twice, in_streams[0], out_streams[0])
def r(in_streams, out_streams):
    """Agent function: triple each input element into a local stream 't'
    and print that stream."""
    tripled = Stream('t')
    map_element(lambda item: 3 * item, in_streams[0], tripled)
    print_stream(tripled, 't')
"""
Explanation: Next we show a collection of agents, <i>f</i>, <i>g</i>, <i>h</i>, and <i>r</i>, with this signature. We will use these agents in the examples of multicore programs.
End of explanation
"""
# Step 0: Define source thread target (if any).
# We will use this thread target for the next few examples.
def source_thread_target(procs):
    """Source thread target: feed stream 'x' with 0..5 in chunks of two,
    sleeping briefly between chunks, then close the stream."""
    for start in range(0, 6, 2):
        extend_stream(procs, data=[start, start + 1], stream_name='x')
        time.sleep(0.001)  # yield the thread, simulating an external source
    terminate_stream(procs, stream_name='x')
"""
Explanation: Threads
A process may execute an arbitrary number of threads. You can use any thread target.
<br>
<br>
Most threads in IoTPY applications pass data to the application or get data from the application. A thread that passes data from an external source, such as a sensor or a Twitter stream, to an IoTPy process is called a <b>source thread</b>.
Source Threads
A source thread calls the following function to put data into a stream:
<br>
<br>
<b>extend_stream(procs, data, stream_name)</b>
<br>
<br>
where
<ol>
<li> <i>procs</i> is a list of process metadata created from the specification of a multicore program. <i>procs</i> is passed as a parameter to the thread target. We will discuss <i>procs</i> later. </li>
<li> <i>data</i> is a list or an array. </li>
<li> <i>stream_name</i> is a string which is the name of a stream.</li>
</ol>
In the example, <i>source_thread_target</i>, the function has a single argument <i>procs</i>. All thread targets that extend streams must have <i>procs</i> as one of its arguments.
<br>
<br>
This function executes a loop in which puts specified data into a stream called <i>x</i> and then sleeps thus yielding the thread.
<br>
<br>
<b>terminate_stream</b>
Sources
A source in a multiprocess application is associated with a process. A source <i>s</i> in a process <i>p</i> is essentially an output stream of <i>p</i>; it differs from output streams created by <i>p</i> in the sense that it is fed by a thread rather than computed by <i>p</i>. However, we don't include <i>s</i> in the list of <b>outputs</b> of <i>p</i>; instead we include <i>s</i> in the list of <b>sources</b> of <i>p</i>.
<br>
<br>
Note that a source thread that generates a source <i>s</i> in a process <i>p</i> can run in a different process <i>p'</i>. You want to choose the process in which to run a thread to balance the computational load across processes. If the output of a source <i>s</i> feeds input streams of exactly one process <i>r</i> then efficiency suggests that <i>s</i> should be a source of <i>r</i>; however, you can make <i>s</i> the source of any process.
Steps to create a multiprocess application
You may find the following steps helpful in creating a multiprocess application. You don't have to follow exactly these steps in this order.
<br>
<br>
<i>Step 0</i>: <b>Define agent functions and source thread targets.</b>
<ol>
<li> <i>Step 0.0</i>: Each process has an agent associated with it, as described earlier. Specify the agent functions for each process. Recall that an agent function has the form:
<br>
<b>f(in_streams, out_streams)</b>.</li>
<li><i>Step 0.1 </i> Define the thread targets for each source. These thread targets typically extend a source stream, and finally terminate the stream.</li>
</ol>
<i>Step 1: </i> <b>Give the multicore_specification of streams and processes.</b> The multicore specification specifies a list of streams and a list of agents.
<br>
<i>Step 2: </i> <b>Create processes</b> by calling:
<br>
processes, procs = get_processes_and_procs(multicore_specification)
<br>
<i>Step 3: </i> <b>Create threads</b> (if any). An example creation of a thread is:
<br>
thread_0 = threading.Thread(target=source_thread_target, args=(procs,))
<br>
<i>Step 4: </i> <b>Specify which process each thread runs in.</b> An example:
<br>
procs['p1'].threads = [thread_0]
<br>
<i>Step 5: </i>. <b>Start, join and terminate processes </b> by calling
<br>
for process in processes: process.start()
for process in processes: process.join()
for process in processes: process.terminate()
End of explanation
"""
# Step 1: multicore_specification of streams and processes.
# Streams are (name, type) pairs; 'i' means the stream carries ints.
multicore_specification = [
    # Streams
    [('x', 'i'), ('y', 'i')],
    # Processes
    # p0 computes y from input x (and lists x among its sources);
    # p1 consumes y and produces no output streams.
    [{'name': 'p0', 'agent': f, 'inputs':['x'], 'outputs': ['y'], 'sources':['x']},
     {'name': 'p1', 'agent': g, 'inputs': ['y']}]]
# Step 2: Create processes.
processes, procs = get_processes_and_procs(multicore_specification)
# Step 3: Create threads (if any).
# source_thread_target (defined in an earlier cell) feeds stream 'x'.
thread_0 = threading.Thread(target=source_thread_target, args=(procs,))
# Step 4: Specify which process each thread (if any) runs in.
# thread_0 runs in the process called 'p1'
procs['p1'].threads = [thread_0]
# Step 5: Start, join and terminate processes.
for process in processes: process.start()
for process in processes: process.join()
for process in processes: process.terminate()
# Step 1: multicore_specification of streams and processes.
# Same as the previous cell except that stream 'x' is listed as a source
# of process p1 (where the source thread runs) rather than of p0.
multicore_specification = [
    # Streams
    [('x', 'i'), ('y', 'i')],
    # Processes
    [{'name': 'p0', 'agent': f, 'inputs':['x'], 'outputs': ['y']},
     {'name': 'p1', 'agent': g, 'inputs': ['y'], 'sources':['x']}]]
# Step 2: Create processes.
processes, procs = get_processes_and_procs(multicore_specification)
# Step 3: Create threads (if any).
thread_0 = threading.Thread(target=source_thread_target, args=(procs,))
# Step 4: Specify which process each thread (if any) runs in.
# thread_0 runs in the process called 'p1'
procs['p1'].threads = [thread_0]
# Step 5: Start, join and terminate processes.
for process in processes: process.start()
for process in processes: process.join()
for process in processes: process.terminate()
"""
Explanation: Simple example of a multicore program
Multicore specification: Processes and their connecting streams
Look at <b>multicore_specification</b>. The specification states that the program has two processes called p0 and p1. Process p0 has a single input stream <i>x</i> and a single output stream <i>y</i>. Process p1 has a single input stream <i>y</i> and no output streams. Thus, the output <i>y</i> of process p0 is the input of process p1.
<br>
Multicore specification: Streams
Streams are specified by a list of pairs where each pair is a stream name and a stream type. The stream type 'i' identifies integers, 'f' floats and 'd' double. We use stream types to allow processes to share memory in Python 2.7+. In this example, the pair ('x', 'i') says that the program has a stream <i>x</i> of type int.
<br>
Multicore specification: Sources
Stream <i>x</i> is fed by the thread target <i>source_thread_target</i>, which runs in its own thread inside one of the processes (here it is attached to process <i>p1</i> with <b>procs['p1'].threads = [thread_0]</b>). The thread target has a single argument <i>procs</i>, the process metadata returned by <i>get_processes_and_procs</i>.
<br>
<br>
The thread puts data into stream <i>x</i> by calling <b>extend_stream(procs, data, stream_name='x')</b>, sleeping for 0.001 seconds between extensions. Finally, the thread signals that no further values will be appended to <i>x</i> by calling <b>terminate_stream(procs, stream_name='x')</b>.
Process Structure
The source <i>h</i> outputs a stream <i>x</i> which is an input of process p0. The output <i>y</i> of process p0 is an input to process p1.
Process Computations
The computation of a process is specified by a function with two arguments <i>in_streams</i> and <i>out_streams</i>. The computation carried out by p0 is specified by function <i>f</i> which reads a single input stream, <i>in_streams[0]</i> and write a single output stream, <i>out_streams[0]</i>. This agent makes:
<br>
<br>
<b> y[n] = x[n] + 100 </b>
<br>
<br>
The computation carried out by process p1 is specified by function <i>g</i> which prints <b>2 * y[n]</b> for all n.
<br>
<br>
The source function <i>h</i> sets x[n] to n, and so this multicore process prints:
<br>
<br>
<b> 2 * (n + 100) </b>
End of explanation
"""
# Step 1: multicore_specification of streams and processes.
# Pipeline: source thread -> x -> p0 -> y -> p1 -> z -> p2.
multicore_specification = [
    # Streams
    [('x', 'i'), ('y', 'i'), ('z', 'i')],
    # Processes
    [{'name': 'p0', 'agent': f, 'inputs':['x'], 'outputs': ['y']},
     {'name': 'p1', 'agent': h, 'inputs': ['y'], 'outputs': ['z'], 'sources': ['x']},
     {'name': 'p2', 'agent': r, 'inputs': ['z']}]
]
# Step 2: Create processes.
processes, procs = get_processes_and_procs(multicore_specification)
# Step 3: Create threads (if any)
thread_0 = threading.Thread(target=source_thread_target, args=(procs,))
# Step 4: Specify which process each thread runs in.
# thread_0 runs in the process called 'p1'
procs['p1'].threads = [thread_0]
# Step 5: Start, join and terminate processes.
for process in processes: process.start()
for process in processes: process.join()
for process in processes: process.terminate()
"""
Explanation: Example of Three Processes in a Row
This example is the same as the previous one except that it has a third process attached to process p2. The source thread <i>h</i> feeds stream <i>x</i> which is the input to process p0. The output of p0 is stream <i>y</i> which is the input to process p1. The output of p1 is stream <i>z</i> which is the input to process p2.
<br>
Streams
[('x', 'i'), ('y', 'i'), ('z', 'i')]
This specifies that this system has three streams called 'x', 'y' and 'z' which contain ints.
Sources
<b>Source Thread</b>
<br>
The thread target <i>source_thread_target</i> runs in its own thread. It puts [0, 1], then [2, 3], then [4, 5] into the stream called <i>x</i>, sleeping between extensions. It then calls <i>terminate_stream</i> to indicate that it has finished executing and so no further values will be appended to <i>x</i>.
<br>
<br>
The thread is created from this target and attached to the process called <i>p1</i> with <b>procs['p1'].threads = [thread_0]</b>.
<br>
<b>Stream Sources</b> Stream <i>x</i> is a source of process <i>p1</i> because it appears in the 'sources' list of the specification of <i>p1</i>:
<br>
{'name': 'p1', 'agent': h, 'inputs': ['y'], 'outputs': ['z'], <b>'sources': ['x']</b>}
Process Structure
<ol>
<li>Source function <i>h</i> feeds stream <i>x</i> which is an input of process <i>p0</i>. </li>
<li> Output stream <i>y</i> of process <i>p0</i> is an input stream of process <i>p1</i>.</li>
<li> Output stream <i>z</i> of process <i>p1</i> is an input stream of process <i>p2</i>.</li>
<li> Process <i>p2</i> has no output streams. </li>
</ol>
Process Functions
Each process function has parameters <i>in_streams</i>, <i>out_streams</i> and possibly additional keyword or positional arguments. The process functions associated with processes <i>p0</i>, <i>p1</i>, and <i>p2</i> are <i>f</i>, <i>g</i> and <i>r</i>, respectively. The process function for a process is in the processes part of <i>multicore_specification</i>.
<br>
<ol>
<li> The source extends stream <i>x</i> with [0, 1, 2, 3, 4, 5] and then calls <i>finished_source</i>. Thus <b>x[n] = n </b> for n less than 6. </li>
<li> Process function <i>f</i> of <i>p0</i> adds 100 to its <i>in_streams[0]</i> which is stream <i>x</i> and puts the result in its <i>out_streams[0]</i> which is stream <i>y</i>. Thus <b>y[n] = x[n]+100 = n + 100</b> for n less than 6. </li>
<li> Process function <i>g</i> of <i>p1</i> multiplies 2 to its <i>in_streams[0]</i> which is stream <i>y</i> and puts the result in its <i>out_streams[0]</i> which is stream <i>z</i>. Thus <b>z[n] = 2*y[n] = 2n + 200</b> for n less than 6. </li>
<li> Process function <i>r</i> of <i>p2</i> creates a stream <i>s</i> and multiplies 3 to its <i>in_streams[0]</i> which is stream <i>z</i> and puts the result in stream <i>s</i>. This function also prints stream <i>s</i>. Thus it prints <b>3*z[n] = 6n + 600</b> for n less than 6. </li>
</ol>
End of explanation
"""
import numpy as np
from IoTPy.helper_functions.type import dtype_float
def test_multicore_with_arrays():
    """Two-process example using StreamArray: p0 computes windowed means of
    stream 'x' into stream 'y'; p1 computes windowed maxima of 'y' and
    prints them. A source thread running in p1 feeds 'x' with 0..29."""
    # Step 0: Define agent functions, source threads
    # and actuator threads (if any).
    # Step 0.0: Define agent functions.
    # f_numpy is the agent function for the processor called 'p0'.
    def f_numpy(in_streams, out_streams):
        # Mean over non-overlapping windows of size 2; the int input stream
        # is converted to a float StreamArray with dtype_float.
        map_window(
            np.mean, dtype_float(in_streams[0]), out_streams[0],
            window_size=2, step_size=2)
    # g_numpy is the agent function for the processor called 'p1'.
    def g_numpy(in_streams, out_streams):
        t = StreamArray('t')
        # Max over non-overlapping windows of size 2, printed to stdout.
        map_window(max, dtype_float(in_streams[0]), t,
                   window_size=2, step_size=2)
        print_stream(t)
    # Step 0.1: Define source thread targets (if any).
    def thread_target_numpy(procs):
        # Feed 'x' with [0..9], [10..19], [20..29] in three extensions.
        for i in range(3):
            extend_stream(procs, data=list(range(i*10, (i+1)*10)),
                          stream_name='x')
            # Sleep to simulate an external data source.
            time.sleep(0.001)
        # Terminate stream because this stream will not be extended.
        terminate_stream(procs, stream_name='x')
    # Step 1: multicore_specification of streams and processes.
    # Specify Streams: list of pairs (stream_name, stream_type).
    # Specify Processes: name, agent function,
    # lists of inputs and outputs, additional arguments.
    multicore_specification = [
        # Streams
        [('x', 'i'), ('y', 'f')],
        # Processes
        [{'name': 'p0', 'agent': f_numpy, 'inputs':['x'],
          'outputs': ['y'], 'sources': ['x']},
         {'name': 'p1', 'agent': g_numpy, 'inputs': ['y']}]
    ]
    # Step 2: Create processes.
    processes, procs = get_processes_and_procs(multicore_specification)
    # Step 3: Create threads (if any)
    thread_0 = threading.Thread(target=thread_target_numpy, args=(procs,))
    # Step 4: Specify which process each thread runs in.
    # thread_0 runs in the process called 'p1'
    procs['p1'].threads = [thread_0]
    # Step 5: Start, join and terminate processes.
    for process in processes: process.start()
    for process in processes: process.join()
    for process in processes: process.terminate()

test_multicore_with_arrays()
"""
Explanation: Example of Multicore with NumPy Arrays
This example illustrates the use of <b>StreamArray</b> which is a stream treated as a NumPy array with an arbitrarily large number of rows. Using <i>StreamArray</i> can be more efficient than using <i>Stream</i> for large computations.
<br>
<br>
These examples are simple and small; however, in most applications each process function would convert an input stream to a <i>StreamArray</i> and carry out a lot computation as arrays before sending the results as output streams.
<br>
<br>
The streams, sources, and process structure are similar to the previous two examples. The process functions differ in that the functions in this example use <i>StreamArray</i> whereas the earlier examples used <i>Stream</i>.
<br>
<br>
You convert a Stream of numbers to a StreamArray of ints, floats, or doubles by calling the functions <b>dtype_int</b>, <b>dtype_float</b>, and <b>dtype_double</b> respectively.
<br>
<br>
In this example, the agent functions <i>f</i> and <i>g</i> operate on StreamArrays of floats though the source function <i>h</i> generates a stream of int.
End of explanation
"""
from IoTPy.agent_types.merge import zip_map
def example_merging_streams_from_multiple_processes():
    """Four-process example merging streams.

    A source thread in the 'coordinator' process feeds values of x in
    [0, pi] to three worker processes, which compute sin(x), cos(x) and
    tan(x). The coordinator merges the results and prints the squared
    error between sin(x)/cos(x) and tan(x).
    """
    # Step 0: Define agent functions, source threads
    # and actuator threads (if any).

    # Step 0.0: Define agent functions.
    # sine is the agent function for the process called 'sine'.
    def sine(in_streams, out_streams):
        map_element(np.sin, dtype_float(in_streams[0]), out_streams[0],
                    name='sine')

    # cosine is the agent function for the process called 'cosine'.
    def cosine(in_streams, out_streams):
        map_element(np.cos, dtype_float(in_streams[0]), out_streams[0],
                    name='cosine')

    # tangent is the agent function for the process called 'tangent'.
    def tangent(in_streams, out_streams):
        map_element(np.tan, dtype_float(in_streams[0]), out_streams[0],
                    name='tangent')

    # coordinate is the agent function for the process called 'coordinator'.
    def coordinate(in_streams, out_streams):
        x, sines, cosines, tangents = in_streams

        # Ratio sin(x)/cos(x); should equal tan(x).
        def f(lst): return lst[0]/lst[1]

        # Squared difference between the ratio and tan(x).
        def g(lst):
            error_squared = (lst[0] - lst[1])**2
            return error_squared

        ratios = Stream('ratios')
        errors = Stream('errors')
        zip_map(f, [sines, cosines], ratios, name='sine / cosine')
        zip_map(g, [ratios, tangents], errors, name='compute error')
        print_stream(errors, 'error')

    # Step 0.1: Define source thread target (if any).
    def source_thread_target(procs):
        extend_stream(procs, data=np.linspace(0.0, np.pi, 10), stream_name='x')
        terminate_stream(procs, stream_name='x')

    # Step 1: multicore_specification of streams and processes.
    # Specify Streams: list of pairs (stream_name, stream_type).
    # Specify Processes: name, agent function,
    # lists of inputs and outputs and sources, additional arguments.
    multicore_specification = [
        # Streams
        [('x', 'f'), ('sines', 'f'), ('cosines', 'f'), ('tangents', 'f')],
        # Processes
        [{'name': 'sine', 'agent': sine, 'inputs': ['x'], 'outputs': ['sines']},
         {'name': 'cosine', 'agent': cosine, 'inputs': ['x'], 'outputs': ['cosines']},
         # Fixed typo: this process name was previously 'tanget'.
         {'name': 'tangent', 'agent': tangent, 'inputs': ['x'], 'outputs': ['tangents']},
         {'name': 'coordinator', 'agent': coordinate,
          'inputs': ['x', 'sines', 'cosines', 'tangents'], 'sources': ['x']}]
    ]

    # Step 2: Create processes.
    processes, procs = get_processes_and_procs(multicore_specification)

    # Step 3: Create threads (if any).
    thread_0 = threading.Thread(target=source_thread_target, args=(procs,))

    # Step 4: Specify which process each thread runs in.
    # thread_0 runs in the process called 'coordinator'.
    procs['coordinator'].threads = [thread_0]

    # Step 5: Start, join and terminate processes.
    for process in processes: process.start()
    for process in processes: process.join()
    for process in processes: process.terminate()

example_merging_streams_from_multiple_processes()
"""
Explanation: Example of Merging Streams from Multiple Processes
This example shows a slightly more complex process structure. The example has four processes
called <i>coordinator</i>, <i>sine</i>, <i>cosine</i>, and <i>tangent</i>. The <i>coordinator</i> generates a sequence of values that are sent to other processes which compute sines, cosines and tangents of these values and send the results back to the <i>coordinator</i>. The <i>coordinator</i> then computes the square of the error --- the difference between tangent and sine/cosine.
<br>
<br>
This example gives names to agents. This is helpful in debugging because the error statements identify the agent that caused the error. We haven't given names to agents in some examples for brevity.
Process Structure
<ol>
<li> A source function <i>h</i> extends stream <i>x</i> with a sequence of 10 values between 0.0 and pi. This source function executes in a thread in the process called <i>coordinator</i>. Stream <i>x</i> is an input for all processes.
</li>
<li> Agents <i>sine</i>, <i>cosine</i>, and <i>tangent</i> read stream <i>x</i> and output streams <i>sines</i>, <i>cosines</i>, and <i>tangents</i> respectively. These streams are inputs to process <i>coordinate</i>.
</li>
</ol>
End of explanation
"""
import multiprocessing
def example_passing_data_to_multicore():
    """Example of passing multiprocessing.Value objects into a multicore
    block as keyword arguments of an agent function.

    Same process structure as the sine/cosine/tangent example, except that
    the shared values `total` (float) and `num` (int) are delivered to the
    'coordinator' process through 'keyword_args' in the specification.
    """
    total = multiprocessing.Value('f')
    num = multiprocessing.Value('i')
    # Values computed from an earlier computation which is not shown.
    # total and num are passed to the multiprocessing block.
    total.value = 4.0e-13
    num.value = 25

    # Step 0: Define agent functions, source threads
    # and actuator threads (if any).

    # Step 0.0: Define agent functions.
    # sine is the agent function for the process called 'sine'.
    def sine(in_streams, out_streams):
        map_element(np.sin, dtype_float(in_streams[0]),
                    out_streams[0], name='sine')

    # cosine is the agent function for the process called 'cosine'.
    def cosine(in_streams, out_streams):
        map_element(np.cos, dtype_float(in_streams[0]), out_streams[0],
                    name='cosine')

    # tangent is the agent function for the process called 'tangent'.
    def tangent(in_streams, out_streams):
        map_element(np.tan, dtype_float(in_streams[0]),
                    out_streams[0], name='tangent')

    # coordinate is the agent function for the process called 'coordinator'.
    # NOTE(review): total and num are received here via keyword_args but are
    # not read or updated by this agent as written -- confirm intended use.
    def coordinate(in_streams, out_streams, total, num):
        x, sines, cosines, tangents = in_streams

        # Ratio sin(x)/cos(x); should equal tan(x).
        def f(lst): return lst[0]/lst[1]

        # Squared difference between the ratio and tan(x).
        def g(lst):
            error_squared = (lst[0] - lst[1])**2
            return error_squared

        ratios = Stream('ratios')
        errors = Stream('errors')
        zip_map(f, [sines, cosines], ratios, name='sine / cosine')
        zip_map(g, [ratios, tangents], errors, name='compute error')
        print_stream(errors, 'error')

    # Step 0.1: Define source thread target (if any).
    def source_thread_target(procs):
        extend_stream(procs, data=np.linspace(0.0, np.pi, 10), stream_name='x')
        terminate_stream(procs, stream_name='x')

    # Step 1: multicore_specification of streams and processes.
    # Specify Streams: list of pairs (stream_name, stream_type).
    # Specify Processes: name, agent function,
    # lists of inputs and outputs and sources, additional arguments.
    multicore_specification = [
        # Streams
        [('x', 'f'), ('sines', 'f'), ('cosines', 'f'), ('tangents', 'f')],
        # Processes
        [{'name': 'sine', 'agent': sine, 'inputs': ['x'], 'outputs': ['sines']},
         {'name': 'cosine', 'agent': cosine, 'inputs': ['x'], 'outputs': ['cosines']},
         # Fixed typo: this process name was previously 'tanget'.
         {'name': 'tangent', 'agent': tangent, 'inputs': ['x'], 'outputs': ['tangents']},
         {'name': 'coordinator', 'agent': coordinate,
          'inputs': ['x', 'sines', 'cosines', 'tangents'],
          'sources': ['x'],
          'keyword_args': {'total': total, 'num': num}}]
    ]

    # Step 2: Create processes.
    processes, procs = get_processes_and_procs(multicore_specification)

    # Step 3: Create threads (if any).
    thread_0 = threading.Thread(target=source_thread_target, args=(procs,))

    # Step 4: Specify which process each thread runs in.
    # thread_0 runs in the process called 'coordinator'.
    procs['coordinator'].threads = [thread_0]

    # Step 5: Start, join and terminate processes.
    for process in processes: process.start()
    for process in processes: process.join()
    for process in processes: process.terminate()

example_passing_data_to_multicore()
"""
Explanation: Passing Data to and from Multiprocessing Blocks
Non-IoTPy processes or threads can interact concurrently with IoTPy by extending input streams, getting data from queues fed by output streams, and by putting data into, and getting data from, multiprocessing blocks.
This example illustrates how to pass data to a multiprocessing block and get data from the block. This example is the same as the previous one except that the variables <b>total</b> and <b>num</b> are passed to the multiprocessing block which returns updated values of these variables.
<br>
<br>
total = multiprocessing.Value('f')
<br>
num = multiprocessing.Value('i')
<br>
<br>
creates <i>total</i> a wrapper for a float, and <i>num</i> a wrapper for int.
<br>
<br>
These variables can be passed to any collection of processes. In this example they are passed only to the process <i>coordinator</i>.
These variables are assigned initial values from a computation that is not shown here. The multiprocessing block shown updates these values. For example, the value of <i>num</i> is 25 before the block is executed and 45 after it terminates.
Passing variables as keyword or positional arguments
In this example, variables are passed to the process <i>coordinator</i> as keyword arguments.
The keyword arguments are specified as a dict with the name of an argument (e.g. 'total') and its initial value (<i>total</i>).
<br>
<br>
{'name': 'coordinator', 'agent': coordinate, 'inputs':['x', 'sines', 'cosines', 'tangents'],
<br>
'sources': ['x'], 'source_functions':[sequence],
<br>
<b>'keyword_args'</b> : {'total' : total, 'num' : num},}
End of explanation
"""
import threading
from IoTPy.agent_types.sink import stream_to_queue
def example_output_thread_with_queue():
    """Actuator example: process 'p1' copies its result stream into a
    multiprocessing queue, and a separate thread gets items from the queue
    and prints them (standing in for an external actuator device)."""
    q = multiprocessing.Queue()
    # Step 0: Define agent functions, source threads
    # and actuator threads (if any).
    # Step 0.0: Define agent functions.
    # g is the agent function for the process called 'p1'.
    def g(in_streams, out_streams, q):
        s = Stream('s')
        map_element(lambda v: v*2, in_streams[0], s)
        # Copy stream s into queue q so the actuator thread can consume it.
        stream_to_queue(s, q, name='copy_stream_s_to_queue_q')
    # Step 0.1: Define source thread target (if any).
    def source_thread_target(procs):
        # Feed 'x' with [0, 1], [2, 3], [4, 5], then close the stream.
        for i in range(3):
            extend_stream(procs, data=list(range(i*2, (i+1)*2)),
                          stream_name='x')
            time.sleep(0.001)
        terminate_stream(procs, stream_name='x')
    # Define the actuator thread target. This thread target is
    # used to create a thread (output) which is run in the process
    # called 'p0'.
    def get_data_from_output_queue(q):
        # '_finished' is the sentinel placed on output queues when the
        # multicore computation terminates.
        while True:
            v = q.get()
            if v == '_finished': break
            else: print ('q.get() = ', v)
    multicore_specification = [
        # Streams
        [('x', 'i'), ('y', 'i')],
        # Processes
        # q is passed to g as a positional arg ('args') and registered as an
        # output queue so it receives the '_finished' sentinel on termination.
        [{'name': 'p0', 'agent': f, 'inputs':['x'], 'outputs': ['y'], 'sources': ['x']},
         {'name': 'p1', 'agent': g, 'inputs': ['y'],
          'args': [q], 'output_queues': [q]}]
    ]
    processes, procs = get_processes_and_procs(multicore_specification)
    source_thread = threading.Thread(target=source_thread_target, args=(procs,))
    output_thread = threading.Thread(target=get_data_from_output_queue, args=(q,))
    # Both auxiliary threads run in the process called 'p0'.
    procs['p0'].threads = [source_thread, output_thread]
    for process in processes: process.start()
    for process in processes: process.join()
    for process in processes: process.terminate()

example_output_thread_with_queue()
"""
Explanation: Actuators
A multiprocessing block may need to interact asynchronously with some external device. To do so, the block puts data into a queue and uses threads responsible for interfacing between the queue and the device. This simple example illustrates the simplest actuator: a printer. Indeed printing can be done synchronously by the multiprocessing block. Printing doesn't need a queue to interface between it and the block. We use the printer in this example to illustrate the idea.
<br>
<br>
Function <i>g</i> of process <i>p1</i> has an agent called 'copy_stream_s_to_queue_q' which copies stream <i>s</i> to queue <i>q</i>. A thread, <b>my_thread</b> in <i>p1</i> prints values from the queue; this thread represents the thread that interfaces with an external actuator device. This thread is in addition to any source threads that may exist.
<br>
<br>
Queue <i>q</i> is specified as an <b>output queue</b>. An output queue gets a special message <b>'_finished'</b> when the multiprocess block terminates.
<br>
<br>
Threads (apart from source threads) and output queues are specified in <i>multicore_specifications</i>. See
<br>
<br>
{'name': 'p1', 'agent': g, 'inputs': ['y'],
<br>
'args': [q], <b>'output_queues'</b>: [q], <b>'threads'</b>: [my_thread]}
<br>
<br>
The thread, <i>my_thread</i>, terminates when it receives a '_finished' message. We want this thread to terminate so that process <i>p1</i> terminates, and then the entire multiprocessing block can terminate as well.
End of explanation
"""
from IoTPy.agent_types.basics import *
def example_echo_two_cores():
    """Two-process feedback example: compute the echo of a spoken sound.

    Process 'p0' adds the delayed, attenuated feedback stream 'attenuated'
    to the source stream 'sound_made' to produce 'sound_heard'.  Process
    'p1' attenuates 'sound_heard', feeds it back to 'p0', and copies the
    heard sound into queue ``q`` for the output thread to print.
    """
    # This is the delay from when the made sound hits a
    # reflecting surface.
    delay = 6
    # This is the attenuation of the reflected wave.
    attenuation = 0.5
    # The results are put in this queue. A thread reads this
    # queue and feeds a speaker or headphone.
    q = multiprocessing.Queue()
    # ----------------------------------------------
    # Step 0: Define agent functions, source threads
    # and actuator threads (if any).
    # Step 0.0: Define agent functions.
    # Agent function for process named 'p0'
    # echo is a delay of zeroes followed by attenuated heard sound.
    # out_streams[0], which is the same as sound_heard, is
    # echo + sound_made
    def f_echo(in_streams, out_streams, delay):
        sound_made, attenuated = in_streams
        # Seed the echo with `delay` zeroes so the feedback is time-shifted.
        echo = StreamArray('echo', dtype='float')
        echo.extend(np.zeros(delay, dtype='float'))
        map_element(lambda v: v, attenuated, echo)
        # The zip_map output is the sound heard which is
        # the sound made plus the echo.
        zip_map(sum, [sound_made, echo], out_streams[0])
    # Agent function for process named 'p1'
    # This process puts the sound heard into the output queue
    # and returns an attenuated version of the sound_heard as
    # its output stream.
    def g_echo(in_streams, out_streams, attenuation, q):
        def gg(v):
            # v is the sound heard
            q.put(v)
            # v*attenuation is the echo
            print ('in g_echo; v is ', v)
            return v*attenuation
        map_element(gg, in_streams[0], out_streams[0])
    def source_thread_target(procs):
        # Ten samples followed by ten zeroes, then close the stream.
        data=list(range(10))
        extend_stream(procs, data=np.array(np.arange(10.0)), stream_name='sound_made')
        time.sleep(0.0001)
        extend_stream(procs, data=np.array([0.0]*10), stream_name='sound_made')
        terminate_stream(procs, stream_name='sound_made')
    # Thread that gets data from the output queue
    # This thread is included in 'threads' in the specification.
    # Thread target
    def get_data_from_output_queue(q):
        # Print heard samples until the '_finished' sentinel arrives.
        finished_getting_output = False
        while not finished_getting_output:
            v = q.get()
            if v == '_finished': break
            print ('heard sound = spoken + echo: ', v)
    multicore_specification = [
        # Streams
        [('sound_made', 'f'), ('attenuated', 'f'), ('sound_heard', 'f')],
        # Processes
        [{'name': 'p0', 'agent': f_echo, 'inputs': ['sound_made', 'attenuated'],
          'outputs': ['sound_heard'], 'keyword_args' : {'delay' : delay}, 'sources': ['sound_made']},
         {'name': 'p1', 'agent': g_echo, 'inputs': ['sound_heard'], 'outputs': ['attenuated'],
          'args': [attenuation, q], 'output_queues': [q] } ]]
    processes, procs = get_processes_and_procs(multicore_specification)
    source_thread = threading.Thread(target=source_thread_target, args=(procs,))
    output_thread = threading.Thread(target=get_data_from_output_queue, args=(q,))
    # Both auxiliary threads run inside process 'p0'.
    procs['p0'].threads = [source_thread, output_thread]
    for process in processes: process.start()
    for process in processes: process.join()
    for process in processes: process.terminate()
example_echo_two_cores()
"""
Explanation: Example of Process Structure with Feedback
The example shows a process structure with feedback. This example creates an echo from a spoken sound. (You can write more efficient and succinct code to compute echoes. The code in this example is here merely because it illustrates a concept.)
<br>
Streams
<ol>
<li><b>sound_made</b>: This is the sound made by a speaker in a large spherical space.</li>
<li><b>attenuated</b>: This is the sound made multiplied by an attenuation factor.</li>
<li><b>echo</b>: This is the echo of the sound made heard at the center of the room. The echo is a delay followed by an attenuation of the sound heard. </li>
<li><b>sound_heard</b>: This is the sound that is heard by the speaker. The heard sound is the sound made by the speaker plus the echo.</li>
</ol>
The equations that define the streams are:
<ol>
<li>
<b>attenuated[n] = sound_heard[n]*attenuation</b>
</li>
<li>
<b>echo[n] = attenuated[n-delay]</b> for n > delay.
</li>
<li>
<b>sound_heard[n] = sound_made[n] + echo[n]</b> for n > delay.
</li>
</ol>
Process Structure
Process <i>p0</i> has a source which feeds one of its input streams <i>sound_made</i> with a stream of measurements obtained from a microphone. In this example, the stream is generated with numbers so that we can see how streams are processed.
<br>
<br>
Process <i>p1</i> contains a single input stream which is the sound heard and a single output stream which is an attenuation of the sound heard.
Process Functions
The function <i>f</i> of <i>p0</i> computes <i>echo</i> from the <i>attenuated</i> stream fed back by <i>p1</i>. The first <b>delay</b> units of the echo are empty (i.e. 0).
<br>
<b>map_element(lambda v: v, attenuated, echo)</b>
<br>
copies the attenuated stream to the echo stream; however, since the first <i>delay</i> values of the echo stream are 0, the echo stream will consist of <i>delay</i> zeroes followed by the attenuated stream.
<br>
<i>out_streams[0]</i> of process <i>p0</i> is <i>sound_heard</i>. Function <i>f</i> makes <i>sound_heard</i> the sum of the echo and the sound made.
<br>
The function <i>g</i> of process <i>p1</i> puts elements of its input stream (i.e. <i>sound_heard</i>) on queue <i>q</i> and returns the elements multiplied by <i>attenuation</i>.
End of explanation
"""
def example_echo_single_core():
    """Single-process version of the echo example.

    The feedback loop (heard = made + echo; echo = delayed, attenuated
    heard sound) lives entirely inside one process; heard samples are
    copied to queue ``q`` for the output thread to print.
    """
    # This is the delay from when the made sound hits a
    # reflecting surface.
    delay = 4
    # This is the attenuation of the reflected wave.
    attenuation = 0.5
    # The results are put in this queue. A thread reads this
    # queue and feeds a speaker or headphone.
    q = multiprocessing.Queue()
    # ----------------------------------------------
    # Step 0: Define agent functions, source threads
    # and actuator threads (if any).
    # Step 0.0: Define agent functions.
    # Agent function for process named 'p0'
    # echo is a delay of zeroes followed by attenuated heard sound.
    # out_streams[0], which is the same as sound_heard is
    # echo + sound_made
    def f_echo(in_streams, out_streams, delay, attenuation, q):
        # Seed the echo with `delay` zeroes; the feedback below appends
        # the attenuated heard sound behind them.
        echo = StreamArray('echo', dtype='float')
        echo.extend(np.zeros(delay, dtype='float'))
        #echo = Stream('echo', initial_value=[0]*delay)
        #Note: sound_made = in_streams[0]
        # NOTE(review): stream '+' appears to build an elementwise-sum
        # stream (IoTPy operator overload) -- confirm against StreamArray.
        sound_heard = in_streams[0] + echo
        map_element(lambda v: v*attenuation, sound_heard, echo)
        stream_to_queue(sound_heard, q)
    def source_thread_target(procs):
        # Ten samples followed by ten zeroes, then close the stream.
        extend_stream(procs, data=list(range(10)), stream_name='sound_made')
        time.sleep(0.0001)
        extend_stream(procs=procs, data=[0]*10, stream_name='sound_made')
        terminate_stream(procs, stream_name='sound_made')
    # Thread that gets data from the output queue
    # This thread is included in 'threads' in the specification.
    # Thread target
    def get_data_from_output_queue(q):
        # Print heard samples until the '_finished' sentinel arrives.
        finished_getting_output = False
        while not finished_getting_output:
            v = q.get()
            if v == '_finished': break
            print ('heard sound = spoken + echo: ', v)
    multicore_specification = [
        # Streams
        [('sound_made', 'f')],
        # Processes
        [{'name': 'p0', 'agent': f_echo, 'inputs': ['sound_made'],
          'args' : [delay, attenuation, q], 'sources': ['sound_made'],'output_queues': [q]}]]
    processes, procs = get_processes_and_procs(multicore_specification)
    source_thread = threading.Thread(target=source_thread_target, args=(procs,))
    output_thread = threading.Thread(target=get_data_from_output_queue, args=(q,))
    procs['p0'].threads = [source_thread, output_thread]
    for process in processes: process.start()
    for process in processes: process.join()
    for process in processes: process.terminate()
example_echo_single_core()
"""
Explanation: Example source and actuator thread with single process
This example is the same as the previous one except that the computation is carried out in a single process rather in two processes. The example illustrates an actuator thread and a source thread in the same process.
End of explanation
"""
from IoTPy.core.stream import _no_value
def test_grid():
    """Relaxation on a 1-D grid with one process per interior grid point.

    Two shared copies of the grid ('even' and 'odd') alternate as reader
    and writer on successive steps, so no cell is read while being
    written.  Streams s_i carry only step numbers and are used purely to
    synchronize neighboring processes.
    """
    # N is the size of the grid
    N = 5
    # M is the number of steps of execution.
    M = 5
    # DELTA is the deviation from the final solution.
    DELTA = 0.01
    # even, odd are the grids that will be returned
    # by this computation
    even = multiprocessing.Array('f', N)
    odd = multiprocessing.Array('f', N)
    # Set up initial values of the grid.
    for i in range(1, N-1):
        even[i] = i + DELTA
    # Boundary condition: the last cell is pinned at N-1 in both copies.
    even[N-1] = N-1
    odd[N-1] = N-1
    def f(in_streams, out_streams, index, even, odd):
        # g performs one relaxation step; v is the current step number.
        # Even steps read 'even' and write 'odd'; odd steps the reverse.
        def g(v):
            if (0 < index) and (index < N-1):
                if v%2 == 0:
                    odd[index] = (even[index-1] + even[index] + even[index+1])/3.0
                else:
                    even[index] = (odd[index-1] + odd[index] + odd[index+1])/3.0
            return v+1
        # r acts as a synchronizer: it emits step k only after every
        # neighbor has reported step k, and stops after M steps.
        def r(lst, state):
            if state < M:
                return lst[0], state+1
            else:
                return _no_value, state
        # Announce completion of step 0 to the neighbors.
        for out_stream in out_streams: out_stream.extend([0])
        synch_stream = Stream('synch_stream')
        zip_map(r, in_streams, synch_stream, state=0, name='zip_map_'+str(index))
        map_element(g, synch_stream, out_streams[0], name='grid'+str(index))
        # No source threads here, so run() must be called explicitly
        # to start the agents in this process.
        run()
    multicore_specification = [
        # Streams
        [('s_'+str(index), 'i') for index in range(1, N-1)],
        # Processes
        # Interior processes synchronize with both neighbors; the two
        # edge processes (index 1 and N-2) have a single neighbor each.
        [{'name': 'grid_'+str(index), 'agent': f,
          'inputs':['s_'+str(index+1), 's_'+str(index-1)],
          'outputs':['s_'+str(index)],
          'args': [index, even, odd]} for index in range(2, N-2)] + \
        [{'name': 'grid_'+str(1), 'agent': f,
          'inputs':['s_'+str(2)], 'outputs':['s_'+str(1)],
          'args': [1, even, odd]}] + \
        [{'name': 'grid_'+str(N-2), 'agent': f,
          'inputs':['s_'+str(N-3)], 'outputs':['s_'+str(N-2)],
          'args': [N-2, even, odd]}]
    ]
    # Execute processes (after including your own non IoTPy processes)
    processes = get_processes(multicore_specification)
    for process in processes: process.start()
    for process in processes: process.join()
    for process in processes: process.terminate()
    print ('Grid after ', M, ' steps is: ')
    # The last executed step number is M-1, which wrote 'even' when M is
    # even and 'odd' when M is odd.
    if M%2 == 0:
        print (even[:])
    else:
        print (odd[:])
test_grid()
"""
Explanation: Example of a grid computation
Grid computations are used in science, for example in computing the temperature of a metal plate. The grid is partitioned into regions with a process assigned to simulate each region. On the n-th step, each process reads the values of relevant parts of the grid and updates its own value.
<br>
<br>
This example uses two copies of the grid; the two copies are <b>even</b> and <b>odd</b>.
<ol>
<li>On <b>even</b> steps (i.e., steps 0, 2, 4,..) the <i>j</i>-th
proces <b>reads</b> the <i>even</i> grid and <b>writes</b> the
<i>j</i>-th element of the <i>odd</i> grid. </li>
<li>On <b>odd</b> steps, the <i>j</i>-th proces <b>reads</b> the
<i>odd</i> grid and <b>writes</b> the <i>j</i>-th element of the
<i>even</i> grid. </li>
</ol>
So, each portion of the grid is modified by only one process. And no process reads a value while it is modified.
The example problem
A linear metal bar of length <i>N</i> is partitioned into a grid of <i>N</i> continuous regions. Grid 0 is kept at a constant temperature of 0 degrees while grid <i>N-1</i> is kept at a constant temperature of <i>N-1</i> degrees. Initially, the temperature at intermediate grid points is arbitrary; in the code below, the temperature at grid point <i>i</i> exceeds <i>i</i> by <i>DELTA</i>.
<br>
<br>
Let <b>TEMP[i][k]</b> be the temperature of the <i>i</i>-th region on step <i>k</i>. Then, for all <i>k</i>:
<ol>
<li>TEMP[0][k] = 0 </li>
<li>TEMP[N-1][k] = N-1 </li>
<li>TEMP[i][k+1] = (TEMP[i-1][k] + TEMP[i][k] + TEMP[i+1][k])/3 for i in [1, ..., N-2] </li>
</ol>
Processes
The computation uses <i>N-2</i> processes. The <i>i</i>-th process is called 'grid_i' and is responsible for simulating the <i>i</i>-th region.
<br>
Each process takes the <i>k + 1</i>-th step after it has finished the <i>k</i>-th step and it has determined that its neighbors have also finished the <i>k</i>-th step.
<br>
Streams
The system has one stream, <b>s_i</b> for the <i>i</i>-th process. This stream contains the elements [0, 1, .. , k] after the <i>i</i>-th process has completed <i>k</i>-th steps.
<br>
Process <i>grid_i</i> outputs stream <i>s_i</i> and inputs streams from its neighboring processes which are <i>grid_(i-1)</i> if <i>i</i> exceeds 1 and <i>grid_(i+1)</i> if <i>i</i> is less than <i>N-1</i>.
Process Structure
The process structure is linear with each process getting input streams from each of its neighbors and sending its output stream to all its neighbors.
Process Function
The process begins by sending 0 on its output stream to indicate that it has finished its 0-th step.
<br>
<br>
The <i>k</i>-th value of <i>in_streams[j]</i> is <i>k</i> when the <i>j</i>-th neighboring process has completed its <i>k</i>-th step.
<br>
<br>
<b>synch_stream</b> is an internal stream of the process. The <i>k</i>-th element of this stream is <i>k</i> after all neighboring processes have completed their <i>k</i>-th step.
<br>
<br>
The zip_map function <i>r</i> operates on a list with one element from each neighbor. All the elements of the list will be <i>k</i> on the <i>k</i>-th step. The zip_map function returns <i>k</i> which is any element of the list. In this example it returns the 0-th element.
<br>
<br>
Thus the zip_map function acts as a synchronizer. It waits until all neighbors have completed the <i>k</i>-step and then it outputs <i>k</i>.
<br>
<br>
Function <i>g</i> is called for the <i>k</i>-th time when this process and all its neighbors have completed <i>k - 1</i> steps. Function <i>g</i> does the grid computation. Function <i>r</i> and the zip_map agent are used merely for synchronizing.
run()
Function <i>f</i> calls <b>run</b> after it has declared all its agents. Without calling run() the function will take no action.
<br>
<br>
Note that when using external source threads, you should not call <i>run</i> because the source threads are responsible for starting and stopping the main computational thread. This example has no source threads so you must call <i>run</i> to start the system.
End of explanation
"""
from examples.Counting.bloom_filter import bloom_filter_stream
from examples.Counting.bloom_filter import BloomFilter
from examples.Counting.count_min_sketch import count_min_sketch_stream
from examples.Counting.count_min_sketch import CountMinSketch
from IoTPy.agent_types.merge import merge_asynch
def test_multiprocessing_counting_algorithms():
    """Fan the same 'data' command stream to a Bloom filter and a
    count-min sketch, each in its own process, and merge their results.

    The merge process prints each arriving result tagged by its origin.
    """
    # ----------------------------------------------
    # Step 0: Define agent functions, source threads
    # and actuator threads (if any).
    # Step 0.0: Define agents
    def bloom_filter_agent(in_streams, out_streams):
        # Approximate set membership via a Bloom filter.
        bloom_filter = BloomFilter(
            est_elements=1000, false_positive_rate=0.05)
        bloom_filter_stream(in_streams[0], out_streams[0],
                            bloom_filter=bloom_filter)
    def count_min_sketch_agent(in_streams, out_streams):
        # Approximate frequency counting via a count-min sketch.
        count_min_sketch = CountMinSketch(width=1000, depth=20)
        count_min_sketch_stream(in_streams[0], out_streams[0],
                                count_min_sketch=count_min_sketch)
    def merge_agent(in_streams, out_streams):
        s = Stream('print stream')
        # merge_asynch delivers (input-stream index, value) pairs.
        def g(pair):
            index, value = pair
            if index == 0:
                print ('bloom_filter. value: ', value)
            else:
                print ('count_min_sketch. value: ', value)
        merge_asynch(g, in_streams, s)
    # Step 0.1: Define source thread target (if any).
    def source_thread_target(procs):
        # Commands are ('add', obj) / ('check', obj) pairs, in two bursts.
        data=[('add', 'a'), ('add', 'b'), ('add', 'a'),
              ('check', 'c'), ('add', 'd'), ('check','a')]
        extend_stream(procs, data, stream_name='data')
        time.sleep(0.001)
        data=[('add', 'c'), ('check', 'b'), ('check', 'a'),
              ('check', 'c'), ('check', 'e'), ('add', 'a')]
        extend_stream(procs, data, stream_name='data')
        terminate_stream(procs, stream_name='data')
    # Step 1: multicore_specification of streams and processes.
    # Specify Streams: list of pairs (stream_name, stream_type).
    # Specify Processes: name, agent function,
    # lists of inputs and outputs and sources, additional arguments.
    multicore_specification = [
        # Streams
        [('data', 'x'), ('bloom_results', 'x'),
         ('count_min_sketch_results', 'x')],
        # Processes
        [{'name': 'bloom_filter_process', 'agent': bloom_filter_agent,
          'inputs':['data'], 'outputs': ['bloom_results'],
          'sources': ['data']},
         {'name': 'count_min_sketch_process', 'agent': count_min_sketch_agent,
          'inputs':['data'], 'outputs': ['count_min_sketch_results']},
         {'name': 'merge_process', 'agent': merge_agent,
          'inputs': ['bloom_results', 'count_min_sketch_results']}
        ]]
    # Step 2: Create processes.
    processes, procs = get_processes_and_procs(multicore_specification)
    # Step 3: Create threads (if any)
    thread_0 = threading.Thread(target=source_thread_target, args=(procs,))
    # Step 4: Specify which process each thread runs in.
    # thread_0 runs in the process called 'bloom_filter_process'.
    procs['bloom_filter_process'].threads = [thread_0]
    # Step 5: Start, join and terminate processes.
    for process in processes: process.start()
    for process in processes: process.join()
    for process in processes: process.terminate()
test_multiprocessing_counting_algorithms()
"""
Explanation: Counting Elements in Streams
The next example uses algorithms to count elements in streams (see IoTPy/examples/Counting). This example uses a Bloom Filter and a count-min-sketch algorithm. The data stream consists of pairs where a pair is either ('add', object) or ('check', object). The pair ('add', z) in the input stream states that object z was added to the data stream. The pair ('check', z) in the input stream is a command to check whether z was added earlier.
<br>
End of explanation
"""
|
KronosKoderS/sie552 | venus_example.ipynb | mit | class PlanetaryObject():
"""
A simple class used to store pertinant information about the plantary object
"""
def __init__(self, date, L, e, SMA, i, peri, asc, r, v, anom, fp, mu):
self.date = date # Event Date
self.L = L # Longitude
self.e = e # Eccentricity
self.SMA = SMA # SMA
self.i = i # Inclination
self.peri = peri # Longitude of Perihelion
self.asc = asc # Longitude of Ascending Node
self.r = r # Radius
self.v = v # Velocity
self.anom = anom # True Anomaly
self.fp = fp # Flight Path Angle
self.mu = mu # Gravitation parameter
# Earth state at the departure epoch (1988-04-08).
# Angles in degrees, distances in km, velocities in km/s.
earth = PlanetaryObject(
    datetime.date(1988, 4, 8),
    197.53, # Longitude
    0.01672, # Eccentricity
    None, # SMA
    None, # Inclination
    102.29, # Longitude of Perihelion
    0, # Longitude of Ascending Node
    149.7848e6, # Radius
    29.75, # Velocity
    95.24, # True Anomaly
    0.9554, # Flight Path Angle
    398600.4 # Gravitation parameter (km^3/s^2)
)
# Venus state at the arrival epoch (1988-07-26).
venus = PlanetaryObject(
    datetime.date(1988, 7, 26),
    330.52, # Longitude
    0.006778, # Eccentricity
    None, # SMA
    3.394, # Inclination
    131.41, # Longitude of Perihelion
    76.58, # Longitude of Ascending Node
    108.9014e6, # Radius
    34.8, # Velocity
    199.11, # True Anomaly
    -0.128, # Flight Path Angle
    324858.8 # Gravitation parameter (km^3/s^2)
)
"""
Explanation: This is a direct copy of the Earth to Venus mission plan. I'm doing this to make sure I get the functions correct, before proceeding further on the Earth to Mars.
Below is the capturing of the data for each planet. I'm using a custom PlanetaryObject class to store the information
End of explanation
"""
# Gravitation parameter of the Sun (km^3/s^2).
mu_sun = 132712439935.5
def eccentricity(r_1, r_2, theta_1, theta_2):
    r"""Eccentricity of the transfer ellipse joining two orbital radii.

    .. math::
        e = \frac {r_2 - r_1} {r_1 \cos{\theta_1} - r_2 \cos{\theta_2}}

    :param r_1: radius of the departing planetary object
    :param r_2: radius of the arriving planetary object
    :param theta_1: true anomaly of the departure point, in degrees
    :param theta_2: true anomaly of the arrival point, in degrees
    """
    cos_t1 = math.cos(math.radians(theta_1))
    cos_t2 = math.cos(math.radians(theta_2))
    return (r_2 - r_1) / (r_1 * cos_t1 - r_2 * cos_t2)
def periapsis_radius(r, e, theta):
    r"""Periapsis radius of the transfer ellipse.

    .. math::
        r_p = \frac {r [1 + e \cos{\theta}]} {1 + e}

    :param r: radius of the departing planetary object
    :param e: eccentricity of the transfer ellipse
    :param theta: true anomaly at the departure point, in degrees
    """
    numerator = r * (1 + e * math.cos(math.radians(theta)))
    return numerator / (1 + e)
def semimajor_axis(r=None, r_a=None, r_p=None, mu=None, V=None, e=None):
    r"""Semi-major axis of an elliptical orbit.

    Uses whichever of the following forms matches the supplied arguments,
    checked in this order:

    .. math::
        \frac {r_a + r_p} {2}
        \frac {\mu r} {2 \mu - V^2 r}
        \frac {r_p} {1 - e}
        \frac {r_a} {1 + e}

    :param r: general radius of the elliptical orbit
    :param r_a: radius of apoapsis
    :param r_p: radius of periapsis
    :param mu: gravitation parameter
    :param V: velocity of the orbiting object
    :param e: eccentricity of the elliptical orbit
    :raises TypeError: if no supported combination of arguments is given
    """
    # Identity comparison with None (PEP 8) replaces the original `!= None`.
    if r_a is not None and r_p is not None:
        return (r_a + r_p) / 2
    if mu is not None and r is not None and V is not None:
        return (mu * r) / (2 * mu - V ** 2 * r)
    if r_p is not None and e is not None:
        return r_p / (1 - e)
    if r_a is not None and e is not None:
        return r_a / (1 + e)
    # If we reach this point, then the passed in arguments doesn't match
    # any equations we have defined. Raise an Error
    raise TypeError("Invalid arguments!")
def time_since_periapsis(e, n, theta=None, E=None):
    r"""Time since periapsis passage, from Kepler's equation.

    .. math::
        t = \frac {E - e \sin{E}} {n}

    If E isn't defined, it is derived from theta using:

    .. math::
        \cos {E} = \frac {e + \cos{\theta}} {1 + e \cos{\theta}}

    :param e: eccentricity of the transfer ellipse
    :param n: mean motion
    :param theta: degrees to periapsis (exactly one of theta/E required)
    :param E: eccentric anomaly in radians
    :raises TypeError: if neither or both of theta and E are supplied
    """
    # Identity comparison with None (PEP 8) replaces the original `== None`.
    if theta is None and E is None:
        raise TypeError("theta or E MUST be defined")
    if theta is not None and E is not None:
        raise TypeError("theta OR E must be defined. Not both")
    if E is None:
        cos_E = (e + math.cos(math.radians(theta))) / (1 + e * math.cos(math.radians(theta)))
        E = math.acos(cos_E)
    return (E - e * math.sin(E)) / n
def mean_motion(mu, a):
    r"""Mean motion of an elliptical orbit.

    .. math::
        n = \sqrt{\frac{\mu} {a^3}}

    :param mu: gravitation parameter (Mass * Gravitation constant)
    :param a: semimajor axis
    """
    ratio = mu / a ** 3
    return math.sqrt(ratio)
def velocity(mu, r, a):
    r"""Orbital speed from the vis-viva equation.

    .. math::
        V = \sqrt{\frac{2 \mu} {r} - \frac{\mu} {a}}

    :param mu: gravitation parameter (Mass * Gravitation constant)
    :param r: current orbital radius
    :param a: semimajor axis
    """
    vis_viva = 2 * mu / r - mu / a
    return math.sqrt(vis_viva)
def flight_path_angle(e, theta):
    r"""Flight path angle (gamma), in radians, on an elliptical orbit.

    .. math::
        \tan{\gamma} = \frac{e \sin{\theta}}{1 + e \cos{\theta}}

    :param e: eccentricity of the elliptical orbit
    :param theta: true anomaly, in degrees
    """
    theta_rad = math.radians(theta)
    tan_gamma = (e * math.sin(theta_rad)) / (1 + e * math.cos(theta_rad))
    return math.atan(tan_gamma)
def inclination(Omega, L_s, L_t, i):
    r"""Inclination (radians) of the transfer plane, via spherical trigonometry.

    :param Omega: longitude of the ascending node, degrees
    :param L_s: longitude of the departing planet, degrees
    :param L_t: longitude of the target planet, degrees
    :param i: inclination of the target planet's orbit, degrees
    """
    arc_a = math.radians(Omega + 180 - L_s)
    arc_b = math.radians(L_t - (180 + Omega))
    vertex = math.radians(180 - i)
    # Spherical law of cosines for the side opposite the vertex angle.
    side_c = math.acos(
        math.cos(arc_a) * math.cos(arc_b)
        + math.sin(arc_a) * math.sin(arc_b) * math.cos(vertex))
    # Spherical law of sines yields the transfer-plane inclination.
    return math.asin(math.sin(vertex) * math.sin(arc_b) / math.sin(side_c))
"""
Explanation: These are my formulas in python form. They're based off of Table 3.3 found in the book
End of explanation
"""
# Interval between departure and arrival dates (a datetime.timedelta).
venus.date - earth.date
time_of_flight = venus.date - earth.date
# Keep just the whole number of days.
time_of_flight = time_of_flight.days
time_of_flight
"""
Explanation: Designing the Transfer Ellipse
Time of Flight
End of explanation
"""
# Place the line of apsides through the departure point: theta_1 = 180 deg.
line_of_apisides = 180
# True anomaly at arrival = 180 deg plus the heliocentric transfer angle.
true_anom = line_of_apisides + (venus.L - earth.L)
true_anom
eccentricity(earth.r, venus.r, line_of_apisides, true_anom)
e = eccentricity(earth.r, venus.r, line_of_apisides, true_anom)
"""
Explanation: Eccentricity
End of explanation
"""
# Periapsis radius of the transfer ellipse (km).
periapsis_radius(earth.r, e, line_of_apisides)
r_p = periapsis_radius(earth.r, e, line_of_apisides)
"""
Explanation: Periapsis Radius
End of explanation
"""
# Book apparently rounds the actual values here
semimajor_axis(r_p=103.555e6, e=0.1825)
# Semi-major axis used in later cells (book value, km).
a = 126.673e6
"""
Explanation: Semi-Major Axis
End of explanation
"""
# Mean motion of the transfer ellipse (rad/s).
n = mean_motion(mu_sun, a)
n
# Time from periapsis to the Earth-departure point (seconds).
peri_to_earth = time_since_periapsis(e, n, theta=line_of_apisides)
peri_to_earth / 3600 / 24 # conversion from seconds to days
# Time from periapsis to the Venus-arrival point (seconds).
venus_to_peri = time_since_periapsis(e, n, theta=true_anom)
venus_to_peri / 3600 / 24
# Transfer time of flight, in days.
(peri_to_earth - venus_to_peri) / 3600 / 24
"""
Explanation: Time of Flight
End of explanation
"""
# Heliocentric speeds on the transfer ellipse at departure and arrival (km/s).
velocity(mu_sun, earth.r, 129.336e6) # using the Value from the Book which appear to be rounded
velocity(mu_sun, venus.r, 129.336e6) # again using the values from the book which appear to be rounded
"""
Explanation: Velocities
End of explanation
"""
# Flight path angles on the transfer ellipse at departure and arrival (deg).
math.degrees(flight_path_angle(0.17194, 199.53)) # same as above, using the book values
math.degrees(flight_path_angle(0.17194, 332.52))
"""
Explanation: Flight Path Angles
End of explanation
"""
def transfer_ellipse(start_planet, end_planet, tof_accuracy=2, max_iters=1000, return_trials=False):
    """Search for a transfer ellipse whose time of flight matches the dates.

    Iterates trial line-of-apsides angles, recomputing the ellipse until
    the computed time of flight matches (end date - start date) to within
    10**-tof_accuracy days, or max_iters trials have been made.

    :param start_planet: departing PlanetaryObject (uses .date, .L, .r)
    :param end_planet: arriving PlanetaryObject (uses .date, .L, .r)
    :param tof_accuracy: tolerance exponent: match to 10**-tof_accuracy days
    :param max_iters: safety cap on the number of trials
    :param return_trials: if True, include the trial angles/TOF history
    :returns: dict of the final ellipse parameters and endpoint velocities
    """
    time_of_flight = end_planet.date - start_planet.date
    time_of_flight = time_of_flight.days
    longs = []
    tofs = []
    line_of_apisides = 180  # trial start (overwritten on the first pass)
    tof = 9999999999  # large number to get us started
    bottom_angle = 90
    top_angle = 270
    i = 0
    # BUG FIX: the original tolerance expression `10e-tof_accuracy` was a
    # syntax error; the intended tolerance is 10**-tof_accuracy days.
    tolerance = 10.0 ** -tof_accuracy
    while not(time_of_flight - tolerance < tof / 3600 / 24 < time_of_flight + tolerance) and i < max_iters:
        # Trial angle is the midpoint of the current bracket.
        # BUG FIX: the original used (bottom - top)/2 = -90 degrees.
        line_of_apisides = (bottom_angle + top_angle) / 2
        true_anom = line_of_apisides + (end_planet.L - start_planet.L)
        longs.append((line_of_apisides, true_anom))
        e = eccentricity(start_planet.r, end_planet.r, line_of_apisides, true_anom)
        r_p = periapsis_radius(start_planet.r, e, line_of_apisides)
        a = semimajor_axis(r_p=r_p, e=e)
        n = mean_motion(mu_sun, a)
        peri_to_start = time_since_periapsis(e, n, theta=line_of_apisides)
        end_to_peri = time_since_periapsis(e, n, theta=true_anom)
        tof = peri_to_start - end_to_peri
        tofs.append(tof / 3600 / 24)
        # BUG FIX: count every trial so the loop always terminates (the
        # original incremented i only when tof was too long, which could
        # loop forever).
        i += 1
        # TODO(review): the bracket [bottom_angle, top_angle] is never
        # narrowed, so this is not yet a true bisection search.
    # Calculate the Relative Velocities
    V_start = velocity(mu_sun, start_planet.r, a)
    V_end = velocity(mu_sun, end_planet.r, a)
    y_start = flight_path_angle(e, line_of_apisides)
    y_end = flight_path_angle(e, true_anom)
    r_dict = {
        # BUG FIX: the stale `- 1` is gone; nothing is added in the loop.
        'line_of_apisides': line_of_apisides,
        'true_anom': true_anom,
        'eccentricity': e,
        'SMA': a,
        'time_of_flight': tof,
        'V_start': V_start,
        'V_end': V_end,
        'y_start': math.degrees(y_start),
        'y_end': math.degrees(y_end)
    }
    if return_trials:
        r_dict.update({'runs':{'longs': longs, 'tofs':tofs}})
    return r_dict
# Solve the Earth->Venus transfer ellipse, keeping the trial history.
tf = transfer_ellipse(earth, venus, return_trials=True)
tf
"""
Explanation: Now that I've verified the fundamental functions above, let's wrap this all up into a nice function that'll optimize this for us
End of explanation
"""
# Spherical-triangle setup for the departure plane change (all in degrees).
alpha = 180 - venus.i
alpha
a = venus.asc + 180 - earth.L
a
b_prime = venus.L - (venus.asc + 180)
b_prime
# b = b_prime # this can be used when the transfer angles are small.
b = 73.967 # taken from the book b/c after much research, I still don't know how to solve a spherical right triangle
# Spherical law of cosines for side c.
csc_c = math.cos(math.radians(a)) * math.cos(math.radians(b)) + math.sin(math.radians(a)) * math.sin(math.radians(b)) * math.cos(math.radians(alpha))
csc_c
c = math.degrees(math.acos(csc_c))
c
# Spherical law of sines gives the transfer-plane inclination (deg).
sin_i = (math.sin(math.radians(alpha)) * math.sin(math.radians(b))) / math.sin(math.radians(c))
sin_i
i_t = math.degrees(math.asin(sin_i))
i_t
"""
Explanation: Interestingly enough, we're getting $\theta_{Earth} = 194$, however the book claims that $\theta_{Earth} = 199$. I believe the discrepancy here is found with the fact that the book apparently rounds its values while the values used by the functions above are more accurate.
Designing the Departure Trajectory
Plane Change
End of explanation
"""
# cos_alpha = math.cos(math.radians(i_t)) * math.cos(math.radians(earth.fp + tf['y_start']))
# Angle between the departure asymptote and Earth's velocity vector (deg).
cos_alpha = math.cos(math.radians(4.455)) * math.cos(math.radians(earth.fp + 3.924)) # using the value from the book, since my is different (and more accurate I believe)
cos_alpha
alpha = math.degrees(math.acos(cos_alpha))
alpha
#C3 = earth.v ** 2 + tf['V_start'] ** 2 - 2 * earth.v * tf['V_start'] * math.cos(math.radians(alpha))
# Launch energy C3 (km^2/s^2) via the law of cosines on the velocity triangle.
C3 = earth.v ** 2 + 27.312 ** 2 - 2 * earth.v * 27.312 * math.cos(math.radians(alpha))
C3
# Hyperbolic excess speed at Earth (km/s).
V_he = math.sqrt(C3)
V_he
"""
Explanation: Calculating $V_{HE}$ and C3
End of explanation
"""
def depart_trajectory(start_planet, end_planet, y, V):
    """Departure trajectory: transfer-plane inclination, C3 and V_HE.

    :param start_planet: departing planet (uses .L, .fp, .v)
    :param end_planet: target planet (uses .i, .asc, .L)
    :param y: flight path angle on the transfer ellipse at departure, degrees
    :param V: heliocentric speed on the transfer ellipse at departure, km/s
    :returns: dict with 'i_t' (deg), 'C3' (km^2/s^2) and 'V_he' (km/s)
    """
    rad = math.radians
    # Spherical-triangle quantities, all in degrees.
    alpha = 180 - end_planet.i
    a = end_planet.asc + 180 - start_planet.L
    b = end_planet.L - (end_planet.asc + 180)
    # Spherical law of cosines for side c.
    cos_c = (math.cos(rad(a)) * math.cos(rad(b))
             + math.sin(rad(a)) * math.sin(rad(b)) * math.cos(rad(alpha)))
    c = math.degrees(math.acos(cos_c))
    # Spherical law of sines gives the transfer-plane inclination.
    i_t = math.degrees(math.asin(
        math.sin(rad(alpha)) * math.sin(rad(b)) / math.sin(rad(c))))
    # Combined flight-path angle: difference when the signs agree,
    # sum when they differ.
    if start_planet.fp * y > 0:
        y_s = abs(start_planet.fp) - abs(y)
    else:
        y_s = abs(start_planet.fp) + abs(y)
    # Total turn angle between the planet's and the transfer velocities.
    alpha_dep = math.degrees(math.acos(math.cos(rad(i_t)) * math.cos(rad(y_s))))
    # Law of cosines on the velocity triangle.
    C3 = start_planet.v ** 2 + V ** 2 - 2 * start_planet.v * V * math.cos(rad(alpha_dep))
    return {
        'i_t': i_t,
        'C3': C3,
        'V_he': math.sqrt(C3)
    }
# Departure trajectory for the Earth->Venus example (book inputs).
depart_trajectory(earth, venus, -3.924, 27.312)
"""
Explanation: Similar to what we did for the Transfer Ellipse, let's combine all these steps into a single function to calculate these for us:
End of explanation
"""
# Spherical-triangle setup for the arrival plane change (all in degrees).
alpha = 180 - venus.i
alpha
a = venus.asc + 180 - earth.L
a
b_prime = venus.L - (venus.asc + 180)
b_prime
# b = b_prime # this can be used when the transfer angles are small.
b = 73.967 # taken from the book b/c after much research, I still don't know how to solve a spherical right triangle
b
csc_c = math.cos(math.radians(a)) * math.cos(math.radians(b)) + math.sin(math.radians(a)) * math.sin(math.radians(b)) * math.cos(math.radians(alpha))
csc_c
c = math.degrees(math.acos(csc_c))
c
# NOTE: the arrival triangle uses sin(a) here where the departure used sin(b).
sin_it = math.sin(math.radians(alpha)) * math.sin(math.radians(a)) / math.sin(math.radians(c))
sin_it
it = math.degrees(math.asin(sin_it))
it
"""
Explanation: Designing the Arrival Trajectory
Plane Change
End of explanation
"""
#cos_alpha_inf = math.cos(math.radians(it)) * math.cos(math.radians(tf['y_end'] + venus.fp))
# Angle between the arrival velocity vectors: cos(i_t) * cos(combined flight-path
# angle).  3.938 is the transfer orbit's arrival flight-path angle hard-coded from
# the earlier transfer-ellipse result (see the commented generic line above).
cos_alpha_inf = math.cos(math.radians(it)) * math.cos(math.radians(3.938 + venus.fp))
# math.acos returns radians; converted to degrees for display on the next line.
alpha_inf = math.acos(cos_alpha_inf)
math.degrees(alpha_inf)
#C3 = venus.v ** 2 + tf['V_end'] ** 2 + 2 * venus.v * tf['V_end'] * math.cos(alpha_inf)
# Law of cosines in velocity space, using the book's rounded values
# (V = 37.57 and alpha = 5.5039 degrees).
C3 = venus.v ** 2 + 37.57 ** 2 - 2 * venus.v * 37.57 * math.cos(math.radians(5.5039))
V_inf = math.sqrt(C3)
V_inf# should be 4.442 km/s
def arrival_trajectory(start_planet, end_planet, y, V):
    """Compute the arrival leg of an interplanetary transfer.

    Parameters
    ----------
    start_planet, end_planet : objects exposing ``i`` (inclination, deg),
        ``asc`` (ascending-node longitude, deg), ``L`` (longitude, deg),
        ``fp`` (flight-path angle, deg) and ``v`` (heliocentric speed);
        only ``L`` is read from ``start_planet``.
    y : flight-path angle of the transfer orbit at arrival (deg).
    V : heliocentric speed on the transfer orbit at arrival (same units as ``v``).

    Returns
    -------
    dict with ``i_t`` (transfer-plane inclination at arrival, deg) and
    ``V_inf`` (hyperbolic excess speed at arrival).
    """
    # Spherical triangle linking the two orbit planes (angles in degrees).
    alpha = 180 - end_planet.i
    a = end_planet.asc + 180 - start_planet.L
    b = end_planet.L - (end_planet.asc + 180)
    csc_c = math.cos(math.radians(a)) * math.cos(math.radians(b)) + math.sin(math.radians(a)) * math.sin(math.radians(b)) * math.cos(math.radians(alpha))
    c = math.degrees(math.acos(csc_c))
    sin_it = math.sin(math.radians(alpha)) * math.sin(math.radians(a)) / math.sin(math.radians(c))
    it = math.degrees(math.asin(sin_it))
    # Net flight-path angle: if the planet's and transfer orbit's flight-path
    # angles have the same sign, subtract the magnitudes, else add them.
    if end_planet.fp * y > 0:
        y_s = abs(abs(end_planet.fp) - abs(y))
    else:
        y_s = abs(abs(end_planet.fp) + abs(y))
    # BUG FIX: y_s already folds end_planet.fp into the combined angle, so the
    # old ``y_s + end_planet.fp`` double-counted it.  The hand calculation
    # earlier in this notebook uses cos(i_t) * cos(y_s) directly.
    cos_alpha_inf = math.cos(math.radians(it)) * math.cos(math.radians(y_s))
    alpha_inf = math.acos(cos_alpha_inf)  # math.acos returns radians
    # BUG FIX: alpha_inf is already in radians; the old code wrapped it in
    # math.radians() again, shrinking the angle by a factor of ~57.3.
    C3 = end_planet.v ** 2 + V ** 2 - 2 * end_planet.v * V * math.cos(alpha_inf)
    V_inf = math.sqrt(C3)
    r_dict = {
        'i_t': it,
        'V_inf': V_inf
    }
    return r_dict
arrival_trajectory(earth, venus, -3.938, 37.57)
"""
Explanation: Calculating $V_\infty$
End of explanation
"""
math.degrees(math.acos(math.cos(math.radians(3.975)) * math.cos(math.radians(3.938-0.128))))
"""
Explanation: We're getting different answers here, because our angles are a little different. alpha_inf as calculated by the book is 5.5039 while I'm getting 5.5036. This is due to the rounding of the $i_{tp}$ as found in the book. I'm getting 3.9745967799374893 while the book rounds this to 3.975. See calculation below:
End of explanation
"""
def trajectories(start_planet, end_planet, y_start, y_end, V_start, V_end):
    """Compute both legs (departure and arrival) of an interplanetary transfer.

    Parameters
    ----------
    start_planet, end_planet : objects exposing ``i`` (inclination, deg),
        ``asc`` (ascending-node longitude, deg), ``L`` (longitude, deg),
        ``fp`` (flight-path angle, deg) and ``v`` (heliocentric speed).
    y_start, y_end : transfer-orbit flight-path angles at departure/arrival (deg).
    V_start, V_end : transfer-orbit heliocentric speeds at departure/arrival.

    Returns
    -------
    dict with ``i_start``/``i_end`` (transfer-plane inclinations, deg),
    ``C3`` (launch energy), ``V_he`` (departure hyperbolic excess speed)
    and ``V_inf`` (arrival hyperbolic excess speed).
    """
    # Spherical triangle linking the two orbit planes (angles in degrees).
    alpha = 180 - end_planet.i
    a = end_planet.asc + 180 - start_planet.L
    b = end_planet.L - (end_planet.asc + 180)
    csc_c = math.cos(math.radians(a)) * math.cos(math.radians(b)) + math.sin(math.radians(a)) * math.sin(math.radians(b)) * math.cos(math.radians(alpha))
    c = math.degrees(math.acos(csc_c))
    # Law of sines: side b for the departure inclination, side a for arrival.
    sin_i_start = (math.sin(math.radians(alpha)) * math.sin(math.radians(b))) / math.sin(math.radians(c))
    i_start = math.degrees(math.asin(sin_i_start))
    sin_i_end = math.sin(math.radians(alpha)) * math.sin(math.radians(a)) / math.sin(math.radians(c))
    i_end = math.degrees(math.asin(sin_i_end))
    # --- Departure leg ---
    # Net flight-path angle: same sign -> subtract magnitudes, else add them.
    if start_planet.fp * y_start > 0:
        y_s = abs(abs(start_planet.fp) - abs(y_start))
    else:
        y_s = abs(abs(start_planet.fp) + abs(y_start))
    cos_alpha = math.cos(math.radians(i_start)) * math.cos(math.radians(y_s))
    alpha_dep = math.degrees(math.acos(cos_alpha))
    C3 = start_planet.v ** 2 + V_start ** 2 - 2 * start_planet.v * V_start * math.cos(math.radians(alpha_dep))
    V_he = math.sqrt(C3)
    # --- Arrival leg ---
    if end_planet.fp * y_end > 0:
        y_e = abs(abs(end_planet.fp) - abs(y_end))
    else:
        y_e = abs(abs(end_planet.fp) + abs(y_end))
    # BUG FIX: the arrival term previously reused the *departure* y_s (leaving
    # y_e dead) and added end_planet.fp a second time even though y_e already
    # folds it in; it now mirrors the departure-leg formula with y_e.
    cos_alpha_inf = math.cos(math.radians(i_end)) * math.cos(math.radians(y_e))
    alpha_inf = math.acos(cos_alpha_inf)  # math.acos returns radians
    # BUG FIX: alpha_inf is already in radians, so do not wrap it in
    # math.radians() again (the old code shrank the angle by a factor ~57.3).
    C3_inf = end_planet.v ** 2 + V_end ** 2 - 2 * end_planet.v * V_end * math.cos(alpha_inf)
    V_inf = math.sqrt(C3_inf)
    r_dict = {
        'i_start': i_start,
        'C3': C3,
        'V_he': V_he,
        'i_end': i_end,
        'V_inf': V_inf
    }
    return r_dict
trajectories(earth, venus, -3.924, -3.938, 27.312, 37.57)
"""
Explanation: Combining the Trajectories into a single function:
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/ncc/cmip6/models/noresm2-mh/landice.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ncc', 'noresm2-mh', 'landice')
"""
Explanation: ES-DOC CMIP6 Model Properties - Landice
MIP Era: CMIP6
Institute: NCC
Source ID: NORESM2-MH
Topic: Landice
Sub-Topics: Glaciers, Ice.
Properties: 30 (21 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:24
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Grid
4. Glaciers
5. Ice
6. Ice --> Mass Balance
7. Ice --> Mass Balance --> Basal
8. Ice --> Mass Balance --> Frontal
9. Ice --> Dynamics
1. Key Properties
Land ice key properties
1.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.ice_albedo')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "function of ice age"
# "function of ice density"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Ice Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify how ice albedo is modelled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.atmospheric_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Atmospheric Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the atmosphere and ice (e.g. orography, ice mass)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.oceanic_coupling_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Oceanic Coupling Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
Which variables are passed between the ocean and ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice velocity"
# "ice thickness"
# "ice temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which variables are prognostically calculated in the ice model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of land ice code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Grid
Land ice grid
3.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is an adative grid being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.base_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Base Resolution
Is Required: TRUE Type: FLOAT Cardinality: 1.1
The base resolution (in metres), before any adaption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.resolution_limit')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Resolution Limit
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If an adaptive grid is being used, what is the limit of the resolution (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.grid.projection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.5. Projection
Is Required: TRUE Type: STRING Cardinality: 1.1
The projection of the land ice grid (e.g. albers_equal_area)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Glaciers
Land ice glaciers
4.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of glaciers in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of glaciers, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.glaciers.dynamic_areal_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 4.3. Dynamic Areal Extent
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does the model include a dynamic glacial extent?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Ice
Ice sheet and ice shelf
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the ice sheet and ice shelf in the land ice scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.grounding_line_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grounding line prescribed"
# "flux prescribed (Schoof)"
# "fixed grid size"
# "moving grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.2. Grounding Line Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the technique used for modelling the grounding line in the ice sheet-ice shelf coupling
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_sheet')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.3. Ice Sheet
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice sheets simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.ice_shelf')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Ice Shelf
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are ice shelves simulated?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.surface_mass_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Ice --> Mass Balance
Description of the surface mass balance treatment
6.1. Surface Mass Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how and where the surface mass balance (SMB) is calulated. Include the temporal coupling frequeny from the atmosphere, whether or not a seperate SMB model is used, and if so details of this model, such as its resolution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.bedrock')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Ice --> Mass Balance --> Basal
Description of basal melting
7.1. Bedrock
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over bedrock
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.basal.ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Ocean
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of basal melting over the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Ice --> Mass Balance --> Frontal
Description of claving/melting from the ice shelf front
8.1. Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of calving from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.mass_balance.frontal.melting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Melting
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the implementation of melting from the front of the ice shelf
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Ice --> Dynamics
**
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description if ice sheet and ice shelf dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.approximation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SIA"
# "SAA"
# "full stokes"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9.2. Approximation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Approximation type used in modelling ice dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.adaptive_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.3. Adaptive Timestep
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there an adaptive time scheme for the ice scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.landice.ice.dynamics.timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 9.4. Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep (in seconds) of the ice scheme. If the timestep is adaptive, then state a representative timestep.
End of explanation
"""
|
amueller/scipy-2017-sklearn | notebooks/08.Unsupervised_Learning-Clustering.ipynb | cc0-1.0 | from sklearn.datasets import make_blobs
X, y = make_blobs(random_state=42)
X.shape
plt.scatter(X[:, 0], X[:, 1]);
"""
Explanation: Unsupervised Learning Part 2 -- Clustering
Clustering is the task of gathering samples into groups of similar
samples according to some predefined similarity or distance (dissimilarity)
measure, such as the Euclidean distance.
<img width="60%" src='figures/clustering.png'/>
In this section we will explore a basic clustering task on some synthetic and real-world datasets.
Here are some common applications of clustering algorithms:
Compression for data reduction
Summarizing data as a reprocessing step for recommender systems
Similarly:
grouping related web news (e.g. Google News) and web search results
grouping related stock quotes for investment portfolio management
building customer profiles for market analysis
Building a code book of prototype samples for unsupervised feature extraction
Let's start by creating a simple, 2-dimensional, synthetic dataset:
End of explanation
"""
from sklearn.cluster import KMeans
kmeans = KMeans(n_clusters=3, random_state=42)
"""
Explanation: In the scatter plot above, we can see three separate groups of data points and we would like to recover them using clustering -- think of "discovering" the class labels that we already take for granted in a classification task.
Even if the groups are obvious in the data, it is hard to find them when the data lives in a high-dimensional space, which we can't visualize in a single histogram or scatterplot.
Now we will use one of the simplest clustering algorithms, K-means.
This is an iterative algorithm which searches for three cluster
centers such that the distance from each point to its cluster is
minimized. The standard implementation of K-means uses the Euclidean distance, which is why we want to make sure that all our variables are measured on the same scale if we are working with real-world datasets. In the previous notebook, we talked about one technique to achieve this, namely, standardization.
<br/>
<div class="alert alert-success">
<b>Question</b>:
<ul>
<li>
what would you expect the output to look like?
</li>
</ul>
</div>
End of explanation
"""
labels = kmeans.fit_predict(X)
labels
np.all(y == labels)
"""
Explanation: We can get the cluster labels either by calling fit and then accessing the
labels_ attribute of the K means estimator, or by calling fit_predict.
Either way, the result contains the ID of the cluster that each point is assigned to.
End of explanation
"""
plt.scatter(X[:, 0], X[:, 1], c=labels);
"""
Explanation: Let's visualize the assignments that have been found
End of explanation
"""
plt.scatter(X[:, 0], X[:, 1], c=y);
"""
Explanation: Compared to the true labels:
End of explanation
"""
from sklearn.metrics import confusion_matrix, accuracy_score
print('Accuracy score:', accuracy_score(y, labels))
print(confusion_matrix(y, labels))
np.mean(y == labels)
"""
Explanation: Here, we are probably satisfied with the clustering results. But in general we might want to have a more quantitative evaluation. How about comparing our cluster labels with the ground truth we got when generating the blobs?
End of explanation
"""
from sklearn.metrics import adjusted_rand_score
adjusted_rand_score(y, labels)
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>
After looking at the "True" label array y, and the scatterplot and `labels` above, can you figure out why our computed accuracy is 0.0, not 1.0, and can you fix it?
</li>
</ul>
</div>
Even though we recovered the partitioning of the data into clusters perfectly, the cluster IDs we assigned were arbitrary,
and we can not hope to recover them. Therefore, we must use a different scoring metric, such as adjusted_rand_score, which is invariant to permutations of the labels:
End of explanation
"""
kmeans = KMeans(n_clusters=2, random_state=42)
labels = kmeans.fit_predict(X)
plt.scatter(X[:, 0], X[:, 1], c=labels);
kmeans.cluster_centers_
"""
Explanation: One of the "short-comings" of K-means is that we have to specify the number of clusters, which we often don't know a priori. For example, let's have a look at what happens if we set the number of clusters to 2 in our synthetic 3-blob dataset:
End of explanation
"""
# Elbow method: fit K-means for k = 1..10 and record the within-cluster sum of
# squared distances (``inertia_``, a.k.a. distortion) for each k.
distortions = []
for i in range(1, 11):
    km = KMeans(n_clusters=i,
                random_state=0)
    km.fit(X)
    distortions.append(km.inertia_)
# Plot distortion against k; the "elbow" suggests a reasonable cluster count.
plt.plot(range(1, 11), distortions, marker='o')
plt.xlabel('Number of clusters')
plt.ylabel('Distortion')
plt.show()
"""
Explanation: The Elbow Method
The Elbow method is a "rule-of-thumb" approach to finding the optimal number of clusters. Here, we look at the cluster dispersion for different values of k:
End of explanation
"""
plt.figure(figsize=(12, 12))
n_samples = 1500
random_state = 170
X, y = make_blobs(n_samples=n_samples, random_state=random_state)
# Incorrect number of clusters
y_pred = KMeans(n_clusters=2, random_state=random_state).fit_predict(X)
plt.subplot(221)
plt.scatter(X[:, 0], X[:, 1], c=y_pred)
plt.title("Incorrect Number of Blobs")
# Anisotropicly distributed data
transformation = [[0.60834549, -0.63667341], [-0.40887718, 0.85253229]]
X_aniso = np.dot(X, transformation)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_aniso)
plt.subplot(222)
plt.scatter(X_aniso[:, 0], X_aniso[:, 1], c=y_pred)
plt.title("Anisotropicly Distributed Blobs")
# Different variance
X_varied, y_varied = make_blobs(n_samples=n_samples,
cluster_std=[1.0, 2.5, 0.5],
random_state=random_state)
y_pred = KMeans(n_clusters=3, random_state=random_state).fit_predict(X_varied)
plt.subplot(223)
plt.scatter(X_varied[:, 0], X_varied[:, 1], c=y_pred)
plt.title("Unequal Variance")
# Unevenly sized blobs
X_filtered = np.vstack((X[y == 0][:500], X[y == 1][:100], X[y == 2][:10]))
y_pred = KMeans(n_clusters=3,
random_state=random_state).fit_predict(X_filtered)
plt.subplot(224)
plt.scatter(X_filtered[:, 0], X_filtered[:, 1], c=y_pred)
plt.title("Unevenly Sized Blobs")
"""
Explanation: Then, we pick the value that resembles the "pit of an elbow." As we can see, this would be k=3 in this case, which makes sense given our visual expectation of the dataset previously.
Clustering comes with assumptions: A clustering algorithm finds clusters by making assumptions about which samples should be grouped together. Each algorithm makes different assumptions and the quality and interpretability of your results will depend on whether the assumptions are satisfied for your goal. For K-means clustering, the model is that all clusters have equal, spherical variance.
In general, there is no guarantee that structure found by a clustering algorithm has anything to do with what you were interested in.
We can easily create a dataset that has non-isotropic clusters, on which kmeans will fail:
End of explanation
"""
from sklearn.datasets import load_digits
digits = load_digits()
# ...
# %load solutions/08B_digits_clustering.py
"""
Explanation: Some Notable Clustering Routines
The following are two well-known clustering algorithms.
sklearn.cluster.KMeans: <br/>
The simplest, yet effective clustering algorithm. Needs to be provided with the
number of clusters in advance, and assumes that the data is normalized as input
(but use a PCA model as preprocessor).
sklearn.cluster.MeanShift: <br/>
Can find better looking clusters than KMeans but is not scalable to high number of samples.
sklearn.cluster.DBSCAN: <br/>
Can detect irregularly shaped clusters based on density, i.e. sparse regions in
the input space are likely to become inter-cluster boundaries. Can also detect
outliers (samples that are not part of a cluster).
sklearn.cluster.AffinityPropagation: <br/>
Clustering algorithm based on message passing between data points.
sklearn.cluster.SpectralClustering: <br/>
KMeans applied to a projection of the normalized graph Laplacian: finds
normalized graph cuts if the affinity matrix is interpreted as an adjacency matrix of a graph.
sklearn.cluster.Ward: <br/>
Ward implements hierarchical clustering based on the Ward algorithm,
a variance-minimizing approach. At each step, it minimizes the sum of
squared differences within all clusters (inertia criterion).
Of these, Ward, SpectralClustering, DBSCAN and Affinity propagation can also work with precomputed similarity matrices.
<img src="figures/cluster_comparison.png" width="900">
<div class="alert alert-success">
<b>EXERCISE: digits clustering</b>:
<ul>
<li>
Perform K-means clustering on the digits data, searching for ten clusters.
Visualize the cluster centers as images (i.e. reshape each to 8x8 and use
``plt.imshow``) Do the clusters seem to be correlated with particular digits? What is the ``adjusted_rand_score``?
</li>
<li>
Visualize the projected digits as in the last notebook, but this time use the
cluster labels as the color. What do you notice?
</li>
</ul>
</div>
End of explanation
"""
|
sdpython/ensae_teaching_cs | _doc/notebooks/td1a_algo/td1a_sobel.ipynb | mit | from jyquickhelper import add_notebook_menu
add_notebook_menu()
"""
Explanation: 1A.algo - filtre de Sobel
Le filtre de Sobel est utilisé pour calculer des gradients dans une image. L'image ainsi filtrée révèle les forts contrastes.
End of explanation
"""
from pyquickhelper.loghelper import noLOG
from pyensae.datasource import download_data
f = download_data("python.png", url="http://imgs.xkcd.com/comics/")
from IPython.display import Image
Image("python.png")
"""
Explanation: Exercice 1 : application d'un filtre
Le filtre de Sobel est un filtre qu'on applique à une image pour calculer le gradient d'une image afin de déterminer les contours qui s'y trouve. Le filtre de Canny permet de flouter une image. Dans un premier temps, on cherchera à appliquer un filtre 3x3 :
$\left( \begin{array}{ccc} 1&1&1 \ 1&1&1 \ 1&1&1 \end{array} \right)$
Qu'on applique au voisinage 3x3 du pixel $p_5$ :
$\left( \begin{array}{ccc} p_1&p_2&p_3 \ p_4&p_5&p_6 \ p_7&p_8&p_9 \end{array} \right)$
Après l'application du filtre à ce pixel, le résultat devient :
$\left( \begin{array}{ccc} ?&?&? \ ?& \sum_{i=1}^9 p_i &? \ ?&?&? \end{array} \right)$
On veut maintenant appliquer ce filtre sur l'image suivante :
End of explanation
"""
# Load the comic with Pillow, then convert it to a NumPy array for pixel math.
import PIL
import PIL.Image
im = PIL.Image.open("python.png")
from PIL.ImageDraw import Draw
import numpy
# .copy() gives us our own buffer rather than a read-only view of the image data.
tab = numpy.asarray(im).copy()
tab.flags.writeable = True # so the pixel data can be modified in place
# Quick sanity check: the array's shape and the type of its first entry.
"dimension",tab.shape, " type", type(tab[0,0])
"""
Explanation: Mais avant de pouvoir faire des calculs dessus, il faut pouvoir convertir l'image en un tableau numpy avec la fonction numpy.asarray.
End of explanation
"""
tab[100:300,200:400] = 255
im2 = PIL.Image.fromarray(numpy.uint8(tab))
im2.save("python_white.png")
Image("python_white.png")
"""
Explanation: Une fois les calculs effectués, il faut convertir le tableau numpy en image. On peut par exemple blanchir tout une partie de l'image et l'afficher.
End of explanation
"""
l = tab.tolist()
len(l),len(l[0])
"""
Explanation: Et maintenant, il s'agit d'appliquer le filtre de Canny uniforme présenté ci-dessus et d'afficher l'image, soit en utilisant numpy, soit sans numpy en convertissant l'image en liste avec la méthode tolist. On pourra comparer les temps de calcul.
End of explanation
"""
|
vinitsamel/udacitydeeplearning | embeddings/Skip-Grams-Solution.ipynb | mit | import time
import numpy as np
import tensorflow as tf
import utils
"""
Explanation: Skip-gram word2vec
In this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like machine translation.
Readings
Here are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.
A really good conceptual overview of word2vec from Chris McCormick
First word2vec paper from Mikolov et al.
NIPS paper with improvements for word2vec also from Mikolov et al.
An implementation of word2vec from Thushan Ganegedara
TensorFlow word2vec tutorial
Word embeddings
When you're dealing with words in text, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The matrix multiplication going into the first hidden layer will have almost all of the resulting values be zero. This a huge waste of computation.
To solve this problem and greatly increase the efficiency of our networks, we use what are called embeddings. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding the index of the "on" input unit.
Instead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example "heart" is encoded as 958, "mind" as 18094. Then to get hidden layer values for "heart", you just take the 958th row of the embedding matrix. This process is called an embedding lookup and the number of hidden units is the embedding dimension.
<img src='assets/tokenize_lookup.png' width=500>
There is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix as well.
Embeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called Word2Vec uses the embedding layer to find vector representations of words that contain semantic meaning.
Word2Vec
The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as "black", "white", and "red" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.
<img src="assets/word2vec_architectures.png" width="500">
In this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.
First up, importing packages.
End of explanation
"""
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
    """tqdm progress bar driven by urllib.request.urlretrieve's reporthook."""
    # Block index reported by the previous reporthook call.
    last_block = 0
    def hook(self, block_num=1, block_size=1, total_size=None):
        """Reporthook for urlretrieve: advance the bar by the bytes received
        since the previous call.

        block_num  -- index of the block just transferred
        block_size -- size of each block in bytes
        total_size -- total size of the download in bytes (may be unknown)
        """
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
# Download the Text8 corpus once (with a tqdm progress bar), then unpack
# it into the data folder and read the whole corpus into one string.
if not isfile(dataset_filename):
    with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
        urlretrieve(
            'http://mattmahoney.net/dc/text8.zip',
            dataset_filename,
            pbar.hook)

if not isdir(dataset_folder_path):
    # Extract only if the data folder is not already present.
    with zipfile.ZipFile(dataset_filename) as zip_ref:
        zip_ref.extractall(dataset_folder_path)

with open('data/text8') as f:
    text = f.read()
"""
Explanation: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
End of explanation
"""
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
"""
Explanation: Preprocessing
Here I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function coverts any punctuation into tokens, so a period is changed to <PERIOD>. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it.
End of explanation
"""
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
"""
Explanation: And here I'm creating dictionaries to convert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
End of explanation
"""
from collections import Counter
import random

threshold = 1e-5  # Mikolov's subsampling threshold t
word_counts = Counter(int_words)
total_count = len(int_words)
# f(w): relative frequency of each word in the corpus
freqs = {word: count/total_count for word, count in word_counts.items()}
# P(w) = 1 - sqrt(t / f(w)): the probability of DISCARDING word w
p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts}
# Keep each occurrence with probability 1 - P(w)
train_words = [word for word in int_words if random.random() < (1 - p_drop[word])]
"""
Explanation: Subsampling
Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by
$$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$
where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.
I'm going to leave this up to you as an exercise. Check out my solution to see how I did it.
Exercise: Implement subsampling for the words in int_words. That is, go through int_words and discard each word given the probability $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to train_words.
End of explanation
"""
def get_target(words, idx, window_size=5):
    """Return the context words in a randomly sized window around ``idx``.

    A radius R is drawn uniformly from [1, window_size]; the R words
    before and the R words after position ``idx`` (clipped at the start
    of the list) are returned, de-duplicated.
    """
    radius = np.random.randint(1, window_size + 1)
    left = max(idx - radius, 0)
    right = idx + radius
    before = words[left:idx]
    after = words[idx + 1:right + 1]
    return list(set(before + after))
"""
Explanation: Making batches
Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$.
From Mikolov et al.:
"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels."
Exercise: Implement a function get_target that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window.
End of explanation
"""
def get_batches(words, batch_size, window_size=5):
    """Generate (inputs, targets) batches for skip-gram training.

    Only complete batches are produced; leftover words at the end of
    ``words`` are dropped.  Within a batch, each input word is repeated
    once per target word in its context window, so the two lists yielded
    always have equal length.
    """
    full_batches = len(words) // batch_size
    usable = words[:full_batches * batch_size]
    for start in range(0, len(usable), batch_size):
        batch = usable[start:start + batch_size]
        inputs, targets = [], []
        for pos in range(len(batch)):
            center = batch[pos]
            context = get_target(batch, pos, window_size)
            targets.extend(context)
            inputs.extend([center] * len(context))
        yield inputs, targets
"""
Explanation: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
End of explanation
"""
train_graph = tf.Graph()
with train_graph.as_default():
    # Input word indices; batch size varies, so the shape is left open.
    inputs = tf.placeholder(tf.int32, [None], name='inputs')
    # Target word indices; rank-2 as required by tf.nn.sampled_softmax_loss
    # (each row holds one target, fed as np.array(y)[:, None]).
    labels = tf.placeholder(tf.int32, [None, None], name='labels')
"""
Explanation: Building the graph
From Chris McCormick's blog, we can see the general structure of our network.
The input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.
The idea here is to train the hidden layer weight matrix to find efficient representations for our words. We can discard the softmax layer because we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.
I'm going to have you build the graph in stages now. First off, creating the inputs and labels placeholders like normal.
Exercise: Assign inputs and labels using tf.placeholder. We're going to be passing in integers, so set the data types to tf.int32. The batches we're passing in will have varying sizes, so set the batch sizes to [None]. To make things work later, you'll need to set the second dimension of labels to None or 1.
End of explanation
"""
n_vocab = len(int_to_vocab)
n_embedding = 200  # Number of embedding features
with train_graph.as_default():
    # Embedding matrix: one n_embedding-dim row per vocabulary word,
    # initialized uniformly in [-1, 1).
    embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1))
    # Look up rows for the input word indices (equivalent to multiplying
    # a one-hot matrix, but done as a table lookup).
    embed = tf.nn.embedding_lookup(embedding, inputs)
"""
Explanation: Embedding
The embedding matrix has a size of the number of words by the number of units in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \times 300$. Remember that we're using tokenized data for our inputs, usually as integers, where the number of tokens is the number of words in our vocabulary.
Exercise: Tensorflow provides a convenient function tf.nn.embedding_lookup that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use tf.nn.embedding_lookup to get the embedding tensors. For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using tf.random_uniform.
End of explanation
"""
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
    # Output weights/biases; note sampled_softmax_loss expects the weight
    # matrix transposed relative to a normal dense layer: (n_vocab, n_embedding).
    softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))
    softmax_b = tf.Variable(tf.zeros(n_vocab))

    # Calculate the loss using negative sampling
    # (positional args: weights, biases, labels, inputs, num_sampled, num_classes)
    loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b,
                                      labels, embed,
                                      n_sampled, n_vocab)

    cost = tf.reduce_mean(loss)
    optimizer = tf.train.AdamOptimizer().minimize(cost)
"""
Explanation: Negative sampling
For every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called "negative sampling". Tensorflow has a convenient function to do this, tf.nn.sampled_softmax_loss.
Exercise: Below, create weights and biases for the softmax layer. Then, use tf.nn.sampled_softmax_loss to calculate the loss. Be sure to read the documentation to figure out how it works.
End of explanation
"""
with train_graph.as_default():
    ## From Thushan Ganegedara's implementation
    valid_size = 16  # Random set of words to evaluate similarity on.
    valid_window = 100
    # pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
    valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
    valid_examples = np.append(valid_examples,
                               random.sample(range(1000, 1000+valid_window), valid_size//2))

    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # We use the cosine distance:
    norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
    normalized_embedding = embedding / norm
    valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
    # Cosine similarity of each validation word against the whole vocabulary.
    similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))

# If the checkpoints directory doesn't exist:
!mkdir checkpoints
epochs = 10
batch_size = 1000
window_size = 10

with train_graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=train_graph) as sess:
    iteration = 1
    # Running sum of batch losses for the 100-iteration average
    # (NOTE: this Python name shadows the graph's `loss` tensor).
    loss = 0
    sess.run(tf.global_variables_initializer())

    for e in range(1, epochs+1):
        batches = get_batches(train_words, batch_size, window_size)
        start = time.time()
        for x, y in batches:
            # labels must be rank-2 for sampled_softmax_loss, hence [:, None]
            feed = {inputs: x,
                    labels: np.array(y)[:, None]}
            train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
            loss += train_loss

            if iteration % 100 == 0:
                end = time.time()
                print("Epoch {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Avg. Training loss: {:.4f}".format(loss/100),
                      "{:.4f} sec/batch".format((end-start)/100))
                loss = 0
                start = time.time()

            if iteration % 1000 == 0:
                # note that this is expensive (~20% slowdown if computed every 500 steps)
                sim = similarity.eval()
                for i in range(valid_size):
                    valid_word = int_to_vocab[valid_examples[i]]
                    top_k = 8  # number of nearest neighbors
                    # argsort of negated similarities = descending order;
                    # skip index 0 (the word itself).
                    nearest = (-sim[i, :]).argsort()[1:top_k+1]
                    log = 'Nearest to %s:' % valid_word
                    for k in range(top_k):
                        close_word = int_to_vocab[nearest[k]]
                        log = '%s %s,' % (log, close_word)
                    print(log)

            iteration += 1
    save_path = saver.save(sess, "checkpoints/text8.ckpt")
    embed_mat = sess.run(normalized_embedding)
"""
Explanation: Validation
This code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.
End of explanation
"""
with train_graph.as_default():
    saver = tf.train.Saver()

with tf.Session(graph=train_graph) as sess:
    # Load the most recent checkpoint and pull out the (unnormalized)
    # embedding matrix as a NumPy array.
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    embed_mat = sess.run(embedding)
"""
Explanation: Restore the trained network if you need to:
End of explanation
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE

viz_words = 500  # visualize only the 500 most frequent words
tsne = TSNE()
# Project the embedding vectors down to 2-D for plotting.
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])

fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
    plt.scatter(*embed_tsne[idx, :], color='steelblue')
    plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
"""
Explanation: Visualizing the word vectors
Below we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local structure. Check out this post from Christopher Olah to learn more about T-SNE and other ways to visualize high-dimensional data.
End of explanation
"""
|
msanterre/deep_learning | embeddings/Skip-Gram_word2vec.ipynb | mit | import time
import numpy as np
import tensorflow as tf
import utils
"""
Explanation: Skip-gram word2vec
In this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like machine translation.
Readings
Here are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.
A really good conceptual overview of word2vec from Chris McCormick
First word2vec paper from Mikolov et al.
NIPS paper with improvements for word2vec also from Mikolov et al.
An implementation of word2vec from Thushan Ganegedara
TensorFlow word2vec tutorial
Word embeddings
When you're dealing with words in text, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient, you'll have one element set to 1 and the other 50,000 set to 0. The matrix multiplication going into the first hidden layer will have almost all of the resulting values be zero. This a huge waste of computation.
To solve this problem and greatly increase the efficiency of our networks, we use what are called embeddings. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding to the index of the "on" input unit.
Instead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example "heart" is encoded as 958, "mind" as 18094. Then to get hidden layer values for "heart", you just take the 958th row of the embedding matrix. This process is called an embedding lookup and the number of hidden units is the embedding dimension.
<img src='assets/tokenize_lookup.png' width=500>
There is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix as well.
Embeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called Word2Vec uses the embedding layer to find vector representations of words that contain semantic meaning.
Word2Vec
The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as "black", "white", and "red" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.
<img src="assets/word2vec_architectures.png" width="500">
In this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.
First up, importing packages.
End of explanation
"""
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
    """tqdm progress bar adapter for urllib's urlretrieve reporthook."""
    # Index of the last block reported so far.
    last_block = 0

    def hook(self, block_num=1, block_size=1, total_size=None):
        """Reporthook callback: advance the bar by the bytes just received."""
        self.total = total_size
        self.update((block_num - self.last_block) * block_size)
        self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
"""
Explanation: Load the text8 dataset, a file of cleaned up Wikipedia articles from Matt Mahoney. The next cell will download the data set to the data folder. Then you can extract it and delete the archive file to save storage space.
End of explanation
"""
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
"""
Explanation: Preprocessing
Here I'm fixing up the text to make training easier. This comes from the utils module I wrote. The preprocess function converts any punctuation into tokens, so a period is changed to <PERIOD>. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it.
End of explanation
"""
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
"""
Explanation: And here I'm creating dictionaries to convert words to integers and backwards, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list int_words.
End of explanation
"""
## Your code here
train_words = # The final subsampled word list
"""
Explanation: Subsampling
Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by
$$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$
where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.
I'm going to leave this up to you as an exercise. This is more of a programming challenge, than about deep learning specifically. But, being able to prepare your data for your network is an important skill to have. Check out my solution to see how I did it.
Exercise: Implement subsampling for the words in int_words. That is, go through int_words and discard each word given the probability $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to train_words.
End of explanation
"""
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
# Your code here
return
"""
Explanation: Making batches
Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$.
From Mikolov et al.:
"Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels."
Exercise: Implement a function get_target that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window.
End of explanation
"""
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
"""
Explanation: Here's a function that returns batches for our network. The idea is that it grabs batch_size words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function by the way, helps save memory.
End of explanation
"""
train_graph = tf.Graph()
with train_graph.as_default():
inputs =
labels =
"""
Explanation: Building the graph
From Chris McCormick's blog, we can see the general structure of our network.
The input words are passed in as integers. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.
The idea here is to train the hidden layer weight matrix to find efficient representations for our words. We can discard the softmax layer because we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.
I'm going to have you build the graph in stages now. First off, creating the inputs and labels placeholders like normal.
Exercise: Assign inputs and labels using tf.placeholder. We're going to be passing in integers, so set the data types to tf.int32. The batches we're passing in will have varying sizes, so set the batch sizes to [None]. To make things work later, you'll need to set the second dimension of labels to None or 1.
End of explanation
"""
n_vocab = len(int_to_vocab)
n_embedding = # Number of embedding features
with train_graph.as_default():
embedding = # create embedding weight matrix here
embed = # use tf.nn.embedding_lookup to get the hidden layer output
"""
Explanation: Embedding
The embedding matrix has a size of the number of words by the number of units in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \times 300$. Remember that we're using tokenized data for our inputs, usually as integers, where the number of tokens is the number of words in our vocabulary.
Exercise: Tensorflow provides a convenient function tf.nn.embedding_lookup that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use tf.nn.embedding_lookup to get the embedding tensors. For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using tf.random_uniform.
End of explanation
"""
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = # create softmax weight matrix here
softmax_b = # create softmax biases here
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
"""
Explanation: Negative sampling
For every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called "negative sampling". Tensorflow has a convenient function to do this, tf.nn.sampled_softmax_loss.
Exercise: Below, create weights and biases for the softmax layer. Then, use tf.nn.sampled_softmax_loss to calculate the loss. Be sure to read the documentation to figure out how it works.
End of explanation
"""
with train_graph.as_default():
## From Thushan Ganegedara's implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
!mkdir checkpoints
"""
Explanation: Validation
This code is from Thushan Ganegedara's implementation. Here we're going to choose a few common words and few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.
End of explanation
"""
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
## From Thushan Ganegedara's implementation
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
"""
Explanation: Training
Below is the code to train the network. Every 100 batches it reports the training loss. Every 1000 batches, it'll print out the validation words.
End of explanation
"""
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
"""
Explanation: Restore the trained network if you need to:
End of explanation
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
"""
Explanation: Visualizing the word vectors
Below we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local structure. Check out this post from Christopher Olah to learn more about T-SNE and other ways to visualize high-dimensional data.
End of explanation
"""
|
sheikhomar/ml | tensor-flow-basics.ipynb | mit | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
tf.__version__
"""
Explanation: TensorFlow Basics
End of explanation
"""
h = tf.constant('Hello World')
h
h.graph is tf.get_default_graph()
x = tf.constant(100)
x
# Create Session object in which we can run operations.
# A session object encapsulates the environment in which
# operations are executed. Tensor objects are evaluated
# by operations.
session = tf.Session()
session.run(h)
session.run(x)
type(session.run(x))
type(session.run(h))
"""
Explanation: Constants
End of explanation
"""
a = tf.constant(2)
b = tf.constant(3)
with tf.Session() as session:
print('Addition: {}'.format(session.run(a + b)))
print('Subtraction: {}'.format(session.run(a - b)))
print('Multiplication: {}'.format(session.run(a * b)))
print('Division: {}'.format(session.run(a / b)))
e = np.array([[5., 5.]])
f = np.array([[2.], [2.]])
e
f
# Convert numpy arrays to TensorFlow objects
ec = tf.constant(e)
fc = tf.constant(f)
matrix_mult_op = tf.matmul(ec, fc)
with tf.Session() as session:
print('Matrix Multiplication: {}'.format(session.run(matrix_mult_op)))
"""
Explanation: Operations
End of explanation
"""
c = tf.placeholder(tf.int32)
d = tf.placeholder(tf.int32)
add_op = tf.add(c, d)
sub_op = tf.subtract(c, d)
mult_op = tf.multiply(c, d)
div_op = tf.divide(c, d)
with tf.Session() as session:
input_dict = {c: 11, d: 10}
print('Addition: {}'.format(session.run(add_op, feed_dict=input_dict)))
print('Subtraction: {}'.format(session.run(sub_op, feed_dict=input_dict)))
print('Multiplication: {}'.format(session.run(mult_op, feed_dict=input_dict)))
print('Division: {}'.format(session.run(div_op, feed_dict={c:11, d:11})))
"""
Explanation: Placeholders
Instead of using a constant, we can define a placeholder that allows us to provide the value at the time of execution just like function parameters.
End of explanation
"""
var2 = tf.get_variable('var2', [2])
var2
"""
Explanation: Variables
A variable is a tensor that can change during program execution.
End of explanation
"""
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('data', one_hot=True)
type(mnist)
mnist.train.images
mnist.train.images.shape
"""
Explanation: Classification using the MNIST dataset
End of explanation
"""
# Convert the vector to a 28x28 matrix
sample_img = mnist.train.images[0].reshape(28, 28)
# Show the picture
plt.imshow(sample_img, cmap='Greys')
"""
Explanation: The MNIST dataset contains 55,000 images. The dimensions of each image are 28-by-28. Each vector has 784 elements because 28*28=784.
End of explanation
"""
learning_rate = 0.001
training_epochs = 15
batch_size = 100
"""
Explanation: Before we begin, we specify three parameters:
- the learning rate $\alpha$: how quickly should the cost function be adjusted.
- training epoch: number of training cycles
- batch size: batches of training data
End of explanation
"""
# Number of classes is 10 because we have 10 digits
n_classes = 10
# Number of training examples
n_samples = mnist.train.num_examples
# The flatten array of the 28x28 image matrix contains 784 elements
n_input = 784
# Number of neurons in the hidden layers. For image data, 256 neurons
# is common because we have 256 intensity values (8-bit).
# In this example, we only use 2 hidden layers. The more hidden
# layers, we use the longer it takes for the model to run but
# more layers has the possibility of being more accurate.
n_hidden_1 = 256
n_hidden_2 = 256
"""
Explanation: Network parameters
End of explanation
"""
def multilayer_perceptron(x, weights, biases):
    """Build a two-hidden-layer feed-forward network and return its logits.

    x: placeholder for the input data
    weights: dict with 'h1', 'h2' and 'out' weight matrices
    biases: dict with 'b1', 'b2' and 'out' bias vectors
    """
    # Hidden layer 1: affine transform followed by a ReLU non-linearity.
    hidden_one = tf.nn.relu(tf.add(tf.matmul(x, weights['h1']), biases['b1']))
    # Hidden layer 2: same shape of computation over the first layer's output.
    hidden_two = tf.nn.relu(tf.add(tf.matmul(hidden_one, weights['h2']), biases['b2']))
    # Output layer: plain affine transform (no activation; raw logits).
    return tf.add(tf.matmul(hidden_two, weights['out']), biases['out'])
weights = {
'h1': tf.Variable(tf.random_normal(shape=[n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal(shape=[n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal(shape=[n_hidden_2, n_classes]))
}
tf.random_normal(shape=(n_input, n_hidden_1))
#tf.Session().run(weights['h1'])
"""
Explanation: Define the multilayer perceptron: two ReLU hidden layers feeding a linear output layer, with randomly initialized weight and bias dictionaries.
End of explanation
"""
|
bourneli/deep-learning-notes | DAT236x Deep Learning Explained/Lab5_RecurrentNetwork.ipynb | mit | from matplotlib import pyplot as plt
import math
import numpy as np
import os
import pandas as pd
import random
import time
import cntk as C
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
%matplotlib inline
# to make things reproduceable, seed random
np.random.seed(0)
C.cntk_py.set_fixed_random_seed(1)
C.cntk_py.force_deterministic_algorithms()
"""
Explanation: Lab 5 - Time series prediction with LSTM (IOT Data)
This lab corresponds to Module 5 of the "Deep Learning Explained" course.
In this lab we want to build a model using some real world internet-of-things (IOT) data. As an example we want to predict the daily output of a solar panel base on the initial readings of the day.
Solar power forecasting is a challenging and important problem. The solar energy generation forecasting problem is closely linked to the problem of weather variables forecasting. Indeed, this problem is usually split into two parts, on one hand focusing on the forecasting of solar PV (solar panel system) or any other meteorological variable and on the other hand estimating the amount of energy that a concrete power plant will produce with the estimated meteorological resource. In general, the way to deal with this difficult problem is usually related to the spatial and temporal scales we are interested in. This lab focusses on a simplified forecasting model using previously generated data from solar panel to predict the future.
Goal
Using historic daily production of a solar panel, we want to predict the total power production of the solar panel array for a future day. We will be using an LSTM based time series prediction model to predict the daily output of a solar panel based on the initial readings of a the day.
We train the model with historical data of the solar panel. In our example we want to predict the total power production of the solar panel array for the day starting with the initial readings of the day. We start predicting after the first 2 readings and adjust the prediction with each new reading.
We will use a recurrent model with the LSTM cell. This lab has the following sub-sections:
- Setup
- Data generation
- LSTM network modeling
- Training, test and prediction workflow
For more details on how LSTMs work, view the Module 5 videos or see this excellent post.
Setup
End of explanation
"""
if 'TEST_DEVICE' in os.environ:
if os.environ['TEST_DEVICE'] == 'cpu':
C.device.try_set_default_device(C.device.cpu())
else:
C.device.try_set_default_device(C.device.gpu(0))
# Test for CNTK version
if not C.__version__ == "2.0":
raise Exception("this notebook was designed to work with 2.0. Current Version: " + C.__version__)
"""
Explanation: In the block below, we check if we are running this notebook in the CNTK internal test machines by looking for environment variables defined there. We then select the right target device (GPU vs CPU) to test this notebook. In other cases, we use CNTK's default policy to use the best available device (GPU, if available, else CPU).
End of explanation
"""
isFast = True
# we need around 2000 epochs to see good accuracy. For testing 100 epochs will do.
EPOCHS = 200 if isFast else 2000
"""
Explanation: There are two training modes that we can choose from for this lab:
- Fast mode: isFast is set to True. This is the default mode for the notebooks, which means we train for fewer iterations or train / test on limited data. This ensures functional correctness of the notebook though the models produced are far from what a completed training would produce.
Slow mode: We recommend the user to set this flag to False once the user has gained familiarity with the notebook content and wants to gain insight from running the notebooks for a longer period with different parameters for training.
For Fast mode we train the model for 100 epochs and results have low accuracy but is good enough for development. The model yields good accuracy after 1000-2000 epochs.
End of explanation
"""
def generate_solar_data(input_url, time_steps, normalize=1, val_size=0.1, test_size=0.1):
    """Download/cache the solar CSV and turn it into per-day RNN sequences.

    input_url: URL of the raw CSV (columns: time, solar.current, solar.total)
    time_steps: maximum sequence length; a day's readings are truncated here
    normalize: divisor applied to both reading columns
    val_size, test_size: fractions of the days reserved for validation / test

    Returns (result_x, result_y): dicts keyed by "train"/"val"/"test" where
    result_x[ds] is a list of growing prefix sequences of a day's
    "solar.total" readings and result_y[ds] holds the matching daily maximum.
    """
    # try to find the data file locally; if it doesn't exist, download it
    cache_path = os.path.join("data", "iot")
    cache_file = os.path.join(cache_path, "solar.csv")
    if not os.path.exists(cache_path):
        os.makedirs(cache_path)
    if not os.path.exists(cache_file):
        urlretrieve(input_url, cache_file)
        print("downloaded data successfully from ", input_url)
    else:
        print("using cache for ", input_url)
    df = pd.read_csv(cache_file, index_col="time", parse_dates=['time'], dtype=np.float32)
    df["date"] = df.index.date
    # normalize both reading columns so inputs fall roughly into [0, 1]
    df['solar.current'] /= normalize
    df['solar.total'] /= normalize
    # group by day, find the max for a day and add new ".max" columns
    grouped = df.groupby(df.index.date).max()
    grouped.columns = ["solar.current.max", "solar.total.max", "date"]
    # merge continuous readings and daily max values into a single frame
    df_merged = pd.merge(df, grouped, right_index=True, on="date")
    df_merged = df_merged[["solar.current", "solar.total",
                           "solar.current.max", "solar.total.max"]]
    # we group by day so we can process a day at a time
    grouped = df_merged.groupby(df_merged.index.date)
    per_day = []
    for _, group in grouped:
        per_day.append(group)
    # split the dataset into train, validation and test sets on day boundaries;
    # NOTE: val_size/test_size are rebound here from fractions to absolute
    # day counts, which the stride computation below relies on
    val_size = int(len(per_day) * val_size)
    test_size = int(len(per_day) * test_size)
    next_val = 0
    next_test = 0
    result_x = {"train": [], "val": [], "test": []}
    result_y = {"train": [], "val": [], "test": []}
    # generate sequences a day at a time; days are assigned with fixed strides
    # (roughly every len/val_size-th day to "val", every len/test_size-th to
    # "test", everything else to "train"), preserving time order
    for i, day in enumerate(per_day):
        # if we have less than 8 datapoints for a day we skip over the
        # day assuming something is missing in the raw data
        total = day["solar.total"].values
        if len(total) < 8:
            continue
        if i >= next_val:
            current_set = "val"
            next_val = i + int(len(per_day) / val_size)
        elif i >= next_test:
            current_set = "test"
            next_test = i + int(len(per_day) / test_size)
        else:
            current_set = "train"
        max_total_for_day = np.array(day["solar.total.max"].values[0])
        # every growing prefix of the day's readings maps to the same label
        # (the day's maximum total); prefixes are capped at time_steps readings
        for j in range(2, len(total)):
            result_x[current_set].append(total[0:j])
            result_y[current_set].append([max_total_for_day])
            if j >= time_steps:
                break
    # make result_y a numpy array per split (result_x stays a ragged list)
    for ds in ["train", "val", "test"]:
        result_y[ds] = np.array(result_y[ds])
    return result_x, result_y
"""
Explanation: Data generation
Our solar panel emits two measures at 30 min interval:
- solar.current is the current production in Watt
- solar.total is the total produced for the day so far in Watt/hour
Our prediction approach involves starting with the first 2 initial readings of the day. Based on these readings we start predicting and adjust the prediction with each new reading. The training data we are going to use comes as a CSV file and has the following format:
time,solar.current,solar.total
7am,6.3,1.7
7:30am,44.3,11.4
...
Our training dataset contains 3 years of captured data and can be found here.
The dataset is not pre-processed: it is raw data and contains smaller gaps and errors (like a panel failed to report).
Pre-processing
Most of the code in this example is related to data preparation. Thankfully the pandas library make this easy.
Below, we define a generate_solar_data() function that performs the following tasks:
- read raw data into a pandas dataframe
- normalize the data
- group the data by day
- append the columns "solar.current.max" and "solar.total.max"
- generate the sequences for each day
Sequence Generation The measurements for a day are grouped into a set of sequences, corresponding to each measurement for the day. Each measurement sequence will contain all of the raw measurements (solar.current, solar.total) seen for the current day, up until and including the current measurement.
Here is an example of some sequences for a day:
1.7,11.4 -> 10300
1.7,11.4,67.5 -> 10300
1.7,11.4,67.5,250.5 ... -> 10300
1.7,11.4,67.5,250.5,573.5 -> 10300
All sequences for all days are flattened into a single list of sequences. The day and timestamp information have been removed; only the sequences matter.
Note if we have less than 8 datapoints for a day we skip over the day assuming something is missing in the raw data. If we get more than 14 data points in a day we truncate the readings.
Training / Testing / Validation data preparation
We start by reading the CSV file. The raw data is sorted by time. Normally, we would randomize the data before splitting into training, validation and test datasets, but this would make it impractical to visualize results.
Hence, we split the dataset in the following manner: pick the following (in sequence order): 8 values for training, 1 for validation and 1 for test until there is no more data. This will spread training, validation and test datasets across the full timeline while preserving time order.
End of explanation
"""
# there are 14 lstm cells, 1 for each possible reading we get per day
TIMESTEPS = 14
# 20000 is the maximum total output in our dataset. We normalize all values with
# this so our inputs are between 0.0 and 1.0 range.
NORMALIZE = 20000
X, Y = generate_solar_data("https://www.cntk.ai/jup/dat/solar.csv",
TIMESTEPS, normalize=NORMALIZE)
"""
Explanation: Data caching
For routine testing we would like to cache the data locally when available. If it is not available from the cache locations we shall download.
End of explanation
"""
# process batches of 10 days
BATCH_SIZE = TIMESTEPS * 10
def next_batch(x, y, ds):
    """Yield successive (x, y) minibatches for the dataset split *ds*.

    x, y: dicts keyed by "train"/"val"/"test" holding sequences / labels
    ds: name of the split to iterate over
    Yields pairs of BATCH_SIZE-sized slices; the final batch may be shorter.
    """
    def as_batch(data, start, count):
        # Plain slicing: silently returns fewer than `count` items at the tail.
        return data[start:start + count]
    # Fix: slice the x/y *parameters* rather than the module-level globals
    # X/Y — the previous version silently ignored its arguments.
    for i in range(0, len(x[ds]), BATCH_SIZE):
        yield as_batch(x[ds], i, BATCH_SIZE), as_batch(y[ds], i, BATCH_SIZE)
"""
Explanation: Utility for data fetching
next_batch() yields the next batch for training. We use variable size sequences supported by CNTK and batches are a list of numpy arrays where the numpy arrays have variable length.
A standard practice is to shuffle batches with each epoch. We don't do this here because we want to be able to graph the data that is easily interpretable.
End of explanation
"""
X['train'][0:3]
Y['train'][0:3]
"""
Explanation: Understand the data format
You can now see the sequence we are going to feed to the LSTM. Note if we have less than 8 datapoints for a day we skip over the day assuming something is missing in the raw data. If we get more than 14 data points in a day we truncate the readings.
End of explanation
"""
#Define the size of the internal state
H_DIMS = 14 # error 0.007
# H_DIMS = 7 # error 0.008
# H_DIMS = 28 # error 0.00862598
# dropout 0.2, test error: 0.007
# dropout 0.5, test error: 0.0083
# dropout 0.9, test error: 0.013
def create_model(x):
    """Create the LSTM model for time series prediction.

    x: CNTK sequence input variable (one scalar reading per time step).
    Returns a network producing a single scalar prediction per input sequence.
    """
    with C.layers.default_options(initial_state = 0.1):
        # Recurrent LSTM layer over the variable-length input sequence.
        m = C.layers.Recurrence(C.layers.LSTM(H_DIMS))(x)
        #m = C.layers.Recurrence(C.layers.RNNStep(H_DIMS))(x) # 0.028
        # Keep only the recurrent output after the final time step.
        m = C.sequence.last(m)
        # Dropout for regularisation; the commented rates record earlier
        # experiments (their test errors are noted next to H_DIMS above).
        m = C.layers.Dropout(0.2)(m)
        # m = C.layers.Dropout(0.5)(m)
        # m = C.layers.Dropout(0.9)(m)
        # Dense layer collapses the state vector to one output value.
        m = C.layers.Dense(1)(m)
        return m
"""
Explanation: LSTM network setup
LSTM will automatically adapt to the maximum sequence length of our data; we do not need to specify this maximum length as part of our model and we do not need to pad our variable length sequences in any way.
We model our network with H_DIMS = 14 LSTM cells, which we have found (by trying other values) produces good prediction results for our problem.
The output of the neural network is the total output for the day and each sequence for a given day has the same total output.
For example:
1.7,11.4 -> 10300
1.7,11.4,67.5 -> 10300
1.7,11.4,67.5,250.5 ... -> 10300
1.7,11.4,67.5,250.5,573.5 -> 10300
The outputs from the LSTMs are feed into a dense layer and we randomly dropout 20% of the values to not overfit the model to the training set. The output of the dense layer becomes the prediction our model generates.
Our LSTM model has the following design:
The network model is an exact translation of the network diagram above.
End of explanation
"""
# input sequences: one scalar reading per time step, variable length
x = C.sequence.input_variable(1)
# create the model
z = create_model(x)
# expected output (label); the model output's dynamic axes are reused so
# the label input lines up with the prediction for each sequence
l = C.input_variable(1, dynamic_axes=z.dynamic_axes, name="y")
# the learning rate
learning_rate = 0.005
lr_schedule = C.learning_rate_schedule(learning_rate, C.UnitType.minibatch)
# loss and error function; both are squared error here, so the reported
# evaluation metric is identical to the training loss
loss = C.squared_error(z, l)
error = C.squared_error(z, l)
# loss = C.abs(z-l)
# error = C.abs(z-l)
# use the fsadagrad learner (an Adam-family optimizer) with momentum
momentum_time_constant = C.momentum_as_time_constant_schedule(BATCH_SIZE / -math.log(0.9))
learner = C.fsadagrad(z.parameters,
                      lr = lr_schedule,
                      momentum = momentum_time_constant)
trainer = C.Trainer(z, (loss, error), [learner])
"""
Explanation: Training
Before we can start training we need to bind our input variables for the model and define which optimizer we want to use. For this example we choose the adam optimizer. We choose squared_error as our loss function.
End of explanation
"""
# training loop: one full pass over the training batches per epoch
loss_summary = []
start = time.time()
for epoch in range(0, EPOCHS):
    for x_batch, l_batch in next_batch(X, Y, "train"):
        trainer.train_minibatch({x: x_batch, l: l_batch})
    # log the loss ten times over the run; EPOCHS / 10 is a float, but the
    # modulo test still matches exactly when EPOCHS is a multiple of 10
    if epoch % (EPOCHS / 10) == 0:
        training_loss = trainer.previous_minibatch_loss_average
        loss_summary.append(training_loss)
        print("epoch: {}, loss: {:.4f}".format(epoch, training_loss))
print("Training took {:.1f} sec".format(time.time() - start))
"""
Explanation: Time to start training.
End of explanation
"""
plt.plot(loss_summary, label='training loss');
"""
Explanation: A look how the loss function shows how the model is converging:
End of explanation
"""
# Evaluate the model over every batch of one dataset split.
def get_error(X,Y,labeltxt):
    """Return the mean test_minibatch() error over the split *labeltxt*.

    X, Y: dicts of input sequences / labels keyed by "train"/"val"/"test".
    """
    total_error = 0.0
    batch_count = 0
    for batch_x, batch_y in next_batch(X, Y, labeltxt):
        # x and l are the module-level model input / label variables
        total_error += trainer.test_minibatch({x : batch_x, l : batch_y})
        batch_count += 1
    return float(total_error) / batch_count
# Print the training and validation errors
for labeltxt in ["train", "val"]:
print("Error for {}: {:.8f}".format(labeltxt, get_error(X, Y, labeltxt)))
# Print the test error
labeltxt = "test"
print("Error for {}: {:.8f}".format(labeltxt, get_error(X, Y, labeltxt)))
"""
Explanation: Let us evaluate our performance for the training, validation, and test datasets. We use mean squared error as our metric which might be a little simplistic. A method that would define a ratio of how many predictions have been inside a given tolerance would make a better measure.
Suggested Exploration: Try out absolute error (C.abs(z-l)) instead of a squared error.
End of explanation
"""
# predict: run the trained model over the validation and test splits and
# plot the predictions against the true daily maxima
f, a = plt.subplots(2, 1, figsize=(12, 8))
for j, ds in enumerate(["val", "test"]):
    results = []
    for x_batch, _ in next_batch(X, Y, ds):
        pred = z.eval({x: x_batch})
        results.extend(pred[:, 0])
    # because we normalized the input data we need to multiply the prediction
    # with NORMALIZE to get back the real (Watt-hour) values
    a[j].plot((Y[ds] * NORMALIZE).flatten(), label=ds + ' raw');
    a[j].plot(np.array(results) * NORMALIZE, label=ds + ' pred');
    a[j].legend();
"""
Explanation: Visualize the prediction
Our model has been trained well, given that the training, validation and test errors are in the same ballpark. To better understand our predictions, let's visualize the results. We will take our newly created model, make predictions and plot them against the actual readings.
End of explanation
"""
|
tensorflow/docs-l10n | site/ko/guide/ragged_tensor.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import math
import tensorflow as tf
"""
Explanation: 비정형 텐서
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/guide/ragged_tensor"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />TensorFlow.org에서 보기</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ko/guide/ragged_tensor.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />구글 코랩(Colab)에서 실행하기</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ko/guide/ragged_tensor.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />깃허브(GitHub)에서 소스 보기</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ko/guide/ragged_tensor.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />노트북 다운로드</a>
</td>
</table>
Note: 이 문서는 텐서플로 커뮤니티에서 번역했습니다. 커뮤니티 번역 활동의 특성상 정확한 번역과 최신 내용을 반영하기 위해 노력함에도
불구하고 공식 영문 문서의 내용과 일치하지 않을 수 있습니다.
이 번역에 개선할 부분이 있다면
tensorflow/docs-l10n 깃헙 저장소로 풀 리퀘스트를 보내주시기 바랍니다.
문서 번역이나 리뷰에 참여하려면
docs-ko@tensorflow.org로
메일을 보내주시기 바랍니다.
설정
End of explanation
"""
digits = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
words = tf.ragged.constant([["So", "long"], ["thanks", "for", "all", "the", "fish"]])
print(tf.add(digits, 3))
print(tf.reduce_mean(digits, axis=1))
print(tf.concat([digits, [[5, 3]]], axis=0))
print(tf.tile(digits, [1, 2]))
print(tf.strings.substr(words, 0, 2))
"""
Explanation: 개요
데이터는 다양한 형태로 제공됩니다; 텐서도 마찬가지입니다.
비정형 텐서는 중첩 가변 길이 목록에 해당하는 텐서플로입니다.
다음을 포함하여 균일하지 않은 모양으로 데이터를 쉽게 저장하고 처리할 수 있습니다:
일련의 영화의 배우들과 같은 가변 길이 기능
문장이나 비디오 클립과 같은 가변 길이 순차적 입력의 배치
절, 단락, 문장 및 단어로 세분화된 텍스트 문서와 같은 계층적 입력
프로토콜 버퍼와 같은 구조화된 입력의 개별 필드
비정형 텐서로 할 수 있는 일
비정형 텐서는 수학 연산 (예 : tf.add 및 tf.reduce_mean),
배열 연산 (예 : tf.concat 및 tf.tile),
문자열 조작 작업 (예 : tf.substr)을 포함하여 수백 가지 이상의 텐서플로 연산에서 지원됩니다
:
End of explanation
"""
print(digits[0]) # 첫 번째 행
print(digits[:, :2]) # 각 행의 처음 두 값
print(digits[:, -2:]) # 각 행의 마지막 두 값
"""
Explanation: 팩토리 메서드, 변환 메서드 및 값 매핑 연산을 포함하여 비정형 텐서에
고유한 여러 메서드 및 연산도 있습니다.
지원되는 작업 목록은 tf.ragged 패키지 문서를 참조하십시오.
일반 텐서와 마찬가지로, Python 스타일 인덱싱을 사용하여 비정형 텐서의 특정 부분에 접근할 수 있습니다.
자세한 내용은 아래
인덱싱 절을 참조하십시오.
End of explanation
"""
print(digits + 3)
print(digits + tf.ragged.constant([[1, 2, 3, 4], [], [5, 6, 7], [8], []]))
"""
Explanation: 일반 텐서와 마찬가지로, 파이썬 산술 및 비교 연산자를 사용하여 요소 별 연산을 수행할 수 있습니다.
자세한 내용은 아래의
오버로드된 연산자 절을 참조하십시오.
End of explanation
"""
times_two_plus_one = lambda x: x * 2 + 1
print(tf.ragged.map_flat_values(times_two_plus_one, digits))
"""
Explanation: RaggedTensor의 값으로 요소 별 변환을 수행해야하는 경우, 함수와 하나 이상의 매개변수를 갖는 tf.ragged.map_flat_values를 사용할 수 있고, RaggedTensor의 값을 변환할 때 적용할 수 있습니다.
End of explanation
"""
sentences = tf.ragged.constant([
["Let's", "build", "some", "ragged", "tensors", "!"],
["We", "can", "use", "tf.ragged.constant", "."]])
print(sentences)
paragraphs = tf.ragged.constant([
[['I', 'have', 'a', 'cat'], ['His', 'name', 'is', 'Mat']],
[['Do', 'you', 'want', 'to', 'come', 'visit'], ["I'm", 'free', 'tomorrow']],
])
print(paragraphs)
"""
Explanation: 비정형 텐서 생성하기
비정형 텐서를 생성하는 가장 간단한 방법은
tf.ragged.constant를 사용하는 것입니다. tf.ragged.constant는 주어진 중첩된 Python 목록에 해당하는 RaggedTensor를
빌드 합니다:
End of explanation
"""
print(tf.RaggedTensor.from_value_rowids(
values=[3, 1, 4, 1, 5, 9, 2, 6],
value_rowids=[0, 0, 0, 0, 2, 2, 2, 3]))
"""
Explanation: 비정형 텐서는 tf.RaggedTensor.from_value_rowids, tf.RaggedTensor.from_row_lengths 및 tf.RaggedTensor.from_row_splits와
tf.RaggedTensor.from_row_splits와 같은 팩토리 클래스 메서드를 사용하여
플랫 values 텐서와 행 분할 텐서를 쌍을 지어 해당 값을 행으로 분할하는 방법을 표시하는 방식으로도 생성할 수 있습니다.
tf.RaggedTensor.from_value_rowids
각 값이 속하는 행을 알고 있으면 value_rowids 행 분할 텐서를 사용하여 RaggedTensor를 빌드할 수 있습니다:
End of explanation
"""
print(tf.RaggedTensor.from_row_lengths(
values=[3, 1, 4, 1, 5, 9, 2, 6],
row_lengths=[4, 0, 3, 1]))
"""
Explanation: tf.RaggedTensor.from_row_lengths
각 행의 길이를 알고 있으면 row_lengths 행 분할 텐서를 사용할 수 있습니다:
End of explanation
"""
print(tf.RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2, 6],
row_splits=[0, 4, 4, 7, 8]))
"""
Explanation: tf.RaggedTensor.from_row_splits
각 행의 시작과 끝 인덱스를 알고 있다면 row_splits 행 분할 텐서를 사용할 수 있습니다:
End of explanation
"""
print(tf.ragged.constant([["Hi"], ["How", "are", "you"]])) # 좋음: 유형=문자열, 랭크=2
print(tf.ragged.constant([[[1, 2], [3]], [[4, 5]]])) # 좋음: 유형=32비트정수, 랭크=3
try:
tf.ragged.constant([["one", "two"], [3, 4]]) # 안좋음: 다수의 유형
except ValueError as exception:
print(exception)
try:
tf.ragged.constant(["A", ["B", "C"]]) # 안좋음: 다중첩 깊이
except ValueError as exception:
print(exception)
"""
Explanation: 팩토리 메서드의 전체 목록은 tf.RaggedTensor 클래스 문서를 참조하십시오.
비정형 텐서에 저장할 수 있는 것
일반 텐서와 마찬가지로, RaggedTensor의 값은 모두 같은 유형이어야 합니다;
값은 모두 동일한 중첩 깊이 (텐서의 랭크)에
있어야 합니다:
End of explanation
"""
queries = tf.ragged.constant([['Who', 'is', 'Dan', 'Smith'],
['Pause'],
['Will', 'it', 'rain', 'later', 'today']])
# 임베딩 테이블 만들기
num_buckets = 1024
embedding_size = 4
embedding_table = tf.Variable(
tf.random.truncated_normal([num_buckets, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
# 각 단어에 대한 임베딩 찾기
word_buckets = tf.strings.to_hash_bucket_fast(queries, num_buckets)
word_embeddings = tf.ragged.map_flat_values(
tf.nn.embedding_lookup, embedding_table, word_buckets) # ①
# 각 문장의 시작과 끝에 마커 추가하기
marker = tf.fill([queries.nrows(), 1], '#')
padded = tf.concat([marker, queries, marker], axis=1) # ②
# 바이그램 빌드 & 임베딩 찾기
bigrams = tf.strings.join([padded[:, :-1],
padded[:, 1:]],
separator='+') # ③
bigram_buckets = tf.strings.to_hash_bucket_fast(bigrams, num_buckets)
bigram_embeddings = tf.ragged.map_flat_values(
tf.nn.embedding_lookup, embedding_table, bigram_buckets) # ④
# 각 문장의 평균 임베딩 찾기
all_embeddings = tf.concat([word_embeddings, bigram_embeddings], axis=1) # ⑤
avg_embedding = tf.reduce_mean(all_embeddings, axis=1) # ⑥
print(avg_embedding)
"""
Explanation: 사용 예시
다음 예제는 RaggedTensor를 사용하여 각 문장의 시작과 끝에 특수 마커를 사용하여
가변 길이 쿼리 배치에 대한 유니그램 및 바이그램 임베딩을 생성하고 결합하는 방법을 보여줍니다.
이 예제에서 사용된 작업에 대한 자세한 내용은
tf.ragged 패키지 설명서를 참조하십시오.
End of explanation
"""
tf.ragged.constant([["Hi"], ["How", "are", "you"]]).shape
"""
Explanation: 비정형 텐서: 정의
비정형 및 정형 차원
비정형 텐서는 슬라이스의 길이가 다를 수 있는 하나 이상의 비정형 크기를 갖는 텐서입니다.
예를 들어,
rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []] 의 내부 (열) 크기는
열 슬라이스(rt[0, :], ..., rt[4, :])의 길이가 다르기 때문에 비정형입니다.
부분의 길이가 모두 같은 차원을 정형차원이라고 합니다.
비정형 텐서의 가장 바깥 쪽 차원은 단일 슬라이스로 구성되므로 슬라이스의 길이가
다를 가능성이 없으므로 항상 균일합니다.
비정형 텐서는 균일한 가장 바깥 쪽 차원에 더하여 균일한 내부 차원을 가질 수도 있습니다.
예를 들어, [num_sentences, (num_words), embedding_size] 형태의 비정형 텐서를 사용하여
각 단어에 대한 단어 임베딩을 일련의 문장으로 저장할 수 있습니다.
여기서 (num_words)의 괄호는 차원이 비정형임을 나타냅니다.
비정형 텐서는 다수의 비정형 차원을 가질 수 있습니다. 예를 들어
모양이 [num_documents, (num_paragraphs), (num_sentences), (num_words)] 인
텐서를 사용하여 일련의 구조화된 텍스트 문서를 저장할 수 있습니다.
(여기서 괄호는 비정형 차원임을 나타냅니다.)
비정형 텐서 형태 제한
비정형 텐서의 형태는 다음과 같은 형식으로 제한됩니다:
단일 정형 차원
하나 이상의 비정형 차원
0 또는 그 이상의 정형 차원
참고: 이러한 제한은 현재 구현의 결과이며
향후 완화될 수 있습니다.
랭크 및 비정형 랭크
비정형 텐서의 총 차원 수를 랭크라고 하고,
비정형 텐서의 비정형 차원 수를 비정형랭크라고 합니다. 그래프 실행 모드 (즉, 비 즉시 실행(non-eager) 모드)에서, 텐서의 비정형 랭크는
생성 시 고정됩니다: 비정형 랭크는 런타임 값에 의존할 수 없으며 다른 세션 실행에 따라
동적으로 변할 수 없습니다.
잠재적으로 비정형인 텐서는
tf.Tensor 또는 tf.RaggedTensor 일 수 있는 값입니다.
tf.Tensor의 비정형 랭크는 0으로 정의됩니다.
비정형 텐서 형태
비정형 텐서의 형태를 설명할 때, 비정형 차원은 괄호로 묶어 표시됩니다.
예를 들어, 위에서 살펴본 것처럼 일련의 문장에서 각 단어에 대한 단어 임베딩을 저장하는
3차원 비정형텐서의 형태는
[num_sentences, (num_words), embedding_size]로 나타낼 수 있습니다.
RaggedTensor.shape 프로퍼티는 비정형 텐서에 대해 크기가 없는 비정형 차원인 tf.TensorShape를 반환합니다:
End of explanation
"""
print(tf.ragged.constant([["Hi"], ["How", "are", "you"]]).bounding_shape())
"""
Explanation: tf.RaggedTensor.bounding_shape 메서드를 사용하여 지정된
RaggedTensor에 대한 빈틈이 없는 경계 형태를 찾을 수 있습니다:
End of explanation
"""
ragged_x = tf.ragged.constant([["John"], ["a", "big", "dog"], ["my", "cat"]])
ragged_y = tf.ragged.constant([["fell", "asleep"], ["barked"], ["is", "fuzzy"]])
print(tf.concat([ragged_x, ragged_y], axis=1))
"""
Explanation: 비정형 vs 희소 텐서
비정형텐서는 희소 텐서의 유형이 아니라
불규칙한 형태의 밀집 텐서로 간주되어야 합니다.
예를 들어, 비정형 vs 희소 텐서에 대해 concat,
stack 및 tile과 같은 배열 연산이 어떻게 정의되는지 고려하십시오.
비정형 텐서들을 연결하면 각 행을 결합하여 단일 행을 형성합니다:
End of explanation
"""
sparse_x = ragged_x.to_sparse()
sparse_y = ragged_y.to_sparse()
sparse_result = tf.sparse.concat(sp_inputs=[sparse_x, sparse_y], axis=1)
print(tf.sparse.to_dense(sparse_result, ''))
"""
Explanation: 그러나 희소 텐서를 연결하는 것은 다음 예에 표시된 것처럼
해당 밀집 텐서를 연결하는 것과 같습니다. (여기서 Ø는 누락된 값을 나타냅니다.):
End of explanation
"""
x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
y = tf.ragged.constant([[1, 1], [2], [3, 3, 3]])
print(x + y)
"""
Explanation: 이 구별이 중요한 이유의 다른 예를 보려면,
tf.reduce_mean과 같은 연산에 대한 “각 행의 평균값”의 정의를 고려하십시오.
비정형 텐서의 경우, 행의 평균값은 행 값을 행 너비로 나눈 값의 합입니다.
그러나 희소 텐서의 경우 행의 평균값은
행 값의 합계롤 희소 텐서의 전체 너비(가장 긴 행의 너비 이상)로
나눈 값입니다.
오버로드된 연산자
RaggedTensor 클래스는 표준 Python 산술 및 비교 연산자를 오버로드하여
기본 요소 별 수학을 쉽게 수행할 수 있습니다:
End of explanation
"""
x = tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
print(x + 3)
"""
Explanation: 오버로드된 연산자는 요소 단위 계산을 수행하므로, 모든
이진 연산에 대한 입력은 동일한 형태이거나, 동일한 형태로 브로드캐스팅 할 수 있어야 합니다.
가장 간단한 확장의 경우, 단일 스칼라가 비정형 텐서의
각 값과 요소 별로 결합됩니다:
End of explanation
"""
queries = tf.ragged.constant(
[['Who', 'is', 'George', 'Washington'],
['What', 'is', 'the', 'weather', 'tomorrow'],
['Goodnight']])
print(queries[1])
print(queries[1, 2]) # 한 단어
print(queries[1:]) # 첫 번째 행을 제외한 모든 단어
print(queries[:, :3]) # 각 쿼리의 처음 세 단어
print(queries[:, -2:]) # 각 쿼리의 마지막 두 단어
"""
Explanation: 고급 사례에 대한 설명은 브로드캐스팅 절을
참조하십시오.
비정형 텐서는 일반 텐서와 동일한 연산자 세트를 오버로드합니다:단항
연산자 -, ~ 및 abs(); 그리고 이항 연산자 +, -, *, /,
//, %, **, &, |, ^, ==, <, <=, > 및 >=.
인덱싱
비정형 텐서는 다차원 인덱싱 및 슬라이싱을 포함하여 Python 스타일 인덱싱을 지원합니다.
다음 예는 2차원 및 3차원 비정형 텐서를 사용한 비정형 텐서 인덱싱을
보여줍니다.
비정형 1차원으로 2차원 비정형 텐서 인덱싱
End of explanation
"""
rt = tf.ragged.constant([[[1, 2, 3], [4]],
[[5], [], [6]],
[[7]],
[[8, 9], [10]]])
print(rt[1]) # 두 번째 행 (2차원 비정형 텐서)
print(rt[3, 0]) # 네 번째 행의 첫 번째 요소 (1차원 텐서)
print(rt[:, 1:3]) # 각 행의 1-3 항목 (3차원 비정형 텐서)
print(rt[:, -1:]) # 각 행의 마지막 항목 (3차원 비정형 텐서)
"""
Explanation: 비정형 2차원으로 3차원 비정형 텐서 인덱싱
End of explanation
"""
ragged_sentences = tf.ragged.constant([
['Hi'], ['Welcome', 'to', 'the', 'fair'], ['Have', 'fun']])
print(ragged_sentences.to_tensor(default_value=''))
print(ragged_sentences.to_sparse())
x = [[1, 3, -1, -1], [2, -1, -1, -1], [4, 5, 8, 9]]
print(tf.RaggedTensor.from_tensor(x, padding=-1))
st = tf.SparseTensor(indices=[[0, 0], [2, 0], [2, 1]],
values=['a', 'b', 'c'],
dense_shape=[3, 3])
print(tf.RaggedTensor.from_sparse(st))
"""
Explanation: RaggedTensor는 다차원 인덱싱 및 슬라이싱을 지원하며, 한 가지 제한 사항이
있습니다: 비정형 차원으로 인덱싱할 수 없습니다. 이 값은
표시된 값이 일부 행에 존재할 수 있지만 다른 행에는 존재하지 않기 때문에 문제가 됩니다.
그러한 경우, 우리가 (1) IndexError를 제기해야 하는지; (2)
기본값을 사용해야 하는지; 또는 (3) 그 값을 스킵하고 시작한 것보다 적은 행을 가진 텐서를 반환해야 하는지
에 대한 여부는 확실하지 않습니다.
Python의 안내 지침
("애매한 상황에서
추측하려고 하지 마십시오" )에 따라, 현재 이 작업을 허용하지
않습니다.
텐서 형 변환
RaggedTensor 클래스는
RaggedTensor와 tf.Tensor 또는 tf.SparseTensors 사이를 변환하는데 사용할 수 있는 메서드를 정의합니다:
End of explanation
"""
rt = tf.ragged.constant([[1, 2], [3, 4, 5], [6], [], [7]])
print(rt.to_list())
"""
Explanation: 비정형 텐서 평가
즉시 실행
즉시 실행 모드에서는, 비정형 텐서가 즉시 실행됩니다. 포함된 값에
접근하려면 다음을 수행하십시오:
비정형 텐서를 Python 목록으로 변환하는
tf.RaggedTensor.to_list()
메서드를 사용하십시오.
End of explanation
"""
print(rt[1].numpy())
"""
Explanation: Python 인덱싱을 사용하십시오. 선택한 텐서 조각에 비정형 차원이 없으면,
EagerTensor로 반환됩니다. 그런 다음
numpy()메서드를 사용하여 값에 직접 접근할 수 있습니다.
End of explanation
"""
print(rt.values)
print(rt.row_splits)
"""
Explanation: tf.RaggedTensor.values 및
tf.RaggedTensor.row_splits 특성 또는
tf.RaggedTensor.row_lengths() 및
tf.RaggedTensor.value_rowids()와 같은 행 분할 메서드를 사용하여
비정형 텐서를 구성 요소로
분해하십시오.
End of explanation
"""
# x (2D ragged): 2 x (num_rows)
# y (scalar)
# 결과 (2D ragged): 2 x (num_rows)
x = tf.ragged.constant([[1, 2], [3]])
y = 3
print(x + y)
# x (2d ragged): 3 x (num_rows)
# y (2d tensor): 3 x 1
# 결과 (2d ragged): 3 x (num_rows)
x = tf.ragged.constant(
[[10, 87, 12],
[19, 53],
[12, 32]])
y = [[1000], [2000], [3000]]
print(x + y)
# x (3d ragged): 2 x (r1) x 2
# y (2d ragged): 1 x 1
# 결과 (3d ragged): 2 x (r1) x 2
x = tf.ragged.constant(
[[[1, 2], [3, 4], [5, 6]],
[[7, 8]]],
ragged_rank=1)
y = tf.constant([[10]])
print(x + y)
# x (3d ragged): 2 x (r1) x (r2) x 1
# y (1d tensor): 3
# 결과 (3d ragged): 2 x (r1) x (r2) x 3
x = tf.ragged.constant(
[
[
[[1], [2]],
[],
[[3]],
[[4]],
],
[
[[5], [6]],
[[7]]
]
],
ragged_rank=2)
y = tf.constant([10, 20, 30])
print(x + y)
"""
Explanation: 브로드캐스팅
브로드캐스팅은 다른 형태의 텐서가 요소 별 연산에 적합한 형태를 갖도록 만드는 프로세스입니다.
브로드캐스팅에 대한 자세한 내용은
다음을 참조하십시오:
Numpy: 브로드캐스팅
tf.broadcast_dynamic_shape
tf.broadcast_to
호환 가능한 형태를 갖도록 두 개의 입력 x 와 y 를 브로드캐스팅하는 기본 단계는
다음과 같습니다:
x 와 y 의 차원 수가 동일하지 않은 경우, 외부 차원
(크기 1)을 차원 수가 동일해질 때까지 추가합니다 .
x 와 y 의 크기가 다른 각 차원에 대해:
차원 d에 x 또는 y의 크기가 1 이면, 다른 입력의 크기와 일치하도록
차원 d에서 값을 반복하십시오.
그렇지 않으면 예외가 발생합니다 (x 와 y 는 브로드캐스트와 호환되지
않습니다).
정형 차원에서 텐서의 크기가 단일 숫자 (해당 차원에서
슬라이스 크기)인 경우; 그리고 비정형 차원에서 텐서의 크기가 슬라이스 길이의 목록인 경우
(해당 차원의 모든 슬라이스에 대해).
브로드캐스팅 예제
End of explanation
"""
# x (2d ragged): 3 x (r1)
# y (2d tensor): 3 x 4 # 뒤의 차원은 일치하지 않습니다.
x = tf.ragged.constant([[1, 2], [3, 4, 5, 6], [7]])
y = tf.constant([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
try:
x + y
except tf.errors.InvalidArgumentError as exception:
print(exception)
# x (2d ragged): 3 x (r1)
# y (2d ragged): 3 x (r2) # 비정형 차원은 일치하지 않습니다.
x = tf.ragged.constant([[1, 2, 3], [4], [5, 6]])
y = tf.ragged.constant([[10, 20], [30, 40], [50]])
try:
x + y
except tf.errors.InvalidArgumentError as exception:
print(exception)
# x (3d ragged): 3 x (r1) x 2
# y (3d ragged): 3 x (r1) x 3 # 뒤의 차원은 일치하지 않습니다.
x = tf.ragged.constant([[[1, 2], [3, 4], [5, 6]],
[[7, 8], [9, 10]]])
y = tf.ragged.constant([[[1, 2, 0], [3, 4, 0], [5, 6, 0]],
[[7, 8, 0], [9, 10, 0]]])
try:
x + y
except tf.errors.InvalidArgumentError as exception:
print(exception)
"""
Explanation: 브로드캐스트 하지 않는 형태의 예는 다음과 같습니다:
End of explanation
"""
rt = tf.RaggedTensor.from_row_splits(
values=[3, 1, 4, 1, 5, 9, 2],
row_splits=[0, 4, 4, 6, 7])
print(rt)
"""
Explanation: RaggedTensor 인코딩
비정형텐서는 RaggedTensor 클래스를 사용하여 인코딩됩니다. 내부적으로, 각
RaggedTensor는 다음으로 구성됩니다:
가변 길이 행을 병합된 목록으로 연결하는 values
텐서
병합된 값을 행으로 나누는 방법을 나타내는 row_splits 벡터,
특히, 행 rt[i]의 값은 슬라이스
rt.values[rt.row_splits[i]:rt.row_splits[i+1]]에 저장됩니다.
End of explanation
"""
rt = tf.RaggedTensor.from_row_splits(
values=tf.RaggedTensor.from_row_splits(
values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
row_splits=[0, 3, 3, 5, 9, 10]),
row_splits=[0, 1, 1, 5])
print(rt)
print("형태: {}".format(rt.shape))
print("비정형 텐서의 차원 : {}".format(rt.ragged_rank))
"""
Explanation: 다수의 비정형 차원
다수의 비정형 차원을 갖는 비정형 텐서는
values 텐서에 대해 중첩된 RaggedTensor를 사용하여 인코딩됩니다. 중첩된 각 RaggedTensor는
단일 비정형 차원을 추가합니다.
End of explanation
"""
rt = tf.RaggedTensor.from_nested_row_splits(
flat_values=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
nested_row_splits=([0, 1, 1, 5], [0, 3, 3, 5, 9, 10]))
print(rt)
"""
Explanation: 팩토리 함수 tf.RaggedTensor.from_nested_row_splits는
row_splits 텐서 목록을 제공하여 다수의 비정형 차원으로 RaggedTensor를
직접 생성하는데 사용할 수 있습니다:
End of explanation
"""
rt = tf.RaggedTensor.from_row_splits(
values=[[1, 3], [0, 0], [1, 3], [5, 3], [3, 3], [1, 2]],
row_splits=[0, 3, 4, 6])
print(rt)
print("형태: {}".format(rt.shape))
print("비정형 텐서의 차원 : {}".format(rt.ragged_rank))
"""
Explanation: 정형한 내부 차원
내부 차원이 정형한 비정형 텐서는
values에 다차원 tf.Tensor를 사용하여 인코딩됩니다.
End of explanation
"""
values = [3, 1, 4, 1, 5, 9, 2, 6]
print(tf.RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8]))
print(tf.RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0]))
print(tf.RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8]))
print(tf.RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8]))
print(tf.RaggedTensor.from_value_rowids(
values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5))
"""
Explanation: 대체 가능한 행 분할 방식
RaggedTensor 클래스는 row_splits를 기본 메커니즘으로 사용하여
값이 행으로 분할되는 방법에 대한 정보를 저장합니다. 그러나,
RaggedTensor는 네 가지 대체 가능한 행 분할 방식을 지원하므로 데이터 형식에 따라 더 편리하게
사용할 수 있습니다.
내부적으로, RaggedTensor는 이러한 추가적인 방식을 사용하여 일부 컨텍스트에서 효율성을
향상시킵니다.
<dl>
<dt>행 길이</dt>
<dd>`row_lengths`는 `[nrows]`형태의 벡터로, 각 행의 길이를
지정합니다.</dd>
<dt>행 시작</dt>
<dd>`row_starts`는 `[nrows]`형태의 벡터로, 각 행의 시작 오프셋을
지정합니다. `row_splits[:-1]`와 같습니다.</dd>
<dt>행 제한</dt>
<dd>`row_limits`는 `[nrows]`형태의 벡터로, 각 행의 정지 오프셋을
지정합니다. `row_splits[1:]`와 같습니다.</dd>
<dt>행 인덱스 및 행 수</dt>
<dd>`value_rowids`는 `[nvals]`모양의 벡터로, 값과 일대일로 대응되며
각 값의 행 인덱스를 지정합니다.
특히, `rt[row]`행은 `value_rowids[j]==row`인 `rt.values[j]`값으로 구성됩니다.
`nrows`는
`RaggedTensor`의 행 수를 지정하는 정수입니다.
특히, `nrows`는 뒤의 빈 행을 나타내는데
사용됩니다.</dd>
</dl>
예를 들어, 다음과 같이 비정형 텐서는 동일합니다:
End of explanation
"""
rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
print(" values: {}".format(rt.values))
print(" row_splits: {}".format(rt.row_splits))
print(" row_lengths: {}".format(rt.row_lengths()))
print(" row_starts: {}".format(rt.row_starts()))
print(" row_limits: {}".format(rt.row_limits()))
print("value_rowids: {}".format(rt.value_rowids()))
"""
Explanation: RaggedTensor 클래스는 이러한 각 행 분할 텐서를 생성하는데 사용할 수 있는
메서드를 정의합니다.
End of explanation
"""
|
saturn77/CythonBootstrap | CythonBootstrap.ipynb | gpl-2.0 | %%file ./src/helloCython.pyx
import cython
import sys
def message():
print(" Hello World ....\n")
print(" Hello Central Ohio Python User Group ...\n")
print(" The 614 > 650::True")
print(" Another line ")
print(" The Python version is %s" % sys.version)
print(" The Cython version is %s" % cython.__version__)
print(" I hope that you learn something useful . . . .")
def main():
message()
%%file ./src/cyMath.pyx
# Cython Fibonacci printer: the typed C ints let the loop arithmetic run
# without Python object overhead.
import cython
def cy_fib(int n):
    """Print the Fibonacci series up to n."""
    cdef int a = 0      # previous term
    cdef int b = 1      # current term
    cdef int c = 0      # NOTE(review): declared but never used
    cdef int index = 0  # position of the current term in the series
    while b < n:
        print ("%d, %d, \n" % (index, b) )
        a, b = b, a + b
        index += 1
%%file ./src/printString.pyx
import cython
def display(char *bytestring):
    """ Print out a bytestring byte by byte. """
    # Iterating a C char* yields each character as its numeric byte code
    # (e.g. '1' -> 49), as the ord() comparison later in this notebook shows.
    cdef char byte
    for byte in bytestring:
        print(byte)
%%file ./src/bits.pyx
import cython
def cy_reflect(int reg, int bits):
    """ Reverse all the bits in a register.
    reg = input register
    r = output register
    """
    cdef int x
    cdef int y
    cdef int r
    x = 1 << (bits-1)   # mask walking down from the most-significant bit
    y = 1               # mask walking up from the least-significant bit
    r = 0
    while x:
        if reg & x:
            r |= y
        x = x >> 1
        y = y << 1
    return r
def reflect(self,s, bits=8):
    """ Take a binary number (byte) and reflect the bits. """
    # NOTE(review): the 'self' parameter looks like a leftover from a class
    # method -- this is a module-level function, so callers must pass a
    # dummy first argument.  Pure-Python twin of cy_reflect above.
    x = 1<<(bits-1)
    y = 1
    r = 0
    while x:
        if s & x:
            r |= y
        x = x >> 1
        y = y << 1
    return r
%%file ./src/setup.py
from distutils.core import setup, Extension
from Cython.Build import cythonize
#=========================================
# Setup the extensions
#=========================================
sources = [ "./src/cyMath.pyx", "./src/helloCython.pyx",
"./src/cy_math.pyx", "./src/bits.pyx",
"./src/printString.pyx"]
#for fileName in sources:
# setup(ext_modules=cythonize(str(fileName)))
map(lambda fileName : setup(ext_modules=cythonize(str(fileName))), sources)
!python ./src/setup.py build_ext --inplace
from src import helloCython
helloCython.message()
from src import cyMath
cyMath.cy_fib(100)
from src import bits
from bits import cy_reflect
hexlist = [int(0x01),int(0x02),int(0x04),int(0x08)]
[hex(cy_reflect(item,8)) for item in hexlist]
from src import printString
printString.display('123')
# A little list comprehension here ...
# A comparative method to the Cython printString function
numberList = [1,2,3]
[ord(str(value)) for value in numberList]
"""
Explanation: Cython -- A Transcompiler Language
Transform Your Python !!
By James Bonanno, Central Ohio Python Presentation, March 2015
There are many cases where you simply want to speed up an existing Python design — in particular, to code in Python until things work, then optimize (yes, early optimization is the root of all evil, but it's even more sinister if you run out of ways to optimize your code).
What is is good for?
for making Python faster,
for making Python faster in an easy way
for wrapping external C and C++
making Python accessible to C and C++ (going the other way)
This presentation seeks primarily to discuss ways to transform your Python code and use it in a Python project.
References
The new book by Kurt Smith is well written, clear in explanations, and the best overall treatment of Cython out there. An excellent book !! The book by Gorelick and Ozsvald is a good treatment, and it compares different methods of optimizing python including Shedskin, Theano, Numba, etc.
1] Kurt W. Smith Cython, A Guide for Python Programmers, O'Reilly, January 2015
2] Mich Gorelick & Ian Ozsvald High Performance Python -- Practical Performant Programming for Humans O'Reilly September 2014
3] David Beazley and Brian K Jones, Python Cookbook, 3rd Edition, Printed May 2013, O'Reilly -- Chapter 15, page 632
Why CYTHON?
It's more versatile than all the competition and has a manageable syntax. I highly recommend Kurt Smith's book on Cython. It's thorough, and if you read chapter 3, you will take in the essence of working with Cython functions.
Make sure to check out the new, improved documentation for Cython at:
http://docs.cython.org/index.html
This presentation will focus on using Cython to speed up Python functions, with some attention also given to arrays and numpy. There are more sophisticated treatments of using dynamically allocated memory, such as typically done with C and C++.
A good link on memory allocation, where the heap is used with malloc():
http://docs.cython.org/src/tutorial/memory_allocation.html?highlight=numpy
Getting Started:: Cython function types...
You must use "cdef" when defining a type inside of a function. For example,
python
def quad(int k):
cdef int alpha = 1.5
return alpha*(k**2)
People often get confused when using def, cdef, and cpdef.
The key factors are
def is importable into python
cdef is importable into C, but not python
cpdef is importable into both
Getting Started:: Cythonizing a Python function
Now, if you were going to put pure cython code into action within your editor, say Wing IDE
or PyCharm, you would want to define something like this in a file say for example cy_math.pyx
Now, let's start with the familiar Fibonacci series ...
```python
import cython
def cy_fib(int n):
"""Print the Fibonacci series up to n."""
cdef int a = 0
cdef int b = 1
cdef int index = 0
while b < n:
print ("%d, %d, \n" % (index, b) )
a, b = b, a + b
index += 1
```
Getting Started:: A Distutils setup.py ...
```python
from distutils.core import setup, Extension
from Cython.Build import cythonize
=========================================
Setup the extensions
=========================================
sources = [ "cyMath.pyx", "helloCython.pyx","cy_math.pyx", "bits.pyx", "printString.pyx"]
for fileName in sources:
setup(ext_modules=cythonize(str(fileName)))
or...
map(lambda fileName : setup(ext_modules=cythonize(str(fileName))), sources)
```
End of explanation
"""
%%file ./src/cyFib.pyx
# Typed Cython Fibonacci: returns the first Fibonacci number >= n.
def cyfib(int n):
    cdef int a = 0
    cdef int b = 1
    cdef int index = 0  # iteration counter (computed but not returned)
    while b < n:
        a, b = b, a+b
        index += 1
    return b
"""
Explanation: Now let's see the time difference between a cyfib and pyfib ...
End of explanation
"""
!makecython ./src/cyFib.pyx
def pyfib(n):
    """Pure-Python twin of cyfib: return the first Fibonacci number >= n."""
    prev, curr = 0, 1
    steps = 0  # mirrors the original's unused iteration counter
    while curr < n:
        prev, curr = curr, prev + curr
        steps += 1
    return curr
%timeit pyfib(1000)
import cyFib
%timeit cyFib.cyfib(1000)
"""
Explanation: Introducing runcython !!
Is located on Github
Easy installation == pip install runcython
Russell91 on Github
https://github.com/Russell91/runcython
There is a runcython and makecython function calls . . . . .
End of explanation
"""
import dis
dis.dis(pyfib)
import cProfile
cProfile.run('pyfib(1000)')
"""
Explanation: NOW THAT IS A CONSIDERABLE SPEEDUP ...
The Fibonacci function shows a speedup factor of over 1500%
Let's take a look at disassembly for some reasons for this ....
End of explanation
"""
%%file ./src/cyPoly.pyx
# Lazily evaluate a quadratic polynomial over range(k); returns a map object.
# NOTE(review): the leading coefficient here is 1.0, while the pure-Python
# pypoly (and the prose) use 0.1 -- confirm which is intended.  The 'n'
# argument is unused.
def cypoly(int n, int k):
    return map(lambda x:(1.0*x**2 + 0.5*x + 0.25*x), range(k))
!makecython ./src/cyPoly.pyx
def pypoly(n, k):
    """Lazily evaluate .1*x**2 + .5*x + 0.25*x for x in range(k).

    Returns a map object (an iterator); the 'n' argument is unused.
    """
    quadratic = lambda t: .1*t**2 + .5*t + 0.25*t
    return map(quadratic, range(k))
"""
Explanation: Another Example, with a polynomial this time ...
For now, lets begin with a polynomial function, and compare how to do this in python and cython! ....
Now consider a function such as
$f(x) = a_0x^n + a_1x^{(n-1)} + a_2x^{(n-2)} ..... a_nx^0$
where in the case below n is selected as 2, and
- $a_0 = 0.1$,
- $a_1=0.5$
- $a_2=0.25$.
The cython function to do this called "cypoly" while the python version is called "pypoly". Each function is defined with a functional programming techinque of lambda and map, as shown below.
End of explanation
"""
from src import cyPoly
cyPoly.cypoly(4,50)
pypoly(4,50)
"""
Explanation: Now to compare the two ....
End of explanation
"""
%%file ./src/sineWave.pyx
import cython
from libc.math cimport sin  # C library sin(), bypassing Python's math module
def sinewave(double x):
    """Return sin(x) for a single angle x (radians), via the C math library."""
    return sin(x)
!makecython ./src/sineWave.pyx
from src import sineWave
import math
angle90 = math.pi/2
sineWave.sinewave(angle90)
"""
Explanation: Now's lets do something graphically, like plot a trig function. Let's also use a float/double type.
End of explanation
"""
%matplotlib inline
import numpy as np
x = np.linspace(0,2*np.pi,2000)
%timeit plot(x,np.sin(x),'r')
## %timeit plot(x,sineWave.sinewave(x),'r') <== Why is this a problem ??
xlim(0,6.28)
title('Sinewave for Array Data')
grid(True)
%%file ./src/myFunc.pyx
import cython
import numpy as np
cimport numpy as np
@cython.boundscheck(False)  # skip per-access bounds checks for speed
@cython.wraparound(False)   # disallow negative indexing for speed
def myfunc(np.ndarray[double, ndim=1] A):
    """Return np.sin(A) for a 1-D float64 array (typed-buffer signature)."""
    return np.sin(A)
!makecython ./src/myFunc.pyx
%matplotlib inline
from src import myFunc
import cython
import numpy as np
x = np.linspace(0,2*np.pi,2000)
y = myFunc.myfunc(x)
%timeit plot(x,y,'r')
xlim(0,6.28)
title('Sinewave for Array Data with Cython')
grid(True)
"""
Explanation: Now let's looking a data that involves arrays, and look at both python and numpy versions as well.
End of explanation
"""
!python-config --cflags
!python-config --ldflags
!ls -a ./src
%%file ./src/exponent.pyx
"""
module:: Cython file demonstrating the cython.locals decorator for typing.
"""
import cython
@cython.locals(a = cython.int, x = cython.int, y = cython.int)
def power(a, x):
    """Return a**x; cython.locals types the arguments and the local y."""
    y = a**x
    return y
!makecython ./src/exponent.pyx
from src import exponent
exponent?
import src
%%file ./src/setup.py
from distutils.core import setup, Extension
from Cython.Build import cythonize
#=========================================
# Setup the extensions
#=========================================
sources = [ "./src/cyMath.pyx", "./src/helloCython.pyx",
"./src/cy_math.pyx", "./src/bits.pyx",
"./src/printString.pyx", "./src/exponent.pyx"]
#for fileName in sources:
# setup(ext_modules=cythonize(str(fileName)))
map(lambda fileName : setup(ext_modules=cythonize(str(fileName))), sources)
!python ./src/setup.py build_ext --inplace
from src import exponent
[exponent.power(x,3) for x in range(10)]
def quadPy(a, x):
    """Return a * x**2 (pure-Python quadratic term, used for timing)."""
    return a * x ** 2
%timeit quadPy(2.0, 5.0)
"""
Explanation: Summary & Conclusions
This talk has presented the basics of getting started with Cython and IPython/Jupyter Notebook. There were examples presented on how to compile Cython programs with a setup.py and distuils as well as a nice application, runcython. Basic programs and some programs with arrays were demonstrated.
Cython is flexible, and its flexibility is matched by its performance.
It's relatively easy to use, but it does have some details to watch out for when working with arrays, references, etc.
Overall
Cython enables Python code to be transformed easily
The transformed Python code is signficantly faster
Wide support and documentation exists for Cython
Language has evolved and grown over the past few years with widespread support
Usage in Ipython Notebook / Jupyter is now well supported
Can be used on a wide variety of programs, ranging from math to logic.
Transform your Python with Cython !!
End of explanation
"""
|
mkcor/advanced-pandas | notebooks/05_sql.ipynb | cc0-1.0 | import pandas as pd
"""
Explanation: SQL-type operations
If you know something about relational databases and SQL, you may have heard of JOIN and GROUP BY.
End of explanation
"""
# Load the monthly-mean CO2 series: Mauna Loa observatory and the global
# average.  -99.99 is the files' missing-value sentinel.
mlo = pd.read_csv('../data/co2-mm-mlo.csv', na_values=-99.99, index_col='Date', parse_dates=True)
gl = pd.read_csv('../data/co2-mm-gl.csv', na_values=-99.99, index_col='Date', parse_dates=True)
# pd.read_csv('https://python.g-node.org/wiki/_media/co2-mm-mlo.csv')
# Keep only the 'Average' column of each, renamed so the sources stay
# distinguishable after a join.
ml = mlo[['Average']]
gl = gl[['Average']]
ml.columns = ['Average_mlo']
gl.columns = ['Average_gl']
# Align the start dates, then keep just the first five rows of each.
ml = ml[ml.index >= '1980-01']
ml = ml.head()
gl = gl.head()
"""
Explanation: Joining
End of explanation
"""
pd.concat([ml, gl])
"""
Explanation: We can concatenate the two DataFrames.
End of explanation
"""
ml.append(gl)
"""
Explanation: Alternatively, the above can be obtained with the self-describing append() method.
End of explanation
"""
pd.concat([ml, gl], axis=1)
"""
Explanation: By default, concat() concatenates along the rows (axis 0). What we did previously was 'concatenating' along the columns (axis 1). It is actually a join operation, on (index or key) Date.
End of explanation
"""
ml.join(gl)
"""
Explanation: Indeed, we could alternatively use the join() method.
End of explanation
"""
# Reload the full Mauna Loa series (not truncated to head()) and keep only
# the 'Average' column, so the joins below have many more dates than gl.
mlo = pd.read_csv('../data/co2-mm-mlo.csv', na_values=-99.99, index_col='Date', parse_dates=True)
mlo = mlo[['Average']]
mlo.head()
mlo.join(gl)
mlo.join(gl, how='inner')
"""
Explanation: Wait! Which frame's index is used? JOIN can be left, right, outer, or inner (picture unions and intersections).
End of explanation
"""
# A small Series over a 2-level MultiIndex (x, y): four rows in total.
z = pd.Series([0.5, 0.8, 0.6, 0.3], index=pd.MultiIndex.from_product([[0, 1], [0, 1]], names=['x', 'y']))
z
z.groupby('x')  # a GroupBy object; nothing is computed until aggregation
z.groupby('x').apply(lambda u: u.min())  # per-group minimum (one value per x)
"""
Explanation: Hands-on exercise
How else, still using the join() method, could we obtain Average and Average_gl for dates between 1980-01 and 1980-05?
Grouping
We introduce the split-apply-combine approach:
split the data into groups;
apply a function to each group independently;
combine the results into an appropriate data structure.
End of explanation
"""
z.groupby(level=0).apply(lambda u: u.min())
"""
Explanation: The function in question is an aggregation (for each group, return the minimum value). The length of the result is the number of different groups (here, number of unique x values). Aggregation reduces the size of the data structure.
End of explanation
"""
z.groupby('x').min()
"""
Explanation: The aggregation function can be applied directly, if it is available.
End of explanation
"""
z.min(level='x')
"""
Explanation: With a hierarchical index, the level parameter can even be passed directly to certain aggregation functions.
End of explanation
"""
z.groupby('x').size()
"""
Explanation: Counting the number of records in each group is also an aggregation.
End of explanation
"""
z.groupby(['x', 'y']).size()
z.groupby('y').describe()
import numpy as np
# np.std with ddof=1 divides by N-1 (sample standard deviation); the
# default ddof=0 divides by N (population) -- hence the differing results.
z.groupby('y').apply(lambda u: np.std(u, ddof=1))
z.groupby('y').apply(lambda u: np.std(u))
z.groupby('y').apply(np.std)
"""
Explanation: For each unique values of x (which are 0 and 1), we have two entries.
The grouping object (here, x) is referred to as the key.
End of explanation
"""
z
z.groupby('y').apply(lambda u: u.min() > 0.4)
z.groupby('y').filter(lambda u: u.min() > 0.4)
"""
Explanation: Filtering is another kind of operation which can be applied to each group. Filtering may reduce the size of the data structure, since some groups might get filtered out.
End of explanation
"""
z.groupby('y').transform(lambda u: u.min())
"""
Explanation: The third kind of operation is transformation, where the size of the data structure is preserved.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/mri/cmip6/models/mri-esm2-0/land.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mri', 'mri-esm2-0', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: MRI
Source ID: MRI-ESM2-0
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:19
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dynamic vegetation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies of the snow-free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river soil hydrology in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, specify what the snow albedo is a function of*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
*Treatment of vegetation biomass*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen dependence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If couple to atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupling with rivers, which quantities are exchanged between the lakes and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Basins not flowing to ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
dsacademybr/PythonFundamentos | Cap04/Notebooks/DSA-Python-Cap04-10-Enumerate.ipynb | gpl-3.0 | # Versão da Linguagem Python
from platform import python_version
print('Versão da Linguagem Python Usada Neste Jupyter Notebook:', python_version())
"""
Explanation: <font color='blue'>Data Science Academy - Python Fundamentos - Capítulo 4</font>
Download: http://github.com/dsacademybr
End of explanation
"""
# Criando uma lista
seq = ['a','b','c']
enumerate(seq)
list(enumerate(seq))
# Imprimindo os valores de uma lista com a função enumerate() e seus respectivos índices
for indice, valor in enumerate(seq):
print (indice, valor)
for indice, valor in enumerate(seq):
if indice >= 2:
break
else:
print (valor)
lista = ['Marketing', 'Tecnologia', 'Business']
for i, item in enumerate(lista):
print(i, item)
for i, item in enumerate('Isso é uma string'):
print(i, item)
for i, item in enumerate(range(10)):
print(i, item)
"""
Explanation: Enumerate
End of explanation
"""
|
jmhsi/justin_tinker | data_science/courses/temp/courses/dl1/lesson1_dessert_classifier.ipynb | apache-2.0 | # Put these at the top of every notebook, to get automatic reloading and inline plotting
%reload_ext autoreload
%autoreload 2
%matplotlib inline
"""
Explanation: Image classification with Convolutional Neural Networks
Welcome to the first week of the second deep learning certificate! We're going to use convolutional neural networks (CNNs) to allow our computer to see - something that is only possible thanks to deep learning.
Introduction to our first task: 'Dogs vs Cats'
We're going to try to create a model to enter the Dogs vs Cats competition at Kaggle. There are 25,000 labelled dog and cat photos available for training, and 12,500 in the test set that we have to try to label for this competition. According to the Kaggle web-site, when this competition was launched (end of 2013): "State of the art: The current literature suggests machine classifiers can score above 80% accuracy on this task". So if we can beat 80%, then we will be at the cutting edge as of 2013!
End of explanation
"""
# This file contains all the main external libs we'll use
from fastai.imports import *
from fastai.transforms import *
from fastai.conv_learner import *
from fastai.model import *
from fastai.dataset import *
from fastai.sgdr import *
from fastai.plots import *
from data_science.j_utils import copyanything
PATH = "data/desserts/"
sz = 224
"""
Explanation: Here we import the libraries we need. We'll learn about what each does during the course.
End of explanation
"""
import os
def walklevel(some_dir, level=1):
    """Yield ``os.walk``-style (root, dirs, files) tuples for *some_dir*,
    descending at most *level* directory levels below it."""
    some_dir = some_dir.rstrip(os.path.sep)
    assert os.path.isdir(some_dir)
    base_depth = some_dir.count(os.path.sep)
    for root, dirs, files in os.walk(some_dir):
        yield root, dirs, files
        # Once we are `level` levels down, prune the dir list in place so
        # os.walk does not descend any further.
        if root.count(os.path.sep) >= base_depth + level:
            dirs[:] = []
if not os.path.isdir(PATH):
os.mkdir(PATH)
inputpath = 'data/dogscats/'
outputpath = 'data/desserts'
for dirpath, dirnames, filenames in walklevel(inputpath, level=1):
structure = os.path.join(outputpath, dirpath[len(inputpath):])
if not os.path.isdir(structure):
os.mkdir(structure)
else:
print("Folder already exists!")
# used python ~/google_image_finder/image_download.py <class> 500 to download images
# move the dataset in homedir to data dir, and rename
making_dataset = False
if making_dataset:
!cp -R /home/justin/dataset/. data/desserts/train/
!rm -rf /home/justin/dataset
"""
Explanation: making folder structure and downloading some images
End of explanation
"""
import os
import imghdr
# print(imghdr.what(f'{PATH}train/cheesecake/cheesecake_0.jpg'))
def get_dirs_dataset(PATH):
    """Return every leaf directory (one with no subdirectories) under PATH.

    Used for purging images that mpl cannot load."""
    return [root for root, subdirs, _files in os.walk(PATH) if not subdirs]
def remove_bad_imgs(need_checking_dirs):
    # Best-effort cleanup: delete files that imghdr cannot identify AND that
    # matplotlib cannot decode. `need_checking_dirs` is a list of leaf
    # directories, e.g. the output of get_dirs_dataset().
    for dirs in need_checking_dirs:
        for root, _, files in os.walk(dirs):
            for file in files:
                fpath = os.path.join(root, file)
                if not imghdr.what(fpath):  # format not recognised by imghdr
                    try:
                        img = plt.imread(fpath)  # last chance: can mpl read it?
                    except OSError:
                        os.remove(fpath)
def checked_imgs(need_checking_dirs):
    """Verify every file under the given leaf directories is a loadable image.

    Returns True when all files pass; prints a warning and returns None as
    soon as one unreadable image is found (same falsy contract callers use).
    """
    for dirs in need_checking_dirs:
        for root, _, files in os.walk(dirs):
            for file in files:
                fpath = os.path.join(root, file)
                if not imghdr.what(fpath):  # format not recognised by imghdr
                    try:
                        plt.imread(fpath)  # only loadability matters, not the pixels
                    except OSError:
                        print('There are still bad images somehow!')
                        return None
    # Fixed typo in the status message ("iamges" -> "images").
    print('All images seem good to go!')
    return True
removed_bad_imgs = True
if not removed_bad_imgs:
remove_bad_imgs(get_dirs_dataset(PATH+'train'))
imgs_checked = checked_imgs(get_dirs_dataset(PATH+'train'))
"""
Explanation: Somehow the script managed to download files that aren't images; remove them here.
End of explanation
"""
from sklearn.model_selection import train_test_split
def get_all_filepaths(PATH):
    """Collect the full path of every file beneath PATH's ``train`` folder."""
    return [os.path.join(root, fname)
            for root, _dirs, fnames in os.walk(PATH + 'train')
            for fname in fnames]
import os
import errno
def remove_empty_folders(path, removeRoot=True):
    """Recursively delete empty directories below *path*; delete *path* itself
    too when it ends up empty and *removeRoot* is true."""
    if not os.path.isdir(path):
        return
    # Depth-first: clean out the children before judging this directory.
    for entry in os.listdir(path):
        child = os.path.join(path, entry)
        if os.path.isdir(child):
            remove_empty_folders(child)
    # Re-list, since children may have been deleted just above.
    if len(os.listdir(path)) == 0 and removeRoot:
        print("Removing empty folder:", path)
        os.rmdir(path)
def move_to_target(validset, target):
    """Move each file in *validset* from the 'train' split into *target*.

    Replaces only the FIRST 'train' path component: the original replaced
    every occurrence, which also mangled filenames that happened to contain
    the substring 'train' (e.g. 'train_1.jpg' -> 'valid_1.jpg').
    Returns True after moving everything.
    """
    for file in validset:
        dst = file.replace('train', target, 1)
        os.makedirs(os.path.dirname(dst), exist_ok=True)
        os.rename(file, dst)
    print('moved files from train to {0}.'.format(target))
    return True
def convert_to_test(PATH):
    # Flatten a class-labelled tree into an unlabelled test set: every file is
    # moved into the PATH root and renamed to a sequential '<k>.jpg', then the
    # now-empty class folders are removed.
    # NOTE(review): the '.jpg' name is applied regardless of the original
    # extension -- presumably everything was converted to jpg beforehand
    # (see convert_to_jpg below); confirm.
    k = 0
    for root, subdirs, files in os.walk(PATH):
        if len(files) > 0:
            for file in files:
                os.rename(os.path.join(root, file), os.path.join(PATH, '{0}.jpg'.format(k)))
                k += 1
    remove_empty_folders(PATH)
def convert_to_jpg(file):
    """Best-effort: ensure *file* is a PIL-readable .jpg.

    Unreadable files are deleted; readable non-jpg files are re-saved as
    .jpg (original removed). Fixes over the original:
      * missing `return` after deleting an unreadable file meant the code
        went on to reopen the now-deleted file and crashed on double-remove;
      * PIL's format name is 'JPEG' -- 'JPG' raised KeyError, so every
        conversion silently failed and the bare except DELETED the file;
      * `file.split('.')` broke on paths with more than one dot -- use
        os.path.splitext;
      * convert to RGB first, since JPEG cannot store an alpha channel.
    """
    try:
        im = Image.open(file)
    except Exception:
        os.remove(file)
        return
    if '.jpg' not in file:
        try:
            im = Image.open(file).convert('RGB')
            name, _ext = os.path.splitext(file)
            im.save(name + '.jpg', 'JPEG')
            os.remove(file)
            im = Image.open(name + '.jpg')
            plt.imshow(im)  # sanity-check the freshly written jpg is loadable
        except Exception:
            if os.path.exists(file):
                os.remove(file)
already_moved_valid = True
if not already_moved_valid:
trainset, validset= train_test_split(get_all_filepaths(PATH), test_size=.2)
move_to_target(validset, 'valid')
# make testset
already_moved_test = True
if not already_moved_test:
trainset, testset = train_test_split(get_all_filepaths(PATH), test_size=.2)
move_to_target(testset, 'test1')
convert_to_test(PATH+'test1')
import warnings
warnings.filterwarnings("error")
# convert all images to .jpg
dirs_to_convert = ['train', 'valid', 'test1']
for dirs in dirs_to_convert:
for root, subdirs, files in os.walk(PATH+dirs):
print(root, len(files))
if len(files) > 0:
for file in files:
convert_to_jpg(os.path.join(root,file))
# check images again...
# convert all images to .jpg
dirs_to_check = ['train', 'valid', 'test1']
for dirs in dirs_to_check:
for root, subdirs, files in os.walk(PATH+dirs):
print(root, len(files))
if len(files) > 0:
for file in files:
im = Image.open(os.path.join(root, file))
try:
plt.imshow(im)
except:
os.remove(os.path.join(root, file))
# make sample
inputpath = PATH
outputpath = os.path.join(PATH, 'sample')
for dirpath, dirnames, filenames in os.walk(inputpath):
structure = os.path.join(outputpath, dirpath[len(inputpath):])
if os.path.join(outputpath, 'sample') in structure:
pass
elif not os.path.isdir(structure):
os.mkdir(structure)
if len(filenames) > 0:
try:
files_to_copy = np.random.choice(filenames, 50, replace=False)
except:
files_to_copy = np.random.choice(filenames, 50, replace=True)
for file in files_to_copy:
copyanything(os.path.join(dirpath, file), os.path.join(structure,file))
else:
print("Folder {0} already exists! {1} files contained.".format(structure, len(os.listdir(structure))))
"""
Explanation: move some images from the classes into validation and test
End of explanation
"""
!ls {PATH}
!ls {PATH}valid
files = !ls {PATH}valid/strawberry_cake | head
files
img = plt.imread(f'{PATH}valid/strawberry_cake/{files[0]}')
plt.imshow(img);
"""
Explanation: First look at cat pictures
Our library will assume that you have train and valid directories. It also assumes that each dir will have subdirs for each class you wish to recognize (in this case, 'cats' and 'dogs').
End of explanation
"""
img.shape
img[:4,:4]
"""
Explanation: Here is how the raw data looks like
End of explanation
"""
# Uncomment the below if you need to reset your precomputed activations
# !rm -rf {PATH}tmp
%pdb
%%writefile ../../fastai/dataset.py
# %load ../../fastai/dataset.py
from .imports import *
from .torch_imports import *
from .core import *
from .transforms import *
from .layer_optimizer import *
from .dataloader import DataLoader
def get_cv_idxs(n, cv_idx=0, val_pct=0.2, seed=42):
    """Return validation indices for cross-validation fold *cv_idx*: a
    val_pct-sized slice of a seeded random permutation of range(n)."""
    np.random.seed(seed)
    n_val = int(val_pct * n)
    perm = np.random.permutation(n)
    start = cv_idx * n_val
    return perm[start:start + n_val]
def resize_img(fname, targ, path, new_path):
    # Resize one image so its SHORTER side equals `targ` (aspect ratio kept)
    # and save it under path/new_path/<targ>/fname. Skips work if the target
    # file already exists, which makes re-runs cheap.
    dest = os.path.join(path,new_path,str(targ),fname)
    if os.path.exists(dest): return
    im = Image.open(os.path.join(path, fname)).convert('RGB')
    r,c = im.size
    ratio = targ/min(r,c)  # scale factor that maps the shorter side to targ
    sz = (scale_to(r, ratio, targ), scale_to(c, ratio, targ))
    os.makedirs(os.path.split(dest)[0], exist_ok=True)
    im.resize(sz, Image.LINEAR).save(dest)
def resize_imgs(fnames, targ, path, new_path):
    # Resize many images concurrently (8 worker threads). If the first output
    # file already exists the whole batch is assumed done and skipped.
    # Returns the directory that holds the resized copies.
    # NOTE(review): the workers hard-code 'tmp' instead of using `new_path`,
    # while the existence check and return value DO use `new_path` -- these
    # only agree when callers pass new_path='tmp'; confirm intent.
    if not os.path.exists(os.path.join(path,new_path,str(targ),fnames[0])):
        with ThreadPoolExecutor(8) as e:
            ims = e.map(lambda x: resize_img(x, targ, path, 'tmp'), fnames)
            for x in tqdm(ims, total=len(fnames), leave=False): pass  # drain to force execution
    return os.path.join(path,new_path,str(targ))
def read_dir(path, folder):
    """List every file in path/folder (pattern '*.*'), relative to *path*.

    Raises FileNotFoundError when the folder is missing or empty.

    Bug fix: the original kept `iglob`'s lazy iterator and probed it with
    `any(fnames)`, which CONSUMES the first element -- so the first file was
    silently dropped from the result. Materialise the listing first.
    """
    # TODO: warn or error if no files found?
    full_path = os.path.join(path, folder)
    fnames = list(iglob(f"{full_path}/*.*"))
    if fnames:
        return [os.path.relpath(f, path) for f in fnames]
    raise FileNotFoundError("{} folder doesn't exist or is empty".format(folder))
def read_dirs(path, folder):
    """Scan a path/folder/<label>/<file> layout.

    Returns (filenames, labels, all_labels): per-file relative paths, the
    matching label per file, and the sorted list of label names."""
    full_path = os.path.join(path, folder)
    all_labels = sorted(os.listdir(full_path))
    filenames, labels = [], []
    for label in all_labels:
        entries = os.listdir(os.path.join(full_path, label))
        filenames.extend(os.path.join(folder, label, e) for e in entries)
        labels.extend(label for _ in entries)
    return filenames, labels, all_labels
def n_hot(ids, c):
    """Return a length-*c* float32 vector with ones at the positions in *ids*."""
    vec = np.zeros(c, dtype=np.float32)
    vec[ids] = 1.0
    return vec
def folder_source(path, folder):
    # Build (filenames, integer label array, class names) from a
    # folder/<class>/<image> directory layout via read_dirs().
    fnames, lbls, all_labels = read_dirs(path, folder)
    label2idx = {v:k for k,v in enumerate(all_labels)}  # class name -> index
    idxs = [label2idx[lbl] for lbl in lbls]
    c = len(all_labels)  # NOTE(review): computed but unused below
    label_arr = np.array(idxs, dtype=int)
    return fnames, label_arr, all_labels
def parse_csv_labels(fn, skip_header=True):
    """Parse a labels CSV of `fname,space-separated-labels` rows.

    Returns (sorted fnames, {fname: [labels]}, sorted unique labels,
    {label: index}).

    Fix: read the file through a context manager instead of a bare
    `open(fn)` comprehension, which leaked the file handle.
    """
    skip = 1 if skip_header else 0
    with open(fn) as f:
        csv_lines = [o.strip().split(',') for o in f][skip:]
    fnames = [fname for fname, _ in csv_lines]
    csv_labels = {a: b.split(' ') for a, b in csv_lines}
    all_labels = sorted(set(p for o in csv_labels.values() for p in o))
    label2idx = {v: k for k, v in enumerate(all_labels)}
    return sorted(fnames), csv_labels, all_labels, label2idx
def nhot_labels(label2idx, csv_labels, fnames, c):
    # Expand each file's label list into an n-hot row of width c; rows are
    # stacked in the order given by `fnames`.
    all_idx = {k: n_hot([label2idx[o] for o in v], c)
        for k,v in csv_labels.items()}
    return np.stack([all_idx[o] for o in fnames])
def csv_source(folder, csv_file, skip_header=True, suffix='', continuous=False):
    # Build (full file names, label array, class names) from a CSV of
    # image-name -> space-separated labels.
    fnames,csv_labels,all_labels,label2idx = parse_csv_labels(csv_file, skip_header)
    full_names = [os.path.join(folder,fn+suffix) for fn in fnames]
    if continuous:
        # Regression: keep the raw CSV values as float32 targets.
        label_arr = np.array([csv_labels[i] for i in fnames]).astype(np.float32)
    else:
        label_arr = nhot_labels(label2idx, csv_labels, fnames, len(all_labels))
        is_single = np.all(label_arr.sum(axis=1)==1)
        # Single-label data collapses from n-hot rows to plain class indices.
        if is_single: label_arr = np.argmax(label_arr, axis=1)
    return full_names, label_arr, all_labels
class BaseDataset(Dataset):
    """Abstract base for datasets: caches sample count / class count / image
    size at construction and applies an optional transform in __getitem__."""
    def __init__(self, transform=None):
        self.transform = transform
        self.n = self.get_n()    # number of samples
        self.c = self.get_c()    # number of classes (or label columns)
        self.sz = self.get_sz()  # image size
    def __getitem__(self, idx):
        x,y = self.get_x(idx),self.get_y(idx)
        return self.get(self.transform, x, y)
    def __len__(self): return self.n
    def get(self, tfm, x, y):
        # Apply the transform when present; otherwise pass the pair through.
        return (x,y) if tfm is None else tfm(x,y)
    @abstractmethod
    def get_n(self): raise NotImplementedError
    @abstractmethod
    def get_c(self): raise NotImplementedError
    @abstractmethod
    def get_sz(self): raise NotImplementedError
    @abstractmethod
    def get_x(self, i): raise NotImplementedError
    @abstractmethod
    def get_y(self, i): raise NotImplementedError
    @property
    def is_multi(self): return False  # multi-label classification?
    @property
    def is_reg(self): return False    # regression targets?
def open_image(fn):
    """ Opens an image using OpenCV given the file path.

    Arguments:
        fn: the file path of the image

    Returns:
        The numpy array representation of the image in the RGB format,
        as float32 scaled to [0, 1].
        NOTE(review): when the path is missing, is a directory, or decoding
        fails, this prints a message and implicitly returns None instead of
        raising -- callers must tolerate None; confirm this is intended.
    """
    flags = cv2.IMREAD_UNCHANGED+cv2.IMREAD_ANYDEPTH+cv2.IMREAD_ANYCOLOR
    if not os.path.exists(fn):
        print('No such file or directory: {}'.format(fn))
    elif os.path.isdir(fn):
        print('Is a directory: {}'.format(fn))
    else:
        try:
            # cv2 loads BGR; convert to RGB and normalise to [0, 1].
            return cv2.cvtColor(cv2.imread(fn, flags), cv2.COLOR_BGR2RGB).astype(np.float32)/255
        except Exception as e:
            print(fn, e)
class FilesDataset(BaseDataset):
    """Dataset whose x values are image files on disk, named in `fnames`
    relative to `path`. Subclasses must supply the label array `self.y`
    (get_n reads it)."""
    def __init__(self, fnames, transform, path):
        self.path,self.fnames = path,fnames
        super().__init__(transform)
    def get_n(self): return len(self.y)  # y is set by subclasses before super().__init__
    def get_sz(self): return self.transform.sz
    def get_x(self, i): return open_image(os.path.join(self.path, self.fnames[i]))
    def resize_imgs(self, targ, new_path):
        # Pre-resize all images on disk and return a new dataset over the copies.
        # NOTE(review): passes 4 ctor args (fnames, y, transform, path), which
        # matches FilesArrayDataset subclasses but NOT this class's own
        # 3-arg __init__ -- confirm it is never called on a bare FilesDataset.
        dest = resize_imgs(self.fnames, targ, self.path, new_path)
        return self.__class__(self.fnames, self.y, self.transform, dest)
    def denorm(self,arr):
        """Reverse the normalization done to a batch of images.

        Arguments:
            arr: of shape/size (N,3,sz,sz)
        """
        if type(arr) is not np.ndarray: arr = to_np(arr)
        if len(arr.shape)==3: arr = arr[None]  # promote a single image to a batch
        # channels-first -> channels-last before denormalising
        return self.transform.denorm(np.rollaxis(arr,1,4))
class FilesArrayDataset(FilesDataset):
    """File-backed x values paired with an in-memory label array y."""
    def __init__(self, fnames, y, transform, path):
        self.y=y
        assert(len(fnames)==len(y))
        super().__init__(fnames, transform, path)
    def get_y(self, i): return self.y[i]
    def get_c(self): return self.y.shape[1]  # assumes 2-D y (e.g. n-hot rows)
class FilesIndexArrayDataset(FilesArrayDataset):
    """Single-label classification: y holds class indices, so the class count
    is the largest index + 1."""
    def get_c(self): return int(self.y.max())+1
class FilesNhotArrayDataset(FilesArrayDataset):
    """Multi-label classification with n-hot encoded targets."""
    @property
    def is_multi(self): return True
class FilesIndexArrayRegressionDataset(FilesArrayDataset):
    """File-backed dataset whose targets are continuous (regression)."""
    @property
    def is_reg(self):
        # Must be a property: BaseDataset declares `is_reg` as a property and
        # callers read `ds.is_reg` without calling it. The original override
        # dropped @property, so the attribute was a bound method -- always
        # truthy, but the wrong kind of object.
        return True
class ArraysDataset(BaseDataset):
    """Dataset fully held in memory: x and y are parallel arrays."""
    def __init__(self, x, y, transform):
        self.x,self.y=x,y
        assert(len(x)==len(y))
        super().__init__(transform)
    def get_x(self, i): return self.x[i]
    def get_y(self, i): return self.y[i]
    def get_n(self): return len(self.y)
    def get_sz(self): return self.x.shape[1]  # assumes image size on axis 1 -- TODO confirm layout
class ArraysIndexDataset(ArraysDataset):
    """In-memory single-label classification dataset (y holds class indices)."""
    def get_c(self): return int(self.y.max())+1
    def get_y(self, i): return self.y[i]  # NOTE(review): identical to the parent's get_y
class ArraysNhotDataset(ArraysDataset):
    """In-memory multi-label dataset with n-hot encoded targets."""
    def get_c(self): return self.y.shape[1]
    @property
    def is_multi(self): return True
class ModelData():
    """Bundles the train/validation/(optional) test dataloaders together with
    the working path used for models and temporary files."""
    def __init__(self, path, trn_dl, val_dl, test_dl=None):
        self.path,self.trn_dl,self.val_dl,self.test_dl = path,trn_dl,val_dl,test_dl
    @classmethod
    def from_dls(cls, path,trn_dl,val_dl,test_dl=None):
        # Alternate constructor: wrap raw dataloaders in ModelDataLoader first.
        trn_dl,val_dl = ModelDataLoader(trn_dl),ModelDataLoader(val_dl)
        if test_dl: test_dl = ModelDataLoader(test_dl)
        return cls(path, trn_dl, val_dl, test_dl)
    @property
    def is_reg(self): return self.trn_ds.is_reg
    @property
    def trn_ds(self): return self.trn_dl.dataset
    @property
    def val_ds(self): return self.val_dl.dataset
    @property
    def test_ds(self): return self.test_dl.dataset
    @property
    def trn_y(self): return self.trn_ds.y
    @property
    def val_y(self): return self.val_ds.y
class ModelDataLoader():
    """Thin iterator wrapper around a DataLoader that stops after exactly
    len(dl) batches per epoch."""
    def __init__(self, dl): self.dl=dl
    @classmethod
    def create_dl(cls, *args, **kwargs): return cls(DataLoader(*args, **kwargs))
    def __iter__(self):
        # Restart the underlying iterator and the per-epoch batch counter.
        self.it,self.i = iter(self.dl),0
        return self
    def __len__(self): return len(self.dl)
    def __next__(self):
        if self.i>=len(self.dl): raise StopIteration
        self.i+=1
        return next(self.it)
    @property
    def dataset(self): return self.dl.dataset
class ImageData(ModelData):
    """ModelData specialised for images: builds loaders for train, val,
    fixed-train (val transforms on train data), augmented-val, and the two
    test variants."""
    def __init__(self, path, datasets, bs, num_workers, classes):
        trn_ds,val_ds,fix_ds,aug_ds,test_ds,test_aug_ds = datasets
        self.path,self.bs,self.num_workers,self.classes = path,bs,num_workers,classes
        # Only the training loader shuffles; everything else keeps file order.
        self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl,self.test_dl,self.test_aug_dl = [
            self.get_dl(ds,shuf) for ds,shuf in [
                (trn_ds,True),(val_ds,False),(fix_ds,False),(aug_ds,False),
                (test_ds,False),(test_aug_ds,False)
            ]
        ]
    def get_dl(self, ds, shuffle):
        # Datasets may be None (e.g. no test set); propagate the None.
        if ds is None: return None
        return ModelDataLoader.create_dl(ds, batch_size=self.bs, shuffle=shuffle,
            num_workers=self.num_workers, pin_memory=False)
    @property
    def sz(self): return self.trn_ds.sz
    @property
    def c(self): return self.trn_ds.c
    def resized(self, dl, targ, new_path):
        return dl.dataset.resize_imgs(targ,new_path) if dl else None
    def resize(self, targ, new_path):
        # Pre-resize every dataset's images on disk and return a new ImageData
        # pointing at the resized copies.
        new_ds = []
        dls = [self.trn_dl,self.val_dl,self.fix_dl,self.aug_dl]
        if self.test_dl: dls += [self.test_dl, self.test_aug_dl]
        else: dls += [None,None]
        t = tqdm_notebook(dls)
        for dl in t: new_ds.append(self.resized(dl, targ, new_path))
        t.close()
        return self.__class__(new_ds[0].path, new_ds, self.bs, self.num_workers, self.classes)
class ImageClassifierData(ImageData):
    """ImageData with constructors for the three common on-disk layouts:
    in-memory arrays, class-per-folder trees, and CSV label files."""
    @property
    def is_multi(self): return self.trn_dl.dataset.is_multi
    @staticmethod
    def get_ds(fn, trn, val, tfms, test=None, **kwargs):
        # Build the six dataset variants expected by ImageData.__init__:
        # train/val with their own transforms, plus "fix" (train data, val
        # transforms) and "aug" (val data, train transforms) for TTA/analysis.
        res = [
            fn(trn[0], trn[1], tfms[0], **kwargs), # train
            fn(val[0], val[1], tfms[1], **kwargs), # val
            fn(trn[0], trn[1], tfms[1], **kwargs), # fix
            fn(val[0], val[1], tfms[0], **kwargs)  # aug
        ]
        if test is not None:
            # Test data has no labels; use a zero placeholder array.
            test_lbls = np.zeros((len(test),1))
            res += [
                fn(test, test_lbls, tfms[1], **kwargs), # test
                fn(test, test_lbls, tfms[0], **kwargs)  # test_aug
            ]
        else: res += [None,None]
        return res
    @classmethod
    def from_arrays(cls, path, trn, val, bs=64, tfms=(None,None), classes=None, num_workers=4, test=None):
        """ Read in images and their labels given as numpy arrays

        Arguments:
            path: a root path of the data (used for storing trained models, precomputed values, etc)
            trn: a tuple of training data matrix and target label/classification array (e.g. `trn=(x,y)` where `x` has the
                shape of `(5000, 784)` and `y` has the shape of `(5000,)`)
            val: a tuple of validation data matrix and target label/classification array.
            bs: batch size
            tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
            classes: a list of all labels/classifications
            num_workers: a number of workers
            test: a matrix of test data (the shape should match `trn[0]`)

        Returns:
            ImageClassifierData
        """
        datasets = cls.get_ds(ArraysIndexDataset, trn, val, tfms, test=test)
        return cls(path, datasets, bs, num_workers, classes=classes)
    @classmethod
    def from_paths(cls, path, bs=64, tfms=(None,None), trn_name='train', val_name='valid', test_name=None, num_workers=8):
        """ Read in images and their labels given as sub-folder names

        Arguments:
            path: a root path of the data (used for storing trained models, precomputed values, etc)
            bs: batch size
            tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
            trn_name: a name of the folder that contains training images.
            val_name: a name of the folder that contains validation images.
            test_name: a name of the folder that contains test images.
            num_workers: number of workers

        Returns:
            ImageClassifierData
        """
        trn,val = [folder_source(path, o) for o in (trn_name, val_name)]
        test_fnames = read_dir(path, test_name) if test_name else None
        datasets = cls.get_ds(FilesIndexArrayDataset, trn, val, tfms, path=path, test=test_fnames)
        return cls(path, datasets, bs, num_workers, classes=trn[2])
    @classmethod
    def from_csv(cls, path, folder, csv_fname, bs=64, tfms=(None,None),
               val_idxs=None, suffix='', test_name=None, continuous=False, skip_header=True, num_workers=8):
        """ Read in images and their labels given as a CSV file.

        This method should be used when training image labels are given in an CSV file as opposed to
        sub-directories with label names.

        Arguments:
            path: a root path of the data (used for storing trained models, precomputed values, etc)
            folder: a name of the folder in which training images are contained.
            csv_fname: a name of the CSV file which contains target labels.
            bs: batch size
            tfms: transformations (for data augmentations). e.g. output of `tfms_from_model`
            val_idxs: index of images to be used for validation. e.g. output of `get_cv_idxs`
            suffix: suffix to add to image names in CSV file (sometimes CSV only contains the file name without file
                    extension e.g. '.jpg' - in which case, you can set suffix as '.jpg')
            test_name: a name of the folder which contains test images.
            continuous: if True, treat the CSV values as continuous regression targets (float32).
            skip_header: skip the first row of the CSV file.
            num_workers: number of workers

        Returns:
            ImageClassifierData
        """
        fnames,y,classes = csv_source(folder, csv_fname, skip_header, suffix, continuous=continuous)
        ((val_fnames,trn_fnames),(val_y,trn_y)) = split_by_idx(val_idxs, np.array(fnames), y)
        test_fnames = read_dir(path, test_name) if test_name else None
        if continuous:
            f = FilesIndexArrayRegressionDataset
        else:
            # 1-D y means single-label class indices; 2-D means n-hot multi-label.
            f = FilesIndexArrayDataset if len(trn_y.shape)==1 else FilesNhotArrayDataset
        datasets = cls.get_ds(f, (trn_fnames,trn_y), (val_fnames,val_y), tfms,
                               path=path, test=test_fnames)
        return cls(path, datasets, bs, num_workers, classes=classes)
def split_by_idx(idxs, *a):
    """Split each array in ``a`` into a (selected, remaining) pair.

    Elements whose positions appear in ``idxs`` form the first member of each
    pair; everything else forms the second. One pair is returned per array.
    """
    selector = np.zeros(len(a[0]), dtype=bool)
    selector[np.array(idxs)] = True
    return [(arr[selector], arr[~selector]) for arr in a]
arch=resnet34
data = ImageClassifierData.from_paths(PATH, tfms=tfms_from_model(arch, sz))
learn = ConvLearner.pretrained(arch, data, precompute=True)
learn.fit(0.01, 3)
learn.fit(0.01,3)
learn.fit(0.01,2)
"""
Explanation: Our first model: quick start
We're going to use a <b>pre-trained</b> model, that is, a model created by someone else to solve a different problem. Instead of building a model from scratch to solve a similar problem, we'll use a model trained on ImageNet (1.2 million images and 1000 classes) as a starting point. The model is a Convolutional Neural Network (CNN), a type of Neural Network that builds state-of-the-art models for computer vision. We'll be learning all about CNNs during this course.
We will be using the <b>resnet34</b> model. resnet34 is a version of the model that won the 2015 ImageNet competition. Here is more info on resnet models. We'll be studying them in depth later, but for now we'll focus on using them effectively.
Here's how to train and evaluate a dogs vs cats model in 3 lines of code, and under 20 seconds:
End of explanation
"""
# This is the label for a val data
data.val_y
# from here we know that 'cats' is label 0 and 'dogs' is label 1.
data.classes
# this gives prediction for validation set. Predictions are in log scale
log_preds = learn.predict()
log_preds.shape
log_preds[:10]
preds = np.argmax(log_preds, axis=1) # from log probabilities to 0 or 1
probs = np.exp(log_preds[:,1]) # pr(dog)
def rand_by_mask(mask):
    """Return up to 4 random indices (without replacement) where ``mask`` is True.

    The original called ``np.random.choice(..., 4, replace=False)`` directly,
    which raises ValueError whenever fewer than 4 entries are masked; capping
    the sample size at the number of available indices makes it safe.
    """
    idxs = np.where(mask)[0]
    return np.random.choice(idxs, min(len(idxs), 4), replace=False)
# Sample up to 4 validation indices whose correctness (prediction == label)
# matches `is_correct`; relies on the notebook globals `preds` and `data`.
def rand_by_correct(is_correct): return rand_by_mask((preds == data.val_y)==is_correct)
# def plot_val_with_title(idxs, title):
# imgs = np.stack([data.val_ds[x][0] for x in idxs])
# title_probs = [probs[x] for x in idxs]
# print(title)
# return plots(data.val_ds.denorm(imgs), rows=1, titles=title_probs)
def plots(ims, figsize=(12,6), rows=1, titles=None):
    """Display the images in ``ims`` on a grid with ``rows`` rows.

    When ``titles`` is given, ``titles[i]`` is drawn above image ``i``.
    """
    fig = plt.figure(figsize=figsize)
    cols = len(ims) // rows
    for idx, im in enumerate(ims):
        axes = fig.add_subplot(rows, cols, idx + 1)
        axes.axis('Off')
        if titles is not None:
            axes.set_title(titles[idx], fontsize=16)
        plt.imshow(im)
# Load image `idx` of dataset `ds` straight from disk as a numpy array,
# bypassing the dataloader's normalisation; `PATH` is a notebook global.
def load_img_id(ds, idx): return np.array(PIL.Image.open(PATH+ds.fnames[idx]))
def plot_val_with_title(idxs, title):
    """Print `title`, then plot the validation images at `idxs` with their
    predicted P(dog) (from the notebook global `probs`) as subplot titles."""
    imgs = [load_img_id(data.val_ds,x) for x in idxs]
    title_probs = [probs[x] for x in idxs]
    print(title)
    return plots(imgs, rows=1, titles=title_probs, figsize=(16,8))
# 1. A few correct labels at random
plot_val_with_title(rand_by_correct(True), "Correctly classified")
# 2. A few incorrect labels at random
plot_val_with_title(rand_by_correct(False), "Incorrectly classified")
def most_by_mask(mask, mult):
    """Return the 4 masked indices with the most extreme predicted P(dog).

    `mult` is +1 to take the smallest probabilities or -1 for the largest;
    `probs` is the notebook-global vector of predicted P(dog) values.
    """
    idxs = np.where(mask)[0]
    return idxs[np.argsort(mult * probs[idxs])[:4]]
def most_by_correct(y, is_correct):
    """Return the 4 most confident validation examples of class ``y`` that
    were classified correctly (``is_correct=True``) or incorrectly (``False``).

    Uses the notebook globals ``preds`` and ``data``; sort direction flips so
    the "most confident" end is returned for either class.
    """
    mult = -1 if (y==1)==is_correct else 1
    # Parenthesize both comparisons: `&` binds tighter than `==`, so the
    # original `a == b & c` silently computed `a == (b & c)` instead of
    # intersecting the correctness mask with the class mask.
    return most_by_mask(((preds == data.val_y)==is_correct) & (data.val_y == y), mult)
plot_val_with_title(most_by_correct(0, True), "Most correct cats")
plot_val_with_title(most_by_correct(1, True), "Most correct dogs")
plot_val_with_title(most_by_correct(0, False), "Most incorrect cats")
plot_val_with_title(most_by_correct(1, False), "Most incorrect dogs")
most_uncertain = np.argsort(np.abs(probs -0.5))[:4]
plot_val_with_title(most_uncertain, "Most uncertain predictions")
"""
Explanation: How good is this model? Well, as we mentioned, prior to this competition, the state of the art was 80% accuracy. But the competition resulted in a huge jump to 98.9% accuracy, with the author of a popular deep learning library winning the competition. Extraordinarily, less than 4 years later, we can now beat that result in seconds! Even last year in this same course, our initial model had 98.3% accuracy, which is nearly double the error we're getting just a year later, and that took around 10 minutes to compute.
Analyzing results: looking at pictures
As well as looking at the overall metrics, it's also a good idea to look at examples of each of:
1. A few correct labels at random
2. A few incorrect labels at random
3. The most correct labels of each class (ie those with highest probability that are correct)
4. The most incorrect labels of each class (ie those with highest probability that are incorrect)
5. The most uncertain labels (ie those with probability closest to 0.5).
End of explanation
"""
learn = ConvLearner.pretrained(arch, data, precompute=True)
lrf=learn.lr_find()
"""
Explanation: Choosing a learning rate
The learning rate determines how quickly or how slowly you want to update the weights (or parameters). Learning rate is one of the most difficult parameters to set, because it significantly affects model performance.
The method learn.lr_find() helps you find an optimal learning rate. It uses the technique developed in the 2015 paper Cyclical Learning Rates for Training Neural Networks, where we simply keep increasing the learning rate from a very small value, until the loss starts decreasing. We can plot the learning rate across batches to see what this looks like.
We first create a new learner, since we want to know how to set the learning rate for a new (untrained) model.
End of explanation
"""
learn.sched.plot_lr()
"""
Explanation: Our learn object contains an attribute sched that contains our learning rate scheduler, and has some convenient plotting functionality including this one:
End of explanation
"""
learn.sched.plot()
"""
Explanation: Note that in the previous plot iteration is one iteration (or minibatch) of SGD. In one epoch there are
(num_train_samples/num_iterations) of SGD.
We can see the plot of loss versus learning rate to see where our loss stops decreasing:
End of explanation
"""
tfms = tfms_from_model(resnet34, sz, aug_tfms=transforms_side_on, max_zoom=1.1)
def get_augs():
    """Fetch one randomly augmented training image, denormalised for display.

    Uses the notebook globals `PATH` and `tfms`; each call draws a fresh
    random augmentation, so repeated calls visualise the augmentation space.
    """
    data = ImageClassifierData.from_paths(PATH, bs=2, tfms=tfms, num_workers=1)
    x,_ = next(iter(data.aug_dl))
    return data.trn_ds.denorm(x)[1]
ims = np.stack([get_augs() for i in range(6)])
plots(ims, rows=2)
"""
Explanation: The loss is still clearly improving at lr=1e-2 (0.01), so that's what we use. Note that the optimal learning rate can change as we train the model, so you may want to re-run this function from time to time.
Improving our model
Data augmentation
If you try training for more epochs, you'll notice that we start to overfit, which means that our model is learning to recognize the specific images in the training set, rather than generalizing such that we also get good results on the validation set. One way to fix this is to effectively create more data, through data augmentation. This refers to randomly changing the images in ways that shouldn't impact their interpretation, such as horizontal flipping, zooming, and rotating.
We can do this by passing aug_tfms (augmentation transforms) to tfms_from_model, with a list of functions to apply that randomly change the image however we wish. For photos that are largely taken from the side (e.g. most photos of dogs and cats, as opposed to photos taken from the top down, such as satellite imagery) we can use the pre-defined list of functions transforms_side_on. We can also specify random zooming of images up to specified scale by adding the max_zoom parameter.
End of explanation
"""
data = ImageClassifierData.from_paths(PATH, tfms=tfms)
learn = ConvLearner.pretrained(arch, data, precompute=True)
learn.fit(1e-2, 1)
learn.precompute=False
"""
Explanation: Let's create a new data object that includes this augmentation in the transforms.
End of explanation
"""
learn.fit(1e-2, 30, cycle_len=1)
"""
Explanation: By default when we create a learner, it sets all but the last layer to frozen. That means that it's still only updating the weights in the last layer when we call fit.
End of explanation
"""
learn.sched.plot_lr()
"""
Explanation: What is that cycle_len parameter? What we've done here is used a technique called stochastic gradient descent with restarts (SGDR), a variant of learning rate annealing, which gradually decreases the learning rate as training progresses. This is helpful because as we get closer to the optimal weights, we want to take smaller steps.
However, we may find ourselves in a part of the weight space that isn't very resilient - that is, small changes to the weights may result in big changes to the loss. We want to encourage our model to find parts of the weight space that are both accurate and stable. Therefore, from time to time we increase the learning rate (this is the 'restarts' in 'SGDR'), which will force the model to jump to a different part of the weight space if the current area is "spikey". Here's a picture of how that might look if we reset the learning rates 3 times (in this paper they call it a "cyclic LR schedule"):
<img src="images/sgdr.png" width="80%">
(From the paper Snapshot Ensembles).
The number of epochs between resetting the learning rate is set by cycle_len, and the number of times this happens is refered to as the number of cycles, and is what we're actually passing as the 2nd parameter to fit(). So here's what our actual learning rates looked like:
End of explanation
"""
learn.save('224_lastlayer')
learn.load('224_lastlayer')
"""
Explanation: Our validation loss isn't improving much, so there's probably no point further training the last layer on its own.
Since we've got a pretty good model at this point, we might want to save it so we can load it again later without training it from scratch.
End of explanation
"""
learn.unfreeze()
"""
Explanation: Fine-tuning and differential learning rate annealing
Now that we have a good final layer trained, we can try fine-tuning the other layers. To tell the learner that we want to unfreeze the remaining layers, just call (surprise surprise!) unfreeze().
End of explanation
"""
lr=np.array([1e-4,1e-3,1e-2])
learn.fit(lr, 5, cycle_len=1, cycle_mult=2)
"""
Explanation: Note that the other layers have already been trained to recognize imagenet photos (whereas our final layers were randomly initialized), so we want to be careful of not destroying the carefully tuned weights that are already there.
Generally speaking, the earlier layers (as we've seen) have more general-purpose features. Therefore we would expect them to need less fine-tuning for new datasets. For this reason we will use different learning rates for different layers: the first few layers will be at 1e-4, the middle layers at 1e-3, and our FC layers we'll leave at 1e-2 as before. We refer to this as differential learning rates, although there's no standard name for this technique in the literature that we're aware of.
End of explanation
"""
learn.sched.plot_lr()
"""
Explanation: Another trick we've used here is adding the cycle_mult parameter. Take a look at the following chart, and see if you can figure out what the parameter is doing:
End of explanation
"""
learn.save('224_all')
learn.load('224_all')
"""
Explanation: Note that what's being plotted above is the learning rate of the final layers. The learning rates of the earlier layers are fixed at the same multiples of the final layer rates as we initially requested (i.e. the first layers have 100x smaller, and middle layers 10x smaller learning rates, since we set lr=np.array([1e-4,1e-3,1e-2])).
End of explanation
"""
log_preds,y = learn.TTA()
probs = np.mean(np.exp(log_preds),0)
np.exp(log_preds).shape
probs.shape
accuracy(probs, y)
"""
Explanation: There is something else we can do with data augmentation: use it at inference time (also known as test time). Not surprisingly, this is known as test time augmentation, or just TTA.
TTA simply makes predictions not just on the images in your validation set, but also makes predictions on a number of randomly augmented versions of them too (by default, it uses the original image along with 4 randomly augmented versions). It then takes the average prediction from these images, and uses that. To use TTA on the validation set, we can use the learner's TTA() method.
End of explanation
"""
preds = np.argmax(probs, axis=1)
probs = probs[:,1]
"""
Explanation: I generally see about a 10-20% reduction in error on this dataset when using TTA at this point, which is an amazing result for such a quick and easy technique!
Analyzing results
Confusion matrix
End of explanation
"""
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y, preds)
"""
Explanation: A common way to analyze the result of a classification model is to use a confusion matrix. Scikit-learn has a convenient function we can use for this purpose:
End of explanation
"""
plot_confusion_matrix(cm, data.classes)
"""
Explanation: We can just print out the confusion matrix, or we can show a graphical view (which is mainly useful for dependent variables with a larger number of categories).
End of explanation
"""
plot_val_with_title(most_by_correct(0, False), "Most incorrect cats")
plot_val_with_title(most_by_correct(1, False), "Most incorrect dogs")
"""
Explanation: Looking at pictures again
End of explanation
"""
tfms = tfms_from_model(resnet34, sz)
"""
Explanation: Review: easy steps to train a world-class image classifier
Enable data augmentation, and precompute=True
Use lr_find() to find highest learning rate where loss is still clearly improving
Train last layer from precomputed activations for 1-2 epochs
Train last layer with data augmentation (i.e. precompute=False) for 2-3 epochs with cycle_len=1
Unfreeze all layers
Set earlier layers to 3x-10x lower learning rate than next higher layer
Use lr_find() again
Train full network with cycle_mult=2 until over-fitting
Understanding the code for our first model
Let's look at the Dogs v Cats code line by line.
tfms stands for transformations. tfms_from_model takes care of resizing, image cropping, initial normalization (creating data with (mean,stdev) of (0,1)), and more.
End of explanation
"""
data = ImageClassifierData.from_paths(PATH, tfms=tfms)
ImageClassifierData.from_paths??
"""
Explanation: We need a <b>path</b> that points to the dataset. In this path we will also store temporary data and final results. ImageClassifierData.from_paths reads data from a provided path and creates a dataset ready for training.
End of explanation
"""
learn = ConvLearner.pretrained(resnet34, data, precompute=True)
"""
Explanation: ConvLearner.pretrained builds a learner that contains a pre-trained model. The last layer of the model needs to be replaced with the layer of the right dimensions. The pretrained model was trained for 1000 classes, therefore the final layer predicts a vector of 1000 probabilities. The model for cats and dogs needs to output a two dimensional vector. The diagram below shows in an example how this was done in one of the earliest successful CNNs. The layer "FC8" here would get replaced with a new layer with 2 outputs.
<img src="images/pretrained.png" width="500">
original image
End of explanation
"""
learn.fit(1e-2, 1)
"""
Explanation: Parameters are learned by fitting a model to the data. Hyperparameters are another kind of parameter, that cannot be directly learned from the regular training process. These parameters express “higher-level” properties of the model such as its complexity or how fast it should learn. Two examples of hyperparameters are the learning rate and the number of epochs.
During iterative training of a neural network, a batch or mini-batch is a subset of training samples used in one iteration of Stochastic Gradient Descent (SGD). An epoch is a single pass through the entire training set which consists of multiple iterations of SGD.
We can now fit the model; that is, use gradient descent to find the best parameters for the fully connected layer we added, that can separate cat pictures from dog pictures. We need to pass two hyperameters: the learning rate (generally 1e-2 or 1e-3 is a good starting point, we'll look more at this next) and the number of epochs (you can pass in a higher number and just stop training when you see it's no longer improving, then re-run it with the number of epochs you found works well.)
End of explanation
"""
def binary_loss(y, p):
    """Mean binary cross-entropy between true labels ``y`` (0/1) and the
    predicted probabilities ``p``; both are array-likes of the same length."""
    per_example = y * np.log(p) + (1 - y) * np.log(1 - p)
    return -np.mean(per_example)
acts = np.array([1, 0, 0, 1])
preds = np.array([0.95, 0.1, 0.2, 0.8])
binary_loss(acts, preds)
"""
Explanation: Analyzing results: loss and accuracy
When we run learn.fit we print 3 performance values (see above.) Here 0.03 is the value of the loss in the training set, 0.0226 is the value of the loss in the validation set and 0.9927 is the validation accuracy. What is the loss? What is accuracy? Why not to just show accuracy?
Accuracy is the ratio of correct prediction to the total number of predictions.
In machine learning the loss function or cost function is representing the price paid for inaccuracy of predictions.
The loss associated with one example in binary classification is given by:
-(y * log(p) + (1-y) * log (1-p))
where y is the true label of x and p is the probability predicted by our model that the label is 1.
End of explanation
"""
|
alekz112/Test | Interview+questions.ipynb | mit | def this_and_prev(iterable):
iterator = iter(iterable)
prev_item = None
curr_item = next(iterator)
for next_item in iterator:
yield (prev_item, curr_item)
prev_item = curr_item
curr_item = next_item
yield (prev_item, curr_item)
# Demonstrate the generator. The original used the Python 2 statement form
# `print i,j`, a SyntaxError on Python 3 (the rest of the file uses print()).
for i, j in this_and_prev(range(5)): print(i, j)
"""
Explanation: <h2>Problem 1. Python / Generator functions</h2>
Следующая функция возвращает текущее и предыдущее значения в цикле:
End of explanation
"""
def row_number(driver_id, input_data):
    """Emulate SQL ROW_NUMBER() OVER (ORDER BY start_timestamp) for one driver.

    Args:
        driver_id: the driver identifier shared by all rows in `input_data`.
        input_data: iterable of (start_timestamp, status) tuples.

    Returns:
        A list of dicts with keys 'row_number', 'driver_id',
        'start_timestamp' and 'status', ordered by start_timestamp
        (row_number starts at 0).
    """
    # Two fixes over the original: `sorted` needs the `key=` keyword on
    # Python 3 (the second positional argument was `cmp` in Python 2), and
    # `while row_number <= range(0, len(input_data))` compared an int with a
    # range object — replaced by a plain enumerate over the sorted rows.
    sorted_data = sorted(input_data, key=lambda x: x[0])
    return [
        {
            'row_number': i,
            'driver_id': driver_id,
            'start_timestamp': timestamp,
            'status': status,
        }
        for i, (timestamp, status) in enumerate(sorted_data)
    ]
$row_number = Python::row_number(driver_id, input_data);
$raw = (
SELECT
driver_id
, start_timestamp
, status
FROM sample_table
);
$reduced = (
REDUCE $raw
ON driver_id
USING $row_number((start_timestamp, status))
);
SELECT * FROM $reduced;
"""
Explanation: По аналогии требуется написать функцию, которая будет возвращать текущее и следующее значения.
<i>Type your code below</i>
<h2>Problem 2. SQL / Python</h2>
Есть следующая SQL таблица <b>sample_table</b>:
<table>
<tr><td>column name</td><td><b>driver_id</b></td> <td><b>start_timestamp</b></td> <td><b>status</b></td></tr>
<tr><td>data type</td><td><i>(String)</i></td><td><i>(String)</i></td><td><i>(String)</i></td></tr>
<tr><td>1</td><td>driver_id_1</td><td>2017-01-21 00:05</td><td>driving</td></tr>
<tr><td>2</td><td>driver_id_1</td><td>2017-01-21 00:09</td><td>waiting</td></tr>
<tr><td>...</td><td>...</td><td>...</td><td>...</td></tr>
<tr><td>k x n</td><td>driver_id_n</td><td>2017-01-21 23:49</td><td>transporting</td></tr>
</table>
driver_id_i -- идентификатор i-го водителя
start_timestamp -- время начала статуса, в котором находился водитель
status -- статус, в котором находился водитель
Для простоты предположим, что по каждому водителю в таблице одинаковое число записей k.
<hr>
Табличка хранится в СУБД, которая умеет применять к данным функции, написанные на Python. Например, следующий код выполняет функцию ROW_NUMBER():
End of explanation
"""
|
tensorflow/docs-l10n | site/zh-cn/guide/upgrade.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import tensorflow as tf
print(tf.__version__)
"""
Explanation: 自动将代码升级到 TensorFlow 2
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://tensorflow.google.cn/guide/upgrade"> <img src="https://tensorflow.google.cn/images/tf_logo_32px.png"> View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/upgrade.ipynb"> <img src="https://tensorflow.google.cn/images/colab_logo_32px.png"> Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/zh-cn/guide/upgrade.ipynb"> <img src="https://tensorflow.google.cn/images/GitHub-Mark-32px.png"> View source on GitHub</a>
</td>
<td>
<a target="_blank" href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/zh-cn/guide/upgrade.ipynb"> <img src="https://tensorflow.google.cn/images/download_logo_32px.png"> Download notebook</a>
</td>
</table>
TensorFlow 2.0 包含许多 API 变更,例如重新排序了参数,重命名了符号,更改了参数的默认值。手动执行所有这些修改可能很乏味,而且很容易出错。为了简化更改,尽可能地让您无缝过渡到 TF 2.0,TensorFlow 团队创建了 tf_upgrade_v2 实用工具,帮助您将旧版代码转换至新的 API。
注:TensorFlow 1.13 和更高版本(包括所有 TF 2.0 版本)会自动安装 tf_upgrade_v2。
典型的用法如下:
<pre class="devsite-terminal devsite-click-to-copy prettyprint lang-bsh">tf_upgrade_v2 \
--intree my_project/ \
--outtree my_project_v2/ \
--reportfile report.txt
</pre>
将现有 TensorFlow 1.x Python 脚本转换为 TensorFlow 2.0 脚本可以加快升级流程。
转换脚本会尽可能实现自动化处理,但仍有一些语法和样式变更无法通过脚本执行转换。
兼容性模块
某些 API 符号无法通过简单的字符串替换进行升级。为了确保代码在 TensorFlow 2.0 中仍受支持,升级脚本包含了一个 compat.v1 模块。该模块可将 TF 1.x 符号(如 tf.foo)替换为等效的 tf.compat.v1.foo 引用。虽然该兼容性模块效果不错,但我们仍建议人工校对替换,并尽快将代码迁移到 tf.* 命名空间(而不是 tf.compat.v1 命名空间)中的新 API。
由于 TensorFlow 2.x 模块弃用(例如,tf.flags 和 tf.contrib),切换到 compat.v1 无法解决某些更改。升级此代码可能需要其他库(例如,absl.flags)或切换到 tensorflow/addons 中的软件包。
推荐的升级流程
本指南的剩余部分演示如何使用升级脚本。虽然升级脚本的使用非常简单,我们仍强烈建议在以下流程中使用脚本:
单元测试:确保要升级的代码包含具有合理覆盖范围的单元测试套件。这是 Python 代码,该语言并不会帮助您避免各种类型的错误。同时为了与 TensorFlow 2.0 兼容,还要确保升级所有依赖项。
安装 TensorFlow 1.14:将 TensorFlow 升级到最新的 TensorFlow 1.x 版本(最低为 1.14 版本)。其中包括 tf.compat.v2 中的最终 TensorFlow 2.0 API。
通过 1.14 版本进行测试:确保此时可通过单元测试。在升级过程中,您将反复进行测试,因此,从无错误的代码开始非常重要。
运行升级脚本:对整个源代码树运行 tf_upgrade_v2(已包含测试)。这样可将代码升级为仅使用 TensorFlow 2.0 中所提供的符号的格式。被弃用的符号将通过 tf.compat.v1 进行访问。最终需要人工检查这些升级,但不是现在。
通过 TensorFlow 1.14 运行转换的测试:代码在 TensorFlow 1.14 中应该仍可以正常运行。再次运行单元测试。测试在此时产生任何错误都意味着升级脚本存在错误。请通知我们。
检查更新报告中的警告和错误:该脚本会编写一个对需要复查的转换或需要执行的人工操作进行解释的报告文件。例如:contrib 的所有剩余实例需要通过人工操作删除。请查阅 RFC 中的详细说明。
安装 TensorFlow 2.0:此时应该可以安全切换到 TensorFlow 2.0
使用 v1.disable_v2_behavior 进行测试:使用测试主函数中的 v1.disable_v2_behavior() 重新运行测试产生的结果应与在 1.14 下运行时产生的结果相同。
启用 V2 行为:现在,使用 v2 API 已经成功通过了测试,不妨开始考虑启用 v2 行为。这可能需要执行一些更改,具体取决于代码编写方式。有关详细信息,请参阅迁移指南。
使用升级脚本
设置
开始之前,请确保已安装 TensorlFlow 2.0。
End of explanation
"""
!git clone --branch r1.13.0 --depth 1 https://github.com/tensorflow/models
"""
Explanation: 克隆 tensorflow/models git 仓库,以便获得一些要测试的代码:
End of explanation
"""
!tf_upgrade_v2 -h
"""
Explanation: 读取帮助
脚本应当随 TensorFlow 安装。下面是内置帮助命令:
End of explanation
"""
!head -n 65 models/samples/cookbook/regression/custom_regression.py | tail -n 10
"""
Explanation: TF1 代码示例
下面是一个简单的 TensorFlow 1.0 脚本示例:
End of explanation
"""
!(cd models/samples/cookbook/regression && python custom_regression.py)
"""
Explanation: 对于安装的 TensorFlow 2.0,它不会运行:
End of explanation
"""
!tf_upgrade_v2 \
--infile models/samples/cookbook/regression/custom_regression.py \
--outfile /tmp/custom_regression_v2.py
"""
Explanation: 单个文件
升级脚本可以在单个 Python 文件上运行:
End of explanation
"""
# upgrade the .py files and copy all the other files to the outtree
!tf_upgrade_v2 \
--intree models/samples/cookbook/regression/ \
--outtree regression_v2/ \
--reportfile tree_report.txt
"""
Explanation: 如果无法找到解决代码问题的方法,该脚本会打印错误消息。
目录树
典型项目(包括下面的简单示例)会使用远不止一个文件。通常需要升级整个软件包,所以该脚本也可以在目录树上运行:
End of explanation
"""
!(cd regression_v2 && python custom_regression.py 2>&1) | tail
"""
Explanation: 注意关于 dataset.make_one_shot_iterator 函数的一条警告。
现在,对于 TensorFlow 2.0,该脚本已经可以发挥作用:
请注意,凭借 tf.compat.v1 模块,转换的脚本在 TensorFlow 1.14 中也可以运行。
End of explanation
"""
!head -n 20 tree_report.txt
"""
Explanation: 详细报告
该脚本还会报告一个详细更改列表。在本例中,它发现了一个可能不安全的转换,因此在文件顶部包含了一条警告:
End of explanation
"""
%%writefile dropout.py
import tensorflow as tf
d = tf.nn.dropout(tf.range(10), 0.2)
z = tf.zeros_like(d, optimize=False)
!tf_upgrade_v2 \
--infile dropout.py \
--outfile dropout_v2.py \
--reportfile dropout_report.txt > /dev/null
!cat dropout_report.txt
"""
Explanation: 再次注意关于 Dataset.make_one_shot_iterator 函数的一条警告。
在其他情况下,对于非常用更改,输出会解释原因:
End of explanation
"""
!cat dropout_v2.py
"""
Explanation: 以下是经过修改的文件内容,请注意脚本如何通过添加参数名来处理移动和重命名的参数:
End of explanation
"""
!tf_upgrade_v2 \
--intree models/research/deeplab \
--outtree deeplab_v2 \
--reportfile deeplab_report.txt > /dev/null
"""
Explanation: 更大的项目可能会包含一些错误,例如转换 DeepLab 模型:
End of explanation
"""
!ls deeplab_v2
"""
Explanation: 它会生成输出文件:
End of explanation
"""
!cat deeplab_report.txt | grep -i models/research/deeplab | grep -i error | head -n 3
"""
Explanation: 但是其中包含错误。该报告会帮助您找到确保代码可以正常运行所需要解决的错误。下面是前三个错误:
End of explanation
"""
!cat dropout.py
!tf_upgrade_v2 --mode SAFETY --infile dropout.py --outfile dropout_v2_safe.py > /dev/null
!cat dropout_v2_safe.py
"""
Explanation: “安全”模式
该转换脚本还有一种介入度相对较低的 SAFETY 模式。在此模式下,只需更改导入来使用 tensorflow.compat.v1 模块:
End of explanation
"""
|
xpharry/Udacity-DLFoudation | tutorials/sentiment_network/Sentiment Classification - Project 1 Solution.ipynb | mit | def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
"""
Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network
by Andrew Trask
Twitter: @iamtrask
Blog: http://iamtrask.github.io
What You Should Already Know
neural networks, forward and back-propagation
stochastic gradient descent
mean squared error
and train/test splits
Where to Get Help if You Need it
Re-watch previous Udacity Lectures
Leverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17)
Shoot me a tweet @iamtrask
Tutorial Outline:
Intro: The Importance of "Framing a Problem"
Curate a Dataset
Developing a "Predictive Theory"
PROJECT 1: Quick Theory Validation
Transforming Text to Numbers
PROJECT 2: Creating the Input/Output Data
Putting it all together in a Neural Network
PROJECT 3: Building our Neural Network
Understanding Neural Noise
PROJECT 4: Making Learning Faster by Reducing Noise
Analyzing Inefficiencies in our Network
PROJECT 5: Making our Network Train and Run Faster
Further Noise Reduction
PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary
Analysis: What's going on in the weights?
Lesson: Curate a Dataset
End of explanation
"""
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
"""
Explanation: Lesson: Develop a Predictive Theory
End of explanation
"""
from collections import Counter
import numpy as np
# Tally how often each word appears in positive reviews, in negative reviews,
# and overall; `reviews` and `labels` are the notebook globals loaded earlier.
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
for i in range(len(reviews)):
    if(labels[i] == 'POSITIVE'):
        for word in reviews[i].split(" "):
            positive_counts[word] += 1
            total_counts[word] += 1
    else:
        for word in reviews[i].split(" "):
            negative_counts[word] += 1
            total_counts[word] += 1
positive_counts.most_common()
# Positive-to-negative frequency ratio for words seen more than 100 times;
# the +1 in the denominator avoids division by zero for words that never
# appear in a negative review.
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
    if(cnt > 100):
        pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
        pos_neg_ratios[term] = pos_neg_ratio
# Map ratios into log space so that a neutral word sits near 0 and positive /
# negative words are symmetric around it; the +0.01 appears to be smoothing
# against log of a zero ratio — NOTE(review): confirm intent.
for word,ratio in pos_neg_ratios.most_common():
    if(ratio > 1):
        pos_neg_ratios[word] = np.log(ratio)
    else:
        pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# NOTE(review): this bare `reversed(...)` result is discarded — it looks like
# a leftover notebook cell; the materialised version follows below.
reversed(pos_neg_ratios.most_common())
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
"""
Explanation: Project 1: Quick Theory Validation
End of explanation
"""
|
simulkade/peteng | python/test averaging methods.ipynb | mit | from fipy import Grid2D, CellVariable, FaceVariable
import numpy as np
def upwindValues(mesh, field, velocity):
    """Calculate the upwind face values for a field variable.

    The mesh's faceNormals point from `id1` to `id2`, so when the velocity
    is aligned with a face normal the upwind value comes from the `id1`
    cell, and from the `id2` cell otherwise.

    Args:
        mesh: a fipy mesh
        field: a fipy cell variable or equivalent numpy array
        velocity: a fipy face variable (rank 1) or equivalent numpy array

    Returns:
        numpy array shaped as a fipy face variable
    """
    # Signed flow through every face: positive means along the face normal.
    flow = np.sum(np.array(mesh.faceNormals * velocity), axis=0)
    # Cell indices on either side of each face (shaped as faces).
    cells_1, cells_2 = mesh._adjacentCellIDs
    return np.where(flow >= 0, field[cells_1], field[cells_2])
from fipy import *
import numpy as np
"""
Explanation: Testing averaging methods
From this post
The equation is: $$\frac{\partial\phi}{\partial t}+\nabla . (-D(\phi)\nabla \phi) =0$$
End of explanation
"""
L = 1.0  # domain length
Nx = 100
dx_min = L / Nx  # width of the smallest (first) cell
# Build a geometrically stretched set of node positions: each spacing is 5%
# wider than the previous one, clamped so the last node lands exactly on L.
x = np.array([0.0, dx_min])
while x[-1] < L:
    x = np.append(x, x[-1] + 1.05 * (x[-1] - x[-2]))
x[-1] = L
# The original passed an undefined name `dx` to Grid1D (NameError); the cell
# widths are the spacings between consecutive node positions.
dx = np.diff(x)
mesh = Grid1D(dx=dx)
phi = CellVariable(mesh=mesh, name="phi", hasOld=True, value = 0.0)
phi.constrain(5.0, mesh.facesLeft)
phi.constrain(0., mesh.facesRight)
# D(phi)=D0*(1.0+phi.^2)
# dD(phi)=2.0*D0*phi
D0 = 1.0
dt= 0.01*L*L/D0 # a proper time step for diffusion process
eq = TransientTerm(var=phi) - DiffusionTerm(var=phi, coeff=D0*(1+phi.faceValue**2))
for i in range(4):
for i in range(5):
c_res = eq.sweep(dt = dt)
phi.updateOld()
Viewer(vars = phi, datamax=5.0, datamin=0.0);
# viewer.plot()
"""
Explanation: $$\frac{\partial\phi}{\partial t}+\nabla . \left(-D\left(\phi_{0}\right)\nabla \phi\right)+\nabla.\left(-\nabla \phi_{0}\left(\frac{\partial D}{\partial \phi}\right){\phi{0,face}}\phi\right) =\nabla.\left(-\nabla \phi_{0}\left(\frac{\partial D}{\partial \phi}\right){\phi{0,face}}\phi_{0,face}\right)$$
End of explanation
"""
phi2 = CellVariable(mesh=mesh, name="phi", hasOld=True, value = 0.0)
phi2.constrain(5.0, mesh.facesLeft)
phi2.constrain(0., mesh.facesRight)
# D(phi)=D0*(1.0+phi.^2)
# dD(phi)=2.0*D0*phi
D0 = 1.0
dt= 0.01*L*L/D0 # a proper time step for diffusion process
eq2 = TransientTerm(var=phi2)-DiffusionTerm(var=phi2, coeff=D0*(1+phi2.faceValue**2))+ \
UpwindConvectionTerm(var=phi2, coeff=-2*D0*phi2.faceValue*phi2.faceGrad)== \
(-2*D0*phi2.faceValue*phi2.faceGrad*phi2.faceValue).divergence
for i in range(4):
for i in range(5):
c_res = eq2.sweep(dt = dt)
phi2.updateOld()
viewer = Viewer(vars = [phi, phi2], datamax=5.0, datamin=0.0)
"""
Explanation: $$\frac{\partial\phi}{\partial t}+\nabla . \left(-D\left(\phi_{0}\right)\nabla \phi\right)+\nabla.\left(-\nabla \phi_{0}\left(\frac{\partial D}{\partial \phi}\right){\phi{0,face}}\phi\right) =\nabla.\left(-\nabla \phi_{0}\left(\frac{\partial D}{\partial \phi}\right){\phi{0,face}}\phi_{0,face}\right)$$
End of explanation
"""
phi3 = CellVariable(mesh=mesh, name="phi", hasOld=True, value = 0.0)
phi3.constrain(5.0, mesh.facesLeft)
phi3.constrain(0., mesh.facesRight)
# D(phi)=D0*(1.0+phi.^2)
# dD(phi)=2.0*D0*phi
D0 = 1.0
dt= 0.01*L*L/D0 # a proper time step for diffusion process
u = -2*D0*phi3.faceValue*phi3.faceGrad
eq3 = TransientTerm(var=phi3)-DiffusionTerm(var=phi3, coeff=D0*(1+phi3.faceValue**2))+ \
UpwindConvectionTerm(var=phi3, coeff=-2*D0*phi3.faceValue*phi3.faceGrad)== \
(-2*D0*phi3.faceValue*phi3.faceGrad*phi3.faceValue).divergence
for i in range(4):
for i in range(5):
c_res = eq3.sweep(dt = dt)
phi_face = FaceVariable(mesh, upwindValues(mesh, phi3, u))
u = -2*D0*phi_face*phi3.faceGrad
eq3 = TransientTerm(var=phi3)-DiffusionTerm(var=phi3, coeff=D0*(1+phi3.faceValue**2))+ \
UpwindConvectionTerm(var=phi3, coeff=u)== \
(u*phi_face).divergence
phi3.updateOld()
viewer = Viewer(vars = [phi, phi3], datamax=5.0, datamin=0.0)
"""
Explanation: The above figure shows how the upwind convection term is not consistent with the linear averaging.
End of explanation
"""
|
tpin3694/tpin3694.github.io | regex/match_exact_text.ipynb | mit | # Load regex package
import re
"""
Explanation: Title: Match Exact Text
Slug: match_exact_text
Summary: Match Exact Text
Date: 2016-05-01 12:00
Category: Regex
Tags: Basics
Authors: Chris Albon
Based on: Regular Expressions Cookbook
Preliminaries
End of explanation
"""
# The sample sentence that will be searched.
text = 'The quick brown fox jumped over the lazy brown bear.'
"""
Explanation: Create some text
End of explanation
"""
# Collect every non-overlapping occurrence of the exact text 'The'
# (matching is case-sensitive, so the lowercase 'the' is not found).
pattern = r'The'
re.findall(pattern, text)
"""
Explanation: Apply regex
End of explanation
"""
"""
|
colour-science/colour-hdri | colour_hdri/examples/examples_merge_from_raw_files.ipynb | bsd-3-clause | import logging
import matplotlib.pyplot as plt
import numpy as np
import os
import colour
from colour_hdri import (
EXAMPLES_RESOURCES_DIRECTORY,
Image,
ImageStack,
camera_space_to_sRGB,
convert_dng_files_to_intermediate_files,
convert_raw_files_to_dng_files,
filter_files,
read_exif_tag,
image_stack_to_radiance_image,
update_exif_tags,
weighting_function_Debevec1997,
)
from colour_hdri.plotting import plot_radiance_image_strip
# Emit INFO-level progress messages from the conversion helpers below.
logging.basicConfig(level=logging.INFO)
# Directory holding the example bracketed CR2 exposures.
RESOURCES_DIRECTORY = os.path.join(
    EXAMPLES_RESOURCES_DIRECTORY, "frobisher_001"
)
colour.plotting.colour_style()
colour.utilities.describe_environment();
# Convert the Canon CR2 raw files to DNG files in place.
RAW_FILES = filter_files(RESOURCES_DIRECTORY, ("CR2",))
DNG_FILES = convert_raw_files_to_dng_files(RAW_FILES, RESOURCES_DIRECTORY)
# 3x3 XYZ -> camera space matrix read from the "ColorMatrix2" EXIF tag.
# NOTE(review): metadata is read from the second-to-last DNG file —
# presumably the reference exposure; confirm the intended index.
XYZ_TO_CAMERA_SPACE_MATRIX = colour.utilities.as_float_array(
    [
        float(M_c)
        for M_c in read_exif_tag(DNG_FILES[-2], "ColorMatrix2").split()
    ]
).reshape((3, 3))
# In order to avoid artefacts, white balancing should be performed before
# demosaicing thus we need to pass appropriate gains to *dcraw*.
WHITE_BALANCE_MULTIPLIERS = colour.utilities.as_float_array(
    [
        float(M_c)
        for M_c in read_exif_tag(DNG_FILES[-2], "AsShotNeutral").split()
    ]
)
# "AsShotNeutral" stores the neutral point; the gains are its reciprocal.
WHITE_BALANCE_MULTIPLIERS = 1 / WHITE_BALANCE_MULTIPLIERS
# dcraw arguments: white-balance gains via -r, with the remaining flags
# producing the intermediate demosaiced linear 16-bit Tiff files.
RAW_CONVERTER_ARGUMENTS = (
    '-t 0 -H 1 -r {0} {1} {2} {1} -4 -q 3 -o 0 -T "{{raw_file}}"'.format(
        *WHITE_BALANCE_MULTIPLIERS
    )
)
INTERMEDIATE_FILES = convert_dng_files_to_intermediate_files(
    DNG_FILES,
    RESOURCES_DIRECTORY,
    raw_converter_arguments=RAW_CONVERTER_ARGUMENTS,
)
# Copy the EXIF metadata from the DNG files onto the Tiff files.
update_exif_tags(zip(DNG_FILES, INTERMEDIATE_FILES))
# Preview an encoded crop of one intermediate file.
colour.plotting.plot_image(
    colour.cctf_encoding(
        colour.read_image(str(INTERMEDIATE_FILES[-2]))[
            1250:2250, 3000:4000, ...
        ]
    ),
    text_kwargs={"text": os.path.basename(INTERMEDIATE_FILES[-2])},
);
"""
Explanation: Colour - HDRI - Examples: Merge from Raw Files
Through this example, some Canon EOS 5D Mark II CR2 files will be merged together in order to create a single radiance image.
The following steps will be taken:
Conversion of the CR2 files to DNG files using Adobe DNG Converter.
Conversion of the DNG files to intermediate demosaiced linear Tiff files using Dave Coffin's dcraw.
Creation of an image stack using DNG and intermediate Tiff files:
Reading of the DNG files Exif metadata using Phil Harvey's ExifTool.
Reading of the intermediate Tiff files pixel data using OpenImageIO.
White balancing of the intermediate Tiff files.
Conversion of the intermediate Tiff files to RGB display colourspace.
Merging of the image stack into a radiance image.
Display of the final resulting radiance image.
Note: Some steps can be performed using alternative methods or simplified, for instance the DNG conversion can be entirely avoided. Our interest here is to retrieve the camera levels and the Adobe DNG camera colour profiling data.
CR2 Files Conversion to DNG and Intermediate Files
End of explanation
"""
def merge_from_raw_files(
    dng_files,
    output_directory,
    batch_size=5,
    white_balance_multipliers=None,
    weighting_function=weighting_function_Debevec1997,
):
    """
    Merge batches of DNG exposures into radiance images written as EXR files.

    Parameters
    ----------
    dng_files : sequence
        Paths to the DNG files; their demosaiced intermediate Tiff files are
        expected to sit next to them with a ".tiff" extension.
    output_directory : str
        Directory the merged EXR files are written into.
    batch_size : int, optional
        Number of exposures merged per radiance image.
    white_balance_multipliers : array_like, optional
        White balance gains; defaults to the module-level
        ``WHITE_BALANCE_MULTIPLIERS``.
    weighting_function : callable, optional
        Per-pixel weighting function used by the merge.

    Returns
    -------
    list
        Paths of the written EXR files, one per batch.
    """
    # Bug fix: the parameter used to be ignored in favour of the global.
    if white_balance_multipliers is None:
        white_balance_multipliers = WHITE_BALANCE_MULTIPLIERS

    paths = []
    # Renamed from "dng_files" which shadowed the parameter.
    for dng_batch in colour.utilities.batch(dng_files, batch_size):
        image_stack = ImageStack()
        for dng_file in dng_batch:
            image = Image(dng_file)
            image.read_metadata()
            # Swap only the extension; str.replace("dng", "tiff") could also
            # rewrite a "dng" occurring elsewhere in the path.
            image.path = os.path.splitext(str(dng_file))[0] + ".tiff"
            image.read_data()
            image.data = camera_space_to_sRGB(
                image.data * np.max(white_balance_multipliers),
                XYZ_TO_CAMERA_SPACE_MATRIX,
            )
            image_stack.append(image)

        path = os.path.join(
            output_directory,
            "{0}_{1}_MRF.{2}".format(
                os.path.splitext(os.path.basename(image_stack.path[0]))[0],
                batch_size,
                "exr",
            ),
        )
        paths.append(path)

        # Lazy %-style arguments so formatting only happens if emitted.
        logging.info('Merging "%s"...', path)
        logging.info(
            '\tImage stack "F Number" (Exif): %s', image_stack.f_number
        )
        logging.info(
            '\tImage stack "Exposure Time" (Exif): %s',
            image_stack.exposure_time,
        )
        logging.info('\tImage stack "ISO" (Exif): %s', image_stack.iso)
        image = image_stack_to_radiance_image(
            image_stack, weighting_function, weighting_average=True
        )
        logging.info('Writing "%s"...', path)
        colour.write_image(image, path)
    return paths
PATHS = merge_from_raw_files(DNG_FILES, RESOURCES_DIRECTORY)
"""
Explanation: Radiance Image Merge
End of explanation
"""
plot_radiance_image_strip(colour.read_image(PATHS[0]));
"""
Explanation: Radiance Image Display
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.