repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
dmlc/mxnet | example/multi-task/multi-task-learning.ipynb | apache-2.0 | import logging
import random
import time
import matplotlib.pyplot as plt
import mxnet as mx
from mxnet import gluon, nd, autograd
import numpy as np
"""
Explanation: Multi-Task Learning Example
This is a simple example to show how to use mxnet for multi-task learning.
The network is jointly going to learn whether a number is odd or even and to actually recognize the digit.
For example
1 : 1 and odd
2 : 2 and even
3 : 3 and odd
etc
In this example we don't expect the tasks to contribute to each other much, but for example multi-task learning has been successfully applied to the domain of image captioning. In A Multi-task Learning Approach for Image Captioning by Wei Zhao, Benyou Wang, Jianbo Ye, Min Yang, Zhou Zhao, Ruotian Luo, Yu Qiao, they train a network to jointly classify images and generate text captions
End of explanation
"""
batch_size = 128
epochs = 5
ctx = mx.gpu() if len(mx.test_utils.list_gpus()) > 0 else mx.cpu()
lr = 0.01
"""
Explanation: Parameters
End of explanation
"""
train_dataset = gluon.data.vision.MNIST(train=True)
test_dataset = gluon.data.vision.MNIST(train=False)
def transform(x, y):
    """Prepare one MNIST sample for the multi-task network.

    Moves the channel axis first, scales pixels into [0, 1], and derives
    two float32 targets from the digit label: the digit itself and its
    parity (0 = even, 1 = odd).
    """
    image = x.transpose((2, 0, 1)).astype('float32') / 255.
    digit_label = np.float32(y)
    parity_label = np.float32(y % 2)  # odd or even
    return image, digit_label, parity_label
"""
Explanation: Data
We get the traditional MNIST dataset and add a new label to the existing one. For each digit we return a new label that stands for odd or even.
End of explanation
"""
train_dataset_t = train_dataset.transform(transform)
test_dataset_t = test_dataset.transform(transform)
"""
Explanation: We assign the transform to the original dataset
End of explanation
"""
train_data = gluon.data.DataLoader(train_dataset_t, shuffle=True, last_batch='rollover', batch_size=batch_size, num_workers=5)
test_data = gluon.data.DataLoader(test_dataset_t, shuffle=False, last_batch='rollover', batch_size=batch_size, num_workers=5)
print("Input shape: {}, Target Labels: {}".format(train_dataset[0][0].shape, train_dataset_t[0][1:]))
"""
Explanation: We load the datasets DataLoaders
End of explanation
"""
class MultiTaskNetwork(gluon.HybridBlock):
    """Two-headed network for joint digit and odd/even classification.

    A shared MLP trunk featurizes the input; its output feeds two separate
    heads: a 10-way digit classifier and a single-logit odd/even classifier.
    """

    def __init__(self):
        super(MultiTaskNetwork, self).__init__()
        # Featurization trunk shared by both tasks.
        self.shared = gluon.nn.HybridSequential()
        with self.shared.name_scope():
            self.shared.add(
                gluon.nn.Dense(128, activation='relu'),
                gluon.nn.Dense(64, activation='relu'),
                gluon.nn.Dense(10, activation='relu')
            )
        self.output1 = gluon.nn.Dense(10)  # Digit recognition head (10 classes)
        self.output2 = gluon.nn.Dense(1)   # Odd/even head (single logit)

    def hybrid_forward(self, F, x):
        """Return (digit_logits, odd_even_logit) for the input batch x."""
        y = self.shared(x)
        output1 = self.output1(y)
        output2 = self.output2(y)
        return output1, output2
"""
Explanation: Multi-task Network
The output of the featurization is passed to two different outputs layers
End of explanation
"""
loss_digits = gluon.loss.SoftmaxCELoss()
loss_odd_even = gluon.loss.SigmoidBCELoss()
"""
Explanation: We can use two different losses, one for each output
End of explanation
"""
mx.random.seed(42)
random.seed(42)
net = MultiTaskNetwork()
net.initialize(mx.init.Xavier(), ctx=ctx)
net.hybridize() # hybridize for speed
trainer = gluon.Trainer(net.collect_params(), 'adam', {'learning_rate':lr})
"""
Explanation: We create and initialize the network
End of explanation
"""
def evaluate_accuracy(net, data_iterator):
    """Measure accuracy of both task heads of ``net`` over ``data_iterator``.

    Returns a pair of ``(name, value)`` tuples, one per task, as produced
    by ``mx.metric.Accuracy.get()``.
    """
    digit_metric = mx.metric.Accuracy(name='digits')
    parity_metric = mx.metric.Accuracy(name='odd_even')
    for batch, digit_target, parity_target in data_iterator:
        batch = batch.as_in_context(ctx)
        digit_target = digit_target.as_in_context(ctx)
        # Sigmoid BCE-style targets are shaped (batch, 1).
        parity_target = parity_target.as_in_context(ctx).reshape(-1, 1)
        digit_out, parity_out = net(batch)
        digit_metric.update(digit_target, digit_out.softmax())
        # Threshold the sigmoid output at 0.5 for a hard odd/even prediction.
        parity_metric.update(parity_target, parity_out.sigmoid() > 0.5)
    return digit_metric.get(), parity_metric.get()
"""
Explanation: Evaluate Accuracy
We need to evaluate the accuracy of each task separately
End of explanation
"""
alpha = 0.5 # Combine losses factor; weights the two task losses, in [0, 1]

for e in range(epochs):
    # Accuracies for each task, reset at the start of every epoch.
    acc_digits = mx.metric.Accuracy(name='digits')
    acc_odd_even = mx.metric.Accuracy(name='odd_even')
    # Accumulative losses (running sums over the epoch's batches).
    l_digits_ = 0.
    l_odd_even_ = 0.
    for i, (data, label_digit, label_odd_even) in enumerate(train_data):
        data = data.as_in_context(ctx)
        label_digit = label_digit.as_in_context(ctx)
        # Sigmoid BCE loss expects targets shaped (batch, 1).
        label_odd_even = label_odd_even.as_in_context(ctx).reshape(-1,1)
        with autograd.record():
            output_digit, output_odd_even = net(data)
            l_digits = loss_digits(output_digit, label_digit)
            l_odd_even = loss_odd_even(output_odd_even, label_odd_even)
            # Combine the loss of each task into one scalar objective.
            l_combined = (1-alpha)*l_digits + alpha*l_odd_even
        l_combined.backward()
        # Step scaled by batch size so the update is a per-example average.
        trainer.step(data.shape[0])
        l_digits_ += l_digits.mean()
        l_odd_even_ += l_odd_even.mean()
        acc_digits.update(label_digit, output_digit.softmax())
        # Threshold sigmoid output at 0.5 for a hard odd/even prediction.
        acc_odd_even.update(label_odd_even, output_odd_even.sigmoid() > 0.5)
    # Per-epoch report; losses are averaged over the i+1 batches seen.
    print("Epoch [{}], Acc Digits {:.4f} Loss Digits {:.4f}".format(
        e, acc_digits.get()[1], l_digits_.asscalar()/(i+1)))
    print("Epoch [{}], Acc Odd/Even {:.4f} Loss Odd/Even {:.4f}".format(
        e, acc_odd_even.get()[1], l_odd_even_.asscalar()/(i+1)))
    print("Epoch [{}], Testing Accuracies {}".format(e, evaluate_accuracy(net, test_data)))
"""
Explanation: Training Loop
We need to balance the contribution of each loss to the overall training and do so by tuning this alpha parameter within [0,1].
End of explanation
"""
def get_random_data():
    """Pick a random test image, display it, and return it as a model-ready batch.

    Shows the raw image with matplotlib and returns the transformed sample
    expanded to a batch of one, placed on ``ctx``.
    """
    # Bug fix: random.randint is inclusive on BOTH ends, so the previous
    # randint(0, len(test_dataset)) could produce len(test_dataset), which
    # is one past the last valid index and raises IndexError.
    idx = random.randint(0, len(test_dataset) - 1)
    img = test_dataset[idx][0]
    data, _, _ = test_dataset_t[idx]
    data = data.as_in_context(ctx).expand_dims(axis=0)
    plt.imshow(img.squeeze().asnumpy(), cmap='gray')
    return data
data = get_random_data()
digit, odd_even = net(data)
digit = digit.argmax(axis=1)[0].asnumpy()
odd_even = (odd_even.sigmoid()[0] > 0.5).asnumpy()
print("Predicted digit: {}, odd: {}".format(digit, odd_even))
"""
Explanation: Testing
End of explanation
"""
|
crystalzhaizhai/cs207_yi_zhai | lectures/L3/L3-Part2.ipynb | mit | %%bash
cd /tmp
rm -rf playground #remove if it exists
git clone https://github.com/dsondak/playground.git
%%bash
cd /tmp/playground
git branch -avv
"""
Explanation: Lecture 3: Branches with Git
In Lecture 2, you worked with the playground repository. You learned how to navigate the repository from the Git point of view, make changes to the repo, and work with the remote repo.
One very important topic in Git involves the concept of the branch. You will work extensively with branches in any real project. In fact, branches are central to the Git workflow. In this portion of the lecture, we will discuss branches with Git.
For more details on branches in Git see Chapter 3 of the Git Book: Git Branching - Branches in a Nutshell.
Branching
As you might have seen by now, everything in git is a branch. We have branches on remote (upstream) repositories, copies of remote branches in our local repository, and branches on local repositories which (so far) track remote branches (or more precisely local copies of remote repositories).
Begin by entering your playground repository from last lecture. Note that the following cell is not necessary for you. I have to re-clone the repo since I'm in a new notebook. You should just keep working like you were before.
End of explanation
"""
%%bash
cd /tmp/playground
git branch mybranch1
"""
Explanation: And all of these branches are nothing but commit-streams in disguise, as can be seen above. Its a very simple model which leads to a lot of interesting version control patterns.
Since branches are so light-weight, the recommended way of working on software using git is to create a new branch for each new feature you add, test it out, and if good, merge it into master. Then you deploy the software from master. We have been using branches under the hood. Let's now lift the hood.
branch
Branches can also be created manually, and they are a useful way of organizing unfinished changes.
The branch command has two forms. The first:
git branch
simply lists all of the branches in your local repository. If you run it without having created any branches, it will list only one, called master. This is the default branch. You have also seen the use of git branch -avv to show all branches (even remote ones).
The other form creates a branch with a given name:
It's important to note that the other branch is not active. If you make changes, they will still apply to the master branch, not my-new-branch. That is, after executing the git branch my-new-branch command you're still on the master branch and not the my-new-branch branch. To change this, you need the next command.
checkout
Checkout switches the active branch. Since branches can have different changes, checkout may make the working directory look very different. For instance, if you have added new files to one branch, and then check another branch out, those files will no longer show up in the directory. They are still stored in the .git folder, but since they only exist in the other branch, they cannot be accessed until you check out the original branch.
You can combine creating a new branch and checking it out with the shortcut:
Ok so lets try this out on our repository.
End of explanation
"""
%%bash
cd /tmp/playground
git branch
"""
Explanation: See what branches we have created.
End of explanation
"""
%%bash
cd /tmp/playground
git checkout mybranch1
git branch
"""
Explanation: Jump onto the mybranch1 branch...
End of explanation
"""
%%bash
cd /tmp/playground
ls
"""
Explanation: Notice that it is bootstrapped off the master branch and has the same files.
End of explanation
"""
%%bash
cd /tmp/playground
git status
"""
Explanation: Note You could have created this branch using git checkout -b mybranch1.
Now let's check the status of our repo.
End of explanation
"""
%%bash
cd /tmp/playground
echo '# Things I wish G.R.R. Martin would say: Finally updating A Song of Ice and Fire.' > books.md
git status
"""
Explanation: Alright, so we're on our new branch but we haven't added or modified anything yet; there's nothing to commit.
Adding a file on a new branch
Let's add a new file. Note that this file gets added on this branch only!
End of explanation
"""
%%bash
cd /tmp/playground
git add .
git status
%%bash
cd /tmp/playground
git commit -m "Added another test file to demonstrate git features" -a
git status
"""
Explanation: We add the file to the index, and then commit the files to the local repository on the mybranch1 branch.
End of explanation
"""
%%bash
cd /tmp/playground
git push
"""
Explanation: At this point, we have committed a new file (books.md) to our new branch in our local repo. Our remote repo is still not aware of this new file (or branch). In fact, our master branch is still not really aware of this file.
Note: There are really two options at this point:
1. Push the current branch to our upstream repo. This would correspond to a "long-lived" branch. You may want to do this if you have a version of your code that you are maintaining.
2. Merge the new branch into the local master branch. This will happen much more frequently than the first option. You'll be creating branches all the time for little bug fixes and features. You don't necessary want such branches to be "long-lived". Once your feature is ready, you'll merge the feature branch into the master branch, stage, commit, and push (all on master). Then you'll delete the "short-lived" feature branch.
We'll continue with the first option for now and discuss the other option later.
Long-lived branches
Ok we have committed. Lets try to push!
End of explanation
"""
%%bash
cd /tmp/playground
git push --set-upstream origin mybranch1
"""
Explanation: Fail! Why? Because Git didn't know what to push to on origin (the name of our remote repo) and didn't want to assume we wanted to call the branch mybranch1 on the remote. We need to tell that to Git explicitly (just like it tells us to).
End of explanation
"""
%%bash
cd /tmp/playground
git branch -avv
"""
Explanation: Aha, now we have both a remote and a local for mybranch1
End of explanation
"""
%%bash
cd /tmp/playground
git checkout master
"""
Explanation: We make sure we are back on master
End of explanation
"""
%%bash
cd /tmp/playground
git checkout -b feature-branch
%%bash
cd /tmp/playground
git branch
%%bash
cd /tmp/playground
echo '# The collected works of G.R.R. Martin.' > feature.txt
%%bash
cd /tmp/playground
git status
%%bash
cd /tmp/playground
git add feature.txt
git commit -m 'George finished his books!'
"""
Explanation: Short-lived branches
Now we'll look into option 2 above. Suppose we want to add a feature to our repo. We'll create a new branch to work on that feature, but we don't want this branch to be long-lived. Here's how we can accomplish that.
We'll go a little faster this time since you've seen all these commands before. Even though we're going a little faster this time, make sure you understand what you're doing! Don't just copy and paste!!
End of explanation
"""
%%bash
cd /tmp/playground
git checkout master
ls
"""
Explanation: At this point, we've committed our new feature to our feature branch in our local repo. Presumably it's all tested and everything is working nicely. We'd like to merge it into our master branch now. First, we'll switch to the master branch.
End of explanation
"""
%%bash
cd /tmp/playground
git merge feature-branch
%%bash
cd /tmp/playground
git status
ls
"""
Explanation: The master branch doesn't have any idea about our new feature yet! We should merge the feature branch into the master branch.
End of explanation
"""
%%bash
cd /tmp/playground
git branch -d feature-branch
"""
Explanation: Now our master branch is up to date with our feature branch. We can now delete our feature branch since it is no longer relevant.
End of explanation
"""
%%bash
cd /tmp/playground
git push
"""
Explanation: Finally, let's push the changes to our remote repo.
End of explanation
"""
%%bash
cd /tmp
mkdir Joe
mkdir Sally
"""
Explanation: Great, so now you have a basic understanding of how to work with branches. There is much more to learn, but these commands should get you going. You should really familiarize yourself with Chapter 3 of the Git book for more details and workflow ideas.
Merge Conflicts
Many of you have already experience merge conflicts. The first hurdle to overcome is to learn how to use vim, which you did in the last lecture. Now we will discuss how to deal with the merge conflict.
First, let's pretend that there are two different developers, Sally and Joe.
If you are currently in your playground directory, please go up one directory (i.e. cd ..). You are going to pretend to be two different developers; Sally and Joe.
First, create two new directories; one for Joe and one for Sally.
End of explanation
"""
%%bash
cd /tmp/Joe
git clone https://github.com/dsondak/playground.git
%%bash
cd /tmp/Sally
git clone https://github.com/dsondak/playground.git
"""
Explanation: Now, Joe and Sally both clone your playground repo.
End of explanation
"""
%%bash
cd /tmp/Sally
cd playground
echo '# A Project by Sally' >> intro.md
cat intro.md
"""
Explanation: At this point, Joe and Sally each have a clone of the playground project. They will now each make changes to the same file. We'll work with Sally first (since we're already in her directory).
End of explanation
"""
%%bash
cd /tmp/Sally/playground
git add intro.md
git commit -m 'Attributed the test file to Sally.'
"""
Explanation: Sally is happy with her changes and now decides to commit them to her local repo.
End of explanation
"""
%%bash
cd /tmp/Joe/playground
echo '# A Project by Joe' >> intro.md
cat intro.md
git add intro.md
git commit -m 'Attributed the test file to Joe.'
"""
Explanation: At the same time, Joe has made some changes as well to the same file.
End of explanation
"""
%%bash
cd /tmp/Sally/playground
git push
"""
Explanation: Now the local repositories for Joe and Sally have different histories! Suppose Sally pushes her changes first.
End of explanation
"""
%%bash
cd /tmp/Joe/playground
git push
"""
Explanation: Everything worked splendidly. Sally goes home for the day.
Joe is a little bit slower and tries to push just after Sally.
End of explanation
"""
%%bash
cd /tmp/Joe/playground
git fetch
git merge origin/master
"""
Explanation: Joe realizes that he's made a mistake. Always fetch and merge (or pull) from the remote repo before doing your work for the day or pushing your recent changes. However, he's a little nervous since it only took him a minute to make his changes to intro.md. He realizes that someone else probably did a push in the meantime. Nevertheless, he proceeds.
End of explanation
"""
%%bash
cd /tmp/Joe/playground
cat intro.md
"""
Explanation: There is a conflict in intro.md and Git can't figure out how to resolve the conflict automatically. It doesn't know who's right. Instead, Git produces a file that contains information about the conflict.
End of explanation
"""
%%bash
cd /tmp/Joe/playground
echo '# Project by Sally and Joe' > intro.md
cat intro.md
"""
Explanation: Joe knows that Sally is working on the same project as him (they're teammates) so he's not alarmed. He could contact her about the conflict, but in this case he knows exactly what to do.
Note: Joe will use Linux terminal commands but you should feel free to use the vim text editor (or some another text editor of your choice). Remember, jupyter can't handle text editors.
End of explanation
"""
%%bash
cd /tmp/Joe/playground
git commit -a -m 'Shared attribution between Joe and Sally.'
"""
Explanation: Now Joe needs to stage (add) and commit intro.md to fix the merge conflict.
End of explanation
"""
%%bash
cd /tmp/Joe/playground
git push
"""
Explanation: Finally, Joe is ready to push the changes back to the upstream repository.
End of explanation
"""
%%bash
cd /tmp/Sally/playground
git fetch
git merge origin/master
"""
Explanation: The merge conflict has been resolved! Of course, Sally doesn't yet know about what just happened. She needs to fetch and merge to get the updates.
End of explanation
"""
|
ForestClaw/forestclaw | applications/clawpack/advection/2d/periodic/periodic.ipynb | bsd-2-clause | %%bash
periodic
"""
Explanation: Periodic
<hr style="border-width:4px; border-color:coral">
</hr>
Scalar advection problem of a disk in a periodic domain
Serial mode
<hr style="border-width:2px; border-color:coral">
</hr>
Run code in serial mode (will work, even if code is compiled with MPI)
End of explanation
"""
%%bash
mpirun -n 4 periodic
"""
Explanation: Parallel mode
<hr style="border-width:2px; border-color:coral">
</hr>
Or, run code in parallel mode (command may need to be customized, depending your on MPI installation.)
End of explanation
"""
%%bash
python make_plots.py
"""
Explanation: Graphics
<hr style="border-width:2px; border-color:coral">
</hr>
Create PNG files for web-browser viewing, or animation. This requires a Clawpack installation. You can learn about Clawpack at http://www.clawpack.org
End of explanation
"""
%pylab inline
import glob
from matplotlib import image
# from clawpack.visclaw.JSAnimation import IPython_display
from matplotlib import animation
figno = 0
fname = '_plots/*fig' + str(figno) + '.png'
filenames=sorted(glob.glob(fname))
fig = plt.figure()
im = plt.imshow(image.imread(filenames[0]))
def init():
    """Seed the animation with the first rendered frame."""
    first_frame = image.imread(filenames[0])
    im.set_data(first_frame)
    return (im,)
def animate(i):
    """Swap in the i-th rendered PNG as the current animation frame."""
    frame = image.imread(filenames[i])
    im.set_data(frame)
    return (im,)
animation.FuncAnimation(fig, animate, init_func=init,
frames=len(filenames), interval=500, blit=True)
"""
Explanation: View PNG files in browser, using URL above, or create an animation of all PNG files, using code below.
End of explanation
"""
|
google/learned_optimization | docs/notebooks/Part4_GradientEstimators.ipynb | apache-2.0 | import numpy as np
import jax.numpy as jnp
import jax
import functools
from matplotlib import pylab as plt
from typing import Optional, Tuple, Mapping
from learned_optimization.outer_trainers import full_es
from learned_optimization.outer_trainers import truncated_pes
from learned_optimization.outer_trainers import truncated_grad
from learned_optimization.outer_trainers import gradient_learner
from learned_optimization.outer_trainers import truncation_schedule
from learned_optimization.outer_trainers import common
from learned_optimization.outer_trainers import lopt_truncated_step
from learned_optimization.outer_trainers import truncated_step as truncated_step_mod
from learned_optimization.outer_trainers.gradient_learner import WorkerWeights, GradientEstimatorState, GradientEstimatorOut
from learned_optimization.outer_trainers import common
from learned_optimization.tasks import quadratics
from learned_optimization.tasks.fixed import image_mlp
from learned_optimization.tasks import base as tasks_base
from learned_optimization.learned_optimizers import base as lopt_base
from learned_optimization.learned_optimizers import mlp_lopt
from learned_optimization.optimizers import base as opt_base
from learned_optimization import optimizers
from learned_optimization import training
from learned_optimization import eval_training
import haiku as hk
import tqdm
"""
Explanation: Part 4: GradientEstimators
End of explanation
"""
PRNGKey = jnp.ndarray
class GradientEstimator:
    """Abstract interface for estimating meta-gradients of an unrolled system.

    Concrete estimators hold a ``TruncatedStep`` describing the inner problem
    and implement two methods: one that initializes per-worker unroll state
    and one that computes a gradient estimate with respect to the learnable
    meta-parameters (theta) carried in ``WorkerWeights``.
    """

    # The truncated inner-training step this estimator differentiates through.
    truncated_step: truncated_step_mod.TruncatedStep

    def init_worker_state(self, worker_weights: WorkerWeights,
                          key: PRNGKey) -> GradientEstimatorState:
        """Create the initial unroll state for one worker.

        Args:
          worker_weights: meta-parameters (theta) plus any model/outer state.
          key: JAX PRNG key used for any random initialization.

        Raises:
          NotImplementedError: subclasses must override.
        """
        raise NotImplementedError()

    def compute_gradient_estimate(
        self, worker_weights: WorkerWeights, key: PRNGKey,
        state: GradientEstimatorState, with_summary: Optional[bool]
    ) -> Tuple[GradientEstimatorOut, Mapping[str, jnp.ndarray]]:
        """Estimate gradients w.r.t. theta and advance the unroll state.

        Args:
          worker_weights: current meta-parameters and model state.
          key: JAX PRNG key.
          state: state from ``init_worker_state`` (or a previous call).
          with_summary: whether to also collect summary metrics.

        Returns:
          A tuple of (GradientEstimatorOut, mapping of metric name to value).

        Raises:
          NotImplementedError: subclasses must override.
        """
        raise NotImplementedError()
"""
Explanation: Gradient estimators provide an interface to estimate gradients of some loss with respect to the parameters of some meta-learned system.
GradientEstimator are not specific to learned optimizers, and can be applied to any unrolled system defined by a TruncatedStep (see previous colab).
learned_optimization supports a handful of estimators each with different strengths and weaknesses. Understanding which estimators are right for which situations is an open research question. After providing some introductions to the GradientEstimator class, we provide a quick tour of the different estimators implemented here.
The GradientEstimator base class signature is below.
End of explanation
"""
task_family = quadratics.FixedDimQuadraticFamily(10)
lopt = lopt_base.LearnableAdam()
# With FullES, there are no truncations, so we set trunc_sched to never ending.
trunc_sched = truncation_schedule.NeverEndingTruncationSchedule()
truncated_step = lopt_truncated_step.VectorizedLOptTruncatedStep(
task_family,
lopt,
trunc_sched,
num_tasks=3,
)
"""
Explanation: A gradient estimator must have an instance of a TaskFamily -- or the task that is being used to estimate gradients with, an init_worker_state function -- which initializes the current state of the gradient estimator, and a compute_gradient_estimate function which takes state and computes a bunch of outputs (GradientEstimatorOut) which contain the computed gradients with respect to the learned optimizer, meta-loss values, and various other information about the unroll. Additionally a mapping which contains various metrics is returned.
Both of these methods take in a WorkerWeights instance. This particular piece of data represents the learnable weights needed to compute a gradients including the weights of the learned optimizer, as well as potentially non-learnable running statistics such as those computed with batch norm. In every case this contains the weights of the meta-learned algorithm (e.g. an optimizer) and is called theta. This can also contain other info though. If the learned optimizer has batchnorm, for example, it could also contain running averages.
In the following examples, we will show gradient estimation on learned optimizers using the VectorizedLOptTruncatedStep.
End of explanation
"""
es_trunc_sched = truncation_schedule.ConstantTruncationSchedule(10)
gradient_estimator = full_es.FullES(
truncated_step, truncation_schedule=es_trunc_sched)
key = jax.random.PRNGKey(0)
theta = truncated_step.outer_init(key)
worker_weights = gradient_learner.WorkerWeights(
theta=theta,
theta_model_state=None,
outer_state=gradient_learner.OuterState(0))
"""
Explanation: FullES
The FullES estimator is one of the simplest, and most reliable estimators but can be slow in practice as it does not make use of truncations. Instead, it uses antithetic sampling to estimate a gradient via ES of an entire optimization (hence the full in the name).
First we define a meta-objective, $f(\theta)$, which could be the loss at the end of training, or average loss. Next, we compute a gradient estimate via ES gradient estimation:
$\nabla_\theta f \approx \dfrac{\epsilon}{2\sigma^2} (f(\theta + \epsilon) - f(\theta - \epsilon))$
We can instantiate one of these as follows:
End of explanation
"""
gradient_estimator_state = gradient_estimator.init_worker_state(
worker_weights, key=key)
gradient_estimator_state
"""
Explanation: Because we are working with full length unrolls, this gradient estimator has no state -- there is nothing to keep track of truncation to truncation.
End of explanation
"""
out, metrics = gradient_estimator.compute_gradient_estimate(
worker_weights, key=key, state=gradient_estimator_state, with_summary=False)
out.grad
"""
Explanation: Gradients can be computed with the compute_gradient_estimate method.
End of explanation
"""
trunc_sched = truncation_schedule.ConstantTruncationSchedule(10)
truncated_step = lopt_truncated_step.VectorizedLOptTruncatedStep(
task_family,
lopt,
trunc_sched,
num_tasks=3,
random_initial_iteration_offset=10)
gradient_estimator = truncated_pes.TruncatedPES(
truncated_step=truncated_step, trunc_length=10)
key = jax.random.PRNGKey(1)
theta = truncated_step.outer_init(key)
worker_weights = gradient_learner.WorkerWeights(
theta=theta,
theta_model_state=None,
outer_state=gradient_learner.OuterState(0))
gradient_estimator_state = gradient_estimator.init_worker_state(
worker_weights, key=key)
"""
Explanation: TruncatedPES
Truncated Persistent Evolutionary Strategies (PES) is a unbiased truncation method based on ES. It was proposed in Unbiased Gradient Estimation in Unrolled Computation Graphs with Persistent Evolution Strategies and has been a promising tool for training learned optimizers.
End of explanation
"""
jax.tree_map(lambda x: x.shape, gradient_estimator_state)
"""
Explanation: Now let's look at what this state contains.
End of explanation
"""
out, metrics = gradient_estimator.compute_gradient_estimate(
worker_weights, key=key, state=gradient_estimator_state, with_summary=False)
"""
Explanation: First, this contains 2 instances of SingleState -- one for the positive perturbation, and one for the negative perturbation. Each one of these contains all the necessary state required to keep track of the training run. This means the opt_state, details from the truncation, the task parameters (sample from the task family), the inner_step, and a bool to determine if done or not.
We can compute one gradient estimate as follows.
End of explanation
"""
out.grad
jax.tree_map(lambda x: x.shape, out.unroll_state)
"""
Explanation: This out object contains various outputs from the gradient estimator including gradients with respect to the learned optimizer, as well as the next state of the training models.
End of explanation
"""
print("Progress on inner problem before", out.unroll_state.pos_state.inner_step)
out, metrics = gradient_estimator.compute_gradient_estimate(
worker_weights, key=key, state=out.unroll_state, with_summary=False)
print("Progress on inner problem after", out.unroll_state.pos_state.inner_step)
"""
Explanation: One could simply use these gradients to meta-train, and then use the unroll_states as the next state passed into the compute gradient estimate. For example:
End of explanation
"""
truncated_step = lopt_truncated_step.VectorizedLOptTruncatedStep(
task_family,
lopt,
trunc_sched,
num_tasks=3,
random_initial_iteration_offset=10)
gradient_estimator = truncated_grad.TruncatedGrad(
truncated_step=truncated_step, unroll_length=5, steps_per_jit=5)
key = jax.random.PRNGKey(1)
theta = truncated_step.outer_init(key)
worker_weights = gradient_learner.WorkerWeights(
theta=theta,
theta_model_state=None,
outer_state=gradient_learner.OuterState(0))
gradient_estimator_state = gradient_estimator.init_worker_state(
worker_weights, key=key)
jax.tree_map(lambda x: x.shape, gradient_estimator_state)
out, metrics = gradient_estimator.compute_gradient_estimate(
worker_weights, key=key, state=gradient_estimator_state, with_summary=False)
out.grad
"""
Explanation: TruncatedGrad
TruncatedGrad performs truncated backprop through time. This is great for short unrolls, but can run into memory issues, and/or exploding gradients for longer unrolls.
End of explanation
"""
|
obust/Pandas-Tutorial | Pandas II - Working with DataFrames.ipynb | mit | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
pd.set_option('max_columns', 50)
"""
Explanation: Pandas II - Working with DataFrames
End of explanation
"""
# pass in column names for each CSV
u_cols = ['user_id', 'age', 'sex', 'occupation', 'zip_code']
df_users = pd.read_csv('data/MovieLens-100k/u.user', sep='|', names=u_cols)
r_cols = ['user_id', 'movie_id', 'rating', 'unix_timestamp']
df_ratings = pd.read_csv('data/MovieLens-100k/u.data', sep='\t', names=r_cols)
m_cols = ['movie_id', 'title', 'release_date', 'video_release_date', 'imdb_url']
df_movies = pd.read_csv('data/MovieLens-100k/u.item', sep='|', names=m_cols, usecols=range(5))# only load the first five columns
"""
Explanation: We'll be using the MovieLens dataset in many examples going forward. The dataset contains 100,000 ratings made by 943 users on 1,682 movies.
End of explanation
"""
print df_movies.dtypes,'\n'
print df_users.dtypes,'\n'
print df_ratings.dtypes,'\n'
"""
Explanation: Summary
Inspect<br>
a) .dtype<br>
b) .describe()<br>
c) .head(), .tail(), [i:j]
Select<br>
a) Column Selection<br>
b) Row Selection<br>
Sort<br>
a) .sort() for DataFrames<br>
b) .order() for Series<br>
Operations<br>
a) Descriptive Stats<br>
b) Apply<br>
c) Bins<br>
d) Histograms<br>
Split-Apply-Combine
Other<br>
a) Rename columns<br>
b) Missing values<br>
1. Inspect
Pandas has a variety of functions for getting basic information about your DataFrame.<br>
The most basic of which is calling your DataFrame by name. The output tells a few things about our DataFrame.
It's an instance of a DataFrame.
Each row is assigned an index of 0 to N-1, where N is the number of rows in the DataFrame. (index can be set arbitrary)
There are 1,682 rows (every row must have an index).
Our dataset has five total columns, one of which isn't populated at all (video_release_date) and two that are missing some values (release_date and imdb_url).
a) .dtypes
Use the .dtypes attribute to get the datatype for each column.
End of explanation
"""
df_users.describe()
"""
Explanation: b) .describe()
Use the .describe() method to see the basic statistics about the DataFrame's numeric columns. Be careful though, since this will return information on all columns of a numeric datatype.
End of explanation
"""
print df_users.head()
print df_users.tail(3)
print df_users[20:22]
"""
Explanation: Notice user_id was included since it's numeric. Since this is an ID value, the stats for it don't really matter.
We can quickly see the average age of our users is just above 34 years old, with the youngest being 7 and the oldest being 73. The median age is 31, with the youngest quartile of users being 25 or younger, and the oldest quartile being at least 43.
c) .head(), tail(), [i:j]
By default, .head() displays the first five records of the DataFrame, while .tail() displays the last five.<br>
Alternatively, Python's regular slicing [i:j] syntax works as well.
End of explanation
"""
df_users['occupation'].head()
"""
Explanation: 2. Select
a) Column Selection
You can think of a DataFrame as a group of Series (ie: rows) that share an index (ie: column headers). This makes it easy to select specific columns.
Single column selection<br>
Selecting a single column from the DataFrame will return a Series object.
End of explanation
"""
list_of_cols = ['occupation', 'sex']
print df_users[list_of_cols].head()
"""
Explanation: Multiple columns selection<br>
To select multiple columns, simply pass a list of column names to the DataFrame, the output of which will be a DataFrame.
End of explanation
"""
# users older than 25
print df_users[df_users.age > 25].head(3), '\n'
# users aged 40 AND male
print df_users[(df_users.age == 40) & (df_users.sex == 'M')].head(3), '\n'
# users younger than 30 OR female
print df_users[(df_users.sex == 'F') | (df_users.age < 30)].head(3)
"""
Explanation: b) Row Selection
Row selection can be done multiple ways, but using boolean indexing or individual index .ix() are typically easiest.
Boolean Indexing
End of explanation
"""
# Change index column (new DataFrame)
new_df_users = df_users.set_index('user_id')
print new_df_users.head(3)
# Change index column (inplace)
df_users.set_index('user_id', inplace=True)
print df_users.head(3)
# Select users using their respective user_id
print df_users.ix[99], '\n'
print df_users.ix[[1, 50, 300]]
"""
Explanation: .ix() method
When you change the indexing of a DataFrame to a specific column, you use the default pandas 0-based index.<br>
Use .ix() method for row selection based on the new index.
Let's set the index to the user_id using the .set_index() method.<br>
NB: By default, .set_index() returns a new DataFrame, so you'll have to specify if you'd like the changes to occur in place.
End of explanation
"""
df_users.reset_index(inplace=True)
print df_users.head()
"""
Explanation: Use the .reset_index() method to reset the default index (the same rule apply for inplace).
End of explanation
"""
# Oldest techicians
df_users.sort('age', ascending=False, inplace=True)
print df_users[df_users.occupation == "technician"][:5]
"""
Explanation: 3. Sort
a) .sort() for DataFrames
Use .sort() method to sort DataFrames. Returns a new instance of a Dataframe. (See DOC)
column : column name to base the sorting on (list for nested sorting / tuple for multi-index sorting)
ascending (True) : sort ascending vs. descending (specify list for multiple sort orders)
inplace (False): result is a new instance of DataFrame
End of explanation
"""
print df_users.zip_code.order()[:3]
"""
Explanation: b) .order() for Series
Use .order() method to sort Series. Returns a new instance of a Series.
ascending (True) : sort ascending vs. descending (specify list for multiple sort orders)
inplace (False): result is a new instance of DataFrame
End of explanation
"""
labels = ['0-9', '10-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-79']
bins = range(0, 81, 10) # [0, 10, 20, 30, 40, 50, 60, 70, 80]
df_users['age_group'] = pd.cut(df_users.age, bins, right=False, labels=labels)
print df_users[27:31] # preview of age bin
"""
Explanation: 4. Operations
a) Descriptive Stats
A large number of methods for computing descriptive statistics and other related operations on Series, DataFrame, and Panel. For DataFrames these methods take an axis argument:
axis=0 : compute over indexes
axis=1 : compute over columns
Most methods produce a lower-dimensional result (aka aggregate functions) :
- .count(): number of NOT NULL values
- .nunique(): number of unique NOT NULL values
- .size() : number of values
- .min(): minimum
- .max(): maximum
- .sum(): sum of values
- .prod(): product of values
- .median(): arithmetic median of values
- .quantile(): sample quantile (value at %)
- .mean(): mean of values
- .std(): unbiased standard deviation
- .var(): unbiased variance
- .mad(): mean absolute deviation
- .sem(): unbiased standard error of the mean
- .skew(): unbiased skewness (3rd moment)
- .kurt(): unbiased kurtosis (4th moment)
Some methods produce an object of the same size :
- .rank(): compute data rank (1 through n)
- .mode(): mode
- .abs(): absolute value
- .cumsum(): cumulative sum
- .cumprod(): cumulative product
- .cummax(): cumulative maximum
- .cummin(): cumulative minimum
b) Apply
To apply your own or another library’s functions to pandas objects, you should be aware of the three methods below. The appropriate method to use depends on whether your function expects to operate on an entire DataFrame or Series, row- or column-wise, or elementwise.
Tablewise Function Application: .pipe()
Row or Column-wise Function Application: .apply()
Elementwise function application: .applymap() or .map()
.pipe()
Use .pipe() for method chaining over a DataFrame. (See DOC)<br>
The following two are equivalent :
- f(g(h(df), arg1=1), arg2=2, arg3=3)
- df.pipe(h).pipe(g, arg1=1).pipe(f, arg2=2, arg3=3)
The pipe method is inspired by unix pipes and more recently dplyr and magrittr, which have introduced the popular (%>%) (read pipe) operator for R.
.apply()
Use .apply() to apply a function along the axes of a DataFrame, like the descriptive statistics methods. (See DOC)<br>
- df.apply(np.mean, axis=1)
- df.apply(lambda x: x.max() - x.min())
.applymap() / .map()
Use .applymap() on DataFrame or .map() on Series to operate elementwise.<br>
The vectorized function must take a single value and return a single value.(See DOC)<br>
- df.applymap(lambda x: len(str(x)))
- df['colA'].map(lambda x: len(str(x)))
c) Bins
Use pandas.cut() static method to bin numeric values into groups. Useful for discretization. (DOC)
pandas.cut(x, bins) returns an array of the indices (or labels) of the half-open bins to which each value of x belongs.
x : array of values to be binned
bins : sequence defining the bin edges
right (True): boolean indicating whether the bins include the rightmost edge or not ([a,b] or [a,b[)
labels (None): array used as labels for the resulting bins
End of explanation
"""
df_users['occupation'].value_counts().head()
"""
Explanation: d) Histograms
Use .value_counts() Series method to return the counts of unique values (ie frequency). (See DOC)
End of explanation
"""
!head -n 3 data/city-of-chicago-salaries.csv
"""
Explanation: 5. Split-Apply-Combine
Use .groupby() method to execute the split-apply-combine strategy for data analysis :
1. Split the DataFrame into groups based on some criteria (DataFrameGroupBy or SeriesGroupBy)
2. Apply a function to each group independently
3. Combine the results into a data structure (DataFrame or Series)
DataFrameGroupBy/SeriesGroupBy Methods (See Doc)
- .apply(): apply your own or another library's function or list of functions
- .agg(): aggregate using input function or dict of {column: function}
- .transform(): transform
- .filter(): return a copy of a DataFrame excluding elements from groups
<br>
In the apply step, we might wish to do one of the following:
- Aggregation: computing a summary statistic (or statistics) about each group. Some examples:
- Compute group columns sums and means :
- gby.agg([np.sum, np.mean])
- Compute group sizes and counts :
- gby.agg([np.size, np.mean])
- Transformation: perform some group-specific computations on every data point. Some examples:
- Standardizing data (zscore) within group :
- gby.transform(lambda x: (x - x.mean()) / x.std())
- Filling NAs within groups with a value derived from each group
- gby.fillna(x.mean())
- Filtration: discard some groups, according to a group-wise computation that evaluates True or False. Some examples:
- Discarding data that belongs to groups with only a few members :
- gby.filter(lambda x: x.size() > 100)
- Discarding data based on the group sum or mean
- gby.filter(lambda x: x['A'].sum() + x['B'].sum() > 0)
- Discarding data for missing data
- gby.dropna(axis=0)
City of Chicago salaries
The City of Chicago is kind enough to publish all city employee salaries to its open data portal. Let's go through some basic groupby examples using this data.
End of explanation
"""
headers = ['name', 'title', 'department', 'salary']
df_chicago = pd.read_csv('data/city-of-chicago-salaries.csv',
header=False,
names=headers,
converters={'salary': lambda x: float(x.replace('$', ''))})
print df_chicago.head()
print df_chicago.groupby('department').count().head(3), '\n' # NOT NULL records within each column
print df_chicago.groupby('department').size().head(3) # total records for each department
print df_chicago.groupby('department').agg({'salary': [np.size, np.mean]}).head()
"""
Explanation: Since the data contains a '$' sign for each salary, python will treat the field as a series of strings. We can use the converters parameter to change this when reading in the file.
converters = Dict of functions for converting values in certain columns. Keys can either be integers or column labels
End of explanation
"""
print df_chicago.groupby('department').title.nunique().order(ascending=False)[:3]
"""
Explanation: What departments have the most number of distinct title positions ?
Split DataFrame into groups by departement, keep only title column => SeriesGroupBy
Apply .nunique() method
(Combine into Serie)
Order resulting Serie (NB: .order() is for Series, .sort() is for DataFrames)
End of explanation
"""
print df_chicago.groupby('department').mean().sort('salary', ascending=False).head()
print df_chicago.groupby('department').agg({'salary': [np.size, np.mean]}).sort(('salary', 'mean'), ascending=False).head()
"""
Explanation: What department pays best on average ?
Split DataFrame into groups by departement => DataFrameGroupBy
Apply .mean() method
(Combine into DataFrame)
Sort resulting DataFrame according to the salary (NB: .order() is for Series, .sort() is for DataFrames)
End of explanation
"""
df_chicago['dept_rank'] = df_chicago.groupby('department')['salary'].rank(method='first', ascending=False)
df_chicago.sort('salary', ascending=False, inplace=True)
print df_chicago[df_chicago['dept_rank'] == 1].head()
print df_chicago[df_chicago['department'] == 'MAYOR\'S OFFICE'].tail(10)
"""
Explanation: Who is the highest paid employee of each department ?
Split DataFrame into groups by departement, keep only salary column => SeriesGroupBy
Apply .rank() method
(Combine into Serie)
Assign the resulting Serie to a new column of the DataFrame
Sort DataFrame according to salary (NB: .order() is for Series, .sort() is for DataFrames)
Display only first rankers
For the .rank() method, use attributes:
- ascending=False : to rank high (1) to low (N)
- method='first' : so that equally high paid people within a department don't get the same rank .
End of explanation
"""
|
Davidwwww/intro-numerical-methods | 5_root_finding_optimization.ipynb | mit | def total_value(P, m, r, n):
"""Total value of portfolio given parameters
Based on following formula:
A = \frac{P}{(r / m)} \left[ \left(1 + \frac{r}{m} \right)^{m \cdot n}
- 1 \right ]
:Input:
- *P* (float) - Payment amount per compounding period
- *m* (int) - number of compounding periods per year
- *r* (float) - annual interest rate
- *n* (float) - number of years to retirement
:Returns:
(float) - total value of portfolio
"""
return P / (r / float(m)) * ( (1.0 + r / float(m))**(float(m) * n)
- 1.0)
# Scenario: $1500/month payments, compounded monthly, for 20 years
P = 1500.0
m = 12
n = 20.0
# Sweep the annual interest rate over a plausible range
r = numpy.linspace(0.05, 0.1, 100)
goal = 1e6  # target portfolio value at retirement

fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
# Portfolio value as a function of r, with the goal as a dashed reference line
axes.plot(r, total_value(P, m, r, n))
axes.plot(r, numpy.ones(r.shape) * goal, 'r--')
axes.set_xlabel("r (interest rate)")
axes.set_ylabel("A (total value)")
axes.set_title("When can I retire?")
# Scientific notation on the y-axis since A is on the order of 1e6
axes.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
plt.show()
"""
Explanation: Root Finding and Optimization
GOAL: Find where $f(x) = 0$.
Example: Future Time Annuity
When can I retire?
$$ A = \frac{P}{(r / m)} \left[ \left(1 + \frac{r}{m} \right)^{m \cdot n} - 1 \right ] $$
$P$ is payment amount per compounding period
$m$ number of compounding periods per year
$r$ annual interest rate
$n$ number of years to retirement
$A$ total value after $n$ years
If I want to retire in 20 years what does $r$ need to be?
Set $P = \frac{\$18,000}{12} = \$1500, ~~~~ m=12, ~~~~ n=20$.
End of explanation
"""
def g(P, m, r, n, A):
    """Fixed-point form of the annuity equation, solved for r.

    Implements

        g(r) = \frac{P \cdot m}{A} \left[ \left(1 + \frac{r}{m} \right)^{m \cdot n} - 1 \right ]

    :Input:
     - *P* (float) - Payment amount per compounding period
     - *m* (int) - number of compounding periods per year
     - *r* (float) - annual interest rate
     - *n* (float) - number of years to retirement
     - *A* (float) - total value after $n$ years

    :Returns:
        (float) - value of g(r)
    """
    growth_factor = (1.0 + r / float(m)) ** (float(m) * n)
    return P * m / A * (growth_factor - 1.0)
P = 1500.0
m = 12
n = 20.0
r = numpy.linspace(0.00, 0.1, 100)
goal = 1e6
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(r, g(P, m, r, n, goal))
axes.plot(r, r, 'r--')
axes.set_xlabel("r (interest rate)")
axes.set_ylabel("$g(r)$")
axes.set_title("When can I retire?")
axes.set_ylim([0, 0.12])
axes.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
plt.show()
"""
Explanation: Fixed Point Iteration
How do we go about solving this?
Could try to solve at least partially for $r$:
$$ A = \frac{P}{(r / m)} \left[ \left(1 + \frac{r}{m} \right)^{m \cdot n} - 1 \right ] ~~~~ \Rightarrow ~~~~~$$
$$ r = \frac{P \cdot m}{A} \left[ \left(1 + \frac{r}{m} \right)^{m \cdot n} - 1 \right ] ~~~~ \Rightarrow ~~~~~$$
$$ r = g(r)$$
or
$$ g(r) - r = 0$$
End of explanation
"""
# Fixed point iteration r_{k+1} = g(r_k) starting from an initial guess
r = 0.09
for steps in xrange(10):
    print "r = ", r
    # Residual g(r) - r measures how far r is from being a fixed point
    print "Residual = ", g(P, m, r, n, goal) - r
    r = g(P, m, r, n, goal)
    print
"""
Explanation: Guess at $r_0$ and check to see what direction we need to go...
$r_0 = 0.0800$, $g(r_0) - r_0 = -0.009317550125425428$
$r_1 = 0.0850$, $g(r_1) - r_1 = -0.00505763375972$
$r_2 = 0.0875$, $g(r_2) - r_2 = -0.00257275331014$
A bit tedious, we can also make this algorithmic:
End of explanation
"""
x = numpy.linspace(0.2, 1.0, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, numpy.exp(-x), 'r')
axes.plot(x, x, 'b')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
x = 0.4
for steps in xrange(7):
print "x = ", x
print "Residual = ", numpy.exp(-x) - x
x = numpy.exp(-x)
print
axes.plot(x, numpy.exp(-x),'o',)
plt.show()
"""
Explanation: Example 2:
Let $f(x) = x - e^{-x}$, solve $f(x) = 0$
Equivalent to $x = e^{-x}$ or $x = g(x)$ where $g(x) = e^{-x}$
Note that this problem is equivalent to $x = -\ln x$.
End of explanation
"""
x = numpy.linspace(0.1, 1.0, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, -numpy.log(x), 'r')
axes.plot(x, x, 'b')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
axes.set_ylim([0.0, 1.5])
x = 0.5
for steps in xrange(3):
print "x = ", x
print "Residual = ", numpy.log(x) + x
x = -numpy.log(x)
print
axes.plot(x, -numpy.log(x),'o',)
plt.show()
"""
Explanation: Example 3:
Let $f(x) = \ln x + x$ and solve $f(x) = 0$ or $x = -\ln x$.
End of explanation
"""
x = numpy.linspace(0.0, 1.0, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, numpy.exp(-x), 'r')
axes.plot(x, x, 'b')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
x = numpy.linspace(0.4, 0.8, 100)
axes.plot(numpy.ones(x.shape) * 0.4, numpy.exp(-x),'--k')
axes.plot(x, numpy.ones(x.shape) * numpy.exp(-x[-1]), '--k')
axes.plot(numpy.ones(x.shape) * 0.8, numpy.exp(-x),'--k')
axes.plot(x, numpy.ones(x.shape) * numpy.exp(-x[0]), '--k')
plt.show()
x = numpy.linspace(0.1, 1.0, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, -numpy.log(x), 'r')
axes.plot(x, x, 'b')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
axes.set_ylim([0.0, 1.0])
x = numpy.linspace(0.4, 0.8, 100)
axes.plot(numpy.ones(x.shape) * 0.4, -numpy.log(x),'--k')
axes.plot(x, numpy.ones(x.shape) * -numpy.log(x[-1]), '--k')
axes.plot(numpy.ones(x.shape) * 0.8, -numpy.log(x),'--k')
axes.plot(x, numpy.ones(x.shape) * -numpy.log(x[0]), '--k')
plt.show()
"""
Explanation: These are equivalent problems! Something is awry...
Analysis of Fixed Point Iteration
Theorem: Existence and uniqueness of fixed point problems
Assume $g \in C[a, b]$, if the range of the mapping $y = g(x)$ satisfies $y \in [a, b]~~~ \forall~~~ x \in [a, b]$ then $g$ has a fixed point in $[a, b]$.
End of explanation
"""
x = numpy.linspace(0.4, 0.8, 100)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(x, -numpy.exp(-x), 'r')
axes.set_xlabel("x")
axes.set_ylabel("f(x)")
plt.show()
"""
Explanation: Additionally, suppose $g'(x)$ is defined for $x \in [a,b]$ and $\exists K < 1$ s.t. $|g'(x)| \leq K < 1 ~~~ \forall ~~~ x \in (a,b)$, then $g$ has a unique fixed point $P \in [a,b]$
End of explanation
"""
import sympy
m, P, A, r, n = sympy.symbols('m, P, A, r, n')
(m * P / A * ((1 + r / m)**(m * n) - 1)).diff(r)
"""
Explanation: Theorem 2: Asymptotic convergence behavior of fixed point iterations
$$x_{k+1} = g(x_k)$$
Assume that $\exists ~ x^*$ s.t. $x^* = g(x^*)$
$$x_k = x^* + e_k ~~~~~~~~~~~~~~ x_{k+1} = x^* + e_{k+1}$$
$$x^* + e_{k+1} = g(x^* + e_k)$$
Using a Taylor expansion we know
$$g(x^* + e_k) = g(x^*) + g'(x^*) e_k + \frac{g''(x^*) e_k^2}{2}$$
$$x^* + e_{k+1} = g(x^*) + g'(x^*) e_k + \frac{g''(x^*) e_k^2}{2}$$
Note that because $x^* = g(x^*)$ these terms cancel leaving
$$e_{k+1} = g'(x^*) e_k + \frac{g''(x^*) e_k^2}{2}$$
So if $|g'(x^*)| \leq K < 1$ we can conclude that, to leading order,
$$|e_{k+1}| \leq K |e_k|$$
which shows convergence (although somewhat arbitrarily fast).
Convergence of iterative schemes
Given any iterative scheme where
$$|e_{k+1}| = C |e_k|^n$$
If $C < 1$ and
- $n=1$ then the scheme is linearly convergence
- $n=2$ then the scheme exhibits quadratic convergence
- $n > 1$ the scheme can also be called superlinearly convergent
If $C > 1$ then the scheme is divergent
Examples Revisited
$g(x) = e^{-x}$ with $x^* \approx 0.56$
$$|g'(x^*)| = |-e^{-x^*}| \approx 0.56$$
$g(x) = - \ln x$ with $x^* \approx 0.56$
$$|g'(x^*)| = \frac{1}{|x^*|} \approx 1.79$$
$g(r) = \frac{m P}{A} ((1 + \frac{r}{m})^{mn} - 1)$ with $r^* \approx 0.09$
$$|g'(r^*)| = \frac{P m n}{A} \left(1 + \frac{r}{m} \right)^{m n - 1} \approx 2.15$$
End of explanation
"""
P = 1500.0
m = 12
n = 20.0
A = 1e6
r = numpy.linspace(0.05, 0.1, 100)
f = lambda r, A, m, P, n: A - m * P / r * ((1.0 + r / m)**(m * n) - 1.0)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(r, f(r, A, m, P, n), 'b')
axes.plot(r, numpy.zeros(r.shape),'r--')
axes.set_xlabel("r (%)")
axes.set_ylabel("f(r)")
axes.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
a = 0.075
b = 0.095
axes.plot(a, f(a, A, m, P, n), 'ko')
axes.plot([a, a], [0.0, f(a, A, m, P, n)], 'k--')
axes.plot(b, f(b, A, m, P, n), 'ko')
axes.plot([b, b], [f(b, A, m, P, n), 0.0], 'k--')
plt.show()
"""
Explanation: Better ways for root-finding/optimization
If $x^*$ is a fixed point of $g(x)$ then $x^*$ is also a root of $f(x^*) = g(x^*) - x^*$ s.t. $f(x^*) = 0$.
$$f(r) = r - \frac{m P}{A} \left [ \left (1 + \frac{r}{m} \right)^{m n} - 1 \right ] =0 $$
or
$$f(r) = A - \frac{m P}{r} \left [ \left (1 + \frac{r}{m} \right)^{m n} - 1 \right ] =0 $$
Classical Methods
Bisection (linear convergence)
Newton's Method (quadratic convergence)
Secant Method (super-linear)
Combined Methods
RootSafe (Newton + Bisection)
Brent's Method (Secant + Bisection)
Bracketing and Bisection
A bracket is an interval $[a,b]$ s.t. $\text{sign}(f(a)) \neq \text{sign}(f(b))$.
Theorem: If $f(x) \in C[a,b]$ and $\text{sign}(f(a)) \neq \text{sign}(f(b))$ then there exists a number $c \in (a,b)$ s.t. $f(c) = 0$. (proof uses intermediate value theorem)
End of explanation
"""
P = 1500.0
m = 12
n = 20.0
A = 1e6
r = numpy.linspace(0.05, 0.11, 100)
f = lambda r, A=A, m=m, P=P, n=n: A - m * P / r * ((1.0 + r / m)**(m * n) - 1.0)
# Initialize bracket
a = 0.07
b = 0.10
# Setup figure to plot convergence
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
axes.plot(r, f(r, A, m, P, n), 'b')
axes.plot(r, numpy.zeros(r.shape),'r--')
axes.set_xlabel("r (%)")
axes.set_ylabel("f(r)")
# axes.set_xlim([0.085, 0.091])
axes.ticklabel_format(axis='y', style='sci', scilimits=(-1,1))
axes.plot(a, f(a, A, m, P, n), 'ko')
axes.plot([a, a], [0.0, f(a, A, m, P, n)], 'k--')
axes.plot(b, f(b, A, m, P, n), 'ko')
axes.plot([b, b], [f(b, A, m, P, n), 0.0], 'k--')
# Algorithm parameters
TOLERANCE = 1e-4
MAX_STEPS = 100
# Initialize loop
f_a = f(a)
f_b = f(b)
delta_x = b - a
# Loop until we reach the TOLERANCE or we take MAX_STEPS
for step in xrange(MAX_STEPS):
c = a + delta_x / 2.0
f_c = f(c)
if numpy.sign(f_a) != numpy.sign(f_c):
b = c
f_b = f_c
else:
a = c
f_a = f_c
delta_x = b - a
# Plot iteration
axes.text(c, f(c), str(step))
# Check tolerance - Could also check the size of delta_x
if numpy.abs(f_c) < TOLERANCE:
break
if step == MAX_STEPS:
print "Reached maximum number of steps!"
else:
print "Success!"
print " x* = %s" % c
print " f(x*) = %s" % f(c)
print " number of steps = %s" % step
"""
Explanation: Bisection Algorithm
Given a bracket $[a,b]$ and a function $f(x)$ -
1. Initialize with bracket
2. Iterate
1. Cut bracket in half and check to see where the zero is
2. Set bracket to new bracket based on what direction we went
End of explanation
"""
|
nilbody/h2o-3 | h2o-py/demos/turbofan_phm_gtkerror_NOPASS.ipynb | apache-2.0 | import sys
import h2o
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from h2o.utils.shared_utils import _locate
import numpy as np
import pandas as pd
import seaborn as sns
import pykalman as pyk
sns.set()
doGridSearch = True
doKalmanSmoothing = False #unrelated to h2o, set true for demo
"""
Explanation: Machine Learning Prognostics for Turbofan Engine Degradation Dataset
Information about the problem is at http://ti.arc.nasa.gov/tech/dash/pcoe/prognostic-data-repository/publications/#turbofan and original data is at http://ti.arc.nasa.gov/tech/dash/pcoe/prognostic-data-repository/#turbofan
The data was originally generated using the Commercial Modular Aero-Propulsion System Simulations (C-MAPPS) system.
The approach used in the turbofan engine degradation dataset was then used in the PHM08 challenge. Information about other research on the C-MAPSS data is available at https://www.phmsociety.org/sites/phmsociety.org/files/phm_submission/2014/phmc_14_063.pdf
End of explanation
"""
# Input files don't have column names
dependent_vars = ['RemainingUsefulLife']
index_columns_names = ["UnitNumber","Cycle"]
operational_settings_columns_names = ["OpSet"+str(i) for i in range(1,4)]
sensor_measure_columns_names =["SensorMeasure"+str(i) for i in range(1,22)]
input_file_column_names = index_columns_names + operational_settings_columns_names + sensor_measure_columns_names
# And we are going to add these columns
kalman_smoothed_mean_columns_names =["SensorMeasureKalmanMean"+str(i) for i in range(1,22)]
"""
Explanation: Preprocessing
End of explanation
"""
train = pd.read_csv("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/train_FD001.txt", sep=r"\s*", header=None,
names=input_file_column_names, engine='python')
test = pd.read_csv("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/test_FD001.txt", sep=r"\s*", header=None,
names=input_file_column_names, engine='python')
test_rul = pd.read_csv("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/RUL_FD001.txt", header=None, names=['RemainingUsefulLife'])
test_rul.index += 1 # set the index to be the unit number in the test data set
test_rul.index.name = "UnitNumber"
"""
Explanation: Read in the raw files
End of explanation
"""
# Calculate the remaining useful life for each training sample based on last measurement being zero remaining
grouped_train = train.groupby('UnitNumber', as_index=False)
useful_life_train = grouped_train.agg({'Cycle' : np.max })
useful_life_train.rename(columns={'Cycle': 'UsefulLife'}, inplace=True)
train_wfeatures = pd.merge(train, useful_life_train, on="UnitNumber")
train_wfeatures["RemainingUsefulLife"] = -(train_wfeatures.UsefulLife - train_wfeatures.Cycle)
train_wfeatures.drop('UsefulLife', axis=1, inplace=True)
grouped_test = test.groupby('UnitNumber', as_index=False)
useful_life_test = grouped_test.agg({'Cycle' : np.max })
useful_life_test.rename(columns={'Cycle': 'UsefulLife'}, inplace=True)
test_wfeatures = pd.merge(test, useful_life_test, on="UnitNumber")
test_wfeatures["RemainingUsefulLife"] = -(test_wfeatures.UsefulLife - test_wfeatures.Cycle)
test_wfeatures.drop('UsefulLife', axis=1, inplace=True)
"""
Explanation: Calculate Remaining Useful Life in T-minus notation for the training data
This puts all data on the same basis for supervised training
End of explanation
"""
sns.set_context("notebook", font_scale=1.5)
p = sns.pairplot(train_wfeatures.query('UnitNumber < 10'),
vars=["RemainingUsefulLife", "SensorMeasure4", "SensorMeasure3",
"SensorMeasure9", "SensorMeasure8", "SensorMeasure13"], size=10,
hue="UnitNumber", palette=sns.color_palette("husl", 9));
sns.plt.show()
"""
Explanation: Exploratory Data Analysis
Look at how the sensor measures evolve over time (first column) as well as how they relate to each other for a subset of the units.
These features were the top 3 and bottom 2 most important sensor features as discovered by H2O's GBM, later in the notebook.
End of explanation
"""
kalman_smoothed_mean_columns_names =["SensorMeasureKalmanMean"+str(i) for i in range(1,22)]
def calcSmooth(measures):
    """Kalman-smooth a (time, n_sensors) array of raw sensor measurements.

    Fits the Kalman filter parameters with the EM algorithm, then runs the
    smoother over the whole sequence with those parameters.

    :Input:
     - *measures* (numpy.ndarray) - rows are time steps, columns are sensors
    :Returns:
        (numpy.ndarray) - smoothed state means; presumably the same shape as
        *measures* since one state dimension per observed sensor is used --
        confirm against pykalman's defaults
    """
    # Seed the filter at the first observation; one observed dim per sensor
    kf = pyk.KalmanFilter(initial_state_mean=measures[0], n_dim_obs=measures.shape[1])
    # em() estimates the model parameters; smooth() applies them to the data
    (smoothed_state_means, smoothed_state_covariances) = kf.em(measures).smooth(measures)
    return smoothed_state_means
def filterEachUnit(df):
    """Kalman-smooth the sensor measurements of each engine unit.

    For every distinct ``UnitNumber`` in *df* the raw ``SensorMeasure*``
    columns are smoothed via ``calcSmooth`` and the results stored in the
    ``SensorMeasureKalmanMean*`` columns.

    :Input:
     - *df* (pandas.DataFrame) - frame with ``UnitNumber`` and the raw
       ``SensorMeasure*`` columns
    :Returns:
        (pandas.DataFrame) - copy of *df* with the smoothed columns filled
    """
    dfout = df.copy()
    # Pre-create the output columns so the .loc assignment below lines up
    for newcol in kalman_smoothed_mean_columns_names:
        dfout[newcol] = np.nan
    for unit in dfout.UnitNumber.unique():
        # '\r' keeps the progress message on a single console line.
        # (The original wrote the identical message twice per iteration;
        # the redundant second write has been removed.)
        sys.stdout.write('\rProcessing Unit: %d' % unit)
        sys.stdout.flush()
        unitmeasures = dfout[dfout.UnitNumber == unit][sensor_measure_columns_names]
        smoothed_state_means = calcSmooth( np.asarray( unitmeasures ) )
        dfout.loc[dfout.UnitNumber == unit, kalman_smoothed_mean_columns_names] = smoothed_state_means
    sys.stdout.write('\rFinished\n')
    sys.stdout.flush()
    return dfout
"""
Explanation: Signal processing using Kalman smoothing filter
Kalman parameters were determined using EM algorithm and then those parameters are used for smoothing the signal data.
This is applied repeatedly to each Unit, in both the training and test set.
End of explanation
"""
# Get picky about the order of output columns
test_output_cols = index_columns_names + operational_settings_columns_names + sensor_measure_columns_names + \
kalman_smoothed_mean_columns_names
train_output_cols = test_output_cols + dependent_vars
if doKalmanSmoothing:
train_wkalman = filterEachUnit(train_wfeatures)
test_wkalman = filterEachUnit(test_wfeatures)
train_output = train_wkalman[train_output_cols]
test_output = test_wkalman[test_output_cols]
# Output the files, so we don't have to do the preprocessing again.
if doKalmanSmoothing:
train_output.to_csv("train_FD001_preprocessed.csv", index=False)
test_output.to_csv("test_FD001_preprocessed.csv", index=False)
test_rul.to_csv("rul_FD001_preprocessed.csv", index=True)
"""
Explanation: Output the results to files
Helps so preprocessing only has to be done once.
End of explanation
"""
h2o.init()
"""
Explanation: Modeling
Startup H2O
End of explanation
"""
#Pull Kalman-smoothed data if generated locally, or source from AWS
if doKalmanSmoothing:
train_hex = h2o.import_file(_locate("train_FD001_preprocessed.csv"))
test_hex = h2o.import_file(_locate("test_FD001_preprocessed.csv"))
else:
train_hex = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/train_FD001_preprocessed.csv")
test_hex = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/test_FD001_preprocessed.csv")
"""
Explanation: Load training and final test data into H2O
End of explanation
"""
xCols= operational_settings_columns_names + kalman_smoothed_mean_columns_names
yCol = dependent_vars
foldCol = "UnitNumberMod10"
train_hex[foldCol] = train_hex["UnitNumber"] % 10
"""
Explanation: Setup independent and dependent features
Use the operational settings and Kalman smoothed mean states as the independent features
Setup a fold column to great cross validation models from 90 units and cross validating on 10 units. This creates a 10-fold cross validation. The cross validation models are then used to create an ensemble model for predictions
End of explanation
"""
def trainGLM(x, y, fold_column, training_frame, alpha=0.5, penalty=1e-5):
    """Fit a gaussian GLM, cross-validated on the folds in *fold_column*."""
    glm = H2OGeneralizedLinearEstimator(family="gaussian",
                                        alpha=[alpha],
                                        Lambda=[penalty])
    glm.train(x=x, y=y, training_frame=training_frame,
              fold_column=fold_column)
    return glm
def gridSearchGLM(x, y, fold_column, training_frame, alphas = [0,0.5,1], penalties=np.logspace(-3,0,num=4)):
    """Train one GLM per (alpha, penalty) combination and return them all."""
    # Same ordering as the nested loops: alpha varies slowest
    return [trainGLM(x, y, fold_column, training_frame, alpha, penalty)
            for alpha in alphas
            for penalty in penalties]
if doGridSearch:
glmModels = gridSearchGLM(xCols, yCol, foldCol, train_hex)
else:
# this is used to speed up the demonstration by just building the single model previously found
glmModels = [ trainGLM(xCols, yCol, foldCol, train_hex, alpha=1, penalty=0.01 )]
"""
Explanation: Train a series of GLM Models using Grid Search over $\alpha$ and $\lambda$
End of explanation
"""
def extractBestModel(models):
    """Return the model with the lowest cross-validation MSE.

    Ties are broken in favour of the earliest model in the list,
    matching a left-to-right linear scan.
    """
    return min(models, key=lambda m: m.mse(xval=True))
# Pick the cross-validated GLM with the lowest MSE and display its summary.
bestModel = extractBestModel(glmModels)
bestModel
"""
Explanation: Extract the 'best' model
Uses model with lowest MSE on the cross validation data.
This is a reasonable substitute for using the final scoring method.
End of explanation
"""
def trainGBM(x, y, fold_column, training_frame, learning_rate=0.1, ntrees=50, max_depth=5):
    """Fit a Gaussian-distribution H2O GBM with fold-column cross validation.

    Returns the trained estimator (cross-validation models attached).
    """
    gbm = H2OGradientBoostingEstimator(
        distribution="gaussian",
        learn_rate=learning_rate,
        ntrees=ntrees,
        max_depth=max_depth,
    )
    gbm.train(x=x, y=y, training_frame=training_frame, fold_column=fold_column)
    return gbm
def gridSearchGBM(x, y, fold_column, training_frame, learning_rates = [0.1,0.03,0.01], ntrees=[10,30,100,300], max_depth=[1,3,5]):
    """Train one GBM per (learning_rate, ntrees, max_depth) combination.

    Logs each combination before fitting and returns the list of trained
    models in cartesian-product order.
    """
    results = []
    for learning_rate in learning_rates:
        for ntree in ntrees:
            for depth in max_depth:
                # print() call form: the original used a Python 2 print
                # statement, which is a SyntaxError under Python 3.
                print("GBM: {learning rate: " + str(learning_rate) + "},{ntrees: " + str(ntree) + "},{max_depth: " + str(depth) + "}")
                results.append(trainGBM(x, y, fold_column, training_frame,
                                        learning_rate=learning_rate, ntrees=ntree, max_depth=depth))
    return results
# GBM grid search (or a single known-good model when doGridSearch is False).
if doGridSearch:
    #bmModels = gridSearchGBM(xCols, yCol, foldCol, train_hex,\
    #                     learning_rates=[0.03,0.01,0.003], ntrees=[100,300,500], max_depth=[1,3,5])
    #run the below line for fast demo
    gbmModels = gridSearchGBM(xCols, yCol, foldCol, train_hex, learning_rates=[0.03,0.01], ntrees=[50,200], max_depth=[2,5])
else:
    gbmModels = [trainGBM(xCols, yCol, foldCol, train_hex, \
                 ntrees=300, max_depth=5)]
# Select the GBM with the lowest cross-validation MSE.
bestGbmModel = extractBestModel(gbmModels)
"""
Explanation: Build a series of GBM models using grid search for hyper-parameters
Extract the 'best' model using the same approach as with GLM.
End of explanation
"""
# Inspect the hyper-parameters of the winning GBM (notebook echoes the value).
bestGbmModel.params
"""
Explanation: Best model had depth 5, learning rate 0.01, and 300 trees
End of explanation
"""
# Display the full model summary, including cross-validation metrics.
bestGbmModel
"""
Explanation: Best GBM Model reported MSE on cross validation data as 1687, an improvement from GLM of 1954.
End of explanation
"""
train_hex["weights"] = 1
# Ensemble prediction: average the predictions of the cross-validation models.
allModels = bestGbmModel.xvals
pred = sum([model.predict(train_hex) for model in allModels]) / len(allModels)
# Carry along actuals and unit ids so plots can be grouped per unit.
pred["actual"] = train_hex["RemainingUsefulLife"]
pred["unit"] = train_hex["UnitNumber"]
"""
Explanation: Exploratory model analysis
See how well the models do predicting on the training set. Should be pretty good, but often worth a check.
Predictions are an ensemble of the 10-fold cross validation models.
End of explanation
"""
# Move predictions into pandas for Seaborn plotting.
scored_df = pred.as_data_frame(use_pandas=True)
sns.set_context("notebook", font_scale=3)
# One scatter panel per unit (first 12 units): predicted vs actual RUL.
g=sns.lmplot(x="actual",y="predict",hue="unit",col="unit",data=scored_df[scored_df.unit < 13],col_wrap=3,fit_reg=False, size=10)
ticks = np.linspace(-300,100, 5)
g = (g.set_axis_labels("Remaining Useful Life", "Predicted Useful Life")
     .set(xlim=(-325, 125), ylim=(-325, 125),
          xticks=ticks, yticks=ticks))
# NOTE(review): bare expression below only echoes the tick values in the
# notebook output; it has no other effect and could be removed.
np.linspace(-300,100, 5)
"""
Explanation: Plot actual remaining useful life vs predicted remaining useful life
Ideally all points would be on the diagonal, indicating that the prediction exactly matched the actual value.
Also, it is important that the prediction gets more accurate the closer it gets to no useful life remaining.
Looking at a sample of the first 12 units.
Moved predictions from H2O to Python Pandas for plotting using Seaborn.
End of explanation
"""
# Ensemble prediction on the hold-out test set using the same CV models.
testPreds = sum([model.predict(test_hex) for model in allModels]) / len(allModels)
"""
Explanation: Model prediction and assessment
Predict on the hold-out test set, using an average of all the cross validation models.
End of explanation
"""
# Re-attach the index columns (Cycle, UnitNumber) lost during prediction.
testPreds["Cycle"] = test_hex["Cycle"]
testPreds["UnitNumber"] = test_hex["UnitNumber"]
"""
Explanation: Append the original index information (Cycle and UnitNumber) to the predicted values so we have them later.
End of explanation
"""
# Move test-set predictions from H2O into pandas for final scoring.
testPreds_df = testPreds.as_data_frame(use_pandas=True)
"""
Explanation: Move the predictions over to Python Pandas for final analysis and scoring
End of explanation
"""
if doKalmanSmoothing:
    # Ground-truth remaining useful life, preprocessed locally...
    actual_RUL = pd.read_csv(_locate("rul_FD001_preprocessed.csv"))
else:
    # ...or the pre-built copy hosted on S3.
    actual_RUL = pd.read_csv("http://h2o-public-test-data.s3.amazonaws.com/bigdata/laptop/CMAPSSData/rul_FD001_preprocessed.csv")
"""
Explanation: Load up the actual Remaining Useful Life information.
End of explanation
"""
def aggfunc(x):
    """Mean of the three largest values in the series *x*.

    Aggregates the per-cycle predictions of a unit down to one value.
    Uses Series.sort_values(): the original Series.order() was removed
    from pandas (in 0.20), so this function raised AttributeError on any
    modern pandas.
    """
    return np.mean(x.sort_values().tail(3))
# One score per unit: aggregate each unit's per-cycle predictions with aggfunc.
grouped_by_unit_preds = testPreds_df.groupby("UnitNumber", as_index=False)
predictedRUL = grouped_by_unit_preds.agg({'predict' : aggfunc })
# Predictions are in T-minus form (negative); flip sign to cycles remaining.
predictedRUL.predict = -predictedRUL.predict
"""
Explanation: The final scoring used in the competition is based on a single value per unit. We extract the last three predictions and use the mean of those (simple aggregation) and put the prediction back from remaining useful life in T-minus format to cycles remaining (positive).
End of explanation
"""
# Align actual RUL with the per-unit predictions (rows are ordered by unit).
final = pd.concat([actual_RUL, predictedRUL.predict], axis=1)
def rowScore(row):
    """Asymmetric exponential penalty for one unit's prediction error.

    Over-estimating remaining life (error > 0) is penalised more steeply
    (decay constant 13) than under-estimating it (decay constant 10);
    a perfect prediction scores 0.
    """
    err = row.predict - row.RemainingUsefulLife
    if err < 0:
        return np.exp(-err / 10) - 1
    return np.exp(err / 13) - 1
# Apply the asymmetric PHM08 penalty to every unit.
rowScores = final.apply(rowScore, axis=1)
"""
Explanation: Add the prediction to the actual data frame, and use the scoring used in the PHMO8 competition (more penality for predicting more useful life than there is actual).
End of explanation
"""
# Final PHM08-style competition score for the test set (lower is better).
sum(rowScores)
"""
Explanation: This is the final score using PHM08 method of scoring.
End of explanation
"""
# Scatter of predicted vs actual RUL across all test units.
# NOTE(review): newer seaborn versions require keyword arguments here
# (x="RemainingUsefulLife", y="predict") — confirm against the pinned version.
sns.set_context("notebook", font_scale=1.25)
sns.regplot("RemainingUsefulLife", "predict", data=final, fit_reg=False);
"""
Explanation: Finally look at the actual remaining useful life and compare to predicted
Some things that should ideally would be true:
- As RUL gets closer to zero, the prediction gets closer to actual
End of explanation
"""
|
KrisCheng/ML-Learning | archive/MOOC/Deeplearning_AI/ImprovingDeepNeuralNetworks/OptimizationMethods/Optimization+methods.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
import scipy.io
import math
import sklearn
import sklearn.datasets
from opt_utils import load_params_and_grads, initialize_parameters, forward_propagation, backward_propagation
from opt_utils import compute_cost, predict, predict_dec, plot_decision_boundary, load_dataset
from testCases import *
%matplotlib inline
plt.rcParams['figure.figsize'] = (7.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
"""
Explanation: Optimization Methods
Until now, you've always used Gradient Descent to update the parameters and minimize the cost. In this notebook, you will learn more advanced optimization methods that can speed up learning and perhaps even get you to a better final value for the cost function. Having a good optimization algorithm can be the difference between waiting days vs. just a few hours to get a good result.
Gradient descent goes "downhill" on a cost function $J$. Think of it as trying to do this:
<img src="images/cost.jpg" style="width:650px;height:300px;">
<caption><center> <u> Figure 1 </u>: Minimizing the cost is like finding the lowest point in a hilly landscape<br> At each step of the training, you update your parameters following a certain direction to try to get to the lowest possible point. </center></caption>
Notations: As usual, $\frac{\partial J}{\partial a } = $ da for any variable a.
To get started, run the following code to import the libraries you will need.
End of explanation
"""
# GRADED FUNCTION: update_parameters_with_gd
def update_parameters_with_gd(parameters, grads, learning_rate):
    """
    Apply one step of batch gradient descent to every layer's weights.

    Arguments:
    parameters -- dict with entries 'W1', 'b1', ..., 'WL', 'bL' (numpy arrays)
    grads -- dict with matching gradient entries 'dW1', 'db1', ..., 'dWL', 'dbL'
    learning_rate -- step size (scalar)

    Returns:
    parameters -- the same dict, with every Wl and bl moved one step
                  against its gradient
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer

    # W <- W - alpha * dW ; b <- b - alpha * db, for every layer 1..L
    for layer in range(1, num_layers + 1):
        w_key, b_key = "W" + str(layer), "b" + str(layer)
        parameters[w_key] = parameters[w_key] - learning_rate * grads["dW" + str(layer)]
        parameters[b_key] = parameters[b_key] - learning_rate * grads["db" + str(layer)]

    return parameters
# Smoke test: run one GD step on the fixture parameters and show the result.
parameters, grads, learning_rate = update_parameters_with_gd_test_case()
parameters = update_parameters_with_gd(parameters, grads, learning_rate)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
"""
Explanation: 1 - Gradient Descent
A simple optimization method in machine learning is gradient descent (GD). When you take gradient steps with respect to all $m$ examples on each step, it is also called Batch Gradient Descent.
Warm-up exercise: Implement the gradient descent update rule. The gradient descent rule is, for $l = 1, ..., L$:
$$ W^{[l]} = W^{[l]} - \alpha \text{ } dW^{[l]} \tag{1}$$
$$ b^{[l]} = b^{[l]} - \alpha \text{ } db^{[l]} \tag{2}$$
where L is the number of layers and $\alpha$ is the learning rate. All parameters should be stored in the parameters dictionary. Note that the iterator l starts at 0 in the for loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift l to l+1 when coding.
End of explanation
"""
# GRADED FUNCTION: random_mini_batches
def random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):
    """
    Split (X, Y) into a list of shuffled mini-batches.

    Arguments:
    X -- input data, shape (input size, number of examples)
    Y -- labels (1 for blue dot / 0 for red dot), shape (1, number of examples)
    mini_batch_size -- number of examples per batch, integer
    seed -- RNG seed so the shuffle is reproducible

    Returns:
    list of (mini_batch_X, mini_batch_Y) tuples; the last batch may hold
    fewer than mini_batch_size examples when m is not a multiple of it.
    """
    np.random.seed(seed)  # deterministic shuffle for grading/reproducibility
    m = X.shape[1]

    # Step 1: shuffle columns of X and Y with one shared permutation so
    # every example keeps its own label.
    order = list(np.random.permutation(m))
    X_shuf = X[:, order]
    Y_shuf = Y[:, order].reshape((1, m))

    # Step 2: cut the shuffled data into consecutive column slices.
    num_full = math.floor(m / mini_batch_size)
    batches = []
    for k in range(num_full):
        lo = k * mini_batch_size
        hi = lo + mini_batch_size
        batches.append((X_shuf[:, lo:hi], Y_shuf[:, lo:hi]))

    # Trailing partial batch, when m is not divisible by mini_batch_size.
    if m % mini_batch_size != 0:
        tail = num_full * mini_batch_size
        batches.append((X_shuf[:, tail:], Y_shuf[:, tail:]))

    return batches
# Smoke test: check batch shapes and a few sample values on the fixture data.
X_assess, Y_assess, mini_batch_size = random_mini_batches_test_case()
mini_batches = random_mini_batches(X_assess, Y_assess, mini_batch_size)
print ("shape of the 1st mini_batch_X: " + str(mini_batches[0][0].shape))
print ("shape of the 2nd mini_batch_X: " + str(mini_batches[1][0].shape))
print ("shape of the 3rd mini_batch_X: " + str(mini_batches[2][0].shape))
print ("shape of the 1st mini_batch_Y: " + str(mini_batches[0][1].shape))
print ("shape of the 2nd mini_batch_Y: " + str(mini_batches[1][1].shape))
print ("shape of the 3rd mini_batch_Y: " + str(mini_batches[2][1].shape))
print ("mini batch sanity check: " + str(mini_batches[0][0][0][0:3]))
"""
Explanation: Expected Output:
<table>
<tr>
<td > **W1** </td>
<td > [[ 1.63535156 -0.62320365 -0.53718766]
[-1.07799357 0.85639907 -2.29470142]] </td>
</tr>
<tr>
<td > **b1** </td>
<td > [[ 1.74604067]
[-0.75184921]] </td>
</tr>
<tr>
<td > **W2** </td>
<td > [[ 0.32171798 -0.25467393 1.46902454]
[-2.05617317 -0.31554548 -0.3756023 ]
[ 1.1404819 -1.09976462 -0.1612551 ]] </td>
</tr>
<tr>
<td > **b2** </td>
<td > [[-0.88020257]
[ 0.02561572]
[ 0.57539477]] </td>
</tr>
</table>
A variant of this is Stochastic Gradient Descent (SGD), which is equivalent to mini-batch gradient descent where each mini-batch has just 1 example. The update rule that you have just implemented does not change. What changes is that you would be computing gradients on just one training example at a time, rather than on the whole training set. The code examples below illustrate the difference between stochastic gradient descent and (batch) gradient descent.
(Batch) Gradient Descent:
``` python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
# Forward propagation
a, caches = forward_propagation(X, parameters)
# Compute cost.
cost = compute_cost(a, Y)
# Backward propagation.
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
```
Stochastic Gradient Descent:
python
X = data_input
Y = labels
parameters = initialize_parameters(layers_dims)
for i in range(0, num_iterations):
for j in range(0, m):
# Forward propagation
a, caches = forward_propagation(X[:,j], parameters)
# Compute cost
cost = compute_cost(a, Y[:,j])
# Backward propagation
grads = backward_propagation(a, caches, parameters)
# Update parameters.
parameters = update_parameters(parameters, grads)
In Stochastic Gradient Descent, you use only 1 training example before updating the gradients. When the training set is large, SGD can be faster. But the parameters will "oscillate" toward the minimum rather than converge smoothly. Here is an illustration of this:
<img src="images/kiank_sgd.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> Figure 1 </u><font color='purple'> : SGD vs GD<br> "+" denotes a minimum of the cost. SGD leads to many oscillations to reach convergence. But each step is a lot faster to compute for SGD than for GD, as it uses only one training example (vs. the whole batch for GD). </center></caption>
Note also that implementing SGD requires 3 for-loops in total:
1. Over the number of iterations
2. Over the $m$ training examples
3. Over the layers (to update all parameters, from $(W^{[1]},b^{[1]})$ to $(W^{[L]},b^{[L]})$)
In practice, you'll often get faster results if you do not use neither the whole training set, nor only one training example, to perform each update. Mini-batch gradient descent uses an intermediate number of examples for each step. With mini-batch gradient descent, you loop over the mini-batches instead of looping over individual training examples.
<img src="images/kiank_minibatch.png" style="width:750px;height:250px;">
<caption><center> <u> <font color='purple'> Figure 2 </u>: <font color='purple'> SGD vs Mini-Batch GD<br> "+" denotes a minimum of the cost. Using mini-batches in your optimization algorithm often leads to faster optimization. </center></caption>
<font color='blue'>
What you should remember:
- The difference between gradient descent, mini-batch gradient descent and stochastic gradient descent is the number of examples you use to perform one update step.
- You have to tune a learning rate hyperparameter $\alpha$.
- With a well-turned mini-batch size, usually it outperforms either gradient descent or stochastic gradient descent (particularly when the training set is large).
2 - Mini-Batch Gradient descent
Let's learn how to build mini-batches from the training set (X, Y).
There are two steps:
- Shuffle: Create a shuffled version of the training set (X, Y) as shown below. Each column of X and Y represents a training example. Note that the random shuffling is done synchronously between X and Y. Such that after the shuffling the $i^{th}$ column of X is the example corresponding to the $i^{th}$ label in Y. The shuffling step ensures that examples will be split randomly into different mini-batches.
<img src="images/kiank_shuffle.png" style="width:550px;height:300px;">
Partition: Partition the shuffled (X, Y) into mini-batches of size mini_batch_size (here 64). Note that the number of training examples is not always divisible by mini_batch_size. The last mini batch might be smaller, but you don't need to worry about this. When the final mini-batch is smaller than the full mini_batch_size, it will look like this:
<img src="images/kiank_partition.png" style="width:550px;height:300px;">
Exercise: Implement random_mini_batches. We coded the shuffling part for you. To help you with the partitioning step, we give you the following code that selects the indexes for the $1^{st}$ and $2^{nd}$ mini-batches:
python
first_mini_batch_X = shuffled_X[:, 0 : mini_batch_size]
second_mini_batch_X = shuffled_X[:, mini_batch_size : 2 * mini_batch_size]
...
Note that the last mini-batch might end up smaller than mini_batch_size=64. Let $\lfloor s \rfloor$ represents $s$ rounded down to the nearest integer (this is math.floor(s) in Python). If the total number of examples is not a multiple of mini_batch_size=64 then there will be $\lfloor \frac{m}{mini_batch_size}\rfloor$ mini-batches with a full 64 examples, and the number of examples in the final mini-batch will be ($m-mini__batch__size \times \lfloor \frac{m}{mini_batch_size}\rfloor$).
End of explanation
"""
# GRADED FUNCTION: initialize_velocity
def initialize_velocity(parameters):
    """
    Build the zero-initialised momentum velocity dictionary.

    Arguments:
    parameters -- dict with entries 'W1', 'b1', ..., 'WL', 'bL'

    Returns:
    v -- dict with entries 'dW1', 'db1', ..., 'dWL', 'dbL', each a numpy
         array of zeros shaped like the matching parameter
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    v = {}

    # One zero array per gradient slot, matching each parameter's shape.
    for layer in range(1, num_layers + 1):
        v["dW" + str(layer)] = np.zeros(parameters["W" + str(layer)].shape)
        v["db" + str(layer)] = np.zeros(parameters["b" + str(layer)].shape)

    return v
# Smoke test: velocities start as zero arrays matching each parameter.
parameters = initialize_velocity_test_case()
v = initialize_velocity(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
"""
Explanation: Expected Output:
<table style="width:50%">
<tr>
<td > **shape of the 1st mini_batch_X** </td>
<td > (12288, 64) </td>
</tr>
<tr>
<td > **shape of the 2nd mini_batch_X** </td>
<td > (12288, 64) </td>
</tr>
<tr>
<td > **shape of the 3rd mini_batch_X** </td>
<td > (12288, 20) </td>
</tr>
<tr>
<td > **shape of the 1st mini_batch_Y** </td>
<td > (1, 64) </td>
</tr>
<tr>
<td > **shape of the 2nd mini_batch_Y** </td>
<td > (1, 64) </td>
</tr>
<tr>
<td > **shape of the 3rd mini_batch_Y** </td>
<td > (1, 20) </td>
</tr>
<tr>
<td > **mini batch sanity check** </td>
<td > [ 0.90085595 -0.7612069 0.2344157 ] </td>
</tr>
</table>
<font color='blue'>
What you should remember:
- Shuffling and Partitioning are the two steps required to build mini-batches
- Powers of two are often chosen to be the mini-batch size, e.g., 16, 32, 64, 128.
3 - Momentum
Because mini-batch gradient descent makes a parameter update after seeing just a subset of examples, the direction of the update has some variance, and so the path taken by mini-batch gradient descent will "oscillate" toward convergence. Using momentum can reduce these oscillations.
Momentum takes into account the past gradients to smooth out the update. We will store the 'direction' of the previous gradients in the variable $v$. Formally, this will be the exponentially weighted average of the gradient on previous steps. You can also think of $v$ as the "velocity" of a ball rolling downhill, building up speed (and momentum) according to the direction of the gradient/slope of the hill.
<img src="images/opt_momentum.png" style="width:400px;height:250px;">
<caption><center> <u><font color='purple'>Figure 3</u><font color='purple'>: The red arrows shows the direction taken by one step of mini-batch gradient descent with momentum. The blue points show the direction of the gradient (with respect to the current mini-batch) on each step. Rather than just following the gradient, we let the gradient influence $v$ and then take a step in the direction of $v$.<br> <font color='black'> </center>
Exercise: Initialize the velocity. The velocity, $v$, is a python dictionary that needs to be initialized with arrays of zeros. Its keys are the same as those in the grads dictionary, that is:
for $l =1,...,L$:
python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
Note that the iterator l starts at 0 in the for loop while the first parameters are v["dW1"] and v["db1"] (that's a "one" on the superscript). This is why we are shifting l to l+1 in the for loop.
End of explanation
"""
# GRADED FUNCTION: update_parameters_with_momentum
def update_parameters_with_momentum(parameters, grads, v, beta, learning_rate):
    """
    One momentum update step for every layer.

    Arguments:
    parameters -- dict with 'W1', 'b1', ..., 'WL', 'bL'
    grads -- dict with 'dW1', 'db1', ..., 'dWL', 'dbL'
    v -- velocity dict with the same keys as grads (updated in place)
    beta -- momentum coefficient (exponential-average weight of past gradients)
    learning_rate -- step size

    Returns:
    (parameters, v) -- updated parameter and velocity dictionaries
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer

    for layer in range(1, num_layers + 1):
        dw_key, db_key = "dW" + str(layer), "db" + str(layer)

        # Exponentially weighted average of past gradients.
        v[dw_key] = beta * v[dw_key] + (1 - beta) * grads[dw_key]
        v[db_key] = beta * v[db_key] + (1 - beta) * grads[db_key]

        # Step each parameter along the smoothed direction.
        parameters["W" + str(layer)] = parameters["W" + str(layer)] - learning_rate * v[dw_key]
        parameters["b" + str(layer)] = parameters["b" + str(layer)] - learning_rate * v[db_key]

    return parameters, v
# Smoke test: one momentum step on the fixture parameters.
parameters, grads, v = update_parameters_with_momentum_test_case()
parameters, v = update_parameters_with_momentum(parameters, grads, v, beta = 0.9, learning_rate = 0.01)
print("W1 = " + str(parameters["W1"]))
print("b1 = " + str(parameters["b1"]))
print("W2 = " + str(parameters["W2"]))
print("b2 = " + str(parameters["b2"]))
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
"""
Explanation: Expected Output:
<table style="width:40%">
<tr>
<td > **v["dW1"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **v["db1"]** </td>
<td > [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td > **v["dW2"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **v["db2"]** </td>
<td > [[ 0.]
[ 0.]
[ 0.]] </td>
</tr>
</table>
Exercise: Now, implement the parameters update with momentum. The momentum update rule is, for $l = 1, ..., L$:
$$ \begin{cases}
v_{dW^{[l]}} = \beta v_{dW^{[l]}} + (1 - \beta) dW^{[l]} \
W^{[l]} = W^{[l]} - \alpha v_{dW^{[l]}}
\end{cases}\tag{3}$$
$$\begin{cases}
v_{db^{[l]}} = \beta v_{db^{[l]}} + (1 - \beta) db^{[l]} \
b^{[l]} = b^{[l]} - \alpha v_{db^{[l]}}
\end{cases}\tag{4}$$
where L is the number of layers, $\beta$ is the momentum and $\alpha$ is the learning rate. All parameters should be stored in the parameters dictionary. Note that the iterator l starts at 0 in the for loop while the first parameters are $W^{[1]}$ and $b^{[1]}$ (that's a "one" on the superscript). So you will need to shift l to l+1 when coding.
End of explanation
"""
# GRADED FUNCTION: initialize_adam
def initialize_adam(parameters) :
    """
    Build the zero-initialised Adam moment dictionaries.

    Arguments:
    parameters -- dict with entries 'W1', 'b1', ..., 'WL', 'bL'

    Returns:
    v -- first-moment dict ('dW1', 'db1', ...), zero arrays shaped like
         each matching parameter
    s -- second-moment dict with the same keys and shapes
    """
    num_layers = len(parameters) // 2  # two entries (W, b) per layer
    v, s = {}, {}

    # Both moment estimates start at zero, one slot per gradient entry.
    for layer in range(1, num_layers + 1):
        for grad_prefix, param_prefix in (("dW", "W"), ("db", "b")):
            shape = parameters[param_prefix + str(layer)].shape
            v[grad_prefix + str(layer)] = np.zeros(shape)
            s[grad_prefix + str(layer)] = np.zeros(shape)

    return v, s
# Smoke test: both Adam moment dictionaries start as zero arrays.
parameters = initialize_adam_test_case()
v, s = initialize_adam(parameters)
print("v[\"dW1\"] = " + str(v["dW1"]))
print("v[\"db1\"] = " + str(v["db1"]))
print("v[\"dW2\"] = " + str(v["dW2"]))
print("v[\"db2\"] = " + str(v["db2"]))
print("s[\"dW1\"] = " + str(s["dW1"]))
print("s[\"db1\"] = " + str(s["db1"]))
print("s[\"dW2\"] = " + str(s["dW2"]))
print("s[\"db2\"] = " + str(s["db2"]))
"""
Explanation: Expected Output:
<table style="width:90%">
<tr>
<td > **W1** </td>
<td > [[ 1.62544598 -0.61290114 -0.52907334]
[-1.07347112 0.86450677 -2.30085497]] </td>
</tr>
<tr>
<td > **b1** </td>
<td > [[ 1.74493465]
[-0.76027113]] </td>
</tr>
<tr>
<td > **W2** </td>
<td > [[ 0.31930698 -0.24990073 1.4627996 ]
[-2.05974396 -0.32173003 -0.38320915]
[ 1.13444069 -1.0998786 -0.1713109 ]] </td>
</tr>
<tr>
<td > **b2** </td>
<td > [[-0.87809283]
[ 0.04055394]
[ 0.58207317]] </td>
</tr>
<tr>
<td > **v["dW1"]** </td>
<td > [[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]] </td>
</tr>
<tr>
<td > **v["db1"]** </td>
<td > [[-0.01228902]
[-0.09357694]] </td>
</tr>
<tr>
<td > **v["dW2"]** </td>
<td > [[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]] </td>
</tr>
<tr>
<td > **v["db2"]** </td>
<td > [[ 0.02344157]
[ 0.16598022]
[ 0.07420442]]</td>
</tr>
</table>
Note that:
- The velocity is initialized with zeros. So the algorithm will take a few iterations to "build up" velocity and start to take bigger steps.
- If $\beta = 0$, then this just becomes standard gradient descent without momentum.
How do you choose $\beta$?
The larger the momentum $\beta$ is, the smoother the update because the more we take the past gradients into account. But if $\beta$ is too big, it could also smooth out the updates too much.
Common values for $\beta$ range from 0.8 to 0.999. If you don't feel inclined to tune this, $\beta = 0.9$ is often a reasonable default.
Tuning the optimal $\beta$ for your model might need trying several values to see what works best in term of reducing the value of the cost function $J$.
<font color='blue'>
What you should remember:
- Momentum takes past gradients into account to smooth out the steps of gradient descent. It can be applied with batch gradient descent, mini-batch gradient descent or stochastic gradient descent.
- You have to tune a momentum hyperparameter $\beta$ and a learning rate $\alpha$.
4 - Adam
Adam is one of the most effective optimization algorithms for training neural networks. It combines ideas from RMSProp (described in lecture) and Momentum.
How does Adam work?
1. It calculates an exponentially weighted average of past gradients, and stores it in variables $v$ (before bias correction) and $v^{corrected}$ (with bias correction).
2. It calculates an exponentially weighted average of the squares of the past gradients, and stores it in variables $s$ (before bias correction) and $s^{corrected}$ (with bias correction).
3. It updates parameters in a direction based on combining information from "1" and "2".
The update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{dW^{[l]}} = \beta_1 v_{dW^{[l]}} + (1 - \beta_1) \frac{\partial \mathcal{J} }{ \partial W^{[l]} } \
v^{corrected}{dW^{[l]}} = \frac{v{dW^{[l]}}}{1 - (\beta_1)^t} \
s_{dW^{[l]}} = \beta_2 s_{dW^{[l]}} + (1 - \beta_2) (\frac{\partial \mathcal{J} }{\partial W^{[l]} })^2 \
s^{corrected}{dW^{[l]}} = \frac{s{dW^{[l]}}}{1 - (\beta_1)^t} \
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}{dW^{[l]}}}{\sqrt{s^{corrected}{dW^{[l]}}} + \varepsilon}
\end{cases}$$
where:
- t counts the number of steps taken of Adam
- L is the number of layers
- $\beta_1$ and $\beta_2$ are hyperparameters that control the two exponentially weighted averages.
- $\alpha$ is the learning rate
- $\varepsilon$ is a very small number to avoid dividing by zero
As usual, we will store all parameters in the parameters dictionary
Exercise: Initialize the Adam variables $v, s$ which keep track of the past information.
Instruction: The variables $v, s$ are python dictionaries that need to be initialized with arrays of zeros. Their keys are the same as for grads, that is:
for $l = 1, ..., L$:
```python
v["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
v["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
s["dW" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["W" + str(l+1)])
s["db" + str(l+1)] = ... #(numpy array of zeros with the same shape as parameters["b" + str(l+1)])
```
End of explanation
"""
# GRADED FUNCTION: update_parameters_with_adam
def update_parameters_with_adam(parameters, grads, v, s, t, learning_rate = 0.01,
                                beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8):
    """
    Update parameters using Adam
    Arguments:
    parameters -- python dictionary containing your parameters:
                    parameters['W' + str(l)] = Wl
                    parameters['b' + str(l)] = bl
    grads -- python dictionary containing your gradients for each parameters:
                    grads['dW' + str(l)] = dWl
                    grads['db' + str(l)] = dbl
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    t -- Adam step counter (1-based), used for the two bias corrections
    learning_rate -- the learning rate, scalar.
    beta1 -- Exponential decay hyperparameter for the first moment estimates
    beta2 -- Exponential decay hyperparameter for the second moment estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    Returns:
    parameters -- python dictionary containing your updated parameters
    v -- Adam variable, moving average of the first gradient, python dictionary
    s -- Adam variable, moving average of the squared gradient, python dictionary
    """
    L = len(parameters) // 2                 # number of layers in the neural networks
    v_corrected = {}                         # Initializing first moment estimate, python dictionary
    s_corrected = {}                         # Initializing second moment estimate, python dictionary
    # Perform Adam update on all parameters
    for l in range(L):
        # Moving average of the gradients. Inputs: "v, grads, beta1". Output: "v".
        v["dW" + str(l+1)] = beta1 * v["dW" + str(l+1)] + (1 - beta1) * grads["dW" + str(l+1)]
        v["db" + str(l+1)] = beta1 * v["db" + str(l+1)] + (1 - beta1) * grads["db" + str(l+1)]
        # Compute bias-corrected first moment estimate. Inputs: "v, beta1, t". Output: "v_corrected".
        v_corrected["dW" + str(l+1)] = v["dW" + str(l+1)] / (1 - beta1**t)
        v_corrected["db" + str(l+1)] = v["db" + str(l+1)] / (1 - beta1**t)
        # Moving average of the squared gradients. Inputs: "s, grads, beta2". Output: "s".
        s["dW" + str(l+1)] = beta2 * s["dW" + str(l+1)] + (1 - beta2) * grads['dW' + str(l+1)]**2
        s["db" + str(l+1)] = beta2 * s["db" + str(l+1)] + (1 - beta2) * grads['db' + str(l+1)]**2
        # Compute bias-corrected second raw moment estimate. Inputs: "s, beta2, t". Output: "s_corrected".
        s_corrected["dW" + str(l+1)] = s["dW" + str(l+1)] / (1 - beta2**t)
        s_corrected["db" + str(l+1)] = s["db" + str(l+1)] / (1 - beta2**t)
        # Update parameters. Note: epsilon is added OUTSIDE the square root,
        # matching the Adam rule W = W - alpha * v_c / (sqrt(s_c) + epsilon).
        # (The previous version added epsilon inside the sqrt, which distorts
        # updates when the second moment is very small.)
        parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate * v_corrected["dW" + str(l+1)] / (np.sqrt(s_corrected["dW" + str(l+1)]) + epsilon)
        parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate * v_corrected["db" + str(l+1)] / (np.sqrt(s_corrected["db" + str(l+1)]) + epsilon)
    return parameters, v, s
# Fetch a fixed test fixture and run a single Adam step with t = 2.
parameters, grads, v, s = update_parameters_with_adam_test_case()
parameters, v, s = update_parameters_with_adam(parameters, grads, v, s, t = 2)
# Report the updated parameters, then both moment estimates.
for key in ("W1", "b1", "W2", "b2"):
    print(key + " = " + str(parameters[key]))
for key in ("dW1", "db1", "dW2", "db2"):
    print("v[\"" + key + "\"] = " + str(v[key]))
for key in ("dW1", "db1", "dW2", "db2"):
    print("s[\"" + key + "\"] = " + str(s[key]))
"""
Explanation: Expected Output:
<table style="width:40%">
<tr>
<td > **v["dW1"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **v["db1"]** </td>
<td > [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td > **v["dW2"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **v["db2"]** </td>
<td > [[ 0.]
[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td > **s["dW1"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **s["db1"]** </td>
<td > [[ 0.]
[ 0.]] </td>
</tr>
<tr>
<td > **s["dW2"]** </td>
<td > [[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]] </td>
</tr>
<tr>
<td > **s["db2"]** </td>
<td > [[ 0.]
[ 0.]
[ 0.]] </td>
</tr>
</table>
Exercise: Now, implement the parameters update with Adam. Recall the general update rule is, for $l = 1, ..., L$:
$$\begin{cases}
v_{W^{[l]}} = \beta_1 v_{W^{[l]}} + (1 - \beta_1) \frac{\partial J }{ \partial W^{[l]} } \\
v^{corrected}_{W^{[l]}} = \frac{v_{W^{[l]}}}{1 - (\beta_1)^t} \\
s_{W^{[l]}} = \beta_2 s_{W^{[l]}} + (1 - \beta_2) \left(\frac{\partial J }{\partial W^{[l]} }\right)^2 \\
s^{corrected}_{W^{[l]}} = \frac{s_{W^{[l]}}}{1 - (\beta_2)^t} \\
W^{[l]} = W^{[l]} - \alpha \frac{v^{corrected}_{W^{[l]}}}{\sqrt{s^{corrected}_{W^{[l]}}}+\varepsilon}
\end{cases}$$
Note that the iterator l starts at 0 in the for loop while the first parameters are $W^{[1]}$ and $b^{[1]}$. You need to shift l to l+1 when coding.
End of explanation
"""
# Load the two-class "moons" dataset used to compare the three optimizers.
train_X, train_Y = load_dataset()
"""
Explanation: Expected Output:
<table>
<tr>
<td > **W1** </td>
<td > [[ 1.63178673 -0.61919778 -0.53561312]
[-1.08040999 0.85796626 -2.29409733]] </td>
</tr>
<tr>
<td > **b1** </td>
<td > [[ 1.75225313]
[-0.75376553]] </td>
</tr>
<tr>
<td > **W2** </td>
<td > [[ 0.32648046 -0.25681174 1.46954931]
[-2.05269934 -0.31497584 -0.37661299]
[ 1.14121081 -1.09245036 -0.16498684]] </td>
</tr>
<tr>
<td > **b2** </td>
<td > [[-0.88529978]
[ 0.03477238]
[ 0.57537385]] </td>
</tr>
<tr>
<td > **v["dW1"]** </td>
<td > [[-0.11006192 0.11447237 0.09015907]
[ 0.05024943 0.09008559 -0.06837279]] </td>
</tr>
<tr>
<td > **v["db1"]** </td>
<td > [[-0.01228902]
[-0.09357694]] </td>
</tr>
<tr>
<td > **v["dW2"]** </td>
<td > [[-0.02678881 0.05303555 -0.06916608]
[-0.03967535 -0.06871727 -0.08452056]
[-0.06712461 -0.00126646 -0.11173103]] </td>
</tr>
<tr>
<td > **v["db2"]** </td>
<td > [[ 0.02344157]
[ 0.16598022]
[ 0.07420442]] </td>
</tr>
<tr>
<td > **s["dW1"]** </td>
<td > [[ 0.00121136 0.00131039 0.00081287]
[ 0.0002525 0.00081154 0.00046748]] </td>
</tr>
<tr>
<td > **s["db1"]** </td>
<td > [[ 1.51020075e-05]
[ 8.75664434e-04]] </td>
</tr>
<tr>
<td > **s["dW2"]** </td>
<td > [[ 7.17640232e-05 2.81276921e-04 4.78394595e-04]
[ 1.57413361e-04 4.72206320e-04 7.14372576e-04]
[ 4.50571368e-04 1.60392066e-07 1.24838242e-03]] </td>
</tr>
<tr>
<td > **s["db2"]** </td>
<td > [[ 5.49507194e-05]
[ 2.75494327e-03]
[ 5.50629536e-04]] </td>
</tr>
</table>
You now have three working optimization algorithms (mini-batch gradient descent, Momentum, Adam). Let's implement a model with each of these optimizers and observe the difference.
5 - Model with different optimization algorithms
Lets use the following "moons" dataset to test the different optimization methods. (The dataset is named "moons" because the data from each of the two classes looks a bit like a crescent-shaped moon.)
End of explanation
"""
def model(X, Y, layers_dims, optimizer, learning_rate = 0.0007, mini_batch_size = 64, beta = 0.9,
          beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8, num_epochs = 10000, print_cost = True):
    """
    3-layer neural network model which can be run in different optimizer modes.
    Arguments:
    X -- input data, of shape (2, number of examples)
    Y -- true "label" vector (1 for blue dot / 0 for red dot), of shape (1, number of examples)
    layers_dims -- python list, containing the size of each layer
    optimizer -- one of "gd", "momentum" or "adam"
    learning_rate -- the learning rate, scalar.
    mini_batch_size -- the size of a mini batch
    beta -- Momentum hyperparameter
    beta1 -- Exponential decay hyperparameter for the past gradients estimates
    beta2 -- Exponential decay hyperparameter for the past squared gradients estimates
    epsilon -- hyperparameter preventing division by zero in Adam updates
    num_epochs -- number of epochs
    print_cost -- True to print the cost every 1000 epochs
    Returns:
    parameters -- python dictionary containing your updated parameters
    """
    n_layers = len(layers_dims)          # number of layers in the network
    costs = []                           # cost history, sampled every 100 epochs
    t = 0                                # Adam step counter
    seed = 10                            # For grading purposes, so that your "random" minibatches are the same as ours
    parameters = initialize_parameters(layers_dims)
    # Set up optimizer state; plain gradient descent keeps none.
    if optimizer == "momentum":
        v = initialize_velocity(parameters)
    elif optimizer == "adam":
        v, s = initialize_adam(parameters)
    # Optimization loop
    for epoch in range(num_epochs):
        # Bump the seed so the dataset is reshuffled differently each epoch.
        seed = seed + 1
        minibatches = random_mini_batches(X, Y, mini_batch_size, seed)
        for minibatch_X, minibatch_Y in minibatches:
            # Forward pass, cost, backward pass on this minibatch.
            a3, caches = forward_propagation(minibatch_X, parameters)
            cost = compute_cost(a3, minibatch_Y)
            grads = backward_propagation(minibatch_X, minibatch_Y, caches)
            # Parameter update according to the selected optimizer.
            if optimizer == "gd":
                parameters = update_parameters_with_gd(parameters, grads, learning_rate)
            elif optimizer == "momentum":
                parameters, v = update_parameters_with_momentum(parameters, grads, v, beta, learning_rate)
            elif optimizer == "adam":
                t = t + 1  # Adam counter
                parameters, v, s = update_parameters_with_adam(parameters, grads, v, s,
                                                               t, learning_rate, beta1, beta2, epsilon)
        # Report / record the last minibatch cost of the epoch.
        if print_cost and epoch % 1000 == 0:
            print ("Cost after epoch %i: %f" %(epoch, cost))
        if print_cost and epoch % 100 == 0:
            costs.append(cost)
    # plot the cost curve
    plt.plot(costs)
    plt.ylabel('cost')
    plt.xlabel('epochs (per 100)')
    plt.title("Learning rate = " + str(learning_rate))
    plt.show()
    return parameters
"""
Explanation: We have already implemented a 3-layer neural network. You will train it with:
- Mini-batch Gradient Descent: it will call your function:
- update_parameters_with_gd()
- Mini-batch Momentum: it will call your functions:
- initialize_velocity() and update_parameters_with_momentum()
- Mini-batch Adam: it will call your functions:
- initialize_adam() and update_parameters_with_adam()
End of explanation
"""
# train 3-layer model with plain mini-batch gradient descent
# Layer sizes: input dimension, 5 and 2 hidden units, 1 output unit.
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "gd")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Gradient Descent optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
"""
Explanation: You will now run this 3 layer neural network with each of the 3 optimization methods.
5.1 - Mini-batch Gradient descent
Run the following code to see how the model does with mini-batch gradient descent.
End of explanation
"""
# train 3-layer model with momentum updates (beta = 0.9)
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, beta = 0.9, optimizer = "momentum")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Momentum optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
"""
Explanation: 5.2 - Mini-batch gradient descent with momentum
Run the following code to see how the model does with momentum. Because this example is relatively simple, the gains from using momentum are small; but for more complex problems you might see bigger gains.
End of explanation
"""
# train 3-layer model with Adam updates
layers_dims = [train_X.shape[0], 5, 2, 1]
parameters = model(train_X, train_Y, layers_dims, optimizer = "adam")
# Predict
predictions = predict(train_X, train_Y, parameters)
# Plot decision boundary
plt.title("Model with Adam optimization")
axes = plt.gca()
axes.set_xlim([-1.5,2.5])
axes.set_ylim([-1,1.5])
plot_decision_boundary(lambda x: predict_dec(parameters, x.T), train_X, train_Y)
"""
Explanation: 5.3 - Mini-batch with Adam mode
Run the following code to see how the model does with Adam.
End of explanation
"""
|
pycomlink/pycomlink | notebooks/outdated_notebooks/Baseline determination.ipynb | bsd-3-clause | cml = pycml.io.examples.read_one_cml()
# Remove artifacts and plot data
# (tx >= 100 and rx == -99.9 appear to be sensor error codes in this
# example dataset -- verify against the data source)
cml.process.quality_control.set_to_nan_if('tx', '>=', 100)
cml.process.quality_control.set_to_nan_if('rx', '==', -99.9)
cml.plot_data(['tx', 'rx', 'txrx']);
"""
Explanation: Read in example data from one CML
End of explanation
"""
# Rolling-standard-deviation wet/dry classification: 30-sample window, threshold 0.8.
cml.process.wet_dry.std_dev(window_length=30, threshold=0.8)
cml.plot_data(['txrx', 'wet']);
"""
Explanation: Do a simple wet/dry classification
End of explanation
"""
# Constant baseline during wet periods; calc_A then derives the attenuation track A.
cml.process.baseline.constant()
cml.process.baseline.calc_A()
ax = cml.plot_data(['txrx', 'wet', 'baseline', 'A']);
ax[0].set_xlim('2016-10-25 00:00', '2016-10-25 10:00');
# Save a copy of these results for comparing them to the linear baseline later
baseline_constant = cml.channel_1.data.baseline.copy()
A_constant = cml.channel_1.data.A.copy()
"""
Explanation: Derive a constant baseline
Let's just focus on the rain events on 2016-10-25
End of explanation
"""
# Linearly interpolated baseline across wet periods; recompute A.
cml.process.baseline.linear()
cml.process.baseline.calc_A()
ax = cml.plot_data(['txrx', 'wet', 'baseline', 'A']);
ax[0].set_xlim('2016-10-25 00:00', '2016-10-25 10:00');
# Save a copy of these results for comparing them to the constant baseline
baseline_linear = cml.channel_1.data.baseline.copy()
A_linear = cml.channel_1.data.A.copy()
"""
Explanation: Or derive a linear baseline
End of explanation
"""
# Side-by-side comparison of the two baseline variants (top panel) and the
# attenuation A each one yields (bottom panel) for the 2016-10-25 rain events.
fig, ax = plt.subplots(2, 1, figsize=(10, 4), sharex=True)
ax[0].plot(baseline_constant, color='C3', label='constant baseline')
ax[0].plot(baseline_linear, color='C4', label='linear baseline')
ax[1].plot(A_constant, color='C3', label='constant baseline')
ax[1].plot(A_linear, color='C4', label='linear baseline')
ax[0].set_xlim('2016-10-25 00:00', '2016-10-25 10:00');
ax[0].set_ylabel('baseline')
ax[1].set_ylabel('A')
ax[0].legend();
"""
Explanation: Compare the results from constant and linear baseline
End of explanation
"""
# Exchange the current `wet` pd.Series in `channel_1` with a different series of floats with some `NaN`s
wet_temp = cml.channel_1.data.wet.astype(float)
# Insert a 15-minute NaN gap to demonstrate the NaN-handling behavior.
wet_temp['2016-10-25 04:45': '2016-10-25 05:00'] = np.NaN
cml.channel_1.data.wet = wet_temp
"""
Explanation: NaN handling
The algorithms for constant and linear baseline handle NaNs differently:
* constant baseline:
* For NaN values in wet the baseline is also set to NaN.
* All baseline values following a NaN during a wet event are also set to NaN till the next dry event starts. This has to be done, since we do not know if a new wet event started during a NaN period and hence we do not know at which level the constant baseline should be.
* linear baseline:
* Default:
The baseline for a whole wet event is set to NaN if there is at least one wet NaN within this period. This makes sense, since for the interpolation of the linear baseline the correct end of the wet period has to be known to its txrx value. Since the wet event could have ended during the NaN period, we do not know the end of the wet period and hence cannot savely assume a txrx endpoint for the interpolation.
* Option to ignore_nan:
If you know what you are doing, e.g. because you know that you only have very few consecutive wet NaNs and hence can assume that a wet event will not stop during your wet NaN period, then you can ignore all NaNs. This will take the next switch from wet to dry as the endpoint of the wet event and do the interpolation accordingly.
End of explanation
"""
# Constant baseline with the NaN gap present: baseline/A stay NaN from the
# gap until the next dry period (see the NaN-handling notes above).
cml.process.baseline.constant()
cml.process.baseline.calc_A()
ax = cml.plot_data(['txrx', 'wet', 'baseline', 'A']);
ax[0].set_xlim('2016-10-25 00:00', '2016-10-25 10:00');
"""
Explanation: Constant baseline
End of explanation
"""
# Linear baseline, default NaN handling: the whole wet event containing
# the gap gets a NaN baseline.
cml.process.baseline.linear()
cml.process.baseline.calc_A()
ax = cml.plot_data(['txrx', 'wet', 'baseline', 'A']);
ax[0].set_xlim('2016-10-25 00:00', '2016-10-25 10:00');
"""
Explanation: Linear baseline (default)
default = set baseline for whole wet event to NaN if it contains at least one wet NaN
End of explanation
"""
# Linear baseline ignoring NaNs: interpolation runs to the next wet->dry switch.
cml.process.baseline.linear(ignore_nan=True)
cml.process.baseline.calc_A()
ax = cml.plot_data(['txrx', 'wet', 'baseline', 'A']);
ax[0].set_xlim('2016-10-25 00:00', '2016-10-25 10:00');
"""
Explanation: Linear baseline (ignoring NaNs)
End of explanation
"""
|
Danghor/Formal-Languages | ANTLR4-Python/SLR-Parser-Generator/Shift-Reduce-Parser-Pure.ipynb | gpl-2.0 | import re
"""
Explanation: A Shift-Reduce Parser for Arithmetic Expressions
In this notebook we implement a simple recursive descend parser for arithmetic expressions.
This parser will implement the following grammar:
$$
\begin{eqnarray*}
\mathrm{expr} & \rightarrow & \mathrm{expr}\;\;\texttt{'+'}\;\;\mathrm{product} \\
              & \mid        & \mathrm{expr}\;\;\texttt{'-'}\;\;\mathrm{product} \\
              & \mid        & \mathrm{product} \\[0.2cm]
\mathrm{product} & \rightarrow & \mathrm{product}\;\;\texttt{'*'}\;\;\mathrm{factor} \\
                 & \mid        & \mathrm{product}\;\;\texttt{'/'}\;\;\mathrm{factor} \\
                 & \mid        & \mathrm{factor} \\[0.2cm]
\mathrm{factor} & \rightarrow & \texttt{'('} \;\;\mathrm{expr} \;\;\texttt{')'} \\
                & \mid        & \texttt{NUMBER}
\end{eqnarray*}
$$
Implementing a Scanner
End of explanation
"""
def tokenize(s):
    '''Convert the string s, which should contain an arithmetic
       expression, into the corresponding list of token strings.
    '''
    lexSpec = r'''([ \t\n]+)      |  # blanks and tabs
                  ([1-9][0-9]*|0) |  # number
                  ([()])          |  # parentheses 
                  ([-+*/])        |  # arithmetical operators
                  (.)                # unrecognized character
               '''
    result = []
    for ws, number, parenthesis, operator, error in re.findall(lexSpec, s, re.VERBOSE):
        if ws:                       # whitespace only separates tokens
            continue
        if number:                   # numbers are abstracted to the token NUMBER
            result.append('NUMBER')
        elif parenthesis:
            result.append(parenthesis)
        elif operator:
            result.append(operator)
        elif error:
            result.append(f'ERROR({error})')
    return result

tokenize('1 + 2 * (3 - 4)')
class ShiftReduceParser():
    """A table-driven shift-reduce (LR) parser.  The action and goto tables
    are computed externally and passed to the constructor; the parse method
    itself is defined at top level and attached to the class further below.
    """
    def __init__(self, actionTable, gotoTable):
        # maps (state, token) -> 'accept', ('shift', state), or ('reduce', rule)
        self.mActionTable = actionTable
        # maps (state, nonterminal) -> successor state after a reduction
        self.mGotoTable = gotoTable
def parse(self, TL):
    """Try to parse the token list TL using the action and goto tables.

    Returns True if TL is accepted by the parser and False otherwise.
    Note that TL is extended in place with the end-of-input marker '$'.
    """
    TL += ['$']                  # append the end-of-input marker
    cursor  = 0                  # index of the next unread token in TL
    symbols = []                 # stack of grammar symbols
    states  = ['s0']             # stack of parser states, s0 is the start state
    while True:
        state = states[-1]
        token = TL[cursor]
        print('Symbols:', ' '.join(symbols + ['|'] + TL[cursor:]).strip())
        action = self.mActionTable.get((state, token), 'error')
        if action == 'error':
            return False
        elif action == 'accept':
            return True
        elif action[0] == 'shift':
            symbols.append(token)
            states.append(action[1])
            cursor += 1
        elif action[0] == 'reduce':
            head, body = action[1]
            if len(body) > 0:    # pop one entry per symbol of the rule body
                del symbols[-len(body):]
                del states[-len(body):]
            symbols.append(head)
            states.append(self.mGotoTable[states[-1], head])
# Attach the top-level function parse as a method of ShiftReduceParser and
# delete the now superfluous global name (a common notebook idiom).
ShiftReduceParser.parse = parse
del parse
# IPython cell magic (only valid inside a notebook): executes parse-table.py,
# which defines the global actionTable and gotoTable used by the test driver.
%run parse-table.py
"""
Explanation: The function tokenize transforms the string s into a list of tokens. See below for an example.
End of explanation
"""
def test(s):
    """Tokenize the arithmetic expression s, parse the token list with the
    SLR tables loaded from parse-table.py, and report success or failure.
    """
    parser = ShiftReduceParser(actionTable, gotoTable)
    TL = tokenize(s)
    print(f'tokenlist: {TL}\n')
    if parser.parse(TL):
        print('Parse successful!')
    else:
        print('Parse failed!')
# Syntactically correct expressions ...
test('(1 + 2) * 3')
test('1 * 2 + 3 * (4 - 5) / 2')
test('11+22*(33-44)/(5-10*5/(4-3))')
# ... and one with a dangling operator, which must fail.
test('1+2*3-')
"""
Explanation: Testing
End of explanation
"""
|
DylanM-Marshall/FIDDLE | fiddle/predictions_visualization.ipynb | gpl-3.0 | %matplotlib inline
from matplotlib import pylab as pl
from scipy import stats
import numpy as np
import pandas as pd
import h5py
from matplotlib.backends.backend_pdf import PdfPages
"""
Explanation: FIDDLE Predictions Visualization Tutorial:
This notebook outlines how to create graphs from the data in the output files. The types of graphs depicted in FIDDLE's original bioRxiv paper were created using similar code. It is important to note that if variables are changed from the suggested names used throughout these steps, their later references (e.g. in step 7) must be updated accordingly.
Note: The following python packages are easily installable via pip, e.g:
pip install scipy
End of explanation
"""
# Open the predictions file produced by a FIDDLE run (read-only).
predictions = h5py.File('../results/17_08_11_test1/predictions.h5','r')
"""
Explanation: 1. Load predictions hdf5 dataset, e.g:
predictions = h5py.File('../results/experiment/predictions.h5','r')
End of explanation
"""
# Each key is an output track; each value reports the stored matrix shape.
predictions.items()
"""
Explanation: 1a. Examine the predictions hdf5 dataset, the keys correspond to outputs as determined by the parameters in the configurations.json file and the values correspond to the outputs' matrix shape, e.g:
predictions.items()
End of explanation
"""
# Open the held-out test dataset the predictions were generated from.
test = h5py.File('../data/hdf5datasets/NSMSDSRSCSTSRI_500bp/test.h5', 'r')
"""
Explanation: 2. Load original test hdf5 dataset (these are the genomic regions FIDDLE performed its final prediction on), e.g:
test = h5py.File('../data/hdf5datasets/NSMSDSRSCSTSRI_500bp/test.h5', 'r')
End of explanation
"""
# Inspect the available tracks and their shapes.
test.items()
"""
Explanation: 2a. Examine the test hdf5 dataset, the keys and values have the same structure as the predictions hdf5dataset, e.g:
test.items()
End of explanation
"""
# Load the full 'info' indexing track into memory (one row per test region).
infoRef_test = test.get('info')[:]
"""
Explanation: 3. Read in the 'info' reference track from the test hdf5 dataset, e.g.:
infoRef_test = test.get('info')[:]
End of explanation
"""
# Summary statistics for each column of the info track:
# chromosome, strand, gene index, base-pair index (see step 3a).
stats.describe(infoRef_test[:, 0])
stats.describe(infoRef_test[:, 1])
stats.describe(infoRef_test[:, 2])
stats.describe(infoRef_test[:, 3])
"""
Explanation: 3a. Examine the info reference track, the dimensions correspond to the following, this track is used for correct indexing:
1. Chromosome number (e.g. 1-16)
2. Strandedness (e.g. -1, 1)
3. Gene index (parsed from the original GFF file input)
4. Base Pair index (e.g. up to ~10^6)
stats.describe(infoRef_test[:, X])
End of explanation
"""
# Predicted TSS-Seq profiles, one row per test region.
pred_tss = predictions.get('tssseq')[:]
"""
Explanation: 4. Read in one of the sequencing output tracks (for instance 'tssseq' for TSS-Seq data) from the predictions hdf5 datasets. Possible output types were outlined previously in step 2a. This is the predicted profile that will be plotted, e.g.:
pred_tss = predictions.get('tssseq')[:]
End of explanation
"""
# Ground-truth TSS-Seq profiles for the same regions.
orig_tss = test.get('tssseq')[:]
"""
Explanation: 5. Correspondingly, read in the sequencing output track from the test hdf5 dataset. This is the original profile that the predicted profile will be plotted against, e.g:
orig_tss = test.get('tssseq')[:]
End of explanation
"""
file_name='../Figures/predictions_plot.pdf'  # output PDF path (directory must exist)
size_interest=50  # number of random genomic regions to plot
alpha_orig=0.7    # transparency of the expected (test) profile, 0=transparent 1=opaque
alpha_pred=0.9    # transparency of the predicted profile
"""
Explanation: 6. Create directory to save resulting Figure:
! mkdir ../Figures
6. Modify plotting variables according to your liking, such as number of random genomic regions of interest and more:
file_name='../Figures/predictions_plot.pdf' # make sure the directory exists before carrying through
size_interest=50 # 50 random genomic regions of interest
alpha_orig=0.7 # [0.0, 1.0] - [transparent, opaque], transparency of expected profile from test hdf5 dataset
alpha_pred=0.9 # [0.0, 1.0] - [transparent, opaque], transparency of predicted profile from predictions hdf5 dataset
End of explanation
"""
pl.ioff()  # suppress interactive display; output goes only to the PDF
pp = PdfPages(file_name)
# The per-region normalization below can divide by zero for empty profiles.
np.seterr(divide='ignore', invalid='ignore')
# One PDF page per randomly sampled region: original profile (red, normalized
# to a density) overlaid with the predicted profile (green).
# NOTE(review): the x-axis is built from the sample index ix rather than the
# base-pair coordinate stored in infoRef_test[ix, 3] -- confirm this is intended.
for ix in np.random.randint(pred_tss.shape[0], size=size_interest):
    x_ran = np.arange(ix, ix + 500)
    fig = pl.figure();
    pl.plot(x_ran, orig_tss[ix, 0] / np.sum(orig_tss[ix, 0]), color='red', alpha=alpha_orig);
    pl.plot(x_ran, pred_tss[ix], color='green', alpha=alpha_pred);
    pl.xlabel('Genomic coordinate (bp) - Chr'+ str(int(infoRef_test[ix,0])));
    pl.ylabel('Probability density function');
    pp.savefig();
    pl.close(fig);
pp.close();
"""
Explanation: 7. Create pdf file with plots of overlayed predictions against expected profiles.
End of explanation
"""
|
poldrack/fmri-analysis-vm | analysis/RTmodeling/RTmodeling.ipynb | mit | import numpy as np
import pandas as pd
import nibabel
from nipy.modalities.fmri.hemodynamic_models import spm_hrf
import matplotlib.pyplot as plt
import nipype.algorithms.modelgen as model # model generation
from nipype.interfaces.base import Bunch
from nipype.interfaces import fsl
from statsmodels.tsa.arima_process import arma_generate_sample
import os
import shutil
from IPython.display import HTML
import seaborn as sns
from nipype.caching import Memory
sns.set_style("white")
# nipype result cache rooted in the working directory; emptied via clearmem()
mem = Memory(base_dir='.')
def clearmem(base_dir=None):
    """Delete every file and directory inside the nipype cache directory.

    Parameters
    ----------
    base_dir : str, optional
        Directory to empty.  Defaults to the global ``mem`` cache directory.
    """
    if base_dir is None:
        base_dir = mem.base_dir
    # Remove only the top-level entries: shutil.rmtree already deletes nested
    # contents, so there is no need for os.walk (the previous version kept
    # walking into subtrees it had just deleted).
    for entry in os.listdir(base_dir):
        path = os.path.join(base_dir, entry)
        if os.path.isdir(path):
            shutil.rmtree(path)
        else:
            os.unlink(path)
# generate the design at a higher temporal resolution
tr = 0.1  # sampling interval in seconds used for the simulated designs below
"""
Explanation: This notebook will walk through the reason that it is necessary to model response times, and the various ways to model them. We will start by generating a design that has trials that vary in reaction time. This is adapted from Poldrack (2014, Developmental Cognitive Neuroscience).
End of explanation
"""
# we will use the spm HRF function
plt.plot(spm_hrf(tr))
# create a stick function with boxcars of increasing length (2, 3, 5, and 6 timepoints)
variable_sf = np.zeros(1000)
variable_sf[100:102] = 1
variable_sf[300:303] = 1
variable_sf[500:505] = 1
variable_sf[700:706] = 1
plt.figure(figsize=(12,5))
plt.subplot(1, 2, 1)
# plot the HRF against the sticks - reduce scale for plotting against hrf
plt.plot(variable_sf*.2)
# convolve the stick function with the HRF (truncated to the design length)
variable_sf_conv = np.convolve(
    variable_sf, spm_hrf(tr, oversampling=1))[0:len(variable_sf)]
plt.plot(variable_sf_conv)
# plot the evoked responses aligned in time (one 200-sample window per event)
plt.subplot(1, 2, 2)
hrfs = np.zeros((200, 4))
for i in range(4):
    hrfs[:, i] = variable_sf_conv[(100 + i * 200):(100 + (i + 1) * 200)]
_ = plt.plot(hrfs)
"""
Explanation: First generate a design with four events that differ in their duration (by convolving the HRF with increasingly long boxcars).
End of explanation
"""
# Longer boxcars (100 and 400 samples, i.e. well beyond the HRF duration):
# here duration changes the shape, not just the height, of the response.
variable_sf_long = np.zeros(1000)
variable_sf_long[100:200] = 1
variable_sf_long[500:900] = 1
plt.plot(variable_sf_long)
variable_sf_long_conv = np.convolve(
    variable_sf_long, spm_hrf(tr, oversampling=1))[0:len(variable_sf)]
_ = plt.plot(variable_sf_long_conv)
"""
Explanation: You see that for short events, the length of the impulse is primarily seen in the height of the response, rather than the length of the response.
This effect tapers off for inputs that last longer than the HRF
End of explanation
"""
# Beta-series design: one regressor per trial, each a constant-duration
# (4-sample) impulse convolved with the HRF.
hrf_bases = np.zeros((1000, 4))
hrf_bases[100:104, 0] = 1
hrf_bases[300:304, 1] = 1
hrf_bases[500:504, 2] = 1
hrf_bases[700:704, 3] = 1
desmtx = np.zeros((1000, 4))
for x in range(4):
    desmtx[:, x] = np.convolve(
        hrf_bases[:, x], spm_hrf(tr, oversampling=1))[0:len(variable_sf)]
plt.figure(figsize=(5,5))
plt.imshow(desmtx, aspect='auto', interpolation = 'nearest', cmap='gray')
"""
Explanation: Generate a beta-series design matrix that fits a separate regressor for each of the four trials; this is equivalent to separately modeling the intensity of each trial (assuming a constant duration for each).
End of explanation
"""
# Ordinary least squares via the normal equations: b = (X'X)^-1 X'y.
# NOTE(review): np.linalg.lstsq(desmtx, variable_sf_conv) would be more
# numerically robust than an explicit matrix inverse.
b_est = np.linalg.inv(desmtx.T.dot(desmtx)).dot(desmtx.T).dot(variable_sf_conv)
print(b_est)
intensity_sf_conv = desmtx.dot(b_est)
"""
Explanation: Now fit the beta-series model, and generate the fitted regressor, using ordinary least squares.
End of explanation
"""
# Overlay the duration-generated data, the intensity-model fit, and their
# residual; annotate each trial with its duration and fitted beta.
plt.plot(variable_sf_conv, color='k', linewidth=4)
plt.plot(intensity_sf_conv, 'c--')
# plt.plot(constant_sf_conv,color='b')
plt.plot(intensity_sf_conv - variable_sf_conv, color='b')
plt.text(10, -0.02, 'RT')
plt.text(100, -0.02, '200 ms')
plt.text(300, -0.02, '300 ms')
plt.text(500, -0.02, '500 ms')
plt.text(700, -0.02, '600 ms')
plt.text(10, -0.03, 'Beta')
plt.text(100, -0.03, '%0.2f' % b_est[0])
plt.text(300, -0.03, '%0.2f' % b_est[1])
plt.text(500, -0.03, '%0.2f' % b_est[2])
plt.text(700, -0.03, '%0.2f' % b_est[3])
plt.axis([0, 1000, -0.05, 0.15])
_ = plt.legend(['Variable duration',
                'Variable intensity (fitted)',
                'Residual'],
               loc='upper left')
"""
Explanation: Now let's plot the fit of the model (which assumed equal duration but varying intensity) to the data (which were generated by varying duration of the underlying impulse).
End of explanation
"""
np.random.seed(5)
diff = 0.5  # difference in mean log-RT between the two conditions
ntrials = 32  # total number of trials (half per condition)
rtDf = pd.DataFrame({
    'condition': np.zeros(ntrials),
    'rt': np.zeros(ntrials)
})
# second half of the trials belongs to condition 1
rtDf.iloc[int(ntrials / 2):, 0] = 1
# lognormal RTs; condition 1 has the larger mean
rtDf.loc[rtDf.condition == 0, 'rt'] = np.random.lognormal(0.0, 0.2, int(ntrials / 2))
rtDf.loc[rtDf.condition == 1, 'rt'] = np.random.lognormal(diff, 0.2, int(ntrials / 2))
g = sns.FacetGrid(rtDf, hue="condition")
g = g.map(sns.distplot, "rt", hist=False, rug=True)
rtDf.groupby('condition').rt.mean()
# generate random onsets: one fixed-length slot per trial, shuffled across trials
trial_length = 16  # length of each trial, including ISI (seconds)
total_length = trial_length * ntrials
randonsets = np.arange(0, total_length, trial_length)
np.random.shuffle(randonsets)
onsets = np.zeros(len(randonsets))
onsets[rtDf.condition == 0] = np.sort(randonsets[rtDf.condition == 0])
onsets[rtDf.condition == 1] = np.sort(randonsets[rtDf.condition == 1])
"""
Explanation: The point to take away from this is that the variable duration and variable intensity have largely indistinguishable effects on the hemodynamic response, at least for relatively short events.
Modeling reaction times
Now let's look at the various ways that one can model response times for fMRI analysis. First let's generate a design with two conditions that differ in mean response times. We will use a lognormal distribution which is a reasonable approximation to the shape of RT distributions.
End of explanation
"""
# Build three simulated datasets at 10 ms resolution (100 samples/s):
#   vd_ca: variable duration (scales with RT), constant amplitude
#   cd_ca: constant duration (mean RT), constant amplitude
#   cd_va: constant duration, amplitude raised for condition 1
times = np.arange(0, total_length, 1 / 100.)
deslen = len(times)  # length of design in high-resolution (10 ms) space
sf_vd_ca = np.zeros(deslen)
sf_cd_ca = np.zeros(deslen)
sf_cd_va = np.zeros(deslen)
activation_effect = 1
for i in range(len(onsets)):
    start = int(onsets[i] * 100.)
    # NOTE(review): durations are rt * 10 samples at 100 samples/s, i.e. rt/10
    # seconds -- confirm that this scaling (rather than rt * 100) is intended.
    stop_var = int(onsets[i] * 100) + round(rtDf.rt[i] * 10).astype('int')
    stop_const = int(onsets[i] * 100) + round(rtDf.rt.mean() * 10).astype('int')
    sf_vd_ca[start:stop_var] = 1
    sf_cd_ca[start:stop_const] = 1
    sf_cd_va[start:stop_const] = 1 + rtDf.condition[i] * activation_effect  # add activation effect
noiselevel = 0.25
# AR(1) noise shared across the three datasets
noise = arma_generate_sample([1, 0.4], [1, 0.], total_length) * noiselevel
# convolve each stick function with the HRF, then downsample to 1 s resolution
conv_sf_vd_ca = np.convolve(
    sf_vd_ca, spm_hrf(tr=0.01, oversampling=1.))[:len(sf_vd_ca)]
conv_sf_vd_ca = conv_sf_vd_ca[np.arange(0, len(conv_sf_vd_ca), 100)]
data_vd_ca = conv_sf_vd_ca * 50. + noise
conv_sf_cd_ca = np.convolve(
    sf_cd_ca, spm_hrf(tr=0.01, oversampling=1.))[:len(sf_cd_ca)]
conv_sf_cd_ca = conv_sf_cd_ca[np.arange(0, len(conv_sf_cd_ca), 100)]
data_cd_ca = conv_sf_cd_ca * 50. + noise
conv_sf_cd_va = np.convolve(
    sf_cd_va, spm_hrf(tr=0.01, oversampling=1.))[:len(sf_cd_va)]
conv_sf_cd_va = conv_sf_cd_va[np.arange(0, len(conv_sf_cd_va), 100)]
data_cd_va = conv_sf_cd_va * 50. + noise
"""
Explanation: Now generate the data using these onsets and durations. We will generate three datasets:
* constant event duration and activation across conditions (cd_ca)
* variable event duration but constant activation across conditions (vd_ca)
* constant event duration but variable activation across condition (cd_va)
End of explanation
"""
# Zoom in on one trial to show how the three generating models differ.
plt.figure()
start = 3150 #1550
end = start + 100
ymax = 2.7
plt.plot(sf_vd_ca[start:end], 'k--')
plt.ylim([0,ymax])
plt.title("example of a single trial")
plt.plot(sf_cd_ca[start:end], 'b-')
plt.ylim([0, ymax])
plt.plot(sf_cd_va[start:end], 'r')
plt.ylim([0, ymax])
plt.legend(["variable duration / constant amplitude",
            "constant duration / constant amplitude",
            "constant duration / variable amplitude"], loc=2)
"""
Explanation: Plot an example of a single trial for each of the conditions.
End of explanation
"""
# First 50 s of each simulated timeseries (1 s resolution, shared noise).
plt.figure()
plt.plot(data_vd_ca[:50], label="variable duration / constant amplitude")
plt.plot(data_cd_ca[:50], label="constant duration / constant amplitude")
plt.plot(data_cd_va[:50], label="constant duration / variable amplitude")
plt.ylim([-1, 3])
plt.legend()
"""
Explanation: And plot an example of timeseries for each condition.
End of explanation
"""
# Model 1: both conditions get a constant event duration (the mean RT).
meanrt = rtDf.rt.mean()
info = [Bunch(conditions=['short',
                          'long'],
              onsets=[np.sort(onsets[rtDf.condition == 0]),
                      np.sort(onsets[rtDf.condition == 1])],
              durations=[[meanrt],
                         [meanrt]])]
# create a dummy image for SpecifyModel to look at
if not os.path.exists('tmp.nii.gz'):
    dummy = nibabel.Nifti1Image(
        np.zeros((12, 12, 12, total_length)), np.identity(4))
    dummy.to_filename('tmp.nii.gz')
s = model.SpecifyModel()
s.inputs.input_units = 'secs'
s.inputs.functional_runs = 'tmp.nii.gz'
s.inputs.time_repetition = 1.0
s.inputs.high_pass_filter_cutoff = 128.
s.inputs.subject_info = info
specify_model_results = s.run()
clearmem()
# Build and run the FSL level-1 design through nipype's caching layer.
level1design = mem.cache(fsl.model.Level1Design)
level1design_results = level1design(interscan_interval=1.0,
                                    bases={'dgamma': {'derivs': False}},
                                    session_info=specify_model_results.outputs.session_info,
                                    model_serial_correlations=False)
modelgen = mem.cache(fsl.model.FEATModel)
modelgen_results = modelgen(fsf_file=level1design_results.outputs.fsf_files,
                            ev_files=level1design_results.outputs.ev_files)
# Load the generated design matrix and append an explicit intercept column.
X = np.loadtxt(modelgen_results.outputs.design_file, skiprows=5)
X = np.hstack((X, np.ones((X.shape[0], 1))))
print('Model with constant event durations')
sns.heatmap(X, vmin=0, vmax=0.8, xticklabels=['short', 'long', 'Mean'], cmap='gray')
# OLS estimates via the normal equations for each simulated dataset.
beta_hat_vd_ca_nort = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(data_vd_ca)
beta_hat_cd_ca_nort = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(data_cd_ca)
beta_hat_cd_va_nort = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(data_cd_va)
betas_nort = np.vstack((beta_hat_vd_ca_nort,
                        beta_hat_cd_ca_nort,
                        beta_hat_cd_va_nort))
df_nort = pd.DataFrame(
    betas_nort,
    columns=['short', 'long', 'Mean'],
    index=['variable duration/constant amplitude',
           'constant duration/constant amplitude',
           'constant duration/variable amplitude'])
HTML(df_nort.to_html())
"""
Explanation: Now we will build several models and estimate the parameters, to see the effect of variable duration on various ways of modeling RT.
First, build a model that assumes constant event durations. We will build our models using the model specification tools within nipype.
End of explanation
"""
# Model 2 (Grinband-style): each trial's regressor uses its actual RT as
# the event duration.
info = [Bunch(conditions=['short', 'long'],
              onsets=[onsets[rtDf.condition == 0],
                      onsets[rtDf.condition == 1]],
              durations=[rtDf.loc[rtDf.condition == 0, 'rt'],
                         rtDf.loc[rtDf.condition == 1, 'rt']])]
# create a dummy image for SpecifyModel to look at
if not os.path.exists('tmp.nii.gz'):
    dummy = nibabel.Nifti1Image(
        np.zeros((12, 12, 12, total_length)), np.identity(4))
    dummy.to_filename('tmp.nii.gz')
s = model.SpecifyModel()
s.inputs.input_units = 'secs'
s.inputs.functional_runs = 'tmp.nii.gz'
s.inputs.time_repetition = 1.0
s.inputs.high_pass_filter_cutoff = 128.
s.inputs.subject_info = info
specify_model_results = s.run()
clearmem()
# Same FSL design pipeline as the constant-duration model above.
level1design = mem.cache(fsl.model.Level1Design)
level1design_results = level1design(interscan_interval=1.0,
                                    bases={'dgamma': {'derivs': False}},
                                    session_info=specify_model_results.outputs.session_info,
                                    model_serial_correlations=False)
modelgen = mem.cache(fsl.model.FEATModel)
modelgen_results = modelgen(fsf_file=level1design_results.outputs.fsf_files,
                            ev_files=level1design_results.outputs.ev_files)
# Design matrix plus explicit intercept column.
X = np.loadtxt(modelgen_results.outputs.design_file, skiprows=5)
X = np.hstack((X, np.ones((X.shape[0], 1))))
print('Model with variable durations')
sns.heatmap(X, vmin=0, vmax=0.8, xticklabels=['short', 'long', 'Mean'], cmap='gray')
# OLS estimates for each dataset under the RT-as-duration model.
beta_hat_vd_ca_rt = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(data_vd_ca)
beta_hat_cd_ca_rt = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(data_cd_ca)
beta_hat_cd_va_rt = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(data_cd_va)
betas_varrt = np.vstack((beta_hat_vd_ca_rt, beta_hat_cd_ca_rt, beta_hat_cd_va_rt))
df_varrt = pd.DataFrame(
    betas_varrt,
    columns=['short', 'long', 'Mean'],
    index=['variable duration/constant amplitude',
           'constant duration/constant amplitude',
           'constant duration/variable amplitude'])
HTML(df_varrt.to_html())
"""
Explanation: Note that the first two datasets have equal activation intensity, and thus their betas should be the same, but the increased duration leads to longer activation in the VD/CA condition when the model doesn't include RT. Also notice that the model correctly estimates the effect for the variable amplitude condition.
Now build a model using the actual reaction times as durations to build the regressors (ala Grinband).
End of explanation
"""
# Model 3: constant-duration regressors per condition plus one parametric
# regressor whose amplitude is the mean-centered trial RT.
info = [Bunch(conditions=['short-const',
                          'long-const',
                          'RT'],
              onsets=[onsets[rtDf.condition == 0],
                      onsets[rtDf.condition == 1],
                      onsets],
              durations=[[meanrt],
                         [meanrt],
                         [meanrt]],
              amplitudes=[[1], [1], rtDf.rt - meanrt])]
# create a dummy image for SpecifyModel to look at
# (guarded with os.path.exists like the two models above, so we do not
# rewrite the same dummy file on every run)
if not os.path.exists('tmp.nii.gz'):
    dummy = nibabel.Nifti1Image(
        np.zeros((12, 12, 12, total_length)), np.identity(4))
    dummy.to_filename('tmp.nii.gz')
s = model.SpecifyModel()
s.inputs.input_units = 'secs'
s.inputs.functional_runs = 'tmp.nii.gz'
s.inputs.time_repetition = 1.0
s.inputs.high_pass_filter_cutoff = 128.
s.inputs.subject_info = info
specify_model_results = s.run()
clearmem()
# Same FSL design pipeline as the previous two models.
level1design = mem.cache(fsl.model.Level1Design)
level1design_results = level1design(interscan_interval=1.0,
                                    bases={'dgamma': {'derivs': False}},
                                    session_info=specify_model_results.outputs.session_info,
                                    model_serial_correlations=False)
modelgen = mem.cache(fsl.model.FEATModel)
modelgen_results = modelgen(fsf_file=level1design_results.outputs.fsf_files,
                            ev_files=level1design_results.outputs.ev_files)
# Design matrix plus explicit intercept column.
X = np.loadtxt(modelgen_results.outputs.design_file, skiprows=5)
X = np.hstack((X, np.ones((X.shape[0], 1))))
print('Model with parametric RT effect')
sns.heatmap(X, vmin=0, vmax=0.8, xticklabels=['short', 'long', 'RTparam', 'Mean'], cmap='gray')
# The OLS projector inv(X'X)X' is identical for all three datasets, so
# compute it once instead of three times.
ols_projector = np.linalg.inv(X.T.dot(X)).dot(X.T)
beta_hat_vd_ca_param = ols_projector.dot(data_vd_ca)
beta_hat_cd_ca_param = ols_projector.dot(data_cd_ca)
beta_hat_cd_va_param = ols_projector.dot(data_cd_va)
betas_param = np.vstack((beta_hat_vd_ca_param, beta_hat_cd_ca_param, beta_hat_cd_va_param))
df_param = pd.DataFrame(
    betas_param,
    columns=['short', 'long', 'RTparam', 'Mean'],
    index=['variable duration/constant amplitude',
           'constant duration/constant amplitude',
           'constant duration/variable amplitude'])
HTML(df_param.to_html())
"""
Explanation: There are three things to notice here. First, there is now no difference between conditions in the VD/CA dataset, because these have been removed by the RT modeling. Second, the RT-unrelated dataset (CD/CA) now has an artifactual difference between conditions, driven by the differences in RT across conditions that are now included in the regressor. Third, notice that the difference in activation between conditions in the third dataset (where one actually exists) is reduced compared to the previous model, because some of the effect is being removed due to its correlation with the RT difference across conditions.
Now let's build a model that includes a separate parametric regressor for RT alongside the constant duration (unmodulated) regressor.
End of explanation
"""
|
hungiyang/StatisticalMethods | examples/SDSScatalog/GalaxySizes.ipynb | gpl-2.0 | %load_ext autoreload
%autoreload 2
from __future__ import print_function
import numpy as np
import SDSS
import pandas as pd
import matplotlib
%matplotlib inline
# SQL for the SDSS photometric catalog: i-band Petrosian half-light radius
# and its error for the first 1000 objects classified as galaxies (type 3)
# with a positive size error.
galaxies = "SELECT top 1000 \
petroR50_i AS size, \
petroR50Err_i AS err \
FROM PhotoObjAll \
WHERE \
(type = '3' AND petroR50Err_i > 0)"
print (galaxies)
# Download data. This can take a few moments...
# (SDSS.select runs the query against the remote SkyServer — needs network)
data = SDSS.select(galaxies)
data.head()
# IPython shell magic: make sure the output directory exists, then cache
# the query result locally so later cells can re-read it without network.
!mkdir -p downloads
data.to_csv("downloads/SDSSgalaxysizes.csv")
"""
Explanation: Illustrating Observed and Intrinsic Object Properties:
SDSS "Galaxy" Sizes
In a catalog, each galaxy's measurements come with "error bars" providing information about how uncertain we should be about each property of each galaxy.
This means that the distribution of "observed" galaxy properties (as reported in the catalog) is not the same as the underlying or "intrinsic" distribution.
Let's look at the distribution of observed sizes in the SDSS photometric object catalog.
End of explanation
"""
# Re-read the cached catalog, keeping only the size and error columns.
data = pd.read_csv("downloads/SDSSgalaxysizes.csv",usecols=["size","err"])
# Histogram of observed sizes over 0-5 arcsec.
data['size'].hist(bins=np.linspace(0.0,5.0,100),figsize=(12,7))
matplotlib.pyplot.xlabel('Size / arcsec',fontsize=16)
matplotlib.pyplot.title('SDSS Observed Size',fontsize=20)
"""
Explanation: The Distribution of Observed SDSS "Galaxy" Sizes
Let's look at a histogram of galaxy sizes, for 1000 objects classified as "galaxies".
End of explanation
"""
# Scatter the reported uncertainty against the observed size.
data.plot(kind='scatter', x='size', y='err',s=100,figsize=(12,7));
"""
Explanation: Things to notice:
No small objects (why not?)
A "tail" to large size
Some very large sizes that look a little odd
Are these large galaxies actually large, or have they just been measured that way?
Let's look at the reported uncertainties on these sizes:
End of explanation
"""
def generate_galaxies(mu=np.log10(1.5), S=0.3, N=1000):
    """Draw N intrinsic galaxy sizes from a log-normal population.

    log10(size) is Gaussian with mean `mu` and standard deviation `S`;
    the sizes are returned as the 'size' column of a DataFrame.
    """
    log10_sizes = mu + S * np.random.randn(N)
    return pd.DataFrame({'size': 10.0 ** log10_sizes})
# Mock "perfectly measured" galaxies: narrow log-normal around 1.5 arcsec.
mu = np.log10(1.5)
S = 0.05
intrinsic = generate_galaxies(mu=mu,S=S,N=1000)
intrinsic.hist(bins=np.linspace(0.0,5.0,100),figsize=(12,7),color='green')
matplotlib.pyplot.xlabel('Size / arcsec',fontsize=16)
matplotlib.pyplot.title('Intrinsic Size',fontsize=20)
"""
Explanation: Generating Mock Data
Let's look at how distributions like this one can come about, by making a generative model for this dataset.
First, let's imagine a set of perfectly measured galaxies. They won't all have the same size, because the Universe isn't like that. Let's suppose the logarithm of their intrinsic sizes are drawn from a Gaussian distribution of width $S$ and mean $\mu$.
To model one mock galaxy, we draw a sample from this distribution. To model the whole dataset, we draw 1000 samples.
Note that this is a similar activity to making random catalogs for use in correlation function summaries; here, though, we want to start comparing real data with mock data to begin understanding it.
End of explanation
"""
def make_noise(sigma=0.3, N=1000):
    """Return N zero-mean Gaussian measurement errors (std dev `sigma`)
    as the 'size' column of a DataFrame, ready to add to intrinsic sizes.
    """
    draws = np.random.randn(N)
    return pd.DataFrame({'size': sigma * draws})
# Add measurement noise to the intrinsic sizes to build the mock
# "observed" catalog, then compare it with the real SDSS histogram.
sigma = 0.3
errors = make_noise(sigma=sigma,N=1000)
observed = intrinsic + errors
observed.hist(bins=np.linspace(0.0,5.0,100),figsize=(12,7),color='red')
matplotlib.pyplot.xlabel('Size / arcsec',fontsize=16)
matplotlib.pyplot.title('Observed Size',fontsize=20)
both = pd.DataFrame({'SDSS': data['size'], 'Model': observed['size']}, columns=['SDSS', 'Model'])
both.hist(alpha=0.5,bins=np.linspace(0.0,5.0,100),figsize=(12,7))
"""
Explanation: Now let's add some observational uncertainty. We can model this by drawing random Gaussian offsets $\epsilon$ and add one to each intrinsic size.
End of explanation
"""
# Compare variances: for independent draws, Var(observed) should be close
# to Var(intrinsic) + Var(noise).
V_data = np.var(data['size'])
print ("Variance of the SDSS distribution = ",V_data)
V_int = np.var(intrinsic['size'])
V_noise = np.var(errors['size'])
V_obs = np.var(observed['size'])
print ("Variance of the intrinsic distribution = ", V_int)
print ("Variance of the noise = ", V_noise)
print ("Variance of the observed distribution = ", V_int + V_noise, \
        "cf", V_obs)
"""
Explanation: Q: How did we do? Is this a good model for our data?
Play around with the parameters $\mu$, $S$ and $\sigma$ and see if you can get a better match to the observed distribution of sizes.
<br>
One last thing: let's look at the variances of these distributions.
Recall:
$V(x) = \frac{1}{N} \sum_{i=1}^N (x_i - \nu)^2$
If $\nu$, the population mean of $x$, is not known, an estimator for $V$ is
$\hat{V}(x) = \frac{1}{N} \sum_{i=1}^N (x_i - \bar{x})^2$
where $\bar{x} = \frac{1}{N} \sum_{i=1}^N x_i$, the sample mean.
End of explanation
"""
# Display the probabilistic-graphical-model figure inline in the notebook.
from IPython.display import Image
Image(filename="samplingdistributions.png",width=300)
"""
Explanation: You may recall this last result from previous statistics courses.
Why is the variance of our mock dataset's galaxy sizes so much smaller than that of the SDSS sample?
Sampling Distributions
In the above example we drew 1000 samples from two probability distributions:
The intrinsic size distribution, ${\rm Pr}(R_{\rm true}|\mu,S)$
The "error" distribution, ${\rm Pr}(R_{\rm obs}|R_{\rm true},\sigma)$
The procedure of drawing numbers from the first, and then adding numbers from the second, produced mock data - which then appeared to have been drawn from:
${\rm Pr}(R_{\rm obs}|\mu,S,\sigma)$
which is broader than either the intrinsic distribution or the error distribution.
Q: What would we do differently if we wanted to simulate 1 Galaxy?
The three distributions are related by an integral:
${\rm Pr}(R_{\rm obs}|\mu,S,\sigma) = \int {\rm Pr}(R_{\rm obs}|R_{\rm true},\sigma) \; {\rm Pr}(R_{\rm true}|\mu,S) \; dR_{\rm true}$
Note that this is not a convolution, in general - but it's similar to one.
When we only plot the 1D histogram of observed sizes, we are summing over or "marginalizing out" the intrinsic ones.
Probabilistic Graphical Models
We can draw a diagram representing the above combination of probability distributions, that:
Shows the dependencies between variables
Gives you a recipe for generating mock data
We can do this in python, using the daft package.:
End of explanation
"""
|
muxuezi/jupyterworkflow | 101basic/001_Pandas_vs_SQL.ipynb | mit | import pandas as pd
import numpy as np
# NOTE(review): raw.github.com is a legacy redirect and the tips.csv test
# file has since moved inside the pandas repo — verify this URL still
# resolves before relying on it.
url = 'https://raw.github.com/pandas-dev/pandas/master/pandas/tests/data/tips.csv'
tips = pd.read_csv(url)
tips.head()
"""
Explanation: Why is Python Growing So Quickly?
<center><img width=512 src=https://zgab33vy595fw5zq-zippykid.netdna-ssl.com/wp-content/uploads/2017/09/related_tags_over_time-1-1024x1024.png></center>
Pandas vs SQL
End of explanation
"""
# Column selection: equivalent of SQL SELECT col, ... LIMIT 5.
tips[['total_bill', 'tip', 'smoker', 'time']].head(5)
"""
Explanation: SELECT
sql
SELECT total_bill, tip, smoker, time
FROM tips
LIMIT 5;
pandas在DataFrame直接取字段名称即可
End of explanation
"""
# Boolean-indexing equivalent of SQL WHERE time = 'Dinner' LIMIT 5.
tips[tips['time'] == 'Dinner'].head(5)
"""
Explanation: DataFrame取数时,如果不设置字段名称,就会取所有字段 (与 SQL 的 * 等效)
WHERE
sql
SELECT *
FROM tips
WHERE time = 'Dinner'
LIMIT 5;
DataFrames过滤方法很多,最常用的是布尔索引(boolean indexing)
End of explanation
"""
# Build the boolean mask explicitly; value_counts shows how many rows
# are True/False, then the mask selects only the True rows.
is_dinner = tips['time'] == 'Dinner'
is_dinner.value_counts()
tips[is_dinner].head(5)
"""
Explanation: 这行代码是向DataFrame传递一个True/False对象的Series,返回所有带True的行。
End of explanation
"""
# & is pandas' AND; each condition must be parenthesized.
tips[(tips['time'] == 'Dinner') & (tips['tip'] > 5.00)]
"""
Explanation: 与SQL的OR/AND类似,DataFrame 用 | (OR) 与 & (AND)
sql
SELECT *
FROM tips
WHERE time = 'Dinner' AND tip > 5.00;
End of explanation
"""
# | is pandas' OR; each condition must be parenthesized.
tips[(tips['size'] >= 5) | (tips['total_bill'] > 45)]
"""
Explanation: sql
SELECT *
FROM tips
WHERE size >= 5 OR total_bill > 45;
End of explanation
"""
# Small frame with missing values. Use np.nan: the np.NaN alias was
# removed in NumPy 2.0, so np.NaN raises AttributeError there.
frame = pd.DataFrame({
    'col1': ['A', 'B', np.nan, 'C', 'D'],
    'col2': ['F', np.nan, 'G', 'H', 'I']
})
frame
"""
Explanation: 用 notna() and isna() 方法检测NULL
End of explanation
"""
# Rows where col2 is missing: equivalent of WHERE col2 IS NULL.
frame[frame['col2'].isna()]
"""
Explanation: 如果用SQL筛选col2是NULL:
sql
SELECT *
FROM frame
WHERE col2 IS NULL;
pandas用isna方法
End of explanation
"""
# Rows where col1 is present: equivalent of WHERE col1 IS NOT NULL.
frame[frame['col1'].notna()]
"""
Explanation: 用SQL筛选col1不是NULL:
sql
SELECT *
FROM frame
WHERE col1 IS NOT NULL;
pandas用notna()方法
End of explanation
"""
# size() returns one count per group — the analogue of COUNT(*).
tips.groupby('sex').size()
"""
Explanation: GROUP BY
SQL的GROUP BY操作,在pandas中是groupby()方法。
例如用SQL统计不同性别客人数量:
sql
SELECT sex, count(*)
FROM tips
GROUP BY sex;
/*
Female 87
Male 157
*/
pandas方法是:
End of explanation
"""
# count() applies per column, giving non-null counts for every column.
tips.groupby('sex').count()
"""
Explanation: 这里用pandas的size()方法,不是count()方法。因为count()方法会自动应用到每一字段,返回所有字段的计数。
End of explanation
"""
# count() restricted to a single column mimics COUNT(total_bill).
tips.groupby('sex')['total_bill'].count()
"""
Explanation: 也可以对一个字段用count()方法,实现同样效果:
End of explanation
"""
# Per-day mean tip and group size, like SELECT day, AVG(tip), COUNT(*).
# NOTE(review): passing NumPy callables (np.mean/np.size) to agg is
# deprecated in recent pandas — confirm whether 'mean'/'size' strings
# should be used with the pandas version in play.
tips.groupby('day').agg({'tip': np.mean, 'day': np.size})
"""
Explanation: 也可以一次使用多个聚合函数。假如,要统计每星期的日均小费金额,以及给小费的人数,用SQL如下:
sql
SELECT day, AVG(tip), COUNT(*)
FROM tips
GROUP BY day;
/*
Fri 2.734737 19
Sat 2.993103 87
Sun 3.255132 76
Thur 2.771452 62
*/
pandas用agg()方法实现,参数是一个Python字典(字段名称: 方法名称, ...)
End of explanation
"""
# Group by two keys and apply two aggregations to one column.
# NOTE(review): NumPy callables in agg are deprecated in recent pandas;
# ['size', 'mean'] strings may be required — verify against the version.
tips.groupby(['smoker', 'day']).agg({'tip': [np.size, np.mean]})
"""
Explanation: sql
SELECT smoker, day, COUNT(*), AVG(tip)
FROM tips
GROUP BY smoker, day;
/*
smoker day
No Fri 4 2.812500
Sat 45 3.102889
Sun 57 3.167895
Thur 45 2.673778
Yes Fri 15 2.714000
Sat 42 2.875476
Sun 19 3.516842
Thur 17 3.030000
*/
End of explanation
"""
# Two small frames sharing a 'key' column; df2 has a duplicate 'D' key
# so the join examples below show one-to-many behaviour.
df1 = pd.DataFrame({'key': ['A', 'B', 'C', 'D'],
                    'value': np.random.randn(4)})
df2 = pd.DataFrame({'key': ['B', 'D', 'D', 'E'],
                    'value': np.random.randn(4)})
"""
Explanation: JOIN
JOIN在pandas是join()或merge()方法。join()方法默认是按照DataFrames的索引值(indices)连接,两种方法都支持四种连接(LEFT, RIGHT, INNER, FULL),也可以按字段连接。
End of explanation
"""
# merge defaults to an INNER JOIN on the shared column(s)
pd.merge(df1, df2, on='key')
"""
Explanation: INNER JOIN
sql
SELECT *
FROM df1
INNER JOIN df2
ON df1.key = df2.key;
End of explanation
"""
# Join a column of df1 against the index of df2.
indexed_df2 = df2.set_index('key')
pd.merge(df1, indexed_df2, left_on='key', right_index=True)
"""
Explanation: merge()也可以连接一个表的字段与另一个表的索引
End of explanation
"""
# LEFT OUTER JOIN: keep every df1 row, NaN where df2 has no match.
pd.merge(df1, df2, on='key', how='left')
"""
Explanation: LEFT OUTER JOIN
sql
SELECT *
FROM df1
LEFT OUTER JOIN df2
ON df1.key = df2.key;
End of explanation
"""
# RIGHT OUTER JOIN: keep every df2 row, NaN where df1 has no match.
pd.merge(df1, df2, on='key', how='right')
"""
Explanation: RIGHT JOIN
sql
SELECT *
FROM df1
RIGHT OUTER JOIN df2
ON df1.key = df2.key;
End of explanation
"""
# FULL OUTER JOIN: keep rows from both sides.
pd.merge(df1, df2, on='key', how='outer')
"""
Explanation: FULL JOIN
pandas支持FULL JOINs, 绝大部分RDBMS都支持,HIVE支持,MySQL不支持,
sql
SELECT *
FROM df1
FULL OUTER JOIN df2
ON df1.key = df2.key;
End of explanation
"""
# Two city/rank frames that overlap on Chicago, used for the UNION demos.
df1 = pd.DataFrame({'city': ['Chicago', 'San Francisco', 'New York City'],
                    'rank': range(1, 4)})
df2 = pd.DataFrame({'city': ['Chicago', 'Boston', 'Los Angeles'],
                    'rank': [1, 4, 5]})
"""
Explanation: UNION
SQL的UNION ALL在pandas中是concat()
End of explanation
"""
# concat stacks the frames — the analogue of UNION ALL (keeps duplicates).
pd.concat([df1, df2])
"""
Explanation: sql
SELECT city, rank
FROM df1
UNION ALL
SELECT city, rank
FROM df2;
/*
city rank
Chicago 1
San Francisco 2
New York City 3
Chicago 1
Boston 4
Los Angeles 5
*/
End of explanation
"""
# UNION (distinct): stack then drop exact duplicate rows.
pd.concat([df1, df2]).drop_duplicates()
"""
Explanation: SQL的UNION与UNION ALL类似,不过UNION会剔除重复行
SELECT city, rank
FROM df1
UNION
SELECT city, rank
FROM df2;
/*
city rank
Chicago 1
San Francisco 2
New York City 3
Boston 4
Los Angeles 5
*/
pandas需要用concat()+drop_duplicates()方法实现
End of explanation
"""
# ROW_NUMBER() OVER (PARTITION BY day ORDER BY total_bill DESC):
# sort, groupby, cumcount()+1 numbers rows within each day, then keep
# the top 2 per day.
tips.assign(rn=tips.sort_values(['total_bill'], ascending=False).groupby(
    ['day']).cumcount() + 1).query('rn < 3').sort_values(['day', 'rn'])
"""
Explanation: 每组Top N 问题
mysql
SELECT * FROM (
SELECT
t.*,
ROW_NUMBER() OVER(PARTITION BY day ORDER BY total_bill DESC) AS rn
FROM tips t
)
WHERE rn < 3
ORDER BY day, rn;
End of explanation
"""
# Same top-N-per-group result using rank(method='first'), which breaks
# ties by order of appearance like ROW_NUMBER().
tips.assign(rnk=tips.groupby(['day'])['total_bill'].rank(
    method='first', ascending=False)).query('rnk < 3').sort_values(['day', 'rnk'])
"""
Explanation: 也可以用rank(method=’first’)函数
End of explanation
"""
# RANK() analogue: rank(method='min') gives tied rows the same (lowest)
# rank; restricted to tips under $2, keeping the top 2 per sex.
tips[tips['tip'] < 2].assign(rnk_min=tips.groupby(['sex'])['tip'].rank(
    method='min')).query('rnk_min < 3').sort_values(['sex', 'rnk_min'])
"""
Explanation: 找出每组消费低于2美金(tips < 2)的前两名(rank < 3),这里用rank(method='min')
mysql
SELECT * FROM (
SELECT
t.*,
RANK() OVER(PARTITION BY sex ORDER BY tip) AS rnk
FROM tips t
WHERE tip < 2
)
WHERE rnk < 3
ORDER BY sex, rnk;
End of explanation
"""
# In-place UPDATE: double every tip that is under $2.
tips.loc[tips['tip'] < 2, 'tip'] *= 2
"""
Explanation: UPDATE
mysql
UPDATE tips
SET tip = tip*2
WHERE tip < 2;
End of explanation
"""
# DELETE analogue: keep only the wanted rows and rebind the name.
tips = tips.loc[tips['tip'] <= 9]
"""
Explanation: DELETE
mysql
DELETE FROM tips
WHERE tip > 9;
pandas选择需要的数据,创建新Dataframe,而不是删除数据
End of explanation
"""
|
liganega/Gongsu-DataSci | previous/notes2017/W03/GongSu06_Errors_and_Exception_Handling.ipynb | gpl-3.0 | from __future__ import print_function
# Read a number interactively (Python 2 raw_input returns a str) and
# print its square. int() raises ValueError for input like "3.2".
input_number = raw_input("A number please: ")
number = int(input_number)
print("제곱의 결과는", number**2, "입니다.")
"""
Explanation: 오류 및 예외 처리
개요
코딩할 때 발생할 수 있는 다양한 오류 살펴 보기
오류 메시지 정보 확인 방법
예외 처리, 즉 오류가 발생할 수 있는 예외적인 상황을 미리 고려하는 방법 소개
오늘의 주요 예제
아래 코드는 raw_input() 함수를 이용하여 사용자로부터 숫자를 입력받아 그 숫자의 제곱을 리턴하고자 하는 내용을 담고 있다. 코드를 실행하면 숫자를 입력하라는 창이 나오며,
여기에 숫자 3을 입력하면 정상적으로 작동한다.
하지만, 예를 들어, 3.2를 입력하면 값 오류(value error)가 발생한다.
End of explanation
"""
# deliberately broken example: the closing quote is missing, so Python
# raises SyntaxError ("EOL while scanning string literal")
sentence = 'I am a sentence
"""
Explanation: 주의: 파이썬 3의 경우 input() 함수를 raw_input() 대신에 사용해야 한다.
위 코드는 정수들의 제곱을 계산하는 프로그램이다.
하지만 사용자가 경우에 따라 정수 이외의 값을 입력하면 시스템이 다운된다.
이에 대한 해결책을 다루고자 한다.
오류 예제
먼저 오류의 다양한 예제를 살펴보자.
다음 코드들은 모두 오류를 발생시킨다.
예제: 0으로 나누기 오류
python
4.6/0
오류 설명: 0으로 나눌 수 없다.
예제: 문법 오류
python
sentence = 'I am a sentence
오류 설명: 문자열 양 끝의 따옴표가 짝이 맞아야 한다.
* 작은 따옴표끼리 또는 큰 따옴표끼리
예제: 들여쓰기 문법 오류
python
for i in range(3):
j = i * 2
print(i, j)
오류 설명: 2번 줄과 3번 줄의 들여쓰기 정도가 동일해야 한다.
예제: 자료형 오류
```python
new_string = 'cat' - 'dog'
new_string = 'cat' * 'dog'
new_string = 'cat' / 'dog'
new_string = 'cat' + 3
new_string = 'cat' - 3
new_string = 'cat' / 3
```
오류 설명: 문자열 자료형끼리의 합, 문자열과 정수의 곱셈만 정의되어 있다.
예제: 이름 오류
python
print(party)
오류 설명: 미리 선언된 변수만 사용할 수 있다.
예제: 인덱스 오류
python
a_string = 'abcdefg'
a_string[12]
오류 설명: 인덱스는 문자열의 길이보다 작은 수만 사용할 수 있다.
예제: 값 오류
python
int(a_string)
오류 설명: int() 함수는 정수로만 구성된 문자열만 처리할 수 있다.
예제: 속성 오류
python
print(a_string.len())
오류 설명: 문자열 자료형에는 len() 메소드가 존재하지 않는다.
주의: len() 이라는 함수는 문자열의 길이를 확인하지만 문자열 메소드는 아니다.
이후에 다룰 리스트, 튜플 등에 대해서도 사용할 수 있는 함수이다.
오류 확인
앞서 언급한 코드들을 실행하면 오류가 발생하고 어디서 어떤 오류가 발생하였는가에 대한 정보를
파이썬 해석기가 바로 알려 준다.
예제
End of explanation
"""
# deliberate error example: dividing by zero raises ZeroDivisionError
a = 0
4/a
"""
Explanation: 오류를 확인하는 메시지가 처음 볼 때는 매우 생소하다.
위 오류 메시지를 간단하게 살펴보면 다음과 같다.
File "<ipython-input-37-a6097ed4dc2e>", line 1
1번 줄에서 오류 발생
sentence = 'I am a sentence
^
오류 발생 위치 명시
SyntaxError: EOL while scanning string literal
오류 종류 표시: 문법 오류(SyntaxError)
예제
아래 예제는 0으로 나눌 때 발생하는 오류를 나타낸다.
오류에 대한 정보를 잘 살펴보면서 어떤 내용을 담고 있는지 확인해 보아야 한다.
End of explanation
"""
from __future__ import print_function
number_to_square = raw_input("A number please")
# note that number_to_square holds a string (str);
# convert it to int first if you want to do arithmetic with it
number = int(number_to_square)
print("제곱의 결과는", number**2, "입니다.")
"""
Explanation: 오류의 종류
앞서 예제들을 통해 살펴 보았듯이 다양한 종류의 오류가 발생하며,
코드가 길어지거나 복잡해지면 오류가 발생할 가능성은 점차 커진다.
오류의 종류를 파악하면 어디서 왜 오류가 발생하였는지를 보다 쉽게 파악하여
코드를 수정할 수 있게 된다.
따라서 코드의 발생원인을 바로 알아낼 수 있어야 하며 이를 위해서는 오류 메시지를
제대로 확인할 수 있어야 한다.
하지만 여기서는 언급된 예제 정도의 수준만 다루고 넘어간다.
코딩을 하다 보면 어차피 다양한 오류와 마주치게 될 텐데 그때마다
스스로 오류의 내용과 원인을 확인해 나가는 과정을 통해
보다 많은 경험을 쌓는 길 외에는 달리 방법이 없다.
예외 처리
코드에 문법 오류가 포함되어 있는 경우 아예 실행되지 않는다.
그렇지 않은 경우에는 일단 실행이 되고 중간에 오류가 발생하면 바로 멈춰버린다.
이렇게 중간에 오류가 발생할 수 있는 경우를 미리 생각하여 대비하는 과정을
예외 처리(exception handling)라고 부른다.
예를 들어, 오류가 발생하더라도 오류발생 이전까지 생성된 정보들을 저장하거나, 오류발생 이유를 좀 더 자세히 다루거나, 아니면 오류발생에 대한 보다 자세한 정보를 사용자에게 알려주기 위해 예외 처리를 사용한다.
예제
아래 코드는 raw_input() 함수를 이용하여 사용자로부터 숫자를 입력받아 그 숫자의 제곱을 리턴하고자 하는 내용을 담고 있으며, 코드에는 문법적 오류가 없다.
그리고 코드를 실행하면 숫자를 입력하라는 창이 나온다.
여기에 숫자 3을 입력하면 정상적으로 작동하지만
예를 들어, 3.2를 입력하면 값 오류(value error)가 발생한다.
End of explanation
"""
# Simplest try/except form. NOTE: a bare `except:` is discouraged in real
# code (it swallows every exception); the following cells show how to
# name the specific exception types instead.
number_to_square = raw_input("A number please:")
try:
    number = int(number_to_square)
    print("제곱의 결과는", number ** 2, "입니다.")
except:
    print("정수를 입력해야 합니다.")
"""
Explanation: 3.2를 입력했을 때 오류가 발생하는 이유는 int() 함수가 정수 모양의 문자열만
처리할 수 있기 때문이다.
사실 정수들의 제곱을 계산하는 프로그램을 작성하였지만 경우에 따라
정수 이외의 값을 입력하는 경우가 발생하게 되며, 이런 경우를 대비해야 한다.
즉, 오류가 발생할 것을 미리 예상해야 하며, 어떻게 대처해야 할지 준비해야 하는데,
try ... except ...문을 이용하여 예외를 처리하는 방식을 활용할 수 있다.
End of explanation
"""
# Handle each error type separately: ValueError for non-integer input,
# ZeroDivisionError when the user enters 4 (making number - 4 zero).
number_to_square = raw_input("A number please: ")
try:
    number = int(number_to_square)
    a = 5/(number - 4)
    print("결과는", a, "입니다.")
except ValueError:
    print("정수를 입력해야 합니다.")
except ZeroDivisionError:
    print("4는 빼고 하세요.")
"""
Explanation: 오류 종류에 맞추어 다양한 대처를 하기 위해서는 오류의 종류를 명시하여 예외처리를 하면 된다.
아래 코드는 입력 값에 따라 다른 오류가 발생하고 그에 상응하는 방식으로 예외처리를 실행한다.
값 오류(ValueError)의 경우
End of explanation
"""
# Intentionally the same cell as above, repeated so the notebook can be
# run a second time to demonstrate the ZeroDivisionError path.
number_to_square = raw_input("A number please: ")
try:
    number = int(number_to_square)
    a = 5/(number - 4)
    print("결과는", a, "입니다.")
except ValueError:
    print("정수를 입력해야 합니다.")
except ZeroDivisionError:
    print("4는 빼고 하세요.")
"""
Explanation: 0으로 나누기 오류(ZeroDivisionError)의 경우
End of explanation
"""
# deliberate demo: the except clause names the WRONG exception type
# (ValueError), so the ZeroDivisionError from 1/0 is NOT caught and the
# program still crashes.
try:
    a = 1/0
except ValueError:
    print("This program stops here.")
"""
Explanation: 주의: 이와 같이 발생할 수 있는 예외를 가능한 한 모두 염두하는 프로그램을 구현해야 하는 일은
매우 어려운 일이다.
앞서 보았듯이 오류의 종류를 정확히 알 필요가 발생한다.
다음 예제에서 보듯이 오류의 종류를 틀리게 명시하면 예외 처리가 제대로 작동하지 않는다.
End of explanation
"""
def to_define():
    """Stub for complex logic that is not needed yet; always raises."""
    message = "아직 정의되어 있지 않음"
    raise NotImplementedError(message)
# calling the stub raises NotImplementedError with its message
print(to_define())
"""
Explanation: raise 함수
강제로 오류를 발생시키고자 하는 경우에 사용한다.
예제
어떤 함수를 정확히 정의하지 않은 상태에서 다른 중요한 일을 먼저 처리하고자 할 때
아래와 같이 함수를 선언하고 넘어갈 수 있다.
그런데 아래 함수를 제대로 선언하지 않은 채로 다른 곳에서 호출하면
"아직 정의되어 있지 않음"
이란 메시지로 정보를 알려주게 된다.
End of explanation
"""
def to_define1():
    """Stub left without a raise: calling it quietly returns None."""
    return None
# prints "None": without a raise, the missing implementation goes unnoticed
print(to_define1())
"""
Explanation: 주의: 오류 처리를 사용하지 않으면 오류 메시지가 보이지 않을 수도 있음에 주의해야 한다.
End of explanation
"""
def square( number ):
    """
    Return the square of the given integer.

    NOTE (deliberate tutorial bug): the body multiplies by 2 instead of
    squaring, so square(3) returns 6. The surrounding text uses this to
    show that code can be wrong without raising any error.
    """
    square_of_number = number * 2  # BUG on purpose: should be number ** 2
    return square_of_number
"""
Explanation: 코드의 안전성 문제
문법 오류 또는 실행 중에 오류가 발생하지 않는다 하더라도 코드의 안전성이 보장되지는 않는다.
코드의 안정성이라 함은 코드를 실행할 때 기대하는 결과가 산출된다는 것을 보장한다는 의미이다.
예제
아래 코드는 숫자의 제곱을 리턴하는 square() 함수를 제대로 구현하지 못한 경우를 다룬다.
End of explanation
"""
# returns 6, not 9 — no error is raised, demonstrating the silent bug
square(3)
"""
Explanation: 위 함수를 아래와 같이 호출하면 오류가 전혀 발생하지 않지만,
엉뚱한 값을 리턴한다.
End of explanation
"""
# help() shows the docstring written in the function definition
help(square)
"""
Explanation: 주의: help() 를 이용하여 어떤 함수가 무슨 일을 하는지 내용을 확인할 수 있다.
단, 함수를 정의할 때 함께 적힌 문서화 문자열(docstring) 내용이 확인된다.
따라서, 함수를 정의할 때 문서화 문자열에 가능한 유효한 정보를 입력해 두어야 한다.
End of explanation
"""
from __future__ import print_function
# Divides 100 by user input. Crashes with ZeroDivisionError on 0 and
# ValueError on non-integer text — the exercise below fixes both cases.
number_to_square = raw_input("A number to divide 100: ")
number = int(number_to_square)
print("100을 입력한 값으로 나눈 결과는", 100/number, "입니다.")
"""
Explanation: 오류에 대한 보다 자세한 정보
파이썬에서 다루는 오류에 대한 보다 자세한 정보는 아래 사이트들에 상세하게 안내되어 있다.
파이썬 기본 내장 오류 정보 문서:
https://docs.python.org/3.4/library/exceptions.html
파이썬 예외처리 정보 문서:
https://docs.python.org/3.4/tutorial/errors.html
연습문제
연습
아래 코드는 100을 입력한 값으로 나누는 함수이다.
다만 0을 입력할 경우 0으로 나누기 오류(ZeroDivisionError)가 발생한다.
End of explanation
"""
# Sample answer: float division, re-raise with friendlier messages for
# zero input (ZeroDivisionError) and non-numeric input (ValueError).
number_to_square = raw_input("A number to divide 100: ")
try:
    number = float(number_to_square)
    print("100을 입력한 값으로 나눈 결과는", 100/number, "입니다.")
except ZeroDivisionError:
    raise ZeroDivisionError('0이 아닌 숫자를 입력하세요.')
except ValueError:
    raise ValueError('숫자를 입력하세요.')
# The same cell repeated, apparently so each error path can be tried
# interactively on a second run.
number_to_square = raw_input("A number to divide 100: ")
try:
    number = float(number_to_square)
    print("100을 입력한 값으로 나눈 결과는", 100/number, "입니다.")
except ZeroDivisionError:
    raise ZeroDivisionError('0이 아닌 숫자를 입력하세요.')
except ValueError:
    raise ValueError('숫자를 입력하세요.')
"""
Explanation: 아래 내용이 충족되도록 위 코드를 수정하라.
나눗셈이 부동소수점으로 계산되도록 한다.
0이 아닌 숫자가 입력될 경우 100을 그 숫자로 나눈다.
0이 입력될 경우 0이 아닌 숫자를 입력하라고 전달한다.
숫자가 아닌 값이 입력될 경우 숫자를 입력하라고 전달한다.
견본답안:
End of explanation
"""
|
ireapps/cfj-2017 | completed/02. Working with data files.ipynb | mit | import csv
"""
Explanation: Working with data files
Reading and writing data files is a common task, and Python offers native support for working with many kinds of data files. Today, we're going to be working mainly with CSVs.
Import the csv module
We're going to be working with delimited text files, so the first thing we need to do is import this functionality from the standard library.
End of explanation
"""
# open the MLB data file `as` mlb
# (note: the header row is NOT skipped here; that is introduced below)
with open('data/mlb.csv', 'r') as mlb:
    # create a reader object
    reader = csv.reader(mlb)
    # loop over the rows in the file
    for row in reader:
        # assign variables to each element in the row (shortcut!)
        name, team, position, salary, start_year, end_year, years = row
        # print the row, which is a list
        print(row)
"""
Explanation: Opening a file to read the contents
We're going to use something called a with statement to open a file and read the contents. The open() function takes at least two arguments: The path to the file you're opening and what "mode" you're opening it in.
To start with, we're going to use the 'r' mode to read the data. We'll use the default arguments for delimiter -- comma -- and we don't need to specify a quote character.
Important: If you open a data file in w (write) mode, anything that's already in the file will be erased.
The file we're using -- MLB roster data from 2017 -- lives at data/mlb.csv.
Once we have the file open, we're going to use some functionality from the csv module to iterate over the lines of data and print each one.
Specifically, we're going to use the csv.reader method, which returns a list of lines in the data file. Each line, in turn, is a list of the "cells" of data in that line.
Then we're going to loop over the lines of data and print each line. We can also use bracket notation to retrieve elements from inside each line of data.
End of explanation
"""
# open the MLB data file `as` mlb
with open('data/mlb.csv', 'r') as mlb:
    # create a reader object
    reader = csv.reader(mlb)
    # move past the header row
    next(reader)
    # loop over the rows in the file
    for row in reader:
        # assign variables to each element in the row (shortcut!)
        name, team, position, salary, start_year, end_year, years = row
        # print the line of data ~only~ if the player is on the Twins
        if team == 'MIN':
            # print the row, which is a list
            print(row)
"""
Explanation: Simple filtering
If you wanted to filter your data, you could use an if statement inside your with block.
End of explanation
"""
# open the MLB data file `as` mlb
with open('data/mlb.csv', 'r') as mlb:
    # create a reader object
    reader = csv.reader(mlb)
    # move past the header row
    next(reader)
    # loop over the rows in the file
    for row in reader:
        # assign variables to each element in the row (shortcut!)
        name, team, position, salary, start_year, end_year, years = row
        # print ~only~ if the player makes at least $1 million
        # (salary is a string in the CSV, so coerce to int first)
        if int(salary) >= 1000000:
            # print the row, which is a list
            print(name, salary)
"""
Explanation: Exercise
Read in the MLB data, print only the names and salaries of players who make at least $1 million. (Hint: Use type coercion!)
End of explanation
"""
# open the MLB data file `as` mlb
with open('data/mlb.csv', 'r') as mlb:
    # create a reader object
    # DictReader yields one dict per row, keyed by the header names
    reader = csv.DictReader(mlb)
    # loop over the rows in the file
    for row in reader:
        # print just the player's name (the column header is "NAME")
        print(row['NAME'])
"""
Explanation: DictReader: Another way to read CSV files
Sometimes it's more convenient to work with data files as a list of dictionaries instead of a list of lists. That way, you don't have to remember the position of each "column" of data -- you can just reference the column name. To do it, we'll use a csv.DictReader object instead of a csv.reader object. Otherwise the code is much the same.
End of explanation
"""
# define the column names
COLNAMES = ['name', 'org', 'position']
# let's make a few rows of data to write
DATA_TO_WRITE = [
    ['Cody', 'IRE', 'Training Director'],
    ['Maggie', 'The New York Times', 'Reporter'],
    ['Donald', 'The White House', 'President']
]
# open an output file in write mode
# newline='' is required by the csv module docs; without it, Windows
# gets an extra blank line after every row
with open('people-list.csv', 'w', newline='') as outfile:
    # create a writer object
    writer = csv.writer(outfile)
    # write the header row
    writer.writerow(COLNAMES)
    # loop over the data and write to file
    for human in DATA_TO_WRITE:
        writer.writerow(human)
"""
Explanation: Writing to CSV files
You can also use the csv module to create csv files -- same idea, you just need to change the mode to 'w'. As with reading, there's a list-based writing method and a dictionary-based method.
End of explanation
"""
# define the column names
COLNAMES = ['name', 'org', 'position']
# let's make a few rows of data to write
# (the dict keys must match `fieldnames` exactly)
DATA_TO_WRITE = [
    {'name': 'Cody', 'org': 'IRE', 'position': 'Training Director'},
    {'name': 'Maggie', 'org': 'The New York Times', 'position': 'Reporter'},
    {'name': 'Donald', 'org': 'The White House', 'position': 'President'}
]
# open an output file in write mode
# newline='' is required by the csv module docs; without it, Windows
# gets an extra blank line after every row
with open('people-dict.csv', 'w', newline='') as outfile:
    # create a writer object -- pass the list of column names to the `fieldnames` keyword argument
    writer = csv.DictWriter(outfile, fieldnames=COLNAMES)
    # use the writeheader method to write the header row
    writer.writeheader()
    # loop over the data and write to file
    for human in DATA_TO_WRITE:
        writer.writerow(human)
"""
Explanation: Using DictWriter to write data
Similar to using the list-based method, except that you need to ensure that the keys in your dictionaries of data match exactly a list of fieldnames.
End of explanation
"""
# open the MLB data file `as` mlb
# also, open `mlb-copy.csv` to write to
# newline='' on the output keeps the csv module from writing blank rows
# on Windows (per the csv module docs)
with open('data/mlb.csv', 'r') as mlb, open('mlb-copy.csv', 'w', newline='') as mlb_copy:
    # create a reader object
    reader = csv.DictReader(mlb)
    # create a writer object
    # we're going to use the `fieldnames` attribute of the DictReader object
    # as our output headers, as well
    # b/c we're basically just making a copy
    writer = csv.DictWriter(mlb_copy, fieldnames=reader.fieldnames)
    # write header row
    writer.writeheader()
    # loop over the rows in the file
    for row in reader:
        # each `row` is a dict keyed by the input header names
        # write row to output file
        writer.writerow(row)
"""
Explanation: You can open multiple files for reading/writing
Sometimes you want to open multiple files at the same time. One thing you might want to do: Opening a file of raw data in read mode, clean each row in a loop and write out the clean data to a new file.
You can open multiple files in the same with block -- just separate your open() functions with a comma.
For this example, we're not going to do any cleaning -- we're just going to copy the contents of one file to another.
End of explanation
"""
|
jamiebull1/eppy | docs/runningeplus.ipynb | mit | # you would normaly install eppy by doing
# python setup.py install
# or
# pip install eppy
# or
# easy_install eppy
# if you have not done so, uncomment the following three lines
import sys
# pathnameto_eppy = 'c:/eppy'
pathnameto_eppy = '../'
# make the local eppy checkout importable ahead of any installed copy
sys.path.append(pathnameto_eppy)
from eppy.modeleditor import IDF
# the IDD (data dictionary) must be registered before any IDF can be parsed
iddfile = "/Applications/EnergyPlus-8-3-0/Energy+.idd"
IDF.setiddname(iddfile)
idfname = "/Applications/EnergyPlus-8-3-0/ExampleFiles/BasicsFiles/Exercise1A.idf"
epwfile = "/Applications/EnergyPlus-8-3-0/WeatherData/USA_IL_Chicago-OHare.Intl.AP.725300_TMY3.epw"
# pairing the model with a weather file lets idf.run() launch a full simulation
idf = IDF(idfname, epwfile)
idf.run()
"""
Explanation: Running EnergyPlus from Eppy
It would be great if we could run EnergyPlus directly from our IDF, wouldn’t it?
Well here’s how we can.
End of explanation
"""
help(idf.run)
"""
Explanation: if you are in a terminal, you will see something like this::
It’s as simple as that to run using the EnergyPlus defaults, but all the EnergyPlus command line interface options are also supported.
To get a description of the options available, as well as the defaults you can call the Python built-in help function on the IDF.run method and it will print a full description of the options to the console.
End of explanation
"""
|
mne-tools/mne-tools.github.io | stable/_downloads/88563c785f9a977b7ce2000e660aeacf/30_annotate_raw.ipynb | bsd-3-clause | import os
from datetime import timedelta
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
raw.crop(tmax=60).load_data()
"""
Explanation: Annotating continuous data
This tutorial describes adding annotations to a ~mne.io.Raw object,
and how annotations are used in later stages of data processing.
As usual we'll start by importing the modules we need, loading some
example data <sample-dataset>, and (since we won't actually analyze the
raw data in this tutorial) cropping the ~mne.io.Raw object to just 60
seconds before loading it into RAM to save memory:
End of explanation
"""
my_annot = mne.Annotations(onset=[3, 5, 7], # in seconds
duration=[1, 0.5, 0.25], # in seconds, too
description=['AAA', 'BBB', 'CCC'])
print(my_annot)
"""
Explanation: ~mne.Annotations in MNE-Python are a way of storing short strings of
information about temporal spans of a ~mne.io.Raw object. Below the
surface, ~mne.Annotations are list-like <list> objects,
where each element comprises three pieces of information: an onset time
(in seconds), a duration (also in seconds), and a description (a text
string). Additionally, the ~mne.Annotations object itself also keeps
track of orig_time, which is a POSIX timestamp_ denoting a real-world
time relative to which the annotation onsets should be interpreted.
Creating annotations programmatically
If you know in advance what spans of the ~mne.io.Raw object you want
to annotate, ~mne.Annotations can be created programmatically, and
you can even pass lists or arrays to the ~mne.Annotations
constructor to annotate multiple spans at once:
End of explanation
"""
raw.set_annotations(my_annot)
print(raw.annotations)
# convert meas_date (a tuple of seconds, microseconds) into a float:
meas_date = raw.info['meas_date']
orig_time = raw.annotations.orig_time
print(meas_date == orig_time)
"""
Explanation: Notice that orig_time is None, because we haven't specified it. In
those cases, when you add the annotations to a ~mne.io.Raw object,
it is assumed that the orig_time matches the time of the first sample of
the recording, so orig_time will be set to match the recording
measurement date (raw.info['meas_date']).
End of explanation
"""
time_of_first_sample = raw.first_samp / raw.info['sfreq']
print(my_annot.onset + time_of_first_sample)
print(raw.annotations.onset)
"""
Explanation: Since the example data comes from a Neuromag system that starts counting
sample numbers before the recording begins, adding my_annot to the
~mne.io.Raw object also involved another automatic change: an offset
equalling the time of the first recorded sample (raw.first_samp /
raw.info['sfreq']) was added to the onset values of each annotation
(see time-as-index for more info on raw.first_samp):
End of explanation
"""
time_format = '%Y-%m-%d %H:%M:%S.%f'
new_orig_time = (meas_date + timedelta(seconds=50)).strftime(time_format)
print(new_orig_time)
later_annot = mne.Annotations(onset=[3, 5, 7],
duration=[1, 0.5, 0.25],
description=['DDD', 'EEE', 'FFF'],
orig_time=new_orig_time)
raw2 = raw.copy().set_annotations(later_annot)
print(later_annot.onset)
print(raw2.annotations.onset)
"""
Explanation: If you know that your annotation onsets are relative to some other time, you
can set orig_time before you call :meth:~mne.io.Raw.set_annotations,
and the onset times will get adjusted based on the time difference between
your specified orig_time and raw.info['meas_date'], but without the
additional adjustment for raw.first_samp. orig_time can be specified
in various ways (see the documentation of ~mne.Annotations for the
options); here we'll use an ISO 8601_ formatted string, and set it to be 50
seconds later than raw.info['meas_date'].
End of explanation
"""
fig = raw.plot(start=2, duration=6)
"""
Explanation: <div class="alert alert-info"><h4>Note</h4><p>If your annotations fall outside the range of data times in the
`~mne.io.Raw` object, the annotations outside the data range will
not be added to ``raw.annotations``, and a warning will be issued.</p></div>
Now that your annotations have been added to a ~mne.io.Raw object,
you can see them when you visualize the ~mne.io.Raw object:
End of explanation
"""
fig = raw.plot(start=2, duration=6)
fig.fake_keypress('a')
"""
Explanation: The three annotations appear as differently colored rectangles because they
have different description values (which are printed along the top
edge of the plot area). Notice also that colored spans appear in the small
scroll bar at the bottom of the plot window, making it easy to quickly view
where in a ~mne.io.Raw object the annotations are so you can easily
browse through the data to find and examine them.
Annotating Raw objects interactively
Annotations can also be added to a ~mne.io.Raw object interactively
by clicking-and-dragging the mouse in the plot window. To do this, you must
first enter "annotation mode" by pressing :kbd:a while the plot window is
focused; this will bring up the annotation controls:
End of explanation
"""
new_annot = mne.Annotations(onset=3.75, duration=0.75, description='AAA')
raw.set_annotations(my_annot + new_annot)
raw.plot(start=2, duration=6)
"""
Explanation: The drop-down-menu on the left determines which existing label will be
created by the next click-and-drag operation in the main plot window. New
annotation descriptions can be added by clicking the :guilabel:Add
description button; the new description will be added to the list of
descriptions and automatically selected.
The following functions relate to which description is currently selected in
the drop-down-menu:
With :guilabel:Remove description you can remove description
including the annotations.
With :guilabel:Edit description you can edit
the description of either only one annotation (the one currently selected)
or all annotations of a description.
With :guilabel:Set Visible you can show or hide descriptions.
During interactive annotation it is also possible to adjust the start and end
times of existing annotations, by clicking-and-dragging on the left or right
edges of the highlighting rectangle corresponding to that annotation. When
an annotation is selected (the background of the label at the bottom changes
to darker) the values for start and stop are visible in two spinboxes and
can also be edited there.
<div class="alert alert-danger"><h4>Warning</h4><p>Calling :meth:`~mne.io.Raw.set_annotations` **replaces** any annotations
currently stored in the `~mne.io.Raw` object, so be careful when
working with annotations that were created interactively (you could lose
a lot of work if you accidentally overwrite your interactive
annotations). A good safeguard is to run
``interactive_annot = raw.annotations`` after you finish an interactive
annotation session, so that the annotations are stored in a separate
variable outside the `~mne.io.Raw` object.</p></div>
How annotations affect preprocessing and analysis
You may have noticed that the description for new labels in the annotation
controls window defaults to BAD_. The reason for this is that annotation
is often used to mark bad temporal spans of data (such as movement artifacts
or environmental interference that cannot be removed in other ways such as
projection <tut-projectors-background> or filtering). Several
MNE-Python operations
are "annotation aware" and will avoid using data that is annotated with a
description that begins with "bad" or "BAD"; such operations typically have a
boolean reject_by_annotation parameter. Examples of such operations are
independent components analysis (mne.preprocessing.ICA), functions
for finding heartbeat and blink artifacts
(:func:~mne.preprocessing.find_ecg_events,
:func:~mne.preprocessing.find_eog_events), and creation of epoched data
from continuous data (mne.Epochs). See tut-reject-data-spans
for details.
Operations on Annotations objects
~mne.Annotations objects can be combined by simply adding them with
the + operator, as long as they share the same orig_time:
End of explanation
"""
print(raw.annotations[0]) # just the first annotation
print(raw.annotations[:2]) # the first two annotations
print(raw.annotations[(3, 2)]) # the fourth and third annotations
"""
Explanation: Notice that it is possible to create overlapping annotations, even when they
share the same description. This is not possible when annotating
interactively; click-and-dragging to create a new annotation that overlaps
with an existing annotation with the same description will cause the old and
new annotations to be merged.
Individual annotations can be accessed by indexing an
~mne.Annotations object, and subsets of the annotations can be
achieved by either slicing or indexing with a list, tuple, or array of
indices:
End of explanation
"""
for ann in raw.annotations:
descr = ann['description']
start = ann['onset']
end = ann['onset'] + ann['duration']
print("'{}' goes from {} to {}".format(descr, start, end))
"""
Explanation: You can also iterate over the annotations within an ~mne.Annotations
object:
End of explanation
"""
# later_annot WILL be changed, because we're modifying the first element of
# later_annot.onset directly:
later_annot.onset[0] = 99
# later_annot WILL NOT be changed, because later_annot[0] returns a copy
# before the 'onset' field is changed:
later_annot[0]['onset'] = 77
print(later_annot[0]['onset'])
"""
Explanation: Note that iterating, indexing and slicing ~mne.Annotations all
return a copy, so changes to an indexed, sliced, or iterated element will not
modify the original ~mne.Annotations object.
End of explanation
"""
raw.annotations.save('saved-annotations.csv', overwrite=True)
annot_from_file = mne.read_annotations('saved-annotations.csv')
print(annot_from_file)
"""
Explanation: Reading and writing Annotations to/from a file
~mne.Annotations objects have a :meth:~mne.Annotations.save method
which can write :file:.fif, :file:.csv, and :file:.txt formats (the
format to write is inferred from the file extension in the filename you
provide). Be aware that the format of the onset information that is written
to the file depends on the file extension. While :file:.csv files store the
onset as timestamps, :file:.txt files write floats (in seconds). There is a
corresponding :func:~mne.read_annotations function to load them from disk:
End of explanation
"""
|
kubeflow/katib | examples/v1beta1/sdk/nas-with-darts.ipynb | apache-2.0 | # Install required package (Katib SDK).
!pip install kubeflow-katib==0.13.0
"""
Explanation: Neural Architecture Search with DARTS
In this example you will deploy Katib Experiment with Differentiable Architecture Search (DARTS) algorithm using Jupyter Notebook and Katib SDK. Your Kubernetes cluster must have at least one GPU for this example.
You can read more about how we use DARTS in Katib here.
The notebook shows how to create, get, check status and delete an Experiment.
End of explanation
"""
from kubeflow.katib import KatibClient
from kubernetes.client import V1ObjectMeta
from kubeflow.katib import V1beta1Experiment
from kubeflow.katib import V1beta1AlgorithmSpec
from kubeflow.katib import V1beta1AlgorithmSetting
from kubeflow.katib import V1beta1ObjectiveSpec
from kubeflow.katib import V1beta1MetricsCollectorSpec
from kubeflow.katib import V1beta1CollectorSpec
from kubeflow.katib import V1beta1SourceSpec
from kubeflow.katib import V1beta1FilterSpec
from kubeflow.katib import V1beta1FeasibleSpace
from kubeflow.katib import V1beta1ExperimentSpec
from kubeflow.katib import V1beta1NasConfig
from kubeflow.katib import V1beta1GraphConfig
from kubeflow.katib import V1beta1Operation
from kubeflow.katib import V1beta1ParameterSpec
from kubeflow.katib import V1beta1TrialTemplate
from kubeflow.katib import V1beta1TrialParameterSpec
"""
Explanation: Import required packages
End of explanation
"""
# Experiment name and namespace.
namespace = "kubeflow-user-example-com"
experiment_name = "darts-example"
metadata = V1ObjectMeta(
name=experiment_name,
namespace=namespace
)
# Algorithm specification.
algorithm_spec=V1beta1AlgorithmSpec(
algorithm_name="darts",
algorithm_settings=[
V1beta1AlgorithmSetting(
name="num_epochs",
value="2"
),
V1beta1AlgorithmSetting(
name="stem_multiplier",
value="1"
),
V1beta1AlgorithmSetting(
name="init_channels",
value="4"
),
V1beta1AlgorithmSetting(
name="num_nodes",
value="3"
),
]
)
# Objective specification. For DARTS Goal is omitted.
objective_spec=V1beta1ObjectiveSpec(
type="maximize",
objective_metric_name="Best-Genotype",
)
# Metrics collector specification.
# We should specify metrics format to get Genotype from training container.
metrics_collector_spec=V1beta1MetricsCollectorSpec(
collector=V1beta1CollectorSpec(
kind="StdOut"
),
source=V1beta1SourceSpec(
filter=V1beta1FilterSpec(
metrics_format=[
"([\\w-]+)=(Genotype.*)"
]
)
)
)
# Configuration for the Neural Network (NN).
# This NN contains 2 number of layers and 5 various operations with different parameters.
nas_config=V1beta1NasConfig(
graph_config=V1beta1GraphConfig(
num_layers=2
),
operations=[
V1beta1Operation(
operation_type="separable_convolution",
parameters=[
V1beta1ParameterSpec(
name="filter_size",
parameter_type="categorical",
feasible_space=V1beta1FeasibleSpace(
list=["3"]
),
)
]
),
V1beta1Operation(
operation_type="dilated_convolution",
parameters=[
V1beta1ParameterSpec(
name="filter_size",
parameter_type="categorical",
feasible_space=V1beta1FeasibleSpace(
list=["3", "5"]
),
)
]
),
V1beta1Operation(
operation_type="avg_pooling",
parameters=[
V1beta1ParameterSpec(
name="filter_size",
parameter_type="categorical",
feasible_space=V1beta1FeasibleSpace(
list=["3"]
),
)
]
),
V1beta1Operation(
operation_type="max_pooling",
parameters=[
V1beta1ParameterSpec(
name="filter_size",
parameter_type="categorical",
feasible_space=V1beta1FeasibleSpace(
list=["3"]
),
)
]
),
V1beta1Operation(
operation_type="skip_connection",
),
]
)
# JSON template specification for the Trial's Worker Kubernetes Job.
trial_spec={
"apiVersion": "batch/v1",
"kind": "Job",
"spec": {
"template": {
"metadata": {
"annotations": {
"sidecar.istio.io/inject": "false"
}
},
"spec": {
"containers": [
{
"name": "training-container",
"image": "docker.io/kubeflowkatib/darts-cnn-cifar10:v0.13.0",
"command": [
'python3',
'run_trial.py',
'--algorithm-settings="${trialParameters.algorithmSettings}"',
'--search-space="${trialParameters.searchSpace}"',
'--num-layers="${trialParameters.numberLayers}"'
],
# Training container requires 1 GPU.
"resources": {
"limits": {
"nvidia.com/gpu": 1
}
}
}
],
"restartPolicy": "Never"
}
}
}
}
# Template with Trial parameters and Trial spec.
# Set retain to True to save trial resources after completion.
trial_template=V1beta1TrialTemplate(
retain=True,
primary_container_name="training-container",
trial_parameters=[
V1beta1TrialParameterSpec(
name="algorithmSettings",
description=" Algorithm settings of DARTS Experiment",
reference="algorithm-settings"
),
V1beta1TrialParameterSpec(
name="searchSpace",
description="Search Space of DARTS Experiment",
reference="search-space"
),
V1beta1TrialParameterSpec(
name="numberLayers",
description="Number of Neural Network layers",
reference="num-layers"
),
],
trial_spec=trial_spec
)
# Experiment object.
experiment = V1beta1Experiment(
api_version="kubeflow.org/v1beta1",
kind="Experiment",
metadata=metadata,
spec=V1beta1ExperimentSpec(
max_trial_count=1,
parallel_trial_count=1,
max_failed_trial_count=1,
algorithm=algorithm_spec,
objective=objective_spec,
metrics_collector_spec=metrics_collector_spec,
nas_config=nas_config,
trial_template=trial_template,
)
)
"""
Explanation: Define your Experiment
You have to create your Experiment object before deploying it. This Experiment is similar to this example.
You can read more about DARTS algorithm settings here.
End of explanation
"""
# Print the Trial template container info.
print(experiment.spec.trial_template.trial_spec["spec"]["template"]["spec"]["containers"][0])
"""
Explanation: You can print the Experiment's info to verify it before submission.
End of explanation
"""
# Create client.
kclient = KatibClient()
# Create your Experiment.
kclient.create_experiment(experiment,namespace=namespace)
"""
Explanation: Create your Experiment
You have to create Katib client to use the SDK
TODO (andreyvelich): Current Experiment link for NAS is incorrect.
End of explanation
"""
exp = kclient.get_experiment(name=experiment_name, namespace=namespace)
print(exp)
print("-----------------\n")
# Get the latest status.
print(exp["status"]["conditions"][-1])
"""
Explanation: Get your Experiment
You can get your Experiment by name and receive required data.
End of explanation
"""
kclient.get_experiment_status(name=experiment_name, namespace=namespace)
"""
Explanation: Get the current Experiment status
You can check the current Experiment status.
End of explanation
"""
kclient.is_experiment_succeeded(name=experiment_name, namespace=namespace)
"""
Explanation: You can check if your Experiment is succeeded.
End of explanation
"""
opt_trial = kclient.get_optimal_hyperparameters(name=experiment_name, namespace=namespace)
best_genotype = opt_trial["currentOptimalTrial"]["observation"]["metrics"][0]["latest"]
print(best_genotype)
"""
Explanation: Get the best Genotype
Best Genotype is located in the optimal Trial currently. The latest Genotype is the best.
Check your Trial logs to get more information about the training process.
End of explanation
"""
kclient.delete_experiment(name=experiment_name, namespace=namespace)
"""
Explanation: Delete your Experiments
You can delete your Experiments.
End of explanation
"""
|
zakandrewking/cobrapy | documentation_builder/io.ipynb | lgpl-2.1 | import cobra.test
import os
from os.path import join
data_dir = cobra.test.data_dir
print("mini test files: ")
print(", ".join(i for i in os.listdir(data_dir) if i.startswith("mini")))
textbook_model = cobra.test.create_test_model("textbook")
ecoli_model = cobra.test.create_test_model("ecoli")
salmonella_model = cobra.test.create_test_model("salmonella")
"""
Explanation: Reading and Writing Models
Cobrapy supports reading and writing models in SBML (with and without FBC), JSON, YAML, MAT, and pickle formats. Generally, SBML with FBC version 2 is the preferred format for general use. The JSON format may be more useful for cobrapy-specific functionality.
The package also ships with test models in various formats for testing purposes.
End of explanation
"""
cobra.io.read_sbml_model(join(data_dir, "mini_fbc2.xml"))
cobra.io.write_sbml_model(textbook_model, "test_fbc2.xml")
"""
Explanation: SBML
The Systems Biology Markup Language is an XML-based standard format for distributing models which has support for COBRA models through the FBC extension version 2.
Cobrapy has native support for reading and writing SBML with FBCv2. Please note that all id's in the model must conform to the SBML SID requirements in order to generate a valid SBML file.
End of explanation
"""
cobra.io.read_sbml_model(join(data_dir, "mini_cobra.xml"))
cobra.io.write_sbml_model(
textbook_model, "test_cobra.xml", use_fbc_package=False)
"""
Explanation: There are other dialects of SBML prior to FBC 2 which have previously been use to encode COBRA models. The primary ones is the "COBRA" dialect which used the "notes" fields in SBML files.
Cobrapy can use libsbml, which must be installed separately (see installation instructions) to read and write these files. When reading in a model, it will automatically detect whether FBC was used or not. When writing a model, the use_fbc_package flag can be used to write files in this legacy "cobra" format.
Consider having the lxml package installed as it can speed up parsing considerably.
End of explanation
"""
cobra.io.load_json_model(join(data_dir, "mini.json"))
cobra.io.save_json_model(textbook_model, "test.json")
"""
Explanation: JSON
Cobrapy models have a JSON (JavaScript Object Notation) representation. This format was created for interoperability with escher.
End of explanation
"""
cobra.io.load_yaml_model(join(data_dir, "mini.yml"))
cobra.io.save_yaml_model(textbook_model, "test.yml")
"""
Explanation: YAML
Cobrapy models have a YAML (YAML Ain't Markup Language) representation. This format was created for more human readable model representations and automatic diffs between models.
End of explanation
"""
cobra.io.load_matlab_model(
join(data_dir, "mini.mat"), variable_name="mini_textbook")
"""
Explanation: MATLAB
Often, models may be imported and exported solely for the purposes of working with the same models in cobrapy and the MATLAB cobra toolbox. MATLAB has its own ".mat" format for storing variables. Reading and writing to these mat files from python requires scipy.
A mat file can contain multiple MATLAB variables. Therefore, the variable name of the model in the MATLAB file can be passed into the reading function:
End of explanation
"""
cobra.io.load_matlab_model(join(data_dir, "mini.mat"))
"""
Explanation: If the mat file contains only a single model, cobra can figure out which variable to read from, and the variable_name parameter is unnecessary.
End of explanation
"""
cobra.io.save_matlab_model(textbook_model, "test.mat")
"""
Explanation: Saving models to mat files is also relatively straightforward
End of explanation
"""
|
XInterns/IPL-Sparkers | src/Match Outcome Prediction with IPL Data (Gursahej).ipynb | mit | # The %... is an iPython thing, and is not part of the Python language.
# In this case we're just telling the plotting library to draw things on
# the notebook, instead of on a separate window.
%matplotlib inline
#this line above prepares IPython notebook for working with matplotlib
# See all the "as ..." contructs? They're just aliasing the package names.
# That way we can call methods like plt.plot() instead of matplotlib.pyplot.plot().
import numpy as np # imports a fast numerical programming library
import scipy as sp #imports stats functions, amongst other things
import matplotlib as mpl # this actually imports matplotlib
import matplotlib.cm as cm #allows us easy access to colormaps
import matplotlib.pyplot as plt #sets up plotting under plt
import pandas as pd #lets us handle data as dataframes
#sets up pandas table display
pd.set_option('display.width', 500)
pd.set_option('display.max_columns', 100)
pd.set_option('display.notebook_repr_html', True)
import seaborn as sns #sets up styles and gives us more plotting options
from __future__ import division
"""
Explanation: Predicting the Outcome of Cricket Matches
Introduction
In this project, we shall build a model which predicts the outcome of cricket matches in the Indian Premier League using data about matches and deliveries.
Data Mining:
Season : 2008 - 2015 (8 Seasons)
Teams : DD, KKR, MI, RCB, KXIP, RR, CSK (7 Teams)
Neglect matches that have inconsistencies such as No Result, Tie, D/L Method, etc.
Possible Features:
Average Batsman Rating (top 5)
Average Bowler Rating (top 4)
Player of the match frequency
Previous Encounter - Win by runs, Win by Wickets
Recent form (Last 5 Games)
Venue - Home, Away, Neutral
End of explanation
"""
# Reading in the data
allmatches = pd.read_csv("../data/matches.csv")
alldeliveries = pd.read_csv("../data/deliveries.csv")
allmatches.head(10)
# Selecting Seasons 2008 - 2015
matches_seasons = allmatches.loc[allmatches['season'] != 2016]
deliveries_seasons = alldeliveries.loc[alldeliveries['match_id'] < 518]
# Selecting teams DD, KKR, MI, RCB, KXIP, RR, CSK
matches_teams = matches_seasons.loc[(matches_seasons['team1'].isin(['Kolkata Knight Riders', \
'Royal Challengers Bangalore', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', \
'Mumbai Indians', 'Kings XI Punjab'])) & (matches_seasons['team2'].isin(['Kolkata Knight Riders', \
'Royal Challengers Bangalore', 'Delhi Daredevils', 'Chennai Super Kings', 'Rajasthan Royals', \
'Mumbai Indians', 'Kings XI Punjab']))]
matches_team_matchids = matches_teams.id.unique()
deliveries_teams = deliveries_seasons.loc[deliveries_seasons['match_id'].isin(matches_team_matchids)]
print "Teams selected:\n"
for team in matches_teams.team1.unique():
print team
# Neglect matches with inconsistencies like 'No Result' or 'D/L Applied'
matches = matches_teams.loc[(matches_teams['result'] == 'normal') & (matches_teams['dl_applied'] == 0)]
matches_matchids = matches.id.unique()
deliveries = deliveries_teams.loc[deliveries_teams['match_id'].isin(matches_matchids)]
# Verifying consistency between datasets
(matches.id.unique() == deliveries.match_id.unique()).all()
"""
Explanation: Data Mining
End of explanation
"""
# Team Strike rates for first 5 batsmen in the team (Higher the better)
def getMatchDeliveriesDF(match_id):
    """All deliveries bowled in the match identified by `match_id`."""
    return deliveries[deliveries['match_id'] == match_id]
def getInningsOneBatsmen(match_deliveries):
    """First five batsmen to face a ball in innings 1 (batting order preserved)."""
    first_innings = match_deliveries.loc[match_deliveries['inning'] == 1]
    return first_innings.batsman.unique()[:5]
def getInningsTwoBatsmen(match_deliveries):
    """First five batsmen to face a ball in innings 2 (batting order preserved)."""
    second_innings = match_deliveries.loc[match_deliveries['inning'] == 2]
    return second_innings.batsman.unique()[:5]
def getBatsmanStrikeRate(batsman, match_id):
    """Career strike rate of `batsman` over every delivery faced before
    `match_id`; None when there is no prior history (debutant)."""
    faced = deliveries.loc[(deliveries['match_id'] < match_id) &
                           (deliveries['batsman'] == batsman)]
    balls = faced.shape[0]
    if balls == 0:
        return None
    runs = faced['batsman_runs'].sum()
    return (runs / balls) * 100
def getTeamStrikeRate(batsmen, match_id):
    """Average career strike rate of the given batsmen before `match_id`.

    Batsmen with no prior deliveries (debutants) are skipped.  Returns
    None when *no* batsman has any history, instead of letting
    np.mean([]) emit a RuntimeWarning and produce NaN.
    """
    strike_rates = []
    for batsman in batsmen:
        bsr = getBatsmanStrikeRate(batsman, match_id)
        if bsr is not None:
            strike_rates.append(bsr)
    if not strike_rates:
        # Consistent with getBatsmanStrikeRate's "no data" convention.
        return None
    return np.mean(strike_rates)
def getAverageStrikeRates(match_id):
    """(team1_avg_SR, team2_avg_SR): mean strike rate of the top-5 batsmen
    of each innings of the given match, computed from prior matches only."""
    match_deliveries = getMatchDeliveriesDF(match_id)
    teamOneSR = getTeamStrikeRate(getInningsOneBatsmen(match_deliveries), match_id)
    teamTwoSR = getTeamStrikeRate(getInningsTwoBatsmen(match_deliveries), match_id)
    return teamOneSR, teamTwoSR
# Testing Functionality
getAverageStrikeRates(517)
# Bowler Rating : Wickets/Run (Higher the Better)
# Team 1: Batting First; Team 2: Fielding First
def getInningsOneBowlers(match_deliveries):
    """First four bowlers used in innings 1."""
    first_innings = match_deliveries.loc[match_deliveries['inning'] == 1]
    return first_innings.bowler.unique()[:4]
def getInningsTwoBowlers(match_deliveries):
    """First four bowlers used in innings 2."""
    second_innings = match_deliveries.loc[match_deliveries['inning'] == 2]
    return second_innings.bowler.unique()[:4]
def getBowlerWPR(bowler, match_id):
    """Bowler rating: wickets per run conceded (x100) over all deliveries
    bowled before `match_id`.  Higher is better."""
    balls = deliveries.loc[(deliveries['match_id'] < match_id) & (deliveries['bowler'] == bowler)]
    total_runs = balls['total_runs'].sum()
    # Only dismissals credited to the bowler count as wickets here
    # (run-outs, retired hurt, etc. are excluded).
    total_wickets = balls.loc[balls['dismissal_kind'].isin(['caught', 'bowled', 'lbw', \
        'caught and bowled', 'stumped'])].shape[0]
    if total_runs != 0:
        return (total_wickets/total_runs) * 100
    # NOTE(review): when no runs were conceded this returns the raw wicket
    # count, which is on a different scale from the (x100) ratio above --
    # confirm this asymmetry is intended.
    else:
        return total_wickets
def getTeamWPR(bowlers, match_id):
    """Mean wickets-per-run rating across the given bowlers."""
    return np.mean([getBowlerWPR(bowler, match_id) for bowler in bowlers])
def getAverageWPR(match_id):
    """(team1_WPR, team2_WPR) for the first four bowlers used by each side.

    Team 1 bats first, so its bowlers appear in innings 2 -- hence the
    deliberate cross-over in the two getTeamWPR calls below.
    """
    match_deliveries = getMatchDeliveriesDF(match_id)
    innOneBowlers = getInningsOneBowlers(match_deliveries)
    innTwoBowlers = getInningsTwoBowlers(match_deliveries)
    teamOneWPR = getTeamWPR(innTwoBowlers, match_id)  # team 1's bowlers bowl innings 2
    teamTwoWPR = getTeamWPR(innOneBowlers, match_id)  # team 2's bowlers bowl innings 1
    return teamOneWPR, teamTwoWPR
#Testing Functionality
getAverageWPR(517)
# Man of the Match Awards for players of both Teams
def getInningsOneAllBatsmen(match_deliveries):
    """Every batsman who came to the crease in innings 1."""
    inn1 = match_deliveries.loc[match_deliveries['inning'] == 1]
    return inn1.batsman.unique()
def getInningsTwoAllBatsmen(match_deliveries):
    """Every batsman who came to the crease in innings 2."""
    inn2 = match_deliveries.loc[match_deliveries['inning'] == 2]
    return inn2.batsman.unique()
def getInningsOneAllBowlers(match_deliveries):
    """Bowlers of the side that batted innings 1 (they bowl in innings 2)."""
    inn2 = match_deliveries.loc[match_deliveries['inning'] == 2]
    return inn2.bowler.unique()
def getInningsTwoAllBowlers(match_deliveries):
    """Bowlers of the side that batted innings 2 (they bowl in innings 1)."""
    inn1 = match_deliveries.loc[match_deliveries['inning'] == 1]
    return inn1.bowler.unique()
def getTeam(batsmen, bowlers):
    """Combined player list for one side: all batsmen (in batting order),
    followed by any bowlers who did not bat."""
    squad = np.append([], batsmen)
    # Only add bowlers not already counted among the batsmen.
    for player in bowlers:
        if player not in batsmen:
            squad = np.append(squad, player)
    return squad
def getPlayerMVPAwards(player, match_id):
    """Number of man-of-the-match awards `player` won before `match_id`."""
    earlier = matches.loc[matches['id'] < match_id]
    return earlier.loc[earlier["player_of_match"] == player].shape[0]
def getTeamMVPAwards(team, match_id):
    """Total prior man-of-the-match awards across every player in `team`."""
    return sum(getPlayerMVPAwards(player, match_id) for player in team)
def bothTeamMVPAwards(match_id):
    """(team1_awards, team2_awards): prior MVP award totals for both squads.

    Local names below refer to the innings in which players *appear*:
    team 1 bats innings 1 and bowls innings 2, so its squad is the
    innings-1 batsmen plus the innings-2 bowlers (and vice versa).
    """
    matchDeliveries = getMatchDeliveriesDF(match_id)
    innOneBatsmen = getInningsOneAllBatsmen(matchDeliveries)
    innTwoBatsmen = getInningsTwoAllBatsmen(matchDeliveries)
    innOneBowlers = getInningsTwoAllBowlers(matchDeliveries)
    innTwoBowlers = getInningsOneAllBowlers(matchDeliveries)
    team1 = getTeam(innOneBatsmen, innTwoBowlers)
    team2 = getTeam(innTwoBatsmen, innOneBowlers)
    team1Awards = getTeamMVPAwards(team1,match_id)
    team2Awards = getTeamMVPAwards(team2,match_id)
    return team1Awards, team2Awards
#Testing Functionality
bothTeamMVPAwards(517)
#Function to generate squad rating
def generateSquadRating(match_id):
    """Print a side-by-side squad comparison for the given match.

    Reports average top-5 batting strike rate, average top-4 bowler
    wickets-per-run rating, and total prior MVP awards for both teams.
    All stats are computed from matches played *before* `match_id`.
    """
    # The two batting teams that actually appeared in this match.
    gameday_teams = deliveries.loc[(deliveries['match_id'] == match_id)].batting_team.unique()
    teamOne = gameday_teams[0]
    teamTwo = gameday_teams[1]
    teamOneSR, teamTwoSR = getAverageStrikeRates(match_id)
    teamOneWPR, teamTwoWPR = getAverageWPR(match_id)
    teamOneMVPs, teamTwoMVPs = bothTeamMVPAwards(match_id)
    print "Comparing squads for {} vs {}".format(teamOne,teamTwo)
    print "\nAverage Strike Rate for Batsmen in {} : {}".format(teamOne,teamOneSR)
    print "\nAverage Strike Rate for Batsmen in {} : {}".format(teamTwo,teamTwoSR)
    print "\nBowler Rating (W/R) for {} : {}".format(teamOne,teamOneWPR)
    print "\nBowler Rating (W/R) for {} : {}".format(teamTwo,teamTwoWPR)
    print "\nNumber of MVP Awards in {} : {}".format(teamOne,teamOneMVPs)
    print "\nNumber of MVP Awards in {} : {}".format(teamTwo,teamTwoMVPs)
#Testing Functionality
generateSquadRating(517)
## 2nd Feature : Previous Encounter
# Won by runs and won by wickets (Higher the better)
def getTeam1(match_id):
    """Array holding the 'team1' entry for the given match."""
    match_row = matches.loc[matches["id"] == match_id]
    return match_row.team1.unique()
def getTeam2(match_id):
    """Array holding the 'team2' entry for the given match."""
    match_row = matches.loc[matches["id"] == match_id]
    return match_row.team2.unique()
def getPreviousEncDF(match_id):
    """All earlier matches between the two sides contesting `match_id`,
    regardless of which of them was listed as team1/team2."""
    team1 = getTeam1(match_id)
    team2 = getTeam2(match_id)
    return matches.loc[(matches["id"] < match_id) & (((matches["team1"].isin(team1)) & (matches["team2"].isin(team2))) | ((matches["team1"].isin(team2)) & (matches["team2"].isin(team1))))]
def getTeamWBR(match_id, team):
    """Sum of `team`'s winning run-margins over all previous encounters."""
    prev = getPreviousEncDF(match_id)
    return prev.loc[prev["winner"] == team, 'win_by_runs'].sum()
def getTeamWBW(match_id, team):
    """Sum of `team`'s winning wicket-margins over all previous encounters."""
    prev = getPreviousEncDF(match_id)
    return prev.loc[prev["winner"] == team, 'win_by_wickets'].sum()
def getTeamWinPerc(match_id):
    """Percentage of previous head-to-head encounters won by team1
    (0 if the sides have never met before)."""
    prev = getPreviousEncDF(match_id)
    played = prev.shape[0]
    if played == 0:
        return 0
    team1 = getTeam1(match_id)[0].strip("[]")
    won = prev.loc[prev["winner"] == team1].shape[0]
    return (won / played) * 100
def getBothTeamStats(match_id):
DF = getPreviousEncDF(match_id)
team1 = getTeam1(match_id)[0].strip("[]")
team2 = getTeam2(match_id)[0].strip("[]")
timesPlayed = DF.shape[0]
timesWon = DF.loc[DF["winner"] == team1].shape[0]
WBRTeam1 = getTeamWBR(match_id, team1)
WBRTeam2 = getTeamWBR(match_id, team2)
WBWTeam1 = getTeamWBW(match_id, team1)
WBWTeam2 = getTeamWBW(match_id, team2)
print "Out of {} times in the past {} have won {} times({}%) from {}".format(timesPlayed, team1, timesWon, getTeamWinPerc(match_id), team2)
print "{} won by {} total runs and {} total wickets.".format(team1, WBRTeam1, WBWTeam1)
print "{} won by {} total runs and {} total wickets.".format(team2, WBRTeam2, WBWTeam2)
#Testing functionality
getBothTeamStats(517)
#3rd Feature: Recent Form (Win Percentage of 3 previous matches of a team in the same season)
#Higher the better
def getMatchYear(match_id):
    # Season (year) the match was played in, as a one-element array.
    season_vals = matches.loc[matches["id"] == match_id, "season"]
    return season_vals.unique()
def getTeam1DF(match_id, year):
    # Last three matches that team1 played earlier in the same season.
    team1 = getTeam1(match_id)
    earlier = (matches["id"] < match_id) & (matches["season"] == year)
    involves_team1 = matches["team1"].isin(team1) | matches["team2"].isin(team1)
    return matches.loc[earlier & involves_team1].tail(3)
def getTeam2DF(match_id, year):
    # Last three matches that team2 played earlier in the same season.
    team2 = getTeam2(match_id)
    earlier = (matches["id"] < match_id) & (matches["season"] == year)
    involves_team2 = matches["team1"].isin(team2) | matches["team2"].isin(team2)
    return matches.loc[earlier & involves_team2].tail(3)
def getTeamWinPercentage(match_id):
    """Recent-form win percentages (last 3 season games) for both teams.

    Returns a (team1_win_perc, team2_win_perc) tuple; a team that has
    not played yet this season gets 0.
    """
    year = int(getMatchYear(match_id))
    team1 = getTeam1(match_id)[0].strip("[]")
    team2 = getTeam2(match_id)[0].strip("[]")
    team1DF = getTeam1DF(match_id, year)
    team2DF = getTeam2DF(match_id, year)
    # The original four-way if/elif computed the two percentages jointly;
    # they are independent, so compute each with the same helper.
    winPercTeam1 = _recentWinPerc(team1DF, team1)
    winPercTeam2 = _recentWinPerc(team2DF, team2)
    return winPercTeam1, winPercTeam2

def _recentWinPerc(recentDF, team):
    # Share of `team`'s recent matches that it won, as a percentage.
    total = recentDF.shape[0]
    if total == 0:
        return 0
    won = recentDF.loc[recentDF["winner"] == team].shape[0]
    # BUG FIX: `won` and `total` are ints, so under Python 2 the original
    # `won/total` was floor division and could only yield 0% or 100%.
    return (float(won) / total) * 100
def displayTeamWin(match_id):
year = int(getMatchYear(match_id))
team1 = getTeam1(match_id)[0].strip("[]")
team2 = getTeam2(match_id)[0].strip("[]")
P,Q = getTeamWinPercentage(match_id)
print "In the season of {}, {} has a win percentage of {}% and {} has a win percentage of {}% ".format(year, team1, P, team2, Q)
#Function to implement all features
def getAllFeatures(match_id):
    # Run every report (squad rating, head-to-head, recent form) for one
    # match, separated by blank lines.
    reports = (generateSquadRating, getBothTeamStats, displayTeamWin)
    for position, report in enumerate(reports):
        report(match_id)
        if position < len(reports) - 1:
            print("\n")

#Testing Functionality
getAllFeatures(517)
"""
Explanation: Building Features
End of explanation
"""
#Create Column for Team 1 Winning Status (1 = Won, 0 = Lost)
matches['team1Winning'] = np.where(matches['team1'] == matches['winner'], 1, 0)

# Every feature below is computed once per match id, in id order, and
# attached to `matches` as a new column.
match_ids = matches['id'].unique()

#New Column for Difference of Average Strike rates (First Team SR - Second Team SR) [Negative value means Second team is better]
sr_pairs = [getAverageStrikeRates(mid) for mid in match_ids]
firstSRSeries = pd.Series([pair[0] for pair in sr_pairs])
secondSRSeries = pd.Series([pair[1] for pair in sr_pairs])
matches["Avg_SR_Difference"] = firstSRSeries.values - secondSRSeries.values

#New Column for Difference of Wickets Per Run (First Team WPR - Second Team WPR) [Negative value means Second team is better]
wpr_pairs = [getAverageWPR(mid) for mid in match_ids]
firstWPRSeries = pd.Series([pair[0] for pair in wpr_pairs])
secondWPRSeries = pd.Series([pair[1] for pair in wpr_pairs])
matches["Avg_WPR_Difference"] = firstWPRSeries.values - secondWPRSeries.values

#New column for difference of MVP Awards (Negative value means Second team is better)
mvp_pairs = [bothTeamMVPAwards(mid) for mid in match_ids]
firstMVPSeries = pd.Series([pair[0] for pair in mvp_pairs])
secondMVPSeries = pd.Series([pair[1] for pair in mvp_pairs])
matches["Total_MVP_Difference"] = firstMVPSeries.values - secondMVPSeries.values

#New column for win percentage of Team1 in previous encounter
firstWPSeries = pd.Series([getTeamWinPerc(mid) for mid in match_ids])
matches["Prev_Enc_Team1_WinPerc"] = firstWPSeries.values

#New column for Recent form(Win Percentage in the current season) of 1st Team compared to 2nd Team(Negative means 2nd team has higher win percentage)
rf_pairs = [getTeamWinPercentage(mid) for mid in match_ids]
firstRFSeries = pd.Series([pair[0] for pair in rf_pairs])
secondRFSeries = pd.Series([pair[1] for pair in rf_pairs])
matches["Total_RF_Difference"] = firstRFSeries.values - secondRFSeries.values

#Testing
matches.tail(20)
"""
Explanation: Adding Columns
End of explanation
"""
# Box plots of each engineered feature split by whether team1 won;
# outliers are hidden (showfliers=False) so the box positions compare.
#Graph for Strike Rate
matches.boxplot(column = 'Avg_SR_Difference', by='team1Winning', showfliers= False)
#Graph for WPR Difference
matches.boxplot(column = 'Avg_WPR_Difference', by='team1Winning', showfliers= False)
# Graph for MVP Difference
matches.boxplot(column = 'Total_MVP_Difference', by='team1Winning', showfliers= False)
#Graph for Previous encounters Win Percentage of Team #1
matches.boxplot(column = 'Prev_Enc_Team1_WinPerc', by='team1Winning', showfliers= False)
# Graph for Recent form(Win Percentage in the same season)
matches.boxplot(column = 'Total_RF_Difference', by='team1Winning', showfliers= False)
"""
Explanation: Visualisation
End of explanation
"""
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.naive_bayes import GaussianNB
# FIX: sklearn.cross_validation was deprecated in scikit-learn 0.18 and
# removed in 0.20; train_test_split lives in sklearn.model_selection
# (same API, available since 0.18).
from sklearn.model_selection import train_test_split
from sklearn import metrics
from patsy import dmatrices

# Design matrices for the models: `0 +` drops the intercept so only the
# engineered difference features are used as predictors.
y, X = dmatrices('team1Winning ~ 0 + Avg_SR_Difference + Avg_WPR_Difference + Total_MVP_Difference + Prev_Enc_Team1_WinPerc + \
Total_RF_Difference', matches, return_type="dataframe")
y_arr = np.ravel(y)  # flatten the single-column target frame to a 1-D array
"""
Explanation: Predictions for the data
End of explanation
"""
# instantiate a logistic regression model, and fit with X and y
model = LogisticRegression()
model = model.fit(X, y_arr)
# check the accuracy on the training set
# NOTE: this scores on the same data the model was fit on, so it is an
# optimistic (training) accuracy, not a generalisation estimate.
print "Accuracy is", model.score(X, y_arr)*100, "%"
"""
Explanation: Training and testing on Entire Data
End of explanation
"""
# evaluate the model by splitting into train and test sets
# (random_state fixed so the split is reproducible)
X_train, X_test, y_train, y_test = train_test_split(X, y_arr, random_state = 0)
# Logistic Regression on train_test_split
model2 = LogisticRegression()
model2.fit(X_train, y_train)
# predict class labels for the test set
predicted = model2.predict(X_test)
# generate evaluation metrics
print "Accuracy is ", metrics.accuracy_score(y_test, predicted)*100, "%"
# KNN Classification on train_test_split
# Sweep k from 1 to 60 and record held-out accuracy for each setting.
k_range = list(range(1, 61))
k_score = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors = k)
    knn.fit(X_train, y_train)
    y_pred = knn.predict(X_test)
    k_score.append(metrics.accuracy_score(y_test, y_pred))
plt.plot(k_range, k_score)
# Best values of k in train_test_split
# k = 50 picked by inspecting the accuracy-vs-k curve plotted above.
knn = KNeighborsClassifier(n_neighbors = 50)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print "Accuracy is ", metrics.accuracy_score(y_test, y_pred)*100, "%"
"""
Explanation: Splitting train and test using train_test_split
End of explanation
"""
#Splitting
# Time-based split: rows with index < 398 (earlier seasons) train the
# model and later rows are held out, mimicking prediction of future games.
X_timetrain = X.loc[X.index < 398]
Y_timetrain = y.loc[y.index < 398]
Y_timetrain_arr = np.ravel(Y_timetrain)
X_timetest = X.loc[X.index >= 398]
Y_timetest = y.loc[y.index >= 398]
Y_timetest_arr = np.ravel(Y_timetest)
# Logistic Regression on time-based split sets
model3 = LogisticRegression()
model3.fit(X_timetrain, Y_timetrain_arr)
timepredicted = model3.predict(X_timetest)
print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, timepredicted)*100, "%"
# KNN Classification on time-based split sets
k_range = list(range(1, 61))
k_score = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors = k)
    knn.fit(X_timetrain, Y_timetrain_arr)
    y_pred = knn.predict(X_timetest)
    k_score.append(metrics.accuracy_score(Y_timetest_arr, y_pred))
plt.plot(k_range, k_score)
# Best values of k in time-based split data
# k = 31 picked by inspecting the accuracy-vs-k curve plotted above.
knn1 = KNeighborsClassifier(n_neighbors = 31)
knn1.fit(X_timetrain, Y_timetrain_arr)
y_pred = knn1.predict(X_timetest)
print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, y_pred)*100, "%"
"""
Explanation: Splitting Training Set (2008-2013) and Test Set (2013-2015) based on Seasons
End of explanation
"""
# Support vector machine on the time-based split
# (gamma/C chosen manually, no grid search).
clf = svm.SVC(gamma=0.001, C=10)
clf.fit(X_timetrain, Y_timetrain_arr)
clf_pred = clf.predict(X_timetest)
print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, clf_pred)*100, "%"
"""
Explanation: Support Vector Machines
End of explanation
"""
# Random forest on the time-based split; feature_importances_ then shows
# how much each engineered feature contributed to the trees' splits.
rfc = RandomForestClassifier(n_jobs = -1, random_state = 1)
rfc.fit(X_timetrain, Y_timetrain_arr)
rfc_pred = rfc.predict(X_timetest)
print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, rfc_pred)*100, "%"
fi = zip(X.columns, rfc.feature_importances_)
print "Feature Importance according to Random Forests Model\n"
for i in fi:
    print i[0], ":", i[1]
"""
Explanation: Random Forests
End of explanation
"""
# Gaussian naive Bayes on the same time-based split.
gclf = GaussianNB()
gclf.fit(X_timetrain, Y_timetrain_arr)
gclf_pred = gclf.predict(X_timetest)
print "Accuracy is ", metrics.accuracy_score(Y_timetest_arr, gclf_pred) *100, "%"
"""
Explanation: Naive Bayes Classifier
End of explanation
"""
|
ini-python-course/ss15 | notebooks/PCA and Eigenfaces.ipynb | mit | # prepare some imports
import numpy as np
from sklearn.datasets import fetch_mldata
import matplotlib.pyplot as plt
%matplotlib inline
"""
Explanation: Principal Component Analysis & Eigenfaces
Here we are going to introduce and implement Principal Component Analysis (PCA) which is a very common and important algorithm for machine learning, especially for the preprocessing of high-dimensional data.
Let's start with an example! One of the most popular benchmark data sets in machine learning is MNIST. It consists of $70000$ handwritten digits (0-9), $28 \times 28 = 784$ pixels each. The goal is to learn from the very different examples the regularities of how the digit $3$ for instance looks like and to distinguish it from the other digits.
End of explanation
"""
# plot samples from MNIST
# NOTE(review): fetch_mldata was removed in scikit-learn 0.22; on modern
# versions use fetch_openml('mnist_784') instead — confirm before running.
mnist = fetch_mldata('MNIST original')
for i in range(9):
    plt.subplot(3, 3, i+1)
    sample = mnist.data[20000+i]
    sample_2d = sample.reshape((28, 28))  # 784 flat pixels back to a 28x28 image
    plt.imshow(sample_2d, cmap='gray')
"""
Explanation: Here are a few examples from the data set (samples $20000-20009$):
End of explanation
"""
X = mnist.data
# Covariance across the 784 pixel dimensions; rowvar=0 treats each row of
# X as one observation (sample), matching the data layout explained below.
C = np.cov(X, rowvar=0) # slow! may take a while because the data set is large
print 'X.shape:', X.shape
print 'C.shape:', C.shape
"""
Explanation: A common task then looks like this:
Take the first $60000$ samples as training set and the remaining $10000$ samples as test set,
train a classifier on the training set,
evaluate the classifier on the test set, i.e., on samples it has never seen before.
Modern approaches are able to classify the unseen samples correctly with an error rate of less than $1\%$.
However, often the algorithms for classification are not applied to the 784-dimensional data directly because they don't work very well in high dimensions. In practice it's often better to project the data into a lower dimensional space first and then solve the (classification) task there.
Intuitively it makes sense that we do not need all $786$ dimensions to solve the problem. For instance we see that neighboring pixels are highly correlated. Also the pixels in the corner are likely to be always black and contain no information about the digits. So, there seem to be a lot of dimensions in the data that are irrelevant or at least highly redundant. That's why we want to project into a sub-space that is less redundant.
Mathematically, projecting a vector into a lower-dimensional sub-space is easy. In the MNIST example we have data vectors $\mathbf{x}_i \in \mathbb{R}^{784}$ with $i = 1, \dots, 70000$. Consider the direction of projection to be given by a vector $\mathbf{u} \in \mathbb{R}^{784}$ with length $||\mathbf{u}|| = 1$. Remember from school that a vector $\mathbf{x}_i$ can be projected onto $\mathbf{u}$ simply through a scalar product, that is: $y_i = \mathbf{u}^T \mathbf{x}_i$. Analogously, if we have two directions for projection $\mathbf{u}_1$ and $\mathbf{u}_2$, data points can be projected through a matrix $\mathbf{U} = (\mathbf{u}_1, \mathbf{u}_2)$:
$\mathbf{y}_i = \mathbf{U}^T \mathbf{x}_i = (\substack{\mathbf{u}_1^T \ \mathbf{u}_2^T}) \mathbf{x}_i$
with $\mathbf{x}_i \in \mathbb{R}^{784}$, $\mathbf{U} \in \mathbb{R}^{784 \times 2}$ and $\mathbf{y}_i \in \mathbb{R}^2$.
Principal Component Analysis (PCA)
But how do we select the directions $\mathbf{u}_1, \mathbf{u}_2, \dots$ for projection given a data set? Without going too much into detail here, PCA selects the directions that have the smallest (linear) re-construction error, which is a reasonable objective in many cases. These directions of smallest re-construction error also happen to be the directions of highest variance in the data. If you think about it, this makes sense for the MNIST data for instance because all the black pixels in the corner have a very low variance and are thus discarded during the projection.
In the picture you see an example of many two-dimensional data points. The arrows indicate the direction of highest and second highest variance (lowest and second lowest re-construction error, respectively):
Admitedly, this is a very brief introduction to PCA. I recommend watching the videos of the Udacity Machine Learning Course about PCA as well (and if you want to read a nice explanation of the math of PCA, take a look at the PCA lecture of Prof. Wiskott):
So how do we find these directions of highest variance? It turns out that these directions are given by the "largest" eigenvectors of the covariance matrix of the data. The proof for this is not very difficult but we skip it here. Let's do it for MNIST!
End of explanation
"""
np.cov?
"""
Explanation: Note that data matrices in Python are often constructed such that the single data vectors are stored in rows (like above). This is exactly the other way round where as you would expect from the mathematical notation, where vectors often are notated as columns. But it has other advantages like the possibility of accessing the single data vectors by X[i] instead of X[i,:]. So that's the convention we are using in the following.
Question: What is this rowvar parameter good for? To find out, we can access the documentation from the console by calling np.cov?.
End of explanation
"""
# eigh is the solver for symmetric/Hermitian matrices (a covariance matrix
# is symmetric); the eigenpairs are sorted explicitly in a later cell.
E, U = np.linalg.eigh(C) # Eigenvalues and eigenvectors of C
"""
Explanation: Calculate eigenvalues and eigenvectors of covariance matrix $C$ and project data on it:
End of explanation
"""
# argsort returns the permutation of indices that sorts E ascending.
idc = np.argsort(E)
print idc[:10] # first ten indices that would sort the eigenvalues (in increasing order)
print E[idc][:10] # first ten sorted eigenvalues
"""
Explanation: As the documentation tells us (call np.linalg.eigh?), the resulting eigenvalues and eigenvectors are not necessarily ordered. But there is a useful function to order them: np.argsort
End of explanation
"""
# sort eigenvalues and eigenvectors
E = E[idc]
U = U[:,idc] # sort columns
U = U[:,-2:] # consider the last two ("largest") eigenvectors
# Project every sample onto the two principal directions:
# (n_samples, 784) x (784, 2) -> (n_samples, 2).
Y = np.dot(mnist.data, U) # project data on these eigenvectors
print Y.shape
"""
Explanation: Note that E[idc] uses a concept called advanced indexing, i.e., accessing elements in an array through a list of indices. For more details see the official documentation.
End of explanation
"""
# Scatter the 2-D projection, one colour per digit class.
for digit in range(10):
    projected = Y[mnist.target == digit]
    plt.plot(projected[:, 0], projected[:, 1], '.')
"""
Explanation: Now plot each class in the two-dimensional feature space:
End of explanation
"""
import sklearn.datasets
# 400 face images, 64x64 = 4096 grey-scale pixels each, one flat row per face.
download = sklearn.datasets.fetch_olivetti_faces()
faces = download['data']
print faces.shape
"""
Explanation: Indeed, quite a lot of information of the data set was preserved in the sense that the different classes can still be distinguished to some degree with only two dimensions (instead of $784$). As you may expect, classes become even more distinguishable when you add some more feature dimensions.
Eigenfaces
One of the early approaches to face classification was based exactly on such PCA features calculated on face data sets. Let's try this as an exercise!
End of explanation
"""
for i in range(18):
# your code here
# our solution
from solutions import *
decrypt_solution(solution_pca_1, passphrase='foo')
"""
Explanation: Now we have an NumPy array of $400$ faces, $64 \times 64 = 4096$ pixels each. Visualize the first 18 images!
End of explanation
"""
# calculate covariance for faces
# your code here
# our solution
from solutions import *
decrypt_solution(solution_pca_2, passphrase='foo')
"""
Explanation: As with the MNIST data set, calculating the covariance can take a few minutes. We therefore suggest doing this calculation in a separate block.
End of explanation
"""
# calculate eigenvectors of face covariance and sort them
# your code here
# our solution
from solutions import *
decrypt_solution(solution_pca_3, passphrase='foo')
"""
Explanation: Once again, the eigenvectors must be calculated and sorted...
End of explanation
"""
# plot the 9 "largest" eigenvectors/eigenfaces
for i in range(9):
# your code here
# our solution
from solutions import *
decrypt_solution(solution_pca_4, passphrase='foo')
"""
Explanation: With the result we do something a bit more different than above. This time we want to visualize the eigenvectors themselves. Note that each eigenvector $\textbf{u}_i$ is a vector of $64 \times 64 = 4096$ dimensions, each component corresponding to one pixel in the image space. Thus, we can re-arrange each eigenvector to a $64 \times 64$ 2D-NumPy-array and visualize with np.imshow as if it was an image. The result makes clear why the approach was called Eigenfaces. It should look somewhat like this:
Try this with the eigenvectors that you have just calculated!
End of explanation
"""
# your code here
# our solution
from solutions import *
decrypt_solution(solution_pca_5, passphrase='foo')
"""
Explanation: Now these eigenfaces model how much variance there is in the pixels relative to the mean value. By calculating the mean face and then adding or substracting the eigenfaces we can reconstruct proper faces and even visualize the meaning that the different components have.
So, calculate and visualize the average face from the data set first!
End of explanation
"""
# your code here
# our solution
from solutions import *
decrypt_solution(solution_pca_6, passphrase='foo')
"""
Explanation: Next we visualize how the faces vary in different directions. Take the mean face and visualize how it looks like when the first eigenface is added/substracted to it. Do the same for some more eigenfaces...
Note: Eigenvectors are usually normalized to length one. To make them proportional to the variance that they are describing, we must multiply them with their corresponding eigenvalue first.
End of explanation
"""
|
mimoralea/applied-reinforcement-learning | notebooks/05-state-discretization.ipynb | mit | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tempfile
import base64
import pprint
import json
import sys
import gym
import io
from gym import wrappers
from subprocess import check_output
from IPython.display import HTML
"""
Explanation: State Space Discretization
From the previous notebook you probably end up thinking about the fact that the states and action we were dealing with on the Frozen Lake environment were discrete. It should upset you a bit, there is really not much we can do with that?
But worry-not! This notebook will relax that constraint. We will look at what to do when the state space is continuous. We will keep actions discrete, but that's OK. For now, we'll look at a very simple way of dealing with continuous states.
End of explanation
"""
env = gym.make('CartPole-v0')
observation = env.reset()   # one 4-dimensional continuous observation
actions = env.action_space  # Discrete(2): two possible actions
env.close()
print(observation)
print(actions)
"""
Explanation: Q-Learning
In the world we will be looking into this time, the Cart Pole example, we have 4 continous values to represent a state. An X position, change in X, acceleration and change in acceleration.
Let's take a peek at a single observation:
End of explanation
"""
# Sample observations from 1000 short episodes to see what value ranges
# the four state variables actually take in practice.
observations = []
for episode in range(1000):
    observation = env.reset()
    for t in range(100):
        env.render()
        observations.append(observation)
        action = env.action_space.sample()
        # Force a constant action for the first 50 episodes so the cart
        # is pushed far to each side and the x-range gets explored too.
        if episode < 25:
            action = 1
        elif episode < 50:
            action = 0
        observation, reward, done, info = env.step(action)
        if done:
            print("Episode finished after {} timesteps".format(t+1))
            break
env.close()
# Split the observation log into one array per state variable.
x_vals = np.array(observations)[:,0]
xd_vals = np.array(observations)[:,1]
a_vals = np.array(observations)[:,2]
ad_vals = np.array(observations)[:,3]
y = np.zeros_like(x_vals)  # shared zero baseline for the strip plots below
"""
Explanation: Just as expected, a combination of 4 continuous variables makes a state, and 2 discrete actions. Hmm, what to do? The actions are sort of the same as before, but the states are definitely different. We can't just create a Q table anymore...
Let's look a little more into the the possible values these variables can take. We will explore for 100 episodes just to collect some sample data. You can see I force the algorithm to take the action 1 and action 0 for several consecutive timesteps. This is to make sure we explore the entire space and not just random. If we do random actions the cart would just jitter on the same place and the x axis would not get explored much. Stay with me, I'll show you.
End of explanation
"""
# Strip plot: each variable's samples on its own horizontal line so the
# observed ranges can be compared at a glance.
plt.plot(x_vals, y + 0.10, '.')
plt.plot(xd_vals, y + 0.05, '.')
plt.plot(a_vals, y - 0.05, '.')
plt.plot(ad_vals, y - 0.10, '.')
plt.ylim([-0.15, 0.15])
"""
Explanation: Let's visualize the values that each of the variables can take:
End of explanation
"""
# Halved environment bounds for observation components 0 and 2;
# env.env unwraps the outer wrapper to reach the raw observation space.
x_thres = ((env.env.observation_space.low/2)[0],
           (env.env.observation_space.high/2)[0])
a_thres = ((env.env.observation_space.low/2)[2],
           (env.env.observation_space.high/2)[2])
print(x_thres, a_thres)
"""
Explanation: See? The x values seem to go from -1 to 1, changes in x from -2 to 2, etc.
In fact, we can get the exact values from the env, although you could just sample a bunch of episodes and find out the limits yourself. Let's use the environments to keep focus.
End of explanation
"""
# Three interior cut points for x -> four discrete buckets; plotted as
# 'o' markers above the sampled x values.
x1 = np.linspace(x_thres[0] + .5, x_thres[1] - .5,
                 4, endpoint=False)[1:]
y1 = np.zeros(len(x1)) + 0.05
plt.ylim([-0.075, 0.075])
plt.plot(x1, y1, 'o')
plt.plot(x_vals, y, '.')
"""
Explanation: Not exactly the same as we thought. Can you think why? And, how to explore the state space throughout?
Regardless, I will move forward trying to solve this environment. What we have above is good enough because it gives us the most commonly sampled observations for each of the variables. Cutting through these and boxing them is a process called discretization. It is basically a way to convert from continuous states to discrete. This would let us deal with the continous states nicely.
Let's try it.
End of explanation
"""
# Cut points for the second variable: the four linspace values plus an
# explicit 0, sorted into ascending bucket edges.
xd1 = np.sort(np.append(np.linspace(-1.5, 1.5, 4, endpoint=True), 0))
y1 = np.zeros(len(xd1)) + 0.05
plt.ylim([-0.075, 0.075])
plt.plot(xd1, y1, 'o')
plt.plot(xd_vals, y, '.')
"""
Explanation: I'm creating 4 states, anything left/right of each of the dots represent a unique state.
Let's do the same for the variable 'change in x' or xd:
End of explanation
"""
# Nine interior cut points across the third variable's range -> ten buckets.
a1 = np.sort(np.linspace(a_thres[0], a_thres[1],
                         10, endpoint=False)[1:])
y1 = np.zeros(len(a1)) + 0.05
plt.ylim([-0.1, 0.1])
plt.plot(a1, y1, 'o')
plt.plot(a_vals, y, '.')
"""
Explanation: And for the acceleration variable:
End of explanation
"""
# Symmetric log-spaced (base 2) cut points around 0 for the fourth
# variable, keeping only the edges with magnitude below 2.
all_vals = np.sort(np.append(
    (np.logspace(-7, 4, 6, endpoint=False, base=2)[1:],
     -np.logspace(-7, 4, 6, endpoint=False, base=2)[1:]), 0))
idxs = np.where(np.abs(all_vals) < 2)
ad1 = all_vals[idxs]
y1 = np.zeros(len(ad1)) + 0.05
plt.ylim([-0.075, 0.075])
plt.plot(ad1, y1, 'o')
plt.plot(ad_vals, y, '.')
"""
Explanation: Finally, the 'change in acceleration':
End of explanation
"""
a1
"""
Explanation: Now, we use a function called 'digitize' which basically creates the buckets as mentioned above.
End of explanation
"""
np.digitize(-0.99, a1)
"""
Explanation: For example, look at the acceleration variable. We go from -0.16 to 0.16 with equally spaced chunks. What would you expect a value of -0.99 fall into? Which bucket?
End of explanation
"""
np.digitize(0, a1)
"""
Explanation: Right. Bucket 0. How about a value of 0. If -0.99, which is less than -0.16 (the first bucket) falls into the first bucket, what do you think 0 would fall into?
End of explanation
"""
np.digitize(-0.0001, a1)
"""
Explanation: That was actually a tricky question. Digitize default to the 'left' if the value is equal to any of the buckets. How about a value slightly less than zero then? What bucket do you call?
End of explanation
"""
np.digitize(0.0001, a1)
"""
Explanation: Right, 4 it is, how about a number slightly more than zero?
End of explanation
"""
# Combined strip plot: each variable's bucket edges ('|') drawn on a line
# just above that variable's observed samples ('.').
yx1 = np.zeros_like(x1) + 0.25
yx = np.zeros_like(x_vals) + 0.20
yxd1 = np.zeros_like(xd1) + 0.10
yxd = np.zeros_like(xd_vals) + 0.05
ya1 = np.zeros_like(a1) - 0.05
ya = np.zeros_like(a_vals) - 0.10
yad1 = np.zeros_like(ad1) - 0.20
yad = np.zeros_like(ad_vals) - 0.25
plt.ylim([-0.3, 0.3])
plt.plot(x1, yx1, '|')
plt.plot(xd1, yxd1, '|')
plt.plot(a1, ya1, '|')
plt.plot(ad1, yad1, '|')
plt.plot(x_vals, yx, '.')
plt.plot(xd_vals, yxd, '.')
plt.plot(a_vals, ya, '.')
plt.plot(ad_vals, yad, '.')
"""
Explanation: Back to 5.
Cool!!
Let's look at the buckets next to each of the variables.
End of explanation
"""
# Same combined strip plot, now with the (halved) lower/upper environment
# bounds for components 0 and 2 drawn as red vertical segments.
yx1 = np.zeros_like(x1) + 0.25
yx = np.zeros_like(x_vals) + 0.20
yxd1 = np.zeros_like(xd1) + 0.10
yxd = np.zeros_like(xd_vals) + 0.05
ya1 = np.zeros_like(a1) - 0.05
ya = np.zeros_like(a_vals) - 0.10
yad1 = np.zeros_like(ad1) - 0.20
yad = np.zeros_like(ad_vals) - 0.25
plt.plot(x1, yx1, '|')
plt.plot(xd1, yxd1, '|')
plt.plot(a1, ya1, '|')
plt.plot(ad1, yad1, '|')
plt.plot(x_vals, yx, '.')
plt.plot(xd_vals, yxd, '.')
plt.plot(a_vals, ya, '.')
plt.plot(ad_vals, yad, '.')
plt.ylim([-0.3, 0.3])
plt.plot((x_thres[0], x_thres[0]), (0.15, 0.25), 'k-', color='red')
plt.plot((x_thres[1], x_thres[1]), (0.15, 0.25), 'k-', color='red')
plt.plot((a_thres[0], a_thres[0]), (-0.05, -0.15), 'k-', color='red')
plt.plot((a_thres[1], a_thres[1]), (-0.05, -0.15), 'k-', color='red')
"""
Explanation: Nice, how about we add the lower and upper boundaries we got before?
End of explanation
"""
def learning_schedule(episode, n_episodes):
    """Learning rate for the given episode: decays with training progress,
    clamped to the interval [0, 0.8]."""
    decayed = 1 - episode/n_episodes
    if decayed >= 0.8:
        return 0.8
    if decayed <= 0.:
        return 0.
    return decayed
"""
Explanation: Nice, you might not agree with the buckets I selected. That is OK, in fact I hope you don't, there seems to be a better way of manually selecting these buckets. You will get a chance at improving on this later.
Let me give you a couple of functions that would make our algorithm work.
End of explanation
"""
def action_selection(state, Q, episode, n_episodes):
    """Epsilon-greedy action selection with a three-stage epsilon schedule.

    First quarter of training: explore almost always (0.99); second
    quarter: explore a third of the time (0.33); afterwards: pure greedy.
    Returns the chosen action and the epsilon that was in effect.
    """
    if episode < n_episodes//4:
        epsilon = 0.99
    elif episode < n_episodes//2:
        epsilon = 0.33
    else:
        epsilon = 0.
    if np.random.random() < epsilon:
        action = np.random.randint(Q.shape[1])
    else:
        action = np.argmax(Q[state])
    return action, epsilon
"""
Explanation: Learning schedule plays a similar role for alpha that action_selection played for epsilon in the previous notebooks. Alpha is the learning rate: it determines the importance and weight we give to newly calculated values in comparison with values we calculated in earlier iterations. Think about it like how much you trust your past knowledge. Intuitively, early in the exploration phase we don't know much about the environment, so perhaps we should not rely on our previous calculations too much. But the more experience we gain, the more we should hold onto our knowledge. Now, remember this is also a 'dilemma', meaning there is a tradeoff and there is not a clear-cut answer.
Let's continue. We define action_selection just as before:
End of explanation
"""
def observation_to_state(observation, bins):
    """Map a continuous observation onto a single integer state id.

    Each feature is digitized into its bucket index using the matching
    entry of `bins`, and the bucket indices are concatenated as decimal
    digits (e.g. buckets [1, 0, 2, 4] become the id 1024).
    """
    digits = [str(int(np.digitize(value, bins=edges)))
              for value, edges in zip(observation, bins)]
    return int("".join(digits))
# Demo: each sample observation maps to its own composite state id
# (one decimal digit per discretized feature).
sample_states = [[0.33, 0.2, 0.1, 0.],
                 [-0.33, 0.2, 0.1, 0.],
                 [0.33, -0.2, 0.1, 0.],
                 [0.33, 0.2, -0.1, 0.],
                 [0.33, 0.2, 0.1, .99]]
for sample_state in sample_states:
    print(observation_to_state(sample_state, (x1, xd1, a1, ad1)))
"""
Explanation: And this function, observation_to_state, will take care of the discretization. It takes a tuple of continuous values and gives us an integer. Let's take a look:
End of explanation
"""
def q_learning(env, bins, gamma = 0.99):
    """Tabular Q-learning on a discretized CartPole environment.

    Parameters
    ----------
    env : gym environment (CartPole-v0, possibly wrapped in a Monitor)
    bins : tuple of bin-edge arrays, one per observation feature, passed
        to observation_to_state to discretize the continuous observation
    gamma : discount factor for future rewards

    Returns
    -------
    Q : (nS, nA) array of learned action values
    stats : tuple of per-step lists (alphas, epsilons, states, actions)
        collected only for the diagnostic plots later in the notebook
    """
    # 4 features x 10 buckets each -> at most 10**4 integer state ids.
    nS = 10 * 10 * 10 * 10
    nA = env.env.action_space.n
    # Small random init (centered on 0) breaks ties between actions early on.
    Q = np.random.random((nS, nA)) - 0.5
    n_episodes = 5000
    # Diagnostics, appended once per environment step (not per episode).
    alphas = []
    epsilons = []
    states = []
    actions = []
    for episode in range(n_episodes):
        observation = env.reset()
        state = observation_to_state(observation, bins)
        done = False
        while not done:
            states.append(state)
            action, epsilon = action_selection(state, Q, episode, n_episodes)
            epsilons.append(epsilon)
            actions.append(action)
            observation, reward, done, info = env.step(action)
            nstate = observation_to_state(observation, bins)
            alpha = learning_schedule(episode, n_episodes)
            alphas.append(alpha)
            # TD(0) update; `(not done)` zeroes the bootstrap term on terminal steps.
            Q[state][action] += alpha * (reward + gamma * Q[nstate].max() * (not done) - Q[state][action])
            state = nstate
    return Q, (alphas, epsilons, states, actions)
"""
Explanation: Cool, right?
Alright, I'll give you the new q_learning algorithm.
End of explanation
"""
# Record episodes to a temporary directory so selected runs can be
# embedded as videos further down in the notebook.
mdir = tempfile.mkdtemp()
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, mdir, force=True)
# x1, xd1, a1, ad1 are the per-feature bucket edges chosen earlier.
Q, stats = q_learning(env, (x1, xd1, a1, ad1))
"""
Explanation: You can see it, just like we did before, only differences are the use of the functions defined above and the collection of the alphas, epsilons, etc. This latter one is just to show you some stats and graphs, you could remove them and it would do just as well.
Let's run this algorithm?! Shall we?
End of explanation
"""
# Embed n_videos evenly spaced recorded episodes (first, middle, last)
# into the notebook as base64-encoded inline mp4 players.
videos = np.array(env.videos)
n_videos = 3
idxs = np.linspace(0, len(videos) - 1, n_videos).astype(int)
videos = videos[idxs,:]
strm = ''
for video_path, meta_path in videos:
    # 'rb' is sufficient: the file is only read, so no write access is
    # needed; the context manager also closes the handle (the original
    # leaked it).
    with io.open(video_path, 'rb') as video_file:
        video = video_file.read()
    encoded = base64.b64encode(video)
    with open(meta_path) as data_file:
        meta = json.load(data_file)
    # Fixed malformed closing header tag ("<h2/>" -> "</h2>").
    html_tag = """
    <h2>{0}</h2>
    <video width="960" height="540" controls>
    <source src="data:video/mp4;base64,{1}" type="video/mp4" />
    </video>"""
    strm += html_tag.format('Episode ' + str(meta['episode_id']), encoded.decode('ascii'))
HTML(data=strm)
"""
Explanation: Now, let's take a look at some of the episodes:
End of explanation
"""
# State-value function: best achievable action value in each state.
V = np.max(Q, axis=1)
V
V.max()
# Greedy policy: best action per state.
pi = np.argmax(Q, axis=1)
pi
"""
Explanation: Interesting right?
The last episode should show that the agent 'knows' what the goal of the environment is, though perhaps not with an impressive performance.
Let's look into the value function and policy:
End of explanation
"""
env.close()
# NOTE(review): gym.upload targeted the old OpenAI Gym evaluation service
# and was removed from later gym releases -- confirm it exists in the
# installed version before relying on this cell.
gym.upload(mdir, api_key='<YOUR API KEY>')
"""
Explanation: So, the policy looks pretty much the same as it did before. This is because we "made" this environment discrete. Hold your thoughts...
Let's close this environment and see how the agent did per OpenAI Gym.
End of explanation
"""
alphas, epsilons, states, actions = stats
# The stats lists were appended once per environment step inside
# q_learning's while-loop, so the x-axis is the training-step index,
# not the episode number; labels fixed accordingly. Alpha is a learning
# rate in [0, 0.8], not a percentage.
plt.plot(np.arange(len(alphas)), alphas, '.')
plt.title('Alphas')
plt.xlabel('Step')
plt.ylabel('Learning rate')
"""
Explanation: Not thaaaat well. The agent should have shown learning, but very likely it did not pass the environment. In other words, it learned, but definitely not a solid enough policy.
Let's look at some of the things collected.
End of explanation
"""
# Epsilon per training step; as with the alphas, each entry was appended
# once per environment step, so the x-axis counts steps, not episodes,
# and epsilon is a probability in [0, 1].
plt.plot(np.arange(len(epsilons)), epsilons, '.')
plt.title('Epsilon')
plt.xlabel('Step')
plt.ylabel('Probability')
"""
Explanation: See the learning rate goes from 0.8 to 0? Alright.
End of explanation
"""
# Histogram of visited state ids; gaps reveal buckets the discretization
# can produce but the agent never actually reaches.
hist, bins = np.histogram(states, bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.title('States Visited')
plt.xlabel('State')
plt.ylabel('Count')
"""
Explanation: Interesting exploration style. We force 100% exploration for a few episodes, then drop to ~30%, and then become greedy at around step 150,000 (the x-axis counts training steps, not episodes). Not bad. Could we do better?
End of explanation
"""
# Histogram of actions taken (CartPole has 2 discrete actions:
# push left / push right).
hist, bins = np.histogram(actions, bins=3)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.title('Actions Selected')
plt.xlabel('Action')
plt.ylabel('Count')
"""
Explanation: See some states not visited? That's a by-product of the way we discretized the state space; it should not be a problem unless we have a very large state space after discretization. Then, it might be beneficial to tighten things up.
How about the actions?
End of explanation
"""
# Overlay raw samples of each observation feature at separate heights to
# eyeball their ranges before choosing bucket edges.
plt.plot(x_vals, y + 0.10, '.')
plt.plot(xd_vals, y + 0.05, '.')
plt.plot(a_vals, y - 0.05, '.')
plt.plot(ad_vals, y - 0.10, '.')
plt.ylim([-0.15, 0.15])
# NOTE(review): the assignments below are intentional fill-in-the-blank
# exercise placeholders and are NOT valid Python until completed by the
# reader -- this cell will raise a SyntaxError as-is.
x1 = # Set this to a better value
y1 = # Set this to a better value
plt.ylim([-0.075, 0.075])
plt.plot(x1, y1, 'o')
plt.plot(x_vals, y, '.')
xd1 = # Set this to some better value
y1 = np.zeros(len(xd1)) + 0.05
plt.ylim([-0.075, 0.075])
plt.plot(xd1, y1, 'o')
plt.plot(xd_vals, y, '.')
a1 = # Set this to some better value
y1 = np.zeros(len(a1)) + 0.05
plt.ylim([-0.1, 0.1])
plt.plot(a1, y1, 'o')
plt.plot(a_vals, y, '.')
ad1 = # Set this to some better value
y1 = np.zeros(len(ad1)) + 0.05
plt.ylim([-0.075, 0.075])
plt.plot(ad1, y1, 'o')
plt.plot(ad_vals, y, '.')
# Plot the chosen bucket edges ('|') against the raw samples ('.'),
# one pair of rows per feature, to compare coverage.
yx1 = np.zeros_like(x1) + 0.25
yx = np.zeros_like(x_vals) + 0.20
yxd1 = np.zeros_like(xd1) + 0.10
yxd = np.zeros_like(xd_vals) + 0.05
ya1 = np.zeros_like(a1) - 0.05
ya = np.zeros_like(a_vals) - 0.10
yad1 = np.zeros_like(ad1) - 0.20
yad = np.zeros_like(ad_vals) - 0.25
plt.ylim([-0.3, 0.3])
plt.plot(x1, yx1, '|')
plt.plot(xd1, yxd1, '|')
plt.plot(a1, ya1, '|')
plt.plot(ad1, yad1, '|')
plt.plot(x_vals, yx, '.')
plt.plot(xd_vals, yxd, '.')
plt.plot(a_vals, ya, '.')
plt.plot(ad_vals, yad, '.')
# Same overlay plot as the previous cell, but with the environment's
# termination thresholds drawn as red vertical segments.
yx1 = np.zeros_like(x1) + 0.25
yx = np.zeros_like(x_vals) + 0.20
yxd1 = np.zeros_like(xd1) + 0.10
yxd = np.zeros_like(xd_vals) + 0.05
ya1 = np.zeros_like(a1) - 0.05
ya = np.zeros_like(a_vals) - 0.10
yad1 = np.zeros_like(ad1) - 0.20
yad = np.zeros_like(ad_vals) - 0.25
plt.plot(x1, yx1, '|')
plt.plot(xd1, yxd1, '|')
plt.plot(a1, ya1, '|')
plt.plot(ad1, yad1, '|')
plt.plot(x_vals, yx, '.')
plt.plot(xd_vals, yxd, '.')
plt.plot(a_vals, ya, '.')
plt.plot(ad_vals, yad, '.')
plt.ylim([-0.3, 0.3])
# x_thres / a_thres: cart-position and pole-angle termination limits.
# Note the 'k' (black) in the format string is overridden by color='red'.
plt.plot((x_thres[0], x_thres[0]), (0.15, 0.25), 'k-', color='red')
plt.plot((x_thres[1], x_thres[1]), (0.15, 0.25), 'k-', color='red')
plt.plot((a_thres[0], a_thres[0]), (-0.05, -0.15), 'k-', color='red')
plt.plot((a_thres[1], a_thres[1]), (-0.05, -0.15), 'k-', color='red')
def learning_schedule(episode, n_episodes):
    """Linearly decay the learning rate over training, clipped to [0, 0.8]."""
    fraction_left = 1 - episode / n_episodes
    return min(0.8, max(0., fraction_left))
# Duplicate of the action_selection cell defined earlier in the notebook,
# repeated here as part of the consolidated end-of-chapter listing.
def action_selection(state, Q, episode, n_episodes):
    """Epsilon-greedy selection with a stepwise exploration schedule."""
    epsilon = 0.99 if episode < n_episodes//4 else 0.33 if episode < n_episodes//2 else 0.
    if np.random.random() < epsilon:
        # Explore: uniformly random action.
        action = np.random.randint(Q.shape[1])
    else:
        # Exploit: greedy action for this state.
        action = np.argmax(Q[state])
    return action, epsilon
# Duplicate of the observation_to_state cell defined earlier in the
# notebook, repeated here as part of the consolidated listing.
def observation_to_state(observation, bins):
    """Digitize each feature and concatenate the bucket indices as digits."""
    ss = []
    for i in range(len(observation)):
        ss.append(int(np.digitize(observation[i], bins=bins[i])))
    # Concatenating assumes each bucket index fits in a single digit.
    state = int("".join(map(lambda feature: str(int(feature)), ss)))
    return state
# Duplicate of the q_learning cell defined earlier in the notebook,
# repeated here as part of the consolidated listing.
def q_learning(env, bins, gamma = 0.99):
    """Tabular Q-learning on a discretized CartPole; returns (Q, stats)."""
    # 4 features x 10 buckets each -> at most 10**4 integer state ids.
    nS = 10 * 10 * 10 * 10
    nA = env.env.action_space.n
    Q = np.random.random((nS, nA)) - 0.5
    n_episodes = 5000
    # Diagnostics, appended once per environment step.
    alphas = []
    epsilons = []
    states = []
    actions = []
    for episode in range(n_episodes):
        observation = env.reset()
        state = observation_to_state(observation, bins)
        done = False
        while not done:
            states.append(state)
            action, epsilon = action_selection(state, Q, episode, n_episodes)
            epsilons.append(epsilon)
            actions.append(action)
            observation, reward, done, info = env.step(action)
            nstate = observation_to_state(observation, bins)
            alpha = learning_schedule(episode, n_episodes)
            alphas.append(alpha)
            # TD(0) update; `(not done)` zeroes the bootstrap on terminal steps.
            Q[state][action] += alpha * (reward + gamma * Q[nstate].max() * (not done) - Q[state][action])
            state = nstate
    return Q, (alphas, epsilons, states, actions)
# Consolidated listing: train, embed recorded videos, inspect V/pi, upload.
mdir = tempfile.mkdtemp()
env = gym.make('CartPole-v0')
env = wrappers.Monitor(env, mdir, force=True)
Q, stats = q_learning(env, (x1, xd1, a1, ad1))
# Pick three evenly spaced recordings (first, middle, last).
videos = np.array(env.videos)
n_videos = 3
idxs = np.linspace(0, len(videos) - 1, n_videos).astype(int)
videos = videos[idxs,:]
strm = ''
for video_path, meta_path in videos:
    # NOTE(review): file handle is never closed; 'rb' would suffice for reading.
    video = io.open(video_path, 'r+b').read()
    encoded = base64.b64encode(video)
    with open(meta_path) as data_file:
        meta = json.load(data_file)
    # NOTE(review): closing header tag is malformed ("<h2/>" should be "</h2>").
    html_tag = """
    <h2>{0}<h2/>
    <video width="960" height="540" controls>
    <source src="data:video/mp4;base64,{1}" type="video/mp4" />
    </video>"""
    strm += html_tag.format('Episode ' + str(meta['episode_id']), encoded.decode('ascii'))
HTML(data=strm)
# State values and greedy policy derived from the learned Q-table.
V = np.max(Q, axis=1)
V
V.max()
pi = np.argmax(Q, axis=1)
pi
env.close()
# NOTE(review): gym.upload targeted the old OpenAI Gym evaluation service
# and was removed from later gym releases.
gym.upload(mdir, api_key='<YOUR API KEY>')
# Consolidated listing: diagnostic plots. Each stats list was appended
# once per environment step, so the x-axis counts steps, not episodes.
alphas, epsilons, states, actions = stats
plt.plot(np.arange(len(alphas)), alphas, '.')
plt.plot(np.arange(len(epsilons)), epsilons, '.')
# Distribution of visited state ids.
hist, bins = np.histogram(states, bins=50)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
# Distribution of actions taken.
hist, bins = np.histogram(actions, bins=3)
width = 0.7 * (bins[1] - bins[0])
center = (bins[:-1] + bins[1:]) / 2
plt.bar(center, hist, align='center', width=width)
plt.show()
"""
Explanation: Expected, I would say. If you want to balance a pole you will have to go left and right to keep the pole in place.
Ok, so from these figures you should think about ways to make this algorithm work. Why didn't things work? How can we make it learn more, or better?
I'll let you explore from now on...
Your turn
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/test-institute-3/cmip6/models/sandbox-1/atmos.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'test-institute-3', 'sandbox-1', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: TEST-INSTITUTE-3
Source ID: SANDBOX-1
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:46
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
# Default is 0 (unpublished); switch to 1 only when the document is complete.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# Each property cell in this notebook follows the same generated pattern:
# a set_id call selecting the CMIP6 property, followed by a set_value call
# (left as a TODO) recording the model's answer for it.
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection schemes name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# Auto-generated documentation-template cell for a BOOLEAN property
# (cardinality 1.1 per the accompanying explanation). Note the template
# shows DOC.set_value(value) WITHOUT quotes: booleans are passed as the
# Python literals True/False, unlike the quoted STRING/ENUM properties.
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
tensorflow/examples | courses/udacity_intro_to_tensorflow_for_deep_learning/l08c07_forecasting_with_stateful_rnn.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
def plot_series(time, series, format="-", start=0, end=None, label=None):
    """Plot a time series (or a slice of it) on the current matplotlib figure.

    Args:
        time: x-axis values (time steps), same length as `series`.
        series: y-axis values to plot.
        format: matplotlib format string controlling the line style.
        start: index of the first point to plot.
        end: index one past the last point to plot (None = plot to the end).
        label: optional legend label; the legend is only drawn when given.
    """
    plt.plot(time[start:end], series[start:end], format, label=label)
    plt.xlabel("Time")
    plt.ylabel("Value")
    # Only draw a legend when the caller actually supplied a label.
    if label:
        plt.legend(fontsize=14)
    plt.grid(True)
def trend(time, slope=0):
    """Linear trend through the origin: slope * time."""
    return time * slope
def seasonal_pattern(season_time):
    """An arbitrary seasonal shape: a cosine bump followed by an exponential decay.

    For the first 40% of the season the value follows cos(2*pi*t); for the
    remainder it decays as exp(-3*t).
    """
    early = np.cos(season_time * 2 * np.pi)
    late = 1 / np.exp(3 * season_time)
    return np.where(season_time < 0.4, early, late)
def seasonality(time, period, amplitude=1, phase=0):
    """Tile the seasonal pattern so it repeats every `period` time steps."""
    # Fractional position within the current period, in [0, 1).
    position = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(position)
def white_noise(time, noise_level=1, seed=None):
    """Gaussian noise (std = noise_level), one sample per entry of `time`.

    A fixed `seed` makes the noise reproducible.
    """
    return np.random.RandomState(seed).randn(len(time)) * noise_level
# Synthesize 4 years of daily data: baseline + linear trend + yearly
# seasonality + white noise.
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
# Hold out everything after time step 1000 as the validation period.
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
"""
Explanation: Forecasting with a stateful RNN
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c07_forecasting_with_stateful_rnn.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c07_forecasting_with_stateful_rnn.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
Setup
End of explanation
"""
def sequential_window_dataset(series, window_size):
    """Build a tf.data pipeline of consecutive, non-overlapping windows.

    Each element is an (inputs, targets) pair where the targets are the
    inputs shifted one step ahead. Windows are emitted in order with batch
    size 1, as required by a stateful RNN.
    """
    expanded = tf.expand_dims(series, axis=-1)
    dataset = (
        tf.data.Dataset.from_tensor_slices(expanded)
        .window(window_size + 1, shift=window_size, drop_remainder=True)
        .flat_map(lambda w: w.batch(window_size + 1))
        .map(lambda w: (w[:-1], w[1:]))
    )
    return dataset.batch(1).prefetch(1)
# Sanity check: windows of 3 inputs, with targets shifted one step ahead.
for X_batch, y_batch in sequential_window_dataset(tf.range(10), 3):
    print(X_batch.numpy(), y_batch.numpy())
class ResetStatesCallback(keras.callbacks.Callback):
    """Resets the RNN layers' hidden state at the start of every epoch.

    With stateful=True the hidden state carries over between batches, so
    without this reset the state from the end of one epoch would leak into
    the beginning of the next pass over the data.
    """
    def on_epoch_begin(self, epoch, logs):
        self.model.reset_states()
# Learning-rate sweep: train briefly while exponentially increasing the
# learning rate, then plot loss vs. rate to pick a good value.
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = sequential_window_dataset(x_train, window_size)
model = keras.models.Sequential([
    # batch_input_shape pins the batch size to 1, as required for a
    # stateful RNN fed by sequential_window_dataset.
    keras.layers.SimpleRNN(100, return_sequences=True, stateful=True,
                           batch_input_shape=[1, None, 1]),
    keras.layers.SimpleRNN(100, return_sequences=True, stateful=True),
    keras.layers.Dense(1),
    # Scale outputs up to the rough magnitude of the series values.
    keras.layers.Lambda(lambda x: x * 200.0)
])
# Grow the learning rate by a factor of 10 every 30 epochs, starting at 1e-8.
lr_schedule = keras.callbacks.LearningRateScheduler(
    lambda epoch: 1e-8 * 10**(epoch / 30))
reset_states = ResetStatesCallback()
# `learning_rate` replaces the deprecated `lr` keyword argument.
optimizer = keras.optimizers.SGD(learning_rate=1e-8, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
history = model.fit(train_set, epochs=100,
                    callbacks=[lr_schedule, reset_states])
# Loss vs. learning rate on a log axis: pick a rate just before the minimum.
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 30])
# Final training run with the learning rate chosen from the sweep above.
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = sequential_window_dataset(x_train, window_size)
valid_set = sequential_window_dataset(x_valid, window_size)
model = keras.models.Sequential([
    keras.layers.SimpleRNN(100, return_sequences=True, stateful=True,
                           batch_input_shape=[1, None, 1]),
    keras.layers.SimpleRNN(100, return_sequences=True, stateful=True),
    keras.layers.Dense(1),
    keras.layers.Lambda(lambda x: x * 200.0)
])
# `learning_rate` replaces the deprecated `lr` keyword argument.
optimizer = keras.optimizers.SGD(learning_rate=1e-7, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
              optimizer=optimizer,
              metrics=["mae"])
reset_states = ResetStatesCallback()
# Keep only the weights with the best validation loss seen so far.
model_checkpoint = keras.callbacks.ModelCheckpoint(
    "my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
model.fit(train_set, epochs=500,
          validation_data=valid_set,
          callbacks=[early_stopping, model_checkpoint, reset_states])
# Restore the best checkpoint and forecast over the whole series in one pass.
model = keras.models.load_model("my_checkpoint.h5")
model.reset_states()
rnn_forecast = model.predict(series[np.newaxis, :, np.newaxis])
# Keep only the one-step-ahead predictions covering the validation period.
rnn_forecast = rnn_forecast[0, split_time - 1:-1, 0]
rnn_forecast.shape
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
# Mean absolute error of the forecast on the validation period.
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
"""
Explanation: Stateful RNN Forecasting
End of explanation
"""
|
tensorflow/docs-l10n | site/en-snapshot/hub/tutorials/tf2_semantic_approximate_nearest_neighbors.ipynb | apache-2.0 | # Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Explanation: Copyright 2019 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
!pip install apache_beam
!pip install 'scikit_learn~=0.23.0' # For gaussian_random_matrix.
!pip install annoy
"""
Explanation: Semantic Search with Approximate Nearest Neighbors and Text Embeddings
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/hub/tutorials/tf2_semantic_approximate_nearest_neighbors"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_semantic_approximate_nearest_neighbors.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/hub/blob/master/examples/colab/tf2_semantic_approximate_nearest_neighbors.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/hub/examples/colab/tf2_semantic_approximate_nearest_neighbors.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
<td>
<a href="https://tfhub.dev/google/nnlm-en-dim128/2"><img src="https://www.tensorflow.org/images/hub_logo_32px.png" />See TF Hub model</a>
</td>
</table>
This tutorial illustrates how to generate embeddings from a TensorFlow Hub (TF-Hub) model given input data, and build an approximate nearest neighbours (ANN) index using the extracted embeddings. The index can then be used for real-time similarity matching and retrieval.
When dealing with a large corpus of data, it's not efficient to perform exact matching by scanning the whole repository to find the most similar items to a given query in real-time. Thus, we use an approximate similarity matching algorithm which allows us to trade off a little bit of accuracy in finding exact nearest neighbor matches for a significant boost in speed.
In this tutorial, we show an example of real-time text search over a corpus of news headlines to find the headlines that are most similar to a query. Unlike keyword search, this captures the semantic similarity encoded in the text embedding.
The steps of this tutorial are:
1. Download sample data.
2. Generate embeddings for the data using a TF-Hub model
3. Build an ANN index for the embeddings
4. Use the index for similarity matching
We use Apache Beam to generate the embeddings from the TF-Hub model. We also use Spotify's ANNOY library to build the approximate nearest neighbor index.
More models
For models that have the same architecture but were trained on a different language, refer to this collection. Here you can find all text embeddings that are currently hosted on tfhub.dev.
Setup
Install the required libraries.
End of explanation
"""
import os
import sys
import pickle
from collections import namedtuple
from datetime import datetime
import numpy as np
import apache_beam as beam
from apache_beam.transforms import util
import tensorflow as tf
import tensorflow_hub as hub
import annoy
from sklearn.random_projection import gaussian_random_matrix
print('TF version: {}'.format(tf.__version__))
print('TF-Hub version: {}'.format(hub.__version__))
print('Apache Beam version: {}'.format(beam.__version__))
"""
Explanation: Import the required libraries
End of explanation
"""
!wget 'https://dataverse.harvard.edu/api/access/datafile/3450625?format=tab&gbrecs=true' -O raw.tsv
!wc -l raw.tsv
!head raw.tsv
"""
Explanation: 1. Download Sample Data
A Million News Headlines dataset contains news headlines published over a period of 15 years sourced from the reputable Australian Broadcasting Corp. (ABC). This news dataset has a summarised historical record of noteworthy events in the globe from early-2003 to end-2017 with a more granular focus on Australia.
Format: Tab-separated two-column data: 1) publication date and 2) headline text. We are only interested in the headline text.
End of explanation
"""
!rm -r corpus
!mkdir corpus
with open('corpus/text.txt', 'w') as out_file:
with open('raw.tsv', 'r') as in_file:
for line in in_file:
headline = line.split('\t')[1].strip().strip('"')
out_file.write(headline+"\n")
!tail corpus/text.txt
"""
Explanation: For simplicity, we only keep the headline text and remove the publication date
End of explanation
"""
# Lazily-initialised TF-Hub module; kept global so each Beam worker
# process loads the model only once.
embed_fn = None

def generate_embeddings(text, model_url, random_projection_matrix=None):
    """Embed a batch of sentences with the TF-Hub model at `model_url`.

    Beam runs this in separate worker processes, so the model is loaded
    on first use and cached in the module-level `embed_fn`. If a
    `random_projection_matrix` is given, the embeddings are projected
    down with it. Returns the `(text, embeddings)` pair.
    """
    global embed_fn
    if embed_fn is None:
        # Fresh worker process: load the hub module on demand.
        embed_fn = hub.load(model_url)
    vectors = embed_fn(text).numpy()
    if random_projection_matrix is None:
        return text, vectors
    return text, vectors.dot(random_projection_matrix)
"""
Explanation: 2. Generate Embeddings for the Data.
In this tutorial, we use the Neural Network Language Model (NNLM) to generate embeddings for the headline data. The sentence embeddings can then be easily used to compute sentence level meaning similarity. We run the embedding generation process using Apache Beam.
Embedding extraction method
End of explanation
"""
def to_tf_example(entries):
    """Serialize a batch of (texts, embeddings) into tf.Example records.

    `entries` is a `(text_list, embedding_list)` pair as produced by
    `generate_embeddings`. Returns a list of serialized `tf.train.Example`
    protos, one per sentence, each carrying a 'text' bytes feature and an
    'embedding' float feature.
    """
    text_list, embedding_list = entries
    examples = []
    # zip pairs each sentence with its embedding instead of indexing both
    # lists by position (idiomatic; the lists are produced with equal length).
    for text, embedding in zip(text_list, embedding_list):
        features = {
            'text': tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[text.encode('utf-8')])),
            'embedding': tf.train.Feature(
                float_list=tf.train.FloatList(value=embedding.tolist()))
        }
        # deterministic=True keeps the serialized bytes stable across runs.
        example = tf.train.Example(
            features=tf.train.Features(
                feature=features)).SerializeToString(deterministic=True)
        examples.append(example)
    return examples
"""
Explanation: Convert to tf.Example method
End of explanation
"""
def run_hub2emb(args):
    '''Runs the embedding generation pipeline

    `args` is a dict of pipeline settings (runner, data_dir, output_dir,
    batch_size, model_url, random_projection_matrix, ...). It is forwarded
    verbatim to Beam's PipelineOptions and also wrapped in a namedtuple so
    the fields can be read as attributes below.
    '''
    options = beam.options.pipeline_options.PipelineOptions(**args)
    # Re-bind args as a namedtuple for attribute-style access.
    args = namedtuple("options", args.keys())(*args.values())
    with beam.Pipeline(args.runner, options=options) as pipeline:
        (
            pipeline
            # One input element per line (headline) of the corpus files.
            | 'Read sentences from files' >> beam.io.ReadFromText(
                file_pattern=args.data_dir)
            # Fixed-size batches so the hub model is called on batches, not lines.
            | 'Batch elements' >> util.BatchElements(
                min_batch_size=args.batch_size, max_batch_size=args.batch_size)
            | 'Generate embeddings' >> beam.Map(
                generate_embeddings, args.model_url, args.random_projection_matrix)
            # FlatMap: each batch expands into many serialized tf.Example records.
            | 'Encode to tf example' >> beam.FlatMap(to_tf_example)
            | 'Write to TFRecords files' >> beam.io.WriteToTFRecord(
                file_path_prefix='{}/emb'.format(args.output_dir),
                file_name_suffix='.tfrecords')
        )
"""
Explanation: Beam pipeline
End of explanation
"""
def generate_random_projection_weights(original_dim, projected_dim):
    """Create, persist, and return a Gaussian random projection matrix.

    The matrix maps `original_dim`-dimensional embeddings down to
    `projected_dim` dimensions. It is pickled to 'random_projection_matrix'
    on disk so the query-time code can apply the identical projection.
    """
    # NOTE(review): sklearn's `gaussian_random_matrix` is deprecated/removed in
    # newer releases; `GaussianRandomProjection` (or np.random.normal scaled by
    # 1/sqrt(projected_dim)) is the modern equivalent — TODO confirm the
    # installed sklearn version before upgrading.
    random_projection_matrix = gaussian_random_matrix(
        n_components=projected_dim, n_features=original_dim).T
    # (fixed typo: "was creates" -> "was created")
    print("A Gaussian random weight matrix was created with shape of {}".format(random_projection_matrix.shape))
    print('Storing random projection matrix to disk...')
    with open('random_projection_matrix', 'wb') as handle:
        pickle.dump(random_projection_matrix,
                    handle, protocol=pickle.HIGHEST_PROTOCOL)
    return random_projection_matrix
"""
Explanation: Generating Random Projection Weight Matrix
Random projection is a simple, yet powerful technique used to reduce the dimensionality of a set of points which lie in Euclidean space. For a theoretical background, see the Johnson-Lindenstrauss lemma.
Reducing the dimensionality of the embeddings with random projection means less time needed to build and query the ANN index.
In this tutorial we use Gaussian Random Projection from the Scikit-learn library.
End of explanation
"""
model_url = 'https://tfhub.dev/google/nnlm-en-dim128/2' #@param {type:"string"}
projected_dim = 64 #@param {type:"number"}
"""
Explanation: Set parameters
If you want to build an index using the original embedding space without random projection, set the projected_dim parameter to None. Note that this will slow down the indexing step for high-dimensional embeddings.
End of explanation
"""
import tempfile
output_dir = tempfile.mkdtemp()
original_dim = hub.load(model_url)(['']).shape[1]
random_projection_matrix = None
if projected_dim:
random_projection_matrix = generate_random_projection_weights(
original_dim, projected_dim)
args = {
'job_name': 'hub2emb-{}'.format(datetime.utcnow().strftime('%y%m%d-%H%M%S')),
'runner': 'DirectRunner',
'batch_size': 1024,
'data_dir': 'corpus/*.txt',
'output_dir': output_dir,
'model_url': model_url,
'random_projection_matrix': random_projection_matrix,
}
print("Pipeline args are set.")
args
print("Running pipeline...")
%time run_hub2emb(args)
print("Pipeline is done.")
!ls {output_dir}
"""
Explanation: Run pipeline
End of explanation
"""
embed_file = os.path.join(output_dir, 'emb-00000-of-00001.tfrecords')
sample = 5
# Create a description of the features.
feature_description = {
'text': tf.io.FixedLenFeature([], tf.string),
'embedding': tf.io.FixedLenFeature([projected_dim], tf.float32)
}
def _parse_example(example):
    """Deserialize one tf.Example proto into {'text', 'embedding'} tensors."""
    # Parse the input `tf.Example` proto using the dictionary above.
    return tf.io.parse_single_example(example, feature_description)
dataset = tf.data.TFRecordDataset(embed_file)
for record in dataset.take(sample).map(_parse_example):
print("{}: {}".format(record['text'].numpy().decode('utf-8'), record['embedding'].numpy()[:10]))
"""
Explanation: Read some of the generated embeddings...
End of explanation
"""
def build_index(embedding_files_pattern, index_filename, vector_length,
                metric='angular', num_trees=100):
    '''Builds an ANNOY index

    Reads every TFRecord file matching `embedding_files_pattern`, adds each
    (text, embedding) record to an ANNOY index of dimensionality
    `vector_length`, then writes two artifacts to disk:
      - `index_filename`: the serialized ANNOY index, and
      - `index_filename + '.mapping'`: a pickled dict from the integer item
        id used inside the index back to the original headline text.
    `metric` and `num_trees` are passed through to ANNOY (more trees gives
    better recall at the cost of a larger index and slower build).
    '''
    annoy_index = annoy.AnnoyIndex(vector_length, metric=metric)
    # Mapping between the item and its identifier in the index
    mapping = {}
    embed_files = tf.io.gfile.glob(embedding_files_pattern)
    num_files = len(embed_files)
    print('Found {} embedding file(s).'.format(num_files))
    item_counter = 0
    for i, embed_file in enumerate(embed_files):
        print('Loading embeddings in file {} of {}...'.format(i+1, num_files))
        dataset = tf.data.TFRecordDataset(embed_file)
        # _parse_example is the module-level parser defined above.
        for record in dataset.map(_parse_example):
            text = record['text'].numpy().decode("utf-8")
            embedding = record['embedding'].numpy()
            mapping[item_counter] = text
            annoy_index.add_item(item_counter, embedding)
            item_counter += 1
            # Progress heartbeat for large corpora.
            if item_counter % 100000 == 0:
                print('{} items loaded to the index'.format(item_counter))
    print('A total of {} items added to the index'.format(item_counter))
    print('Building the index with {} trees...'.format(num_trees))
    annoy_index.build(n_trees=num_trees)
    print('Index is successfully built.')
    print('Saving index to disk...')
    annoy_index.save(index_filename)
    print('Index is saved to disk.')
    print("Index file size: {} GB".format(
        round(os.path.getsize(index_filename) / float(1024 ** 3), 2)))
    # Release the in-memory index; the saved files remain on disk.
    annoy_index.unload()
    print('Saving mapping to disk...')
    with open(index_filename + '.mapping', 'wb') as handle:
        pickle.dump(mapping, handle, protocol=pickle.HIGHEST_PROTOCOL)
    print('Mapping is saved to disk.')
    print("Mapping file size: {} MB".format(
        round(os.path.getsize(index_filename + '.mapping') / float(1024 ** 2), 2)))
embedding_files = "{}/emb-*.tfrecords".format(output_dir)
embedding_dimension = projected_dim
index_filename = "index"
!rm {index_filename}
!rm {index_filename}.mapping
%time build_index(embedding_files, index_filename, embedding_dimension)
!ls
"""
Explanation: 3. Build the ANN Index for the Embeddings
ANNOY (Approximate Nearest Neighbors Oh Yeah) is a C++ library with Python bindings to search for points in space that are close to a given query point. It also creates large read-only file-based data structures that are mapped into memory. It is built and used by Spotify for music recommendations. If you are interested you can play along with other alternatives to ANNOY such as NGT, FAISS, etc.
End of explanation
"""
index = annoy.AnnoyIndex(embedding_dimension)
index.load(index_filename, prefault=True)
print('Annoy index is loaded.')
with open(index_filename + '.mapping', 'rb') as handle:
mapping = pickle.load(handle)
print('Mapping file is loaded.')
"""
Explanation: 4. Use the Index for Similarity Matching
Now we can use the ANN index to find news headlines that are semantically close to an input query.
Load the index and the mapping files
End of explanation
"""
def find_similar_items(embedding, num_matches=5):
    """Return the `num_matches` corpus items nearest to `embedding`.

    Looks up approximate nearest neighbours in the module-level ANNOY
    `index` and translates their integer ids back to headline text via
    the module-level `mapping` dict.
    """
    neighbour_ids = index.get_nns_by_vector(
        embedding, num_matches, search_k=-1, include_distances=False)
    return [mapping[neighbour] for neighbour in neighbour_ids]
"""
Explanation: Similarity matching method
End of explanation
"""
# Load the TF-Hub model
print("Loading the TF-Hub model...")
%time embed_fn = hub.load(model_url)
print("TF-Hub model is loaded.")
random_projection_matrix = None
if os.path.exists('random_projection_matrix'):
print("Loading random projection matrix...")
with open('random_projection_matrix', 'rb') as handle:
random_projection_matrix = pickle.load(handle)
print('random projection matrix is loaded.')
def extract_embeddings(query):
    """Compute the (optionally random-projected) embedding for `query`.

    Uses the module-level `embed_fn` hub model; if a module-level
    `random_projection_matrix` was loaded, the vector is projected with it
    so queries live in the same space as the indexed corpus.
    """
    vector = embed_fn([query])[0].numpy()
    if random_projection_matrix is None:
        return vector
    return vector.dot(random_projection_matrix)
extract_embeddings("Hello Machine Learning!")[:10]
"""
Explanation: Extract embedding from a given query
End of explanation
"""
#@title { run: "auto" }
query = "confronting global challenges" #@param {type:"string"}
print("Generating embedding for the query...")
%time query_embedding = extract_embeddings(query)
print("")
print("Finding relevant items in the index...")
%time items = find_similar_items(query_embedding, 10)
print("")
print("Results:")
print("=========")
for item in items:
print(item)
"""
Explanation: Enter a query to find the most similar items
End of explanation
"""
|
DistrictDataLabs/yellowbrick | examples/jkeung/testing.ipynb | apache-2.0 | import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.metrics import roc_curve, auc
from sklearn.model_selection import train_test_split
"""
Explanation: ROC Curve Example
Inspired by: http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html
This is an example of how to create an ROC Curvs in sklearn vs using the Yellowbrick libarary. The data used is the breast cancer dataset that is included in sklearn.
Import Libraries
End of explanation
"""
bc = datasets.load_breast_cancer()
X = bc.data
y = bc.target
random_state = np.random.RandomState(0)
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=random_state)
"""
Explanation: Import some data to play with
End of explanation
"""
# Learn to predict each class against the other
classifier = svm.SVC(kernel='linear', probability=True, random_state=random_state)
y_score = classifier.fit(X_train, y_train).decision_function(X_test)
# Compute ROC curve and ROC area for each class
fpr, tpr, _ = roc_curve(y_test, y_score)
roc_auc = auc(fpr, tpr)
"""
Explanation: Split the data and prepare data for ROC Curve
End of explanation
"""
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
"""
Explanation: Plot ROC Curve using Matplotlib
End of explanation
"""
import yellowbrick as yb
from yellowbrick.classifier import ROCAUC
visualizer = ROCAUC(classifier)
visualizer.fit(X_train, y_train) # Fit the training data to the visualizer
visualizer.score(X_test, y_test) # Evaluate the model on the test data
g = visualizer.show() # Draw/show/show the data
"""
Explanation: Create ROCAUC using YellowBrick
End of explanation
"""
|
Merinorus/adaisawesome | Homework/04 - Applied ML/Homework_04_Referees_teamawesome-Q1 + Bonus.ipynb | gpl-3.0 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Import the random forest package
from sklearn.ensemble import RandomForestClassifier
filename ="CrowdstormingDataJuly1st.csv"
Data = pd.read_csv(filename)
"""
Explanation: I. Setting up the Problem
End of explanation
"""
Data.ix[:10,:13]
Data.ix[:10,13:28]
"""
Explanation: 1) Peeking into the Data
End of explanation
"""
# 1) Remove the players without rater 1 / 2 rating because we won't be
# able to train or test the values (this can be done as bonus later)
Data_hasImage = Data[pd.notnull(Data['photoID'])]
#Data_hasImage.ix[:10,13:28]
"""
Explanation: II. Preparing the training & test data : Unique Game Row version
1) Keep only players that have a Rater Image
End of explanation
"""
Data_hasImage['mean_rater']=(Data_hasImage['rater1']+Data_hasImage['rater2'])/2
"""
Explanation: 2) Disaggregate the data so each row is 1 game
Got a lot of help from this script ! https://osf.io/w7tds/
It will be much simpler for us to train our random forest if each row corresponds to one game. This way, we won't have to give a different "weight" to each row according to the number of played games.
But let's start by doing the mean value of rater1 and rater 2, because if we keep them separated we might get some strange results.
Indeed, what if for a player, rater1 = 0.0 and rater2 = 0.75 ?
It would not make a lot of sense, or at least we would know our model is not viable !
End of explanation
"""
game_counter = 0
game_total_number = sum(Data_hasImage['games'])
# Raw table that we'll have to convert to a dataframe later
output = [0 for i in range(game_total_number)]
# We now iterate over each row of our dataframe; a row may contain more than one game
for i, row in Data_hasImage.iterrows():
    # Number of games in the current row
    row_game_number = row['games']
    # Number of cumulated cards for the games in the current row
    yellowCards = row['yellowCards']
    yellowReds = row['yellowReds']
    redCards = row['redCards']
    # We want to separate each of these games
    for j in range (row_game_number):
        # NOTE(review): `game = row` aliases the Series (no copy); this works only
        # because `list(game)` below snapshots the values before the next loop
        # iteration mutates the same object again.
        game = row
        game['yellowCards'] = 0
        game['yellowReds'] = 0
        game['redCards'] = 0
        # Basically, we distribute the cards we have on separate games.
        # ie: if we have 2 yellowCard and 1 redCard for a total of 4 games,
        # the first two games will be assigned a yellowCard,
        # the third game will be assigned a redCard,
        # and the last game won't have any card assigned, because there is no card left.
        if yellowCards > 0:
            game['yellowCards'] = 1
            yellowCards = yellowCards - 1
        elif yellowReds > 0:
            game['yellowReds'] = 1
            yellowReds = yellowReds - 1
        elif redCards > 0:
            game['redCards'] = 1
            redCards = redCards - 1
        # Convert from pandas Series to prevent overwriting previous values of the output
        gamelist=list(game)
        # Add the new game to the output
        output[game_counter] = gamelist
        game_counter = game_counter + 1
# Here is the output dataframe
Data_OneGamePerRow = pd.DataFrame(output, columns=list(Data_hasImage.columns))
Data_OneGamePerRow
"""
Explanation: Let's now disaggregate the games:
End of explanation
"""
# Removing columns that we do not need
Data_Simple1 = Data_OneGamePerRow[['playerShort', 'yellowCards', 'yellowReds', 'redCards',
'refNum', 'refCountry', 'games', 'position', 'mean_rater']]
# Take a random 80% sample of the Data for the Training Sample
#Data_Training = Data_Simple1.sample(frac=0.8)
# Take a random 20% sample of the Data for the Testing Sample
#Data_Testing = Data_Simple1.loc[~Data_Simple1.index.isin(Data_Training.index)]
Data_Simple1
#find proportion of yellow & red cards to games
Data_Simple1['fractionYellow'] = Data_Simple1['yellowCards']/Data_Simple1['games']
Data_Simple1['fractionYellowRed'] = Data_Simple1['yellowReds']/Data_Simple1['games']
Data_Simple1['fractionRed'] = Data_Simple1['redCards']/Data_Simple1['games']
Data_Simple2 = Data_Simple1[['playerShort', 'fractionYellow', 'fractionYellowRed', 'fractionRed',
'refNum', 'refCountry', 'games', 'position', 'mean_rater']]
Data_Simple2
allpositions = (Data_Simple2['position'])
unique_pos = set(allpositions)
unique_pos_list = list(unique_pos)
unique_pos_list
# we must convert players positions into proxy numbers (floats) to run random forest
# NOTE(review): the original elif chain tested `== 'NaN'`, which can never match a
# pandas missing value (NaN is a float, not the string 'NaN'); such rows fell
# through to the sentinel 99 and are dropped later. The lookup table below
# reproduces that behaviour exactly (a literal 'NaN' string still maps to 0,
# anything unknown or missing maps to 99).
_POSITION_CODES = {
    'NaN': 0,
    'Center Midfielder': 1,
    'Attacking Midfielder': 2,
    'Goalkeeper': 3,
    'Right Winger': 4,
    'Left Winger': 5,
    'Center Forward': 6,
    'Right Fullback': 7,
    'Right Midfielder': 8,
    'Defensive Midfielder': 9,
    'Center Back': 10,
    'Left Fullback': 11,
    'Left Midfielder': 12,
}
# Unknown or missing positions get the sentinel 99 (filtered out further down).
position_proxy = [_POSITION_CODES.get(pos, 99) for pos in allpositions]
Data_Simple2['position_proxy'] = position_proxy
Data_Simple3 = Data_Simple2[['playerShort', 'fractionYellow', 'fractionYellowRed', 'fractionRed',
'refNum', 'refCountry', 'games', 'position_proxy', 'mean_rater']]
Data_Simple3.head()
colRate = ['mean_rater']
Col_Rating = Data_Simple3[colRate].values
Ratings_Scale = [];
Col_Rating
# Must now convert this continuous scale into a categorical one, with 20 categories
A = len(Col_Rating)
for i in range (0,A):
if Col_Rating[i] >= 0 and Col_Rating[i] <0.05:
Ratings_Scale.append(1);
elif Col_Rating[i] >= 0.05 and Col_Rating[i] <0.1:
Ratings_Scale.append(2);
elif Col_Rating[i] >= 0.1 and Col_Rating[i] <0.15:
Ratings_Scale.append(3);
elif Col_Rating[i] >= 0.15 and Col_Rating[i] <0.2:
Ratings_Scale.append(4);
elif Col_Rating[i] >= 0.2 and Col_Rating[i] <0.25:
Ratings_Scale.append(5);
elif Col_Rating[i] >= 0.25 and Col_Rating[i] <0.3:
Ratings_Scale.append(6);
elif Col_Rating[i] >= 0.3 and Col_Rating[i] <0.35:
Ratings_Scale.append(7);
elif Col_Rating[i] >= 0.35 and Col_Rating[i] <0.4:
Ratings_Scale.append(8);
elif Col_Rating[i] >= 0.4 and Col_Rating[i] <0.45:
Ratings_Scale.append(9);
elif Col_Rating[i] >= 0.45 and Col_Rating[i] <0.5:
Ratings_Scale.append(10);
elif Col_Rating[i] >= 0.5 and Col_Rating[i] <0.55:
Ratings_Scale.append(11);
elif Col_Rating[i] >= 0.55 and Col_Rating[i] <0.6:
Ratings_Scale.append(12);
elif Col_Rating[i] >= 0.6 and Col_Rating[i] <0.65:
Ratings_Scale.append(13);
elif Col_Rating[i] >= 0.65 and Col_Rating[i] <0.7:
Ratings_Scale.append(14);
elif Col_Rating[i] >= 0.7 and Col_Rating[i] <0.75:
Ratings_Scale.append(15);
elif Col_Rating[i] >= 0.75 and Col_Rating[i] <0.8:
Ratings_Scale.append(16);
elif Col_Rating[i] >= 0.8 and Col_Rating[i] <0.85:
Ratings_Scale.append(17);
elif Col_Rating[i] >= 0.85 and Col_Rating[i] <0.9:
Ratings_Scale.append(18);
elif Col_Rating[i] >= 0.9 and Col_Rating[i] <0.95:
Ratings_Scale.append(19);
elif Col_Rating[i] >= 0.95 and Col_Rating[i] <=1:
Ratings_Scale.append(20);
else:
Ratings_Scale.append(99);
Data_Simple3['raterScale'] = Ratings_Scale
Data_Simple3.head()
## Some of the values in trainRes_1 are larger than one! We must delete them from the simple data set to avoid errors in the training process.
# drop values on scale which are equal to 99
Data_Simple4 = Data_Simple3[Data_Simple3.raterScale != 99]
Data_Simple5 = Data_Simple4[Data_Simple4.position_proxy != 99]
Data_Simple5.dropna(axis=0)
Data_Simple5
"""
Explanation: 3) Create the Training and Testing Datframes with only select data
End of explanation
"""
#create test and training matrix
cols = ['games', 'fractionYellow', 'fractionYellowRed', 'fractionRed', 'refNum', 'refCountry', 'position_proxy']
exclude = ['raterScale','mean_rater', 'playerShort']
colsRes1 = ['raterScale']
# Take a random 80% sample of the Data for the Training Sample
Data_Training = Data_Simple5.sample(frac=0.8)
# Need to split this into the data and the results columns
# http://stackoverflow.com/questions/34246336/python-randomforest-unknown-label-error
Input_Data_Training = Data_Training.drop(exclude, axis=1)
#Results_Data_Training = list(Data_Training.raterAvg.values)
Results_Data_Training = Data_Training[colsRes1]
Input_Data_Training.head()
# Take a random 20% sample of the Data for the Testing Sample
#Data_Testing = Data_Simple1.loc[~Data_Simple1.index.isin(Data_Training.index)]
# Need to split this into the data and the results columns
# http://stackoverflow.com/questions/34246336/python-randomforest-unknown-label-error
#Input_Data_Testing = Data_Testing.drop(colsRes, axis=1)
#Results_Data_Testing = list(Data_Testing.raterAvg.values)
# Need to make arrays
# http://www.analyticbridge.com/profiles/blogs/random-forest-in-python
trainArr = Input_Data_Training.as_matrix() #training array
#trainRes = Results_Data_Training.as_matrix(colsRes) #training results
trainRes_1 = Data_Training['raterScale'].values
trainArr
"""
Explanation: II. Preparing the training & test data : Fraction version
1) Create the Training and Testing Datframes with only select data
End of explanation
"""
#Initialize
forest = RandomForestClassifier(n_estimators = 100)
# Fit the training data and create the decision trees
forest = forest.fit(trainArr,trainRes_1)
# Take the same decision trees and run it on the test data
Data_Testing = Data_Simple5.sample(frac=0.2)
Input_Data_Testing = Data_Testing.drop(exclude, axis=1)
testArr = Input_Data_Testing.as_matrix()
results = forest.predict(testArr)
Data_Testing['predictions'] = results
Data_Testing.head()
#see percentage of right predictions
correct = list(Data_Testing[Data_Testing['raterScale'] == Data_Testing['predictions']].index)
A = len(correct)
percCorrect = A/Data_Testing['raterScale'].size
percCorrect
"""
Explanation: III. Random Forest
End of explanation
"""
#See features importance
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(trainArr.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
"""
Explanation: The first attempt resulted in 69.4% of predictions being correct, with n_estimators = 100.
End of explanation
"""
#make necessary changes to parameters
exclude2 = ['raterScale','mean_rater', 'playerShort', 'fractionYellowRed', 'fractionRed', 'fractionYellow']
exclude3 = ['raterScale','mean_rater', 'playerShort', 'fractionYellowRed', 'fractionRed', 'fractionYellow', 'predictions']
Input_Data_Training2 = Data_Training.drop(exclude2, axis=1)
trainArr2 = Input_Data_Training2.as_matrix() #training array
trainRes_2 = Data_Training['raterScale'].values
Input_Data_Testing2 = Data_Testing.drop(exclude3, axis=1)
testArr2 = Input_Data_Testing2.as_matrix()
testArr2
#Re-Initialize Classifier
forest = RandomForestClassifier(n_estimators = 100)
# Fit the training data and create the decision trees
forest = forest.fit(trainArr2,trainRes_2)
# Take the same decision trees and run it on the test data
results2 = forest.predict(testArr2)
Data_Testing['predictions2'] = results2
Data_Testing.head()
#see percentage of right predictions
correct = list(Data_Testing[Data_Testing['raterScale'] == Data_Testing['predictions2']].index)
A = len(correct)
percCorrect = A/Data_Testing['raterScale'].size
percCorrect
"""
Explanation: redCountry, Games, Position, and redNum are the most important features. We could therefore drop some features already, such as fraction yellow and fraction yellowRed & fraction Red. Let us delete all cards and see if we can better predict this.
End of explanation
"""
#See features importance
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(trainArr2.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
"""
Explanation: Accuracy goes down to 67.3% from changing the input parameters...
End of explanation
"""
exclude4 = ['raterScale','mean_rater', 'playerShort', 'refNum', 'refCountry', 'games', 'position_proxy']
exclude5 = ['raterScale','mean_rater', 'playerShort', 'refNum', 'refCountry', 'games', 'position_proxy', 'predictions', 'predictions2']
Input_Data_Training3 = Data_Training.drop(exclude4, axis=1)
trainArr3 = Input_Data_Training3.as_matrix() #training array
trainRes_3 = Data_Training['raterScale'].values
Input_Data_Testing3 = Data_Testing.drop(exclude5, axis=1)
testArr3 = Input_Data_Testing3.as_matrix()
testArr3
#Re-Initialize Classifier
forest = RandomForestClassifier(n_estimators = 100)
# Fit the training data and create the decision trees
forest = forest.fit(trainArr3,trainRes_3)
# Take the same decision trees and run it on the test data
results3 = forest.predict(testArr3)
Data_Testing['predictions3'] = results3
Data_Testing.head()
#see percentage of right predictions
correct = list(Data_Testing[Data_Testing['raterScale'] == Data_Testing['predictions3']].index)
A = len(correct)
percCorrect = A/Data_Testing['raterScale'].size
percCorrect
"""
Explanation: The most important features in this case are refNum, games, refCountry, and position_proxy.
Alternatively we can see what happens when we only use the number of cards...
End of explanation
"""
# Curve for Test 1 - all variables
Test1 = [];
for i in range (0,20):
count = list(Data_Testing[Data_Testing['predictions']==i].index)
A = len(count)
Test1.append(A)
# Curve for Test 2 - exclude card variables
Test2 = [];
for i in range (0,20):
count2 = list(Data_Testing[Data_Testing['predictions2']==i].index)
B = len(count2)
Test2.append(B)
# Curve for Test 3 - only card variables
Test3 = [];
for i in range (0,20):
count3 = list(Data_Testing[Data_Testing['predictions3']==i].index)
C = len(count3)
Test3.append(C)
# Real Curve
Test4 = [];
for i in range (0,20):
count4 = list(Data_Testing[Data_Testing['raterScale']==i].index)
D = len(count4)
Test4.append(D)
import matplotlib.patches as mpatches
X = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20];
T1 = plt.plot(X, Test1,'b')
T2 = plt.plot(X, Test2, 'r')
T3 = plt.plot(X, Test3, 'g')
T4 = plt.plot(X, Test4, 'y')
plt.ylabel('Count')
plt.xlabel('Rater Scale')
plt.show()
"""
Explanation: The percentage of correct ratings drops to 32%...
BONUS Question: We can try to analyze accuracy across the scale for the three cases above and see if there is bias in any extreme
End of explanation
"""
|
antongrin/EasyMig | EasyMig_v4-interact3.ipynb | apache-2.0 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 13:21:45 2016
@author: GrinevskiyAS
"""
from __future__ import division
import numpy as np
from numpy import sin,cos,tan,pi,sqrt
import matplotlib as mpl
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
%matplotlib inline
font = {'family': 'Arial', 'weight': 'normal', 'size':14}
mpl.rc('font', **font)
mpl.rc('figure', figsize=(10, 8))
"""
Explanation: 0. Before start
OK, to begin we need to import some standart Python modules
End of explanation
"""
#This would be the size of each grid cell (X is the spatial coordinate, T is two-way time)
xstep = 5
tstep = 5
#size of the whole grid
xmax = 320
tmax = 220
#that's the arrays of x and t
xarray = np.arange(0, xmax, xstep).astype(float)
tarray = np.arange(0, tmax, tstep).astype(float)
#now fimally we created a 2D array img, which is now all zeros, but later we will add some amplitudes there
img = np.zeros((len(xarray), len(tarray)))
"""
Explanation: 1. Setup
First, let us setup the working area.
End of explanation
"""
plt.imshow(img.T, interpolation = 'none', cmap = cm.Greys, vmin = -2,vmax = 2,
extent = [xarray[0] - xstep/2, xarray[-1] + xstep/2, tarray[-1] + tstep/2, tarray[0] - tstep/2])
"""
Explanation: Let's show our all-zero image
End of explanation
"""
class Hyperbola:
    """Diffraction hyperbola of a single point diffractor.

    For a diffractor at data-domain position (x0, t0) in a homogeneous
    medium with P-wave velocity v, computes the two-way traveltime curve
    t(x) = sqrt(t0^2 + (2*(x - x0)/v)^2) over `xarray`, discretizes it onto
    the (xarray, tarray) grid, and can stamp a wavelet along the curve into
    a 2-D image via `add_to_img`.
    """

    def __init__(self, xarray, tarray, x0, t0, v=2):
        """
        input parameters define a diffractor's position (x0, t0),
        P-wave velocity of homogeneous subsurface,
        and x- and t-arrays to compute traveltimes on.
        """
        self.x = xarray
        self.x0 = x0
        self.t0 = t0
        self.v = v
        # Two-way traveltime along the spatial axis.
        self.t = sqrt(t0**2 + (2*(xarray - x0)/v)**2)
        # Discard samples whose traveltime falls outside the displayed window.
        tbegin = tarray[0]
        tend = tarray[-1]
        inside = (self.t >= tbegin) & (self.t <= tend)
        self.x = self.x[inside]
        self.t = self.t[inside]
        # Amplitude decay from geometrical spreading, relative to the apex.
        self.amp = 1/(self.t/self.t0)
        self.grid_resample(xarray, tarray)

    def grid_resample(self, xarray, tarray):
        """Compute at which cells of the (x, t) image grid the hyperbola lands."""
        # Derive cell sizes from the grid arrays themselves; the original code
        # silently read a module-level global `xstep` here, which broke when the
        # class was used with a different grid than the notebook's.
        xstep = xarray[1] - xarray[0]
        tstep = tarray[1] - tarray[0]
        self.xind = ((self.x - xarray[0])/xstep).astype(int)          # X cell numbers
        self.tind = np.round((self.t - tarray[0])/tstep).astype(int)  # T cell numbers
        self.tind = self.tind[self.tind*tstep <= tarray[-1]]          # drop T's exceeding max T
        self.tgrid = tarray[self.tind]                                # 'gridded' T-values

    def add_to_img(self, img, wavelet):
        """Add the hyperbola into `img`, centring `wavelet` at each grid cell.

        Amplitudes are scaled by the geometrical-spreading factor `self.amp`.
        Returns the mutated `img` for convenience.
        """
        maxind = np.size(img, 1)
        wavlen = np.floor(len(wavelet)/2).astype(int)
        ind_begin = self.tind - wavlen
        # Skip samples whose wavelet footprint would run past the image bottom.
        ind_to_use = self.tind < maxind - wavlen - 1
        for i, sample in enumerate(wavelet):
            img[self.xind[ind_to_use], ind_begin[ind_to_use] + i] = \
                img[self.xind[ind_to_use], ind_begin[ind_to_use] + i] + sample*self.amp[ind_to_use]
        return img
"""
Explanation: 2. Main class definition
What we are now going to do is create a class named Hyperbola
Each object of this class is capable of computing traveltimes to a certain subsurface point (diffractor) and plotting this point response (hyperbola) on a grid
How? to more clearly define a class? probably change to a function?
End of explanation
"""
# Build one test hyperbola and plot its exact vs grid-snapped traveltimes.
Hyp_test = Hyperbola(xarray, tarray, x0 = 100, t0 = 30, v = 2)
#Create a figure and add axes to it
fgr_test1 = plt.figure(figsize=(7,5), facecolor='w')
ax_test1 = fgr_test1.add_subplot(111)
#Now plot Hyp_test's parameters: X vs T
ax_test1.plot(Hyp_test.x, Hyp_test.t, 'r', lw = 2)
#and their 'gridded' equivalents
# NOTE(review): assumes tgrid has the same length as x (no samples were
# filtered out by grid_resample) -- confirm for deeper t0 values.
ax_test1.plot(Hyp_test.x, Hyp_test.tgrid, ls='none', marker='o', ms=6, mfc=[0,0.5,1],mec='none')
#Some commands to add gridlines, change the direction of T axis and move x axis to top
ax_test1.set_ylim(tarray[-1],tarray[0])
ax_test1.xaxis.set_ticks_position('top')
ax_test1.grid(True, alpha = 0.1, ls='-',lw=.5)
ax_test1.set_xlabel('X, m')
ax_test1.set_ylabel('T, ms')
ax_test1.xaxis.set_label_position('top')
plt.show()
"""
Explanation: For testing purposes, let's create an object named Hyp_test and view its parameters
End of explanation
"""
# Three point diffractors, given by their (x0, t0) data-domain coordinates.
point_diff_x0 = [100, 150, 210]
point_diff_t0 = [100, 50, 70]
plt.scatter(point_diff_x0,point_diff_t0, c='r',s=70)
plt.xlim(0, xmax)
# Reverse the T axis: time increases downward, as is conventional.
plt.ylim(tmax, 0)
plt.gca().set_xlabel('X, m')
plt.gca().set_ylabel('T, ms')
plt.gca().xaxis.set_ticks_position('top')
plt.gca().xaxis.set_label_position('top')
plt.gca().grid(True, alpha = 0.1, ls='-',lw=.5)
"""
Explanation: 3. Creating the model and 'forward modelling'
OK, now let's define a subsurface model. For the sake of simplicity, the model will consist of two types of objects:
1. Point diffractor in a homogeneous medium
* defined by their coordinates $(x_0, t_0)$ in data domain.
2. Plane reflecting surface
* defined by their end points $(x_1, t_1)$ and $(x_2, t_2)$, also in data domain.
We will be able to add any number of these objects to image.
Let's start by adding three point diffractors:
End of explanation
"""
# One Hyperbola (traveltime response) per point diffractor.
hyps = [Hyperbola(xarray, tarray, xd, td, v=2)
        for xd, td in zip(point_diff_x0, point_diff_t0)]
"""
Explanation: Next step is computing traveltimes for these subsurface diffractors. This is done by creating an instance of Hyperbola class for every diffractor.
End of explanation
"""
# A minimal wavelet: the second-finite-difference stencil [-1, 2, -1].
wav1 = np.array([-1,2,-1])
plt.axhline(0,c='k')
markerline, stemlines, baseline = plt.stem((np.arange(len(wav1)) - np.floor(len(wav1)/2)).astype(int), wav1)
plt.gca().set_xlim(-2*len(wav1), 2*len(wav1))
plt.gca().set_ylim(np.min(wav1)-1, np.max(wav1)+1)
# Stamp every diffraction hyperbola into the image.
# NOTE(review): assumes `img` was initialised earlier in the notebook -- confirm.
for hyp_i in hyps:
    hyp_i.add_to_img(img,wav1)
plt.imshow(img.T,interpolation='none',cmap=cm.Greys, vmin=-2,vmax=2, extent=[xarray[0]-xstep/2, xarray[-1]+xstep/2, tarray[-1]+tstep/2, tarray[0]-tstep/2])
plt.gca().xaxis.set_ticks_position('top')
plt.gca().grid(ls=':', alpha=0.25, lw=1, c='w' )
"""
Explanation: ~~Next step is computing Green's functions for these subsurface diffractors. To do this, we need to setup a wavelet.~~
Of course, we are going to create an extremely simple wavelet.
End of explanation
"""
class Line:
    """A straight reflector in the (x, t) data domain.

    Described by its start (xmin, tmin) and end (xmax, tmax) coordinates,
    snapped to the (xarray, tarray) grid.
    """

    def __init__(self, xmin, xmax, tmin, tmax, xarray, tarray):
        self.xmin = xmin
        self.xmax = xmax
        self.tmin = tmin
        self.tmax = tmax
        xstep = xarray[1] - xarray[0]
        tstep = tarray[1] - tarray[0]
        # Snap the end points down to the nearest grid nodes.
        xmin = xmin - np.mod(xmin, xstep)
        xmax = xmax - np.mod(xmax, xstep)
        tmin = tmin - np.mod(tmin, tstep)
        tmax = tmax - np.mod(tmax, tstep)
        self.x = np.arange(xmin, xmax + xstep, xstep)
        # Linear interpolation of T between the two end points.
        self.t = tmin + (tmax - tmin) * (self.x - xmin) / (xmax - xmin)
        self.imgind = ((self.x - xarray[0]) / xstep).astype(int)  # X cell indices
        self.tind = ((self.t - tarray[0]) / tstep).astype(int)    # T cell indices

    def add_to_img(self, img, wavelet):
        """Stamp the line into `img`, adding `wavelet` around each T cell.

        Returns the modified image. As before, the kept indices are also
        written back to self.imgind / self.tind.
        """
        maxind = np.size(img, 1)
        wavlen = np.floor(len(wavelet) / 2).astype(int)
        # Fix: bound the *whole* wavelet footprint, not just the centre
        # sample, and reject negative start indices (which would silently
        # wrap around to the end of the time axis in numpy). For the
        # 3-sample wavelet used in this notebook the upper bound is
        # identical to the previous `tind < maxind - 1` check.
        keep = (self.tind >= wavlen) & (self.tind + len(wavelet) - wavlen <= maxind)
        self.imgind = self.imgind[keep]
        self.tind = self.tind[keep]
        ind_begin = self.tind - wavlen
        for i, sample_amp in enumerate(wavelet):
            img[self.imgind, ind_begin + i] = img[self.imgind, ind_begin + i] + sample_amp
        return img
"""
Explanation: Define a Line class
End of explanation
"""
# Stamp two reflectors into the image. Fix: the second add_to_img call was
# missing its closing parenthesis in the original cell (SyntaxError).
line1 = Line(100, 250, 50, 150, xarray, tarray)
img = line1.add_to_img(img, wav1)
line2 = Line(40, 270, 175, 100, xarray, tarray)
img = line2.add_to_img(img, wav1)
plt.imshow(img.T, interpolation='none', cmap=cm.Greys, vmin=-2, vmax=2,
           extent=[xarray[0]-xstep/2, xarray[-1]+xstep/2, tarray[-1]+tstep/2, tarray[0]-tstep/2])
plt.gca().xaxis.set_ticks_position('top')
plt.gca().grid(ls=':', alpha=0.25, lw=1, c='w' )
"""
Explanation: Create a line and add it to image
End of explanation
"""
def migrate(img, v, aper, xarray, tarray):
    """Kirchhoff-style migration of `img` by diffraction-stack summation.

    For every output cell (x0, t0) the amplitudes along the corresponding
    diffraction hyperbola (velocity `v`, aperture `aper`) are averaged.
    Returns a new image of the same shape; `img` is not modified.
    """
    imgmig = np.zeros_like(img)
    xstep = xarray[1] - xarray[0]
    tstep = tarray[1] - tarray[0]  # fix: was relying on a global 'tstep'
    for x0 in xarray:
        for t0 in tarray[1:-1]:
            # only a region between (x0-aper) and (x0+aper) should be taken into account
            xmig = xarray[(x0 - aper <= xarray) & (xarray <= x0 + aper)]
            hi = Hyperbola(xmig, tarray, x0, t0, v)
            migind_start = hi.x[0] / xstep
            migind_stop = (hi.x[-1] + xstep) / xstep
            hi.imgind = np.arange(migind_start, migind_stop).astype(int)
            # Sum (in fact, take the mean of) the amplitudes on hyperbola hi.
            # NOTE(review): assumes xarray/tarray start at 0 and that no
            # traveltime sample was filtered out by grid_resample -- confirm.
            si = np.mean(img[hi.imgind, hi.tind] * hi.amp)
            imgmig[(x0 / xstep).astype(int), (t0 / tstep).astype(int)] = si
    return imgmig
"""
Explanation: Excellent. The image now is pretty messy, so we need to migrate it and see what we can achieve
4. Migration definition
End of explanation
"""
# Migration velocity must match the modelling velocity (2) for the
# hyperbolas to collapse back to points.
vmig = 2
aper = 200
res = migrate(img, vmig, aper, xarray, tarray)
plt.imshow(res.T,interpolation='none',vmin=-2,vmax=2,cmap=cm.Greys, extent=[xarray[0]-xstep/2, xarray[-1]+xstep/2, tarray[-1]+tstep/2, tarray[0]-tstep/2])
"""
Explanation: 5. Migration application
End of explanation
"""
def migshow(vmig_i, aper_i, gain_i, interp):
    """Migrate `img` with the given parameters and display the result."""
    migrated = migrate(img, vmig_i, aper_i, xarray, tarray)
    interp_style = 'bilinear' if interp else 'none'
    plt.imshow(migrated.T, interpolation=interp_style, vmin=-gain_i, vmax=gain_i,
               cmap=cm.Greys,
               extent=[xarray[0]-xstep/2, xarray[-1]+xstep/2,
                       tarray[-1]+tstep/2, tarray[0]-tstep/2])
    plt.title('Vmig = '+str(vmig_i))
    plt.show()
# Interactive exploration of the migration parameters (ipywidgets).
interact(migshow, vmig_i = widgets.FloatSlider(min = 1.0,max = 3.0, step = 0.01, value=2.0,continuous_update=False,description='Migration velocity: '),
aper_i = widgets.IntSlider(min = 10,max = 500, step = 1, value=200,continuous_update=False,description='Migration aperture: '),
gain_i = widgets.FloatSlider(min = 0.0,max = 5.0, step = 0.1, value=2.0,continuous_update=False,description='Gain: '),
interp = widgets.Checkbox(value=True, description='interpolate'))
#interact(migrate, img=fixed(img), v = widgets.IntSlider(min = 1.0,max = 3.0, step = 0.1, value=2), aper=fixed(aper), xarray=fixed(xarray), tarray=fixed(tarray))
"""
Explanation: Excellent!
The next section is only for interactive parameter selection
6. Interactive parameter change
End of explanation
"""
|
WNoxchi/Kaukasos | FAI_old/lesson1/dogs_cats_redux.ipynb | mit | #Verify we are in the lesson1 directory
%pwd
#Create references to important directories we will use over and over
import os, sys
current_dir = os.getcwd()
LESSON_HOME_DIR = current_dir
# DATA_HOME_DIR = current_dir+'/data/redux'
DATA_HOME_DIR = current_dir+'/data'
#Allow relative imports to directories above lesson1/
# sys.path.insert(1, os.path.join(sys.path[0], '..'))
sys.path.insert(1, os.path.join(LESSON_HOME_DIR, '../utils'))
#import modules
from utils import *
from vgg16 import Vgg16
#Instantiate plotting tool
#In Jupyter notebooks, you will need to run this command before doing any plotting
%matplotlib inline
"""
Explanation: Dogs vs Cats Redux
In this tutorial, you will learn how to generate and submit predictions to a Kaggle competition
Dogs vs. Cats Redux: Kernels Edition
To start you will need to download and unzip the competition data from Kaggle and ensure your directory structure looks like this
utils/
vgg16.py
utils.py
lesson1/
redux.ipynb
data/
redux/
train/
cat.437.jpg
dog.9924.jpg
cat.1029.jpg
dog.4374.jpg
test/
231.jpg
325.jpg
1235.jpg
9923.jpg
You can download the data files from the competition page here or you can download them from the command line using the Kaggle CLI.
You should launch your notebook inside the lesson1 directory
cd lesson1
jupyter notebook
End of explanation
"""
#Create directories for the validation set and a small sample set
%cd $DATA_HOME_DIR
%mkdir valid
%mkdir results
%mkdir -p sample/train
%mkdir -p sample/test
%mkdir -p sample/valid
%mkdir -p sample/results
%mkdir -p test/unknown
%cd $DATA_HOME_DIR/train
g = glob('*.jpg')
shuf = np.random.permutation(g)
# Move 2000 random images out of train/ to form the validation set.
for i in range(2000): os.rename(shuf[i], DATA_HOME_DIR+'/valid/' + shuf[i])
from shutil import copyfile
g = glob('*.jpg')
shuf = np.random.permutation(g)
# Copy (not move) a small random sample for quick experiments.
for i in range(200): copyfile(shuf[i], DATA_HOME_DIR+'/sample/train/' + shuf[i])
%cd $DATA_HOME_DIR/valid
g = glob('*.jpg')
shuf = np.random.permutation(g)
for i in range(50): copyfile(shuf[i], DATA_HOME_DIR+'/sample/valid/' + shuf[i])
"""
Explanation: Action Plan
Create Validation and Sample sets
Rearrange image files into their respective directories
Finetune and Train model
Generate predictions
Validate predictions
Submit predictions to Kaggle
Create validation set and sample
End of explanation
"""
# Keras' flow_from_directory expects one sub-directory per class.
#Divide cat/dog images into separate directories
%cd $DATA_HOME_DIR/sample/train
%mkdir cats
%mkdir dogs
%mv cat.*.jpg cats/
%mv dog.*.jpg dogs/
%cd $DATA_HOME_DIR/sample/valid
%mkdir cats
%mkdir dogs
%mv cat.*.jpg cats/
%mv dog.*.jpg dogs/
%cd $DATA_HOME_DIR/valid
%mkdir cats
%mkdir dogs
%mv cat.*.jpg cats/
%mv dog.*.jpg dogs/
%cd $DATA_HOME_DIR/train
%mkdir cats
%mkdir dogs
%mv cat.*.jpg cats/
%mv dog.*.jpg dogs/
# Create single 'unknown' class for test set
%cd $DATA_HOME_DIR/test
%mv *.jpg unknown/
"""
Explanation: Rearrange image files into their respective directories
End of explanation
"""
%cd $DATA_HOME_DIR
#Set path to sample/ path if desired
path = DATA_HOME_DIR + '/' #'/sample/'
test_path = DATA_HOME_DIR + '/test/' #We use all the test data
results_path=DATA_HOME_DIR + '/results/'
train_path=path + '/train/'
valid_path=path + '/valid/'
#import Vgg16 helper class
vgg = Vgg16()
#Set constants. You can experiment with no_of_epochs to improve the model
batch_size=64
no_of_epochs=3
#Finetune the model
batches = vgg.get_batches(train_path, batch_size=batch_size)
val_batches = vgg.get_batches(valid_path, batch_size=batch_size*2)
vgg.finetune(batches)
#Not sure if we set this for all fits
vgg.model.optimizer.lr = 0.01
#Notice we are passing in the validation dataset to the fit() method
#For each epoch we test our model against the validation set
latest_weights_filename = None
for epoch in range(no_of_epochs):
print "Running epoch: %d" % epoch
vgg.fit(batches, val_batches, nb_epoch=1)
# latest_weights_filename = 'ft%d.h5' % epoch
# vgg.model.save_weights(results_path+latest_weights_filename)
print "Completed %s fit operations" % no_of_epochs
"""
Explanation: Finetuning and Training
End of explanation
"""
# Score the test set. Fix: Python 2 print statements -> print() calls,
# consistent with the Python 3 code elsewhere in this file.
batches, preds = vgg.test(test_path, batch_size = batch_size*2)
#For every image, vgg.test() generates two probabilities
#based on how we've ordered the cats/dogs directories.
#It looks like column one is cats and column two is dogs
print(preds[:5])
filenames = batches.filenames
print(filenames[:5])
#You can verify the column ordering by viewing some images
from PIL import Image
Image.open(test_path + filenames[2])
#Save our test results arrays so we can use them again later
save_array(results_path + 'test_preds.dat', preds)
save_array(results_path + 'filenames.dat', filenames)
"""
Explanation: Generate Predictions
Let's use our new model to make predictions on the test dataset
End of explanation
"""
# Reload the last saved weights and score the validation set.
vgg.model.load_weights(results_path+latest_weights_filename)
val_batches, probs = vgg.test(valid_path, batch_size = batch_size)
filenames = val_batches.filenames
expected_labels = val_batches.classes #0 or 1
#Round our predictions to 0/1 to generate labels
# NOTE(review): column 0 appears to be P(cat), so 1 - p gives the dog
# label -- verify against the class_indices ordering.
our_predictions = probs[:,0]
our_labels = np.round(1-our_predictions)
from keras.preprocessing import image

# Helper to display validation-set images selected by index.
# `plots` is a helper function defined in utils.py.
def plots_idx(idx, titles=None):
    """Plot the validation images at positions `idx`, with optional titles."""
    imgs = []
    for i in idx:
        imgs.append(image.load_img(valid_path + filenames[i]))
    plots(imgs, titles=titles)
# Fix: the Python 2 `print` statements in this cell were converted to
# print() calls, consistent with the Python 3 code elsewhere in this file.
#Number of images to view for each visualization task
n_view = 4
#1. A few correct labels at random
correct = np.where(our_labels==expected_labels)[0]
print("Found %d correct labels" % len(correct))
idx = permutation(correct)[:n_view]
plots_idx(idx, our_predictions[idx])
#2. A few incorrect labels at random
incorrect = np.where(our_labels!=expected_labels)[0]
print("Found %d incorrect labels" % len(incorrect))
idx = permutation(incorrect)[:n_view]
plots_idx(idx, our_predictions[idx])
#3a. The images we most confident were cats, and are actually cats
correct_cats = np.where((our_labels==0) & (our_labels==expected_labels))[0]
print("Found %d confident correct cats labels" % len(correct_cats))
most_correct_cats = np.argsort(our_predictions[correct_cats])[::-1][:n_view]
plots_idx(correct_cats[most_correct_cats], our_predictions[correct_cats][most_correct_cats])
#3b. The images we most confident were dogs, and are actually dogs
correct_dogs = np.where((our_labels==1) & (our_labels==expected_labels))[0]
print("Found %d confident correct dogs labels" % len(correct_dogs))
most_correct_dogs = np.argsort(our_predictions[correct_dogs])[:n_view]
plots_idx(correct_dogs[most_correct_dogs], our_predictions[correct_dogs][most_correct_dogs])
#4a. The images we were most confident were cats, but are actually dogs
incorrect_cats = np.where((our_labels==0) & (our_labels!=expected_labels))[0]
print("Found %d incorrect cats" % len(incorrect_cats))
if len(incorrect_cats):
    most_incorrect_cats = np.argsort(our_predictions[incorrect_cats])[::-1][:n_view]
    plots_idx(incorrect_cats[most_incorrect_cats], our_predictions[incorrect_cats][most_incorrect_cats])
#4b. The images we were most confident were dogs, but are actually cats
incorrect_dogs = np.where((our_labels==1) & (our_labels!=expected_labels))[0]
print("Found %d incorrect dogs" % len(incorrect_dogs))
if len(incorrect_dogs):
    most_incorrect_dogs = np.argsort(our_predictions[incorrect_dogs])[:n_view]
    plots_idx(incorrect_dogs[most_incorrect_dogs], our_predictions[incorrect_dogs][most_incorrect_dogs])
#5. The most uncertain labels (ie those with probability closest to 0.5).
most_uncertain = np.argsort(np.abs(our_predictions-0.5))
plots_idx(most_uncertain[:n_view], our_predictions[most_uncertain])
"""
Explanation: Validate Predictions
Keras' fit() function conveniently shows us the value of the loss function, and the accuracy, after every epoch ("epoch" refers to one full run through all training examples). The most important metrics for us to look at are for the validation set, since we want to check for over-fitting.
Tip: with our first model we should try to overfit before we start worrying about how to reduce over-fitting - there's no point even thinking about regularization, data augmentation, etc if you're still under-fitting! (We'll be looking at these techniques shortly).
As well as looking at the overall metrics, it's also a good idea to look at examples of each of:
1. A few correct labels at random
2. A few incorrect labels at random
3. The most correct labels of each class (ie those with highest probability that are correct)
4. The most incorrect labels of each class (ie those with highest probability that are incorrect)
5. The most uncertain labels (ie those with probability closest to 0.5).
Let's see what we can learn from these examples. (In general, this is a particularly useful technique for debugging problems in the model. However, since this model is so simple, there may not be too much to learn at this stage.)
Calculate predictions on validation set, so we can find correct and incorrect examples:
End of explanation
"""
from sklearn.metrics import confusion_matrix
# Rows are true classes, columns are predicted classes.
cm = confusion_matrix(expected_labels, our_labels)
"""
Explanation: Perhaps the most common way to analyze the result of a classification model is to use a confusion matrix. Scikit-learn has a convenient function we can use for this purpose:
End of explanation
"""
# plot_confusion_matrix is a helper function from utils.py
plot_confusion_matrix(cm, val_batches.class_indices)
"""
Explanation: We can just print out the confusion matrix, or we can show a graphical view (which is mainly useful for dependent variables with a larger number of categories).
End of explanation
"""
# Fix: Python 2 print statements -> print() calls, consistent with the
# Python 3 code elsewhere in this file.
#Load our test predictions from file
preds = load_array(results_path + 'test_preds.dat')
filenames = load_array(results_path + 'filenames.dat')
#Grab the dog prediction column
isdog = preds[:,1]
print("Raw Predictions: " + str(isdog[:5]))
print("Mid Predictions: " + str(isdog[(isdog < .6) & (isdog > .4)]))
print("Edge Predictions: " + str(isdog[(isdog == 1) | (isdog == 0)]))
"""
Explanation: Submit Predictions to Kaggle!
Here's the format Kaggle requires for new submissions:
imageId,isDog
1242, .3984
3947, .1000
4539, .9082
2345, .0000
Kaggle wants the imageId followed by the probability of the image being a dog. Kaggle uses a metric called Log Loss to evaluate your submission.
End of explanation
"""
#Visualize Log Loss when True value = 1
#y-axis is log loss, x-axis is probabilty that label = 1
#As you can see Log Loss increases rapidly as we approach 0
#But increases slowly as our predicted probability gets closer to 1
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import log_loss
x = [i*.0001 for i in range(1,10000)]
y = [log_loss([1],[[i*.0001,1-(i*.0001)]],eps=1e-15) for i in range(1,10000,1)]
plt.plot(x, y)
plt.axis([-.05, 1.1, -.8, 10])
plt.title("Log Loss when true label = 1")
plt.xlabel("predicted probability")
plt.ylabel("log loss")
plt.show()
#So to play it safe, we use a sneaky trick to round down our edge predictions
#Swap all ones with .95 and all zeros with .05
isdog = isdog.clip(min=0.05, max=0.95)
#Extract imageIds from the filenames in our test/unknown directory
filenames = batches.filenames
ids = np.array([int(f[8:f.find('.')]) for f in filenames])
"""
Explanation: Log Loss doesn't support probability values of 0 or 1--they are undefined (and we have many). Fortunately, Kaggle helps us by offsetting our 0s and 1s by a very small value. So if we upload our submission now we will have lots of .99999999 and .000000001 values. This seems good, right?
Not so. There is an additional twist due to how log loss is calculated--log loss rewards predictions that are confident and correct (p=.9999,label=1), but it punishes predictions that are confident and wrong far more (p=.0001,label=1). See visualization below.
End of explanation
"""
# Join ids and probabilities into an (N, 2) array of [imageId, isDog].
subm = np.stack([ids,isdog], axis=1)
subm[:5]
%cd $DATA_HOME_DIR
submission_file_name = 'submission1.csv'
# header='id,label' is the column header Kaggle expects for this competition
np.savetxt(submission_file_name, subm, fmt='%d,%.5f', header='id,label', comments='')
from IPython.display import FileLink
%cd $LESSON_HOME_DIR
FileLink('data/redux/'+submission_file_name)
"""
Explanation: Here we join the two columns into an array of [imageId, isDog]
End of explanation
"""
|
ghvn7777/ghvn7777.github.io | content/fluent_python/21_metaclass.ipynb | apache-2.0 | class Dog:
def __init__(self, name, weight, owner):
self.name = name
self.weight = weight
self.owner = owner
rex = Dog('Rex', 30, 'Bob')
rex
"""
Explanation: 类元编程是指在运行时创建或定制类的技艺,在 Python 中,类是一等对象,因此任何时候都可以使用函数新建类,无需使用 class 关键字。类装饰器也是函数,不公审查,修改甚至可以把被装饰类替换成其它类。最后,元类是类元编程最高级的工具,使用元类可以创建具有某种特质的全新类种,例如我们见过的抽象基类
类工厂函数
标准库的一个类工厂函数 -- collections.namedtuple。我们把一个类名和几个属性名传给这个函数,它会创建一个 tuple 的子类,其中元素通过名称获取,还为调试提供了友好的字符串表示(__repr__)
End of explanation
"""
def record_factory(cls_name, field_names):
    """Build a mutable, slotted record class named `cls_name` whose fields
    are `field_names` (an iterable of names, or one comma/space separated
    string)."""
    try:
        # Duck typing: try to split field_names on commas or spaces;
        # if that fails, assume it is already an iterable of names.
        field_names = field_names.replace(',', ' ').split()
    except AttributeError: # no .replace or .split method
        pass # assume field_names is already a sequence of identifiers
    field_names = tuple(field_names) # tuple of attribute names; becomes __slots__
    # __slots__ restricts which attributes instances of the new class may have
    # this function becomes the __init__ of the new class
    def __init__(self, *args, **kwargs):
        attrs = dict(zip(self.__slots__, args))
        attrs.update(kwargs)
        for name, value in attrs.items():
            setattr(self, name, value)
    # make instances iterable, yielding field values in __slots__ order
    def __iter__(self):
        for name in self.__slots__:
            yield getattr(self, name)
    def __repr__(self):
        values = ', '.join('{}={!r}'.format(*i) for i
                           in zip(self.__slots__, self))
        return '{}({})'.format(self.__class__.__name__, values)
    # assemble the dict of class attributes
    cls_attrs = dict(__slots__ = field_names,
                     __init__ = __init__, # equivalent to '__init__': __init__
                     __iter__ = __iter__,
                     __repr__ = __repr__)
    # build the new class with the three-argument type() call and return it
    return type(cls_name, (object,), cls_attrs)
Dog = record_factory('Dog', 'name weight owner')
rex = Dog('Rex', 30, 'Bob')
rex
name, weight, _ = rex  # instances are iterable, so tuple unpacking works
name, weight
"{2}'s dog weight {1}kg".format(*rex)  # iterable, so * unpacking works too
# Fix: the trailing note on this line lacked a '#' in the original cell,
# which made it a SyntaxError.
rex.weight = 32  # record instances are mutable
rex
Dog.__mro__  # the new class subclasses object; unrelated to the factory function
"""
Explanation: 这段代码各个字段名都出现了三次,让人厌烦,字符串表现形式也不友好,我们编写一个 record_factory 类工厂函数解决这个问题
End of explanation
"""
# Build the same class dynamically via the three-argument type() call:
# type(name, bases, attribute_dict).
foo_attrs = {'bar': True}
Foo = type('Foo', (), foo_attrs)
Foo
Foo.bar
f = Foo()
f
"""
Explanation: 通常,我们将 type 视为函数,因为我们像函数那样使用它,type(my_object) 获取对象所属的类 -- 作用与 my_object.__class__ 相同。然而,type 是一个类,当成类使用的时候传入三个参数可以新建一个类(是的,type 可以根据传入的不同参数有不同的用法
type(类名, 父类的元组(针对继承的情况,可以为空),包含属性的字典(名称和值))
比如下面的代码是等价的
```
class MyShinyClass:
pass
type('MyShinyClass', (), {})
```
因此我们要新建如下类:
class Foo:
bar = True
可以写成:
End of explanation
"""
# Everything in Python is an object; each object's class is itself an
# instance of some class, and that chain always ends at type.
age = 35
print(age.__class__)
age.__class__.__class__
name = 'bob'
print(name.__class__)
name.__class__.__class__
def foo(): pass
foo.__class__
foo.__class__.__class__
class Bar(object): pass
b = Bar()
b.__class__
b.__class__.__class__
"""
Explanation: 如果你继承 Foo 类,可以写成
FooChild = type('FooChild', (Foo,),{})
我们看到 type 函数可以创建一个类,因为 type 是元类,Python 中所有对象都是由 type 创建而来,注意,Python 中所有的东西都是对象,包括 整数,字符串、函数以及类,都由 type 创建而来
End of explanation
"""
import abc
class AutoStorage:
    """Descriptor that stores each value in a uniquely named instance attribute."""
    __counter = 0  # per-descriptor-class instance counter, used for unique names
    def __init__(self):
        cls = self.__class__
        prefix = cls.__name__
        index = cls.__counter
        # e.g. '_Quantity#0' -- unique per descriptor instance
        self.storage_name = '_{}#{}'.format(prefix, index)
        cls.__counter += 1
    def __get__(self, instance, owner):
        if instance is None:
            # accessed on the class itself: return the descriptor
            return self
        else:
            return getattr(instance, self.storage_name)
    def __set__(self, instance, value):
        setattr(instance, self.storage_name, value) # stores without validation
class Validated(abc.ABC, AutoStorage): # abstract class that also inherits from AutoStorage
    def __set__(self, instance, value):
        # __set__ delegates validation to the validate method
        value = self.validate(instance, value)
        # the returned value goes to the superclass __set__, which stores it
        super().__set__(instance, value)
    @abc.abstractmethod
    def validate(self, instance, value): # abstract method
        '''return validated value or raise ValueError'''
class Quantity(Validated):
    '''a number greater than zero'''
    # Concrete subclasses only need to implement validate with their rule.
    def validate(self, instance, value):
        if value <= 0:
            raise ValueError('value must be > 0')
        return value
class NonBlank(Validated):
'''a string with at least one not-space character'''
def validate(self, instance, value):
value = value.strip()
if len(value) == 0:
raise ValueError('value cannot be empty or blank')
return value
# class LineItem: # 托管类
# weight = Quantity()
# price = Quantity()
# description = NonBlank()
# def __init__(self, description, weight, price):
# self.description = description
# self.weight = weight
# self.price = price
# def subtotal(self):
# return self.weight * self.price
## --------------------
## 上面的和 上一章代码相同, LineItem 类只加了 1 行,在下面实现
## --------------------
def entity(cls):
    """Class decorator: give each Validated descriptor in `cls` a
    descriptive storage name of the form '_<DescriptorClass>#<attr>'."""
    for attr_name, descriptor in cls.__dict__.items():
        if isinstance(descriptor, Validated):
            descriptor.storage_name = '_{}#{}'.format(
                type(descriptor).__name__, attr_name)
    return cls  # return the (modified) class
@entity # class decorator: runs as soon as the class statement finishes
class LineItem:
    """Order line item; weight/price/description are validated descriptors
    whose storage names the decorator rewrites descriptively."""
    weight = Quantity()
    price = Quantity()
    description = NonBlank()
    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price
    def subtotal(self):
        return self.weight * self.price
raisins = LineItem('Golden raisins', 10, 6.95)
dir(raisins)[:3]  # storage attributes now look like '_NonBlank#description'
LineItem.description.storage_name
raisins.description
getattr(raisins, '_NonBlank#description')
"""
Explanation: 总之,前面的 record_factory 函数最后一行会构建一个类,类的名称是 cls_name 参数的值,唯一直接超类是 object,有 __slots__, __init__, __iter__, __repr__ 四个类属性,其中后 3 个是实例方法。
我们本来可以将 __slots__ 类属性改成其它值,不过那样就要实现 __setattr__ 方法,为属性赋值时验证属性的名称,而且顺序相同,然而第 9 章说过,__slots__ 属性最主要特点就是节省内存,能处理数百万个实例,不过也有一些缺点。
把 3 个参数传给 type 是动态创建类的常用方式,如果查看 collections.namedtuple 源码会发现另一种方式,先声明一个 _class_template 变量,其值是字符串形式源码模板,然后在 namedtuple 函数中调用 _class_template.format(...) 方法,填充模板里的空白,最后,使用内置的 exec函数计算得到源码字符串
在 Python 元编程时,最好不要使用 exec 和 eval 函数,如果接受字符串来自不可信的源,这两个函数会有严重的安全风险,Python 提供了足够的内省工具,大多数时候不需要这两个函数。
record_factory 函数创建的类不能够序列化,即不能使用 pikle 模块里的 dump/load 函数处理,
定制描述符的类装饰器
上一章的 LineItem 例子还有个问题,就是储存的属性不具有描述性,即属性 _Quantity#0 不便于调试,如果能存储成 _Quantity#weight 之类的就好多了,上一章说过,我们不能使用描述性的存储属性名称,因为实例化描述符时无法得知托管属性,如前面的 weight 的名称,可是如果组建好整个类,而且把描述符绑定到类属性后,我们就可以审查类,并为描述符设置合理的存储属性名称。LineItem 的 __new__ 方法可以做到这一点,因此,在 __init__ 方法中使用描述符时,存储属性已经设置了正确的名称。为了解决这个问题使用 __new__ 方法属于白费力气,每次新建 LineItem 实例时都会运行 __new__ 方法中的逻辑,可是一旦 LineItem 类构建好了,描述符与托管属性之间的绑定就不会变了。因此,我们要在创建类时设置存储属性的名称。使用类装饰器或元类可以做到这一点,我们先使用简单的方式。
类装饰器和函数装饰器非常类似,是参数为类对象的函数,返回原来的类或修改后的类
End of explanation
"""
#!/usr/bin/env python
# encoding: utf-8
# evaltime.py -- demonstrates *when* class bodies and decorators run.
from evalsupport import deco_alpha
print('<[0]> evaltime module start')
def test():
    # A class body nested in a function runs only when the function is called.
    class Test:
        print('<[1]> evaltime test Test')
class ClassOne():
    # Class bodies execute at import time, so this prints immediately.
    print('<[2]> ClassOne body')
    def __init__(self):
        print('<[3]> ClassOne.__init__')
    def __del__(self):
        print('<[4]> ClassOne.__del__')
    def method_x(self):
        print('<[5]> ClassOne.method_x')
class ClassTwo(object):
    print('<[6]> ClassTwo body')
@deco_alpha
class ClassThree():
    # deco_alpha runs right after this class body is evaluated.
    print('<[7]> ClassThree body')
    def method_y(self):
        print('<[8]> ClassThree.method_y')
class ClassFour(ClassThree):
    # Note: the decorator does NOT re-run for subclasses.
    print('<[9]> ClassFour body')
    def method_y(self):
        print('<[10]> ClassFour.method_y')
if __name__ == '__main__':
    # Script-only section: exercises each class so the printed trace
    # shows which methods the decorator replaced.
    print('<[11]> ClassOne tests', 30 * '.')
    one = ClassOne()
    one.method_x()
    print('<[12]> ClassThree tests', 30 * '.')
    three = ClassThree()
    three.method_y()
    print('<[13]> ClassFour tests', 30 * '.')
    four = ClassFour()
    four.method_y()
print('<[14]> evaltime module end')
"""
Explanation: 类装饰器能以较简单的方式做到以前需要元类去做的事情 -- 创建类的时候定制类
类装饰器有个重大的缺点:只对直接依附的类有效,这意味着,被装饰的类的子类可能继承也可能不继承装饰类所做的改动,具体情况视改动方式而定
导入时和运行时比较
定义两个文件, evaltime.py
End of explanation
"""
#!/usr/bin/env python
# encoding: utf-8
# evalsupport.py -- helpers used by the evaluation-time demos.
print('<[100]> evalsupport module start')
def deco_alpha(cls):
    """Class decorator: replace method_y with inner_1 on the decorated class."""
    print('<[200]> deco_alpha')
    def inner_1(self):
        print('<[300]> deco_alpha:inner_1')
    cls.method_y = inner_1
    return cls
class MetaAleph(type):
    """Metaclass: every class it builds gets method_z replaced by inner_2."""
    print('<[400]> MetaAleph body')
    def __init__(cls, name, bases, dic):
        # Runs once for each class whose metaclass is MetaAleph (including
        # subclasses of such classes).
        print('<[500]> MetaAleph.__init__')
        def inner_2(self):
            print('<[600]> MetaAleph.__init__:inner_2')
        cls.method_z = inner_2
print('<[700]> evalsupport module end')
"""
Explanation: evalsupport.py
End of explanation
"""
import collections.abc
# Fix: collections.Iterable was deprecated since Python 3.3 and removed in
# 3.10; collections.abc.Iterable is the same object and works everywhere.
collections.abc.Iterable.__class__
import abc
abc.ABCMeta.__class__
abc.ABCMeta.__mro__
"""
Explanation: In [1]: import evaltime
<[100]> evalsupport module start #evalsupport 模块中所有顶层代码在导入模块时执行,解释器会编译 deco_alpha 函数,但不会执行定义体
<[400]> MetaAleph body # 类定义体运行了
<[700]> evalsupport module end
<[0]> evaltime module start
<[2]> ClassOne body # 每个类的定义体都执行了
<[6]> ClassTwo body #包括嵌套的类
<[7]> ClassThree body
<[200]> deco_alpha # 先计算被装饰的 ClassThree 类定义体,然后运行装饰器函数
<[9]> ClassFour body
<[14]> evaltime module end #这里,evaltime 是被导入的,不会运行 if __name == '__main__'
(py35) kaka@kaka-deep:~/kaka$ python3 evaltime.py
<[100]> evalsupport module start
<[400]> MetaAleph body
<[700]> evalsupport module end
<[0]> evaltime module start
<[2]> ClassOne body
<[6]> ClassTwo body
<[7]> ClassThree body
<[200]> deco_alpha
<[9]> ClassFour body
<[11]> ClassOne tests ..............................
<[3]> ClassOne.__init__
<[5]> ClassOne.method_x
<[12]> ClassThree tests ..............................
<[300]> deco_alpha:inner_1 # 类装饰器改变了 ClassThree.method_y 方法
<[13]> ClassFour tests ..............................
<[10]> ClassFour.method_y
<[14]> evaltime module end
<[4]> ClassOne.__del__ # 程序结束后,绑定在全局变量 one 上的 ClassOne 实例才会被垃圾回收
元类基础知识
元类是制造类的工厂,不过不是函数,而是类。
根据 Python对象模型,类是对象,因此类肯定是另外某个类的实例,默认情况下,Python 中的类是 type 的实例,也就是说,type 是大多数内置的类和用户定义的类的元类,为了避免无限递归,type 是自身的实例。注意,我们没有说 str 或者 LineItem 继承自 type,而是说 str 和 LineItem 是 type 的实例。
object 类和 type 类之间的关系很独特,object 是 type 的实例,type 是 object 的子类,这种关系很独特,无法使用 Python 代码表述,因为其定义其中一个之前另一个必须存在,type 是自身的实例这一点也很神奇
除了 type,标准库中还有一些别的类,例如 ABCMeta 和 Enum。如下所示:
End of explanation
"""
#!/usr/bin/env python
# encoding: utf-8
# evaltime_meta.py -- same demo as evaltime.py, but with a metaclass.
from evalsupport import deco_alpha
from evalsupport import MetaAleph
print('<[1]> evaltime module start')
@deco_alpha
class ClassThree():
    # deco_alpha runs right after this class body is evaluated.
    print('<[2]> ClassThree body')
    def method_y(self):
        print('<[3]> ClassThree.method_y')
class ClassFour(ClassThree):
    # The decorator does NOT re-run for subclasses.
    print('<[4]> ClassFour body')
    def method_y(self):
        print('<[5]> ClassFour.method_y')
class ClassFive(metaclass=MetaAleph):
    # MetaAleph.__init__ runs as soon as this class statement finishes.
    print('<[6]> ClassFive body')
    def __init__(self):
        print('<[7]> ClassFive body')
    def method_z(self):
        print('<[8]> ClassFive.method_z')
class ClassSix(ClassFive):
    # Subclasses inherit the metaclass, so MetaAleph.__init__ runs again.
    print('<[9]> ClassSix body')
    def method_z(self):
        print('<[10]> ClassSix.method_z')
if __name__ == '__main__':
    # Script-only section: the trace shows which methods the decorator /
    # metaclass replaced.
    print('<[11]> ClassThree tests', 30 * '.')
    three = ClassThree()
    three.method_y()
    print('<[12]> ClassFour tests', 30 * '.')
    four = ClassFour()
    four.method_y()
    print('<[13]> ClassFive tests', 30 * '.')
    five = ClassFive()
    five.method_z()
    print('<[14]> ClassSix tests', 30 * '.')
    six = ClassSix()
    six.method_z()
print('<[15]> evaltime module end')
"""
Explanation: 向上追溯,ABCMeta 最终所属的类也是 type,所有类都直接或间接的是 type 的实例,不过只有元类同事也是 type 的子类。若理解元类,一定要知道这种关系:元类(如 ABCMeta)从 type 类继承了构建类的能力。
我们要抓住的重点是,所有类都是 type 的实例,但元类还是 type 的子类,因此可以作为制造类的工厂,具体来说,元类可以通过实现 __init__ 方法来定制。元类的 __init__ 方法可以做到类装饰器能做的任何事情,但是作用更大
理解元类计算时间的练习
我们让 evalsupport.py 与原来相同,新建一个 evaltime_meta.py 作为主脚本:
End of explanation
"""
class EntityMeta(type):
    """Metaclass that creates business entities with validated fields."""
    def __init__(cls, name, bases, attr_dict):
        super().__init__(name, bases, attr_dict) # call __init__ on the superclass (type, here)
        # Give every Validated descriptor a descriptive storage name.
        for key, attr in attr_dict.items():
            if isinstance(attr, Validated):
                type_name = type(attr).__name__
                attr.storage_name = '_{}#{}'.format(type_name, key)
class Entity(metaclass=EntityMeta): # convenience base class: users just subclass it, never touching the metaclass
    '''Business entity with validated fields'''
class LineItem(Entity):
    """Order line item; weight/price/description are validated descriptors
    whose storage names EntityMeta rewrites descriptively."""
    weight = Quantity()
    price = Quantity()
    description = NonBlank()
    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price
    def subtotal(self):
        return self.weight * self.price
"""
Explanation: 引入操作:
In [1]: import evaltime_meta
<[100]> evalsupport module start
<[400]> MetaAleph body
<[700]> evalsupport module end
<[1]> evaltime module start
<[2]> ClassThree body
<[200]> deco_alpha
<[4]> ClassFour body
<[6]> ClassFive body
<[500]> MetaAleph.__init__ #与前面关键区别是,创建 ClassFive时调用了 MetaAleph.__init__ 方法
<[9]> ClassSix body
<[500]> MetaAleph.__init__ # 同上
<[15]> evaltime module end
Python 解释器计算 ClassFive 类的定义体时没有调用 type 构建具体的类定义体,而是调用 MetaAleph 类。MetaAleph 类的 __init__ 有 4 个参数。
self: 要初始化的对象,例如 ClassFive
name, bases, dic: 与构建类时传给 type 的参数一样
重新看一下这个类:
```
class MetaAleph(type):
print('<[400]> MetaAleph body')
def __init__(cls, name, bases, dic):
print('<[500]> MetaAleph.__init__')
def inner_2(self):
print('<[600]> MetaAleph.__init__:inner_2')
cls.method_z = inner_2
```
编写元类时候,通常把 self 参数改成 cls。__init__ 方法的定义体中定义了 inner_2 函数,然后绑定给 cls.method_z。MetaAleph.__init__ 方法签名中的 cls 指代要创建的类(例如 ClassFive)。而 inner_2 函数签名中的 self 最终是指代我们创建的类的实例(例如 ClassFive 类的实例)
运行脚本:
(pytorch) kaka@kaka-dell:~/kaka/python$ python3 evaltime_meta.py
<[100]> evalsupport module start
<[400]> MetaAleph body
<[700]> evalsupport module end
<[1]> evaltime module start
<[2]> ClassThree body
<[200]> deco_alpha
<[4]> ClassFour body
<[6]> ClassFive body
<[500]> MetaAleph.__init__
<[9]> ClassSix body
<[500]> MetaAleph.__init__
<[11]> ClassThree tests ..............................
<[300]> deco_alpha:inner_1
<[12]> ClassFour tests ..............................
<[5]> ClassFour.method_y
<[13]> ClassFive tests ..............................
<[7]> ClassFive body
<[600]> MetaAleph.__init__:inner_2 # MetaAleph 类的 __init__ 方法把ClassFive.method_z 方法替换成 inner_2 函数。
<[14]> ClassSix tests ..............................
<[7]> ClassFive body
<[600]> MetaAleph.__init__:inner_2 # ClassFive 的子类 ClassSix 也是一样
<[15]> evaltime module end
注意,ClassSix 类没有直接引用 MetaAleph 类,但是却收到了影响,因为它是 ClassFive 的子类,进而也是 MetaAleph 类的实例,所以由 MetaAleph.__init__ 实例化
定制描述符的元类
End of explanation
"""
import collections
class EntityMeta(type):
    """Metaclass that creates business entities with validated fields and
    remembers the declaration order of those fields."""
    @classmethod
    def __prepare__(cls, name, bases):
        return collections.OrderedDict() # empty OrderedDict to collect the class attributes
    def __init__(cls, name, bases, attr_dict):
        super().__init__(name, bases, attr_dict) # call __init__ on the superclass (type, here)
        cls._field_names = []
        for key, attr in attr_dict.items():
            if isinstance(attr, Validated):
                type_name = type(attr).__name__
                attr.storage_name = '_{}#{}'.format(type_name, key)
                cls._field_names.append(key) # record the fields in declaration order
class Entity(metaclass=EntityMeta):
    """Business entity with validated fields.

    Library users subclass this directly and never need to know about the
    metaclass or descriptor machinery.
    """
    @classmethod
    def field_names(cls):
        # Delegate to the list populated by EntityMeta, which preserves the
        # order in which the fields were declared.
        yield from cls._field_names
class LineItem(Entity):
    """An order line item; fields are validated by descriptors via EntityMeta."""
    # Descriptor instances; Quantity and NonBlank are defined in an earlier
    # cell. Presumably Quantity enforces a positive number and NonBlank a
    # non-empty string -- confirm against the descriptor definitions.
    weight = Quantity()
    price = Quantity()
    description = NonBlank()
    def __init__(self, description, weight, price):
        self.description = description
        self.weight = weight
        self.price = price
    def subtotal(self):
        # Total cost of this line: unit price times quantity.
        return self.weight * self.price
# Field names come out in declaration order thanks to EntityMeta.__prepare__.
for name in LineItem.field_names():
    print(name)
"""
Explanation: 写成这种语法,用户完全不用知道描述符或元类,直接继承库中提供的类就能满足要求
元类的特殊用法 __prepare__
在某些应用中,可能要知道类属性的定义顺序,例如读写 csv 文件的库,用户定义的类可能想要把类中按顺序声明的字段与 csv 文件中的各列对应起来
前面说过,type 构造方法以及元类的 __new__ 和 __init__ 都接收类的定义体,形式是一个名称到属性的字典,也就是说,当元类或装饰器获得映射时,属性的顺序已经丢失了。
在 Python 3 中可以使用 __prepare__, 这个特殊方法只能在元类中使用,而且要声明为类方法(即,要使用 classmethod 类装饰器定义)。解释器调用元类 __new__ 方法之前会调用 __prepare__ 方法,使用类定义体中的属性创建映射。__prepare__ 的第一个参数是元类,随后两个参数分别是类的名称以及由基类组成的元组,返回值是映射。元类构建新类时,__prepare__ 方法返回的映射会传给 __new__ 方法的最后一个参数,然后再传给 __init__ 方法
End of explanation
"""
|
gtrichards/QuasarSelection | SpIESHighzQuasars2.ipynb | mit | %matplotlib inline
from astropy.table import Table
import numpy as np
import matplotlib.pyplot as plt
# Load the training catalog; columns are precomputed colors plus a 'labels'
# class column.
data = Table.read('GTR-ADM-QSO-ir-testhighz_findbw_lup_2016_starclean.fits')
# X is in the format needed for all of the sklearn tools; it just has the colors.
# X = np.vstack([ data['ug'], data['gr'], data['ri'], data['iz'], data['zs1'], data['s1s2'], data['imag'], data['extinctu']]).T
# Don't use imag and extinctu since they don't contribute much to the accuracy and they add a lot to the data volume.
X = np.vstack([ data['ug'], data['gr'], data['ri'], data['iz'], data['zs1'], data['s1s2'] ]).T
y = np.array(data['labels'])
# For algorithms that need scaled data (SVM and kNN are distance-based):
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X) # Use the full training set now
XStrain = scaler.transform(X)
# SVM classifier
from sklearn.svm import SVC
svm = SVC(random_state=42)
svm.fit(XStrain,y)
# Bagging ensemble of 7-nearest-neighbour classifiers
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
bag = BaggingClassifier(KNeighborsClassifier(n_neighbors=7), max_samples=0.5, max_features=1.0, random_state=42)
bag.fit(XStrain, y)
"""
Explanation: Final SpIES High-z Quasar Selection
Notebook performing selection of $3.5<z<5$ quasars from SDSS+SpIES data.
Largely the same as SpIESHighzQuasars notebook except using the algorithm(s) from
SpIESHighzCandidateSelection2. See notes below for creating a version of the
test set that includes i-band mag and extinctu. (This wasn't easy.)
First load the training data, then instantiate and train the algorithm; see https://github.com/gtrichards/QuasarSelection/blob/master/SpIESHighzCandidateSelection2.ipynb
End of explanation
"""
#data2 = Table.read('GTR-ADM-QSO-ir_good_test_2016n.fits')
# Load the merged test catalog (see the notebook text for how it was built).
data2 = Table.read('GTR-ADM-QSO-ir_good_test_2016.fits')
# Fix: use the print() function form; the original bare `print data2.keys()`
# is a Python-2-only statement and a syntax error under Python 3. The
# function form behaves identically for a single argument on both versions.
print(data2.keys())
"""
Explanation: Second, load the test data
Test Data
Test set data set was made as follows (see 18 April 2016 README entry):
maketest_2016.py
Output is:
classifiers_out = open('GTR-ADM-QSO-ir_classifiers_good_test_2016.dat','w')
others_out= open('GTR-ADM-QSO-ir_others_good_test_2016.dat','w')
czr_out = open('GTR-ADM-QSO-ir_photoz_in7_good_test_2016.dat','w')
Really need the first two files combined (so that we have both RA/Dec and colors in one place).
But couldn't merge them with TOPCAT or STILTS. So had to break them into 3 pieces (with TOPCAT),
then used combine_test_files_STILTS.py to merge them together (just changing the input/output file names by hand).
Actually ran this on dirac so that I'd have more memory than on quasar. Copied the output files back to quasar and merged them together with TOPCAT.
So<br>
GTR-ADM-QSO-ir_others_good_test_2016a.dat + GTR-ADM-QSO-ir_classifiers_good_test_2016a.dat<br>
gives<br>
GTR-ADM-QSO-ir_good_test_2016a.dat<br>
(and so on for "b" and "c").
Then<br>
GTR-ADM-QSO-ir_good_test_2016a.dat + GTR-ADM-QSO-ir_good_test_2016b.dat + GTR-ADM-QSO-ir_good_test_2016c.dat<br>
gives<br>
GTR-ADM-QSO-ir_good_test_2016.dat<br>
and similarly for the fits output file.
Since I wanted to use the imag and extinctu, then I also had to make a version of the test file with combine_test_files_STILTSn.py (on quasar). This was fairly involved because of memory issues. The new output file is GTR-ADM-QSO-ir_good_test_2016n.dat. In the end, I ended up not using that and this is more of an exploration of SVM and bagging as alternatives to RF.
Now read in the test file and convert it to an appropriate array format for sklearn.
End of explanation
"""
# Not sure why I need to do this because there don't appear to be any unfilled columns
# but the code segment below won't run without it.
# Only need to do for the file with imag and extinctu
# data2 = data2.filled()
"""
Explanation: I had some problems with GTR-ADM-QSO-ir_good_test_2016n.fits because it thought that there were blank entries among the attributes. There actually weren't (as far as I could tell), but I found that I could use filled to fix the problem. However, that just caused problems later!
End of explanation
"""
# Restrict to the Stripe 82 footprint: RA in [300, 360] U [0, 60] degrees,
# Dec within +/- 1.5 degrees of the celestial equator.
ramask = ( ( (data2['ra']>=300.0) & (data2['ra']<=360.0) ) | ( (data2['ra']>=0.0) & (data2['ra']<=60.0) ) )
decmask = ((data2['dec']>=-1.5) & (data2['dec']<=1.5))
dataS82 = data2[ramask & decmask]
# Fix: print() function form; the bare Python-2 `print len(...)` statement
# breaks under Python 3 and prints identically under Python 2.
print(len(dataS82))
#Xtest = np.vstack([dataS82['ug'], dataS82['gr'], dataS82['ri'], dataS82['iz'], dataS82['zs1'], dataS82['s1s2'], dataS82['i'], data2['extinctu']]).T
Xtest = np.vstack([dataS82['ug'], dataS82['gr'], dataS82['ri'], dataS82['iz'], dataS82['zs1'], dataS82['s1s2'] ]).T
# Apply the same scaling fitted on the training set (never re-fit on test data).
XStest = scaler.transform(Xtest)
"""
Explanation: Taking too long to do all the objects, so just do Stripe 82, which is all that we really care about anyway.
End of explanation
"""
from dask import compute, delayed
def processSVM(Xin):
    """Return the SVM class prediction for a single (1, n_features) row."""
    return svm.predict(Xin)
# Create dask objects.
# Reshape is necessary because the format of x as drawn from XStest
# is not what sklearn wants (it expects a 2-D (1, n_features) array).
dobjsSVM = [delayed(processSVM)(x.reshape(1,-1)) for x in XStest]
import dask.threaded
# NOTE(review): `get=` is the legacy dask keyword; newer dask versions use
# `scheduler='threads'` -- confirm against the installed dask version.
ypredSVM = compute(*dobjsSVM, get=dask.threaded.get)
ypredSVM = np.array(ypredSVM).reshape(1,-1)[0]  # flatten list of 1-element predictions
from dask import compute, delayed
def processBAG(Xin):
    """Return the bagged-kNN class prediction for a single (1, n_features) row."""
    return bag.predict(Xin)
# Create dask objects (same reshape reasoning as for the SVM above).
dobjsBAG = [delayed(processBAG)(x.reshape(1,-1)) for x in XStest]
import dask.threaded
ypredBAG = compute(*dobjsBAG, get=dask.threaded.get)
ypredBAG = np.array(ypredBAG).reshape(1,-1)[0]
"""
Explanation: Quasar Candidates
Finally, do the classification and output the test file, including the predicted labels.
End of explanation
"""
# Attach the predicted labels to the Stripe 82 table for downstream use.
dataS82['ypredSVM'] = ypredSVM
dataS82['ypredBAG'] = ypredBAG
# Writing disabled here; see SpIESHighzQuasarsS82all.py for the full run.
#dataS82.write('GTR-ADM-QSO-ir_good_test_2016_Stripe82svm.fits', format='fits')
"""
Explanation: Now write results to output file. Didn't do bagging b/c takes too long. See SpIESHighzQuasarsS82all.py which I ran on dirac.
End of explanation
"""
|
analysiscenter/dataset | examples/experiments/weights_distributions/weights_distributions.ipynb | apache-2.0 | import sys
sys.path.append('../../utils')
import pickle
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from utils import plot_weights
"""
Explanation: Distributions of weights in ResNet34 and ResNet50
In this notebook we will compare the distribution of weights from two almost identical architectures. More information about what different architectures you can read in this notebook.
End of explanation
"""
bottle_weights_path = 'path/with/saved_bottle_weights.pkl'
res_weights_path = 'path/with/saved_res_weights.pkl'
# Load the (names, weights, params) tuples pickled in the tutorial notebooks.
with open(bottle_weights_path, 'rb') as f:
    bottle_names, bottle_weights, bottle_params = pickle.load(f)
# Fix: the original read `open(res_weights_path', 'rb')` -- a stray quote
# that is a syntax error; the path variable should be used directly.
with open(res_weights_path, 'rb') as f:
    res_names, res_weights, res_params = pickle.load(f)
"""
Explanation: First of all, load the weights that we saved in the tutorials.
End of explanation
"""
plot_weights(bottle_names, bottle_weights, bottle_params, ['r', 'c', 'b', 'g'], [4, 4], [0, 4, 7, 14])
"""
Explanation: Below is drawn the distribution of weights of 0, 4th, 7th, 14th blocks from the ResNet50 model. Drawing function you can see in utils.
End of explanation
"""
plot_weights(res_names, res_weights, res_params, ['g', 'y', 'r'], [4, 3], [0, 3, 7, 14], bottleneck=False)
"""
Explanation: It's not difficult to notice that the distribution of 1x1 convolutions has a larger variance than that of the 3x3 convolutions. Therefore, they have a stronger influence on the output.
Black lines show the initial distribution of weights
Now let's draw distribution of 0th, 3rd, 7th, 14th blocks from the ResNet34 model.
End of explanation
"""
# NOTE(review): `indices` is not used below -- presumably kept for a later
# cell; confirm before removing.
indices = [i for i in range(len(bottle_names)) if 'conv' in bottle_names[i][:8]]
_, ax = plt.subplots(2, 2, sharex='all', figsize=(23, 24))
ax = ax.reshape(-1)  # flatten the 2x2 axes grid for sequential indexing
num_plot = 0
num_blocks = [3, 6, 9, 13]  # blocks whose 3x3 convolutions are compared
# Positions of the selected 'layer-4' entries in each architecture's name list
res_layers = np.where(res_names == 'layer-4')[0][num_blocks]
bottle_layers = np.where(bottle_names == 'layer-4')[0][num_blocks]
for i,j in zip(res_layers, bottle_layers):
    ax[num_plot].set_title('convolution layer with kernel 3x3 №{}'.format(num_blocks[num_plot]), fontsize=18)
    # Overlay the flattened weight distributions of both architectures
    sns.distplot(res_weights[i].reshape(-1), ax=ax[num_plot], color='y', label='simple')
    sns.distplot(bottle_weights[j].reshape(-1), ax=ax[num_plot], color='c', label='bottleneck')
    ax[num_plot].legend()
    ax[num_plot].set_xlabel('value', fontsize=20)
    ax[num_plot].set_ylabel('quantity', fontsize=20)
    num_plot += 1
    if num_plot == ax.shape[0]:
        break
"""
Explanation: It is not difficult to see that the distribution of the first and the second 3x3 convolutions are the same.
Now, let's compare the distribution of the second layer of ResNet34 architecture and the 3х3 layer of ResNet50 from 3rd, 6th, 9th, 13th blocks. Will they be the same?
End of explanation
"""
|
sbu-python-summer/python-tutorial | day-5/scipy-exercises.ipynb | bsd-3-clause | def hilbert(n):
""" return a Hilbert matrix, H_ij = (i + j - 1)^{-1} """
H = np.zeros((n,n), dtype=np.float64)
for i in range(1, n+1):
for j in range(1, n+1):
H[i-1,j-1] = 1.0/(i + j - 1.0)
return H
"""
Explanation: Q1: integrating a sampled vs. analytic function
Numerical integration methods work differently depending on whether you have the analytic function available (in which case you can evaluate it freely at any point you please) or if it is sampled for you.
Create a function to integrate, and use NumPy to sample it at $N$ points. Compare the answer you get from integrating the function directly (using integrate.quad to the integral of the sampled function (using integrate.simps).
To get a better sense of the accuracy, vary $N$, and look at how the error changes (if you plot the error vs. $N$, you can measure the convergence).
Q2: Condition number
For a linear system, ${\bf A x} = {\bf b}$, we can only solve for $x$ if the determinant of the matrix ${\bf A}$ is non-zero. If the determinant is zero, then we call the matrix singular. The condition number of a matrix is a measure of how close we are to being singular. The formal definition is:
\begin{equation}
\mathrm{cond}({\bf A}) = \| {\bf A}\| \| {\bf A}^{-1} \|
\end{equation}
But we can think of it as a measure of how much ${\bf x}$ would change due to a small change in ${\bf b}$. A large condition number means that our solution for ${\bf x}$ could be inaccurate.
A Hilbert matrix has $H_{ij} = (i + j + 1)^{-1}$, and is known to have a large condition number. Here's a routine to generate a Hilbert matrix
End of explanation
"""
def rhs(t, Y, q, omega_d, b):
    """Damped driven pendulum derivatives.

    Y = (theta, omega) is the state; q is the damping coefficient, b the
    forcing amplitude, and omega_d the driving frequency.
    """
    theta, omega = Y[0], Y[1]
    dYdt = np.zeros_like(Y)
    dYdt[0] = omega
    dYdt[1] = -q*omega - np.sin(theta) + b*np.cos(omega_d*t)
    return dYdt
"""
Explanation: Let's solve ${\bf Hx} ={\bf b}$. Create a linear system by picking an ${\bf x}$ and generating a ${\bf b}$ by multiplying by the matrix ${\bf H}$. Then use the scipy.linalg.solve() function to recover ${\bf x}$. Compute the error in ${\bf x}$ as a function of the size of the matrix.
You won't need a large matrix, $n \sim 13$ or so, will start showing big errors.
You can compute the condition number with numpy.linalg.cond()
There are methods that can do a better job with nearly-singular matricies. Take a look at scipy.linalg.lstsq() for example.
Q3: damped driven pendulum and chaos
There are a large class of ODE integration methods available through the scipy.integrate.ode() function. Not all of them provide dense output -- most will just give you the value at the end of the integration.
The explicit dopri5 integrator will store the solution at intermediate points and allow you to access them. We'll use that here. You'll need to use the set_solout() method to define a function that takes the current integration solution and store it).
The damped driven pendulum obeys the following equations:
$$\dot{\theta} = \omega$$
$$\dot{\omega} = -q \omega - \sin \theta + b \cos \omega_d t$$
here, $\theta$ is the angle of the pendulum from vertical and $\omega$ is the angular velocity. $q$ is a damping coefficient, $b$ is a forcing amplitude, and $\omega_d$ is a driving frequency.
Choose $q = 0.5$ and $\omega_d = 2/3$.
Integrate the system for different values of $b$ (start with $b = 0.9$ and increase by $0.05$, and plot the results ($\theta$ vs. $t$). Here's a RHS function to get you started:
End of explanation
"""
def restrict_theta(theta):
    """Map theta onto the interval [-pi, pi] by removing whole turns."""
    shifted = theta + np.pi
    # Subtract the integer number of 2*pi turns, then shift back.
    wrapped = shifted - 2.0*np.pi*np.floor(shifted/(2.0*np.pi))
    return wrapped - np.pi
"""
Explanation: Note that the pendulum can flip over, giving values of $\theta$ outside of $[-\pi, \pi]$. The following function can be used to restrict it back to $[-\pi, \pi]$ for plotting.
End of explanation
"""
|
erikdrysdale/erikdrysdale.github.io | _rmd/extra_power/winners_curse.ipynb | mit | # modules used in the rest of the post
from scipy.stats import norm, truncnorm
import numpy as np
from numpy.random import randn
from scipy.optimize import minimize_scalar
import plotnine
from plotnine import *
import pandas as pd
"""
Explanation: A winner's curse adjustment for a single test statistic
Background
The naive application of applied statistics for conducting inference in scientific research is one of the primary culprits in the reproducability crisis. Even excluding cases of scientific misconduct, cited research findings are likely to innaccurate due to 1) the file drawer problem, 2) researchers' degrees of freedom and 3) underpowered statistical designs. To address the problem of publication bias some journals are now accepting the findings regardless of their statistical signifance. More than 200 journals now use the Open Science Foundation's pre-registration framework to help improve reproducability and reduce the garden of forking paths problem.
On ongoing challenge in many disciplines is to improve the power of research designs. For example in the empirical economics literature, the median power is estimated to be only 18%. In biomedical research there is at least more attention paid to power, but due to financial incentives (the NIH requires power > 80% for successful grants) the estimates of power are likely to be exagerated. Most researchers believe that statistical power is important because it ensures that the resources used to carry out research are not wasted. But in addition to pecuniary considerations, the power of a test is intimately linked to the reproducability crisis because studies with low power have inflated effect sizes.
One consequence of using frequentist statistical tools to conduct scientific inference is that all statistically significant findings are biased even if the test itself is unbiased. This is because statistically significant findings have to be a certain number of standard deviations away from zero, and concomitantly certain values of the test are never observed (in the statistically significant space). The power-bias relationship helps to explain the Proteus phenomenon whereby follow-up studies tend to have a smaller effect size. The magnitude of this bias is known as the Winner's Curse, and several adjustment procedures have been proposed in the context of multiple tests.[[^1]] Is is especially relevant in genomics for the polygenic risk scores developed with genome-wide association studies.
In this post I will review briefly review the frequentist paradigm that is used to conduct scientic inference and demonstrate how the probability of type-1 and type-2 errors are related to biased effect sizes. In the final section of the post I propose a Winner's Curse adjustment (WCA) procedure for a single test statistic. I am not aware of such a method being proposed before, but if it has been please contact me so that I can properly credit alternative methods.[[^2]]
In summary this post will provide to explicit formulas for:
The relationship between power and effect size bias (equation X).
An effect size adjuster for single test statistic results (equation Y).
In the sections below the examples and math will be kept as simple as possible. All null/alternative hypothesis will be assumed to come from a Gaussian distribution. Variances will be fixed and known. All hypothesis will be one-sided hypothesis. Each of these assumptions can be relaxed without any change to the implications of the examples below, but do require a bit more math. Also note that $\Phi$ refers to the standard normal CDF and its quantile function $\Phi^{-1}$.
End of explanation
"""
# EXAMPLE OF TYPE-I ERROR RATE: simulate the two-sample z-test under the
# null (both samples N(0,1)) and check the rejection rate matches alpha.
alpha, n, sig2, seed, nsim = 0.05, 18, 2, 1234, 50000
c_alpha = norm.ppf(1-alpha)  # one-sided critical value
np.random.seed(1234)
# Share of simulations where the standardized mean difference exceeds c_alpha
err1 = np.mean([ (np.mean(randn(n)) - np.mean(randn(n)))/np.sqrt(2/n) > c_alpha for i in range(nsim)])
print('Empirical type-I error rate: %0.3f\nExpected type-I error rate: %0.3f' % (err1, alpha))
"""
Explanation: (1) Review of Type-I and Type-II errors
Imagine a simple hypothesis test: to determine whether one gaussian distribution, with a known variance, has a larger mean than another: $y_{i1} \sim N(\mu_A, \sigma^2/2)$ and $y_{i2} \sim N(\mu_B, \sigma^2/2)$, then $\bar y_i \sim N(\mu_i, \sigma^2/n)$ and $\bar d = \bar y_1 - \bar y_2 \sim N(\mu_A, \sigma^2/n)$. The sample mean (difference) will have a variance of $\sigma^2/n$.[[^3]]
$$
\begin{align}
\bar d &\sim N(d, \sigma^2/n) \
\bar z = \frac{\bar d - d}{\sigma / \sqrt{n}} &\sim N(0, 1)
\end{align}
$$
The null hypothesis is that: $H_0: \mu_A \leq \mu_B$, with the alternative hypothoesis that $\mu_A > \mu_B$ (equavilent to $d \leq 0$ and $d >0$, respectively). Recall that in frequentist statistical paradigm, the goal is find a rejection region of the test statistic ($z$) that bounds the type-I error rate and maximizes power. When the null is true ($d\leq 0$) then setting $c_\alpha = \Phi_{1-\alpha}^{-1}$, and rejecting the null when $\bar z > c_\alpha$ will obtain a type-I error rate of exactly $\alpha$.[[^4]]
$$
\begin{align}
P(\bar z > c) &\leq \alpha \
1-\Phi ( c ) &\leq \alpha \
c &\geq \Phi^{-1}(1-\alpha)
\end{align}
$$
[^1]: Note that the Winner's Curse in economics is a different but related phenomenon.
[^2]: There is an approach with uses a simple MLE to invert the observed mean of a truncated Gaussian, but as I discuss below this approach has signficant drawbacks when the true effect size is zero or small.
[^3]: If the variances were unknown, then the difference in means would have a student-t distribution with slightly fatter tails.
[^4]: If $c > c_\alpha$, then the type-I error rate would be lower (which is good), but, the power would also be lower in the event that the null were false. It is therefore desirable the rejection region obtain the exactly desired type-I error rate, and then the statistician can decide what type-I level to choose.
End of explanation
"""
# EXAMPLE OF TYPE-II ERROR RATE: same test with a true effect d = 0.75.
d = 0.75
# Theoretical type-II error: beta = Phi(c_alpha - sqrt(n)*d/sigma)
beta = norm.cdf(c_alpha - d / np.sqrt(2/n))
# Share of simulations that FAIL to reject even though the null is false
err2 = np.mean([ (np.mean(d + randn(n)) - np.mean(randn(n)))/np.sqrt(sig2 / n) < c_alpha for i in range(nsim)])
print('Empirical type-II error rate: %0.3f\nExpected type-II error rate: %0.3f' % (err2, beta))
"""
Explanation: In the event that the null is not true $(d > 0)$ then power of the test will depend four things:
The magnitute of the effect (the bigger the value of $d$ the better)
The number of samples (the more the better)
The type-I error rate (the larger the better)
The magnitute of the variance (the smaller the better)
Defining the empirical test statistic as $\bar z$, the type-II error rate is:
$$
\begin{align}
1 - \beta &= P( \bar z > c_\alpha) \
1 - \beta &= P\Bigg( \frac{\bar d - 0}{\sigma/\sqrt{n}} > c_\alpha \Bigg) \
1 - \beta &= P( z > c_\alpha - \sqrt{n} \cdot d / \sigma ) \
\beta &= \Phi\Bigg(c_\alpha - \frac{\sqrt{n} \cdot d}{\sigma} \Bigg)
\end{align}
$$
End of explanation
"""
def power_fun(alpha, n, mu, sig2):
    """Power (1 - beta) of the one-sided z-test for a mean difference.

    alpha: type-I error rate; n: sample size; mu: true mean difference;
    sig2: known variance of the difference.
    """
    thresh = norm.ppf(1-alpha)
    t2_err = norm.cdf(thresh - mu/np.sqrt(sig2/n))
    # Fix: removed an unreachable bare `return` that followed this statement.
    return 1 - t2_err

def bias_ratio(alpha, n, mu, sig2):
    """Expected ratio E[d*]/d of the significant effect size to the true one.

    Implements R(beta) = 1 + sigma*phi(Phi^{-1}(beta)) / (d*sqrt(n)*(1-beta))
    from the mean of the truncated Gaussian of significant results.
    """
    # Fix: the original passed the *global* `d` as the mean (mu=d), so any
    # call where the argument differed from the notebook-level `d` (e.g. the
    # vectorized dataframe call) silently used the wrong effect size.
    power = power_fun(alpha=alpha, n=n, mu=mu, sig2=sig2)
    num = np.sqrt(sig2) * norm.pdf(norm.ppf(1-power))
    den = mu * np.sqrt(n) * power
    return 1 + num / den
# SIMULATE ONE EXAMPLE #
np.random.seed(seed)
nsim = 125000
n, d = 16, 0.5
holder = np.zeros([nsim, 2])
for ii in range(nsim):
y1, y2 = randn(n) + d, randn(n)
dbar = y1.mean() - y2.mean()
zbar = dbar / np.sqrt(sig2 / n)
holder[ii] = [dbar, zbar]
emp_power = np.mean(holder[:,1] > c_alpha)
theory_power = power_fun(alpha=alpha, n=n, mu=0.5, sig2=sig2)
emp_ratio = holder[:,0][holder[:,1] > c_alpha].mean() / d
theory_ratio = bias_ratio(alpha, n, d, sig2)
print('Empirical power: %0.2f, theoretical power: %0.2f' % (emp_power, theory_power))
print('Empirical bias-ratio: %0.2f, theoretical power: %0.2f' % (emp_ratio, theory_ratio))
# CALCULATE CLOSED-FORM RATIO #
n_seq = np.arange(1,11,1)**2
d_seq = np.linspace(0.01,1,len(n_seq))
df_ratio = pd.DataFrame(np.array(np.meshgrid(n_seq, d_seq)).reshape([2,len(n_seq)*len(d_seq)]).T, columns=['n','d'])
df_ratio.n = df_ratio.n.astype(int)
df_ratio = df_ratio.assign(ratio = lambda x: bias_ratio(alpha, x.n, x.d, sig2),
power = lambda x: power_fun(alpha, x.n, x.d, sig2))
gg_ratio = (ggplot(df_ratio, aes(x='power',y='np.log(ratio)',color='n')) +
geom_point() + theme_bw() +
ggtitle('Figure 1: Relationship between power and effect size bias') +
labs(x='Power',y='log(Bias Ratio)') +
scale_color_gradient2(low='blue',mid='yellow',high='red',midpoint=50,
name='Sample Size'))
plotnine.options.figure_size = (5,3.5)
gg_ratio
"""
Explanation: (2) Relationship between power and effect size bias
Most practitioners of applied statistics will be familiar with type-I and type-II error rates and will use these to interpret the results of studies and design trials. In most disciplines it is common that only statistically significant results (i.e. those that reject the null) are analyzed. In research domains where there are many hypothesis tests under consideration (such as genomics), it is required that multiple testing adjustments be made so that the number of aggregate false discoveries is bounded. Note that such adjustments are equivalent to increasing the value of $c_\alpha$ and will lower the power of each test.
Unfortunately few researchers in my experience understand the relationship between power and effect size bias. Even though rigorously pre-specified research designs will likely have an accurate number of "true discoveries", the distribution of significant effect sizes will almost certainly be overstated. An example will help to illustrate. Returning to the difference in Gaussian sample means, the distribution of statistically significant means will follow the following conditional distribution:
$$
\begin{align}
\bar d^ &= \bar d | \bar z > c_\alpha \
&= \bar d | \bar d > \sigma \cdot c_\alpha / \sqrt{n}
\end{align*}
$$
Notice that the smallest observable and statistically significant mean difference will be at least $c_\alpha$ root-$n$ normalized standard deviations above zero. Because $\bar d$ has a Gaussian distribution, $\bar d^*$ has a truncated Gaussian distribution:
$$
\begin{align}
\bar d^ &\sim TN(\mu, \sigma^2, l, u) \
&\sim TN(d, \sigma^2 / n, \sigma \cdot c_\alpha / \sqrt{n}, \infty) \
a &= \frac{l - \mu}{\sigma} = c_\alpha - \sqrt{n}\cdot d / \sigma = \Phi^{-1}(\beta) \
E[\bar d^] &= d + \frac{\phi(a)}{1 - \Phi(a)} \cdot (\sigma/\sqrt{n}) \
&= d + \underbrace{\frac{\sigma \cdot \phi(\Phi_\beta^{-1})}{\sqrt{n}(1 - \beta)}}_{\text{bias}}
\end{align}
$$
The bias of the truncated Gaussian is shown to be related to a handful of statistical parameters including the power of the test! The bias can also be expressed as a ratio of the mean of the statistically significant effect size to the true one, what I will call the bias ratio,
$$
\begin{align}
\text{R}(\beta;n,d,\sigma) &= \frac{E[\bar d^]}{d} = 1 + \frac{\sigma \cdot \phi(\Phi_\beta^{-1})}{d\cdot\sqrt{n}\cdot(1 - \beta)}
\end{align*}
$$
where $\beta = f(n,d,\sigma)$, and $R$ is ultimately a function of the sample size, true effect size, and measurement error. The simulations below show the relationship between the bias ratio and power for different effect and sample sample sizes.
End of explanation
"""
np.round(df_ratio[(df_ratio.ratio > 1.3) & (df_ratio.ratio < 1.4)].sort_values('power').head(),2)
np.round(df_ratio[(df_ratio.power > 0.45) & (df_ratio.power < 0.52)].sort_values('ratio').head(),2)
"""
Explanation: Figure 1 shows that while there is not a one-to-one relationship between the power and the bias ratio, generally speaking the higher the power the lower the ratio. The variation in low powered tests is driven by the sample sizes. Tests that have low power with a large sample sizes but small effect sizes will have a much smaller bias than equivalently powered tests with large effect sizes and small sample sizes. The tables below highlight this fact by showing the range in power for tests with similar bias ratios, the range in bias ratios for similarly powered tests.
End of explanation
"""
def mu_trunc(mu_true, alpha, n, sig2):
    """Mean of the truncated Gaussian of significant effects given true mean mu_true."""
    se = np.sqrt(sig2 / n)
    cut = norm.ppf(1-alpha) - mu_true / se
    # Inverse Mills ratio times the standard error gives the selection bias.
    return mu_true + norm.pdf(cut)/(1-norm.cdf(cut)) * se

def mu_diff(mu_true, mu_star, alpha, n, sig2):
    """Squared gap between an observed significant mean mu_star and the model mean."""
    gap = mu_star - mu_trunc(mu_true, alpha, n, sig2)
    return gap**2

def mu_find(mu_star, alpha, n, sig2):
    """Invert mu_trunc: estimate the true mean consistent with the observed mu_star."""
    fit = minimize_scalar(fun=mu_diff, args=(mu_star, alpha, n, sig2), method='brent')
    return fit.x
n = 16
nsim = 100000
np.random.seed(seed)
d_seq = np.round(np.linspace(-1,2,7),1)
res = np.zeros(len(d_seq))
for jj, d in enumerate(d_seq):
holder = np.zeros([nsim,2])
# Generate from truncated normal
dbar_samp = truncnorm.rvs(a=c_alpha-d/np.sqrt(sig2/n),b=np.infty,loc=d,scale=np.sqrt(sig2/n),size=nsim,random_state=seed)
z_samp = dbar_samp / np.sqrt(sig2/n)
res[jj] = mu_find(dbar_samp.mean(), alpha, n, sig2)
df_res = pd.DataFrame({'estimate':res, 'actual':d_seq})
plotnine.options.figure_size = (5.5, 3.5)
gg_res = (ggplot(df_res, aes(y='estimate',x='actual')) + geom_point() +
theme_bw() + labs(y='Estimate',x='Actual') +
geom_abline(intercept=0,slope=1,color='blue') +
ggtitle('Figure 2: Unbiased estimate of true mean possible for repeated samples'))
gg_res
"""
Explanation: (3) Why estimating the bias of statistically significant effects is hard!
If the true effect size were known, then it would be possible to explicitely calculate the bias term. Unfortunately this parameter is never known in the real world. If there happened to be multiple draws from the same hypothesis then an estimate of the true mean could be found. With multiple draws, there will be an observed distribution of $\bar d^$ so that the empirical mean $\hat{\bar d}^$ could be used by optimization methods to estimate $d$ using the formula for the mean of a truncated Gaussian.
$$
\begin{align}
d^ &= \arg\min_d \hspace{2mm} \Bigg[ \hat{\bar d}^ - \Bigg( d + \frac{\phi(c_\alpha-\sqrt{n}\cdot d/\sigma)}{1 - \Phi(c_\alpha-\sqrt{n}\cdot d/\sigma)} \cdot (\sigma/\sqrt{n}) \Bigg) \Bigg]^2
\end{align}
$$
The simulations below show that with enough hypothesis rejections, the true value of $d$ could be determined. However if the null could be sampled multiple times then the exact value of $d$ could be determined by just looking at $\bar d$! The code is merely to highlight the principle.
End of explanation
"""
n = 25
sig, rn = np.sqrt(sig2), np.sqrt(n)
d_seq = np.linspace(-10,2,201)
df1 = pd.DataFrame({'dstar':d_seq,'d':[d + norm.pdf(c_alpha - rn*d/sig) / norm.cdf(rn*d/sig - c_alpha) * (sig/rn) for d in d_seq]})
sample = truncnorm.rvs(c_alpha, np.infty, loc=0, scale=sig/rn, size=1000, random_state=seed)
df2 = pd.DataFrame({'d':sample,'tt':'dist'})
plotnine.options.figure_size = (5,3.5)
plt1 = (ggplot(df1,aes(y='dstar',x='d')) + geom_point() + theme_bw() +
geom_vline(xintercept=c_alpha*sig/rn,color='blue') +
labs(x='Observed mean',y='Estimate of d') +
ggtitle('Figure 3A: Negative bias in ML estimate'))
plt1
fig2 = (ggplot(df1,aes(x='d')) + theme_bw() +
geom_histogram(fill='grey',color='blue',bins=30) +
labs(x='Observed value',y='Frequency') +
ggtitle('Figure 3B: Distribution under d=0'))
fig2
"""
Explanation: But if multiple samples are unavailable to estimate $\hat{\bar d}^$, then can the value of $d$ ever be estimated? A naive reproach using only a single value to find $d^$ from the equation above yields negative estimates when $\mu \approx 0$ because many values below the median of the truncated normal with a small mean have will match a large and negative mean for another truncated normal. Figures 3A and 3B show this asymmetric phenomenon.
End of explanation
"""
d_seq = np.linspace(-1,2,31)
bias_d0 = norm.pdf(c_alpha)/norm.cdf(-c_alpha)*np.sqrt(sig2/n)
df_bias = pd.DataFrame({'d':d_seq,'deflated':[mu_trunc(dd,alpha,n,sig2)-bias_d0 for dd in d_seq]})
plotnine.options.figure_size = (5,3.5)
gg_bias1 = (ggplot(df_bias,aes(x='d',y='deflated')) + theme_bw() +
geom_point() + labs(x='True effect',y='Deflated effect') +
geom_abline(intercept=0,slope=1,color='blue') +
scale_x_continuous(limits=[min(d_seq),max(d_seq)]) +
scale_y_continuous(limits=[min(d_seq),max(d_seq)]) +
ggtitle('Figure 4: Naive deflation leads to large bias'))
gg_bias1
"""
Explanation: (4) Approaches to de-biasing single-test statistic results
A conservative method to ensure that $E[ \bar d^* -d ] \leq 0$ when $d\geq 0$ is to subtract off the bias when the null is zero: $(\sigma \cdot \phi(c_\alpha)) / (\sqrt{n}\cdot\Phi(-c_\alpha))$. The problem with this approach is that for a true effect ($d>0$), the bias estimate will be too large and the estimate of the true effect will actually be too small as Figure 4 shows.
End of explanation
"""
nsim = 10000
di_cn = {'index':'tt','25%':'lb','75%':'ub','mean':'mu'}
d_seq = np.linspace(-0.5,1.5,21)
n_seq = [16, 25, 100, 250]
holder = []
for n in n_seq:
bias_d0 = norm.pdf(c_alpha)/norm.cdf(-c_alpha)*np.sqrt(sig2/n)
dist_d0 = truncnorm(a=c_alpha,b=np.infty,loc=0,scale=np.sqrt(sig2/n))
sample_d0 = dist_d0.rvs(size=nsim, random_state=seed)
w0 = dist_d0.pdf(sample_d0).mean()
sim = []
for ii, d in enumerate(d_seq):
dist = truncnorm(a=c_alpha-d/np.sqrt(sig2/n),b=np.infty,loc=d,scale=np.sqrt(sig2/n))
sample = dist.rvs(size=nsim, random_state=seed)
deflator = 2*(1-dist_d0.cdf(sample))*bias_d0
# deflator = dist_d0.pdf(sample)*bias_d0 / w0
d_adj = sample - deflator
mat = pd.DataFrame({'adj':d_adj,'raw':sample}).describe()[1:].T.reset_index().rename(columns=di_cn)[list(di_cn.values())].assign(d=d,n=n)
sim.append(mat)
holder.append(pd.concat(sim))
df_defl = pd.concat(holder)
# Figure 5: compare deflated vs raw significant estimates across sample sizes.
plotnine.options.figure_size = (9,6)
gg_bias2 = (ggplot(df_defl,aes(x='d',y='mu',color='tt')) + theme_bw() +
            # vertical ranges show the IQR of the simulated draws
            geom_linerange(aes(ymin='lb',ymax='ub',color='tt')) +
            geom_point() + labs(x='True effect',y='Observed statistically significant effect') +
            # 45-degree line: an unbiased estimator would sit on it
            geom_abline(intercept=0,slope=1,color='black') +
            geom_vline(xintercept=0, linetype='--') +
            scale_x_continuous(limits=[-0.75,1.8]) +
            scale_y_continuous(limits=[-0.75,1.8]) +
            # one panel per sample size n
            facet_wrap('~n', labeller=label_both) +
            scale_color_discrete(name='Type',labels=['Deflated','Observed']) +
            ggtitle('Figure 5: Deflating by the cdf of d=0 achieves better results\nVertical lines show IQR'))
gg_bias2
"""
Explanation: A better approach I have devised is to weight the statistically significant observation by where it falls in the cdf of the truncated Gaussian for $d=0$. When $d>0$ most $\bar d^*$ will be above this range and receive little penalty, whereas values of $d \approx 0$ will tend to receive a stronger deflation.
$$
\begin{align}
b_0 &= \frac{\sigma}{\sqrt{n}} \frac{\phi(c_\alpha)}{\Phi(-c_\alpha)} = \text{bias}(\bar d^* \mid d=0) \\
d^* &= \bar d^* - 2\cdot[1 - F_{\bar d>c_\alpha\sigma/\sqrt{n}}(\bar d^* \mid d=0)] \cdot b_0
\end{align}
$$
The simulations below implement the deflation procedure suggested by equation Y for a single point estimate for different sample and effect sizes.
End of explanation
"""
# Worked example: winner's-curse adjustment for a single significant result
# (the Gertler et al. (2013) labor-market effect: 42%, n=105, p ~ 0.01).
alpha = 0.05
c_alpha = norm.ppf(1-alpha)  # one-sided critical value
dstar = 0.42      # reported (statistically significant) effect
sig2 = 1.85**2    # noise variance estimate
n = 105           # sample size
se = np.sqrt(sig2 / n)  # standard error of the mean effect
# Expected bias of a significant estimate when the true effect is zero
bias_d0 = norm.pdf(c_alpha)/norm.cdf(-c_alpha)*se
# Distribution of significant estimates under d=0
# (np.inf rather than np.infty: the latter was removed in NumPy 2.0)
dist_d0 = truncnorm(a=c_alpha, b=np.inf, loc=0, scale=se)
# Deflator: twice the survival probability of dstar under the d=0 law
adj = 2*(1-dist_d0.cdf(dstar))*bias_d0
print('Baseline effect: %0.3f, P-value: %0.3f\nBias when d=0: %0.3f\nDeflator: %0.3f\nAdjusted effect: %0.3f' %
      (dstar, 1-norm.cdf(dstar/se), bias_d0, adj, dstar - adj))
"""
Explanation: Figure 5 shows that the bias for values of $d \geq 0$ is now conservative and limited. Especially for larger samples, a large and otherwise highly significant effect will be brought much closer to its true value. The primary drawback to using the WCA from equation Y is that it adds further noise to the point estimate. While this is statistically problematic, from an epistemological viewpoint it could be useful to reduce the confidence of researchers in their "significant" findings that are unlikely to replicate anyway.
(5) Summary
WCAs for single-test results are much more challenging than those for repeated test measurements due to a lack of measured information. I have proposed a simple formula (Y) that can be used on all statistically significant results, requiring only the observed effect size, type-I error rate, sample size, and noise estimate. For small to medium sample sizes this deflator leads to additional noise in the point estimate, but may have a humbling effect on researcher confidence. While it has no doubt been expressed before, I also derive the analytical relationship between power and effect size bias (X).
As a final motivating example consider the well-regarded paper Labor Market Returns to Early Childhood Stimulation by Gertler et. al (2013) that even included a Nobel-prize winning economist in its author list. They claim to show that an educational intervention using randomized control trial improved long-run income earnings by 42%. This is a huge increase.
These findings show that psychosocial stimulation early in childhood in disadvantaged settings can have substantial effects on labormarket outcomes and reduce later life inequality.
Notice that the authors state: "The results ... show that the impact on earnings remains large and statistically significant". As this post has discussed, it is quite likely that they should have said these results are statistically significant because they were large.
Table 3 in the paper shows a p-value for 0.01 a sample size of 105, implying that $1 - \Phi(0.42/(1.9/\sqrt{105})) \approx 0.01$, with a z-score of around 2.7. The code below shows that if there were no effect, then the average statistically significant effect that would be observed would be 0.372. However because the result (42%) is in the 80th percentile of such a distribution, the adjustment procedure suggests removing 15% off of the point estimate. Using a WCA adjustment for this paper reduces the findings to 27%, which is still quite high and respectable. I hope this post will help to spread the word about the importance of understanding and addressing the winner's curse in applied statistics research.
End of explanation
"""
|
machinelearningnanodegree/stanford-cs231 | solutions/levin/assignment1/two_layer_net.ipynb | mit | # A bit of setup
import numpy as np
import matplotlib.pyplot as plt
from cs231n.classifiers.neural_net import TwoLayerNet
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
def rel_error(x, y):
    """Return the maximum elementwise relative error between x and y."""
    diff = np.abs(x - y)
    scale = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(diff / scale)
"""
Explanation: Implementing a Neural Network
In this exercise we will develop a neural network with fully-connected layers to perform classification, and test it out on the CIFAR-10 dataset.
End of explanation
"""
# Create a small net and some toy data to check your implementations.
# Note that we set the random seed for repeatable experiments.
input_size = 4
hidden_size = 10
num_classes = 3
num_inputs = 5

def init_toy_model():
    """Build a tiny TwoLayerNet with a fixed seed so runs are repeatable."""
    np.random.seed(0)
    return TwoLayerNet(input_size, hidden_size, num_classes, std=1e-1)

def init_toy_data():
    """Generate a deterministic toy design matrix and label vector."""
    np.random.seed(1)
    labels = np.array([0, 1, 2, 2, 1])
    inputs = 10 * np.random.randn(num_inputs, input_size)
    return inputs, labels
# Instantiate the toy network and the toy data used by the checks below.
net = init_toy_model()
X, y = init_toy_data()
"""
Explanation: We will use the class TwoLayerNet in the file cs231n/classifiers/neural_net.py to represent instances of our network. The network parameters are stored in the instance variable self.params where keys are string parameter names and values are numpy arrays. Below, we initialize toy data and a toy model that we will use to develop your implementation.
End of explanation
"""
# Forward pass on the toy data (no labels -> loss() returns raw class scores),
# compared against reference scores from a known-correct implementation.
scores = net.loss(X)
print 'Your scores:'
print scores
print
print 'correct scores:'
correct_scores = np.asarray([
  [-0.81233741, -1.27654624, -0.70335995],
  [-0.17129677, -1.18803311, -0.47310444],
  [-0.51590475, -1.01354314, -0.8504215 ],
  [-0.15419291, -0.48629638, -0.52901952],
  [-0.00618733, -0.12435261, -0.15226949]])
print correct_scores
print

# The difference should be very small. We get < 1e-7
print 'Difference between your scores and correct scores:'
print np.sum(np.abs(scores - correct_scores))
"""
Explanation: Forward pass: compute scores
Open the file cs231n/classifiers/neural_net.py and look at the method TwoLayerNet.loss. This function is very similar to the loss functions you have written for the SVM and Softmax exercises: It takes the data and weights and computes the class scores, the loss, and the gradients on the parameters.
Implement the first part of the forward pass which uses the weights and biases to compute the scores for all inputs.
End of explanation
"""
# With labels supplied, loss() returns (loss, grads); check the loss value
# against the reference for this seed/regularization.
loss, _ = net.loss(X, y, reg=0.1)
correct_loss = 1.30378789133

# should be very small, we get < 1e-12
print 'Difference between your loss and correct loss:'
print np.sum(np.abs(loss - correct_loss))
"""
Explanation: Forward pass: compute loss
In the same function, implement the second part that computes the data and regularizaion loss.
End of explanation
"""
from cs231n.gradient_check import eval_numerical_gradient

# Use numeric gradient checking to check your implementation of the backward pass.
# If your implementation is correct, the difference between the numeric and
# analytic gradients should be less than 1e-8 for each of W1, W2, b1, and b2.
loss, grads = net.loss(X, y, reg=0.1)

# these should all be less than 1e-8 or so
for param_name in grads:
    # Treat the loss as a function of one parameter array at a time and
    # compare the finite-difference gradient with the analytic one.
    f = lambda W: net.loss(X, y, reg=0.1)[0]
    param_grad_num = eval_numerical_gradient(f, net.params[param_name], verbose=False)
    print '%s max relative error: %e' % (param_name, rel_error(param_grad_num, grads[param_name]))
"""
Explanation: Backward pass
Implement the rest of the function. This will compute the gradient of the loss with respect to the variables W1, b1, W2, and b2. Now that you (hopefully!) have a correctly implemented forward pass, you can debug your backward pass using a numeric gradient check:
End of explanation
"""
# Train on the toy data with SGD; final loss should fall below ~0.2
# if train() is implemented correctly.
net = init_toy_model()
stats = net.train(X, y, X, y,
            learning_rate=1e-1, reg=1e-5,
            num_iters=100, verbose=False)

print 'Final training loss: ', stats['loss_history'][-1]

# plot the loss history
plt.plot(stats['loss_history'])
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.title('Training Loss history')
plt.show()
"""
Explanation: Train the network
To train the network we will use stochastic gradient descent (SGD), similar to the SVM and Softmax classifiers. Look at the function TwoLayerNet.train and fill in the missing sections to implement the training procedure. This should be very similar to the training procedure you used for the SVM and Softmax classifiers. You will also have to implement TwoLayerNet.predict, as the training process periodically performs prediction to keep track of accuracy over time while the network trains.
Once you have implemented the method, run the code below to train a two-layer network on toy data. You should achieve a training loss less than 0.2.
End of explanation
"""
from cs231n.data_utils import load_CIFAR10
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the two-layer neural net classifier. These are the same steps as
    we used for the SVM, but condensed to a single function.
    """
    # Load the raw CIFAR-10 data
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Carve a validation split out of the tail of the training set, then
    # truncate the training and test sets to the requested sizes.
    val_mask = range(num_training, num_training + num_validation)
    X_val, y_val = X_train[val_mask], y_train[val_mask]
    train_mask = range(num_training)
    X_train, y_train = X_train[train_mask], y_train[train_mask]
    test_mask = range(num_test)
    X_test, y_test = X_test[test_mask], y_test[test_mask]

    # Zero-center every split using the training-set mean image
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image

    # Flatten each 32x32x3 image into a single row vector
    X_train = X_train.reshape(num_training, -1)
    X_val = X_val.reshape(num_validation, -1)
    X_test = X_test.reshape(num_test, -1)

    return X_train, y_train, X_val, y_val, X_test, y_test
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test = get_CIFAR10_data()
# Sanity-check the shapes of every split.
print 'Train data shape: ', X_train.shape
print 'Train labels shape: ', y_train.shape
print 'Validation data shape: ', X_val.shape
print 'Validation labels shape: ', y_val.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
"""
Explanation: Load the data
Now that you have implemented a two-layer network that passes gradient checks and works on toy data, it's time to load up our favorite CIFAR-10 data so we can use it to train a classifier on a real dataset.
End of explanation
"""
# Network dimensions: CIFAR-10 images are 32x32x3 pixels, 10 classes.
input_size = 32 * 32 * 3
hidden_size = 50
num_classes = 10
net = TwoLayerNet(input_size, hidden_size, num_classes)

# Train the network
stats = net.train(X_train, y_train, X_val, y_val,
            num_iters=1000, batch_size=200,
            learning_rate=1e-4, learning_rate_decay=0.95,
            reg=0.5, verbose=True)

# Predict on the validation set
val_acc = (net.predict(X_val) == y_val).mean()
print 'Validation accuracy: ', val_acc
"""
Explanation: Train a network
To train our network we will use SGD with momentum. In addition, we will adjust the learning rate with an exponential learning rate schedule as optimization proceeds; after each epoch, we will reduce the learning rate by multiplying it by a decay rate.
End of explanation
"""
# Plot the loss function and train / validation accuracies
plt.subplot(2, 1, 1)
plt.plot(stats['loss_history'])
plt.title('Loss history')
plt.xlabel('Iteration')
plt.ylabel('Loss')

plt.subplot(2, 1, 2)
plt.plot(stats['train_acc_history'], label='train')
plt.plot(stats['val_acc_history'], label='val')
plt.title('Classification accuracy history')
plt.xlabel('Epoch')
plt.ylabel('Classification accuracy')  # fixed typo: was 'Clasification'
plt.show()

from cs231n.vis_utils import visualize_grid

# Visualize the weights of the network
def show_net_weights(net):
    """Render the first-layer weights of `net` as a grid of 32x32x3 tiles."""
    W1 = net.params['W1']
    # Reshape the (3072, H) weight columns back into H images of 32x32x3
    W1 = W1.reshape(32, 32, 3, -1).transpose(3, 0, 1, 2)
    plt.imshow(visualize_grid(W1, padding=3).astype('uint8'))
    plt.gca().axis('off')
    plt.show()

show_net_weights(net)
"""
Explanation: Debug the training
With the default parameters we provided above, you should get a validation accuracy of about 0.29 on the validation set. This isn't very good.
One strategy for getting insight into what's wrong is to plot the loss function and the accuracies on the training and validation sets during optimization.
Another strategy is to visualize the weights that were learned in the first layer of the network. In most neural networks trained on visual data, the first layer weights typically show some visible structure when visualized.
End of explanation
"""
#################################################################################
# TODO: Tune hyperparameters using the validation set. Store your best trained #
# model in best_net.                                                           #
#                                                                              #
# To help debug your network, it may help to use visualizations similar to the #
# ones we used above; these visualizations will have significant qualitative   #
# differences from the ones we saw above for the poorly tuned network.         #
#                                                                              #
# Tweaking hyperparameters by hand can be fun, but you might find it useful to #
# write code to sweep through possible combinations of hyperparameters         #
# automatically like we did on the previous exercises.                         #
#################################################################################
input_size = 32 * 32 * 3
hidden_size = 200
num_classes = 10

best_val = -1  # best validation accuracy seen so far

# Train the network
num_iters = 1800
batch_size=200

# hyperparameters: grid to sweep (a single point here after tuning)
learning_rates = [8e-4]
regs = [5e-2]
learning_rate_decays = [0.95]

# Exhaustive grid search; keep the model with the best validation accuracy.
for lr in learning_rates:
    for reg in regs:
        for decay in learning_rate_decays:
            print("learning rate: {}, regulation: {}, decay: {}".format(lr, reg, decay))
            net = TwoLayerNet(input_size, hidden_size, num_classes)
            net.train(X_train, y_train, X_val, y_val,
                num_iters=num_iters,
                batch_size=batch_size,
                learning_rate=lr,
                learning_rate_decay= decay,
                reg=reg,
                verbose=False)
            # Predict on the validation set
            val_acc = (net.predict(X_val) == y_val).mean()
            train_acc = (net.predict(X_train) == y_train).mean()
            if val_acc > best_val:
                best_net = net
                best_val = val_acc
            print 'Train accuracy:{}, Validation accuracy:{}'.format(train_acc, val_acc)
print 'Best accuracy:{}'.format(best_val)
#################################################################################
#                               END OF YOUR CODE                                #
#################################################################################

# visualize the weights of the best network
show_net_weights(best_net)
"""
Explanation: Tune your hyperparameters
What's wrong?. Looking at the visualizations above, we see that the loss is decreasing more or less linearly, which seems to suggest that the learning rate may be too low. Moreover, there is no gap between the training and validation accuracy, suggesting that the model we used has low capacity, and that we should increase its size. On the other hand, with a very large model we would expect to see more overfitting, which would manifest itself as a very large gap between the training and validation accuracy.
Tuning. Tuning the hyperparameters and developing intuition for how they affect the final performance is a large part of using Neural Networks, so we want you to get a lot of practice. Below, you should experiment with different values of the various hyperparameters, including hidden layer size, learning rate, numer of training epochs, and regularization strength. You might also consider tuning the learning rate decay, but you should be able to get good performance using the default value.
Approximate results. You should be aim to achieve a classification accuracy of greater than 48% on the validation set. Our best network gets over 52% on the validation set.
Experiment: You goal in this exercise is to get as good of a result on CIFAR-10 as you can, with a fully-connected Neural Network. For every 1% above 52% on the Test set we will award you with one extra bonus point. Feel free implement your own techniques (e.g. PCA to reduce dimensionality, or adding dropout, or adding features to the solver, etc.).
End of explanation
"""
# Final evaluation: accuracy of the best model on the held-out test set.
test_acc = (best_net.predict(X_test) == y_test).mean()
print 'Test accuracy: ', test_acc
"""
Explanation: Run on the test set
When you are done experimenting, you should evaluate your final trained network on the test set; you should get above 48%.
We will give you extra bonus point for every 1% of accuracy above 52%.
End of explanation
"""
|
joshnsolomon/phys202-2015-work | assignments/assignment09/IntegrationEx02.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy import integrate
"""
Explanation: Integration Exercise 2
Imports
End of explanation
"""
def integrand(x, a):
    """Value of 1 / (x^2 + a^2) at x."""
    return 1.0 / (a**2 + x**2)

def integral_approx(a):
    """Numerical value of the integral over [0, inf) via scipy quad."""
    # Use the args keyword argument to feed extra arguments to your integrand
    value, _err = integrate.quad(integrand, 0, np.inf, args=(a,))
    return value

def integral_exact(a):
    """Closed-form value: pi / (2 a)."""
    return 0.5 * np.pi / a

print("Numerical: ", integral_approx(1.0))
print("Exact : ", integral_exact(1.0))
assert True # leave this cell to grade the above integral
"""
Explanation: Indefinite integrals
Here is a table of definite integrals. Many of these integrals has a number of parameters $a$, $b$, etc.
Find five of these integrals and perform the following steps:
Typeset the integral using LateX in a Markdown cell.
Define an integrand function that computes the value of the integrand.
Define an integral_approx funciton that uses scipy.integrate.quad to peform the integral.
Define an integral_exact function that computes the exact value of the integral.
Call and print the return value of integral_approx and integral_exact for one set of parameters.
Here is an example to show what your solutions should look like:
Example
Here is the integral I am performing:
$$ I = \int_0^\infty \frac{dx}{x^2 + a^2} = \frac{\pi}{2a} $$
End of explanation
"""
def integrand1(x, p):
    """Value of sin(p x)^2 / x^2 at x."""
    return np.sin(p*x)**2 / x**2

def integral_approx1(p):
    """Numerically integrate integrand1 over [0, inf)."""
    # args forwards the extra parameter p to the integrand
    result = integrate.quad(integrand1, 0, np.inf, args=(p,))
    return result[0]

def integral_exact1(p):
    """Closed form: p * pi / 2."""
    return 0.5 * np.pi * p

print("Numerical: ", integral_approx1(1.0))
print("Exact : ", integral_exact1(1.0))
assert True # leave this cell to grade the above integral
"""
Explanation: Integral 1
$$ I_1 = \int_{0}^{\infty} \frac{\sin ^{2}px}{x^{2}}\ dx=\frac{\pi p}{2} $$
End of explanation
"""
def integrand2(x):
    """Value of x / (e^x - 1) at x."""
    return x / (np.exp(x) - 1)

def integral_approx2():
    """Numerically integrate integrand2 over [0, inf)."""
    result = integrate.quad(integrand2, 0, np.inf)
    return result[0]

def integral_exact2():
    """Closed form: pi^2 / 6 (the Basel sum)."""
    return np.pi**2 / 6

print("Numerical: ", integral_approx2())
print("Exact : ", integral_exact2())
assert True # leave this cell to grade the above integral
"""
Explanation: Integral 2
$$ I_2 = \int_0^\infty \frac {x}{e^{x}-1}\ dx= \frac {\pi^2}{6} $$
End of explanation
"""
def integrand3(x, a):
    """Value of 1 / sqrt(a^2 - x^2) at x."""
    return 1.0 / ((a**2 - x**2) ** 0.5)

def integral_approx3(a):
    """Numerically integrate integrand3 over [0, a]."""
    # args forwards the parameter a to the integrand; quad copes with the
    # integrable endpoint singularity at x = a
    result = integrate.quad(integrand3, 0, a, args=(a,))
    return result[0]

def integral_exact3(a):
    """Closed form: pi / 2, independent of a."""
    return np.pi / 2

print("Numerical: ", integral_approx3(17))
print("Exact : ", integral_exact3(17))
assert True # leave this cell to grade the above integral
"""
Explanation: Integral 3
$$ I_3 = \int_0^a \frac{dx}{\sqrt{a^{2}-x^{2}}}=\frac{\pi }{2} $$
End of explanation
"""
def integrand4(x, m, a):
    """Value of x sin(m x) / (x^2 + a^2) at x."""
    return (x*np.sin(m*x)) / (x**2 + a**2)

def integral_approx4(m, a):
    """Numerically integrate integrand4 over [0, inf)."""
    # args forwards the parameters (m, a) to the integrand; note the
    # integrand is oscillatory, so quad may warn about slow convergence
    result = integrate.quad(integrand4, 0, np.inf, args=(m, a,))
    return result[0]

def integral_exact4(m, a):
    """Closed form: (pi / 2) e^{-m a}."""
    return (np.pi/2) * np.exp(-1*m*a)

print("Numerical: ", integral_approx4(.001,.001))
print("Exact : ", integral_exact4(.001,.001))
"""
Explanation: Integral 4
$$ I_4 =\int_0^\infty \frac{x \sin mx}{x^2+a^2}\ dx=\frac{\pi}{2}e^{-ma} $$
End of explanation
"""
def integrand5(x):
    """Value of the Gaussian e^{-x^2} at x."""
    return np.exp(-1*(x**2))

def integral_approx5():
    """Numerically integrate the Gaussian over the whole real line."""
    result = integrate.quad(integrand5, -np.inf, np.inf)
    return result[0]

def integral_exact5():
    """Closed form: sqrt(pi)."""
    return np.pi ** 0.5

print("Numerical: ", integral_approx5())
print("Exact : ", integral_exact5())
assert True # leave this cell to grade the above integral
"""
Explanation: Integral 5
$$ I_5 = \int_{-\infty}^\infty e^{-x^2}\,dx=\sqrt{\pi} $$
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.24/_downloads/0a1bad60270bfbdeeea274fcca0015d2/multidict_reweighted_tfmxne.ipynb | bsd-3-clause | # Author: Mathurin Massias <mathurin.massias@gmail.com>
# Yousra Bekhti <yousra.bekhti@gmail.com>
# Daniel Strohmeier <daniel.strohmeier@tu-ilmenau.de>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD-3-Clause
import os.path as op
import mne
from mne.datasets import somato
from mne.inverse_sparse import tf_mixed_norm, make_stc_from_dipoles
from mne.viz import plot_sparse_source_estimates
print(__doc__)
"""
Explanation: Compute iterative reweighted TF-MxNE with multiscale time-frequency dictionary
The iterative reweighted TF-MxNE solver is a distributed inverse method
based on the TF-MxNE solver, which promotes focal (sparse) sources
:footcite:StrohmeierEtAl2015. The benefits of this approach are that:
it is spatio-temporal without assuming stationarity (source properties
can vary over time),
activations are localized in space, time, and frequency in one step,
the solver uses non-convex penalties in the TF domain, which results in a
solution less biased towards zero than when simple TF-MxNE is used,
using a multiscale dictionary allows to capture short transient
activations along with slower brain waves :footcite:BekhtiEtAl2016.
End of explanation
"""
# BIDS-style paths to the raw somatosensory recording and the precomputed
# forward solution shipped with the somato dataset.
data_path = somato.data_path()
subject = '01'
task = 'somato'
raw_fname = op.join(data_path, 'sub-{}'.format(subject), 'meg',
                    'sub-{}_task-{}_meg.fif'.format(subject, task))
fwd_fname = op.join(data_path, 'derivatives', 'sub-{}'.format(subject),
                    'sub-{}_task-{}-fwd.fif'.format(subject, task))

# Read evoked
raw = mne.io.read_raw_fif(raw_fname)
raw.pick_types(meg=True, eog=True, stim=True)
events = mne.find_events(raw, stim_channel='STI 014')

# Epoch around each stimulus; drop trials with large gradiometer/EOG artifacts.
reject = dict(grad=4000e-13, eog=350e-6)
event_id, tmin, tmax = dict(unknown=1), -0.5, 0.5
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, reject=reject,
                    baseline=(None, 0))
# Average epochs into an evoked response and keep 0-200 ms post-stimulus.
evoked = epochs.average()
evoked.crop(tmin=0.0, tmax=0.2)

# Compute noise covariance matrix
# (estimated from the pre-stimulus baseline, hence tmax=0)
cov = mne.compute_covariance(epochs, rank='info', tmax=0.)
del epochs, raw  # free memory; only evoked/cov are needed below

# Handling forward solution
forward = mne.read_forward_solution(fwd_fname)
"""
Explanation: Load somatosensory MEG data
End of explanation
"""
# Regularization strength and TF-domain sparsity trade-off.
alpha, l1_ratio = 20, 0.05
# Source orientation constraint and depth weighting.
loose, depth = 0.9, 1.

# Use a multiscale time-frequency dictionary
# (two STFT window sizes capture both transients and slower waves)
wsize, tstep = [4, 16], [2, 4]

# Number of reweighting iterations for the non-convex penalty.
n_tfmxne_iter = 10
# Compute TF-MxNE inverse solution with dipole output
dipoles, residual = tf_mixed_norm(
    evoked, forward, cov, alpha=alpha, l1_ratio=l1_ratio,
    n_tfmxne_iter=n_tfmxne_iter, loose=loose,
    depth=depth, tol=1e-3,
    wsize=wsize, tstep=tstep, return_as_dipoles=True,
    return_residual=True)
"""
Explanation: Run iterative reweighted multidict TF-MxNE solver
End of explanation
"""
# Convert the sparse dipole fits into a source estimate and visualize it.
stc = make_stc_from_dipoles(dipoles, forward['src'])
plot_sparse_source_estimates(
    forward['src'], stc, bgcolor=(1, 1, 1), opacity=0.1,
    fig_name=f"irTF-MxNE (cond {evoked.comment})")
"""
Explanation: Generate stc from dipoles
End of explanation
"""
# Use the same y-limits for both plots so the residual is directly comparable
# to the evoked response.
ylim = dict(grad=[-300, 300])
evoked.copy().pick_types(meg='grad').plot(
    titles=dict(grad='Evoked Response: Gradiometers'), ylim=ylim)
residual.copy().pick_types(meg='grad').plot(
    titles=dict(grad='Residuals: Gradiometers'), ylim=ylim)
"""
Explanation: Show the evoked response and the residual for gradiometers
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/introduction_to_tensorflow/solutions/what_if_mortgage.ipynb | apache-2.0 | import sys
python_version = sys.version_info[0]
print("Python Version: ", python_version)
!pip3 install witwidget
import pandas as pd
import numpy as np
import witwidget
from witwidget.notebook.visualization import WitWidget, WitConfigBuilder
"""
Explanation: LABXX: What-if Tool: Model Interpretability Using Mortgage Data
Learning Objectives
Create a What-if Tool visualization
What-if Tool exploration using the XGBoost Model
Introduction
This notebook shows how to use the What-if Tool (WIT) on a deployed Cloud AI Platform model. The What-If Tool provides an easy-to-use interface for expanding understanding of black-box classification and regression ML models. With the plugin, you can perform inference on a large set of examples and immediately visualize the results in a variety of ways. Additionally, examples can be edited manually or programmatically and re-run through the model in order to see the results of the changes. It contains tooling for investigating model performance and fairness over subsets of a dataset. The purpose of the tool is to give people a simple, intuitive, and powerful way to explore and investigate trained ML models through a visual interface with absolutely no code required.
Extreme Gradient Boosting (XGBoost) is a decision-tree-based ensemble Machine Learning algorithm that uses a gradient boosting framework. In prediction problems involving unstructured data (images, text, etc.) artificial neural networks tend to outperform all other algorithms or frameworks. However, when it comes to small-to-medium structured/tabular data, decision tree based algorithms are considered best-in-class right now. Please see the chart below for the evolution of tree-based algorithms over the years.
You don't need your own cloud project to run this notebook.
UPDATE LINK BEFORE PRODUCTION : Each learning objective will correspond to a #TODO in the student lab notebook) -- try to complete that notebook first before reviewing this solution notebook.
Set up environment variables and load necessary libraries
We will start by importing the necessary libraries for this lab.
End of explanation
"""
# Download our Pandas dataframe and our test features and labels
!gsutil cp gs://mortgage_dataset_files/data.pkl .
!gsutil cp gs://mortgage_dataset_files/x_test.npy .
!gsutil cp gs://mortgage_dataset_files/y_test.npy .
"""
Explanation: Loading the mortgage test dataset
The model we'll be exploring here is a binary classification model built with XGBoost and trained on a mortgage dataset. It predicts whether or not a mortgage application will be approved. In this section we'll:
Download some test data from Cloud Storage and load it into a numpy array + Pandas DataFrame
Preview the features for our model in Pandas
End of explanation
"""
# Load the preprocessed feature DataFrame and preview its rows and dtypes.
features = pd.read_pickle('data.pkl')
features.head()
features.info()
"""
Explanation: Preview the Features
Preview the features from our model as a pandas DataFrame
End of explanation
"""
# Load the held-out test features and labels saved as NumPy arrays.
x_test = np.load('x_test.npy')
y_test = np.load('y_test.npy')
"""
Explanation: Load the test features and labels into numpy arrays
Developing machine learning models in Python often requires the use of NumPy arrays. Recall that NumPy, which stands for Numerical Python, is a library consisting of multidimensional array objects and a collection of routines for processing those arrays. NumPy arrays are efficient data structures for working with data in Python, and machine learning models like those in the scikit-learn library, and deep learning models like those in the Keras library, expect input data in the format of NumPy arrays and make predictions in the format of NumPy arrays. As such, it is common to need to save NumPy arrays to file. Note that the data info reveals the following datatypes dtypes: float64(8), int16(1), int8(1), uint8(34) -- and no strings or "objects". So, let's now load the features and labels into numpy arrays.
End of explanation
"""
print(x_test)
"""
Explanation: Let's take a look at the contents of the 'x_test.npy' file. You can see the "array" structure.
End of explanation
"""
test_examples = np.hstack((x_test,y_test.reshape(-1,1)))
"""
Explanation: Combine the features and labels into one array for the What-if Tool
Note that the numpy.hstack() function is used to stack the sequence of input arrays horizontally (i.e. column wise) to make a single array. In the following example, the numpy matrix is reshaped into a vector using the reshape function with .reshape((-1, 1) to convert the array into a single column matrix.
End of explanation
"""
# ******** DO NOT RUN THIS CELL ********
# Reference template only: shows the config you would use for your own
# deployed model.  Fill in your project/model/version values.
# TODO 1
PROJECT_ID = 'YOUR_PROJECT_ID'
MODEL_NAME = 'YOUR_MODEL_NAME'
VERSION_NAME = 'YOUR_VERSION_NAME'
TARGET_FEATURE = 'mortgage_status'
LABEL_VOCAB = ['denied', 'approved']

# TODO 1a
# NOTE(review): `adjust_prediction` is defined in a later cell, so this
# template cell cannot be executed as-is.
config_builder = (WitConfigBuilder(test_examples.tolist(), features.columns.tolist() + ['mortgage_status'])
  .set_ai_platform_model(PROJECT_ID, MODEL_NAME, VERSION_NAME, adjust_prediction=adjust_prediction)
  .set_target_feature(TARGET_FEATURE)
  .set_label_vocab(LABEL_VOCAB))
"""
Explanation: Using the What-if Tool to interpret our model
With our test examples ready, we can now connect our model to the What-if Tool using the WitWidget. To use the What-if Tool with Cloud AI Platform, we need to send it:
* A Python list of our test features + ground truth labels
* Optionally, the names of our columns
* Our Cloud project, model, and version name (we've created a public one for you to play around with)
See the next cell for some exploration ideas in the What-if Tool.
Create a What-if Tool visualization
This prediction adjustment function is needed as this xgboost model's prediction returns just a score for the positive class of the binary classification, whereas the What-If Tool expects a list of scores for each class (in this case, both the negative class and the positive class).
NOTE: The WIT may take a minute to load. While it is loading, review the parameters that are defined in the next cell, BUT NOT RUN IT, it is simply for reference.
End of explanation
"""
# TODO 1b
def adjust_prediction(pred):
    """Expand a positive-class score into [P(denied), P(approved)] for WIT."""
    negative = 1 - pred
    return [negative, pred]
# Wire the test examples and the public demo CAIP model into the What-if Tool
# and render the interactive widget.
config_builder = (WitConfigBuilder(test_examples.tolist(), features.columns.tolist() + ['mortgage_status'])
  .set_ai_platform_model('wit-caip-demos', 'xgb_mortgage', 'v1', adjust_prediction=adjust_prediction)
  .set_target_feature('mortgage_status')
  .set_label_vocab(['denied', 'approved']))
WitWidget(config_builder, height=800)
"""
Explanation: Run this cell to load the WIT config builder. NOTE: The WIT may take a minute to load
End of explanation
"""
|
tpin3694/tpin3694.github.io | regex/match_any_character.ipynb | mit | # Load regex package
import re
"""
Explanation: Title: Match Any Character
Slug: match_any_character
Summary: Match Any Character
Date: 2016-05-01 12:00
Category: Regex
Tags: Basics
Authors: Chris Albon
Based on: Regular Expressions Cookbook
Preliminaries
End of explanation
"""
# Create a variable containing a text string
text = 'The quick brown fox jumped over the lazy brown bear.'
"""
Explanation: Create some text
End of explanation
"""
# Find anything with a 'T' and then the next two characters
# ('.' matches any single character except a newline)
re.findall(r'T..', text)
"""
Explanation: Apply regex
End of explanation
"""
|
GoogleCloudPlatform/asl-ml-immersion | notebooks/introduction_to_tensorflow/solutions/3_keras_sequential_api_vertex.ipynb | apache-2.0 | import datetime
import os
import shutil
import numpy as np
import pandas as pd
import tensorflow as tf
from google.cloud import aiplatform
from matplotlib import pyplot as plt
from tensorflow import keras
from tensorflow.keras.callbacks import TensorBoard
from tensorflow.keras.layers import Dense, DenseFeatures
from tensorflow.keras.models import Sequential
print(tf.__version__)
%matplotlib inline
"""
Explanation: Introducing the Keras Sequential API
Learning Objectives
1. Build a DNN model using the Keras Sequential API
1. Learn how to use feature columns in a Keras model
1. Learn how to train a model with Keras
1. Learn how to save/load, and deploy a Keras model on GCP
1. Learn how to deploy and make predictions with the Keras model
Introduction
The Keras sequential API allows you to create Tensorflow models layer-by-layer. This is useful for building most kinds of machine learning models but it does not allow you to create models that share layers, re-use layers or have multiple inputs or outputs.
In this lab, we'll see how to build a simple deep neural network model using the Keras sequential api and feature columns. Once we have trained our model, we will deploy it using Vertex AI and see how to call our model for online prediction.
Start by importing the necessary libraries for this lab.
End of explanation
"""
!ls -l ../data/*.csv
!head ../data/taxi*.csv
"""
Explanation: Load raw data
We will use the taxifare dataset, using the CSV files that we created in the first notebook of this sequence. Those files have been saved into ../data.
End of explanation
"""
CSV_COLUMNS = [
"fare_amount",
"pickup_datetime",
"pickup_longitude",
"pickup_latitude",
"dropoff_longitude",
"dropoff_latitude",
"passenger_count",
"key",
]
LABEL_COLUMN = "fare_amount"
DEFAULTS = [[0.0], ["na"], [0.0], [0.0], [0.0], [0.0], [0.0], ["na"]]
UNWANTED_COLS = ["pickup_datetime", "key"]
def features_and_labels(row_data):
    """Split a parsed CSV row dict into (features, label).

    Removes the label column and the columns listed in ``UNWANTED_COLS``
    from the row in place; the remaining entries are the model features.
    """
    label = row_data.pop(LABEL_COLUMN)
    for column in UNWANTED_COLS:
        row_data.pop(column)
    return row_data, label
def create_dataset(pattern, batch_size=1, mode="eval"):
    """Build a tf.data pipeline from the CSV files matching ``pattern``.

    In ``"train"`` mode the dataset is shuffled and repeated indefinitely;
    in any other mode it is read through once, unshuffled.
    """
    dataset = tf.data.experimental.make_csv_dataset(
        pattern, batch_size, CSV_COLUMNS, DEFAULTS
    )
    dataset = dataset.map(features_and_labels)
    if mode == "train":
        dataset = dataset.shuffle(buffer_size=1000).repeat()
    # Prefetch one batch so input preprocessing overlaps with training.
    return dataset.prefetch(1)
"""
Explanation: Use tf.data to read the CSV files
We wrote these functions for reading data from the csv files above in the previous notebook.
End of explanation
"""
INPUT_COLS = [
"pickup_longitude",
"pickup_latitude",
"dropoff_longitude",
"dropoff_latitude",
"passenger_count",
]
# Create input layer of feature columns
feature_columns = {
colname: tf.feature_column.numeric_column(colname) for colname in INPUT_COLS
}
"""
Explanation: Build a simple keras DNN model
We will use feature columns to connect our raw data to our keras DNN model. Feature columns make it easy to perform common types of feature engineering on your raw data. For example, you can one-hot encode categorical data, create feature crosses, embeddings and more. We'll cover these in more detail later in the course, but if you want to a sneak peak browse the official TensorFlow feature columns guide.
In our case we won't do any feature engineering. However, we still need to create a list of feature columns to specify the numeric values which will be passed on to our model. To do this, we use tf.feature_column.numeric_column()
We use a python dictionary comprehension to create the feature columns for our model, which is just an elegant alternative to a for loop.
End of explanation
"""
# Build a keras DNN model using Sequential API
model = Sequential(
[
DenseFeatures(feature_columns=feature_columns.values()),
Dense(units=32, activation="relu", name="h1"),
Dense(units=8, activation="relu", name="h2"),
Dense(units=1, activation="linear", name="output"),
]
)
"""
Explanation: Next, we create the DNN model. The Sequential model is a linear stack of layers and when building a model using the Sequential API, you configure each layer of the model in turn. Once all the layers have been added, you compile the model.
End of explanation
"""
# Custom evaluation metric: root mean squared error
def rmse(y_true, y_pred):
    """Return the root mean squared error between labels and predictions."""
    squared_error = tf.square(y_pred - y_true)
    return tf.sqrt(tf.reduce_mean(squared_error))
# Compile the keras model
model.compile(optimizer="adam", loss="mse", metrics=[rmse, "mse"])
"""
Explanation: Next, to prepare the model for training, you must configure the learning process. This is done using the compile method. The compile method takes three arguments:
An optimizer. This could be the string identifier of an existing optimizer (such as rmsprop or adagrad), or an instance of the Optimizer class.
A loss function. This is the objective that the model will try to minimize. It can be the string identifier of an existing loss function from the Losses class (such as categorical_crossentropy or mse), or it can be a custom objective function.
A list of metrics. For any machine learning problem you will want a set of metrics to evaluate your model. A metric could be the string identifier of an existing metric or a custom metric function.
We will add an additional custom metric called rmse to our list of metrics which will return the root mean square error.
End of explanation
"""
TRAIN_BATCH_SIZE = 1000
NUM_TRAIN_EXAMPLES = 10000 * 5 # training dataset will repeat, wrap around
NUM_EVALS = 50 # how many times to evaluate
NUM_EVAL_EXAMPLES = 10000 # enough to get a reasonable sample
trainds = create_dataset(
pattern="../data/taxi-train*", batch_size=TRAIN_BATCH_SIZE, mode="train"
)
evalds = create_dataset(
pattern="../data/taxi-valid*", batch_size=1000, mode="eval"
).take(NUM_EVAL_EXAMPLES // 1000)
"""
Explanation: Train the model
To train your model, Keras provides two functions that can be used:
1. .fit() for training a model for a fixed number of epochs (iterations on a dataset).
2. .train_on_batch() runs a single gradient update on a single batch of data.
The .fit() function works for various formats of data such as Numpy array, list of Tensors tf.data and Python generators. The .train_on_batch() method is for more fine-grained control over training and accepts only a single batch of data.
Our create_dataset function above generates batches of training examples, so we can use .fit.
We start by setting up some parameters for our training job and create the data generators for the training and validation data.
We refer you to the blog post ML Design Pattern #3: Virtual Epochs for further details on why we express the training in terms of NUM_TRAIN_EXAMPLES and NUM_EVALS and why, in this training code, the number of epochs is really equal to the number of evaluations we perform.
End of explanation
"""
%%time
steps_per_epoch = NUM_TRAIN_EXAMPLES // (TRAIN_BATCH_SIZE * NUM_EVALS)
LOGDIR = "./taxi_trained"
history = model.fit(
x=trainds,
steps_per_epoch=steps_per_epoch,
epochs=NUM_EVALS,
validation_data=evalds,
callbacks=[TensorBoard(LOGDIR)],
)
"""
Explanation: There are various arguments you can set when calling the .fit method. Here x specifies the input data which in our case is a tf.data dataset returning a tuple of (inputs, targets). The steps_per_epoch parameter is used to mark the end of training for a single epoch. Here we are training for NUM_EVALS epochs. Lastly, for the callback argument we specify a Tensorboard callback so we can inspect Tensorboard after training.
End of explanation
"""
model.summary()
"""
Explanation: High-level model evaluation
Once we've run data through the model, we can call .summary() on the model to get a high-level summary of our network. We can also plot the training and evaluation curves for the metrics we computed above.
End of explanation
"""
RMSE_COLS = ["rmse", "val_rmse"]
pd.DataFrame(history.history)[RMSE_COLS].plot()
LOSS_COLS = ["loss", "val_loss"]
pd.DataFrame(history.history)[LOSS_COLS].plot()
"""
Explanation: Running .fit (or .fit_generator) returns a History object which collects all the events recorded during training. Similar to Tensorboard, we can plot the training and validation curves for the model loss and rmse by accessing these elements of the History object.
End of explanation
"""
model.predict(
x={
"pickup_longitude": tf.convert_to_tensor([-73.982683]),
"pickup_latitude": tf.convert_to_tensor([40.742104]),
"dropoff_longitude": tf.convert_to_tensor([-73.983766]),
"dropoff_latitude": tf.convert_to_tensor([40.755174]),
"passenger_count": tf.convert_to_tensor([3.0]),
},
steps=1,
)
"""
Explanation: Making predictions with our model
To make predictions with our trained model, we can call the predict method, passing to it a dictionary of values. The steps parameter determines the total number of steps before declaring the prediction round finished. Here since we have just one example, we set steps=1 (setting steps=None would also work). Note, however, that if x is a tf.data dataset or a dataset iterator, and steps is set to None, predict will run until the input dataset is exhausted.
End of explanation
"""
OUTPUT_DIR = "./export/savedmodel"
shutil.rmtree(OUTPUT_DIR, ignore_errors=True)
TIMESTAMP = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
EXPORT_PATH = os.path.join(OUTPUT_DIR, TIMESTAMP)
tf.saved_model.save(model, EXPORT_PATH) # with default serving function
!saved_model_cli show \
--tag_set serve \
--signature_def serving_default \
--dir {EXPORT_PATH}
!find {EXPORT_PATH}
os.environ['EXPORT_PATH'] = EXPORT_PATH
"""
Explanation: Export and deploy our model
Of course, making individual predictions is not realistic, because we can't expect client code to have a model object in memory. For others to use our trained model, we'll have to export our model to a file, and expect client code to instantiate the model from that exported file.
We'll export the model to a TensorFlow SavedModel format. Once we have a model in this format, we have lots of ways to "serve" the model, from a web application, from JavaScript, from mobile applications, etc.
End of explanation
"""
PROJECT = !gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT = PROJECT[0]
BUCKET = PROJECT
REGION = "us-central1"
MODEL_DISPLAYNAME = f"taxifare-{TIMESTAMP}"
print(f"MODEL_DISPLAYNAME: {MODEL_DISPLAYNAME}")
# from https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers
SERVING_CONTAINER_IMAGE_URI = (
"us-docker.pkg.dev/vertex-ai/prediction/tf2-cpu.2-3:latest"
)
os.environ["BUCKET"] = BUCKET
os.environ["REGION"] = REGION
%%bash
# Create GCS bucket if it doesn't exist already...
exists=$(gsutil ls -d | grep -w gs://${BUCKET}/)
if [ -n "$exists" ]; then
echo -e "Bucket exists, let's not recreate it."
else
echo "Creating a new GCS bucket."
gsutil mb -l ${REGION} gs://${BUCKET}
echo "\nHere are your current buckets:"
gsutil ls
fi
!gsutil cp -R $EXPORT_PATH gs://$BUCKET/$MODEL_DISPLAYNAME
uploaded_model = aiplatform.Model.upload(
display_name=MODEL_DISPLAYNAME,
artifact_uri=f"gs://{BUCKET}/{MODEL_DISPLAYNAME}",
serving_container_image_uri=SERVING_CONTAINER_IMAGE_URI,
)
MACHINE_TYPE = "n1-standard-2"
endpoint = uploaded_model.deploy(
machine_type=MACHINE_TYPE,
accelerator_type=None,
accelerator_count=None,
)
instance = {
"pickup_longitude": -73.982683,
"pickup_latitude": 40.742104,
"dropoff_longitude": -73.983766,
"dropoff_latitude": 40.755174,
"passenger_count": 3.0,
}
endpoint.predict([instance])
"""
Explanation: Deploy our model to Vertex AI
Finally, we will deploy our trained model to Vertex AI and see how we can make online predictions.
End of explanation
"""
endpoint.undeploy_all()
"""
Explanation: Cleanup
When deploying a model to an endpoint for online prediction, the minimum min-replica-count is 1, and it is charged per node hour. So let's delete the endpoint to reduce unnecessary charges. Before we can delete the endpoint, we first undeploy all attached models...
End of explanation
"""
endpoint.delete()
"""
Explanation: ...then delete the endpoint.
End of explanation
"""
|
mbbrodie/fmri_reconstruction | tools/miyawaki_eigenbrain.ipynb | mit | # Some basic imports
import time
import sys
import eigenbrain
"""
Explanation: Reconstruction of visual stimuli from Miyawaki et al. 2008
This example reproduces the experiment presented in
Visual image reconstruction from human brain activity
using a combination of multiscale local image decoders
<http://www.cell.com/neuron/abstract/S0896-6273%2808%2900958-6>_,
Miyawaki, Y., Uchida, H., Yamashita, O., Sato, M. A.,
Morito, Y., Tanabe, H. C., ... & Kamitani, Y. (2008).
Neuron, 60(5), 915-929.
It reconstructs 10x10 binary images from functional MRI data. Random images
are used as training set and structured images are used for reconstruction.
The code is a bit elaborate as the example uses, as the original article,
a multiscale prediction on the images seen by the subject.
See also
:ref:sphx_glr_auto_examples_02_decoding_plot_miyawaki_encoding.py for a
encoding approach for the same dataset.
End of explanation
"""
from nilearn import datasets
sys.stderr.write("Fetching dataset...")
t0 = time.time()
miyawaki_dataset = datasets.fetch_miyawaki2008()
# print basic information on the dataset
print('First functional nifti image (4D) is located at: %s' %
miyawaki_dataset.func[0]) # 4D data
X_random_filenames = miyawaki_dataset.func[12:]
X_figure_filenames = miyawaki_dataset.func[:12]
y_random_filenames = miyawaki_dataset.label[12:]
y_figure_filenames = miyawaki_dataset.label[:12]
y_shape = (10, 10)
sys.stderr.write(" Done (%.2fs).\n" % (time.time() - t0))
"""
Explanation: First we load the Miyawaki dataset
End of explanation
"""
import numpy as np
from nilearn.input_data import MultiNiftiMasker
np.set_printoptions(threshold=np.inf)
sys.stderr.write("Preprocessing data...")
t0 = time.time()
X_train, eig_vectors, col_means, col_std_devs = eigenbrain.create_eigenbrains(X_random_filenames, None, None)
X_test, n_blocks, shape = eigenbrain.load_and_flatten(X_figure_filenames)
orig_flat = X_test[0]
print("X_test file shape: " + str(shape))
X_test = eigenbrain.mean_std_normalize(X_test, col_means, col_std_devs)
norm_flat = X_test[0]
X_test = eigenbrain.project_matrix_onto_eigspace(X_test, eig_vectors)
new = X_test[0]
recreated = eigenbrain.recreate_blocks2(X_test[0], eig_vectors, None)
print("Subtraction:")
#print(np.subtract(norm_flat, recreated))
print(np.sum(np.subtract(norm_flat, recreated)))
print(str(orig_flat.shape) + " " + str(norm_flat.shape) + " " + str(recreated.shape))
affine = eigenbrain.get_affine(X_figure_filenames)
orig_block = eigenbrain.unflatten_blocks(orig_flat.reshape((1,122880)), (64,64,30,1))
norm_block = eigenbrain.unflatten_blocks(norm_flat.reshape((1,122880)), (64,64,30,1))
recreated_block = eigenbrain.unflatten_blocks(recreated, (64,64,30,1))
eigenbrain.save_nii(orig_block, affine, "original_block.nii.gz")
eigenbrain.save_nii(norm_block, affine, "normalized_block.nii.gz")
eigenbrain.save_nii(recreated_block, affine, "recreated_from_eigspace_block.nii.gz")
from nilearn import plotting
plotting.plot_glass_brain("original_block.nii.gz", black_bg=True, title="orig")
plotting.plot_glass_brain("normalized_block.nii.gz", black_bg=True, title="normalized")
plotting.plot_glass_brain("recreated_from_eigspace_block.nii.gz", black_bg=True, title="recreated")
plotting.show()
# Load and mask fMRI data
#masker = MultiNiftiMasker(mask_img=miyawaki_dataset.mask, detrend=True,
# standardize=False)
#masker.fit()
#X_train = masker.transform(X_random_filenames)
#X_test = masker.transform(X_figure_filenames)
print("X_train shape: " + str(X_train.shape))
print("X_test shape: " + str(X_test.shape))
# We load the visual stimuli from csv files
y_train = []
for y in y_random_filenames:
y_train.append(np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
(-1,) + y_shape, order='F'))
y_test = []
for y in y_figure_filenames:
y_test.append(np.reshape(np.loadtxt(y, dtype=np.int, delimiter=','),
(-1,) + y_shape, order='F'))
#X_train = np.vstack([x[2:] for x in X_train])
y_train = np.vstack([y[:-2] for y in y_train]).astype(float)
#X_test = np.vstack([x[2:] for x in X_test])
y_test = np.vstack([y[:-2] for y in y_test]).astype(float)
n_pixels = y_train.shape[1]
n_features = X_train.shape[1]
def flatten(list_of_2d_array):
    """Ravel each 2-D array in the input into a 1-D vector, preserving order."""
    return [array.ravel() for array in list_of_2d_array]
# Build the design matrix for multiscale computation
# Matrix is squared, y_rows == y_cols
y_cols = y_shape[1]
# Original data
design_matrix = np.eye(100)
# Example of matrix used for multiscale (sum pixels vertically)
#
# 0.5 *
#
# 1 1 0 0 0 0 0 0 0 0
# 0 1 1 0 0 0 0 0 0 0
# 0 0 1 1 0 0 0 0 0 0
# 0 0 0 1 1 0 0 0 0 0
# 0 0 0 0 1 1 0 0 0 0
# 0 0 0 0 0 1 1 0 0 0
# 0 0 0 0 0 0 1 1 0 0
# 0 0 0 0 0 0 0 1 1 0
# 0 0 0 0 0 0 0 0 1 1
height_tf = (np.eye(y_cols) + np.eye(y_cols, k=1))[:y_cols - 1] * .5
width_tf = height_tf.T
yt_tall = [np.dot(height_tf, m) for m in y_train]
yt_large = [np.dot(m, width_tf) for m in y_train]
yt_big = [np.dot(height_tf, np.dot(m, width_tf)) for m in y_train]
# Add it to the training set
y_train = [np.r_[y.ravel(), t.ravel(), l.ravel(), b.ravel()]
for y, t, l, b in zip(y_train, yt_tall, yt_large, yt_big)]
y_test = np.asarray(flatten(y_test))
y_train = np.asarray(y_train)
# Remove rest period
X_train = X_train[y_train[:, 0] != -1]
y_train = y_train[y_train[:, 0] != -1]
X_test = X_test[y_test[:, 0] != -1]
y_test = y_test[y_test[:, 0] != -1]
sys.stderr.write(" Done (%.2fs).\n" % (time.time() - t0))
print("final x_train shape: " + str(X_train.shape))
print("final x_test shape: " + str(X_test.shape))
print(y_train.shape)
print(y_test.shape)
"""
Explanation: Then we prepare and mask the data
End of explanation
"""
sys.stderr.write("Training classifiers... \r")
t0 = time.time()
# OMP: Orthogonal Matching Pursuit
from sklearn.linear_model import OrthogonalMatchingPursuit as OMP
from sklearn.feature_selection import f_classif, SelectKBest
from sklearn.pipeline import Pipeline
# Create as many OMP as voxels to predict
clfs = []
n_clfs = y_train.shape[1]
for i in range(y_train.shape[1]):
sys.stderr.write("Training classifiers %03d/%d... \r" % (i + 1, n_clfs))
clf = Pipeline([('selection', SelectKBest(f_classif, 500)),
('clf', OMP(n_nonzero_coefs=10))])
clf.fit(X_train, y_train[:, i])
clfs.append(clf)
sys.stderr.write("Training classifiers %03d/%d... Done (%.2fs).\n" % (
n_clfs, n_clfs, time.time() - t0))
"""
Explanation: We define our prediction function
End of explanation
"""
sys.stderr.write("Calculating scores and outputs...")
t0 = time.time()
y_pred = []
for clf in clfs:
y_pred.append(clf.predict(X_test))
y_pred = np.asarray(y_pred).T
# We need to the multi scale reconstruction
def split_multi_scale(y, y_shape):
    """Split concatenated multiscale predictions back into the 4 image scales.

    Parameters
    ----------
    y : ndarray, shape (n_samples, yw*yh + (yw-1)*yh + yw*(yh-1) + (yw-1)*(yh-1))
        Per-sample concatenation of: the original image, the 1x2 ("tall")
        patch predictions, the 2x1 ("large") patch predictions and the
        2x2 ("big") patch predictions, each flattened.
    y_shape : tuple of (int, int)
        (yw, yh) size of the original image. Assumed square (yw == yh),
        matching the design matrices built earlier in this script.

    Returns
    -------
    (y_pred, y_pred_tall, y_pred_large, y_pred_big) : tuple of ndarrays
        Each of shape (n_samples, yw*yh): every scale mapped back to the
        original image resolution.
    """
    yw, yh = y_shape
    # Use the local image size rather than the module-level ``y_cols``
    # global the original relied on; the images are square, so yw == yh.
    n = yh

    # Cumulative column indices marking the end of each concatenated scale.
    split_index = [yw * yh]                                   # original image
    split_index.append(split_index[-1] + (yw - 1) * yh)       # tall image
    split_index.append(split_index[-1] + yw * (yh - 1))       # large image
    split_index.append(split_index[-1] + (yw - 1) * (yh - 1)) # big image
    y_preds = np.split(y, split_index, axis=1)

    # First chunk is already at the original resolution.
    y_pred = y_preds[0]

    # Pseudo-inverse of the 1x2 vertical averaging: maps a (yw-1, yh)
    # image back to (yw, yh). Border rows keep full weight (flat[0]/flat[-1]).
    height_tf_i = (np.eye(n) + np.eye(n, k=-1))[:, :n - 1] * .5
    height_tf_i.flat[0] = 1
    height_tf_i.flat[-1] = 1
    y_pred_tall = [np.dot(height_tf_i, np.reshape(m, (yw - 1, yh))).flatten()
                   for m in y_preds[1]]
    y_pred_tall = np.asarray(y_pred_tall)

    # Pseudo-inverse of the 2x1 horizontal averaging: maps a (yw, yh-1)
    # image back to (yw, yh).
    width_tf_i = (np.eye(n) + np.eye(n, k=1))[:n - 1] * .5
    width_tf_i.flat[0] = 1
    width_tf_i.flat[-1] = 1
    y_pred_large = [np.dot(np.reshape(m, (yw, yh - 1)), width_tf_i).flatten()
                    for m in y_preds[2]]
    y_pred_large = np.asarray(y_pred_large)

    # 2x2 patches: undo the horizontal then the vertical averaging.
    y_pred_big = [np.dot(np.reshape(m, (yw - 1, yh - 1)), width_tf_i)
                  for m in y_preds[3]]
    y_pred_big = [np.dot(height_tf_i, np.reshape(m, (yw - 1, yh))).flatten()
                  for m in y_pred_big]
    y_pred_big = np.asarray(y_pred_big)

    return (y_pred, y_pred_tall, y_pred_large, y_pred_big)
y_pred, y_pred_tall, y_pred_large, y_pred_big = \
split_multi_scale(y_pred, y_shape)
y_pred = (.25 * y_pred + .25 * y_pred_tall + .25 * y_pred_large
+ .25 * y_pred_big)
sys.stderr.write(" Done (%.2fs).\n" % (time.time() - t0))
"""
Explanation: Here we run the prediction: the decoding itself
End of explanation
"""
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
f1_score)
print("Scores")
print("------")
print(" - Accuracy (percent): %f" % np.mean([
accuracy_score(y_test[:, i], y_pred[:, i] > .5) for i in range(100)]))
print(" - Precision: %f" % np.mean([
precision_score(y_test[:, i], y_pred[:, i] > .5) for i in range(100)]))
print(" - Recall: %f" % np.mean([
recall_score(y_test[:, i], y_pred[:, i] > .5) for i in range(100)]))
print(" - F1-score: %f" % np.mean([
f1_score(y_test[:, i], y_pred[:, i] > .5) for i in range(100)]))
"""
Explanation: Let us quantify our prediction error
End of explanation
"""
from matplotlib import pyplot as plt
for i in range(6):
j = 10 * i
fig = plt.figure()
sp1 = plt.subplot(131)
sp1.axis('off')
plt.title('Stimulus')
sp2 = plt.subplot(132)
sp2.axis('off')
plt.title('Reconstruction')
sp3 = plt.subplot(133)
sp3.axis('off')
plt.title('Binarized')
sp1.imshow(np.reshape(y_test[j], (10, 10)), cmap=plt.cm.gray,
interpolation='nearest'),
sp2.imshow(np.reshape(y_pred[j], (10, 10)), cmap=plt.cm.gray,
interpolation='nearest'),
sp3.imshow(np.reshape(y_pred[j] > .5, (10, 10)), cmap=plt.cm.gray,
interpolation='nearest')
plt.savefig('miyawaki2008_reconstruction_%d' % i)
plt.show()
"""
Explanation: And finally, we plot six reconstructed images, to compare with
ground truth
End of explanation
"""
|
datascience-practice/data-quest | python_introduction/beginner/.ipynb_checkpoints/Dictionaries-checkpoint.ipynb | mit | # Let's parse the data from the last mission as an example.
# First, we open the wait times file from the last mission.
f = open("crime_rates.csv", 'r')
data = f.read()
rows = data.split('\n')
full_data = []
for row in rows:
split_row = row.split(",")
full_data.append(split_row)
weather_data = []
f = open("la_weather.csv", 'r')
data = f.read()
rows = data.split('\n')
full_data = []
for row in rows:
split_row = row.split(",")
weather_data.append(split_row)
print(weather_data[:10])
"""
Explanation: 2: Parsing the file
Instructions
Open "la_weather.csv", parse it, and assign the result to weather_data.
Answer
End of explanation
"""
# The "days" column in our data isn't extremely useful for our task, so we need to just grab the second column, with the weather.
# We looped over lists before, and this is how we will extract the second column.
lolist = [[1,2],[3,4],[5,6],[7,8]]
second_column = []
for item in lolist:
# Each item in lolist is a list.
# We can get just the second column value by indexing the item.
value = item[1]
second_column.append(value)
# second_column is now a list containing only values from the second column of lolist.
print(second_column)
# Let's read in our weather data again.
weather_data = []
f = open("la_weather.csv", 'r')
data = f.read()
rows = data.split('\n')
for row in rows:
split_row = row.split(",")
weather_data.append(split_row)
weather_column = []
for row in weather_data:
val = row[1]
weather_column.append(val)
print(weather_column)
"""
Explanation: 3: Getting a single column from the data
Instructions
Get all of the values in the second column and append them to weather_column.
Answer
End of explanation
"""
weather = weather_column
# In order to make it easier to use the weather column that we just parsed, we're going to automatically include it from now on.
# It's been specially added before our code runs.
# We can interact with it normally -- it's a list.
print(weather[0])
count = len(weather)
print(count)
"""
Explanation: 4: Pre-defined variables
Instructions
Loop over the weather variable, and set count equal to the number of items in weather.
Answer
End of explanation
"""
# Let's practice with some list slicing.
a = [4,5,6,7,8]
# New list containing index 2 and 3.
print(a[2:4])
# New list with no elements.
print(a[2:2])
# New list containing only index 2.
print(a[2:3])
slice_me = [7,6,4,5,6]
slice1 = slice_me[2:4]
slice2 = slice_me[1:2]
slice3 = slice_me[3:]
print(slice1, slice2, slice3)
"""
Explanation: 6: Practice slicing a list
Instructions
Assign a slice containing index 2 and 3 from slice_me to slice1. Assign a slice containing index 1 from slice_me to slice2. Assign a slice containing index 3 and 4 from slice_me to slice3.
Answer
End of explanation
"""
new_weather = weather[1:]
print(new_weather)
"""
Explanation: 7: Removing our header
Instructions
The weather data is in the weather variable. Slice the data and remove the header. The slice can end at 367. Assign the result to new_weather.
Answer
End of explanation
"""
# We can make a dictionary with curly braces.
dictionary_one = {}
# The we can add keys and values.
dictionary_one["key_one"] = 2
print(dictionary_one)
# Keys and values can be anything.
# And dictionaries can have multiple keys
dictionary_one[10] = 5
dictionary_one[5.2] = "hello"
print(dictionary_one)
dictionary_two = {
"test": 5,
10: "hello"
}
print(dictionary_two)
"""
Explanation: 9: Making a dictionary
Instructions
Assign the value 5 to the key "test" in dictionary_two. Assign the value "hello" to the key 10 in dictionary_two.
Answer
End of explanation
"""
dictionary_one = {}
dictionary_one["test"] = 10
dictionary_one["key"] = "fly"
# We can retrieve values from dictionaries with square brackets.
print(dictionary_one["test"])
print(dictionary_one["key"])
dictionary_two = {}
dictionary_two["key1"] = "high"
dictionary_two["key2"] = 10
dictionary_two["key3"] = 5.6
a, b, c = dictionary_two["key1"], dictionary_two["key2"], dictionary_two["key3"]
print(a, b, c)
"""
Explanation: 10: Indexing a dictionary
Instructions
Assign the value in "key1" in dictionary_two to a. Assign the value in "key2" in dictionary_two to b. Assign the value in "key3" in dictionary_two to c.
Answer
End of explanation
"""
# We can define dictionaries that already contain values.
# All we do is add in keys and values separated by colons.
# We have to separate pairs of keys and values with commas.
a = {"key1": 10, "key2": "indubitably", "key3": "dataquest", 3: 5.6}
# a is initialized with those keys and values, so we can access them.
print(a["key1"])
# Another example
b = {4: "robin", 5: "bluebird", 6: "sparrow"}
print(b[4])
c = {
7: "raven",
8: "goose",
9: "duck"
}
d = {
"morning": 9,
"afternoon": 14,
"evening": 19,
"night": 23
}
print(c, d)
"""
Explanation: 11: Defining a dictionary with values
Instructions
Make a dictionary c with the keys 7, 8, and 9 corresponding to the values "raven", "goose", and "duck". Make a dictionary d with the keys "morning", "afternoon", "evening", and "night" corresponding to the values 9, 14, 19, and 23 respectively.
Answer
End of explanation
"""
# We can check if values are in lists using the in statement.
the_list = [10,60,-5,8]
# This is True because 10 is in the_list
print(10 in the_list)
# This is True because -5 is in the_list
print(-5 in the_list)
# This is False because 9 isn't in the_list
print(9 in the_list)
# We can assign the results of an in statement to a variable.
# Just like any other boolean.
a = 7 in the_list
list2 = [8, 5.6, 70, 800]
c, d, e = 9 in list2, 8 in list2, -1 in list2
print(c, d, e)
"""
Explanation: 13: Testing if items are in a list
Instructions
Check if 9 is in list2, and assign the result to c. Check if 8 is in list2, and assign the result to d. Check if -1 is in list2, and assign the result to e.
Answer
End of explanation
"""
# We can check if a key is in a dictionary with the in statement.
the_dict = {"robin": "red", "cardinal": "red", "oriole": "orange", "lark": "blue"}
# This is True
print("robin" in the_dict)
# This is False
print("crow" in the_dict)
# We can also assign the boolean to a variable
a = "cardinal" in the_dict
print(a)
dict2 = {"mercury": 1, "venus": 2, "earth": 3, "mars": 4}
b = "jupiter" in dict2
c = "earth" in dict2
print(b, c)
"""
Explanation: 14: More uses for the in statement
Instructions
Check whether "jupiter" is a key in dict2 and assign the result to b. Check whether "earth" is a key in dict2 and assign the result to c.
Answer
End of explanation
"""
# The code in an else statement will be executed if the if statement boolean is False.
# This will print "Not 7!"
a = 6
# a doesn't equal 7, so this is False.
if a == 7:
print(a)
else:
print("Not 7!")
# This will print "Nintendo is the best!"
video_game = "Mario"
# video_game is "Mario", so this is True
if video_game == "Mario":
print("Nintendo is the best!")
else:
print("Sony is the best!")
season = "Spring"
if season == "Summer":
print("It's hot!")
else:
print("It might be hot!")
"""
Explanation: 16: Practicing with the else statement
Instructions
Write an if statement that prints "It's hot!" when the season is "Summer" Add an else statement to the if that prints "It might be hot!".
Answer
End of explanation
"""
# We can count how many times items appear in a list using dictionaries.
pantry = ["apple", "orange", "grape", "apple", "orange", "apple", "tomato", "potato", "grape"]
# Create an empty dictionary
pantry_counts = {}
# Loop through the whole list
for item in pantry:
# If the list item is already a key in the dictionary, then add 1 to the value of that key.
# This is because we've seen the item again, so our count goes up.
if item in pantry_counts:
pantry_counts[item] = pantry_counts[item] + 1
else:
# If the item isn't already a key in the count dictionary, then add the key, and set the value to 1.
        # We set the value to 1 because we are seeing the item, so it's occurred once already in the list.
pantry_counts[item] = 1
print(pantry_counts)
us_presidents = ["Adams", "Bush", "Clinton", "Obama", "Harrison", "Taft", "Bush", "Adams", "Wilson", "Roosevelt", "Roosevelt"]
us_president_counts = {}
for p in us_presidents:
if p not in us_president_counts:
us_president_counts[p] = 0
us_president_counts[p] += 1
print(us_president_counts)
"""
Explanation: 17: Counting with dictionaries
Instructions
Count how many times each presidential last name appears in us_presidents. Assign the counts to us_president_counts.
Answer
End of explanation
"""
|
GoogleCloudPlatform/vertex-ai-samples | notebooks/official/explainable_ai/sdk_automl_tabular_binary_classification_batch_explain.ipynb | apache-2.0 | import os
# Google Cloud Notebook
if os.path.exists("/opt/deeplearning/metadata/env_version"):
USER_FLAG = "--user"
else:
USER_FLAG = ""
! pip3 install --upgrade google-cloud-aiplatform $USER_FLAG
"""
Explanation: Vertex SDK: AutoML training tabular binary classification model for batch explanation
<table align="left">
<td>
<a href="https://colab.research.google.com/github/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_tabular_binary_classification_batch_explain.ipynb">
<img src="https://cloud.google.com/ml-engine/images/colab-logo-32px.png" alt="Colab logo"> Run in Colab
</a>
</td>
<td>
<a href="https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_tabular_binary_classification_batch_explain.ipynb">
<img src="https://cloud.google.com/ml-engine/images/github-logo-32px.png" alt="GitHub logo">
View on GitHub
</a>
</td>
<td>
<a href="https://console.cloud.google.com/vertex-ai/platform/notebooks/deploy-notebook?download_url=https://github.com/GoogleCloudPlatform/vertex-ai-samples/tree/master/notebooks/official/automl/sdk_automl_tabular_binary_classification_batch_explain.ipynb">
Open in Vertex AI Workbench
</a>
</td>
</table>
<br/><br/><br/>
Overview
This tutorial demonstrates how to use the Vertex SDK to create tabular binary classification models and do batch prediction with explanation using a Google Cloud AutoML model.
Dataset
The dataset used for this tutorial is the Bank Marketing . This dataset does not require any feature engineering. The version of the dataset you will use in this tutorial is stored in a public Cloud Storage bucket.
Objective
In this tutorial, you create an AutoML tabular binary classification model from a Python script, and send a batch prediction request with explainability using the Vertex SDK. You can alternatively create and deploy models using the gcloud command-line tool or online using the Cloud Console.
The steps performed include:
Create a Vertex Dataset resource.
Train the model.
View the model evaluation.
Make a batch prediction request with explainability.
There is one key difference between using batch prediction and using online prediction:
Prediction Service: Does an on-demand prediction for the entire set of instances (i.e., one or more data items) and returns the results in real-time.
Batch Prediction Service: Does a queued (batch) prediction for the entire set of instances in the background and stores the results in a Cloud Storage bucket when ready.
Costs
This tutorial uses billable components of Google Cloud:
Vertex AI
Cloud Storage
Learn about Vertex AI
pricing and Cloud Storage
pricing, and use the Pricing
Calculator
to generate a cost estimate based on your projected usage.
Set up your local development environment
If you are using Colab or Google Cloud Notebook, your environment already meets all the requirements to run this notebook. You can skip this step.
Otherwise, make sure your environment meets this notebook's requirements. You need the following:
The Cloud Storage SDK
Git
Python 3
virtualenv
Jupyter notebook running in a virtual environment with Python 3
The Cloud Storage guide to Setting up a Python development environment and the Jupyter installation guide provide detailed instructions for meeting these requirements. The following steps provide a condensed set of instructions:
Install and initialize the SDK.
Install Python 3.
Install virtualenv and create a virtual environment that uses Python 3.
Activate that environment and run pip3 install Jupyter in a terminal shell to install Jupyter.
Run jupyter notebook on the command line in a terminal shell to launch Jupyter.
Open this notebook in the Jupyter Notebook Dashboard.
Installation
Install the latest version of Vertex SDK for Python.
End of explanation
"""
! pip3 install -U google-cloud-storage $USER_FLAG
if os.getenv("IS_TESTING"):
! pip3 install --upgrade tensorflow $USER_FLAG
"""
Explanation: Install the latest GA version of google-cloud-storage library as well.
End of explanation
"""
import os
if not os.getenv("IS_TESTING"):
# Automatically restart kernel after installs
import IPython
app = IPython.Application.instance()
app.kernel.do_shutdown(True)
"""
Explanation: Restart the kernel
Once you've installed the additional packages, you need to restart the notebook kernel so it can find the packages.
End of explanation
"""
PROJECT_ID = "[your-project-id]" # @param {type:"string"}
if PROJECT_ID == "" or PROJECT_ID is None or PROJECT_ID == "[your-project-id]":
# Get your GCP project id from gcloud
shell_output = ! gcloud config list --format 'value(core.project)' 2>/dev/null
PROJECT_ID = shell_output[0]
print("Project ID:", PROJECT_ID)
! gcloud config set project $PROJECT_ID
"""
Explanation: Before you begin
GPU runtime
This tutorial does not require a GPU runtime.
Set up your Google Cloud project
The following steps are required, regardless of your notebook environment.
Select or create a Google Cloud project. When you first create an account, you get a $300 free credit towards your compute/storage costs.
Make sure that billing is enabled for your project.
Enable the following APIs: Vertex AI APIs, Compute Engine APIs, and Cloud Storage.
The Google Cloud SDK is already installed in Google Cloud Notebook.
Enter your project ID in the cell below. Then run the cell to make sure the
Cloud SDK uses the right project for all the commands in this notebook.
Note: Jupyter runs lines prefixed with ! as shell commands, and it interpolates Python variables prefixed with $.
End of explanation
"""
REGION = "us-central1" # @param {type: "string"}
"""
Explanation: Region
You can also change the REGION variable, which is used for operations
throughout the rest of this notebook. Below are regions supported for Vertex AI. We recommend that you choose the region closest to you.
Americas: us-central1
Europe: europe-west4
Asia Pacific: asia-east1
You may not use a multi-regional bucket for training with Vertex AI. Not all regions provide support for all Vertex AI services.
Learn more about Vertex AI regions
End of explanation
"""
from datetime import datetime
TIMESTAMP = datetime.now().strftime("%Y%m%d%H%M%S")
"""
Explanation: Timestamp
If you are in a live tutorial session, you might be using a shared test account or project. To avoid name collisions between users on resources created, you create a timestamp for each instance session, and append the timestamp onto the name of resources you create in this tutorial.
End of explanation
"""
# If you are running this notebook in Colab, run this cell and follow the
# instructions to authenticate your GCP account. This provides access to your
# Cloud Storage bucket and lets you submit training jobs and prediction
# requests.
import os
import sys
# If on Google Cloud Notebook, then don't execute this code
if not os.path.exists("/opt/deeplearning/metadata/env_version"):
if "google.colab" in sys.modules:
from google.colab import auth as google_auth
google_auth.authenticate_user()
# If you are running this notebook locally, replace the string below with the
# path to your service account key and run this cell to authenticate your GCP
# account.
elif not os.getenv("IS_TESTING"):
%env GOOGLE_APPLICATION_CREDENTIALS ''
"""
Explanation: Authenticate your Google Cloud account
If you are using Google Cloud Notebook, your environment is already authenticated. Skip this step.
If you are using Colab, run the cell below and follow the instructions when prompted to authenticate your account via oAuth.
Otherwise, follow these steps:
In the Cloud Console, go to the Create service account key page.
Click Create service account.
In the Service account name field, enter a name, and click Create.
In the Grant this service account access to project section, click the Role drop-down list. Type "Vertex" into the filter box, and select Vertex Administrator. Type "Storage Object Admin" into the filter box, and select Storage Object Admin.
Click Create. A JSON file that contains your key downloads to your local environment.
Enter the path to your service account key as the GOOGLE_APPLICATION_CREDENTIALS variable in the cell below and run the cell.
End of explanation
"""
BUCKET_NAME = "gs://[your-bucket-name]" # @param {type:"string"}
if BUCKET_NAME == "" or BUCKET_NAME is None or BUCKET_NAME == "gs://[your-bucket-name]":
BUCKET_NAME = "gs://" + PROJECT_ID + "aip-" + TIMESTAMP
"""
Explanation: Create a Cloud Storage bucket
The following steps are required, regardless of your notebook environment.
When you initialize the Vertex SDK for Python, you specify a Cloud Storage staging bucket. The staging bucket is where all the data associated with your dataset and model resources are retained across sessions.
Set the name of your Cloud Storage bucket below. Bucket names must be globally unique across all Google Cloud projects, including those outside of your organization.
End of explanation
"""
! gsutil mb -l $REGION $BUCKET_NAME
"""
Explanation: Only if your bucket doesn't already exist: Run the following cell to create your Cloud Storage bucket.
End of explanation
"""
! gsutil ls -al $BUCKET_NAME
"""
Explanation: Finally, validate access to your Cloud Storage bucket by examining its contents:
End of explanation
"""
import google.cloud.aiplatform as aip
"""
Explanation: Set up variables
Next, set up some variables used throughout the tutorial.
Import libraries and define constants
End of explanation
"""
aip.init(project=PROJECT_ID, staging_bucket=BUCKET_NAME)
"""
Explanation: Initialize Vertex SDK for Python
Initialize the Vertex SDK for Python for your project and corresponding bucket.
End of explanation
"""
IMPORT_FILE = "gs://cloud-ml-tables-data/bank-marketing.csv"
"""
Explanation: Tutorial
Now you are ready to start creating your own AutoML tabular binary classification model.
Location of Cloud Storage training data.
Now set the variable IMPORT_FILE to the location of the CSV index file in Cloud Storage.
End of explanation
"""
count = ! gsutil cat $IMPORT_FILE | wc -l
print("Number of Examples", int(count[0]))
print("First 10 rows")
! gsutil cat $IMPORT_FILE | head
heading = ! gsutil cat $IMPORT_FILE | head -n1
label_column = str(heading).split(",")[-1].split("'")[0]
print("Label Column Name", label_column)
if label_column is None:
raise Exception("label column missing")
"""
Explanation: Quick peek at your data
You will use a version of the Bank Marketing dataset that is stored in a public Cloud Storage bucket, using a CSV index file.
Start by doing a quick peek at the data. You count the number of examples by counting the number of rows in the CSV index file (wc -l) and then peek at the first few rows.
You also need for training to know the heading name of the label column, which is save as label_column. For this dataset, it is the last column in the CSV file.
End of explanation
"""
dataset = aip.TabularDataset.create(
display_name="Bank Marketing" + "_" + TIMESTAMP, gcs_source=[IMPORT_FILE]
)
print(dataset.resource_name)
"""
Explanation: Create the Dataset
Next, create the Dataset resource using the create method for the TabularDataset class, which takes the following parameters:
display_name: The human readable name for the Dataset resource.
gcs_source: A list of one or more dataset index files to import the data items into the Dataset resource.
bq_source: Alternatively, import data items from a BigQuery table into the Dataset resource.
This operation may take several minutes.
End of explanation
"""
dag = aip.AutoMLTabularTrainingJob(
display_name="bank_" + TIMESTAMP,
optimization_prediction_type="classification",
optimization_objective="minimize-log-loss",
)
print(dag)
"""
Explanation: Create and run training pipeline
To train an AutoML model, you perform two steps: 1) create a training pipeline, and 2) run the pipeline.
Create training pipeline
An AutoML training pipeline is created with the AutoMLTabularTrainingJob class, with the following parameters:
display_name: The human readable name for the TrainingJob resource.
optimization_prediction_type: The type task to train the model for.
classification: A tabular classification model.
regression: A tabular regression model.
column_transformations: (Optional): Transformations to apply to the input columns
optimization_objective: The optimization objective to minimize or maximize.
binary classification:
minimize-log-loss
maximize-au-roc
maximize-au-prc
maximize-precision-at-recall
maximize-recall-at-precision
multi-class classification:
minimize-log-loss
regression:
minimize-rmse
minimize-mae
minimize-rmsle
The instantiated object is the DAG (directed acyclic graph) for the training pipeline.
End of explanation
"""
model = dag.run(
dataset=dataset,
model_display_name="bank_" + TIMESTAMP,
training_fraction_split=0.6,
validation_fraction_split=0.2,
test_fraction_split=0.2,
budget_milli_node_hours=8000,
disable_early_stopping=False,
target_column=label_column,
)
"""
Explanation: Run the training pipeline
Next, you run the DAG to start the training job by invoking the method run, with the following parameters:
dataset: The Dataset resource to train the model.
model_display_name: The human readable name for the trained model.
training_fraction_split: The percentage of the dataset to use for training.
test_fraction_split: The percentage of the dataset to use for test (holdout data).
validation_fraction_split: The percentage of the dataset to use for validation.
target_column: The name of the column to train as the label.
budget_milli_node_hours: (optional) Maximum training time specified in unit of millihours (1000 = hour).
disable_early_stopping: If True, training may be completed before using the entire budget if the service believes it cannot further improve on the model objective measurements.
The run method when completed returns the Model resource.
The execution of the training pipeline will take up to 20 minutes.
End of explanation
"""
# Get model resource ID
models = aip.Model.list(filter="display_name=bank_" + TIMESTAMP)
# Get a reference to the Model Service client
client_options = {"api_endpoint": f"{REGION}-aiplatform.googleapis.com"}
model_service_client = aip.gapic.ModelServiceClient(client_options=client_options)
model_evaluations = model_service_client.list_model_evaluations(
parent=models[0].resource_name
)
model_evaluation = list(model_evaluations)[0]
print(model_evaluation)
"""
Explanation: Review model evaluation scores
After your model has finished training, you can review the evaluation scores for it.
First, you need to get a reference to the new model. As with datasets, you can either use the reference to the model variable you created when deployed the model or you can list all of the models in your project.
End of explanation
"""
! gsutil cat $IMPORT_FILE | head -n 1 > tmp.csv
! gsutil cat $IMPORT_FILE | tail -n 10 >> tmp.csv
! cut -d, -f1-16 tmp.csv > batch.csv
gcs_input_uri = BUCKET_NAME + "/test.csv"
! gsutil cp batch.csv $gcs_input_uri
"""
Explanation: Send a batch prediction request
Send a batch prediction to your deployed model.
Make test items
You will use synthetic data as a test data items. Don't be concerned that we are using synthetic data -- we just want to demonstrate how to make a prediction.
Make the batch input file
Now make a batch input file, which you will store in your local Cloud Storage bucket. Unlike image, video and text, the batch input file for tabular is only supported for CSV. For CSV file, you make:
The first line is the heading with the feature (fields) heading names.
Each remaining line is a separate prediction request with the corresponding feature values.
For example:
"feature_1", "feature_2". ...
value_1, value_2, ...
End of explanation
"""
batch_predict_job = model.batch_predict(
job_display_name="bank_" + TIMESTAMP,
gcs_source=gcs_input_uri,
gcs_destination_prefix=BUCKET_NAME,
instances_format="csv",
predictions_format="csv",
generate_explanation=True,
sync=False,
)
print(batch_predict_job)
"""
Explanation: Make the batch explanation request
Now that your Model resource is trained, you can make a batch prediction by invoking the batch_predict() method, with the following parameters:
job_display_name: The human readable name for the batch prediction job.
gcs_source: A list of one or more batch request input files.
gcs_destination_prefix: The Cloud Storage location for storing the batch prediction results.
instances_format: The format for the input instances, either 'csv' or 'jsonl'. Defaults to 'jsonl'.
predictions_format: The format for the output predictions, either 'csv' or 'jsonl'. Defaults to 'jsonl'.
generate_explanations: Set to True to generate explanations.
sync: If set to True, the call will block while waiting for the asynchronous batch job to complete.
End of explanation
"""
batch_predict_job.wait()
"""
Explanation: Wait for completion of batch prediction job
Next, wait for the batch job to complete. Alternatively, one can set the parameter sync to True in the batch_predict() method to block until the batch prediction job is completed.
End of explanation
"""
import tensorflow as tf
bp_iter_outputs = batch_predict_job.iter_outputs()
explanation_results = list()
for blob in bp_iter_outputs:
if blob.name.split("/")[-1].startswith("explanation"):
explanation_results.append(blob.name)
tags = list()
for explanation_result in explanation_results:
gfile_name = f"gs://{bp_iter_outputs.bucket.name}/{explanation_result}"
with tf.io.gfile.GFile(name=gfile_name, mode="r") as gfile:
for line in gfile.readlines():
print(line)
"""
Explanation: Get the explanations
Next, get the explanation results from the completed batch prediction job.
The results are written to the Cloud Storage output bucket you specified in the batch prediction request. You call the method iter_outputs() to get a list of each Cloud Storage file generated with the results. Each file contains one or more explanation requests in a CSV format:
CSV header + predicted_label
CSV row + explanation, per prediction request
End of explanation
"""
delete_all = True
if delete_all:
# Delete the dataset using the Vertex dataset object
try:
if "dataset" in globals():
dataset.delete()
except Exception as e:
print(e)
# Delete the model using the Vertex model object
try:
if "model" in globals():
model.delete()
except Exception as e:
print(e)
# Delete the endpoint using the Vertex endpoint object
try:
if "endpoint" in globals():
endpoint.delete()
except Exception as e:
print(e)
# Delete the AutoML or Pipeline training job
try:
if "dag" in globals():
dag.delete()
except Exception as e:
print(e)
# Delete the custom training job
try:
if "job" in globals():
job.delete()
except Exception as e:
print(e)
# Delete the batch prediction job using the Vertex batch prediction object
try:
if "batch_predict_job" in globals():
batch_predict_job.delete()
except Exception as e:
print(e)
# Delete the hyperparameter tuning job using the Vertex hyperparameter tuning object
try:
if "hpt_job" in globals():
hpt_job.delete()
except Exception as e:
print(e)
if "BUCKET_NAME" in globals():
! gsutil rm -r $BUCKET_NAME
"""
Explanation: Cleaning up
To clean up all Google Cloud resources used in this project, you can delete the Google Cloud
project you used for the tutorial.
Otherwise, you can delete the individual resources you created in this tutorial:
Dataset
Pipeline
Model
Endpoint
AutoML Training Job
Batch Job
Custom Job
Hyperparameter Tuning Job
Cloud Storage Bucket
End of explanation
"""
|
tpin3694/tpin3694.github.io | machine-learning/random_forest_classifier_example.ipynb | mit | # Load the library with the iris dataset
from sklearn.datasets import load_iris
# Load scikit's random forest classifier library
from sklearn.ensemble import RandomForestClassifier
# Load pandas
import pandas as pd
# Load numpy
import numpy as np
# Set random seed
np.random.seed(0)
"""
Explanation: Title: Random Forest Classifier Example
Slug: random_forest_classifier_example_scikit
Summary: random_forest_classifier_example using Scikit.
Date: 2016-09-21 12:00
Category: Machine Learning
Tags: Trees And Forests
Authors: Chris Albon
This tutorial is based on Yhat's 2013 tutorial on Random Forests in Python. If you want a good summary of the theory and uses of random forests, I suggest you check out their guide. In the tutorial below, I annotate, correct, and expand on a short code example of random forests they present at the end of the article. Specifically, I 1) update the code so it runs in the latest version of pandas and Python, 2) write detailed comments explaining what is happening in each step, and 3) expand the code in a number of ways.
Let's get started!
A Note About The Data
The data for this tutorial is famous. Called, the iris dataset, it contains four variables measuring various parts of iris flowers of three related species, and then a fourth variable with the species name. The reason it is so famous in machine learning and statistics communities is because the data requires very little preprocessing (i.e. no missing values, all features are floating numbers, etc.).
Preliminaries
End of explanation
"""
# Create an object called iris with the iris data
iris = load_iris()
# Create a dataframe with the four feature variables
df = pd.DataFrame(iris.data, columns=iris.feature_names)
# View the top 5 rows
df.head()
# Add a new column with the species names, this is what we are going to try to predict
df['species'] = pd.Categorical.from_codes(iris.target, iris.target_names)
# View the top 5 rows
df.head()
"""
Explanation: Load Data
End of explanation
"""
# Create a new column that for each row, generates a random number between 0 and 1, and
# if that value is less than or equal to .75, then sets the value of that cell as True
# and false otherwise. This is a quick and dirty way of randomly assigning some rows to
# be used as the training data and some as the test data.
df['is_train'] = np.random.uniform(0, 1, len(df)) <= .75
# View the top 5 rows
df.head()
# Create two new dataframes, one with the training rows, one with the test rows
train, test = df[df['is_train']==True], df[df['is_train']==False]
# Show the number of observations for the test and training dataframes
print('Number of observations in the training data:', len(train))
print('Number of observations in the test data:',len(test))
"""
Explanation: Create Training And Test Data
End of explanation
"""
# Create a list of the feature column's names
features = df.columns[:4]
# View features
features
# train['species'] contains the actual species names. Before we can use it,
# we need to convert each species name into a digit. So, in this case there
# are three species, which have been coded as 0, 1, or 2.
y = pd.factorize(train['species'])[0]
# View target
y
"""
Explanation: Preprocess Data
End of explanation
"""
# Create a random forest Classifier. By convention, clf means 'Classifier'
clf = RandomForestClassifier(n_jobs=2, random_state=0)
# Train the Classifier to take the training features and learn how they relate
# to the training y (the species)
clf.fit(train[features], y)
"""
Explanation: Train The Random Forest Classifier
End of explanation
"""
# Apply the Classifier we trained to the test data (which, remember, it has never seen before)
clf.predict(test[features])
"""
Explanation: Huzzah! We have done it! We have officially trained our random forest Classifier! Now let's play with it. The Classifier model itself is stored in the clf variable.
Apply Classifier To Test Data
If you have been following along, you will know we only trained our classifier on part of the data, leaving the rest out. This is, in my humble opinion, the most important part of machine learning. Why? Because by leaving out a portion of the data, we have a set of data to test the accuracy of our model!
Let's do that now.
End of explanation
"""
# View the predicted probabilities of the first 10 observations
clf.predict_proba(test[features])[0:10]
"""
Explanation: What are you looking at above? Remember that we coded each of the three species of plant as 0, 1, or 2. What the list of numbers above is showing you is what species our model predicts each plant is based on the sepal length, sepal width, petal length, and petal width. How confident is the classifier about each plant? We can see that too.
End of explanation
"""
# Create actual english names for the plants for each predicted plant class
preds = iris.target_names[clf.predict(test[features])]
# View the PREDICTED species for the first five observations
preds[0:5]
# View the ACTUAL species for the first five observations
test['species'].head()
"""
Explanation: There are three species of plant, thus [ 1. , 0. , 0. ] tells us that the classifier is certain that the plant is the first class. Taking another example, [ 0.9, 0.1, 0. ] tells us that the classifier gives a 90% probability the plant belongs to the first class and a 10% probability the plant belongs to the second class. Because 90 is greater than 10, the classifier predicts the plant is the first class.
Evaluate Classifier
Now that we have predicted the species of all plants in the test data, we can compare our predicted species with the that plant's actual species.
End of explanation
"""
# Create confusion matrix
pd.crosstab(test['species'], preds, rownames=['Actual Species'], colnames=['Predicted Species'])
"""
Explanation: That looks pretty good! At least for the first five observations. Now let's use look at all the data.
Create a confusion matrix
A confusion matrix can be, no pun intended, a little confusing to interpret at first, but it is actually very straightforward. The columns are the species we predicted for the test data and the rows are the actual species for the test data. So, if we take the top row, we can see that we predicted all 13 setosa plants in the test data perfectly. However, in the next row, we predicted 5 of the versicolor plants correctly, but mis-predicted two of the versicolor plants as virginica.
The short explanation of how to interpret a confusion matrix is: anything on the diagonal was classified correctly and anything off the diagonal was classified incorrectly.
End of explanation
"""
# View a list of the features and their importance scores
list(zip(train[features], clf.feature_importances_))
"""
Explanation: View Feature Importance
While we don't get regression coefficients like with OLS, we do get a score telling us how important each feature was in classifying. This is one of the most powerful parts of random forests, because we can clearly see that petal width was more important in classification than sepal width.
End of explanation
"""
|
xpharry/Udacity-DLFoudation | tutorials/sentiment_network/Sentiment Classification - Mini Project 4.ipynb | mit | def pretty_print_review_and_label(i):
print(labels[i] + "\t:\t" + reviews[i][:80] + "...")
g = open('reviews.txt','r') # What we know!
reviews = list(map(lambda x:x[:-1],g.readlines()))
g.close()
g = open('labels.txt','r') # What we WANT to know!
labels = list(map(lambda x:x[:-1].upper(),g.readlines()))
g.close()
len(reviews)
reviews[0]
labels[0]
"""
Explanation: Sentiment Classification & How To "Frame Problems" for a Neural Network
by Andrew Trask
Twitter: @iamtrask
Blog: http://iamtrask.github.io
What You Should Already Know
neural networks, forward and back-propagation
stochastic gradient descent
mean squared error
and train/test splits
Where to Get Help if You Need it
Re-watch previous Udacity Lectures
Leverage the recommended Course Reading Material - Grokking Deep Learning (40% Off: traskud17)
Shoot me a tweet @iamtrask
Tutorial Outline:
Intro: The Importance of "Framing a Problem"
Curate a Dataset
Developing a "Predictive Theory"
PROJECT 1: Quick Theory Validation
Transforming Text to Numbers
PROJECT 2: Creating the Input/Output Data
Putting it all together in a Neural Network
PROJECT 3: Building our Neural Network
Understanding Neural Noise
PROJECT 4: Making Learning Faster by Reducing Noise
Analyzing Inefficiencies in our Network
PROJECT 5: Making our Network Train and Run Faster
Further Noise Reduction
PROJECT 6: Reducing Noise by Strategically Reducing the Vocabulary
Analysis: What's going on in the weights?
Lesson: Curate a Dataset
End of explanation
"""
print("labels.txt \t : \t reviews.txt\n")
pretty_print_review_and_label(2137)
pretty_print_review_and_label(12816)
pretty_print_review_and_label(6267)
pretty_print_review_and_label(21934)
pretty_print_review_and_label(5297)
pretty_print_review_and_label(4998)
"""
Explanation: Lesson: Develop a Predictive Theory
End of explanation
"""
from collections import Counter
import numpy as np
# Per-word frequency counters, split by the sentiment label of the review
# the word appeared in, plus an overall total.
positive_counts = Counter()
negative_counts = Counter()
total_counts = Counter()
# Tally every whitespace-split token of every review into the counter that
# matches the review's label (and always into total_counts).
for i in range(len(reviews)):
    if(labels[i] == 'POSITIVE'):
        for word in reviews[i].split(" "):
            positive_counts[word] += 1
            total_counts[word] += 1
    else:
        for word in reviews[i].split(" "):
            negative_counts[word] += 1
            total_counts[word] += 1
# Notebook cell: displays the most common words in positive reviews.
positive_counts.most_common()
# Ratio of positive to negative occurrences for each reasonably frequent
# word (> 100 total occurrences). +1 in the denominator avoids division by
# zero for words that never appear in negative reviews.
pos_neg_ratios = Counter()
for term,cnt in list(total_counts.most_common()):
    if(cnt > 100):
        pos_neg_ratio = positive_counts[term] / float(negative_counts[term]+1)
        pos_neg_ratios[term] = pos_neg_ratio
# Convert the raw ratios to log space so positive-leaning and
# negative-leaning words are symmetric around 0. The +0.01 guards against
# log of values at/near zero for strongly negative words.
for word,ratio in pos_neg_ratios.most_common():
    if(ratio > 1):
        pos_neg_ratios[word] = np.log(ratio)
    else:
        pos_neg_ratios[word] = -np.log((1 / (ratio+0.01)))
# words most frequently seen in a review with a "POSITIVE" label
pos_neg_ratios.most_common()
# words most frequently seen in a review with a "NEGATIVE" label
list(reversed(pos_neg_ratios.most_common()))[0:30]
"""
Explanation: Project 1: Quick Theory Validation
End of explanation
"""
from IPython.display import Image
review = "This was a horrible, terrible movie."
Image(filename='sentiment_network.png')
review = "The movie was excellent"
Image(filename='sentiment_network_pos.png')
"""
Explanation: Transforming Text into Numbers
End of explanation
"""
vocab = set(total_counts.keys())
vocab_size = len(vocab)
print(vocab_size)
list(vocab)
import numpy as np
layer_0 = np.zeros((1,vocab_size))
layer_0
from IPython.display import Image
Image(filename='sentiment_network.png')
word2index = {}
for i,word in enumerate(vocab):
word2index[word] = i
word2index
def update_input_layer(review):
    """Fill the global ``layer_0`` row vector with word counts for one review.

    Relies on the module-level ``layer_0`` array and ``word2index`` mapping.
    """
    global layer_0
    # Wipe the previous review's counts before accumulating new ones.
    layer_0 *= 0
    for token in review.split(" "):
        idx = word2index[token]
        layer_0[0][idx] += 1
update_input_layer(reviews[0])
layer_0
def get_target_for_label(label):
    """Map a sentiment label string to the network's target value.

    Returns 1 for 'POSITIVE' and 0 for anything else.
    """
    return 1 if label == 'POSITIVE' else 0
labels[0]
get_target_for_label(labels[0])
labels[1]
get_target_for_label(labels[1])
"""
Explanation: Project 2: Creating the Input/Output Data
End of explanation
"""
import time
import sys
import numpy as np
# Let's tweak our network from before to model these phenomena
class SentimentNetwork:
    """A 3-layer neural network for binary sentiment classification.

    Architecture: bag-of-words input layer -> linear hidden layer (no
    nonlinearity) -> sigmoid output. Trained one review at a time with
    plain stochastic gradient descent.
    """
    def __init__(self, reviews,labels,hidden_nodes = 10, learning_rate = 0.1):
        # set our random number generator
        # Fixed seed so weight initialization (and therefore training) is
        # reproducible across runs.
        np.random.seed(1)
        self.pre_process_data(reviews, labels)
        self.init_network(len(self.review_vocab),hidden_nodes, 1, learning_rate)
    def pre_process_data(self, reviews, labels):
        """Build the word and label vocabularies plus their index lookups."""
        review_vocab = set()
        for review in reviews:
            for word in review.split(" "):
                review_vocab.add(word)
        self.review_vocab = list(review_vocab)
        label_vocab = set()
        for label in labels:
            label_vocab.add(label)
        self.label_vocab = list(label_vocab)
        self.review_vocab_size = len(self.review_vocab)
        self.label_vocab_size = len(self.label_vocab)
        # word -> input-layer index
        self.word2index = {}
        for i, word in enumerate(self.review_vocab):
            self.word2index[word] = i
        # label -> index (built for symmetry; training uses
        # get_target_for_label instead)
        self.label2index = {}
        for i, label in enumerate(self.label_vocab):
            self.label2index[label] = i
    def init_network(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
        """Allocate weight matrices and the reusable input-layer buffer."""
        # Set number of nodes in input, hidden and output layers.
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        # Initialize weights
        # Input->hidden weights start at zero; hidden->output weights are
        # drawn from a normal distribution scaled by node count.
        self.weights_0_1 = np.zeros((self.input_nodes,self.hidden_nodes))
        self.weights_1_2 = np.random.normal(0.0, self.output_nodes**-0.5,
                                            (self.hidden_nodes, self.output_nodes))
        self.learning_rate = learning_rate
        # Single-row buffer reused (zeroed and refilled) for every review.
        self.layer_0 = np.zeros((1,input_nodes))
    def update_input_layer(self,review):
        """Fill layer_0 with word counts for one review (unknown words skipped)."""
        # clear out previous state, reset the layer to be all 0s
        self.layer_0 *= 0
        for word in review.split(" "):
            if(word in self.word2index.keys()):
                self.layer_0[0][self.word2index[word]] += 1
    def get_target_for_label(self,label):
        """Return 1 for 'POSITIVE', 0 otherwise."""
        if(label == 'POSITIVE'):
            return 1
        else:
            return 0
    def sigmoid(self,x):
        """Logistic sigmoid activation."""
        return 1 / (1 + np.exp(-x))
    def sigmoid_output_2_derivative(self,output):
        """Derivative of the sigmoid expressed in terms of its output."""
        return output * (1 - output)
    def train(self, training_reviews, training_labels):
        """Run one pass of per-example SGD over the training set,
        printing a running accuracy/progress line to stdout."""
        assert(len(training_reviews) == len(training_labels))
        correct_so_far = 0
        start = time.time()
        for i in range(len(training_reviews)):
            review = training_reviews[i]
            label = training_labels[i]
            #### Implement the forward pass here ####
            ### Forward pass ###
            # Input Layer
            self.update_input_layer(review)
            # Hidden layer
            layer_1 = self.layer_0.dot(self.weights_0_1)
            # Output layer
            layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
            #### Implement the backward pass here ####
            ### Backward pass ###
            # TODO: Output error
            layer_2_error = layer_2 - self.get_target_for_label(label) # Output layer error is the difference between desired target and actual output.
            layer_2_delta = layer_2_error * self.sigmoid_output_2_derivative(layer_2)
            # TODO: Backpropagated error
            layer_1_error = layer_2_delta.dot(self.weights_1_2.T) # errors propagated to the hidden layer
            layer_1_delta = layer_1_error # hidden layer gradients - no nonlinearity so it's the same as the error
            # TODO: Update the weights
            self.weights_1_2 -= layer_1.T.dot(layer_2_delta) * self.learning_rate # update hidden-to-output weights with gradient descent step
            self.weights_0_1 -= self.layer_0.T.dot(layer_1_delta) * self.learning_rate # update input-to-hidden weights with gradient descent step
            # A prediction within 0.5 of the 0/1 target counts as correct.
            if(np.abs(layer_2_error) < 0.5):
                correct_so_far += 1
            # NOTE(review): divides by elapsed time; on the first iteration
            # i == 0 so the numerator is 0 — relies on elapsed time being
            # nonzero to avoid 0/0. TODO confirm acceptable.
            reviews_per_second = i / float(time.time() - start)
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(training_reviews)))[:4] + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] + " #Correct:" + str(correct_so_far) + " #Trained:" + str(i+1) + " Training Accuracy:" + str(correct_so_far * 100 / float(i+1))[:4] + "%")
            if(i % 2500 == 0):
                print("")
    def test(self, testing_reviews, testing_labels):
        """Predict every test review and print a running accuracy line."""
        correct = 0
        start = time.time()
        for i in range(len(testing_reviews)):
            pred = self.run(testing_reviews[i])
            if(pred == testing_labels[i]):
                correct += 1
            reviews_per_second = i / float(time.time() - start)
            # NOTE(review): the "% #Correct:" fragment below appears to have a
            # stray '%' compared with train()'s format — cosmetic only.
            sys.stdout.write("\rProgress:" + str(100 * i/float(len(testing_reviews)))[:4] \
                             + "% Speed(reviews/sec):" + str(reviews_per_second)[0:5] \
                             + "% #Correct:" + str(correct) + " #Tested:" + str(i+1) + " Testing Accuracy:" + str(correct * 100 / float(i+1))[:4] + "%")
    def run(self, review):
        """Forward-propagate one review and return 'POSITIVE' or 'NEGATIVE'."""
        # Input Layer
        # Lower-cased here because the vocabulary was built from the raw
        # (already lower-case) review text.
        self.update_input_layer(review.lower())
        # Hidden layer
        layer_1 = self.layer_0.dot(self.weights_0_1)
        # Output layer
        layer_2 = self.sigmoid(layer_1.dot(self.weights_1_2))
        # Threshold the sigmoid output at 0.5.
        if(layer_2[0] > 0.5):
            return "POSITIVE"
        else:
            return "NEGATIVE"
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.1)
# evaluate our model before training (just to show how horrible it is)
mlp.test(reviews[-1000:],labels[-1000:])
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.01)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
mlp = SentimentNetwork(reviews[:-1000],labels[:-1000], learning_rate=0.001)
# train the network
mlp.train(reviews[:-1000],labels[:-1000])
"""
Explanation: Project 3: Building a Neural Network
Start with your neural network from the last chapter
3 layer neural network
no non-linearity in hidden layer
use our functions to create the training data
create a "pre_process_data" function to create vocabulary for our training data generating functions
modify "train" to train over the entire corpus
Where to Get Help if You Need it
Re-watch previous week's Udacity Lectures
Chapters 3-5 - Grokking Deep Learning - (40% Off: traskud17)
End of explanation
"""
from IPython.display import Image
Image(filename='sentiment_network.png')
def update_input_layer(review):
    """Fill the global input layer with word counts for one review."""
    global layer_0
    # Zero the layer in place so counts from the previous review
    # do not accumulate.
    layer_0 *= 0
    tokens = review.split(" ")
    for token in tokens:
        index = word2index[token]
        layer_0[0][index] += 1
# Encode the first review into the (global) input layer and inspect it.
update_input_layer(reviews[0])
layer_0
# Count word frequencies in the first review to see which words dominate
# the input representation.
review_counter = Counter()
for word in reviews[0].split(" "):
    review_counter[word] += 1
review_counter.most_common()
"""
Explanation: Understanding Neural Noise
End of explanation
"""
|
zakandrewking/cobrapy | documentation_builder/building_model.ipynb | lgpl-2.1 | from __future__ import print_function
from cobra import Model, Reaction, Metabolite
# Best practise: SBML compliant IDs
model = Model('example_model')
reaction = Reaction('3OAS140')
reaction.name = '3 oxoacyl acyl carrier protein synthase n C140 '
reaction.subsystem = 'Cell Envelope Biosynthesis'
reaction.lower_bound = 0. # This is the default
reaction.upper_bound = 1000. # This is the default
"""
Explanation: Building a Model
This simple example demonstrates how to create a model, create a reaction, and then add the reaction to the model.
We'll use the '3OAS140' reaction from the STM_1.0 model:
1.0 malACP[c] + 1.0 h[c] + 1.0 ddcaACP[c] $\rightarrow$ 1.0 co2[c] + 1.0 ACP[c] + 1.0 3omrsACP[c]
First, create the model and reaction.
End of explanation
"""
ACP_c = Metabolite(
'ACP_c',
formula='C11H21N2O7PRS',
name='acyl-carrier-protein',
compartment='c')
omrsACP_c = Metabolite(
'3omrsACP_c',
formula='C25H45N2O9PRS',
name='3-Oxotetradecanoyl-acyl-carrier-protein',
compartment='c')
co2_c = Metabolite('co2_c', formula='CO2', name='CO2', compartment='c')
malACP_c = Metabolite(
'malACP_c',
formula='C14H22N2O10PRS',
name='Malonyl-acyl-carrier-protein',
compartment='c')
h_c = Metabolite('h_c', formula='H', name='H', compartment='c')
ddcaACP_c = Metabolite(
'ddcaACP_c',
formula='C23H43N2O8PRS',
name='Dodecanoyl-ACP-n-C120ACP',
compartment='c')
"""
Explanation: We need to create metabolites as well. If we were using an existing model, we could use Model.get_by_id to get the appropriate Metabolite objects instead.
End of explanation
"""
reaction.add_metabolites({
malACP_c: -1.0,
h_c: -1.0,
ddcaACP_c: -1.0,
co2_c: 1.0,
ACP_c: 1.0,
omrsACP_c: 1.0
})
reaction.reaction # This gives a string representation of the reaction
"""
Explanation: Adding metabolites to a reaction requires using a dictionary of the metabolites and their stoichiometric coefficients. A group of metabolites can be added all at once, or they can be added one at a time.
End of explanation
"""
reaction.gene_reaction_rule = '( STM2378 or STM1197 )'
reaction.genes
"""
Explanation: The gene_reaction_rule is a boolean representation of the gene requirements for this reaction to be active as described in Schellenberger et al 2011 Nature Protocols 6(9):1290-307. We will assign the gene reaction rule string, which will automatically create the corresponding gene objects.
End of explanation
"""
print('%i reactions initially' % len(model.reactions))
print('%i metabolites initially' % len(model.metabolites))
print('%i genes initially' % len(model.genes))
"""
Explanation: At this point in time, the model is still empty
End of explanation
"""
model.add_reactions([reaction])
# Now there are things in the model
print('%i reaction' % len(model.reactions))
print('%i metabolites' % len(model.metabolites))
print('%i genes' % len(model.genes))
"""
Explanation: We will add the reaction to the model, which will also add all associated metabolites and genes
End of explanation
"""
# Iterate through the the objects in the model
print("Reactions")
print("---------")
for x in model.reactions:
print("%s : %s" % (x.id, x.reaction))
print("")
print("Metabolites")
print("-----------")
for x in model.metabolites:
print('%9s : %s' % (x.id, x.formula))
print("")
print("Genes")
print("-----")
for x in model.genes:
associated_ids = (i.id for i in x.reactions)
print("%s is associated with reactions: %s" %
(x.id, "{" + ", ".join(associated_ids) + "}"))
"""
Explanation: We can iterate through the model objects to observe the contents
End of explanation
"""
model.objective = '3OAS140'
"""
Explanation: Last we need to set the objective of the model. Here, we just want this to be the maximization of the flux in the single reaction we added and we do this by assigning the reaction's identifier to the objective property of the model.
End of explanation
"""
print(model.objective.expression)
print(model.objective.direction)
"""
Explanation: The created objective is a symbolic algebraic expression and we can examine it by printing it
End of explanation
"""
|
kowey/attelo | doc/tut_parser2.ipynb | gpl-3.0 | from __future__ import print_function
from os import path as fp
from attelo.io import (load_multipack)
CORPUS_DIR = 'example-corpus'
PREFIX = fp.join(CORPUS_DIR, 'tiny')
# load the data into a multipack
mpack = load_multipack(PREFIX + '.edus',
                       PREFIX + '.pairings',
                       PREFIX + '.features.sparse',
                       PREFIX + '.features.sparse.vocab',
                       verbose=True)
# dict views cannot be indexed or sliced on Python 3, so materialise the
# keys once (this also works on Python 2); hold out the first document
# for testing and train on the rest.
keys = list(mpack.keys())
test_dpack = mpack[keys[0]]
train_mpack = {k: mpack[k] for k in keys[1:]}
train_dpacks = list(train_mpack.values())
train_targets = [x.target for x in train_dpacks]
def print_results(dpack):
    """Summarise parser results, one line per EDU pairing."""
    for idx, pair in enumerate(dpack.pairings):
        left, right = pair
        expected = dpack.get_label(dpack.target[idx])
        predicted = dpack.get_label(dpack.graph.prediction[idx])
        print(idx, left.id, right.id, '\t|', predicted, '\twanted:', expected)
"""
Explanation: Parsers (part 2)
In the previous tutorial, we saw a couple of basic parsers, and also introduced the notion of a pipeline parser. It turns out that some of the parsers we introduced and had taken for granted are themselves pipelines. In this tutorial we will break these pipelines down and explore some of finer grained tasks that a parser can do.
Preliminaries
We begin with the same multipacks and the same breakdown into a training and test set
End of explanation
"""
import numpy as np
from attelo.learning import (SklearnAttachClassifier)
from attelo.parser.attach import (AttachClassifierWrapper)
from sklearn.linear_model import (LogisticRegression)
def print_results_verbose(dpack):
    """Print detailed parse results (attach score, label scores, label)."""
    for idx, pair in enumerate(dpack.pairings):
        left, right = pair
        attach_score = "{:.2f}".format(dpack.graph.attach[idx])
        label_scores = np.around(dpack.graph.label[idx, :], decimals=2)
        predicted = dpack.get_label(dpack.graph.prediction[idx])
        print(idx, left.id, right.id, '\t|', attach_score, label_scores, predicted)
# Wrap a scikit-learn logistic regression as an attachment classifier;
# the wrapper only fills in attachment weights (no labels, no decoding).
learner = SklearnAttachClassifier(LogisticRegression())
parser1a = AttachClassifierWrapper(learner)
parser1a.fit(train_dpacks, train_targets)
dpack = parser1a.transform(test_dpack)
print_results_verbose(dpack)
"""
Explanation: Breaking a parser down (attach)
If we examine the source code for the attach pipeline, we can see that it is in fact a two step pipeline combining the attach classifier wrapper and a decoder. So let's see what happens when we run the attach classifier by itself.
End of explanation
"""
from attelo.decoding.baseline import (LocalBaseline)
# Decode the weighted datapack into predictions, attaching pairs whose
# attachment weight exceeds the 0.4 threshold.
decoder = LocalBaseline(threshold=0.4)
dpack2 = decoder.transform(dpack)
print_results_verbose(dpack2)
"""
Explanation: Parsers and weighted datapacks
In the output above, we have dug a little bit deeper into our datapacks. Recall above that a parser translates datapacks to datapacks. The output of a parser is always a weighted datapack., ie. a datapack whose 'graph'
attribute is set to a record containing
attachment weights
label weights
predictions (like target values)
So called "standalone" parsers will take an unweighted datapack (graph == None) and produce a weighted datapack with predictions set. But some parsers tend to be more useful as part of a pipeline:
the attach classfier wrapper fills the attachment weights
likewise the label classifier wrapper assigns label weights
a decoder assigns predictions from weights
We see the first case in the above output. Notice that the attachments have been set to values from a model, but the label weights and predictions are assigned default values.
NB: all parsers should do "something sensible" in the face of all inputs. This typically consists of assuming the default weight of 1.0 for unweighted datapacks.
Decoders
Having now transformed a datapack with the attach classifier wrapper, let's now pass its results to a decoder. In fact, let's try a couple of different decoders and compare the output.
End of explanation
"""
from attelo.parser.pipeline import (Pipeline)
# this is basically attelo.parser.attach.AttachPipeline
# (attachment-weight scoring followed by decoding)
parser1 = Pipeline(steps=[('attach weights', parser1a),
                          ('decoder', decoder)])
parser1.fit(train_dpacks, train_targets)
print_results_verbose(parser1.transform(test_dpack))
"""
Explanation: The result above is what we get if we run a decoder on the output of the attach classifier wrapper. This is in fact, the the same thing as running the attachment pipeline. We can define a similar pipeline below.
End of explanation
"""
from attelo.learning.local import (SklearnLabelClassifier)
from attelo.parser.label import (LabelClassifierWrapper,
SimpleLabeller)
from attelo.parser.full import (AttachTimesBestLabel)
learner_l = SklearnLabelClassifier(LogisticRegression())
# Variant 1: decode on attachment scores first, then assign labels to
# the decoded structure.
print("Post-labelling")
print("--------------")
parser = Pipeline(steps=[('attach weights', parser1a),
                         ('decoder', decoder),
                         ('labels', SimpleLabeller(learner_l))])
parser.fit(train_dpacks, train_targets)
print_results_verbose(parser.transform(test_dpack))
print()
# Variant 2: combine attachment and best-label scores first, then decode
# on the joint scores.
print("Joint")
print("-----")
parser = Pipeline(steps=[('attach weights', parser1a),
                         ('label weights', LabelClassifierWrapper(learner_l)),
                         ('attach times label', AttachTimesBestLabel()),
                         ('decoder', decoder)])
parser.fit(train_dpacks, train_targets)
print_results_verbose(parser.transform(test_dpack))
"""
Explanation: Mixing and matching
Being able to break parsing down to this level of granularity lets us experiment with parsing techniques by composing different parsing substeps in different ways. For example, below, we write two slightly different pipelines, one which sets labels separately from decoding, and one which combines attach and label scores before handing them off to a decoder.
End of explanation
"""
|
shumway/srt_bootcamp | SymPyExample.ipynb | mit | import sympy as sp
sp.init_printing()
"""
Explanation: SymPy
The SymPy package is useful for symbolic algebra, much like the commercial software Mathematica.
We won't make much use of SymPy during the boot camp, but it is definitely useful to know about
for mathematics courses.
End of explanation
"""
# Declare x and y as symbolic variables.
x, y = sp.symbols("x y")
expression = (x+y)**4
expression
# Expand the binomial power into a sum of monomials.
sp.expand(expression)
expression = 8*x**2 + 26*x*y + 15*y**2
expression
# Factor the quadratic back into a product of linear terms.
sp.factor(expression)
# Subtracting these terms leaves a simpler quadratic ...
expression - 20 *x*y - 14*y**2
# ... which also factors cleanly.
sp.factor(expression - 20*x*y - 14*y**2)
"""
Explanation: Symbols and Expressions
We'll define x and y to be sympy symbols, then do some symbolic algebra.
End of explanation
"""
expression
# Compile the symbolic expression into a fast numeric (numpy) function.
f = sp.lambdify((x,y), expression, 'numpy')
f(3,4)
# Sanity check: evaluate the polynomial by hand at x=3, y=4.
8 * 3**2 + 26 * 3 * 4 + 15 * 4**2
"""
Explanation: Lambdify: Making python functions from sympy expressions
End of explanation
"""
expression = 5*x**2 * sp.sin(3*x**3)
expression
expression.diff(x)
expression = sp.cos(x)
expression.integrate(x)
expression.integrate((x, 0, sp.pi / 2))
"""
Explanation: Calculus
You can use sympy to perform symbolic integration or differentiation.
End of explanation
"""
deriv = sp.Derivative(expression)
deriv
deriv.doit()
inte = sp.Integral(expression, (x, 0, sp.pi / 2))
inte
inte.doit()
"""
Explanation: You can also create unevalated integrals or derivatives. These can later be evaluated with their doit methods.
End of explanation
"""
|
tritemio/multispot_paper | usALEX - Corrections - Leakage fit.ipynb | mit | #bsearch_ph_sel = 'all-ph'
#bsearch_ph_sel = 'Dex'
bsearch_ph_sel = 'DexDem'
data_file = 'results/usALEX-5samples-PR-raw-%s.csv' % bsearch_ph_sel
"""
Explanation: Leakage coefficient fit
This notebook estracts the leakage coefficient from the set of 5 us-ALEX smFRET measurements.
What it does?
For each measurement, we fit the donor-only peak position of the uncorrected proximity ratio histogram. These values are saved in a .txt file. This notebook just performs a weighted mean where the weights are the number of bursts in each measurement.
This notebook read data from the file:
End of explanation
"""
from __future__ import division
import numpy as np
import pandas as pd
from IPython.display import display
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
%config InlineBackend.figure_format='retina' # for hi-dpi displays
sns.set_style('whitegrid')
palette = ('Paired', 10)
sns.palplot(sns.color_palette(*palette))
sns.set_palette(*palette)
# Load the per-sample fit results; one row per measurement sample.
data = pd.read_csv(data_file).set_index('sample')
data
display(data[['E_pr_do_gauss', 'E_pr_do_kde', 'E_pr_do_hsm', 'n_bursts_do']])
print('KDE Mean (%): ', data.E_pr_do_kde.mean()*100)
print('KDE Std. Dev. (%):', data.E_pr_do_kde.std()*100)
d = data[['E_pr_do_gauss', 'E_pr_do_kde', 'E_pr_do_hsm']]#, 'n_bursts_do']]
d.plot(lw=3);
"""
Explanation: To recompute the PR data used by this notebook run the
8-spots paper analysis notebook.
Computation
End of explanation
"""
E_table = data[['E_pr_do_gauss', 'E_pr_do_kde']]
E_table
# Convert the donor-only proximity ratio E into the leakage
# coefficient k = E / (1 - E).
lk_table = E_table / (1 - E_table)
lk_table.columns = [c.replace('E_pr_do', 'lk') for c in E_table.columns]
lk_table['num_bursts'] = data['n_bursts_do']
lk_table
"""
Explanation: Create Leakage Table
End of explanation
"""
data.E_pr_do_kde
lk_table.lk_kde
# Weighted mean of E, using the number of donor-only bursts per sample
# as weights.
E_m = np.average(data.E_pr_do_kde, weights=data.n_bursts_do)
E_m
k_E_m = E_m / (1 - E_m)
k_E_m
# Alternative: average the per-sample leakage coefficients directly.
k_m = np.average(lk_table.lk_kde, weights=data.n_bursts_do)
k_m
"""
Explanation: Average leakage coefficient
End of explanation
"""
# Append summary rows (mean and std) to the per-sample leakage table.
stats = pd.concat([lk_table.mean(), lk_table.std()], axis=1, keys=['mean', 'std']).T
stats
# DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported equivalent and gives the same result here.
table_to_save = pd.concat([lk_table, stats])
table_to_save = table_to_save.round({'lk_gauss': 5, 'lk_kde': 5, 'num_bursts': 2})
table_to_save
table_to_save.to_csv('results/table_usalex_5samples_leakage_coeff.csv')
"""
Explanation: Conclusions
Either averaging $E_{PR}$ or the corresponding $k = n_d/n_a$ the result for the leakage coefficient is ~10 % (D-only peak fitted finding the maximum of the KDE).
Save data
Full table
End of explanation
"""
'%.5f' % k_m
# Persist the averaged leakage coefficient for downstream notebooks.
with open('results/usALEX - leakage coefficient %s.csv' % bsearch_ph_sel, 'w') as f:
    f.write('%.5f' % k_m)
"""
Explanation: Average coefficient
End of explanation
"""
|
atulsingh0/MachineLearning | MasteringML_wSkLearn/07_Dimensionality_Reduction_with_PCA.ipynb | gpl-3.0 | # import
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.datasets import load_iris
%matplotlib inline
X = [[2, 0, -1.4],
     [2.2, 0.2, -1.5],
     [2.4, 0.1, -1],
     [1.9, 0, -1.2]]
print(np.array(X))
print(np.array(X).T)
# np.cov treats each ROW as one variable, so computing the covariance
# between the three columns (features) requires transposing first.
print(np.cov(np.array(X).T))
print(np.cov(np.array(X)))
"""
Explanation: Principal Component Analysis, also known as the Karhunen-Loeve Transform, is a
technique used to search for patterns in high-dimensional data. PCA is commonly
used to explore and visualize high-dimensional data sets. It can also be used to
compress data, and process data before it is used by another estimator. PCA reduces
a set of possibly-correlated, high-dimensional variables to a lower-dimensional
set of linearly uncorrelated synthetic variables called principal components. The
lower-dimensional data will preserve as much of the variance of the original data
as possible.
In general, an n-dimensional dataset can be reduced by projecting the dataset onto a
k-dimensional subspace, where k is less than n. More formally, PCA can be used to
find a set of vectors that span a subspace, which minimizes the sum of the squared
errors of the projected data. This projection will retain the greatest proportion of the
original data set's variance.
PCA rotates the data set to align with its principal components to maximize the variance
contained within the first several principal components.
PCA is most useful when the variance in a data set is distributed unevenly across the
dimensions. Consider a three-dimensional data set with a spherical convex hull. PCA
cannot be used effectively with this data set because there is equal variance in each
dimension; none of the dimensions can be discarded without losing a significant
amount of information.
It is easy to visually identify the principal components of data sets with only two or
three dimensions.
Performing Principal Component Analysis
Variance, Covariance, and Covariance Matrices
variance is a measure of how a set of values are spread out. Variance is
calculated as the average of the squared differences of the values and mean of the
values -
$$ var(s^2) = \frac{\sum_{i=1}^{n}(x_i - \bar x)^2}{n-1} $$
Covariance is a measure of how much two variables change together; it is a measure
of the strength of the correlation between two sets of variables. If the covariance of two
variables is zero, the variables are uncorrelated.
....................Note that uncorrelated variables are
not necessarily independent, as correlation is only a measure of linear dependence. The
covariance of two variables is calculated using the following equation:
$$ coVar(X,y) = \frac{\sum_{i=1}^{n}(x_i - \bar x)(y_i - \bar y)}{n-1} $$
A covariance
matrix describes the covariance values between each pair of dimensions in a data
set. The element (i, j) indicates the covariance of the ith and jth dimensions of the
data.
End of explanation
"""
# np.linalg.eig returns (eigenvalues, eigenvectors) in that order, so
# w holds the eigenvalues and v the eigenvectors (the original print
# labels were swapped).
w, v = np.linalg.eig(np.array([[1, -2], [2, -3]]))
print("eigen values : ", w)
print("eigen vectors : ", v)
"""
Explanation: Eigenvectors and eigenvalues
An eigenvector of a matrix is a non-zero vector that satisfies the following equation:
$$ A \overrightarrow{v} = \lambda\overrightarrow{v} $$
End of explanation
"""
x1 = [ 0.9, 2.4, 1.2, 0.5, 0.3, 1.8, 0.5, 0.3, 2.5, 1.3 ]
x2 = [ 1, 2.6, 1.7, 0.7, 0.7, 1.4, 0.6, 0.6, 2.6, 1.1 ]
# deducting mean
# (np.mean returns a numpy scalar, so list - scalar broadcasts into an
# ndarray of mean-centred values)
x11 = x1 - np.mean(x1)
x21 = x2 - np.mean(x2)
"""
Explanation: Dimensionality reduction with Principal Component Analysis
End of explanation
"""
# Covariance matrix of the two mean-centred variables.
cov = np.cov(x11, x21)
print(cov)
# np.linalg.eig returns (eigenvalues, eigenvectors) in that order, so
# w holds the eigenvalues and v the eigenvectors (the original print
# labels were swapped).
w, v = np.linalg.eig(cov)
print("eigen values : ", w)
print("eigen vectors : ", v)
"""
Explanation: principal components are the eigenvectors of the data's covariance matrix ordered
by their eigenvalues.
Way1: The first technique requires calculating the covariance matrix of the
data. Since the covariance matrix will be square, we can calculate the eigenvectors
and eigenvalues
Way2: The
second technique uses singular value decomposition of the data matrix to find the
eigenvectors and square roots of the eigenvalues of the covariance matrix.
End of explanation
"""
# Project the mean-centred data A onto the first principal component l
# (the eigenvector paired with the largest eigenvalue).
A = np.array([x11, x21]).T
l = np.array([[0.73251454],
              [0.68075138]])
np.dot(A, l)
"""
Explanation: The first eigenvector
has the greatest eigenvalue and is the first principal component. We will build a
transformation matrix in which each column of the matrix is the eigenvector for
a principal component. If we were reducing a five-dimensional data set to three
dimensions, we would build a matrix with three columns
End of explanation
"""
data = load_iris()
y = data.target
X = data.data
# Reduce the 4-dimensional iris measurements to 2 principal components.
pca = PCA(n_components=2)
reduced_X = pca.fit_transform(X)
# One (colour, marker) pair per iris class, in class-index order:
# class 0 -> red 'x', class 1 -> blue 'D', class 2 -> green '.'.
class_styles = [('r', 'x'), ('b', 'D'), ('g', '.')]
plt.figure(figsize=(12, 9))
for class_index, (colour, marker) in enumerate(class_styles):
    points = reduced_X[y == class_index]
    plt.scatter(points[:, 0], points[:, 1], c=colour, marker=marker)
plt.show()
"""
Explanation: Many implementations of PCA, including the one of scikit-learn, use singular value
decomposition to calculate the eigenvectors and eigenvalues. SVD is given by the
following equation:
$$ X = U \sum V^T $$
The columns of U are called left singular vectors of the data matrix, the columns of
V are its right singular vectors, and the diagonal entries of are its singular values.
Using PCA to visualize high-dimensional data
End of explanation
"""
|
GoogleCloudPlatform/training-data-analyst | courses/machine_learning/deepdive2/how_google_does_ml/bigquery/solution/analyze_with_bigquery_solution.ipynb | apache-2.0 | import os
import pandas as pd
PROJECT = "<YOUR PROJECT>" #TODO Replace with your project id
# Expose the project id to shell/magic cells via the environment.
os.environ["PROJECT"] = PROJECT
# Show up to 50 columns when displaying wide BigQuery result frames.
pd.options.display.max_columns = 50
"""
Explanation: Analyze a large dataset with Google BigQuery
Learning Objectives
Access an ecommerce dataset
Look at the dataset metadata
Remove duplicate entries
Write and execute queries
Introduction
BigQuery is Google's fully managed, NoOps, low cost analytics database. With BigQuery you can query terabytes and terabytes of data without having any infrastructure to manage or needing a database administrator. BigQuery uses SQL and can take advantage of the pay-as-you-go model. BigQuery allows you to focus on analyzing data to find meaningful insights.
We have a publicly available ecommerce dataset that has millions of Google Analytics records for the Google Merchandise Store loaded into a table in BigQuery. In this lab, you use a copy of that dataset. Sample scenarios are provided, from which you look at the data and ways to remove duplicate information. The lab then steps you through further analysis the data.
BigQuery can be accessed by its own browser-based interface, Google Data Studio, and many third party tools. In this lab you will use the BigQuery directly in notebook cells using the iPython magic command %%bigquery.
The steps you will follow in the lab are analogous to what you would do to prepare data for use in advanced ML operations. You will follow the notebook to experiment with the BigQuery queries provided to analyze the data.
Set up the notebook environment
VERY IMPORTANT: In the cell below you must replace the text <YOUR PROJECT> with you GCP project id.
End of explanation
"""
%%bigquery --project $PROJECT
#standardsql
SELECT *
EXCEPT
(table_catalog, table_schema, is_generated, generation_expression, is_stored,
is_updatable, is_hidden, is_system_defined, is_partitioning_column, clustering_ordinal_position)
FROM `data-to-insights.ecommerce.INFORMATION_SCHEMA.COLUMNS`
WHERE table_name="all_sessions_raw"
"""
Explanation: Explore eCommerce data and identify duplicate records
Scenario: You were provided with Google Analytics logs for an eCommerce website in a BigQuery dataset. The data analyst team created a new BigQuery table of all the raw eCommerce visitor session data. This data tracks user interactions, location, device types, time on page, and details of any transaction. Your ultimate plan is to use this data in an ML capacity to create a model that delivers highly accurate predictions of user behavior to support tailored marketing campaigns.
First, a few notes on BigQuery within a python notebook context. Any cell that starts with %%bigquery (the BigQuery Magic) will be interpreted as a SQL query that is executed on BigQuery, and the result is printed to our notebook.
BigQuery supports two flavors of SQL syntax: legacy SQL and standard SQL. The preferred flavor is standard SQL because it complies with the official SQL:2011 standard. To instruct BigQuery to interpret our syntax as such we start the query with #standardSQL.
Our first query is accessing the BigQuery Information Schema which stores all object-related metadata. In this case we want to see metadata details for the "all_sessions_raw" table.
Tip: To run the current cell you can click the cell and hit shift enter
TODO 2
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*)
FROM `data-to-insights.ecommerce.all_sessions_raw`
"""
Explanation: Next examine how many rows are in the table.
TODO 1
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT *
FROM `data-to-insights.ecommerce.all_sessions_raw`
LIMIT 7
"""
Explanation: Now take a quick look at a few rows of data in the table.
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS num_duplicate_rows,
*
FROM `data-to-insights.ecommerce.all_sessions_raw`
GROUP BY fullvisitorid,
channelgrouping,
time,
country,
city,
totaltransactionrevenue,
transactions,
timeonsite,
pageviews,
sessionqualitydim,
date,
visitid,
type,
productrefundamount,
productquantity,
productprice,
productrevenue,
productsku,
v2productname,
v2productcategory,
productvariant,
currencycode,
itemquantity,
itemrevenue,
transactionrevenue,
transactionid,
pagetitle,
searchkeyword,
pagepathlevel1,
ecommerceaction_type,
ecommerceaction_step,
ecommerceaction_option
HAVING num_duplicate_rows > 1;
"""
Explanation: Identify duplicate rows
Seeing a sample amount of data may give you greater intuition for what is included in the dataset. But since the table is quite large, a preview is not likely to render meaningful results. As you scan and scroll through the sample rows you see there is no singular field that uniquely identifies a row, so you need advanced logic to identify duplicate rows.
The query below uses the SQL GROUP BY function on every field and counts (COUNT) where there are rows that have the same values across every field.
If every field is unique, the COUNT will return 1 as there are no other groupings of rows with the exact same value for all fields.
If there is a row with the same values for all fields, they will be grouped together and the COUNT will be greater than 1. The last part of the query is an aggregation filter using HAVING to only show the results that have a COUNT of duplicates greater than 1.
Run the following query to find duplicate records across all columns.
TODO 3
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT fullvisitorid, # the unique visitor ID
visitid, # a visitor can have multiple visits
date, # session date stored as string YYYYMMDD
time, # time of the individual site hit (can be 0 or more)
v2productname, # not unique since a product can have variants like Color
productsku, # unique for each product
type, # visit and/or event trigger
ecommerceaction_type, # maps to ‘add to cart', ‘completed checkout'
ecommerceaction_step,
ecommerceaction_option,
transactionrevenue, # revenue of the order
transactionid, # unique identifier for revenue bearing transaction
count(*) AS row_count
FROM `data-to-insights.ecommerce.all_sessions`
GROUP BY 1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12
HAVING row_count > 1 # find duplicates
"""
Explanation: As you can see there are quite a few "duplicate" records (615) when analyzed with these parameters.
In your own datasets, even if you have a unique key, it is still beneficial to confirm the uniqueness of the rows with COUNT, GROUP BY, and HAVING before you begin your analysis.
Analyze the new all_sessions table
In this section you use a deduplicated table called all_sessions.
Scenario: Your data analyst team has provided you with a relevant query, and your schema experts have identified the key fields that must be unique for each record per your schema.
Run the query to confirm that no duplicates exist, this time against the "all_sessions" table:
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS product_views,
count(DISTINCT fullvisitorid) AS unique_visitors
FROM `data-to-insights.ecommerce.all_sessions`;
"""
Explanation: The query returns zero records indicating no duplicates exist.
Write basic SQL against the eCommerce data (TODO 4)
In this section, you query for insights on the ecommerce dataset.
A good first path of analysis is to find the total unique visitors
The query below determines the total views by counting product_views and the number of unique visitors by counting fullVisitorID.
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT count(DISTINCT fullvisitorid) AS unique_visitors,
channelgrouping
FROM `data-to-insights.ecommerce.all_sessions`
GROUP BY 2
ORDER BY 2 DESC;
"""
Explanation: The next query shows total unique visitors(fullVisitorID) by the referring site (channelGrouping):
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS product_views,
( v2productname ) AS ProductName
FROM `data-to-insights.ecommerce.all_sessions`
WHERE type = 'PAGE'
GROUP BY v2productname
ORDER BY product_views DESC
LIMIT 5;
"""
Explanation: To find deeper insights in the data, the next query lists the five products with the most views (product_views) from unique visitors. The query counts number of times a product (v2ProductName) was viewed (product_views), puts the list in descending order, and lists the top 5 entries:
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS product_views,
count(productquantity) AS orders,
sum(productquantity) AS quantity_product_ordered,
v2productname
FROM `data-to-insights.ecommerce.all_sessions`
WHERE type = 'PAGE'
GROUP BY v2productname
ORDER BY product_views DESC
LIMIT 5;
"""
Explanation: Now expand your previous query to include the total number of distinct products ordered and the total number of total units ordered (productQuantity):
End of explanation
"""
%%bigquery --project $PROJECT
#standardSQL
SELECT count(*) AS product_views,
count(productquantity) AS orders,
sum(productquantity) AS quantity_product_ordered,
sum(productquantity) / Count(productquantity) AS avg_per_order,
v2productname AS productName
FROM `data-to-insights.ecommerce.all_sessions`
WHERE type = 'PAGE'
GROUP BY v2productname
ORDER BY product_views DESC
LIMIT 5;
"""
Explanation: Lastly, expand the query to include the average amount of product per order (total number of units ordered/total number of orders, or SUM(productQuantity)/COUNT(productQuantity)).
End of explanation
"""
|
phoebe-project/phoebe2-docs | 2.2/tutorials/passband_updates.ipynb | gpl-3.0 | !pip install -I "phoebe>=2.2,<2.3"
"""
Explanation: Advanced: passband versioning & updates
Let's first make sure we have the latest version of PHOEBE 2.2 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
End of explanation
"""
import phoebe
print(phoebe.list_installed_passbands())
print(phoebe.list_all_update_passbands_available())
print(phoebe.update_passband_available('Johnson:V'))
"""
Explanation: As of the 2.2 release, PHOEBE allows you to check for online updates to local install passbands. If we add a new atmosphere table or feature (extinction as in this release, for example) to a passband table, you can now update directly from the python interface of PHOEBE.
If you try using extinction in a version of a table that does not support extinction, for example, an error will be raised (either during run_compute or you can check by calling run_checks manually). If extinction tables are available in the online version of the passband (from tables.phoebe-project.org), then the local installed version will automatically be updated if the timestamps on the passbands match, otherwise a message will be raised with instructions to manually call phoebe.update_passband.
If you'd like to manually check for updates, you can use phoebe.list_all_update_passbands_available or phoebe.update_passband_available.
End of explanation
"""
phoebe.update_all_passbands()
"""
Explanation: If there are updates available that you'd like to apply, you can apply them all via phoebe.update_all_passbands
End of explanation
"""
phoebe.update_passband('Johnson:V')
"""
Explanation: To update a single passband to the latest online version with the same contents as the locally installed version (or with new tables, see the content argument), call phoebe.update_passband.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/nasa-giss/cmip6/models/sandbox-1/atmos.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nasa-giss', 'sandbox-1', 'atmos')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmos
MIP Era: CMIP6
Institute: NASA-GISS
Source ID: SANDBOX-1
Topic: Atmos
Sub-Topics: Dynamical Core, Radiation, Turbulence Convection, Microphysics Precipitation, Cloud Scheme, Observation Simulation, Gravity Waves, Solar, Volcanos.
Properties: 156 (127 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:20
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties --> Overview
2. Key Properties --> Resolution
3. Key Properties --> Timestepping
4. Key Properties --> Orography
5. Grid --> Discretisation
6. Grid --> Discretisation --> Horizontal
7. Grid --> Discretisation --> Vertical
8. Dynamical Core
9. Dynamical Core --> Top Boundary
10. Dynamical Core --> Lateral Boundary
11. Dynamical Core --> Diffusion Horizontal
12. Dynamical Core --> Advection Tracers
13. Dynamical Core --> Advection Momentum
14. Radiation
15. Radiation --> Shortwave Radiation
16. Radiation --> Shortwave GHG
17. Radiation --> Shortwave Cloud Ice
18. Radiation --> Shortwave Cloud Liquid
19. Radiation --> Shortwave Cloud Inhomogeneity
20. Radiation --> Shortwave Aerosols
21. Radiation --> Shortwave Gases
22. Radiation --> Longwave Radiation
23. Radiation --> Longwave GHG
24. Radiation --> Longwave Cloud Ice
25. Radiation --> Longwave Cloud Liquid
26. Radiation --> Longwave Cloud Inhomogeneity
27. Radiation --> Longwave Aerosols
28. Radiation --> Longwave Gases
29. Turbulence Convection
30. Turbulence Convection --> Boundary Layer Turbulence
31. Turbulence Convection --> Deep Convection
32. Turbulence Convection --> Shallow Convection
33. Microphysics Precipitation
34. Microphysics Precipitation --> Large Scale Precipitation
35. Microphysics Precipitation --> Large Scale Cloud Microphysics
36. Cloud Scheme
37. Cloud Scheme --> Optical Cloud Properties
38. Cloud Scheme --> Sub Grid Scale Water Distribution
39. Cloud Scheme --> Sub Grid Scale Ice Distribution
40. Observation Simulation
41. Observation Simulation --> Isscp Attributes
42. Observation Simulation --> Cosp Attributes
43. Observation Simulation --> Radar Inputs
44. Observation Simulation --> Lidar Inputs
45. Gravity Waves
46. Gravity Waves --> Orographic Gravity Waves
47. Gravity Waves --> Non Orographic Gravity Waves
48. Solar
49. Solar --> Solar Pathways
50. Solar --> Solar Constant
51. Solar --> Orbital Parameters
52. Solar --> Insolation Ozone
53. Volcanos
54. Volcanos --> Volcanoes Treatment
1. Key Properties --> Overview
Top level key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmosphere model code (CAM 4.0, ARPEGE 3.2,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "AGCM"
# "ARCM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of atmospheric model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.overview.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "primitive equations"
# "non-hydrostatic"
# "anelastic"
# "Boussinesq"
# "hydrostatic"
# "quasi-hydrostatic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.horizontal_resolution_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Resolution
Characteristics of the model resolution
2.1. Horizontal Resolution Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of the model grid, e.g. T42, N48.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, e.g. 2.5 x 3.75 degrees lat-lon.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 1 deg (Equator) - 0.5 deg
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.4. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on the computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.resolution.high_top')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 2.5. High Top
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the atmosphere have a high-top? High-Top atmospheres have a fully resolved stratosphere with a model top above the stratopause.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping
Characteristics of the atmosphere model time stepping
3.1. Timestep Dynamics
Is Required: TRUE Type: STRING Cardinality: 1.1
Timestep for the dynamics, e.g. 30 min.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_shortwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep Shortwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the shortwave radiative transfer, e.g. 1.5 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.timestepping.timestep_longwave_radiative_transfer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestep Longwave Radiative Transfer
Is Required: FALSE Type: STRING Cardinality: 0.1
Timestep for the longwave radiative transfer, e.g. 3 hours.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "modified"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Orography
Characteristics of the model orography
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the orography.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.key_properties.orography.changes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "related to ice sheets"
# "related to tectonics"
# "modified mean"
# "modified variance if taken into account in model (cf gravity waves)"
# TODO - please enter value(s)
"""
Explanation: 4.2. Changes
Is Required: TRUE Type: ENUM Cardinality: 1.N
If the orography type is modified describe the time adaptation changes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid --> Discretisation
Atmosphere grid discretisation
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of grid discretisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spectral"
# "fixed grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Discretisation --> Horizontal
Atmosphere discretisation in the horizontal
6.1. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "finite elements"
# "finite volumes"
# "finite difference"
# "centered finite difference"
# TODO - please enter value(s)
"""
Explanation: 6.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.scheme_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "second"
# "third"
# "fourth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.3. Scheme Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.horizontal_pole')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "filter"
# "pole rotation"
# "artificial island"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. Horizontal Pole
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal discretisation pole singularity treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.horizontal.grid_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gaussian"
# "Latitude-Longitude"
# "Cubed-Sphere"
# "Icosahedral"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.5. Grid Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.grid.discretisation.vertical.coordinate_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "isobaric"
# "sigma"
# "hybrid sigma-pressure"
# "hybrid pressure"
# "vertically lagrangian"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Discretisation --> Vertical
Atmosphere discretisation in the vertical
7.1. Coordinate Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Type of vertical coordinate system
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Dynamical Core
Characteristics of the dynamical core
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere dynamical core
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the dynamical core of the model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.timestepping_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Adams-Bashforth"
# "explicit"
# "implicit"
# "semi-implicit"
# "leap frog"
# "multi-step"
# "Runge Kutta fifth order"
# "Runge Kutta second order"
# "Runge Kutta third order"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Timestepping Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Timestepping framework type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface pressure"
# "wind components"
# "divergence/curl"
# "temperature"
# "potential temperature"
# "total water"
# "water vapour"
# "water liquid"
# "water ice"
# "total water moments"
# "clouds"
# "radiation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of the model prognostic variables
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_boundary_condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 9. Dynamical Core --> Top Boundary
Type of boundary layer at the top of the model
9.1. Top Boundary Condition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Top boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Top Heat
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary heat treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.top_boundary.top_wind')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Top Wind
Is Required: TRUE Type: STRING Cardinality: 1.1
Top boundary wind treatment
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.lateral_boundary.condition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sponge layer"
# "radiation boundary condition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Dynamical Core --> Lateral Boundary
Type of lateral boundary condition (if the model is a regional model)
10.1. Condition
Is Required: FALSE Type: ENUM Cardinality: 0.1
Type of lateral boundary condition
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Dynamical Core --> Diffusion Horizontal
Horizontal diffusion scheme
11.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Horizontal diffusion scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.diffusion_horizontal.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "iterated Laplacian"
# "bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal diffusion scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Heun"
# "Roe and VanLeer"
# "Roe and Superbee"
# "Prather"
# "UTOPIA"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Dynamical Core --> Advection Tracers
Tracer advection scheme
12.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Tracer advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Eulerian"
# "modified Euler"
# "Lagrangian"
# "semi-Lagrangian"
# "cubic semi-Lagrangian"
# "quintic semi-Lagrangian"
# "mass-conserving"
# "finite volume"
# "flux-corrected"
# "linear"
# "quadratic"
# "quartic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "dry mass"
# "tracer mass"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.3. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Tracer advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_tracers.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Priestley algorithm"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.4. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracer advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "VanLeer"
# "Janjic"
# "SUPG (Streamline Upwind Petrov-Galerkin)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Dynamical Core --> Advection Momentum
Momentum advection scheme
13.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Momentum advection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_characteristics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "2nd order"
# "4th order"
# "cell-centred"
# "staggered grid"
# "semi-staggered grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Scheme Characteristics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.scheme_staggering_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa D-grid"
# "Arakawa E-grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.3. Scheme Staggering Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conserved_quantities')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Angular momentum"
# "Horizontal momentum"
# "Enstrophy"
# "Mass"
# "Total energy"
# "Vorticity"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.4. Conserved Quantities
Is Required: TRUE Type: ENUM Cardinality: 1.N
Momentum advection scheme conserved quantities
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.dynamical_core.advection_momentum.conservation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "conservation fixer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Conservation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Momentum advection scheme conservation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.aerosols')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "sulphate"
# "nitrate"
# "sea salt"
# "dust"
# "ice"
# "organic"
# "BC (black carbon / soot)"
# "SOA (secondary organic aerosols)"
# "POM (particulate organic matter)"
# "polar stratospheric ice"
# "NAT (nitric acid trihydrate)"
# "NAD (nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particle)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiation
Characteristics of the atmosphere radiation process
14.1. Aerosols
Is Required: TRUE Type: ENUM Cardinality: 1.N
Aerosols whose radiative effect is taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Radiation --> Shortwave Radiation
Properties of the shortwave radiation scheme
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of shortwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shortwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shortwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Shortwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiation --> Shortwave GHG
Representation of greenhouse gases in the shortwave radiation scheme
16.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose shortwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.3. Other Fluorinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other fluorinated gases whose shortwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiation --> Shortwave Cloud Ice
Shortwave radiative properties of ice crystals in clouds
17.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiation --> Shortwave Cloud Liquid
Shortwave radiative properties of liquid droplets in clouds
18.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiation --> Shortwave Cloud Inhomogeneity
Cloud inhomogeneity in the shortwave radiation scheme
19.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiation --> Shortwave Aerosols
Shortwave radiative properties of aerosols
20.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the shortwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.shortwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiation --> Shortwave Gases
Shortwave radiative properties of gases
21.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General shortwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Radiation --> Longwave Radiation
Properties of the longwave radiation scheme
22.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of longwave radiation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the longwave radiation scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_integration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "wide-band model"
# "correlated-k"
# "exponential sum fitting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Spectral Integration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Longwave radiation scheme spectral integration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.transport_calculation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "two-stream"
# "layer interaction"
# "bulk"
# "adaptive"
# "multi-stream"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.4. Transport Calculation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Longwave radiation transport calculation methods
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_radiation.spectral_intervals')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 22.5. Spectral Intervals
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Longwave radiation scheme number of spectral intervals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.greenhouse_gas_complexity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CO2"
# "CH4"
# "N2O"
# "CFC-11 eq"
# "CFC-12 eq"
# "HFC-134a eq"
# "Explicit ODSs"
# "Explicit other fluorinated gases"
# "O3"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiation --> Longwave GHG
Representation of greenhouse gases in the longwave radiation scheme
23.1. Greenhouse Gas Complexity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Complexity of greenhouse gases whose longwave radiative effects are taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.ODS')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CFC-12"
# "CFC-11"
# "CFC-113"
# "CFC-114"
# "CFC-115"
# "HCFC-22"
# "HCFC-141b"
# "HCFC-142b"
# "Halon-1211"
# "Halon-1301"
# "Halon-2402"
# "methyl chloroform"
# "carbon tetrachloride"
# "methyl chloride"
# "methylene chloride"
# "chloroform"
# "methyl bromide"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. ODS
Is Required: FALSE Type: ENUM Cardinality: 0.N
Ozone depleting substances whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_GHG.other_flourinated_gases')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HFC-134a"
# "HFC-23"
# "HFC-32"
# "HFC-125"
# "HFC-143a"
# "HFC-152a"
# "HFC-227ea"
# "HFC-236fa"
# "HFC-245fa"
# "HFC-365mfc"
# "HFC-43-10mee"
# "CF4"
# "C2F6"
# "C3F8"
# "C4F10"
# "C5F12"
# "C6F14"
# "C7F16"
# "C8F18"
# "c-C4F8"
# "NF3"
# "SF6"
# "SO2F2"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Other Flourinated Gases
Is Required: FALSE Type: ENUM Cardinality: 0.N
Other flourinated gases whose longwave radiative effects are explicitly taken into account in the atmosphere model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiation --> Longwave Cloud Ice
Longwave radiative properties of ice crystals in clouds
24.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud ice crystals
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.physical_reprenstation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bi-modal size distribution"
# "ensemble of ice crystals"
# "mean projected area"
# "ice water path"
# "crystal asymmetry"
# "crystal aspect ratio"
# "effective crystal radius"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.2. Physical Reprenstation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_ice.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud ice crystals in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiation --> Longwave Cloud Liquid
Longwave radiative properties of liquid droplets in clouds
25.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with cloud liquid droplets
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud droplet number concentration"
# "effective cloud droplet radii"
# "droplet size distribution"
# "liquid water path"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_liquid.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "geometric optics"
# "Mie theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to cloud liquid droplets in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_cloud_inhomogeneity.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Monte Carlo Independent Column Approximation"
# "Triplecloud"
# "analytic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiation --> Longwave Cloud Inhomogeneity
Cloud inhomogeneity in the longwave radiation scheme
26.1. Cloud Inhomogeneity
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method for taking into account horizontal cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiation --> Longwave Aerosols
Longwave radiative properties of aerosols
27.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with aerosols
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.physical_representation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "number concentration"
# "effective radii"
# "size distribution"
# "asymmetry"
# "aspect ratio"
# "mixing state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.2. Physical Representation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical representation of aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_aerosols.optical_methods')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "T-matrix"
# "geometric optics"
# "finite difference time domain (FDTD)"
# "Mie theory"
# "anomalous diffraction approximation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27.3. Optical Methods
Is Required: TRUE Type: ENUM Cardinality: 1.N
Optical methods applicable to aerosols in the longwave radiation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.radiation.longwave_gases.general_interactions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "scattering"
# "emission/absorption"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiation --> Longwave Gases
Longwave radiative properties of gases
28.1. General Interactions
Is Required: TRUE Type: ENUM Cardinality: 1.N
General longwave radiative interactions with gases
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Turbulence Convection
Atmosphere Convective Turbulence and Clouds
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of atmosphere convection and turbulence
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Mellor-Yamada"
# "Holtslag-Boville"
# "EDMF"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Turbulence Convection --> Boundary Layer Turbulence
Properties of the boundary layer turbulence scheme
30.1. Scheme Name
Is Required: FALSE Type: ENUM Cardinality: 0.1
Boundary layer turbulence scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TKE prognostic"
# "TKE diagnostic"
# "TKE coupled with water"
# "vertical profile of Kz"
# "non-local diffusion"
# "Monin-Obukhov similarity"
# "Coastal Buddy Scheme"
# "Coupled with convection"
# "Coupled with gravity waves"
# "Depth capped at cloud base"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Boundary layer turbulence scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Closure Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Boundary layer turbulence scheme closure order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.boundary_layer_turbulence.counter_gradient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Counter Gradient
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Uses boundary layer turbulence scheme counter gradient
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31. Turbulence Convection --> Deep Convection
Properties of the deep convection scheme
31.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Deep convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "adjustment"
# "plume ensemble"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.scheme_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "CAPE"
# "bulk"
# "ensemble"
# "CAPE/WFN based"
# "TKE/CIN based"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Deep convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vertical momentum transport"
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "updrafts"
# "downdrafts"
# "radiative effect of anvils"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of deep convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.deep_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for deep convection. Microphysical processes directly control the amount of detrainment of cloud hydrometeor and water vapor from updrafts
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Turbulence Convection --> Shallow Convection
Properties of the shallow convection scheme
32.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Shallow convection scheme name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_type')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mass-flux"
# "cumulus-capped boundary layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.2. Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.N
Shallow convection scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.scheme_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "same as deep (unified)"
# "included in boundary layer turbulence"
# "separate diagnosis"
# TODO - please enter value(s)
"""
Explanation: 32.3. Scheme Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Shallow convection scheme method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convective momentum transport"
# "entrainment"
# "detrainment"
# "penetrative convection"
# "re-evaporation of convective precipitation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Physical processes taken into account in the parameterisation of shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.turbulence_convection.shallow_convection.microphysics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "tuning parameter based"
# "single moment"
# "two moment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.5. Microphysics
Is Required: FALSE Type: ENUM Cardinality: 0.N
Microphysics scheme for shallow convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33. Microphysics Precipitation
Large Scale Cloud Microphysics and Precipitation
33.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of large scale cloud microphysics and precipitation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Microphysics Precipitation --> Large Scale Precipitation
Properties of the large scale precipitation scheme
34.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the large scale precipitation parameterisation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_precipitation.hydrometeors')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "liquid rain"
# "snow"
# "hail"
# "graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34.2. Hydrometeors
Is Required: TRUE Type: ENUM Cardinality: 1.N
Precipitating hydrometeors taken into account in the large scale precipitation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Microphysics Precipitation --> Large Scale Cloud Microphysics
Properties of the large scale cloud microphysics scheme
35.1. Scheme Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name of the microphysics parameterisation scheme used for large scale clouds.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.microphysics_precipitation.large_scale_cloud_microphysics.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "mixed phase"
# "cloud droplets"
# "cloud ice"
# "ice nucleation"
# "water vapour deposition"
# "effect of raindrops"
# "effect of snow"
# "effect of graupel"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Large scale cloud microphysics processes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Cloud Scheme
Characteristics of the cloud scheme
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the atmosphere cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.2. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.atmos_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "atmosphere_radiation"
# "atmosphere_microphysics_precipitation"
# "atmosphere_turbulence_convection"
# "atmosphere_gravity_waves"
# "atmosphere_solar"
# "atmosphere_volcano"
# "atmosphere_cloud_simulator"
# TODO - please enter value(s)
"""
Explanation: 36.3. Atmos Coupling
Is Required: FALSE Type: ENUM Cardinality: 0.N
Atmosphere components that are linked to the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.uses_separate_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.4. Uses Separate Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Different cloud schemes for the different types of clouds (convective, stratiform and boundary layer)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "entrainment"
# "detrainment"
# "bulk cloud"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Processes included in the cloud scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.6. Prognostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a prognostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.diagnostic_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 36.7. Diagnostic Scheme
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the cloud scheme a diagnostic scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "cloud amount"
# "liquid"
# "ice"
# "rain"
# "snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.8. Prognostic Variables
Is Required: FALSE Type: ENUM Cardinality: 0.N
List the prognostic variables used by the cloud scheme, if applicable.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_overlap_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "random"
# "maximum"
# "maximum-random"
# "exponential"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 37. Cloud Scheme --> Optical Cloud Properties
Optical cloud properties
37.1. Cloud Overlap Method
Is Required: FALSE Type: ENUM Cardinality: 0.1
Method for taking into account overlapping of cloud layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.optical_cloud_properties.cloud_inhomogeneity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Cloud Inhomogeneity
Is Required: FALSE Type: STRING Cardinality: 0.1
Method for taking into account cloud inhomogeneity
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 38. Cloud Scheme --> Sub Grid Scale Water Distribution
Sub-grid scale water distribution
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale water distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 38.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale water distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 38.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale water distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_water_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 38.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale water distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# TODO - please enter value(s)
"""
Explanation: 39. Cloud Scheme --> Sub Grid Scale Ice Distribution
Sub-grid scale ice distribution
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sub-grid scale ice distribution type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 39.2. Function Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Sub-grid scale ice distribution function name
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.function_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 39.3. Function Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Sub-grid scale ice distribution function order
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.cloud_scheme.sub_grid_scale_ice_distribution.convection_coupling')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "coupled with deep"
# "coupled with shallow"
# "not coupled with convection"
# TODO - please enter value(s)
"""
Explanation: 39.4. Convection Coupling
Is Required: TRUE Type: ENUM Cardinality: 1.N
Sub-grid scale ice distribution coupling with convection
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40. Observation Simulation
Characteristics of observation simulation
40.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of observation simulator characteristics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_estimation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "no adjustment"
# "IR brightness"
# "visible optical depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Observation Simulation --> Isscp Attributes
ISSCP Characteristics
41.1. Top Height Estimation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator ISSCP top height estimation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.isscp_attributes.top_height_direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "lowest altitude level"
# "highest altitude level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. Top Height Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator ISSCP top height direction
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.run_configuration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Inline"
# "Offline"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 42. Observation Simulation --> Cosp Attributes
CFMIP Observational Simulator Package attributes
42.1. Run Configuration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator COSP run configuration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_grid_points')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.2. Number Of Grid Points
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of grid points
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_sub_columns')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.3. Number Of Sub Columns
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of sub-columns used to simulate sub-grid variability
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.cosp_attributes.number_of_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 42.4. Number Of Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Cloud simulator COSP number of levels
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.frequency')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 43. Observation Simulation --> Radar Inputs
Characteristics of the cloud radar simulator
43.1. Frequency
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Cloud simulator radar frequency (Hz)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "surface"
# "space borne"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 43.2. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator radar type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.gas_absorption')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.3. Gas Absorption
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses gas absorption
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.radar_inputs.effective_radius')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 43.4. Effective Radius
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Cloud simulator radar uses effective radius
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.ice_types')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ice spheres"
# "ice non-spherical"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44. Observation Simulation --> Lidar Inputs
Characteristics of the cloud lidar simulator
44.1. Ice Types
Is Required: TRUE Type: ENUM Cardinality: 1.1
Cloud simulator lidar ice type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.observation_simulation.lidar_inputs.overlap')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "max"
# "random"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 44.2. Overlap
Is Required: TRUE Type: ENUM Cardinality: 1.N
Cloud simulator lidar overlap
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 45. Gravity Waves
Characteristics of the parameterised gravity waves in the atmosphere, whether from orography or other sources.
45.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of gravity wave parameterisation in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.sponge_layer')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Rayleigh friction"
# "Diffusive sponge layer"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.2. Sponge Layer
Is Required: TRUE Type: ENUM Cardinality: 1.1
Sponge layer in the upper levels in order to avoid gravity wave reflection at the top.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "continuous spectrum"
# "discrete spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.3. Background
Is Required: TRUE Type: ENUM Cardinality: 1.1
Background wave distribution
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.subgrid_scale_orography')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "effect on drag"
# "effect on lifting"
# "enhanced topography"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 45.4. Subgrid Scale Orography
Is Required: TRUE Type: ENUM Cardinality: 1.N
Subgrid scale orography effects taken into account.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 46. Gravity Waves --> Orographic Gravity Waves
Gravity waves generated due to the presence of orography
46.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear mountain waves"
# "hydraulic jump"
# "envelope orography"
# "low level flow blocking"
# "statistical sub-grid scale variance"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "non-linear calculation"
# "more than two cardinal directions"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "includes boundary layer ducting"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 46.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 47. Gravity Waves --> Non Orographic Gravity Waves
Gravity waves generated by non-orographic processes.
47.1. Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Commonly used name for the non-orographic gravity wave scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.source_mechanisms')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "convection"
# "precipitation"
# "background spectrum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.2. Source Mechanisms
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave source mechanisms
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.calculation_method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "spatially dependent"
# "temporally dependent"
# TODO - please enter value(s)
"""
Explanation: 47.3. Calculation Method
Is Required: TRUE Type: ENUM Cardinality: 1.N
Non-orographic gravity wave calculation method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.propagation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "linear theory"
# "non-linear theory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.4. Propagation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave propagation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.gravity_waves.non_orographic_gravity_waves.dissipation_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "total wave"
# "single wave"
# "spectral"
# "linear"
# "wave saturation vs Richardson number"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 47.5. Dissipation Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Non-orographic gravity wave dissipation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 48. Solar
Top of atmosphere solar insolation characteristics
48.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of solar insolation of the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_pathways.pathways')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "SW radiation"
# "precipitating energetic particles"
# "cosmic rays"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 49. Solar --> Solar Pathways
Pathways for solar forcing of the atmosphere
49.1. Pathways
Is Required: TRUE Type: ENUM Cardinality: 1.N
Pathways for the solar forcing of the atmosphere model domain
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 50. Solar --> Solar Constant
Solar constant and top of atmosphere insolation characteristics
50.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of the solar constant.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.fixed_value')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 50.2. Fixed Value
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If the solar constant is fixed, enter the value of the solar constant (W m-2).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.solar_constant.transient_characteristics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 50.3. Transient Characteristics
Is Required: TRUE Type: STRING Cardinality: 1.1
solar constant transient characteristics (W m-2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "transient"
# TODO - please enter value(s)
"""
Explanation: 51. Solar --> Orbital Parameters
Orbital parameters and top of atmosphere insolation characteristics
51.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time adaptation of orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.fixed_reference_date')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 51.2. Fixed Reference Date
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Reference date for fixed orbital parameters (yyyy)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.transient_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 51.3. Transient Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Description of transient orbital parameters
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.orbital_parameters.computation_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Berger 1978"
# "Laskar 2004"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 51.4. Computation Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Method used for computing orbital parameters.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.solar.insolation_ozone.solar_ozone_impact')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 52. Solar --> Insolation Ozone
Impact of solar insolation on stratospheric ozone
52.1. Solar Ozone Impact
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does top of atmosphere insolation impact on stratospheric ozone?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 53. Volcanos
Characteristics of the implementation of volcanoes
53.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview description of the implementation of volcanic effects in the atmosphere
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmos.volcanos.volcanoes_treatment.volcanoes_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "high frequency solar constant anomaly"
# "stratospheric aerosols optical thickness"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 54. Volcanos --> Volcanoes Treatment
Treatment of volcanoes in the atmosphere
54.1. Volcanoes Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How volcanic effects are modeled in the atmosphere.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/bcc/cmip6/models/sandbox-2/toplevel.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'bcc', 'sandbox-2', 'toplevel')
"""
Explanation: ES-DOC CMIP6 Model Properties - Toplevel
MIP Era: CMIP6
Institute: BCC
Source ID: SANDBOX-2
Sub-Topics: Radiative Forcings.
Properties: 85 (42 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:39
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Flux Correction
3. Key Properties --> Genealogy
4. Key Properties --> Software Properties
5. Key Properties --> Coupling
6. Key Properties --> Tuning Applied
7. Key Properties --> Conservation --> Heat
8. Key Properties --> Conservation --> Fresh Water
9. Key Properties --> Conservation --> Salt
10. Key Properties --> Conservation --> Momentum
11. Radiative Forcings
12. Radiative Forcings --> Greenhouse Gases --> CO2
13. Radiative Forcings --> Greenhouse Gases --> CH4
14. Radiative Forcings --> Greenhouse Gases --> N2O
15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
17. Radiative Forcings --> Greenhouse Gases --> CFC
18. Radiative Forcings --> Aerosols --> SO4
19. Radiative Forcings --> Aerosols --> Black Carbon
20. Radiative Forcings --> Aerosols --> Organic Carbon
21. Radiative Forcings --> Aerosols --> Nitrate
22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
24. Radiative Forcings --> Aerosols --> Dust
25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
27. Radiative Forcings --> Aerosols --> Sea Salt
28. Radiative Forcings --> Other --> Land Use
29. Radiative Forcings --> Other --> Solar
1. Key Properties
Key properties of the model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Top level overview of coupled model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of coupled model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Flux Correction
Flux correction properties of the model
2.1. Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how flux corrections are applied in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Genealogy
Genealogy and history of the model
3.1. Year Released
Is Required: TRUE Type: STRING Cardinality: 1.1
Year the model was released
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. CMIP3 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP3 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. CMIP5 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP5 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Previous Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Previously known as
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of model
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.4. Components Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.5. Coupler
Is Required: FALSE Type: ENUM Cardinality: 0.1
Overarching coupling framework for model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Coupling
**
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of coupling in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.2. Atmosphere Double Flux
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.3. Atmosphere Fluxes Calculation Grid
Is Required: FALSE Type: ENUM Cardinality: 0.1
Where are the air-sea fluxes calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Atmosphere Relative Winds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics/diagnostics of the global mean state used in tuning model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics/diagnostics used in tuning model/component (such as 20th century)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.5. Energy Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. Fresh Water Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Conservation --> Heat
Global heat conservation properties of the model
7.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.6. Land Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the land/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation --> Fresh Water
Global fresh water conservation properties of the model
8.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh_water is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh water is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Runoff
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how runoff is distributed and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Iceberg Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how iceberg calving is modeled and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Endoreic Basins
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how endoreic basins (no ocean access) are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Snow Accumulation
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how snow accumulation over land and over sea-ice is treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Key Properties --> Conservation --> Salt
Global salt conservation properties of the model
9.1. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how salt is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Key Properties --> Conservation --> Momentum
Global momentum conservation properties of the model
10.1. Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how momentum is conserved in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Radiative Forcings
Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)
11.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative forcings (GHG and aerosols) implementation in model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Carbon dioxide forcing
12.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Methane forcing
13.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Nitrous oxide forcing
14.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Tropospheric ozone forcing
15.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Stratospheric ozone forcing
16.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Ozone-depleting and non-ozone-depleting fluorinated gases forcing
17.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Equivalence Concentration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Details of any equivalence concentrations used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiative Forcings --> Aerosols --> SO4
SO4 aerosol forcing
18.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiative Forcings --> Aerosols --> Black Carbon
Black carbon aerosol forcing
19.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Organic carbon aerosol forcing
20.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiative Forcings --> Aerosols --> Nitrate
Nitrate forcing
21.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Cloud albedo effect forcing (RFaci)
22.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Cloud lifetime effect forcing (ERFaci)
23.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.3. RFaci From Sulfate Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative forcing from aerosol cloud interactions from sulfate aerosol only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiative Forcings --> Aerosols --> Dust
Dust forcing
24.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Tropospheric volcanic forcing
25.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Stratospheric volcanic forcing
26.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiative Forcings --> Aerosols --> Sea Salt
Sea salt forcing
27.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiative Forcings --> Other --> Land Use
Land use forcing
28.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28.2. Crop Change Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Land use change represented via crop change only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 29. Radiative Forcings --> Other --> Solar
Solar forcing
29.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How solar forcing is provided
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
|
deflaux/linkage-disequilibrium | datalab/Exploring_Linkage_Disequilibrium_Data.ipynb | apache-2.0 | import gcp.bigquery as bq
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Get references to the BigQuery tables of linkage disequilibrium
# in the five superpopulations of the 1000 Genomes Project
# (http://www.1000genomes.org/faq/which-populations-are-part-your-study):
# AMR: Admixed American
# AFR: African
# EUR: European
# SAS: South Asian
# EAS: East Asian
# Maps each superpopulation code to the codes of its constituent
# subpopulations. Only the superpopulation-level LD tables are loaded in
# this notebook; the subpopulation lists are informational.
populations = {
    "AFR": ["ACB", "ASW", "ESN", "GWD", "LWK", "MSL", "YRI"],
    "AMR": ["CLM", "MXL", "PEL", "PUR"],
    "EAS": ["CDX", "CHB", "CHS", "JPT", "KHV"],
    "EUR": ["CEU", "FIN", "GBR", "IBS", "TSI"],
    "SAS": ["BEB", "GIH", "ITU", "PJL", "STU"],
}
# Fixed left-to-right ordering of superpopulations used for all plots below.
POPULATION_PLOT_ORDER = ["AMR", "AFR", "EUR", "SAS", "EAS"]
def get_ld_tablename(population):
    """Builds the fully-qualified name of the public BigQuery LD table.

    Args:
      population: Superpopulation code (e.g. "EUR") identifying the table.

    Returns:
      The BigQuery table name holding the publicly-available linkage
      disequilibrium data for that superpopulation.
    """
    table_template = (
        "genomics-public-data:linkage_disequilibrium_1000G_phase_3.super_pop_{}")
    return table_template.format(population)
# Create one BigQuery table reference per superpopulation. Only the five
# superpopulation-level tables are loaded for this analysis.
tables = {}
# dict.iteritems() is Python-2-only and the subpopulation lists were unused;
# iterating the keys directly is equivalent and works on Python 2 and 3.
for superpopulation in populations:
    tables[superpopulation] = bq.Table(get_ld_tablename(superpopulation))
# Show the number of rows in the European LD table (Datalab renders the
# value of the last expression in a cell).
tables["EUR"].length
"""
Explanation: <!-- Copyright 2015 Google Inc. All rights reserved. -->
<!-- Licensed under the Apache License, Version 2.0 (the "License"); -->
<!-- you may not use this file except in compliance with the License. -->
<!-- You may obtain a copy of the License at -->
<!-- http://www.apache.org/licenses/LICENSE-2.0 -->
<!-- Unless required by applicable law or agreed to in writing, software -->
<!-- distributed under the License is distributed on an "AS IS" BASIS, -->
<!-- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -->
<!-- See the License for the specific language governing permissions and -->
<!-- limitations under the License. -->
Exploring Linkage Disequilibrium Data
This notebook demonstrates working with linkage disequilibrium (LD) data stored as publicly accessible BigQuery datasets.
Specifically, we will work with LD calculated on the 1000 Genomes Phase 3 variants. The source variants were imported to Google Genomics and then LD calculations were performed on various subsets of the data, including all 26 populations in their own datasets as well as on the five superpopulations aggregated separately. The resulting datasets were exported to BigQuery using pipelines in the https://github.com/googlegenomics/linkage-disequilibrium project.
If you want to explore more genomics samples, see https://github.com/googlegenomics/datalab-examples. You can import them into your Datalab instance by uploading them while on the notebook list page.
End of explanation
"""
%%sql --module ld_count_distributions
SELECT
qchrom,
count(*) / 2 AS num_pairs
FROM $all_ld_table
GROUP BY qchrom
# Load LD result pair counts for each superpopulation.
ld_counts = {}
for superpopulation in populations:
    table = tables[superpopulation]
    ld_counts[superpopulation] = bq.Query(ld_count_distributions,
                                          all_ld_table=table).to_dataframe()
    # Rename the count column after the population so that each population
    # contributes a distinct column in the merge below.
    ld_counts[superpopulation].rename(columns={"num_pairs": superpopulation},
                                      inplace=True)

# Merge all LD result pair counts into a single dataframe keyed by chromosome.
merged_counts = None
for superpopulation in POPULATION_PLOT_ORDER:
    counts = ld_counts[superpopulation]
    if merged_counts is None:
        merged_counts = counts
    else:
        merged_counts = pd.merge(merged_counts, counts, on="qchrom", how="inner")
merged_counts["qchrom"] = merged_counts["qchrom"].astype(int)
# DataFrame.sort() was removed in pandas 0.20; sort_values() is the
# supported equivalent for sorting by a column.
merged_counts.sort_values("qchrom", inplace=True)

# Plot the LD result pair counts by chromosome and superpopulation.
melted_counts = pd.melt(merged_counts,
                        id_vars="qchrom",
                        var_name="population",
                        value_name="num_results")
sns.set_style("whitegrid")
ax = sns.barplot(x="qchrom", y="num_results", hue="population", data=melted_counts)
# seaborn.axlabel() was deprecated and later removed; label the Axes that
# barplot returns instead.
ax.set(xlabel="Chromosome", ylabel="Number of LD results")
"""
Explanation: Visualizing the extent of LD across populations
As the above result shows, there are over 3 billion LD results in the European dataset! That's because the 1000 Genomes Phase 3 data has over 80 million variants in it, and LD is computed between pairs of variants--even with the reduction of data stored by windowing and pruning pairs with low LD, there is still a lot of data.
The following analysis will display the total number of LD result pairs on each chromosome separately for each of the five superpopulations. Note that the count values in the query is divided by two since each pair of LD results appears twice in the table.
End of explanation
"""
def merge_summary_stats(data_dict, populations=None):
    """Combines per-population summary statistics into one DataFrame.

    Args:
      data_dict: Maps population name to a DataFrame carrying "mean" and
        "stdev" columns (value read from row 0) and a "quantiles" column of
        five values (min, q1, median, q3, max, in any order).
      populations: Optional list of data_dict keys to include, in the order
        rows should appear. Defaults to all keys, sorted.

    Returns:
      A DataFrame with one row per population and columns: population,
      mean, stdev, min, max, q1, median, q3.
    """
    if populations is None:
        populations = sorted(data_dict.keys())
    column_order = ["population", "mean", "stdev", "min", "max",
                    "q1", "median", "q3"]
    merged = {name: [] for name in column_order}
    for pop in populations:
        stats = data_dict[pop]
        # Sorting the five quantile values recovers them in ascending order.
        low, first_q, mid, third_q, high = sorted(stats["quantiles"])
        merged["population"].append(pop)
        merged["mean"].append(stats["mean"][0])
        merged["stdev"].append(stats["stdev"][0])
        merged["min"].append(low)
        merged["q1"].append(first_q)
        merged["median"].append(mid)
        merged["q3"].append(third_q)
        merged["max"].append(high)
    return pd.DataFrame(merged)
def boxplot_from_summary_stats(data, position_col, ylabel,
                               show_stddev=True, ylim=None, whis=1.5, box_width=.8):
    """Return a boxplot drawn from precomputed summary statistics.

    Mirrors matplotlib.axes.boxplot closely, but consumes already-summarized
    data (quartiles, mean, standard deviation) rather than computing them
    from raw observations.

    Args:
      data: DataFrame with one row per box and columns "q1", "median", "q3",
        "mean", "stdev", plus position_col (e.g. merge_summary_stats output).
        Assumes a default 0..n-1 RangeIndex for box positioning.
      position_col: Column whose values label the boxes on the x-axis.
      ylabel: Label for the y-axis.
      show_stddev: If True, draw a dashed bar from mean-stdev to mean+stdev.
      ylim: Optional (low, high) y-axis limits.
      whis: Whisker length in IQR multiples.  NOTE: whiskers are currently
        not drawn, so this parameter has no visible effect.
      box_width: Width of each box in x-axis units.

    Returns:
      dict of the matplotlib line artists: boxes, medians, means, stds.
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    boxes, medians, means, stds = [], [], [], []
    for pos_minus_1, row in data.iterrows():
        # Boxes sit at integer x positions 1..n.
        pos = pos_minus_1 + 1
        q1 = row["q1"]
        median = row["median"]
        q3 = row["q3"]
        mean = row["mean"]
        stdev = row["stdev"]
        box_x_min = pos - box_width * 0.5
        box_x_max = pos + box_width * 0.5
        # Median line spans the full box width.
        med_y = [median, median]
        med_x = [box_x_min, box_x_max]
        # Box outline traced back to its starting corner to close the path.
        box_x = [box_x_min, box_x_max, box_x_max, box_x_min, box_x_min]
        box_y = [q1, q1, q3, q3, q1]
        # Short horizontal tick marking the mean, centered on the box.
        mean_x = [pos - box_width * .05, pos + box_width * .05]
        mean_y = [mean, mean]
        # Vertical +/- one-standard-deviation bar through the mean.
        std_x = [pos, pos]
        std_y = [mean - stdev, mean + stdev]
        boxes.extend(ax.plot(box_x, box_y, "b-"))
        medians.extend(ax.plot(med_x, med_y, "r-"))
        means.extend(ax.plot(mean_x, mean_y, "b-"))
        if show_stddev:
            stds.extend(ax.plot(std_x, std_y, "b--"))
    # Leading "" keeps labels aligned with tick positions starting at 1.
    ax.set_xticklabels([""] + list(data[position_col]))
    ax.set_ylabel(ylabel)
    # set_axis_bgcolor was removed in matplotlib 2.2; keep a fallback for
    # very old matplotlib versions that predate set_facecolor.
    if hasattr(ax, "set_facecolor"):
        ax.set_facecolor("white")
    else:
        ax.set_axis_bgcolor("white")
    ax.grid(False)
    if ylim is not None:
        ax.set_ylim(ylim)
    return dict(boxes=boxes, medians=medians, means=means, stds=stds)
"""
Explanation: The above results show a consistent trend, where the number of LD results per chromosome is greatest for the Admixed American superpopulation (AMR), followed by African (AFR), European (EUR), South Asian (SAS), and finally East Asian (EAS). While the presence of more LD results in AMR is expected due to population stratification (Hinrichs et al. 2009), the presence of more LD in AFR than EUR is unexpected. The result likely indicates substantial population stratification within the AFR superpopulation, and a need for care in addressing it when performing association studies in a heterogeneous "African" population.
We can investigate this in more detail by examining the distributions of linkage disequilibrium results, performed below.
End of explanation
"""
%%sql --module ld_rsquared_distributions
SELECT
AVG(corr * corr) AS mean,
STDDEV(corr * corr) AS stdev,
QUANTILES(corr * corr, 5) AS quantiles
FROM $all_ld_table
WHERE
qstart < tstart
"""
Explanation: The following query will be used to examine the distribution of r^2 values in each superpopulation.
End of explanation
"""
%%sql --module ld_maf_distributions
SELECT
AVG(LEAST(num_bothone_chroms,
num_chroms - num_qone_chroms - num_tone_chroms + num_bothone_chroms,
num_qone_chroms - num_bothone_chroms,
num_tone_chroms - num_bothone_chroms) / num_chroms) AS mean,
STDDEV(LEAST(num_bothone_chroms,
num_chroms - num_qone_chroms - num_tone_chroms + num_bothone_chroms,
num_qone_chroms - num_bothone_chroms,
num_tone_chroms - num_bothone_chroms) / num_chroms) AS stdev,
QUANTILES(LEAST(num_bothone_chroms,
num_chroms - num_qone_chroms - num_tone_chroms + num_bothone_chroms,
num_qone_chroms - num_bothone_chroms,
num_tone_chroms - num_bothone_chroms) / num_chroms, 5) AS quantiles
FROM $all_ld_table
WHERE
qstart < tstart
# For each superpopulation, load the mean, stdev, and quantile results of r^2
# and the frequency of the most rare haplotype.
rsquared_metrics = {}
maf_metrics = {}
for superpopulation in populations:
table = tables[superpopulation]
rsquared_metrics[superpopulation] = bq.Query(ld_rsquared_distributions,
all_ld_table=table).to_dataframe()
maf_metrics[superpopulation] = bq.Query(ld_maf_distributions,
all_ld_table=table).to_dataframe()
# Merge the superpopulation-specific results into single dataframes.
merged_rsquared_metrics = merge_summary_stats(rsquared_metrics, POPULATION_PLOT_ORDER)
merged_maf_metrics = merge_summary_stats(maf_metrics, POPULATION_PLOT_ORDER)
_ = boxplot_from_summary_stats(merged_rsquared_metrics,
position_col="population",
ylabel="Coefficient of determination (r^2)")
"""
Explanation: The following query will be used to examine the distribution of frequencies of the most rare haplotype for each LD comparison in each superpopulation.
End of explanation
"""
_ = boxplot_from_summary_stats(merged_maf_metrics,
position_col="population",
ylabel="Most-rare-haplotype frequency",
show_stddev=False,
ylim=[0, 0.02])
"""
Explanation: The boxplot of the r^2 measure of LD shows that the mean and median LD are highest for the European and East Asian superpopulations, and substantially lower for the African and Admixed American populations. This suggests that the larger total number of LD results in African and Admixed American populations (as evidenced by the barplot of LD results given above) may arise from rare haplotypes.
To investigate this hypothesis, we can examine the distribution of frequencies of the most rare haplotype in LD results.
End of explanation
"""
|
qingshuimonk/STA663 | docs/VAE_synthetic_Siyang.ipynb | mit | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
import time
from tensorflow.python.client import timeline
import matplotlib.pyplot as plt
%matplotlib inline
FLAGS = tf.app.flags.FLAGS
# number of device count
tf.app.flags.DEFINE_integer('num_cpu_core', 1, 'Number of CPU cores to use')
tf.app.flags.DEFINE_integer('intra_op_parallelism_threads', 1, 'How many ops can be launched in parallel')
tf.app.flags.DEFINE_integer('num_gpu_core', 0, 'Number of GPU cores to use')
device_id = -1 # Global Variable Counter for device_id used
def next_device(use_cpu = True):
    ''' Hand out the next compute device in round-robin order.

    Args: use_cpu - if True allocate from the CPU pool, otherwise the GPU
        pool (pool sizes come from FLAGS); advances the global device_id.
    Return: a device string such as '/cpu:0' or '/gpu:1'.  Once every
        configured core has been handed out, the last device is reused.
        (The original left `device` unassigned in that case, raising
        UnboundLocalError on the second call with num_cpu_core=1.)
    '''
    global device_id
    if use_cpu:
        pool_size = FLAGS.num_cpu_core
        name_pattern = '/cpu:%d'
    else:
        pool_size = FLAGS.num_gpu_core
        name_pattern = '/gpu:%d'
    if device_id + 1 < pool_size:
        device_id += 1
    # Clamp at 0 so an empty pool still yields a syntactically valid device.
    return name_pattern % max(device_id, 0)
"""
Explanation: Test VAE on Synthetic Data
End of explanation
"""
def xavier_init(neuron_in, neuron_out, constant=1):
    """Draw a (neuron_in, neuron_out) weight matrix with Xavier/Glorot
    uniform initialization, scaled by `constant`."""
    bound = constant * np.sqrt(6 / (neuron_in + neuron_out))
    return tf.random_uniform((neuron_in, neuron_out),
                             minval=-bound, maxval=bound, dtype=tf.float32)
def init_weights(config):
    """
    Initialize encoder/decoder weights with the Xavier algorithm.

    Args:
        config: dict of layer sizes ('x_in', 'encoder_1', 'encoder_2',
            'decoder_1', 'decoder_2', 'z').

    Returns:
        (encoder_weights, decoder_weights): dicts of tf.Variables.  Weight
        matrices are Xavier-initialized; all biases start at zero.  The
        decoder's 'sigma'/'bias_sigma' head is created here but is not
        used by reconstruct_x.
    """
    encoder_weights = dict()
    decoder_weights = dict()
    # two layers encoder, plus Gaussian output heads ('mu'/'sigma')
    encoder_weights['h1'] = tf.Variable(xavier_init(config['x_in'], config['encoder_1']))
    encoder_weights['h2'] = tf.Variable(xavier_init(config['encoder_1'], config['encoder_2']))
    encoder_weights['mu'] = tf.Variable(xavier_init(config['encoder_2'], config['z']))
    encoder_weights['sigma'] = tf.Variable(xavier_init(config['encoder_2'], config['z']))
    encoder_weights['b1'] = tf.Variable(tf.zeros([config['encoder_1']], dtype=tf.float32))
    encoder_weights['b2'] = tf.Variable(tf.zeros([config['encoder_2']], dtype=tf.float32))
    encoder_weights['bias_mu'] = tf.Variable(tf.zeros([config['z']], dtype=tf.float32))
    encoder_weights['bias_sigma'] = tf.Variable(tf.zeros([config['z']], dtype=tf.float32))
    # two layers decoder, mirroring the encoder shape back to x_in
    decoder_weights['h1'] = tf.Variable(xavier_init(config['z'], config['decoder_1']))
    decoder_weights['h2'] = tf.Variable(xavier_init(config['decoder_1'], config['decoder_2']))
    decoder_weights['mu'] = tf.Variable(xavier_init(config['decoder_2'], config['x_in']))
    decoder_weights['sigma'] = tf.Variable(xavier_init(config['decoder_2'], config['x_in']))
    decoder_weights['b1'] = tf.Variable(tf.zeros([config['decoder_1']], dtype=tf.float32))
    decoder_weights['b2'] = tf.Variable(tf.zeros([config['decoder_2']], dtype=tf.float32))
    decoder_weights['bias_mu'] = tf.Variable(tf.zeros([config['x_in']], dtype=tf.float32))
    decoder_weights['bias_sigma'] = tf.Variable(tf.zeros([config['x_in']], dtype=tf.float32))
    return (encoder_weights, decoder_weights)
def forward_z(x, encoder_weights):
    """
    Encoder pass: map input x to the parameters of the latent Gaussian.

    Returns (z_mean, z_sigma); z_sigma is consumed downstream as the
    log-variance of q(z|x) (vae_init takes sqrt(exp(z_sigma))).
    """
    # Each hidden layer may be pinned to a different device (round-robin
    # via next_device); softplus activations on both hidden layers.
    with tf.device(next_device()):
        layer_1 = tf.nn.softplus(tf.add(tf.matmul(x, encoder_weights['h1']), encoder_weights['b1']))
    with tf.device(next_device()):
        layer_2 = tf.nn.softplus(tf.add(tf.matmul(layer_1, encoder_weights['h2']), encoder_weights['b2']))
        # Linear output heads for the Gaussian parameters.
        z_mean = tf.add(tf.matmul(layer_2, encoder_weights['mu']), encoder_weights['bias_mu'])
        z_sigma = tf.add(tf.matmul(layer_2, encoder_weights['sigma']), encoder_weights['bias_sigma'])
    return(z_mean, z_sigma)
def reconstruct_x(z, decoder_weights):
    """
    Decoder pass: map a latent sample z back to a reconstruction of x.

    Returns x_prime with sigmoid outputs in (0, 1), matching the Bernoulli
    cross-entropy reconstruction loss built in optimize_func.
    """
    # Mirror of forward_z: two softplus hidden layers with round-robin
    # device placement, then a sigmoid output head ('mu' weights only).
    with tf.device(next_device()):
        layer_1 = tf.nn.softplus(tf.add(tf.matmul(z, decoder_weights['h1']), decoder_weights['b1']))
    with tf.device(next_device()):
        layer_2 = tf.nn.softplus(tf.add(tf.matmul(layer_1, decoder_weights['h2']), decoder_weights['b2']))
        x_prime = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, decoder_weights['mu']), decoder_weights['bias_mu']))
    return x_prime
def optimize_func(z, z_mean, z_sigma, x, x_prime, learn_rate):
    """
    Build the VAE loss (reconstruction + KL divergence) and its optimizer.

    Args:
        z: sampled latent tensor (kept for interface compatibility; unused).
        z_mean: encoder mean of the latent Gaussian.
        z_sigma: encoder log-variance of the latent Gaussian.
        x: input placeholder.
        x_prime: decoder reconstruction of x.
        learn_rate: Adam learning rate.

    Returns:
        (cost, optimizer): the mean total loss tensor and the training op.
    """
    # Reconstruction loss: per-example binary cross-entropy between the
    # input and its reconstruction (1e-10 guards against log(0)).
    recons_loss = -tf.reduce_sum(x * tf.log(1e-10 + x_prime) + (1-x) * tf.log(1e-10 + 1 - x_prime), 1)
    # KL divergence between q(z|x) = N(z_mean, exp(z_sigma)) and N(0, I):
    # -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2).
    # BUG FIX: the original used tf.exp(z) -- the *sampled* latent -- where
    # the closed-form KL term requires tf.exp(z_sigma), the log-variance.
    latent_loss = -0.5 * tf.reduce_sum(1 + z_sigma - tf.square(z_mean) - tf.exp(z_sigma), 1)
    # summing two loss terms together, averaged over the batch
    cost = tf.reduce_mean(recons_loss + latent_loss)
    # use ADAM to optimize
    optimizer = tf.train.AdamOptimizer(learning_rate=learn_rate).minimize(cost)
    return (cost, optimizer)
def vae_init(batch_size=100, learn_rate=0.001, config=None):
    """
    Build a variational autoencoder based on
    https://jmetzen.github.io/2015-11-27/vae.html
    In consideration of simplicity and future work on optimization, the
    class structure of the original was removed.

    Args:
        batch_size: examples per batch (also the number of latent noise
            samples drawn per step).
        learn_rate: Adam learning rate for the optimizer.
        config: dict of layer sizes ('x_in', 'encoder_1', 'encoder_2',
            'decoder_1', 'decoder_2', 'z').  Defaults to the MNIST-sized
            network below.  (Was a mutable default argument `config={}`
            that was filled in place and shared across calls; None is the
            safe idiom and the caller's dict is no longer mutated.)

    Returns:
        (sess, optimizer, cost, x, x_prime): a running TF session, the
        training op, the loss tensor, the input placeholder, and the
        reconstruction tensor.
    """
    # use default setting if no configuration is specified
    if not config:
        config = {}
        config['x_in'] = 784
        config['encoder_1'] = 500
        config['encoder_2'] = 500
        config['decoder_1'] = 500
        config['decoder_2'] = 500
        config['z'] = 20
    # input placeholder for flattened examples
    x = tf.placeholder(tf.float32, [None, config['x_in']])
    # initialize weights
    (encoder_weights, decoder_weights) = init_weights(config)
    # compute mean and log-variance of z
    (z_mean, z_sigma) = forward_z(x, encoder_weights)
    # reparameterization trick: z = mu + sigma * eps, eps ~ N(0, 1)
    eps = tf.random_normal((batch_size, config['z']), 0, 1, dtype=tf.float32)
    z_val = tf.add(z_mean, tf.multiply(tf.sqrt(tf.exp(z_sigma)), eps))
    # use z to reconstruct the input
    x_prime = reconstruct_x(z_val, decoder_weights)
    # define loss function and optimizer
    (cost, optimizer) = optimize_func(z_val, z_mean, z_sigma, x, x_prime, learn_rate)
    # initialize all variables
    init = tf.global_variables_initializer()
    # limit to num_cpu_core CPU usage; log placement for debugging
    config_ = tf.ConfigProto(device_count={"CPU": FLAGS.num_cpu_core},
                             inter_op_parallelism_threads = 1,
                             intra_op_parallelism_threads = FLAGS.intra_op_parallelism_threads,
                             log_device_placement=True)
    # define and return the session
    sess = tf.InteractiveSession(config=config_)
    sess.run(init)
    return (sess, optimizer, cost, x, x_prime)
"""
Explanation: Define VAE
End of explanation
"""
# Two 1-D Gaussian classes: H0 ~ N(0, 0.5), H1 ~ N(2, 0.5).
mu_H0, sigma_H0 = 0, 0.5
mu_H1, sigma_H1 = 2, 0.5
n_samples = 1000000
# One column vector of samples per class.
H0 = np.random.normal(mu_H0, sigma_H0, n_samples).reshape((-1,1))
H1 = np.random.normal(mu_H1, sigma_H1, n_samples).reshape((-1,1))
# Class labels: 0 for H0, 1 for H1.  BUG FIX: the original assigned both
# arrays to H0_label, leaving H1_label undefined.
H0_label = np.zeros((n_samples, 1))
H1_label = np.ones((n_samples, 1))
"""
Explanation: 1D Gaussian
Generate synthetic data with two classes:
$H0 \sim \mathcal{N}(0, 0.5)$
$H1 \sim \mathcal{N}(2, 0.5)$
Each of them have 1000000 samples
End of explanation
"""
# Overlay normalized histograms of the two classes with their analytic
# Gaussian densities, evaluated at the histogram bin edges.
# NOTE(review): `normed` was deprecated in favor of `density` in newer
# matplotlib releases.
_, bins_0, _ = plt.hist(H0, 50, normed=True, label='H0')
_, bins_1, _ = plt.hist(H1, 50, normed=True, label='H1')
plt.plot(bins_0, 1/(sigma_H0 * np.sqrt(2 * np.pi)) * np.exp( - (bins_0 - mu_H0)**2 / (2 * sigma_H0**2) ),
         linewidth=2, color='r', label='H0 ideal')
plt.plot(bins_1, 1/(sigma_H1 * np.sqrt(2 * np.pi)) * np.exp( - (bins_1 - mu_H1)**2 / (2 * sigma_H1**2) ),
         linewidth=2, color='y', label='H1 ideal')
plt.legend(fontsize=10)
plt.show()
"""
Explanation: Plot the data to verify:
End of explanation
"""
data = np.concatenate((H0, H1), axis=0)
np.random.shuffle(data)
def vae_train(sess, optimizer, cost, x, n_samples, batch_size=100, learn_rate=0.001, train_epoch=10, verb=1, verb_step=5):
    """Run minibatch VAE training over the module-level `data` array.

    Args:
        sess, optimizer, cost, x: session and graph handles from vae_init.
        n_samples: total number of training examples available in `data`.
        batch_size: examples per gradient step.
        learn_rate: unused here -- the optimizer's learning rate was fixed
            when the graph was built in vae_init.
        train_epoch: number of passes over the data.
        verb, verb_step: print progress every verb_step epochs when verb
            is truthy.

    NOTE(review): batches are sliced from the global `data` array rather
    than a parameter, and the printed "Elapsed time" is the last epoch's
    wall time (ms) divided by verb_step.
    """
    start_time = time.time()
    for epoch in range(train_epoch):
        avg_cost = 0
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            # slice the next minibatch from the (pre-shuffled) global data
            batch_x = data[i*batch_size:(i+1)*batch_size]
            _, c = sess.run((optimizer, cost), feed_dict={x: batch_x})
            # accumulate the epoch's average batch cost
            avg_cost += c / n_samples * batch_size
        elapsed_time = (time.time() - start_time)* 1000 / verb_step
        start_time = time.time()
        if verb:
            if epoch % verb_step == 0:
                print('Epoch:%04d' % (epoch+1), 'cost=', '{:.9f}'.format(avg_cost), 'Elapsed time: ','%.9f' % elapsed_time)
# Tiny network for scalar inputs: 1 neuron per hidden layer, 2-D latent.
config = {}
config['x_in'] = 1
config['encoder_1'] = 1
config['encoder_2'] = 1
config['decoder_1'] = 1
config['decoder_2'] = 1
config['z'] = 2
# Large batches keep the number of sess.run calls per epoch small.
batch_size = 50000
(sess, optimizer, cost, x, x_prime) = vae_init(batch_size=batch_size, config=config)
# NOTE(review): learn_rate here is ignored by vae_train; the optimizer was
# already built inside vae_init with its own learning-rate default.
vae_train(sess, optimizer, cost, x, n_samples*2, batch_size=batch_size, learn_rate=0.00001, train_epoch=26)
"""
Explanation: Merge and shuffle them, use VAE to train on data
End of explanation
"""
# Push the shuffled data back through the trained encoder/decoder
# batch-by-batch (the graph's noise tensor is sized for batch_size).
total_batch = int(n_samples*2 / batch_size)
data_reconstruct = []
for i in range(total_batch):
    data_reconstruct.append(sess.run(x_prime, feed_dict={x: data[i*batch_size:(i+1)*batch_size]}))
data_reconstruct = np.concatenate(data_reconstruct, axis=0)
# Compare the reconstruction histogram against the two ideal class densities.
_, bins, _ = plt.hist(data_reconstruct, bins=50, normed=True, label='reconstruct')
plt.plot(bins_0, 1/(sigma_H0 * np.sqrt(2 * np.pi)) * np.exp( - (bins_0 - mu_H0)**2 / (2 * sigma_H0**2) ),
         linewidth=2, color='r', label='H0 ideal')
plt.plot(bins_1, 1/(sigma_H1 * np.sqrt(2 * np.pi)) * np.exp( - (bins_1 - mu_H1)**2 / (2 * sigma_H1**2) ),
         linewidth=2, color='y', label='H1 ideal')
plt.legend(fontsize=10)
plt.show()
"""
Explanation: Reconstruct Data
End of explanation
"""
# Two 2-D Gaussian classes with diagonal covariances.
mu_H0, sigma_H0 = (0,0), [[0.3,0],[0,0.3]]
mu_H1, sigma_H1 = (2,2), [[0.1,0],[0,0.9]]
n_samples = 500000
# (n_samples, 2) points per class.
H0 = np.random.multivariate_normal(mu_H0, sigma_H0, n_samples).reshape((-1,2))
H1 = np.random.multivariate_normal(mu_H1, sigma_H1, n_samples).reshape((-1,2))
# Class labels: 0 for H0, 1 for H1.  BUG FIX: the original assigned both
# arrays to H0_label, leaving H1_label undefined.
H0_label = np.zeros((n_samples, 1))
H1_label = np.ones((n_samples, 1))
# Visualize the two 2-D Gaussian classes (blue = H0, red = H1).
plt.scatter(H0[:,0], H0[:,1], c='b', label='H0')
plt.scatter(H1[:,0], H1[:,1], c='r', label='H1')
plt.legend(fontsize=10)
plt.show()
data = np.concatenate((H0, H1), axis=0)
np.random.shuffle(data)
def vae_train(sess, optimizer, cost, x, n_samples, batch_size=100, learn_rate=0.001, train_epoch=10, verb=1, verb_step=5):
    """Minibatch VAE training over the global 2-D `data` array.

    Redefines the earlier vae_train; the only change is the explicit
    two-axis slice (`[..., :]`) when taking minibatches of 2-D rows.
    learn_rate is unused (the optimizer was built in vae_init); `data`
    is read from module scope rather than passed in.
    """
    start_time = time.time()
    for epoch in range(train_epoch):
        avg_cost = 0
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            # next minibatch of 2-D samples from the shuffled global data
            batch_x = data[i*batch_size:(i+1)*batch_size,:]
            _, c = sess.run((optimizer, cost), feed_dict={x: batch_x})
            # accumulate the epoch's average batch cost
            avg_cost += c / n_samples * batch_size
        elapsed_time = (time.time() - start_time)* 1000 / verb_step
        start_time = time.time()
        if verb:
            if epoch % verb_step == 0:
                print('Epoch:%04d' % (epoch+1), 'cost=', '{:.9f}'.format(avg_cost), 'Elapsed time: ','%.9f' % elapsed_time)
# Tiny network for 2-D inputs: 1 neuron per hidden layer, 2-D latent.
config = {}
config['x_in'] = 2
config['encoder_1'] = 1
config['encoder_2'] = 1
config['decoder_1'] = 1
config['decoder_2'] = 1
config['z'] = 2
batch_size = 10000
(sess, optimizer, cost, x, x_prime) = vae_init(batch_size=batch_size, config=config)
# NOTE(review): learn_rate is ignored by vae_train; the optimizer's rate
# was fixed inside vae_init.
vae_train(sess, optimizer, cost, x, n_samples*2, batch_size=batch_size, learn_rate=0.001, train_epoch=26)
"""
Explanation: 2D Gaussian
Generate synthetic data with two classes:
$H0 \sim \mathcal{N}((0,0), (0.3,0.3))$
$H1 \sim \mathcal{N}((2,2), (0.1,0.9))$
Each of them have 500000 samples
End of explanation
"""
# Reconstruct the 2-D data batch-by-batch and scatter-plot the result.
total_batch = int(n_samples*2 / batch_size)
data_reconstruct = []
for i in range(total_batch):
    data_reconstruct.append(sess.run(x_prime, feed_dict={x: data[i*batch_size:(i+1)*batch_size,:]}))
data_reconstruct = np.concatenate(data_reconstruct, axis=0)
plt.scatter(data_reconstruct[:,0], data_reconstruct[:,1])
plt.show()
"""
Explanation: Reconstruct Data
End of explanation
"""
|
Santana9937/Classification_ML_Specialization | Week_3_Decision_Trees/week_3_assign_1_safe_loans_decision_trees.ipynb | mit | import json
import numpy as np
import pandas as pd
import sklearn, sklearn.tree
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
%matplotlib inline
"""
Explanation: Identifying safe loans with decision trees
The LendingClub is a peer-to-peer lending company that directly connects borrowers and potential lenders/investors. In this notebook, you will build a classification model to predict whether or not a loan provided by LendingClub is likely to default.
In this notebook you will use data from the LendingClub to predict whether a loan will be paid off in full or the loan will be charged off and possibly go into default. In this assignment you will:
Use SFrames to do some feature engineering.
Train a decision-tree on the LendingClub dataset.
Visualize the tree.
Predict whether a loan will default along with prediction probabilities (on a validation set).
Train a complex tree model and compare it to simple tree model.
Let's get started!
Importing Libraries
End of explanation
"""
loans = pd.read_csv("lending-club-data_assign_1.csv")
"""
Explanation: Load LendingClub Loans dataset
We will be using a dataset from the LendingClub. A parsed and cleaned form of the dataset is available here. Make sure you download the dataset before running the following command.
End of explanation
"""
loans.head()
"""
Explanation: Exploring some features
Let's quickly explore what the dataset looks like. First, let's look at the first few entries of the loans dataframe.
End of explanation
"""
loans.columns.values
"""
Explanation: Now, let's print out the column names to see what features we have in this dataset.
End of explanation
"""
plt.figure(figsize=(10,6))
loans['grade'].value_counts().plot(kind='bar')
plt.tick_params(axis='x', labelsize=18)
plt.xticks(rotation='horizontal')
plt.tick_params(axis='y', labelsize=18)
plt.title("Histogram of Loan Grades", fontsize=18)
plt.xlabel("Loan Grades", fontsize=18)
plt.ylabel("Count", fontsize=18)
"""
Explanation: Here, we see that we have some feature columns that have to do with grade of the loan, annual income, home ownership status, etc. Let's take a look at the distribution of loan grades in the dataset.
End of explanation
"""
plt.figure(figsize=(10,6))
loans['home_ownership'].value_counts().plot(kind='bar')
plt.tick_params(axis='x', labelsize=18)
plt.xticks(rotation='horizontal')
plt.tick_params(axis='y', labelsize=18)
plt.title("Histogram of Home Ownership", fontsize=18)
plt.xlabel("Home Ownership Type", fontsize=18)
plt.ylabel("Count", fontsize=18)
"""
Explanation: We can see that over half of the loan grades are assigned values B or C. Each loan is assigned one of these grades, along with a more finely discretized feature called sub_grade (feel free to explore that feature column as well!). These values depend on the loan application and credit report, and determine the interest rate of the loan. More information can be found here.
Now, let's look at a different feature.
End of explanation
"""
# safe_loans = 1 => safe
# safe_loans = -1 => risky
loans['safe_loans'] = loans['bad_loans'].apply(lambda x : +1 if x==0 else -1)
# Drop the original label column.  Pass axis as a keyword: the positional
# axis argument to DataFrame.drop was deprecated and later removed in pandas.
loans = loans.drop('bad_loans', axis=1)
"""
Explanation: This feature describes whether the loanee is mortgaging, renting, or owns a home. We can see that only a small percentage of the loanees own a home.
Exploring the target column
The target column (label column) of the dataset that we are interested in is called bad_loans. In this column 1 means a risky (bad) loan 0 means a safe loan.
In order to make this more intuitive and consistent with the lectures, we reassign the target to be:
* +1 as a safe loan,
* -1 as a risky (bad) loan.
We put this in a new column called safe_loans.
End of explanation
"""
plt.figure(figsize=(10,6))
loans['safe_loans'].value_counts().plot(kind='bar')
plt.tick_params(axis='x', labelsize=18)
plt.xticks(rotation='horizontal')
plt.tick_params(axis='y', labelsize=18)
plt.title("Histogram of whether a Loan is safe or risky", fontsize=18)
plt.xlabel("Safe Loan=1, Risky Loan=-1", fontsize=18)
plt.ylabel("Count", fontsize=18)
print "Percentage of safe loans: %.1f%%" %((loans['safe_loans'].value_counts().ix[1]/float(len(loans['safe_loans'])))*100.0)
print "Percentage of risky loans: %.1f%%" %((loans['safe_loans'].value_counts().ix[-1]/float(len(loans['safe_loans'])))*100.0)
"""
Explanation: Now, let us explore the distribution of the column safe_loans. This gives us a sense of how many safe and risky loans are present in the dataset.
End of explanation
"""
features = ['grade', # grade of the loan
'sub_grade', # sub-grade of the loan
'short_emp', # one year or less of employment
'emp_length_num', # number of years of employment
'home_ownership', # home_ownership status: own, mortgage or rent
'dti', # debt to income ratio
'purpose', # the purpose of the loan
'term', # the term of the loan
'last_delinq_none', # has borrower had a delinquincy
'last_major_derog_none', # has borrower had 90 day or worse rating
'revol_util', # percent of available credit being used
'total_rec_late_fee', # total late fees received to day
]
target = 'safe_loans' # prediction target (y) (+1 means safe, -1 is risky)
# Extract the feature columns and target column
loans = loans[features + [target]]
"""
Explanation: You should have:
* Around 81% safe loans
* Around 19% risky loans
It looks like most of these loans are safe loans (thankfully). But this does make our problem of identifying risky loans challenging.
Features for the classification algorithm
In this assignment, we will be using a subset of features (categorical and numeric). The features we will be using are described in the code comments below. If you are a finance geek, the LendingClub website has a lot more details about these features.
End of explanation
"""
safe_loans_raw = loans[loans[target] == +1]
risky_loans_raw = loans[loans[target] == -1]
print "Number of safe loans : %s" % len(safe_loans_raw)
print "Number of risky loans : %s" % len(risky_loans_raw)
"""
Explanation: What remains now is a subset of features and the target that we will use for the rest of this notebook.
Sample data to balance classes
As we explored above, our data is disproportionally full of safe loans. Let's create two datasets: one with just the safe loans (safe_loans_raw) and one with just the risky loans (risky_loans_raw).
End of explanation
"""
print "Percentage of safe loans : %.1f%%" %((float(len(safe_loans_raw))/len(loans[target]))*100.0)
print "Percentage of risky loans : %.1f%%" %((float(len(risky_loans_raw))/len(loans[target]))*100.0)
"""
Explanation: Now, write some code to compute below the percentage of safe and risky loans in the dataset and validate these numbers against what was calculated earlier in the assignment:
End of explanation
"""
loans_one_hot_enc = pd.get_dummies(loans)
"""
Explanation: As can be seen, there are many more safe loans than risky loans in the data set. The training data and validation data we will load combat this class imbalance and have roughly 50% safe loans and 50% risky loans.
Performing one-hot encoding with Pandas
Before performing analysis on the data, we need to perform one-hot encoding for all of the categorical data. Once the one-hot encoding is performed on all of the data, we will split the data into a training set and a validation set.
End of explanation
"""
# Predefined train/validation row indices shipped with the assignment.
with open('module-5-assignment-1-train-idx.json', 'r') as f:
    train_idx_lst = json.load(f)
# Coerce entries to int in case the JSON stored them as strings/floats.
train_idx_lst = [int(entry) for entry in train_idx_lst]
with open('module-5-assignment-1-validation-idx.json', 'r') as f:
    validation_idx_lst = json.load(f)
validation_idx_lst = [int(entry) for entry in validation_idx_lst]
"""
Explanation: Loading the training and validation datasets
Loading the JSON files with the indicies from the training data and the validation data into a a list.
End of explanation
"""
# .ix was deprecated and removed in pandas 1.0.  The JSON indices are row
# labels of the default RangeIndex (preserved by read_csv and get_dummies),
# so label-based .loc selects the same rows .ix did.
train_data = loans_one_hot_enc.loc[train_idx_lst]
validation_data = loans_one_hot_enc.loc[validation_idx_lst]
"""
Explanation: Using the list of the training data indicies and the validation data indicies to get a DataFrame with the training data and a DataFrame with the validation data.
End of explanation
"""
decision_tree_model = sklearn.tree.DecisionTreeClassifier(max_depth=6)
# Select every feature column except the target via a boolean column mask;
# .loc replaces the deprecated (and removed) .ix accessor.
decision_tree_model.fit(train_data.loc[:, train_data.columns != "safe_loans"], train_data["safe_loans"])
"""
Explanation: Use decision tree to build a classifier
Now, let's use the built-in GraphLab Create decision tree learner to create a loan prediction model on the training data. (In the next assignment, you will implement your own decision tree learning algorithm.) Our feature columns and target column have already been decided above. Use validation_set=None to get the same results as everyone else.
Using sklearn to learn a decision tree classification model. The first entry in .fit is all the data, excluding the target variable "safe_loans", and the second entry is the target variable "safe_loans".
First, training a tree with max_depth=6
End of explanation
"""
small_model = sklearn.tree.DecisionTreeClassifier(max_depth=2)
# Same feature selection as decision_tree_model; .loc replaces the
# deprecated (and removed) .ix accessor.
small_model.fit(train_data.loc[:, train_data.columns != "safe_loans"], train_data["safe_loans"])
"""
Explanation: Now, training a tree with max_depth=2
End of explanation
"""
# Split the validation set by class and take two examples of each.
validation_safe_loans = validation_data[validation_data[target] == 1]
validation_risky_loans = validation_data[validation_data[target] == -1]
sample_validation_data_risky = validation_risky_loans[0:2]
sample_validation_data_safe = validation_safe_loans[0:2]
# DataFrame.append was deprecated and removed in pandas 2.0; pd.concat
# produces the same stacked frame (safe rows first, then risky).
sample_validation_data = pd.concat([sample_validation_data_safe, sample_validation_data_risky])
sample_validation_data
"""
Explanation: Making predictions
Let's consider two positive and two negative examples from the validation set and see what the model predicts. We will do the following:
* Predict whether or not a loan is safe.
* Predict the probability that a loan is safe.
End of explanation
"""
samp_vald_data_pred = decision_tree_model.predict(sample_validation_data.ix[:, sample_validation_data.columns != "safe_loans"])
samp_vald_data_label = sample_validation_data["safe_loans"].values
"""
Explanation: Explore label predictions
Now, we will use our model to predict whether or not a loan is likely to default. For each row in the sample_validation_data, use the decision_tree_model to predict whether or not the loan is classified as a safe loan.
Hint: Be sure to use the .predict() method.
End of explanation
"""
print "%.1f%%" %((np.sum(samp_vald_data_pred == samp_vald_data_label)/float(len(samp_vald_data_pred)))*100.0)
"""
Explanation: Quiz Question: What percentage of the predictions on sample_validation_data did decision_tree_model get correct?
End of explanation
"""
samp_vald_data_prob = decision_tree_model.predict_proba(sample_validation_data.ix[:, sample_validation_data.columns != "safe_loans"])[:,1]
"""
Explanation: Explore probability predictions
For each row in the sample_validation_data, what is the probability (according decision_tree_model) of a loan being classified as safe?
End of explanation
"""
sample_validation_data.index[np.argmax(samp_vald_data_prob)]
sample_validation_data
"""
Explanation: Quiz Question: Which loan has the highest probability of being classified as a safe loan?
End of explanation
"""
small_model.predict_proba(sample_validation_data.ix[:, sample_validation_data.columns != "safe_loans"])[:,1]
"""
Explanation: 41 corresponds to the 4th loan
Tricky predictions!
Now, we will explore something pretty interesting. For each row in the sample_validation_data, what is the probability (according to small_model) of a loan being classified as safe?
Hint: Set output_type='probability' to make probability predictions using small_model on sample_validation_data:
End of explanation
"""
sample_validation_data
"""
Explanation: Quiz Question: Notice that the probability predictions are the exact same for the 2nd and 3rd loans. Why would this happen?
During tree traversal both examples fall into the same leaf node.
Now, let's consider the 2nd entry in the sample_validation_data
End of explanation
"""
sample_validation_data.ix[79]
"""
Explanation: The 2nd entry of sample_validation_data has index 79
End of explanation
"""
small_model.predict(sample_validation_data.ix[79, sample_validation_data.columns != "safe_loans"])[0]
"""
Explanation: Quiz Question: Based on the small_model , what prediction would you make for this data point?
End of explanation
"""
small_model_train_acc = small_model.score(train_data.ix[:, train_data.columns != "safe_loans"], train_data["safe_loans"])
decision_tree_model_train_acc = decision_tree_model.score(train_data.ix[:, train_data.columns != "safe_loans"], train_data["safe_loans"])
print small_model_train_acc
print decision_tree_model_train_acc
"""
Explanation: Evaluating accuracy of the decision tree model
Recall that the accuracy is defined as follows:
$$
\mbox{accuracy} = \frac{\mbox{# correctly classified examples}}{\mbox{# total examples}}
$$
Let us start by evaluating the accuracy of the small_model and decision_tree_model on the training data
End of explanation
"""
decision_tree_model_train_acc > small_model_train_acc
"""
Explanation: Checkpoint: You should see that the small_model performs worse than the decision_tree_model on the training data.
End of explanation
"""
decision_tree_model_valid_acc = decision_tree_model.score(validation_data.ix[:, validation_data.columns != "safe_loans"], validation_data["safe_loans"])
print "Accuracy of decision_tree_model on validation set: %.2f" %(decision_tree_model_valid_acc)
"""
Explanation: Now, let us evaluate the accuracy of the small_model and decision_tree_model on the entire validation_data, not just the subsample considered above.
Quiz Question: What is the accuracy of decision_tree_model on the validation set, rounded to the nearest .01?
End of explanation
"""
big_model = sklearn.tree.DecisionTreeClassifier(max_depth=10)
big_model.fit(train_data.ix[:, train_data.columns != "safe_loans"], train_data["safe_loans"])
"""
Explanation: Evaluating accuracy of a complex decision tree model
Here, we will train a large decision tree with max_depth=10. This will allow the learned tree to become very deep, and result in a very complex model. Recall that in lecture, we prefer simpler models with similar predictive power. This will be an example of a more complicated model which has similar predictive power, i.e. something we don't want.
End of explanation
"""
big_model_train_acc = big_model.score(train_data.ix[:, train_data.columns != "safe_loans"], train_data["safe_loans"])
big_model_valid_acc = big_model.score(validation_data.ix[:, validation_data.columns != "safe_loans"], validation_data["safe_loans"])
"""
Explanation: Now, let us evaluate the accuracy of the big_model on the training set and validation set.
End of explanation
"""
big_model_train_acc > decision_tree_model_train_acc
"""
Explanation: Checkpoint: We should see that big_model has even better performance on the training set than decision_tree_model did on the training set.
End of explanation
"""
big_model_valid_acc > decision_tree_model_valid_acc
"""
Explanation: Quiz Question: How does the performance of big_model on the validation set compare to decision_tree_model on the validation set? Is this a sign of overfitting?
End of explanation
"""
predic_valid_data = decision_tree_model.predict(validation_data.ix[:, validation_data.columns != "safe_loans"])
labels_valid_data = validation_data["safe_loans"].values
"""
Explanation: The big_model has more features, performs better on the training dataset, but worse on the validation dataset. This is a sign of overfitting.
Quantifying the cost of mistakes
Every mistake the model makes costs money. In this section, we will try and quantify the cost of each mistake made by the model.
Assume the following:
False negatives: Loans that were actually safe but were predicted to be risky. This results in an opportunity cost of losing a loan that would have otherwise been accepted.
False positives: Loans that were actually risky but were predicted to be safe. These are much more expensive because it results in a risky loan being given.
Correct predictions: All correct predictions don't typically incur any cost.
Let's write code that can compute the cost of mistakes made by the model.
First, let us make predictions on validation_data using the decision_tree_model. Then, let's store the labels of validation_data.
End of explanation
"""
N_false_pos = 0
N_false_neg = 0
"""
Explanation: Now, let's initialize counters that will store the number of false positive and the number of false negatives to 0.
End of explanation
"""
# Tally the two mistake types: a false positive is a risky loan (label -1)
# predicted safe (+1); a false negative is a safe loan predicted risky.
for idx in range(len(labels_valid_data)):
    predicted = predic_valid_data[idx]
    if predicted == labels_valid_data[idx]:
        continue
    if predicted == 1:
        N_false_pos += 1
    else:
        N_false_neg += 1
"""
Explanation: Now, let's loop over the data to determine the number of false positive and the number of false negatives. False positives are predictions where the model predicts +1 but the true label is -1. False negatives are predictions where the model predicts -1 but the true label is +1.
End of explanation
"""
10000*N_false_neg + 20000*N_false_pos
"""
Explanation: Quiz Question: Let us assume that each mistake costs money:
* Assume a cost of \$10,000 per false negative.
* Assume a cost of \$20,000 per false positive.
What is the total cost of mistakes made by decision_tree_model on validation_data?
End of explanation
"""
|
maxrose61/GA_DS | maxrose_hw/oct13/06_yelp_votes_homework.ipynb | gpl-3.0 | # access yelp.csv using a relative path
import pandas as pd
yelp = pd.read_csv('../data/yelp.csv')
yelp.head(1)
"""
Explanation: Linear regression homework with Yelp votes
Introduction
This assignment uses a small subset of the data from Kaggle's Yelp Business Rating Prediction competition.
Description of the data:
yelp.json is the original format of the file. yelp.csv contains the same data, in a more convenient format. Both of the files are in this repo, so there is no need to download the data from the Kaggle website.
Each observation in this dataset is a review of a particular business by a particular user.
The "stars" column is the number of stars (1 through 5) assigned by the reviewer to the business. (Higher stars is better.) In other words, it is the rating of the business by the person who wrote the review.
The "cool" column is the number of "cool" votes this review received from other Yelp users. All reviews start with 0 "cool" votes, and there is no limit to how many "cool" votes a review can receive. In other words, it is a rating of the review itself, not a rating of the business.
The "useful" and "funny" columns are similar to the "cool" column.
Task 1
Read yelp.csv into a DataFrame.
End of explanation
"""
# read the data from yelp.json into a list of rows
# each row is decoded into a dictionary named "data" using using json.loads()
import json
with open('../data/yelp.json', 'rU') as f:
data = [json.loads(row) for row in f]
# show the first review
data[0]
# convert the list of dictionaries to a DataFrame
ydata = pd.DataFrame(data)
ydata.head(2)
# add DataFrame columns for cool, useful, and funny
x = pd.DataFrame.from_records(ydata.votes)
ydata= pd.concat([ydata, x], axis=1)
ydata.head(2)
# drop the votes column and then display the head
ydata.drop("votes", axis=1, inplace=True)
ydata.head(2)
"""
Explanation: Task 1 (Bonus)
Ignore the yelp.csv file, and construct this DataFrame yourself from yelp.json. This involves reading the data into Python, decoding the JSON, converting it to a DataFrame, and adding individual columns for each of the vote types.
End of explanation
"""
# treat stars as a categorical variable and look for differences between groups by comparing the means of the groups
ydata.groupby(['stars'])['cool','funny','useful'].mean().T
# display acorrelation matrix of the vote types (cool/useful/funny) and stars
%matplotlib inline
import seaborn as sns
sns.heatmap(yelp.corr())
# display multiple scatter plots (cool, useful, funny) with linear regression line
feat_cols = ['cool', 'useful', 'funny']
sns.pairplot(ydata, x_vars=feat_cols, y_vars='stars', kind='reg', size=5)
"""
Explanation: Task 2
Explore the relationship between each of the vote types (cool/useful/funny) and the number of stars.
End of explanation
"""
X = ydata[['cool', 'useful', 'funny']]
y = ydata['stars']
"""
Explanation: Task 3
Define cool/useful/funny as the feature matrix X, and stars as the response vector y.
End of explanation
"""
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(X, y)
# print the coefficients
print lr.intercept_
print lr.coef_
zip(X, lr.coef_)
"""
Explanation: Task 4
Fit a linear regression model and interpret the coefficients. Do the coefficients make intuitive sense to you? Explore the Yelp website to see if you detect similar trends.
End of explanation
"""
from sklearn.cross_validation import train_test_split
from sklearn import metrics
import numpy as np
# define a function that accepts a list of features and returns testing RMSE
def train_test_rmse(feat_cols, data=None):
    """Fit a linear regression on the given feature columns and return the testing RMSE.

    Parameters
    ----------
    feat_cols : list of str
        Column names to use as the feature matrix.
    data : pandas.DataFrame, optional
        Source DataFrame with a ``stars`` response column.  Defaults to the
        notebook-global ``ydata`` built from yelp.json in the cells above.

    Returns
    -------
    float
        Root-mean-squared error of the fitted model on the held-out test split.
    """
    if data is None:
        data = ydata  # fall back to the notebook-global DataFrame
    X = data[feat_cols]
    y = data.stars
    # fixed random_state keeps the split (and hence the RMSE) reproducible
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)
    linreg = LinearRegression()
    linreg.fit(X_train, y_train)
    y_pred = linreg.predict(X_test)
    return np.sqrt(metrics.mean_squared_error(y_test, y_pred))
train_test_split(X, y, random_state=123)
# calculate RMSE with all three features
print train_test_rmse(['cool', 'funny', 'useful'])
"""
Explanation: Task 5
Evaluate the model by splitting it into training and testing sets and computing the RMSE. Does the RMSE make intuitive sense to you?
End of explanation
"""
print train_test_rmse(['cool', 'funny', 'useful'])
print train_test_rmse(['cool', 'funny'])
print train_test_rmse(['cool'])
### RMSE is best with all 3 features
"""
Explanation: Task 6
Try removing some of the features and see if the RMSE improves.
End of explanation
"""
# new feature: Number of reviews per business_id. More reviews = more favored by reviewer?
# Adding # of occurs for business_id
ydata['review_freq']= ydata.groupby(['business_id'])['stars'].transform('count')
# new features:
# add 0 if occurs < 4 or 1 if >= 4
ydata["favored"] = [1 if x > 3 else 0 for x in ydata.review_freq]
# add new features to the model and calculate RMSE
print train_test_rmse(['cool', 'funny', 'useful','review_freq'])
"""
Explanation: Task 7 (Bonus)
Think of some new features you could create from the existing data that might be predictive of the response. Figure out how to create those features in Pandas, add them to your model, and see if the RMSE improves.
End of explanation
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=123)
# create a NumPy array with the same shape as y_test
y_null = np.zeros_like(y_test, dtype=float)
# fill the array with the mean value of y_test
y_null.fill(y_test.mean())
y_null
np.sqrt(metrics.mean_squared_error(y_test, y_null))
"""
Explanation: Task 8 (Bonus)
Compare your best RMSE on the testing set with the RMSE for the "null model", which is the model that ignores all features and simply predicts the mean response value in the testing set.
End of explanation
"""
|
dkirkby/astroml-study | Chapter1/Chapter1.ipynb | mit | %pylab inline
import astroML
print astroML.__version__
"""
Explanation: Chapter 1
Prepared by David Kirkby dkirkby@uci.edu on 14-Jan-2016.
End of explanation
"""
"""
SDSS Spectrum Example
---------------------
Figure 1.2.
An example of an SDSS spectrum (the specific flux plotted as a function of
wavelength) loaded from the SDSS SQL server in real time using Python tools
provided here (this spectrum is uniquely described by SDSS parameters
plate=1615, fiber=513, and mjd=53166).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
from astroML.datasets import fetch_sdss_spectrum
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch single spectrum
plate = 1615
mjd = 53166
fiber = 513
spec = fetch_sdss_spectrum(plate, mjd, fiber)
#------------------------------------------------------------
# Plot the resulting spectrum
fig, ax = plt.subplots(figsize=(5, 3.75))
ax.plot(spec.wavelength(), spec.spectrum, '-k', lw=1)
ax.set_xlim(3000, 10000)
ax.set_ylim(25, 300)
ax.set_xlabel(r'$\lambda {(\rm \AA)}$')
ax.set_ylabel('Flux')
ax.set_title('Plate = %(plate)i, MJD = %(mjd)i, Fiber = %(fiber)i' % locals())
plt.show()
"""
Explanation: 1.4.2 Code Management with Git
See the links from the 2014 Physics 231 website.
1.5.4 Fetching and Displaying SDSS Spectra
Reproduce Figure 1.2 showing a sample SDSS spectrum:
End of explanation
"""
"""
SDSS DR7 Quasars
----------------
Figure 1.4.
The r-i color vs. redshift diagram for the first 10,000 entries from the
SDSS Data Release 7 Quasar Catalog. The color variation is due to emission
lines entering and exiting the r and i band wavelength windows.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
from astroML.datasets import fetch_dr7_quasar
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the quasar data
data = fetch_dr7_quasar()
# select the first 10000 points
data = data[:10000]
r = data['mag_r']
i = data['mag_i']
z = data['redshift']
#------------------------------------------------------------
# Plot the quasar data
fig, ax = plt.subplots(figsize=(5, 3.75))
ax.plot(z, r - i, marker='.', markersize=2, linestyle='none', color='black')
ax.set_xlim(0, 5)
ax.set_ylim(-0.5, 1.0)
ax.set_xlabel(r'${\rm redshift}$')
ax.set_ylabel(r'${\rm r-i}$')
plt.show()
"""
Explanation: 1.5.6 SDSS DR7 Quasar Catalog
Reproduce Figure 1.4 showing color (r-i) vs. redshift for SDSS DR7 quasars:
End of explanation
"""
import bossdata
print bossdata.__version__
"""
Explanation: Access BOSS spectra and metadata
The AstroML tools can only access pre-BOSS SDSS data, i.e. up to data release DR7. However, all BOSS data (and eventually eBOSS data) can be access with the https://bossdata.readthedocs.org/en/latest/, developed here at UCI:
End of explanation
"""
quasar_catalog = bossdata.meta.Database(quasar_catalog=True)
dr12q = quasar_catalog.select_all(what='RA,DEC,Z_VI,PSFMAG_2,PSFMAG_3', max_rows=0)
z = dr12q['Z_VI']
r = dr12q['PSFMAG_2']
i = dr12q['PSFMAG_3']
fig, ax = plt.subplots(figsize=(5, 3.75))
ax.plot(z, r - i, marker='.', markersize=2, linestyle='none', color='black')
ax.set_xlim(0, 5)
ax.set_ylim(-0.5, 1.0)
ax.set_xlabel(r'${\rm redshift}$')
ax.set_ylabel(r'${\rm r-i}$')
plt.show()
"""
Explanation: Read the DR12 quasar catalog:
End of explanation
"""
"""
SDSS Stripe 82 Standard Stars
-----------------------------
Figure 1.9.
Scatter plot with contours over dense regions.This is a color-color diagram
of the entire set of SDSS Stripe 82 standard stars; cf. figure 1.6.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from matplotlib import pyplot as plt
from astroML.plotting import scatter_contour
from astroML.datasets import fetch_sdss_S82standards
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Fetch the Stripe 82 standard star catalog
data = fetch_sdss_S82standards()
g = data['mmu_g']
r = data['mmu_r']
i = data['mmu_i']
#------------------------------------------------------------
# plot the results
fig, ax = plt.subplots(figsize=(5, 3.75))
scatter_contour(g - r, r - i, threshold=200, log_counts=True, ax=ax,
histogram2d_args=dict(bins=40),
plot_args=dict(marker=',', linestyle='none', color='black'),
contour_args=dict(cmap=plt.cm.bone))
ax.set_xlabel(r'${\rm g - r}$')
ax.set_ylabel(r'${\rm r - i}$')
ax.set_xlim(-0.6, 2.5)
ax.set_ylim(-0.6, 2.5)
plt.show()
"""
Explanation: 1.6.1 Plotting Two-Dimensional Representations of Large Data Sets
Reproduce Figure 1.9 showing g-r vs r-i for SDSS stripe-82 standard stars as a scatter plot with contours overlayed:
End of explanation
"""
z = dr12q['Z_VI']
r = dr12q['PSFMAG_2']
i = dr12q['PSFMAG_3']
fig, ax = plt.subplots(figsize=(5, 3.75))
scatter_contour(z, r - i, threshold=1000, log_counts=True, ax=ax,
histogram2d_args=dict(bins=40),
plot_args=dict(marker=',', linestyle='none', color='black'),
contour_args=dict(cmap=plt.cm.bone))
ax.set_xlim(0, 5)
ax.set_ylim(-0.5, 1.0)
ax.set_xlabel(r'${\rm redshift}$')
ax.set_ylabel(r'${\rm r-i}$')
plt.show()
"""
Explanation: Use the same technique to plot the r-i vs. redshift quasar plot above:
End of explanation
"""
"""
Example of HealPix pixellization
--------------------------------
Figure 1.15.
The top panel shows HEALPix pixels in nested order. The 12 fundamental sky
divisions can be seen, as well as the hierarchical nature of the smaller
pixels. This shows a pixelization with nside = 4, that is, each of the 12
large regions has 4 x 4 pixels, for a total of 192 pixels. The lower panel
shows a seven-year co-add of raw WMAP data, plotted using the HEALPix
projection using the HealPy package. This particular realization has
nside = 512, for a total of 3,145,728 pixels. The pixels are roughly
6.8 arcminutes on a side.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
from __future__ import print_function
import numpy as np
from matplotlib import pyplot as plt
# warning: due to a bug in healpy, importing it before pylab can cause
# a segmentation fault in some circumstances.
import healpy as hp
from astroML.datasets import fetch_wmap_temperatures
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Next plot the wmap pixellization
wmap_unmasked = fetch_wmap_temperatures(masked=False)
# plot the unmasked map
fig = plt.figure(2, figsize=(10, 7.5))
hp.mollview(wmap_unmasked, min=-1, max=1, title='Raw WMAP data',
unit=r'$\Delta$T (mK)', fig=2)
plt.show()
"""
Explanation: 1.6.3 Plotting Representations of Data on the Sky
Reproduce Figure 1.15 showing the WMAP7 raw temperature map using healpix with nside=512 (~3.1Mpix):
End of explanation
"""
from mpl_toolkits.basemap import Basemap
from matplotlib.collections import PolyCollection
def plot_sky(ra, dec, data=None, nside=16, label='', projection='eck4', cmap=plt.get_cmap('jet'), norm=None,
            hide_galactic_plane=False):
    """Plot a quantity binned into HEALPix pixels over the full sky.

    Parameters
    ----------
    ra, dec : array-like
        Equatorial coordinates of the points, in degrees.
    data : array-like, optional
        Per-point values; if given, the mean value per pixel is shown.
        If None, the number of points per square degree is shown instead.
    nside : int
        HEALPix resolution parameter (number of pixels is 12 * nside**2).
    label : str
        Colorbar label.
    projection : str
        Basemap projection name (default 'eck4', Eckert IV).
    cmap, norm
        Colormap and optional matplotlib normalization for the pixel values.
    hide_galactic_plane : bool
        If False, overplot the galactic plane as small black dots.

    Returns
    -------
    Basemap
        The map instance, so callers can add further overlays.
    """
    # get pixel area in degrees
    pixel_area = hp.pixelfunc.nside2pixarea(nside, degrees=True)
    # find healpixels associated with input vectors
    # (healpy wants colatitude theta = pi/2 - dec, in radians)
    pixels = hp.ang2pix(nside, 0.5*np.pi-np.radians(dec), np.radians(ra))
    # find unique pixels
    unique_pixels = np.unique(pixels)
    # count number of points in each pixel
    bincounts = np.bincount(pixels)
    # if no data provided, show counts per sq degree
    # otherwise, show mean per pixel
    if data is None:
        values = bincounts[unique_pixels]/pixel_area
    else:
        weighted_counts = np.bincount(pixels, weights=data)
        values = weighted_counts[unique_pixels]/bincounts[unique_pixels]
    # find pixel boundaries
    corners = hp.boundaries(nside, unique_pixels, step=1)
    corner_theta, corner_phi = hp.vec2ang(corners.transpose(0,2,1))
    corner_ra, corner_dec = np.degrees(corner_phi), np.degrees(np.pi/2-corner_theta)
    # set up basemap
    m = Basemap(projection=projection, lon_0=90, resolution='l', celestial=True)
    m.drawmeridians(np.arange(0, 360, 30), labels=[0,0,1,0], labelstyle='+/-')
    m.drawparallels(np.arange(-90, 90, 15), labels=[1,0,0,0], labelstyle='+/-')
    m.drawmapboundary()
    # convert sky coords to map coords
    x,y = m(corner_ra, corner_dec)
    # regroup into pixel corners (each pixel contributes 4 boundary vertices)
    verts = np.array([x.reshape(-1,4), y.reshape(-1,4)]).transpose(1,2,0)
    # Make the collection and add it to the plot.
    coll = PolyCollection(verts, array=values, cmap=cmap, norm=norm, edgecolors='none')
    plt.gca().add_collection(coll)
    plt.gca().autoscale_view()
    if not hide_galactic_plane:
        from astropy.coordinates import SkyCoord
        import astropy.units as u
        # generate vector in galactic coordinates and convert to equatorial coordinates
        galactic_l = np.linspace(0, 2*np.pi, 1000)
        galactic_plane = SkyCoord(l=galactic_l*u.radian, b=np.zeros_like(galactic_l)*u.radian, frame='galactic').fk5
        # project to map coordinates
        galactic_x, galactic_y = m(galactic_plane.ra.degree, galactic_plane.dec.degree)
        m.scatter(galactic_x, galactic_y, marker='.', s=2, c='k')
    # Add a colorbar for the PolyCollection
    plt.colorbar(coll, orientation='horizontal', pad=0.01, aspect=40, label=label)
    return m
plt.figure(figsize=(12,9))
plot_sky(dr12q['RA'].data, dr12q['DEC'].data, label='Number of quasars per square degree')
plt.show()
"""
Explanation: You can make nicer sky plots using the Basemap map-projections library. This example is borrowed from the bossdata docs and shows the number density of BOSS DR12 quasars on the sky:
End of explanation
"""
import seaborn as sns
z = dr12q['Z_VI']
r = dr12q['PSFMAG_2']
i = dr12q['PSFMAG_3']
fig, ax = plt.subplots(figsize=(5, 3.75))
scatter_contour(z, r - i, threshold=1000, log_counts=True, ax=ax,
histogram2d_args=dict(bins=40),
plot_args=dict(marker=',', linestyle='none', color='black'),
contour_args=dict(cmap=plt.cm.bone))
ax.set_xlim(0, 5)
ax.set_ylim(-0.5, 1.0)
ax.set_xlabel(r'${\rm redshift}$')
ax.set_ylabel(r'${\rm r-i}$')
plt.show()
"""
Explanation: Graphing Extras
Two packages worth exploring for visualization are:
* Seaborn: builds on top of matplotlib and provides better defaults and some higher-level graphing functions.
* Bokeh: uses a client-server architecture to allow easy interaction with graphs.
Both of these work in notebooks. The easiest way to get start is to import seaborn, which improves your defaults.
End of explanation
"""
|
bmeaut/python_nlp_2017_fall | course_material/04_Object_oriented_programming/04_Object_oriented_programming_lecture.ipynb | mit | class ClassWithInit:
def __init__(self):
pass
class ClassWithoutInit:
pass
"""
Explanation: Introduction to Python and Natural Language Technologies
Lecture 03, Week 04
Object oriented programming
27 September 2017
Introduction
Python has been object oriented since its first version
basically everything is an object including
class definitions
functions
modules
PEP8 defines style guidelines for classes as well
Defining classes
class keyword
instance explicitly bound to the first parameter of each method
named self by convention
__init__ is called after the instance is created
not exactly a constructor because the instance already exists
not mandatory
End of explanation
"""
class A:
def __init__(self):
self.attr1 = 42
def method(self):
self.attr2 = 43
a = A()
print(a.attr1)
# print(a.attr2) # raises AttributeError
a.method()
print(a.attr2)
"""
Explanation: Class attributes
data attributes: these correspond to data members in C++
methods: these correspond to methods in C++
both are
created upon assignment
can be assigned anywhere (not just in __init__)
End of explanation
"""
a.attr3 = 11
print(a.attr3)
"""
Explanation: Attributes can be added to instances
End of explanation
"""
a2 = A()
# a2.attr3 # raises AttributeError
"""
Explanation: this will not affect other instances
End of explanation
"""
class InitWithArguments:
def __init__(self, value, value_with_default=42):
self.attr = value
self.solution_of_the_world = value_with_default
class InitWithVariableNumberOfArguments:
def __init__(self, *args, **kwargs):
self.val1 = args[0]
self.val2 = kwargs.get('important_param', 42)
obj1 = InitWithArguments(41)
obj2 = InitWithVariableNumberOfArguments(1, 2, 3, param4="apple", important_param=23)
print(obj1.attr, obj1.solution_of_the_world,
obj2.val1, obj2.val2)
"""
Explanation: __init__ may have arguments
End of explanation
"""
class A:
def foo(self):
print("foo called")
def bar(self, param):
print("bar called with parameter {}".format(param))
"""
Explanation: Method attributes
functions inside the class definition
explicitly take the instance as first parameter
End of explanation
"""
c = A()
c.foo()
c.bar(42)
A.foo(c)
A.bar(c, 43)
"""
Explanation: Calling methods
instance.method(param)
class.method(instance, param)
End of explanation
"""
', '.join(A.__dict__)
"""
Explanation: Special attributes
every object has a number of special attributes
double underscore or dunder notation: __attribute__
automatically created
advanced OOP features are implemented using these
End of explanation
"""
class A:
def __init__(self):
self.__private_attr = 42
def foo(self):
self.__private_attr += 1
a = A()
a.foo()
# print(a.__private_attr) # raises AttributeError
a.__dict__
print(a._A__private_attr) # name mangled
a.__dict__
"""
Explanation: Data hiding with name mangling
by default every attribute is public
private attributes can be defined through name mangling
every attribute with at least two leading underscores and at most one trailing underscore is replaced with a mangled attribute
emulates private behavior
mangled name: __classname_attrname
End of explanation
"""
class A:
class_attr = 42
"""
Explanation: Class attributes
class attributes are class-global attributes
roughly the same as static attributes in C++
End of explanation
"""
a1 = A()
a1.class_attr
"""
Explanation: Accessing class attributes via instances
End of explanation
"""
A.class_attr
"""
Explanation: Accessing class attributes via the class object
End of explanation
"""
a1 = A()
a2 = A()
print(a1.class_attr, a2.class_attr)
A.class_attr = 43
a1.class_attr, a2.class_attr
"""
Explanation: Setting the class object via the class
End of explanation
"""
a1 = A()
a2 = A()
a1.class_attr = 11
a2.class_attr
"""
Explanation: Cannot set via an instance
End of explanation
"""
a1.__dict__
"""
Explanation: because this assignment creates a new attribute in the instance's namespace.
End of explanation
"""
a1.__class__.class_attr
"""
Explanation: each object has a __class__ magic attribute that accesses the class object.
We can use this to access the class attribute:
End of explanation
"""
a2.__dict__, a2.class_attr
"""
Explanation: a2 has not shadowed class_attr, so we can access it through the instance
End of explanation
"""
class A:
pass
class B(A):
pass
a = A()
b = B()
print(isinstance(a, B))
print(isinstance(b, A))
print(issubclass(B, A))
print(issubclass(A, B))
"""
Explanation: Inheritance
Python supports inheritance and multiple inheritance
End of explanation
"""
%%python2
class OldStyleClass:
pass
class NewStyleClass(object):
pass
class ThisIsAlsoNewStyleClass(NewStyleClass):
pass
"""
Explanation: New style vs. old style classes
Python 2
Python 2.2 introduced a new inheritance mechanism
new style classes vs. old style classes
class is new style if it subclasses object or one of its predecessors subclasses object
wide range of previously unavailable functionality
old style classes are the default in Python 2
Python 3
only supports new style classes
every class implicitly subclasses object
The differences between old style and new style classes are listed here: https://wiki.python.org/moin/NewClassVsClassicClass
End of explanation
"""
class A: pass
class B(object): pass
print(issubclass(A, object))
print(issubclass(B, object))
"""
Explanation: Python 3 implicitly subclasses object
End of explanation
"""
class A(object):
def foo(self):
print("A.foo was called")
def bar(self):
print("A.bar was called")
class B(A):
def foo(self):
print("B.foo was called")
b = B()
b.foo()
b.bar()
"""
Explanation: Method inheritance
Methods are inherited and overridden in the usual way
End of explanation
"""
class A(object):
def foo(self):
self.value = 42
class B(A):
pass
b = B()
print(b.__dict__)
a = A()
print(a.__dict__)
a.foo()
print(a.__dict__)
"""
Explanation: Since data attributes can be created anywhere, they are only inherited if the code in the base class' method is called.
End of explanation
"""
class A(object):
def __init__(self):
print("A.__init__ called")
class B(A):
def __init__(self):
print("B.__init__ called")
class C(A): pass
b = B()
c = C()
"""
Explanation: Calling the base class's constructor
since __init__ is not a constructor, the base class' init is not called automatically, if the subclass overrides it
End of explanation
"""
class A(object):
def __init__(self):
print("A.__init__ called")
class B(A):
def __init__(self):
A.__init__(self)
print("B.__init__ called")
class C(B):
def __init__(self):
super().__init__()
print("C.__init__ called")
print("Instantiating B")
b = B()
print("Instantiating C")
c = C()
"""
Explanation: The base class's methods can be called in at least two ways:
1. explicitely via the class name
1. using the super function
End of explanation
"""
%%python2
class A(object):
def __init__(self):
print("A.__init__ called")
class B(A):
def __init__(self):
A.__init__(self)
print("B.__init__ called")
class C(A):
def __init__(self):
super(C, self).__init__()
print("B.__init__ called")
print("Instantiating B")
b = B()
print("Instantiating C")
c = C()
"""
Explanation: super's usage was more complicated in Python 2
End of explanation
"""
class Person(object):
    """A named person with an age."""

    def __init__(self, name, age):
        self.name = name
        self.age = age

    def __str__(self):
        # human-readable form: "<name>, age <age>"
        return "{0}, age {1}".format(self.name, self.age)


class Employee(Person):
    """A Person who additionally has a position and a salary."""

    def __init__(self, name, age, position, salary):
        # initialize the Person part first, then the Employee-specific fields
        super().__init__(name, age)
        self.position = position
        self.salary = salary

    def __str__(self):
        # reuse the base class's string and append the extra fields
        base = super().__str__()
        return "{0}, position: {1}, salary: {2}".format(base, self.position, self.salary)
e = Employee("Jakab Gipsz", 33, "manager", 450000)
print(e)
print(Person(e.name, e.age))
"""
Explanation: A complete example using super in the subclass's init:
End of explanation
"""
class Cat(object):
def make_sound(self):
self.mieuw()
def mieuw(self):
print("Mieuw")
class Dog(object):
def make_sound(self):
self.bark()
def bark(self):
print("Vau")
animals = [Cat(), Dog()]
for animal in animals:
# animal must have a make_sound method
animal.make_sound()
"""
Explanation: Duck typing and interfaces
no built-in mechanism for interfacing
the Abstract Base Classes (abc) module implements interface-like features
not used extensively in Python in favor of duck typing
"In computer programming, duck typing is an application of the duck test in type safety. It requires that type checking be deferred to runtime, and is implemented by means of dynamic typing or reflection." -- Wikipedia
"If it looks like a duck, swims like a duck, and quacks like a duck, then it probably is a duck." -- Wikipedia
allows polymorphism without abstract base classes
End of explanation
"""
class A(object):
def foo(self):
raise NotImplementedError()
class B(A):
def foo(self):
print("Yay.")
class C(A): pass
b = B()
b.foo()
c = C()
# c.foo() # NotImplementedError why does this happen?
"""
Explanation: NotImplementedError
emulating C++'s pure virtual function
End of explanation
"""
a = A()
"""
Explanation: we can still instantiate A
End of explanation
"""
class ClassWithoutStr(object):
    """No __str__: printing falls back to the default object representation."""
    def __init__(self, value=42):
        self.param = value

class ClassWithStr(object):
    """Defines __str__, so print()/str() show a custom message."""
    def __init__(self, value=42):
        self.param = value
    def __str__(self):
        return "My id is {0} and my parameter is {1}".format(
            id(self), self.param)

# bug fix: the two messages read "Printint a class that does not __str__";
# corrected the typo and the missing verb.
print("Printing a class that does not define __str__: {}".format(ClassWithoutStr(345)))
print("Printing a class that defines __str__: {}".format(ClassWithStr(345)))
"""
Explanation: Magic methods
mechanism to implement advanced OO features
dunder methods
__str__ method
returns the string representation of the object
Python 2 has two separate methods __str__ and __unicode__ for bytestrings and unicode strings
End of explanation
"""
class Complex(object):
    # Minimal complex-number class demonstrating operator overloading.
    def __init__(self, real=0.0, imag=0.0):
        self.real = real
        self.imag = imag
    def __abs__(self):
        # called by the built-in abs(); returns the modulus sqrt(re^2 + im^2)
        return (self.real**2 + self.imag**2) ** 0.5
    def __eq__(self, other): # right hand side
        # component-wise equality; NOTE: defining __eq__ without __hash__
        # makes instances unhashable
        return self.real == other.real and self.imag == other.imag
    def __gt__(self, other):
        # compare by modulus; __lt__ is NOT derived automatically from this
        return abs(self) > abs(other)
# abs(c2) is sqrt(2); the two instances have different components
c1 = Complex()
c2 = Complex(1, 1)
abs(c2), c1 == c2
"""
Explanation: Operator overloading
operators are mapped to magic functions
defining these functions defines/overrides operators
comprehensive list of operator functions are here
some built-in functions are included as well
__len__: defines the behavior of len(obj)
__abs__: defines the behavior of abs(obj)
End of explanation
"""
class Complex(object):
    # Same class as above, but __gt__ also accepts strings on the right side.
    def __init__(self, real=0.0, imag=0.0):
        self.real = real
        self.imag = imag
    def __abs__(self):
        # modulus sqrt(re^2 + im^2), used by abs() and by __gt__ below
        return (self.real**2 + self.imag**2) ** 0.5
    def __eq__(self, other): # right hand side
        return self.real == other.real and self.imag == other.imag
    def __gt__(self, other):
        # cross-type comparison: against a string, compare modulus vs. length
        if isinstance(other, str):
            return abs(self) > len(other)
        return abs(self) > abs(other)
# sqrt(2) > 1 ("a") but not > 2 ("ab")
c1 = Complex()
c2 = Complex(1, 1)
abs(c2), c1 == c2, c2 > "a", c2 > "ab"
"""
Explanation: How can we define comparison between different types?
Let's define a comparison between Complex and strings. We can check for the right operand's type:
End of explanation
"""
"a" < c2
"""
Explanation: if the built-in type is the left-operand for which comparison against Complex is not defined, the operands are automatically swithced:
End of explanation
"""
# "a" > c2 # raises TypeError
"""
Explanation: Defining __gt__ does not automatically define __lt__:
End of explanation
"""
class Noisy(object):
    """Logs every attribute set/get and silently refuses deletion (demo class)."""

    def __setattr__(self, attr, value):
        print("Setting [{}] to value [{}]".format(attr, value))
        super().__setattr__(attr, value)

    def __getattr__(self, attr):
        # only called when normal lookup fails; object has no __getattr__,
        # so the super call below raises the expected AttributeError
        print("Getting (getattr) [{}]".format(attr))
        return super().__getattr__(attr)

    def __getattribute__(self, attr):
        print("Getting (getattribute) [{}]".format(attr))
        # bug fix: the looked-up value must be returned; without the return,
        # every attribute access evaluated to None
        return super().__getattribute__(attr)

    def __delattr__(self, attr):
        # deliberately ignores the deletion request
        print("You wish")
"""
Explanation: Assignment operator
the assignment operator (=) cannot be overridden
it performs reference binding instead of copying
tightly bound to the garbage collector
Other useful overloads
Attributes can be set, get and deleted. 4 magic methods govern these:
__setattr__: called when we set an attribute,
__delattr__: called when we delete an attribute using del or delattr
__getattribute__: called when accessing attributes
__getattr__: called when the 'usual' attribute lookup fails (for example the attribute is not present in the object's namespace
End of explanation
"""
a = Noisy()
try:
a.dog
except AttributeError:
print("AttributeError raised")
"""
Explanation: getting an attribute that doesn't exist yet calls
getattribute first, which calls the base class' getattribute which fails
getattr is called.
End of explanation
"""
a.dog = "vau" # equivalent to setattr(a, "dog", "vau")
"""
Explanation: setting an attribute
End of explanation
"""
a.dog # equivalent to getattr(a, "dog")
"""
Explanation: getting an existing attribute
End of explanation
"""
a.dog = "Vau" # equivalent to setattr(a, "dog", "Vau")
"""
Explanation: modifying an attribute also calls __setattr__
End of explanation
"""
del a.dog # equivalent to delattr(a, "dog")
"""
Explanation: deleting an attribute
End of explanation
"""
class DictLike(object):
    # Demonstrates dict-style indexing ([]) and iteration via magic methods.
    def __init__(self):
        # backing storage for the key/value pairs
        self.d = {}
    def __setitem__(self, item, value):
        # invoked by obj[item] = value
        print("Setting {} to {}".format(item, value))
        self.d[item] = value
    def __getitem__(self, item):
        # invoked by obj[item]; returns None (not KeyError) for missing keys
        print("Getting {}".format(item))
        return self.d.get(item, None)
    def __iter__(self):
        # iterating the object iterates the underlying dict's keys
        return iter(self.d)
d = DictLike()
d["a"] = 1
d["b"] = 2
for k in d:
    print(k)
"""
Explanation: Dictionary-like behavior can be achieved by overloading []
We also define __iter__ to support iteration.
End of explanation
"""
l1 = [[1, 2], [3, 4, 5]]
l2 = l1
id(l1[0]) == id(l2[0])
l1[0][0] = 10
l2
"""
Explanation: Shallow copy vs. deep copy
There are 3 types of assignment and copying:
the assignment operator (=) creates a new reference to the same object,
copy performs shallow copy,
deepcopy recursively deepcopies everything.
The difference between shallow and deep copy is only relevant for compound objects.
Assignment operator
End of explanation
"""
from copy import copy
l1 = [[1, 2], [3, 4, 5]]
l2 = copy(l1)
id(l1) == id(l2), id(l1[0]) == id(l2[0])
l1[0][0] = 10
l2
"""
Explanation: Shallow copy
End of explanation
"""
from copy import deepcopy
l1 = [[1, 2], [3, 4, 5]]
l2 = deepcopy(l1)
id(l1) == id(l2), id(l1[0]) == id(l2[0])
l1[0][0] = 10
l2
"""
Explanation: Deep copy
End of explanation
"""
from copy import copy, deepcopy
class ListOfLists(object):
    """Container demonstrating the custom copy hooks __copy__/__deepcopy__.

    __copy__ shares the underlying `lists` object (shallow), while
    __deepcopy__ duplicates it recursively.
    """
    def __init__(self, lists):
        self.lists = lists
        # Cached lengths of the wrapped lists.
        self.list_lengths = [len(l) for l in self.lists]
    def __copy__(self):
        # Shallow copy: the new instance shares the very same `lists` object.
        print("ListOfLists copy called")
        return ListOfLists(self.lists)
    def __deepcopy__(self, memo):
        # FIX: forward `memo` so objects already copied elsewhere in the same
        # deepcopy pass are reused, keeping shared/recursive references intact.
        print("ListOfLists deepcopy called")
        return ListOfLists(deepcopy(self.lists, memo))
l1 = ListOfLists([[1, 2], [3, 4, 5]])
l2 = copy(l1)
l1.lists[0][0] = 12
print(l2.lists)
l3 = deepcopy(l1)
"""
Explanation: Both can be defined via magic methods
note that these implementations do not check for infinite loops
End of explanation
"""
class A(object):
    """Demo of instance creation (__new__) and finalization (__del__)."""
    def __new__(cls, *args, **kwargs):
        # FIX: dropped the spurious @classmethod decorator -- __new__ is
        # implicitly a static method, and the decorator made Python pass the
        # class a second time inside *args.  __new__ must return the new
        # instance, on which __init__ is then called.
        instance = super().__new__(cls)
        print("A.__new__ called")
        return instance
    def __init__(self):
        print("A.__init__ called")
    def __del__(self):
        # Invoked by the garbage collector when the object is reclaimed.
        print("A.__del__ called")
        try:
            super(A, self).__del__()
        except AttributeError:
            print("parent class does not have a __del__ method")
a = A()
del a
"""
Explanation: However, these are very far from complete implementations. We need to take care of preventing infinite loops and support for pickling (serialization module).
Object creation and destruction: the __new__ and the __del__ method
The __new__ method is called to create a new instance of a class. __new__ is a static method that takes the class object as a first parameter.
Typical implementations create a new instance of the class by invoking the superclass’s __new__() method using super(currentclass, cls).__new__(cls[, ...]) with appropriate arguments and then modifying the newly-created instance as necessary before returning it.
__new__ has to return an instance of cls, on which __init__ is called.
The __del__ method is called when an object is about to be destroyed.
Although technically a destructor, it is handled by the garbage collector.
It is not guaranteed that __del__() methods are called for objects that still exist when the interpreter exits.
End of explanation
"""
class A(object):
    """Tiny class used below to demonstrate dir()-based introspection."""

    # Class-level attribute: shared, visible on both A and its instances.
    var = 12

    def __init__(self, value):
        # Instance-level attribute: only visible on instances.
        self.value = value

    def foo(self):
        print("bar")
", ".join(dir(A))
"""
Explanation: Object introspection
support for full object introspection
dir lists every attribute of an object
End of explanation
"""
", ".join(dir(A(12)))
"""
Explanation: Class A does not have a value attribute, since it is bound to an instance. However, it does have the class global var attribute.
An instance of A has both:
End of explanation
"""
class A(object):
pass
class B(A):
pass
b = B()
a = A()
print(isinstance(a, A))
print(isinstance(a, B))
print(isinstance(b, A))
print(isinstance(b, object))
"""
Explanation: isinstance, issubclass
End of explanation
"""
def evaluate(x):
    """Return 12*x + 3; the cells below introspect this function's __code__."""
    a = 12
    b = 3
    return a*x + b
print(evaluate.__code__)
#dir(evaluate.__code__)
evaluate.__code__.co_varnames, evaluate.__code__.co_freevars, evaluate.__code__.co_stacksize
"""
Explanation: Every object has a __code__ attribute, which contains everything needed to call the function.
End of explanation
"""
from inspect import getsourcelines
getsourcelines(evaluate)
"""
Explanation: The inspect module provides further code introspection tools, including the getsourcelines function, which returns the source code itself.
End of explanation
"""
class A(object):
    """Counts how many instances were ever created (static-method demo)."""

    # Class-wide tally, deliberately bumped through a static method.
    instance_count = 0

    def __init__(self, value=42):
        self.value = value
        A.increase_instance_count()

    @staticmethod
    def increase_instance_count():
        # A static method receives neither self nor cls, so it reaches the
        # class attribute through the class name directly.
        A.instance_count = A.instance_count + 1
a1 = A()
print(A.instance_count)
a2 = A()
print(A.instance_count)
"""
Explanation: Class decorators
Many OO features are achieved via a syntax sugar called decorators. We will talk about decorators in detail later.
The most common features are:
staticmethod,
classmethod,
property.
Static methods
defined inside a class but not bound to an instance (no self parameter)
analogous to C++'s static methods
End of explanation
"""
class Complex(object):
    """Complex number with a classmethod factory that parses 'a+jb' strings."""

    def __init__(self, real, imag):
        self.real = real
        self.imag = imag

    def __str__(self):
        return '{0}+j{1}'.format(self.real, self.imag)

    @classmethod
    def from_str(cls, complex_str):
        # Factory: split on '+' and strip the leading imaginary marker
        # ('i' or 'j').  Using `cls` means subclasses get subclass instances.
        real_part, imag_part = complex_str.split('+')
        imag_part = imag_part.lstrip('ij')
        print("Instantiating {}".format(cls.__name__))
        return cls(float(real_part), float(imag_part))


class ChildComplex(Complex):
    # Inherits everything; from_str instantiates ChildComplex here via cls.
    pass
c1 = Complex.from_str("3.45+j2")
print(c1)
c2 = Complex(3, 4)
print(c2)
c1 = ChildComplex.from_str("3.45+j2")
"""
Explanation: Class methods
bound to the class instead of an instance of the class
first argument is the class object itself, not an instance
called cls by convention
typical usage: factory methods for the class
Let's create a Complex class that can be initialized with either a string such as "5+j6" or with two numbers.
End of explanation
"""
class Person(object):
    """Person whose `age` property silently rejects invalid assignments."""

    def __init__(self, name, age):
        self.name = name
        self.age = age  # routed through the property setter below

    @property
    def age(self):
        return self._age

    @age.setter
    def age(self, age):
        # Keep the previous value when the new one is out of range, or when
        # it cannot be compared to integers (the comparison then raises
        # TypeError, e.g. for strings in Python 3).
        try:
            in_range = 0 <= age <= 150
        except TypeError:
            return
        if in_range:
            self._age = age

    def __str__(self):
        return "Name: {0}, age: {1}".format(self.name, self.age)
p = Person("John", 12)
print(p)
p.age = "abc"
print(p)
p.age = 85
print(p)
p = Person("Pete", 17)
",".join(dir(p))
"""
Explanation: Properties
attributes with getters, setters and deleters
Properties are attributes with getters, setters and deleters. Property works as both a built-in function and as separate decorators.
End of explanation
"""
class A(object):
    """First base class; stores a single value."""
    def __init__(self, value):
        print("A init called")
        self.value = value


class B(object):
    """Second base class; its initializer takes no extra arguments."""
    def __init__(self):
        print("B init called")


class C(A, B):
    """Child whose MRO is C -> A -> B -> object."""
    def __init__(self, value1, value2):
        print("C init called")
        self.value2 = value2
        # super() follows the MRO, so this reaches A.__init__; A does not
        # chain further, hence B.__init__ never runs for C instances.
        super(C, self).__init__(value1)


class D(B, A):
    # No __init__ of its own: the MRO (D -> B -> A) picks up B.__init__.
    pass
print("Instantiating C")
c = C(1, 2)
print("Instantiating D")
d = D()
"""
Explanation: Multiple inheritance
no interface inheritance in Python
since every class subclasses object, the diamond problem is present
method resolution order (MRO) defines the way methods are inherited
very different between old and new style classes
End of explanation
"""
|
BelalC/keyword_i2x | notebooks/sketch.ipynb | mit | from sklearn.feature_extraction import stop_words
from nltk.corpus import stopwords
import math
from textblob import TextBlob as tb
with open("scripts/script.txt", "r") as f:
data = f.read()
#with open("scripts/script.txt", "r") as f:
# data2 = f.readlines()
#for line in data:
# words = data.split()
with open("scripts/transcript_1.txt", "r") as t1:
t1 = t1.read()
with open("scripts/transcript_2.txt", "r") as t2:
t2 = t2.read()
with open("scripts/transcript_3.txt", "r") as t3:
t3 = t3.read()
"""
Explanation: Instructions:
Compute the most important key-words (a key-word can be between 1-3 words)
Choose the top n words from the previously generated list. Compare these key- words with all the words occurring in all of the transcripts.
Generate a score (rank) for these top n words based on analysed transcripts.
End of explanation
"""
from spacy.en import English
import nltk
from sklearn.feature_extraction.stop_words import ENGLISH_STOP_WORDS
parser = English()
parsedData = parser(data)
# All you have to do is iterate through the parsedData
# Each token is an object with lots of different properties
# A property with an underscore at the end returns the string representation
# while a property without the underscore returns an index (int) into spaCy's vocabulary
# The probability estimate is based on counts from a 3 billion word
# corpus, smoothed using the Simple Good-Turing method.
for i, token in enumerate(parsedData[0:2]):
print("original:", token.orth, token.orth_)
print("lowercased:", token.lower, token.lower_)
print("lemma:", token.lemma, token.lemma_)
print("shape:", token.shape, token.shape_)
print("prefix:", token.prefix, token.prefix_)
print("suffix:", token.suffix, token.suffix_)
print("log probability:", token.prob)
print("Brown cluster id:", token.cluster)
print("----------------------------------------")
"""
Explanation: PRE-PROCESSING!
End of explanation
"""
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
def tf(word, blob):
    """Term frequency: share of `blob`'s words that equal `word`."""
    words = blob.words
    return words.count(word) / len(words)


def n_containing(word, bloblist):
    """Number of documents in `bloblist` whose word list contains `word`."""
    count = 0
    for blob in bloblist:
        if word in blob.words:
            count += 1
    return count


def idf(word, bloblist):
    """Inverse document frequency; the +1 avoids division by zero for
    words that appear in no document."""
    return math.log(len(bloblist) / (n_containing(word, bloblist) + 1))


def tfidf(word, blob, bloblist):
    """TF-IDF score of `word` for document `blob` within `bloblist`."""
    return tf(word, blob) * idf(word, bloblist)
bloblist = []
[bloblist.append(tb(doc)) for doc in [data, t1, t2, t3]]
for i, blob in enumerate(bloblist):
print("Top words in document {}".format(i + 1))
scores = {word: tfidf(word, blob, bloblist) for word in blob.words}
sorted_words = sorted(scores.items(), key=lambda x: x[1], reverse=True)
for word, score in sorted_words[:3]:
print("Word: {}, TF-IDF: {}".format(word, round(score, 5)))
CountVectorizer(data)
tf = TfidfVectorizer(analyzer='word', ngram_range=(1,3), min_df = 0, stop_words = 'english')
tfidf_matrix = tf.fit_transform(data2)
feature_names = tf.get_feature_names()
tfidf_matrix.shape, len(feature_names)
dense = tfidf_matrix.todense()
episode = dense[0].tolist()[0]
phrase_scores = [pair for pair in zip(range(0, len(episode)), episode) if pair[1] > 0]
sorted_phrase_scores = sorted(phrase_scores, key=lambda t: t[1] * -1)
for phrase, score in [(feature_names[word_id], score) for (word_id, score) in sorted_phrase_scores][:20]:
print('{0: <20} {1}'.format(phrase, score))
def freq(word, tokens):
    """Return how many times `word` occurs in the `tokens` list."""
    return sum(1 for token in tokens if token == word)
# Compute the frequency for each term.
# NOTE(review): this cell looks like copied example code -- `venue`,
# `tokenizer`, `bigrams`, `trigrams` and the lowercase `stopwords` list are
# not defined anywhere in this notebook, so it will not run as-is; confirm
# the intended source before relying on it.
vocabulary = []
docs = {}
all_tips = []
for tip in (venue.tips()):
    tokens = tokenizer.tokenize(tip.text)
    bi_tokens = bigrams(tokens)
    tri_tokens = trigrams(tokens)
    # Keep lowercase unigrams longer than 2 chars, minus stop words.
    tokens = [token.lower() for token in tokens if len(token) > 2]
    tokens = [token for token in tokens if token not in stopwords]
    bi_tokens = [' '.join(token).lower() for token in bi_tokens]
    bi_tokens = [token for token in bi_tokens if token not in stopwords]
    tri_tokens = [' '.join(token).lower() for token in tri_tokens]
    tri_tokens = [token for token in tri_tokens if token not in stopwords]
    final_tokens = []
    final_tokens.extend(tokens)
    final_tokens.extend(bi_tokens)
    final_tokens.extend(tri_tokens)
    docs[tip.text] = {'freq': {}}
    for token in final_tokens:
        docs[tip.text]['freq'][token] = freq(token, final_tokens)
# FIX: `print docs` is Python 2 syntax and is a SyntaxError in this
# Python 3 notebook; the rest of the file uses the print() function.
print(docs)
"""
Explanation: TF-IDF
End of explanation
"""
from rake_nltk import Rake
r = Rake() # Uses stopwords for english from NLTK, and all puntuation characters.
# If you want to provide your own set of stop words and punctuations to
# r = Rake(<list of stopwords>, <string of puntuations to ignore>
r.extract_keywords_from_text(data)
r.get_ranked_phrases_with_scores() # To get keyword phrases ranked highest to lowest.
"""
Explanation: RAKE
End of explanation
"""
|
quasars100/Resonance_testing_scripts | python_tutorials/Megno.ipynb | gpl-3.0 | def simulation(par):
a, e = par # unpack parameters
rebound.reset()
rebound.integrator = "whfast-nocor"
rebound.dt = 5.
rebound.add(m=1.) # Star
rebound.add(m=0.000954, a=5.204, anom=0.600, omega=0.257, e=0.048)
rebound.add(m=0.000285, a=a, anom=0.871, omega=1.616, e=e)
rebound.move_to_com()
rebound.init_megno(1e-16)
try:
rebound.integrate(5e2*2.*np.pi,maxR=20.) # integrator for 500 years
return rebound.calculate_megno()
except rebound.ParticleEscaping:
return 10. # At least one particle got ejected, returning large MEGNO.
"""
Explanation: Stability map with MEGNO and WHFast
In this tutorial, we'll create a stability map of a two planet system using the chaos indicator MEGNO (Mean Exponential Growth of Nearby Orbits) and the symplectic integrator WHFast (Rein and Tamayo 2015).
We will integrate a two planet system with massive planets. We vary two orbital parameters, the semi-major axis $a$ and the eccentricity $e$. Let us first define a function that runs one simulation for a given set of initial conditions $(a, e)$.
End of explanation
"""
import rebound
import numpy as np
simulation((7,0.1))
"""
Explanation: Let's try this out and run one simulation
End of explanation
"""
# Resolution of the (a, e) grid: 80 x 80 = 6400 simulations.
Ngrid = 80
# Scan semi-major axis 7-10 and eccentricity 0-0.5 for the second planet.
par_a = np.linspace(7.,10.,Ngrid)
par_e = np.linspace(0.,0.5,Ngrid)
parameters = []
for e in par_e:
    for a in par_a:
        parameters.append((a,e))
# Pool that spreads the simulations over the available CPU cores while
# remaining responsive to KeyboardInterrupt.
from rebound.interruptible_pool import InterruptiblePool
pool = InterruptiblePool()
results = pool.map(simulation,parameters)
"""
Explanation: The return value is the MEGNO. It is about 2, thus the system is regular for these initial conditions. Let's run a whole array of simulations.
End of explanation
"""
results2d = np.array(results).reshape(Ngrid,Ngrid)
%matplotlib inline
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(7,5))
ax = plt.subplot(111)
extent = [min(par_a),max(par_a),min(par_e),max(par_e)]
ax.set_xlim(extent[0],extent[1])
ax.set_xlabel("semi-major axis $a$")
ax.set_ylim(extent[2],extent[3])
ax.set_ylabel("eccentricity $e$")
im = ax.imshow(results2d, interpolation="none", vmin=1.9, vmax=4, cmap="RdYlGn_r", origin="lower", aspect='auto', extent=extent)
cb = plt.colorbar(im, ax=ax)
cb.set_label("MEGNO $\\langle Y \\rangle$")
"""
Explanation: On my laptop (dual core CPU), this takes only 3 seconds!
Let's plot it!
End of explanation
"""
|
nathanshammah/pim | doc/notebooks/piqs_steadystate_superradiance.ipynb | mit | import matplotlib.pyplot as plt
from qutip import *
from piqs import *
"""
Explanation: Steady-state superradiance
We consider a system of $N$ two-level systems (TLSs) with identical frequency $\omega_{0}$, incoherently pumped at a rate $\gamma_\text{P}$ and de-excitating at a collective emission rate $\gamma_\text{CE}$,
\begin{eqnarray}
\dot{\rho} &=&
-i\lbrack \omega_{0}J_z,\rho \rbrack
+\frac{\gamma_\text {CE}}{2}\mathcal{L}{J{-}}[\rho]
+\frac{\gamma_\text{P}}{2}\sum_{n=1}^{N}\mathcal{L}{J{+,n}}[\rho]
\end{eqnarray}
This system can sustain superradiant light emission and line narrowing [1-3], whose peak intensity scales proportionally to $N^2$.
End of explanation
"""
# Number of two-level systems in the Dicke ensemble.
N = 10
system = Dicke(N = N)
# Collective spin operators for N spins (PIQS returns them as a list).
[jx, jy, jz, jp, jm] = jspin(N)
# Bare TLS frequency and Hamiltonian H = w0 * Jz.
w0 = 1
h0 = w0 * jz
# Collective emission rate and incoherent pumping rate; gP = N * gCE sits
# near the optimum found in the pump-rate sweep of section 2.
gCE = 1
gP = N * gCE
system.hamiltonian = h0
system.collective_emission = gCE
system.pumping = gP
"""
Explanation: 1) Time evolution
We study the system of Eq. (1) (above) by using the Permutational Invariant Quantum Solver (PIQS) to build the Liouvillian of the system. Using QuTiP's $\texttt{mesolve}()$ we can calculate operators expectation values in time as well as higher order correlation functions [4,5].
System properties
End of explanation
"""
L = system.liouvillian()
rhoss = steadystate(L)
jpjm_ss = expect(jp*jm, rhoss)
"""
Explanation: Liouvillian and steady state $\rho_\text{ss}$
End of explanation
"""
# time evolution parameters
nt = 1000
td = np.log(N)/(N*gCE)
tmax = 5 * td
t = np.linspace(0, tmax, nt)
# initial state
rho0= dicke(N, N/2, -N/2)
# calculate g2(tau)
A = jp*jm
rhoA = jm*rhoss*jp
#g2(tau)
result1 = mesolve(L, rhoA, t, [], e_ops = [A], options = Options(store_states=True))
g2t = result1.expect[0]
#rho(t)
result2 = mesolve(L, rho0, t, [], e_ops = A, options = Options(store_states=True))
rhot = result2.states
jpjmt = result2.expect[0]
"""
Explanation: Time integration for $g^{(2)}(\tau)$ and $\langle J_{+}J_{-}\rangle (t)$
We define the $g^{(2)}(\tau)$ of the system as the two-time correlation function mapping the photonic degrees of freedom onto the TLS collective operators
\begin{eqnarray}
g^{(2)}(\tau) = \frac{\langle: a^\dagger(\tau) a^\dagger(0) a(\tau) a(0) :\rangle}{|\langle: a^\dagger(0) a(0) :\rangle|^2}= \frac{\langle: J_{+}(\tau) J_{+}(0) J_{-}(\tau) J_{-}(0) :\rangle}{|\langle J_{+}(0) J_{-}(0) \rangle|^2}
\end{eqnarray}
End of explanation
"""
j2max = (0.5 * N + 1) * (0.5 * N)
plt.rc('text', usetex = True)
label_size = 20
plt.rc('xtick', labelsize=label_size)
plt.rc('ytick', labelsize=label_size)
fig1 = plt.figure()
plt.plot(t/td, g2t/jpjm_ss**2, '-')
plt.plot(t/td, 1+0*g2t, '--')
plt.xlabel(r'$\tau/t_\mathrm{D}$', fontsize = label_size)
plt.ylabel(r'$g^{(2)}(\tau)$', fontsize = label_size)
plt.xticks([0,(tmax/2)/td,tmax/td])
plt.show()
plt.close()
fig2 = plt.figure()
plt.plot(t/td, jpjmt/j2max, '-')
plt.xlabel(r'$t/t_\mathrm{D}$', fontsize = label_size)
plt.ylabel(r'$\langle J_{+}J_{-}\rangle (t)$', fontsize = label_size)
plt.xticks([0,(tmax/2)/td,tmax/td])
plt.title(r'Light emission', fontsize = label_size)
plt.show()
plt.close()
"""
Explanation: Visualization
End of explanation
"""
# Sweep the pumping rate logarithmically over four decades:
# gP = 10^x * gP0 with x running from -2 to 2 in steps of 0.05.
gCE = 1
gP0 = 1
gP_min_exp = -20
gP_max_exp = 20
gP_stepsize = 0.5
# Exponents are built in tenths, hence the final *0.1 rescaling.
gP_list = np.arange(gP_min_exp, gP_max_exp+1, gP_stepsize)*0.1
gP_list_log = 10**(gP_list)
jpjmss_max_list = []
for i in gP_list_log:
    gP = i*gP0
    # Rebuild the Liouvillian for each pump rate; N, jz, jp, jm come from
    # the earlier cells.
    system = Dicke(hamiltonian = jz, N = N, pumping = gP, collective_emission = gCE)
    liouv = system.liouvillian()
    # Steady-state emission intensity <J+ J-> at this pump rate.
    rho_ss = steadystate(liouv)
    jpjm_ss = expect(jp*jm, rho_ss)
    jpjmss_max_list.append(jpjm_ss)
"""
Explanation: 2) Maximum of light emission as a function of $\frac{\gamma_\text{P}}{N\gamma_\text{CE}}$
We perform a study of the scaling of the steady state light emission of the system as a function of the pumping rate, normalized by the number of TLSs and the collective emission rate. The results show an optimal point for $\frac{\gamma_\text{P}}{N\gamma_\text{CE}}\simeq 1$.
End of explanation
"""
intensity_max = float(N)*gCE/2*(float(N)*gCE/2+1)
normalized_intensity = np.array(jpjmss_max_list)/intensity_max
plt.semilogx(gP_list_log/(gCE*N), normalized_intensity, '-')
label_size = 20
plt.xlabel(r'${\gamma_\mathrm{P}}/\left({N\gamma_\mathrm{CE}}\right)$', fontsize = label_size)
plt.ylabel(r'$\langle J_{+}J_{-}\rangle_\mathrm{ss}$', fontsize = label_size)
plt.title(r'Steady-state light emission', fontsize = label_size)
plt.show()
plt.close()
"""
Explanation: Visualization
End of explanation
"""
qutip.about()
"""
Explanation: References
[1] D. Meiser and M.J. Holland, Phys. Rev. A 81, 033847 (2010)
[2] D. Meiser and M.J. Holland, Phys. Rev. A 81, 063827 (2010)
[3] J.G. Bohnet et al. Nature 484, 78 (2012)
[4] J.R. Johansson, P.D. Nation, and F. Nori, Comp. Phys. Comm. 183, 1760 (2012) http://qutip.org
End of explanation
"""
|
DaveBackus/Data_Bootcamp | Code/SQL/SQL_Bootcamp_Stern_2016.ipynb | mit | from IPython.display import display, HTML, clear_output
HTML('''<script> code_show=true; function code_toggle() {if (code_show){$('div.input').hide();}
else {$('div.input').show();}code_show = !code_show} $( document ).ready(code_toggle);
</script> <form action="javascript:code_toggle()"><input type="submit" value="Hide Raw Code"></form>''')
from ipywidgets import interact, interactive, fixed, widgets
import pandas as pd
import sqlite3
import re
# just testing out the youtube player capabilites of Jupyter
#from IPython.display import YouTubeVideo
#YouTubeVideo("a1Y73sPHKxw", width=700, height=500)
# if this .sqlite db doesn't already exists, this will create it
# if the .sqlite db *does* already exist, this establishes the desired connection
con = sqlite3.connect("sql_sample_db_new.sqlite")
book_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/book_table.csv')
auth_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/author_table.csv')
sales_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/sales_table.csv')
tech_cos = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/tech_cos.csv')
public_cos = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/public_cos.csv')
movie_table = pd.read_csv('https://raw.githubusercontent.com/DaveBackus/Data_Bootcamp/master/Code/SQL/movie_table.csv')
tables = [book_table,
auth_table,
sales_table,
tech_cos,
public_cos,
movie_table]
table_names = ['book_table',
'auth_table',
'sales_table',
'tech_cos',
'public_cos',
'movie_table']
# drop each table name if it already exists to avoid error if you rerun this bit of code
# then add it back (or add it for the first time, if the table didn't already exist)
for i in range(len(tables)):
table_name = table_names[i]
table = tables[i]
con.execute("DROP TABLE IF EXISTS {}".format(table_name))
pd.io.sql.to_sql(table, "{}".format(table_name), con, index=False)
# Function to make it easy to run queries on this mini-database
def run(query):
    """Execute `query` against the class database and return a DataFrame.

    NULLs are blanked out for nicer display.  Returns None when the query
    (or the connection) fails; run_q() turns that into a friendly hint.
    """
    try:
        # `con` is the module-level sqlite3 connection created above.
        return pd.read_sql(query, con).fillna(' ')
    except Exception:
        # Swallow bad SQL deliberately -- students simply retry in the
        # widget.  (Narrowed from a bare `except:` so Ctrl-C still works.)
        return None
def run_q(query, button):
    """Wire a textarea `query` widget to a Run `button`.

    Each click clears the previous output, runs the SQL currently in the
    textarea and renders the result as an HTML table; on any failure it
    prints a checklist of common query mistakes instead.
    """
    def on_button_clicked(b):
        clear_output()
        # The widget keeps literal newlines; flatten to one statement.
        new_value = query.value.replace('\n', ' ')
        if new_value != '':
            df = run(new_value)
            try:
                output = HTML(df.to_html(index=False))
                display(output)
            # run() returns None on bad SQL, so to_html raises
            # AttributeError; any other rendering error lands here too.
            # (Narrowed from a bare `except:` so Ctrl-C still works.)
            except Exception:
                print('''SQL error! Check your query:
            1. Text values are in quotation marks and capitalized correctly
            2. Items in the SELECT clause are comma-separated
            3. No dangling comma in the SELECT clause right before the FROM clause
            4. If you are joining tables that have columns with the same name, use table_name.column_name format
            5. Try "PRAGMA TABLE_INFO(table_name) to double-check the column names in the table
            6. Correct order of clauses:
                SELECT
                FROM
                JOIN...ON
                WHERE
                GROUP BY
                ORDER BY
                LIMIT
            ''')
    button.on_click(on_button_clicked)
    # Render once immediately so the page shows results on load.
    on_button_clicked(None)
def cheat(answer):
    """Attach a Reveal checkbox that shows/hides `answer` for a challenge."""
    def f(Reveal):
        # Idiom fix: test the boolean directly instead of `== False`.
        if not Reveal:
            clear_output()
        else:
            print(answer)
    # `interact` infers a checkbox widget from the boolean default value.
    interact(f, Reveal=False)
clear_output()
"""
Explanation: <br>
<center> Sarah Beckett-Hile | NYU Stern School of Business | Spring 2016 </center>
To get started:
Click 'Cell ➤ Run All' above. When the program runs, it will skip down the page. After it stops skipping, scroll back up to the top to continue the lesson. Just a weird quirk of Jupyter.
End of explanation
"""
test_ex = widgets.Textarea(value=
'''You can change text in these boxes to edit and re-run queries!''',
width = '50em', height = '7em')
display(test_ex)
text_ex_button = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(text_ex_button)
def on_button_clicked2(b):
clear_output()
print('''Here's the output from the cell above:
''', test_ex.value)
text_ex_button.on_click(on_button_clicked2)
"""
Explanation: <a id='table_of_contents'></a>
Table of Contents
Course Details
Introduction to SQL
Structure and Formatting Basics
Determine a table's strucure.............................PRAGMA TABLE_INFO()
Query building blocks.......................................SELECT & FROM
Filter your data..................................................WHERE
Wildcards and vague search.............................LIKE and %
Sort your data...................................................ORDER BY
Limit the number of rows you see.....................LIMIT
Combining tables..............................................JOIN <br>
• Combining 3+ tables........................................Multiple JOIN's <br>
• Different JOIN Types.......................................Overview<br>
• Simple Join......................................................INNER JOIN aka JOIN<br>
• One-Sided Join................................................LEFT JOIN<br>
• Full Join............................................................OUTER JOIN<br>
• Practice combining tables...............................JOIN Drills
Column & Table Aliases.....................................AS
Add, subtract, multiply, & divide data................Operators
Apply functions to columns...............................Functions
Group data by categories..................................GROUP BY
Filter out certain groups.....................................HAVING
Conditional values..............................................IF & CASE WHEN
SQL-ception: Queries within queries..................Nesting
Run multiple queries at once..............................UNION & UNION ALL
Add a summary/total row..................................ROLLUP
Summary
Full table of RDBMS dialect differences
Additional Resources:
Syllabus
Cheatsheet
Google Group
<center> <a id='introduction'></a> Table of Contents | Next
</center>
SQL, "sequel", "ESS CUE ELL"
SQL stands for "Structured Query Language", but no one calls it that. You can pronouce it as either "S-Q-L" or "sequel". Some people feel strongly in favor of a particular pronunciation. I don't. I'll say "sequel" in class, but I'll never correct you for saying S-Q-L.
SQL is the database language of choice for most businesses; you use it to communicate with databases. "Communication" can take the form of creating, reading, updating, and deleting data. This course only covers reading data. That's all most MBAs do with SQL.
Relational Databases
Companies use relational databases because they can store and easily recall A LOT of data. Excel can't handle more than a million rows. If you're Amazon and you need to record every click, Excel is useless. Relational databases are much more efficient.
What do we mean by "efficient"? Every recorded "bit" takes up server space, which costs money. It also slows everything down. So an efficient database should allow you to record and recall a lot of information using the minimal number of bits.
Imagine you want to store the names of four books and some information about their authors. Think of the character count as a proxy for how many bits of storage your table takes up:
Now imagine you want to add some more books by each of those authors. Some of the information gets redundant. Imagine if you had to do this for millions of different books:
This is where relational databases can help. With a relational database, you'd create two separate tables that relate to each other. You still storing the same information, but you're doing it by using fewer characters. You've eliminated the need to repeat yourself, so you've made a much more efficient database.
<br>
SQL Dialects
There are different softwares that can manage relational databases. SQL varies a little from software to software, just like English varies a little between England and the U.S. We'll address these instances whenever possible.
Each software is called a Relational Database Management System, or RDBMS. These are some of the most popular that you might encounter at work:
<font color='#1f5fd6'> Microsoft SQL Server | <font color='#1f5fd6'> MySQL | <font color='#1f5fd6'> Oracle | <font color='#1f5fd6'> SQLite </font>|
:------------------: | :---: | :----: | :----: |
Proprietary, more common at older companies | Open source, frequently used by startups and tech companies | Proprietary, more common at older companies | Frequently used for mobile apps (and this class!)
<br>
<a id='course_details'></a>
<center> Previous | Table of Contents | Next
</center>
The goal is to start simple and practice often. By the end of this class, you should feel extremely comfortable writing moderately complicated SQL code, which will save you countless hours trying to figure out SQL on the job or waiting for someone else with SQL knowledge to pull data for you. Using this interactive program, we'll explore a small sample database by learning new SQL concepts one at a time. Concepts will build on each other.
Quick Exercises
Sometimes you'll be asked to edit or delete parts of a provided query. Rerun the query with each step, taking care to understand what changed with the output each time. Note that none of the changes that you make to these queries will be saved when you close this program.
Try it by changing something in the cell below and hitting "Run"!
End of explanation
"""
chall ='''When you click a checkbox, you reveal the answer to a challenge!
Uncheck it to hide the answer again.'''
cheat(chall)
"""
Explanation: Challenges
After we've learned a new concept and you've practiced with some quick exercises, you'll be challenged to write your own query. Read each challenge carefully, and keep re-running it until you get the results you are looking for.
Need to cheat a little? Check the "Reveal" box to see the answer to a challenge.
<br>
<img align="left" src="http://i.imgur.com/FhCJTqa.png">
End of explanation
"""
pragma = widgets.Textarea(
value='''PRAGMA TABLE_INFO(book_table)''',
width = '50em', height = '3em')
display(pragma)
prag_button = widgets.Button(description='Run',width='10em', height='2.5em', color='white', background_color='black', border_color='black')
display(prag_button)
run_q(pragma, prag_button)
"""
Explanation: Using this program
The content you're currently reading is written in Python, Markdown and HTML and runs in a Jupyter Notebook. No need to know what any of that means, I only told you in case you were terribly curious.
You will not be using this interface at work - the point of this class is to teach you SQL the language, which can be typed into a variety of different software programs. You'll be able to learn the quirks of a different software program pretty easily as long as you know SQL.
Still, there are some things you should know about this program to help you with the class:
* If you accidentally double-click on a block of text, and suddenly it looks like code, hit SHIFT-RETURN or 'Cell ➤ Run'.
* If you try to run a query and the output doesn't refresh, select 'Cell ➤ Run All' to reboot the program.
* If you accidentally delete a cell, click 'Edit ➤ Undo Delete Cell'
* Nothing that you write in the challenges and exercises will save after you close this program.
* If you want to save something that you've written, follow the steps below:
<br>
<img align="left" src="http://i.imgur.com/qkh6TiN.png">
<a id='formatting'></a>
<center>
Previous | Table of Contents | Next
</center>
Structure and Formatting Query Basics:
Below is an example of a query, SQL code that requests data from a database. Try to make a habit of writing queries by following these formatting conventions. Queries can get very long and complicated, and formatting makes them easier to read.
<a id='pragma_table'></a>
<center>
Previous | Table of Contents | Next
</center>
<font color='#1f5fd6'>Microsoft SQL Server | <font color='#1f5fd6'>MySQL | <font color='#1f5fd6'>Oracle | <font color='#1f5fd6'>SQLite </font>
:------------------: | :---: | :----: | :----:
SP_Help some_table | DESCRIBE some_table | DESCRIBE some_table | PRAGMA TABLE_INFO(some_table)
SQLite version that we'll be using for this class:
PRAGMA TABLE_INFO(some_table) ➞ result-set lists the column names and data types within the table
We're using SQLite, so we're going to be using the PRAGMA TABLE_INFO() option. Put the name of a table in the parentheses, and the output tells you the names and data types in each column in the table.
So far, we've learned about 2 tables in our relational database, which we'll call book_table and auth_table. We're also going to use a sales_table, which we'll take a look at later on. Combined, these three tables will make up the "database" of a very tiny, very limited, very imaginary bookstore.
We'll start by reviewing the book_table and auth_table:
Now, we'll use PRAGMA TABLE_INFO() to read the table structure of the book_table. In plain English, the query below says "show me the names of the columns in the book_table, and what type of data (text, numbers?) is in each column."
End of explanation
"""
pragma_sales = widgets.Textarea(
value='',
width = '50em',
height = '4em'
)
display(pragma_sales)
prag_sales_button = widgets.Button(description='Run',width='10em', height='2.5em', color='white', background_color='black', border_color='black')
display(prag_sales_button)
run_q(pragma_sales, prag_sales_button)
prag_sales_answer = 'PRAGMA TABLE_INFO(sales_table)'
cheat(prag_sales_answer)
"""
Explanation: name tells us the names of each column in the table. So now we know that the book_table has columns headed Book, COGs, and Author
type tells us what type of data is in each column. So now we know that the Book column has TEXT data, and that COGs contains REAL numbers - numbers that can have a fractional value.
All other columns you can ignore. Seriously.
Quick Exercise:
Change the query above to look at the auth_table instead. Why is the author's birth_year data type not REAL like we saw with COGs?
Challenge:
Rewrite the query above to take a look at the sales_table structure. Judging from what your query returns, can you guess what you'll probably see once you actually look all the data in the sales_table?
End of explanation
"""
# Pre-filled SELECT * example the learner runs, then modifies.
select = widgets.Textarea(
    value='''SELECT
    *
FROM
    book_table''',
    width='50em',
    height='7em',
)
display(select)
select_button = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(select_button)
run_q(select, select_button)
"""
Explanation: <a id='select_from'></a>
<center>
Previous | Table of Contents | Next
</center>
<!--<center>
[Jump to: Selecting specific columns](#select_col) | [Jump to: Selecting distinct values](#select_distinct)
</center>-->
SELECT <br>
* ➞ an asterisk means "all columns" <br>
FROM <br>
table_name
To see the actual data in a table, we'll use SELECT and FROM clauses. In the SELECT clause, you tell SQL which columns you want to see. In the FROM clause, you tell SQL the table where those columns are located. An asterisk returns all columns from a particular table.
In plain English, the query below says: "Show me all columns and their data from the book_table"
End of explanation
"""
# Empty box for the "see everything in the sales_table" challenge.
select_c = widgets.Textarea(value='', width='50em')
display(select_c)
select_c_button = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(select_c_button)
run_q(select_c, select_c_button)

# Reference answer revealed by the cheat() helper.
select_answer = '''SELECT
    *
FROM
    sales_table'''
cheat(select_answer)
"""
Explanation: Quick Exercise:
Change the query above to show us all columns and their data from the auth_table instead of the book_table
Challenge:
Write a query to view all columns and their data from the sales_table
End of explanation
"""
# Pre-filled example: selecting specific columns instead of *.
select_col = widgets.Textarea(
    value='''SELECT
    book,
    author
FROM
    book_table''',
    width='50em',
    height='8em',
)
display(select_col)
select_col_button = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(select_col_button)
run_q(select_col, select_col_button)
"""
Explanation: <img align="left" src="http://i.imgur.com/p6d18FV.png"> <br><br>
Use asterisks sparingly. Usually, you'll select specific columns from a table rather than all columns. Using an asterisk to select all columns is okay when the table is small or when you tightly constrain your selection of rows. Otherwise, select specific columns and use WHERE and LIMIT (taught below) to go easy on your servers.
<a id='select_col'></a>
SELECT specific columns:
SELECT <br>
column_a, ➞ separate multiple columns with commas <br>
column_b ➞ optional, but conventional, to also use a return <br>
FROM <br>
table_name <br>
Instead of using an asterisk for "all columns", you can specify a particular column or columns. In plain English: "Show me the data in the book and author columns from the book_table"
End of explanation
"""
# Empty box: pull first_name and last_name from auth_table.
select_cols_chall = widgets.Textarea(value='', width='50em', height='8em')
display(select_cols_chall)
select_cols_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(select_cols_chall_b)
run_q(select_cols_chall, select_cols_chall_b)

# Reference answer revealed by the cheat() helper.
select_cols_chall_cheat = '''SELECT
    first_name,
    last_name
FROM
    auth_table'''
cheat(select_cols_chall_cheat)
"""
Explanation: Challenge:
Write a query to show the first_name and last_name columns from the auth_table
End of explanation
"""
# Empty box: select only the book column from the sales_table.
select_cols_chall2 = widgets.Textarea(value='', width='50em', height='8em')
display(select_cols_chall2)
select_cols_chall_b2 = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(select_cols_chall_b2)
run_q(select_cols_chall2, select_cols_chall_b2)

# Reference answer revealed by the cheat() helper.
select_cols_chall_cheat2 = '''SELECT
    book
FROM
    sales_table'''
cheat(select_cols_chall_cheat2)
"""
Explanation: Challenge:
Write a query to select only the book column from the sales_table
End of explanation
"""
# Pre-filled SELECT DISTINCT example on the sales_table.
distinct_q = widgets.Textarea(
    value='''SELECT
    DISTINCT book
FROM
    sales_table''',
    width='50em',
    height='7em',
)
display(distinct_q)
distinct_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(distinct_b)
run_q(distinct_q, distinct_b)
"""
Explanation: <a id='select_distinct'></a>
SELECT DISTINCT:
SELECT <br>
DISTINCT column_a ➞ returns only unique values <br>
FROM <br>
table_name <br>
Use DISTINCT to return unique values from a column, so if there are any repeats in a column, your output will include each value just once. The query below displays each book in the sales_table just once, even though we know each shows up multiple times in the table.
End of explanation
"""
# Empty box for the DISTINCT challenge (unique authors).
distinct_q_chall = widgets.Textarea(value='', width='50em', height='7em')
display(distinct_q_chall)
distinct_q_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(distinct_q_chall_b)
run_q(distinct_q_chall, distinct_q_chall_b)

# Renamed from the one-off "REP" to match the cheat-answer naming used
# everywhere else in this notebook (born_chall_c, order_chall_c, ...).
distinct_q_chall_c = '''SELECT
    DISTINCT author
FROM
    book_table'''
cheat(distinct_q_chall_c)
"""
Explanation: Challenge:
Write a query to return each author from the book_table without any names repeating.
End of explanation
"""
# Pre-filled WHERE example: filter the book_table to one author.
where_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    book_table
WHERE
    author = 'Hemingway' ''',
    width='50em',
    height='10em',
)
display(where_q)
where_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(where_b)
run_q(where_q, where_b)
"""
Explanation: <a id='where'></a>
<center>
Previous | Table of Contents | Next
</center>
<!--
[Jump to: WHERE & Text Values](#where_text) [Jump to: Where & Numbers](#table_of_contents) [Jump to: WHERE & Multiple Requirements](#where_and)
-->
SELECT <br>
column_a <br>
FROM <br>
table_name <br>
WHERE <br>
column_a = x ➞ result-set will only include rows where value of column_a is x
WHERE lets you filter results so you only see rows that specifically match your criteria. Below are a few more options for the WHERE clause:
Options for WHERE | Description
:------- | :-------------
col = 'some_text' | Put text in quotations. Capitalization is important!
col != x | Return rows where col's values DO NOT equal x
col < x | Return rows where col's value is less than x
col <= x | Return rows where col's value is less OR EQUAL TO than x
col IN (x, y) | Values can equal EITHER x OR y
col NOT IN (x, y) | Value are NEITHER x NOR y
col BETWEEN x AND y | Values are between x and y
col = x AND another_col = y | Returns rows when col's values are x AND another_col's values are y
col = x OR another_col = y | Returns rows when col's values are x OR another_col's values are y
WHERE & text values
Below, we use WHERE to tell SQL to only show us rows in the book_table when Hemingway is the author. In plain English, we're saying "Show me information about books that are written by Hemingway in the book_table"
End of explanation
"""
# Empty box: filter auth_table rows to authors from England.
where_q_chall = widgets.Textarea(value='', width='50em', height='10em')
display(where_q_chall)
where_b_chall = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(where_b_chall)
run_q(where_q_chall, where_b_chall)

# Reference answer revealed by the cheat() helper.
where_q_chall_cheat = '''SELECT
    *
FROM
    auth_table
WHERE
    country = 'England' '''
cheat(where_q_chall_cheat)
"""
Explanation: Quick Exercises:
Above, change the name from 'Hemingway' to 'Shakespeare', rerun
Delete the quotation marks around the word Shakespeare, rerun. Why the error?
Put double quotation marks, rerun
Change "Shakespeare" to "shakespeare", rerun
Change "shakespeare" to "Twain", rerun
Change "Twain" to 'Hemingway', rerun to get back to where we started
Change = in the WHERE clause to !=, rerun
Challenge:
Write a query to return all columns of the auth_table, but only rows where the author's country is England.
End of explanation
"""
# Pre-filled example of WHERE ... IN (multiple possible matches).
in_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    book_table
WHERE
    author IN ('Hemingway', 'Austen')''',
    width='50em',
    height='10em',
)
display(in_q)
in_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(in_b)
run_q(in_q, in_b)
"""
Explanation: We use IN (value_1, value_2) to return rows that can match more than one value. In plain English, the query below says, "Show me all columns from the book table when the author is EITHER Hemingway OR Austen"
End of explanation
"""
# Pre-filled example of a numeric comparison in the WHERE clause.
greater_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    sales_table
WHERE
    revenue > 18''',
    width='50em',
    height='10em',
)
display(greater_q)
greater_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(greater_b)
run_q(greater_q, greater_b)
"""
Explanation: Quick Exercise:
Add 'Faulkner' to the list, rerun.
Replace IN with NOT IN, rerun.
Delete the whole last line and replace it so that the query returns all books except for <u>Emma</u> and <u>Macbeth</u>.
<a id='where_numbers'></a>
WHERE & number values
The WHERE clause is useful with numbers as well. We can start throwing in comparisons like less than (<) and greater than (>):
End of explanation
"""
# Empty box: authors with a birth_year before 1800.
born_chall = widgets.Textarea(value='', width='50em', height='10em')
display(born_chall)
born_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(born_chall_b)
run_q(born_chall, born_chall_b)

# Reference answer revealed by the cheat() helper.
born_chall_c = '''SELECT
    *
FROM
    auth_table
WHERE
    birth_year < 1800
    '''
cheat(born_chall_c)
"""
Explanation: Quick Exercises:
Replace > with <, rerun
Add an = directly after the <, rerun
Change the line to revenue BETWEEN 10 AND 12, rerun
Challenge:
Write a query that returns all columns from the auth_table for authors with a birth_year before 1800:
End of explanation
"""
# Pre-filled example combining two WHERE conditions with AND.
and_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    book_table
WHERE
    author = 'Hemingway'
    AND cogs > 11''',
    width='50em',
    height='12em',
)
display(and_q)
and_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(and_b)
run_q(and_q, and_b)
"""
Explanation: <a id='where_and'></a>
WHERE with AND/OR
So far, we've only filtered by a specific column (like the revenue column , country, or author columns). Sometimes you'll want to filter by multiple columns. This is where AND and OR come in handy.
End of explanation
"""
# Empty box: English authors born after 1650 (AND challenge).
and_chall = widgets.Textarea(value='', width='50em', height='14em')
display(and_chall)
and_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(and_chall_b)
run_q(and_chall, and_chall_b)

# Reference answer revealed by the cheat() helper.
and_chall_c = '''SELECT
    last_Name,
    country,
    birth_Year
FROM
    auth_table
WHERE
    country = 'England'
    AND birth_Year > 1650 '''
cheat(and_chall_c)
"""
Explanation: Quick Exercises:
Delete AND cogs > 11 and rerun the query. Then replace it and run it again.
Change the word AND to OR, rerun. What's going on?
Challenge:
Write a query to pull the last_name, country, and birth_year of authors who were from England AND born after 1650
End of explanation
"""
# Empty box: Macbeth OR revenue above $17 (OR challenge).
or_chall = widgets.Textarea(value='', width='50em', height='12em')
display(or_chall)
or_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(or_chall_b)
run_q(or_chall, or_chall_b)

# Reference answer revealed by the cheat() helper.
or_chall_cheat = '''SELECT
    *
FROM
    sales_table
WHERE
    book = 'Macbeth'
    OR revenue > 17 '''
cheat(or_chall_cheat)
"""
Explanation: Challenge:
Write a query to see all columns from the sales_table where the book name is <u>Macbeth</u> OR revenue was greater than $17.
End of explanation
"""
# Pre-filled LIKE example: case-insensitive match on the author name.
like_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    book_table
WHERE
    author LIKE 'hemingway' ''',
    width='50em',
    height='10em',
)
display(like_q)
like_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(like_b)
run_q(like_q, like_b)
"""
Explanation: <a id='where_like'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
column_a <br>
FROM <br>
table_name <br>
WHERE <br>
column_a LIKE 's%Me_t%xT' ➞ correct capitalization isn't necessary with LIKE, and % stands in for any missing character
LIKE lets you search for a value even if you capitalize it incorrectly. It also allows you to work with percentage signs that act as wildcards, which stand in for an unlimited number of missing characters (helpful if you don't know how to spell something). Take a look at the query below. Recall that earlier when we wrote author = 'shakespeare', we got no results.
End of explanation
"""
# Pre-filled LIKE example using % as a wildcard inside the pattern.
like_q2 = widgets.Textarea(
    value='''SELECT
    *
FROM
    book_table
WHERE
    author LIKE 'He%ingway' ''',
    width='50em',
    height='11em',
)
display(like_q2)
like_b2 = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(like_b2)
run_q(like_q2, like_b2)
"""
Explanation: Quick Exercises:
Replace 'hemingway' with 'hemingWAY', rerun
Replace LIKE with =, rerun
Replace = with LIKE again, but change 'hemingWAY' to 'Hemmingway', rerun
Replace 'Hemmingway' with 'Hem', rerun
Using % as a "wildcard"
With exercises #3 and #4, you saw that LIKE alone has a limitation - it only lets you mess with capitalization. You need wildcards to do more with LIKE. Let's say you can't remember if Hemingway is spelled with 1 "m" or 2. Use a percentage sign (%) to get the value you're looking for:
End of explanation
"""
# Empty box: find the book whose title starts with "Pride".
like_chall = widgets.Textarea(value='', width='50em', height='12em')
display(like_chall)
like_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(like_chall_b)
run_q(like_chall, like_chall_b)

# Reference answer revealed by the cheat() helper.
like_chall_c = '''SELECT
    book,
    author
FROM
    book_table
WHERE
    book LIKE 'Pride %' '''
cheat(like_chall_c)
"""
Explanation: Quick Exercises:
Change 'He%ingway' to 'Hemm%ingway'. Why doesn't this work?
Change 'Hemm%ingway' to 'Hem%', rerun
Change 'Hem%' to '%us%', rerun
Change LIKE to =, rerun (see how wildcards only work with LIKE?)
Challenge:
Write a query to pull the book and author columns from the book_table. Pretend you can't remember the full name of the book you're looking for. You just know it starts with the word "Pride".
End of explanation
"""
# Pre-filled ORDER BY example sorting by book title.
order_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    book_table
ORDER BY
    book''',
    width='50em',
    height='11em',
)
display(order_q)
order_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(order_b)
run_q(order_q, order_b)
"""
Explanation: <img align="left" src="http://i.imgur.com/p6d18FV.png"> <br><br>
Use LIKE sparingly: It's a great tool, but it really puts the strain on your database's servers. Use it only when a table is pretty small or when you've limited your result-set by using additional filters in the WHERE clause.
<a id='order_by'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
column_a <br>
FROM <br>
table_name <br>
[WHERE clause, optional] <br>
ORDER BY ➞ sorts the result-set by column_a <br>
column_a DESC ➞ DESC is optional, it sorts results in descending order
Without an ORDER BY clause, the default result-set will be sorted by however it appears in the database (which is a crap-shoot depending on the type of table). Use ORDER BY to sort your result-set by a particular column, and add DESC to sort in descending order (Z→A, 100→1).
End of explanation
"""
# Empty box: book and revenue, sorted by revenue descending.
order_chall = widgets.Textarea(value='', width='50em', height='12em')
display(order_chall)
order_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(order_chall_b)
run_q(order_chall, order_chall_b)

# Reference answer revealed by the cheat() helper.
order_chall_c = '''SELECT
    book,
    revenue
FROM
    sales_table
ORDER BY
    revenue DESC'''
cheat(order_chall_c)
"""
Explanation: Quick Exercises:
Change the query so it sorts by author instead
Add DESC and rerun
Delete author DESC, replace it with author, book, rerun
Add DESC so it reads author, book DESC, rerun
Change the line to author DESC, book
Challenge:
Write a query to see the book and revenue columns from the sales_table, and sort the results by revenue in descending order.
End of explanation
"""
# Empty box: combined WHERE (LIKE + OR) and ORDER BY challenge.
order_chall2 = widgets.Textarea(value='', width='50em', height='16em')
display(order_chall2)
order_chall_b2 = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(order_chall_b2)
run_q(order_chall2, order_chall_b2)

# Reference answer revealed by the cheat() helper.
order_chall_c2 = '''SELECT
    *
FROM
    book_table
WHERE
    author LIKE '%pear%'
    OR cogs > 12
ORDER BY
    cogs'''
cheat(order_chall_c2)
"""
Explanation: Challenge:
Write a query to view all columns from the book_table, but only where the author's name is something like "pear" or COGs are over $12. Sort your results by COGs with the cheapest book first.
End of explanation
"""
# Pre-filled LIMIT example capping the result-set at 5 rows.
limit_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    sales_table
LIMIT 5''',
    width='50em',
    height='14em',
)
display(limit_q)
limit_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(limit_b)
run_q(limit_q, limit_b)
"""
Explanation: <a id='limit'></a>
<center>
Previous | Table of Contents | Next
</center>
<font color='#1f5fd6'>Microsoft SQL Server | <font color='#1f5fd6'>MySQL | <font color='#1f5fd6'>Oracle | <font color='#1f5fd6'>SQLite </font>
:------------------: | :---: | :----: | :----:
SELECT TOP N column_name | LIMIT N | WHERE ROWNUM <= N | LIMIT N
SELECT <br>
column_a <br>
FROM <br>
table_name <br>
[WHERE clause] <br>
[ORDER BY clause] <br>
LIMIT N ➞ limits the result-set to N rows
LIMIT lets you set a maximum limit to the number of rows that your query returns. You've seen the query below before, but now we've added a LIMIT clause:
End of explanation
"""
# Empty box: the two cheapest books by cogs (ORDER BY + LIMIT).
limit_chall = widgets.Textarea(value='', width='50em', height='12em')
display(limit_chall)
limit_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(limit_chall_b)
run_q(limit_chall, limit_chall_b)

# Reference answer revealed by the cheat() helper.
limit_chall_c = '''SELECT
    book,
    cogs
FROM
    book_table
ORDER BY
    cogs
LIMIT 2'''
cheat(limit_chall_c)
"""
Explanation: Quick Exercises:
Change 5 to 10, rerun
Add an ORDER BY clause so that you see the top 10 transactions in terms of revenue, rerun
Add a WHERE clause so you only see transactions relating to <u>Emma</u>
Challenge:
Write a query to view the title and cogs of the two books with the lowest cogs in the book_table
End of explanation
"""
# Empty box: two-column sort plus a 15-row LIMIT.
limit_chall2 = widgets.Textarea(value='', width='50em', height='12em')
display(limit_chall2)
limit_chall2_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(limit_chall2_b)
run_q(limit_chall2, limit_chall2_b)

# Reference answer revealed by the cheat() helper.
limit_chall2_c = '''SELECT
    book,
    revenue
FROM
    sales_table
ORDER BY
    book, revenue
LIMIT 15'''
cheat(limit_chall2_c)
"""
Explanation: Challenge:
Write a query to view the book and revenue columns from the sales_table and sort by book title first, then by revenue (ascending). Limit your results to 15 rows.
End of explanation
"""
# Pre-filled JOIN example keyed on author / last_name.
join_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    book_table
    JOIN auth_table
    ON book_table.author = auth_table.last_name''',
    width='50em',
    height='11em',
)
display(join_q)
join_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(join_b)
run_q(join_q, join_b)
"""
Explanation: <a id='join_tables'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
table_x.column_a, ➞ read this as "column_a from table_x"<br>
table_y.column_b, ➞ "column_b from table_y"<br>
FROM<br>
table_x<br>
JOIN table_y <br>
ON table_x.key_column_x = table_y.key_column_y <br> ➞ table_x's key_column_x has corresponding values with table_y's key_column_y<br>
[WHERE clause] <br>
[ORDER BY clause] <br>
[LIMIT clause] <br>
The ability to join tables is the most fundamental and useful part about relational databases. Different tables have columns with corresponding values, and you can use these columns as "keys" to join the two tables.
The format table_x.key_column can be read as "key_column from table_x"; it tells SQL the tables where a column is located. We didn't need this before because we were only using one table at a time, so SQL knew exactly which table we were talking about. When we deal with more than one table, we need to be more specific. So for example, book_table.book means "the book column from the book_table", and auth_table.last_name means "the last_name column from the auth_table."
Think back to our original discussion of splitting up our author and book data onto two separate tables:
You could think of the columns in these tables in terms of a Venn Diagram. Again, the format table_x.key_column is read as "key_column from table_x", so book_table.author means "the author column from the book_table":
The author column from the book_table corresponds with the last_name column in the auth_table - they both list the last names of the writers. Whenever you have two tables corresponding columns, you can "join" them by telling SQL to use these corresponding columns as keys. book_table.author and auth_table.last_name are the key columns for our join.
End of explanation
"""
# Same JOIN query again, in a taller box for the follow-up exercises.
join_q2 = widgets.Textarea(
    value='''SELECT
    *
FROM
    book_table
    JOIN auth_table
    ON book_table.author = auth_table.last_name''',
    width='50em',
    height='18em',
)
display(join_q2)
join_b2 = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(join_b2)
run_q(join_q2, join_b2)
"""
Explanation: What just happened? SQL went through these steps:
First, SQL pulled up the two tables that we named in the FROM clause: FROM book_table JOIN auth_table.
Then it identified the "key" columns that we named with ON: ON book_table.author = auth_table.last_name.
Next, it matched up the values on the key columns:
Whenever it found a match, it made a kind of copy of the row from the auth_table and pasted it to the book_table
Finally, it literally "joined" the two tables by returning their columns in a single table
Quick Exercises:
The JOIN query we've been discussing has been reproduced in the box below for these exercises.
1. Change the query so you only see the book, first_name and the author's last name (you can do this with either author or last_name), and the birth_year, then rerun.
2. Add a WHERE clause so that you only see books by Hemingway and Austen, rerun.
3. Add an ORDER BY clause so that the author born first appears first, and so that their books appear in alphabetical order. Rerun.
End of explanation
"""
# Empty box: join book_table to sales_table, limited to 20 rows.
join_chall = widgets.Textarea(value='', width='50em', height='15em')
display(join_chall)
join_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(join_chall_b)
run_q(join_chall, join_chall_b)

# Reference answer revealed by the cheat() helper.
join_chall_c = '''SELECT
    *
FROM
    book_table
    JOIN sales_table
    ON book_table.book = sales_table.book
LIMIT 20'''
cheat(join_chall_c)
"""
Explanation: Challenge:
Our "database" has another Venn Diagram relationship: the book_table is related to the sales_table. Write a query to join these tables and view all their columns but limit your results to 20 rows. Use the Venn Diagram below as a guide:
End of explanation
"""
# Empty box for the join follow-up (disambiguating the shared column).
join_chall2 = widgets.Textarea(value='', width='50em', height='17em')
display(join_chall2)
join_chall2_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(join_chall2_b)
run_q(join_chall2, join_chall2_b)

# Reference answer (with an inline note) revealed by the cheat() helper.
join_chall_c2 = '''SELECT
    book_table.book, [NOTE: or you could use "sales_table.book" instead]
    cogs,
    author,
    id,
    revenue
FROM
    book_table
    JOIN sales_table
    ON book_table.book = sales_table.book
LIMIT 20'''
cheat(join_chall_c2)
"""
Explanation: Challenge Continued:
Start by copying the query that you wrote in the previous challenge and pasting it in the box below.
1. Change the line book_table.book = sales_table.book to book = book and rerun. What's going wrong? Fix it and rerun.
2. Change the query so that you only see the book title listed once. If you get stuck, remember that table_x.column_a means "column_a from table_x".
End of explanation
"""
# Empty box for the three-table join challenge.
multi_join_chall = widgets.Textarea(value='', width='50em', height='25em')
display(multi_join_chall)
multi_join_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(multi_join_chall_b)
run_q(multi_join_chall, multi_join_chall_b)

# Reference answer revealed by the cheat() helper.
multi_join_chall_c = '''SELECT
    first_name,
    last_name,
    book_table.book,
    cogs,
    revenue
FROM
    book_table
    JOIN auth_table
    ON book_table.author = auth_table.last_name
    JOIN
    sales_table
    ON book_table.book = sales_table.book'''
cheat(multi_join_chall_c)
"""
Explanation: <a id='multi_join'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
table_x.column_a, <br>
table_y.column_b <br>
table_z.column_c<br>
FROM<br>
table_x<br>
JOIN table_y ON table_x.key_column = table_y.key_column<br>
JOIN table_z ON table_x.other_key_column = table_z.other_key_column<br>
[WHERE clause] <br>
[ORDER BY clause] <br>
[LIMIT clause] <br>
As long as they are directly related or related by the transitive property, you can join multiple tables. Consider the sales_table and the auth_table in a Venn Diagram - there's no relation at all:
However, when the book_table enters the picture, suddenly the sales_table and auth_table have a connection:
Now we have an opportunity to join all three!
Challenge:
Write a query to show the first and last name of the author, the book title, the COGs, and the revenue from each transaction.
Extra credit once you've completed the challenge: Only return rows where the book was written by an English author. Sort your results so that the transaction with the most revenue appears first.
End of explanation
"""
# Extra-credit answer: three-table join filtered to English authors,
# sorted with the biggest transaction first. Revealed by cheat().
multi_join_chall_c2 = '''SELECT
    first_name,
    last_name,
    book_table.book,
    cogs,
    revenue
FROM
    book_table
    JOIN auth_table
    ON book_table.author = auth_table.last_name
    JOIN
    sales_table
    ON book_table.book = sales_table.book
WHERE
    country = 'England'
ORDER BY
    revenue DESC'''
cheat(multi_join_chall_c2)
"""
Explanation: Answer to the extra credit:
End of explanation
"""
# Pre-filled INNER JOIN example on the partially-overlapping company tables.
inner_join_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    tech_cos
    JOIN public_cos
    ON tech_cos.company = public_cos.company''',
    width='50em',
    height='11em',
)
display(inner_join_q)
inner_join_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(inner_join_b)
run_q(inner_join_q, inner_join_b)
"""
Explanation: <img align="left" src="http://i.imgur.com/p6d18FV.png"> <br>
Use multiple joins sparingly: Multiple joins can put a lot of strain on servers because SQL has to do a lot of work matching up all that data. The more tangential the relationship, the worse it gets. Avoid more than 2 degrees of separation, and avoid joining 2 or more large tables. It's ok if one of your tables is big, but the others should be small.
<a id='join_types'></a>
<center>
Previous | Table of Contents | Next
</center>
There are more ways to join two tables than the method we just covered. However, not all RDBMS support these different join methods. We'll learn about each of these methods, even if we can only practice 2 of them in SQLite.
<font color='#1f5fd6'> Join Type |<font color='#1f5fd6'>Microsoft SQL Server | <font color='#1f5fd6'>MySQL | <font color='#1f5fd6'>Oracle | <font color='#1f5fd6'>SQLite </font>
:----: | :------------------: | :---: | :----: | :----:
JOIN or INNER JOIN | ✓ | ✓ | ✓ | ✓
LEFT JOIN or LEFT OUTER JOIN | ✓ | ✓ | ✓ | ✓
RIGHT JOIN or RIGHT OUTER JOIN | ✓ | ✓ | ✓ | not supported
OUTER JOIN or FULL OUTER JOIN | ✓ | not supported | ✓ | not supported
We're going to leave behind the book database for the next lesson, since a different data set will help illustrate the point a little better.
Until now, the tables that we've joined have had columns that correspond perfectly - that is to say, every value that appears in one table also appears in the other. There aren't any authors that appear in the auth_table that don't also appear at least once in the book_table, and vice versa.
Sometimes, you'll have two tables with corresponding columns, but they don't match perfectly.
Consider the two tables below. The first lists tech companies and their CEOs, the second lists publicly traded companies and their share price.
Amazon, Alphabet, and Microsoft all appear on both tables. But Uber, SpaceX and AirBnB - which haven't IPO'd - aren't on the public_cos table. Conversely, Walmart, GE and P&G only appear on the public_cos table.
Even though it's not a perfect match, there is some overlap. So, we can still use the company columns from each table as keys to join the tables:
<a id='inner_joins'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
table_x.column_a, ➞ read this as "column_a from table_x"<br>
table_y.column_b, ➞ "column_b from table_y"<br>
FROM<br>
table_x<br>
JOIN table_y ➞ SQL interprets JOIN and INNER JOIN as the same thing <br>
ON table_x.key_column = table_y.key_column
So what would happen if we tried to join public_cos and tech_cos using the method we just learned with the book database? We'll give it a shot:
End of explanation
"""
# Pre-filled LEFT JOIN example keeping every tech_cos row.
left_q = widgets.Textarea(
    value='''SELECT
    *
FROM
    tech_cos
    LEFT JOIN public_cos
    ON tech_cos.company = public_cos.company''',
    width='50em',
    height='11em',
)
display(left_q)
left_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(left_b)
run_q(left_q, left_b)
"""
Explanation: This time, SQL can't find a match for every value in the two different company columns:
So it performs an "INNER JOIN". You can write either JOIN or INNER JOIN - SQL will interpret them as the same thing. This eliminates any rows that don't have matching values, then combines the tables:
<a id='left_joins'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
table_x.column_a,<br>
table_y.column_b,<br>
FROM<br>
table_x<br>
LEFT JOIN table_y ➞ see all results from the first ("left") table & results when available from second table <br>
ON table_x.key_column = table_y.key_column
If you want to make sure you see all the rows from a particular table - even if there's no match in the other table - you can do a LEFT JOIN instead. It lets you prioritize the results from one table over another. Let's say your priority is to see all tech companies in your result-set, but you also want to see the share_price when that data is available:
End of explanation
"""
# Same LEFT JOIN query again, for the follow-up exercises.
left_q2 = widgets.Textarea(
    value='''SELECT
    *
FROM
    tech_cos
    LEFT JOIN public_cos
    ON tech_cos.company = public_cos.company''',
    width='50em',
    height='11em',
)
display(left_q2)
left_b2 = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(left_b2)
run_q(left_q2, left_b2)
"""
Explanation: With a LEFT JOIN, SQL still starts by looking for matching values.
When it fails to find a match, it will still keep the values on the "left" table, but get rid of the unmatched values on the "right" table.
Quick Exercise:
The query from above has been reproduced below for these exercises.
1. Figure out how to change the query above so that you only see the company column appear once.
2. Rewrite the query so that public_cos becomes the priority table instead.
End of explanation
"""
# FULL OUTER JOIN emulation for SQLite: a LEFT JOIN unioned with the
# right-table rows that have no match on the left.
full_join_q = widgets.Textarea(
    value='''SELECT
    ceo,
    tech_cos.company,
    public_cos.company,
    share_price
FROM
    tech_cos
    LEFT JOIN public_cos ON tech_cos.company = public_cos.company
UNION ALL
SELECT
    ' ',
    ' ',
    public_cos.company,
    share_price
FROM
    public_cos
WHERE
    public_cos.company NOT IN (SELECT company FROM tech_cos)''',
    width='50em',
    height='26em',
)
display(full_join_q)
full_join_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black',
)
display(full_join_b)
run_q(full_join_q, full_join_b)
"""
Explanation: SELECT <br>
table_x.column_a,<br>
table_y.column_b,<br>
FROM<br>
table_x<br>
RIGHT JOIN table_y ➞ see all results from the second ("right") table, results where available from first table <br>
ON table_x.key_column = table_y.key_column
It's exactly the same as LEFT JOIN, except it prioritizes the second table (the "right" table) over the first ("left") table. We can't practice it because SQLite doesn't support it, and it's super redundant anyway because we can just use LEFT JOIN. Boom. We're done with RIGHT JOIN.
<a id='outer_joins'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
table_x.column_a,<br>
table_y.column_b,<br>
FROM<br>
table_x<br>
OUTER JOIN table_y ➞ see all results from BOTH the first and second table <br>
ON table_x.key_column = table_y.key_column
What if you want to see all values from both tables? You can do this with an OUTER JOIN. Unfortunately, MySQL and SQLite (what we're using right now!) don't support it, so we can't practice it.
If you are using Oracle or Microsoft SQL, then you'd use the example code above. For MySQL and SQLite, there is a workaround. You don't need to understand what's going on in the code for now, just look at the output to make sure you understand what the OUTER JOIN output should look like.
End of explanation
"""
# One-time setup button: seeds extra demo rows used by the join drills.
insert_b = widgets.Button(
    description='Click here JUST ONCE before starting',
    width='20em', height='3em',
    color='white', background_color='#1f5fd6', border_color='#1f5fd6',
)
display(insert_b)

def insert_button(b):
    """Click handler: INSERT two authors and two books, then confirm.

    Disables the button after the first click so an accidental second
    click cannot insert duplicate rows (the label promises "JUST ONCE",
    but the original handler did not enforce it).
    """
    statements = (
        '''INSERT INTO auth_table VALUES ('Tolstoy', 'Leo', 'Russia', 1828)''',
        '''INSERT INTO auth_table VALUES ('Twain', 'Mark', 'USA', 1835)''',
        '''INSERT INTO book_table VALUES ('Jude the Obscure', '11.25', 'Hardy')''',
        '''INSERT INTO book_table VALUES ('The Age of Innocence', '14.20', 'Wharton')''',
    )
    for stmt in statements:
        run(stmt)
    b.disabled = True  # guard against duplicate inserts on re-click
    print('New rows have been added to auth_table and book_table!')

insert_b.on_click(insert_button)
"""
Explanation: The OUTER JOIN starts out the same as the INNER JOIN and LEFT JOIN, trying to find matches wherever it can:
But when it can't find a match, instead of eliminating any of the rows, it makes room for them:
<a id='join_drills'></a><center>
Previous | Table of Contents | Next
</center>
End of explanation
"""
# Drill 1: blank SQL editor + Run button, with a sample answer for cheat().
join_drill1 = widgets.Textarea(value='', width='50em', height='7em')
join_drill1_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(join_drill1)
display(join_drill1_b)
run_q(join_drill1, join_drill1_b)
join_drill1_c = '''SELECT
    *
FROM
    auth_table [change to "book_table" for second part of challenge]'''
cheat(join_drill1_c)
"""
Explanation: We've now added some rows to auth_table and book_table so we can practice our new join skills!
Challenge
Start simple by taking a look at the new rows we've added. Write a query to see all columns and rows from book_table, then change the query so you can take a look at auth_table instead:
End of explanation
"""
# Drill 2: blank SQL editor + Run button, with a sample INNER JOIN answer.
join_drill2 = widgets.Textarea(value='', width='50em', height='12em')
join_drill2_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(join_drill2)
display(join_drill2_b)
run_q(join_drill2, join_drill2_b)
join_drill2_c = '''SELECT
    book,
    author,
    first_name
FROM
    book_table
    JOIN auth_table ON book_table.author = auth_table.last_name'''
cheat(join_drill2_c)
"""
Explanation: Now auth_table has 2 authors listed (Tolstoy and Twain) that don't appear on the book_table, and the book_table has two books (<u>Jude the Obscure</u> and <u>The Age of Innocence</u>) whose authors don't appear in the auth_table.
Challenge:
Write a query to view the book titles, first names, and last names of authors that appear on both the auth_table and the book_table.
End of explanation
"""
# Drill 3: blank SQL editor + Run button, with a sample LEFT JOIN answer.
join_drill3 = widgets.Textarea(value='', width = '50em', height = '13em')
display(join_drill3)
join_drill3_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(join_drill3_b)
run_q(join_drill3, join_drill3_b)
# BUG FIX: the sample answer was previously assigned to join_drill3_b,
# clobbering the Button object created above. Use the conventional *_c name.
join_drill3_c = '''SELECT
    book,
    country
FROM
    book_table
    LEFT JOIN auth_table
    ON book_table.author = auth_table.last_name'''
cheat(join_drill3_c)
"""
Explanation: Challenge:
Write a query to see the titles of all the books from the book_table, and the author's country when that information is available.
End of explanation
"""
# Drill 4: blank SQL editor + Run button, with a sample LEFT JOIN answer.
join_drill4 = widgets.Textarea(value='', width='50em', height='16em')
join_drill4_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(join_drill4)
display(join_drill4_b)
run_q(join_drill4, join_drill4_b)
join_drill4_c = '''SELECT
    first_name,
    book
FROM
    auth_table
    LEFT JOIN book_table ON auth_table.last_name = book_table.author'''
cheat(join_drill4_c)
"""
Explanation: Quick Exercise:
Edit the query above so that you only see books by authors from England
Edit it so that you only see books by authors NOT from England.
Challenge
Write a query to see the first names of all authors in the auth_table, and the books they've written when that information is available.
End of explanation
"""
join_drill4_ex_c ='''SELECT
first_name,
book
FROM
auth_table
LEFT JOIN book_table ON auth_table.last_name = book_table.author
WHERE
first_name = 'William'
ORDER BY
book
LIMIT 3'''
cheat(join_drill4_ex_c)
"""
Explanation: Quick Exercise:
Change the query so that you only see results when the writer's first name is William, rerun
Change the query to sort the books in alphabetical order, rerun
Limit the number of rows to 3, rerun
End of explanation
"""
# Drill 5 (revenue per book). Renamed from join_drill4 to join_drill5: this
# cell previously reused the join_drill4 names, silently clobbering the
# earlier drill's widget references.
join_drill5 = widgets.Textarea(value='', width='50em', height='12em')
display(join_drill5)
join_drill5_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(join_drill5_b)
run_q(join_drill5, join_drill5_b)
join_drill5_c = '''SELECT
    book_table.book,
    revenue
FROM
    book_table
    LEFT JOIN sales_table ON book_table.book = sales_table.book
ORDER BY
    revenue'''
cheat(join_drill5_c)
"""
Explanation: Challenge:
Write a query to return all books from the book_table, and their revenue data whenever that information is available. Try to figure out how you might sort your results so that you see books with no sales first.
End of explanation
"""
# Column-alias demo: pre-filled query renaming the book column via AS.
as_q = widgets.Textarea(
    value='''SELECT
    book AS book_title
FROM
    book_table''',
    width='50em', height='14em')
as_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(as_q)
display(as_b)
run_q(as_q, as_b)
"""
Explanation: <a id='as'></a>
<center>
Previous | Table of Contents | Next
</center>
Assigning aliases to columns
SELECT <br>
column_a AS alias_a ➞ creates an alias for column_a <br>
FROM<br>
table_name<br>
WHERE<br>
alias_a = x ➞ optional; use the alias in the WHERE clause<br>
ORDER BY<br>
alias_a ➞ optional; use the alias in the ORDER BY clause<br>
[LIMIT clause] <br>
Aliases allow you to rename columns and tables in your query. They will come in handy as we learn to do more with the data.
In plain English, the query below can be read as "Show me the book column from the book_table, but rename the column to book_title."
End of explanation
"""
as_ex_c ='''SELECT
book titles,
author AS author_name
FROM
book_table
WHERE
author_name = 'Austen'
ORDER BY
titles DESC'''
cheat(as_ex_c)
"""
Explanation: Quick Exercises:
Change the query to rename the column to titles.
Delete the word AS and rerun. (You'll see that AS is totally optional when assigning aliases. It just makes the query easier to read.)
Change the query so that you also pull the author, but rename the column author_name.
Change the query so that you only see books by Austen. Use the column's alias in your WHERE clause.
Order results by book in reverse alphabetical order. Use the column's alias in your ORDER BY clause.
End of explanation
"""
# Alias challenge: blank editor + Run button, with a sample answer that uses
# the column aliases inside the WHERE clause.
as_chall = widgets.Textarea(value='', width='50em', height='15em')
as_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(as_chall)
display(as_chall_b)
run_q(as_chall, as_chall_b)
as_chall_c = '''SELECT
    last_name AS author,
    country as nationality,
    birth_year AS year_born
FROM
    auth_table
WHERE
    nationality != 'England'
    AND year_born BETWEEN 1800 and 1850'''
cheat(as_chall_c)
"""
Explanation: Challenge:
Write a query to pull:
last_name, but renamed author
country, but renamed nationality
birth_year, but renamed year_born
Use each column's alias in the WHERE clause; use WHERE clause to only return results where the author is not from England, AND was born between 1800 and 1850.
End of explanation
"""
# Table-alias demo: pre-filled join using one-letter table aliases B and S.
as_table_q = widgets.Textarea(
    value='''SELECT
    S.book,
    S.revenue,
    B.cogs
FROM
    book_table B
    JOIN sales_table S
    ON S.book = B.book''',
    width='50em', height='12em')
as_table_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(as_table_q)
display(as_table_b)
run_q(as_table_q, as_table_b)
"""
Explanation: Assigning aliases to tables
SELECT <br>
X.column_a, <br>
Y.column_b <br>
FROM <br>
table_x X ➞ assigns table_x the alias X <br>
JOIN table Y ➞ assigns table_y the alias Y <br>
ON X.key_column = Y.key_column ➞ table aliases can be used as substitutes in the table_x.column_a format <br>
When dealing with one or more tables in a query, we commonly assign capitalized one-letter aliases to tables. Writing X.key_column is much shorter than table_x.key_column, and coders like shortcuts. They also typically won't use AS when assigning aliases to tables (although it makes no difference either way).
When you're dealing with only one table, it's unnecessary to use table aliases because SQL knows exactly what columns you are referring to. However, when you are dealing with 2 or more tables, particularly tables that have columns with the same names (like book, which is a column in both sales_table and book_table), then aliases are extremely handy.
End of explanation
"""
# Table-alias challenge: blank editor + Run button, with a sample answer.
as_table_chall = widgets.Textarea(value='', width='50em', height='10em')
as_table_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(as_table_chall)
display(as_table_chall_b)
run_q(as_table_chall, as_table_chall_b)
as_table_chall_c = '''SELECT
    B.book,
    A.country
FROM
    book_table B
    JOIN auth_table A ON B.author = A.last_name'''
cheat(as_table_chall_c)
"""
Explanation: Quick Exercises:
In the SELECT clause, change S.book to just book, rerun. What's going wrong?
Now change book to B.book, rerun.
Change S.revenue to B.revenue, rerun. What's going wrong?
Change B.revenue to just revenue and rerun. Note that when you're joining tables, it's standard to use table aliases even on columns that don't need them. This makes it easier for someone to read your query even if they are unfamiliar with the tables that you're working with. However, as you see, it's not technically necessary.
Give each of these columns an alias (any alias) and rerun.
Challenge:
Write a query to view books and the author's country by joining auth_table and book_table. Give auth_table the alias A and book_table the alias B. Use the aliases in the ON part of the JOIN clause.
End of explanation
"""
# Multi-join alias challenge: blank editor + Run button, with a sample answer.
as_chall2 = widgets.Textarea(value='', width = '50em', height = '20em')
display(as_chall2)
as_chall2_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(as_chall2_b)
run_q(as_chall2, as_chall2_b)
as_chall2_c ='''SELECT
    B.book as titles,
    S.revenue as earnings,
    A.last_name as author_name,
    A.birth_year as year_born
FROM
    book_table B
    JOIN auth_table A ON B.author = A.last_name
    JOIN sales_table S on B.book = S.book
WHERE
    year_born BETWEEN 1700 and 1900
    AND earnings > 12
ORDER BY
    earnings
LIMIT 20'''
# BUG FIX: cheat() was previously passed the Run button (as_chall2_b) instead
# of the answer string, so the reveal showed the widget rather than the SQL.
# The answer also now includes author_name, which the challenge text asks for.
cheat(as_chall2_c)
"""
Explanation: Challenge:
Write a query to view these columns:
book titles with the alias titles
revenue with the alias earnings
author's last name with the alias author_name
year in which the author was born with the alias year_born
Use one-letter aliases for table names in your SELECT and JOIN clauses
For your WHERE and ORDER BY clauses:
Use column aliases
Only view results where author was born between 1700 and 1900 AND where revenue is more than $12.
Sort your results so that earnings appear in ascending order
Limit your results to 20 rows
End of explanation
"""
# Operator demo: pre-filled query computing gross profit (revenue - cogs).
op_q = widgets.Textarea(
    value='''SELECT
    B.book,
    S.revenue,
    B.cogs,
    S.revenue - B.cogs
FROM
    book_table B
    JOIN sales_table S ON B.book = S.Book''',
    width='50em', height='18em')
op_q_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(op_q)
display(op_q_b)
run_q(op_q, op_q_b)
"""
Explanation: <a id='operators'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
column_a + column_b, ➞ adds the values in column_a and columns_b<br>
column_a - column_b, ➞ subtracts<br>
column_a * column_b, ➞ multiplies<br>
column_a / column_b, ➞ divides<br>
(column_a + column_b) * column_c, ➞ use parentheses to make more complex calculations<br>
FROM <br>
table_name <br>
[WHERE clause] <br>
[ORDER BY clause] <br>
[LIMIT clause] <br>
This is pretty straightforward. Let's start by calculating gross profit per transaction: revenue minus cogs. Recall again that we can use S.book or B.book - we'll get the same results.
End of explanation
"""
# Operator challenge: blank editor + Run button, with a sample answer.
op_chall = widgets.Textarea(value='',width = '50em', height = '18em')
display(op_chall)
op_chall_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(op_chall_b)
run_q(op_chall, op_chall_b)
# BUG FIX: the NOT IN list previously said 'Falkner', which doesn't match the
# 'Faulkner' spelling used by the data and the challenge text, so that author
# was never actually excluded.
op_chall_c ='''SELECT
    B.book,
    B.author,
    (S.revenue - B.cogs) / S.revenue AS gross_margin
FROM
    book_table B
    JOIN sales_table S ON B.book = S.book
WHERE
    B.author NOT IN ('Faulkner', 'Austen')
ORDER BY
    gross_margin DESC
LIMIT 10'''
cheat(op_chall_c)
"""
Explanation: Quick Exercise:
Give the calculated column the alias gross_profit, rerun. See how nice aliases are?
Add a WHERE clause to only see transactions where gross_profit is over $5, rerun.
Add an ORDER BY clause to sort by gross_profit with the most profitable transaction is listed first, rerun.
Challenge:
Pull book name and author's last name
Calculate the gross margin per transaction, give the calculated column the alias gross_margin
Use one-letter aliases for all the table names
Only return rows where the author's name is NOT Faulkner or Austen
Sort your results with the highest margin transaction listed first
Limit your results to 10 rows
End of explanation
"""
# Concatenation demo: pre-filled query joining first and last names with ||.
conc_q = widgets.Textarea(
    value='''SELECT
    first_name || last_name
FROM
    auth_table''',
    width='50em', height='7em')
conc_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(conc_q)
display(conc_b)
run_q(conc_q, conc_b)
"""
Explanation: Concatenating
<font color='#1f5fd6'>Microsoft SQL Server | <font color='#1f5fd6'>MySQL | <font color='#1f5fd6'>Oracle | <font color='#1f5fd6'>SQLite </font>
:------------------: | :---: | :----: | :----:
CONCAT(column_a, column_b) or + | CONCAT(column_a, column_b) | CONCAT(column_a, column_b) or || | ||
SELECT <br>
column_a || column_b, ➞ combines the characters of column_a & column_b <br>
column_a || ' ' || column_b ➞ combines the characters of column_a & column_b with a space in between <br>
FROM <br>
table_name
This one is extremely straightforward. This allows you to non-mathematically combine values. So "some || word" becomes "someword", and "some || ' ' || word" becomes "some word".
End of explanation
"""
# Aggregate-function demo: pre-filled SUM(revenue) query.
sum_q = widgets.Textarea(
    value='''SELECT
    sum(revenue)
FROM
    sales_table''',
    width='50em', height='17em')
sum_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(sum_q)
display(sum_b)
run_q(sum_q, sum_b)
"""
Explanation: Quick Exercises:
Fix the query so that there is a space between the names, rerun
Give the concatenated column an alias, rerun
Rewrite the query so that it follows the format "last_name, first_name" instead
<a id='functions'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
SOME_FUNCTION(column_a), ➞ performs the function on the column <br>
FROM <br>
table_name<br>
[WHERE clause] <br>
[ORDER BY clause] <br>
[LIMIT clause] <br>
Functions work similarly to functions in Excel - you can apply them to entire columns. There are tons more functions than the ones listed below, just Google what you want to do to find more.
Short List of Functions:
FUNCTION | DESCRIPTION
:------- | :-------------
AVG(col) | Averages values
COUNT(col) | Counts the number of rows with non-null values in the column
COUNT(*) | Counts the number of rows in the table
COUNT(DISTINCT(col)) | Counts the number of unique values in the column
GROUP_CONCAT(col, 'separator') | Returns a comma-separated list of values, specify a separator in quotes
MAX(col) | Returns the maximum value
MIN(col) | Returns the minimum value
ROUND(AVG(col), x) | Rounds value to x decimals
SUM(col) | Sums values
UPPER(col) | If column is text, it will return all-caps version of the text
First, we'll start with SUM() to find the total revenue for all our transactions.
End of explanation
"""
sum_q_c ='''SELECT
SUM(revenue) AS total_rev,
ROUND(AVG(revenue), 2) AS avg_rev,
COUNT(revenue) AS total_transactions,
MAX(revenue) AS max_rev,
MIN(revenue) AS min_rev,
COUNT(DISTINCT(book)) AS distinct_books
FROM
sales_table
WHERE
book IN ('For Whom the Bell Tolls', 'Emma') '''
cheat(sum_q_c)
"""
Explanation: Quick Exercises:
Give the calculated column an alias, rerun
Add a line to the SELECT clause to find the average revenue per transaction and give the column an alias, rerun
Edit the average column so that your results are rounded to the nearest cent, rerun
Add a line to the SELECT clause to count the total number of transactions and give the column an alias, rerun
Add 2 lines to the SELECT clause to see the minimum and maximum revenue earned on a single transaction, rerun
Add a line to the SELECT clause to see a count of the number of distinct books that appear in sales_table
Add a WHERE clause to only see results for the books "For Whom the Bell Tolls" and "Emma"
End of explanation
"""
# Function challenge: blank editor + Run button, with a sample ROUND/AVG answer.
function_chall = widgets.Textarea(value='', width='50em', height='11em')
function_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(function_chall)
display(function_chall_b)
run_q(function_chall, function_chall_b)
function_chall_c = '''SELECT
    ROUND(AVG(B.cogs), 2) AS avg_cogs
FROM
    book_table B
    JOIN auth_table A ON B.author = A.last_name
WHERE
    A.country = 'USA'
    '''
cheat(function_chall_c)
"""
Explanation: Challenge:
Write a query to find the average cost of goods for books whose authors are from the US (USA). Round the number to the nearest cent. Use an alias for your column.
End of explanation
"""
# GROUP_CONCAT challenge: blank editor + Run button, with a sample answer.
function_chall2 = widgets.Textarea(value='', width='50em', height='10em')
function_chall2_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(function_chall2)
display(function_chall2_b)
run_q(function_chall2, function_chall2_b)
function_chall2_c = '''SELECT
    GROUP_CONCAT(last_name)
FROM
    auth_table
WHERE
    last_name NOT IN ('Austen', 'Shakespeare')
    '''
cheat(function_chall2_c)
"""
Explanation: Challenge:
Try out the GROUP_CONCAT function. Write a query to select GROUP_CONCAT(last_name) from the auth_table, only return results where the author is NEITHER Austen NOR Shakespeare. After you get your query to work, change it to GROUP_CONCAT(last_name, ' / ') and rerun.
End of explanation
"""
# One-shot setup button: inserts a row with NULL values for the COUNT(*) demo.
insert_null_b = widgets.Button(description='Click here JUST ONCE before starting',
                               width='20em', height='3em', color='white',
                               background_color='#1f5fd6', border_color='#1f5fd6')
display(insert_null_b)

def insert_null_button(b):
    """Insert the Homer row (NULL first_name/birth_year), then disable the button.

    Renamed from insert_button to stop shadowing the earlier handler of the
    same name; disabling enforces the "JUST ONCE" label so repeated clicks
    can't insert duplicate rows.
    """
    null_query1 = '''INSERT INTO auth_table VALUES ('Homer', NULL, 'Greece', NULL)'''
    run(null_query1)
    b.disabled = True  # ipywidgets Button trait; blocks further clicks
    print('A new row has been added to the auth_table!')

insert_null_b.on_click(insert_null_button)
"""
Explanation: COUNT(*) vs COUNT(column_name)
End of explanation
"""
# COUNT(*) vs COUNT(column) challenge: blank editor + Run button (no cheat here).
star_chall = widgets.Textarea(value='', width='50em', height='12em')
star_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(star_chall)
display(star_chall_b)
run_q(star_chall, star_chall_b)
"""
Explanation: It's much more common to use COUNT(*) than COUNT(column_name) when you are trying to get a count of the number of rows in your result-set. This is because COUNT(*) will capture all rows, while COUNT(column_name) will skip over NULL values in that particular row.
Challenge:
We've just added a new row to the auth_table that has some NULL (blank) values. Start by writing a query to view everything (SELECT *) in the auth_table and make a note of the new row.
End of explanation
"""
star_chall_c ='''SELECT
COUNT(first_name),
COUNT(*)
FROM
auth_table
'''
cheat(star_chall_c)
"""
Explanation: Challenge Continued...
Delete * and replace it with COUNT(first_name), rerun
Add a line (but don't erase anything) to the SELECT clause: COUNT(*)
End of explanation
"""
# Functions + operators demo: pre-filled total gross-profit query.
fun_op_q = widgets.Textarea(
    value='''SELECT
    SUM(S.revenue) - SUM(B.cogs) AS gross_profit
FROM
    book_table B
    JOIN sales_table S ON S.book = B.book''',
    width='50em', height='8em')
fun_op_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(fun_op_q)
display(fun_op_b)
run_q(fun_op_q, fun_op_b)
"""
Explanation: What's going on:
When you use COUNT(first_name), SQL skipped over the Homer row because there was no value in the first_name column. COUNT(*), on the other hand, looks across all columns, so as long as a row has a value in at least one column, it'll get included in the count. You might argue that you could just use country or last_name, but the fact is that * is just way easier and less time-consuming to type out. Overwhelmingly, people opt for COUNT(*) instead of COUNT(column_name) unless they are interested in overlooking NULL values.
Functions + Operators
You can use functions together with operators to do more complex calculations. Below, we've calculated our total gross profit using both the SUM() function and subtraction:
End of explanation
"""
# Functions + operators challenge: blank editor + Run button, with an answer
# string that also carries an inline "- OR -" note for the reader.
fun_op_chall = widgets.Textarea(value='', width='50em', height='8em')
fun_op_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(fun_op_chall)
display(fun_op_chall_b)
run_q(fun_op_chall, fun_op_chall_b)
fun_op_chall_c = '''SELECT
    SUM(S.revenue - B.cogs) / SUM(S.revenue) AS gross_margin
FROM
    book_table B
    JOIN sales_table S ON S.book = B.book
    - OR - you can use AVG() instead of SUM() for all functions'''
cheat(fun_op_chall_c)
"""
Explanation: Quick Exercise:
Rewrite the SELECT clause so that you get the same results but only have to use SUM once.
Challenge:
Write a query to view gross margin for all transactions using functions in conjunction with operators. Extra credit: round your results to the nearest cent.
End of explanation
"""
# GROUP BY demo: pre-filled average-revenue-per-book query.
group_q = widgets.Textarea(
    value='''SELECT
    book,
    AVG(revenue)
FROM
    sales_table
GROUP BY
    book''',
    width='50em', height='20em')
group_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(group_q)
display(group_b)
run_q(group_q, group_b)
"""
Explanation: <a id='group_by'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
column_a,
SUM(column_b) ➞ sums up the values in column_b <br>
FROM <br>
table_name <br>
[WHERE clause] <br>
GROUP BY ➞ creates one group for each unique value in column_a <br>
column_a <br>
[ORDER BY clause] <br>
[LIMIT clause]
GROUP BY creates a group for each unique value in the column you specify. You'll always use it in conjunction with functions - it creates segments for your results. In plain English, the query below says: "Show me the average revenue per book from the sales_table"
End of explanation
"""
# GROUP BY challenge: blank editor + Run button, with a books-per-author answer.
group_chall = widgets.Textarea(value='', width='50em', height='12em')
group_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(group_chall)
display(group_chall_b)
run_q(group_chall, group_chall_b)
group_chall_c = '''SELECT
    author,
    count(*)
FROM
    book_table
GROUP BY
    author'''
cheat(group_chall_c)
"""
Explanation: Quick Exercises:
Change AVG() to SUM(), rerun
Give the book column the alias book_title, then use the alias in the GROUP BY clause, rerun
Sort the results so that the most profitable book is listed first, rerun
Add this to the SELECT clause: COUNT(*), rerun
Add a WHERE clause to only return results that are not written by Faulkner (hint: you'll have to join a table for this)
Challenge:
Write a query to count the number of books each author has listed in the book_table.
End of explanation
"""
# GROUP BY + JOIN challenge: blank editor + Run button, with a sample answer.
group_chall2 = widgets.Textarea(value='', width='50em', height='12em')
group_chall2_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(group_chall2)
display(group_chall2_b)
run_q(group_chall2, group_chall2_b)
group_chall2_c = '''SELECT
    B.author,
    SUM(S.revenue)
FROM
    book_table B
    JOIN sales_table S ON B.book = S.book
GROUP BY
    B.author'''
cheat(group_chall2_c)
"""
Explanation: Challenge:
Write a query that joins the book_table and the sales_table to see total revenue per author.
End of explanation
"""
# MAX/MIN per group challenge: blank editor + Run button, with a sample answer.
group_chall3 = widgets.Textarea(value='', width='50em', height='15em')
group_chall3_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(group_chall3)
display(group_chall3_b)
run_q(group_chall3, group_chall3_b)
group_chall3_c = '''SELECT
    book,
    MAX(revenue),
    MIN(revenue)
FROM
    sales_table
WHERE
    book NOT IN ('Macbeth','Hamlet')
GROUP BY
    book'''
cheat(group_chall3_c)
"""
Explanation: Challenge:
Write a query to see the maximum and minimum prices that each book sold for, but don't include Macbeth or Hamlet in your result-set:
End of explanation
"""
# GROUP BY + functions + operators demo: gross profit per book, pre-filled.
group_fun_op_q = widgets.Textarea(
    value='''SELECT
    B.book,
    SUM(S.revenue) - SUM(B.cogs) AS gross_profit
FROM
    sales_table S
    JOIN book_table B ON S.book = B.book
GROUP BY
    B.book''',
    width='50em', height='12em')
group_fun_op_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(group_fun_op_q)
display(group_fun_op_b)
run_q(group_fun_op_q, group_fun_op_b)
"""
Explanation: GROUP BY + Functions + Operators
You can use GROUP BY with functions and operators to do more complex analysis. Below, we use SUM() and the subtraction operator to see gross profit for each book.
End of explanation
"""
# Gross-margin-per-author challenge: blank editor + Run button + sample answer.
group_func_op_chall = widgets.Textarea(value='', width='50em', height='13em')
group_func_op_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(group_func_op_chall)
display(group_func_op_chall_b)
run_q(group_func_op_chall, group_func_op_chall_b)
group_func_op_chall_c = '''SELECT
    B.author,
    (SUM(S.revenue) - SUM(B.cogs)) / SUM(S.revenue) AS gross_margin
FROM
    book_table B
    JOIN sales_table S ON B.book = S.book
GROUP BY
    B.author'''
cheat(group_func_op_chall_c)
"""
Explanation: Challenge:
Write a query to find the gross margin per author using GROUP BY, functions and operators. Give the gross margin column an alias.
End of explanation
"""
# Combined GROUP BY challenge. Renamed from group_chall3 to group_chall4:
# this cell previously reused the group_chall3 names, silently clobbering
# the earlier challenge's widget references.
group_chall4 = widgets.Textarea(value='', width='50em', height='26em')
display(group_chall4)
group_chall4_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(group_chall4_b)
run_q(group_chall4, group_chall4_b)
group_chall4_c = '''SELECT
    B.author AS author_name,
    SUM(S.revenue) AS total_revenue,
    SUM(B.cogs) AS total_cogs,
    (SUM(S.revenue) - SUM(B.cogs))/SUM(S.revenue) AS gross_margin,
    COUNT(*) AS transaction_count,
    COUNT(DISTINCT(S.book)) AS distinct_book_titles,
    GROUP_CONCAT(DISTINCT(S.book)) AS book_list
FROM
    book_table B
    JOIN sales_table S ON B.book = S.book
WHERE
    author_name != 'Faulkner'
    AND S.book != 'Hamlet'
GROUP BY
    author_name
ORDER BY
    gross_margin DESC'''
cheat(group_chall4_c)
"""
Explanation: Challenge:
Copy and paste the query you just wrote for the previous challenge.
Add the following columns in the SELECT clause (in addition to author and gross margin columns), and give every column an alias:
total revenue
total cogs
a count of the number of individual transactions
BONUS: a count of the distinct book titles sold
BONUS: a comma-separated list of the book titles with no repeats
Only include results where the author isn't Faulker and the book isn't Hamlet
Sort your results so that the author with the highest average gross margin is listed first
End of explanation
"""
# HAVING demo: pre-filled total-revenue-per-book query filtered by group total.
having_q = widgets.Textarea(
    value='''SELECT
    book,
    SUM(revenue)
FROM
    sales_table
GROUP BY
    book HAVING SUM(revenue) > 100''',
    width='50em', height='11em')
having_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(having_q)
display(having_b)
run_q(having_q, having_b)
"""
Explanation: <a id='having'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
column_a, <br>
FUNCTION(column_b) <br>
FROM <br>
table_name <br>
[WHERE clause] <br>
GROUP BY <br>
column_a HAVING FUNCTION(column_b) > x ➞ returns groups whose value is greater than x <br>
[ORDER BY clause] <br>
[LIMIT clause] <br>
Use HAVING with GROUP BY in order to filter out groups that don't meet your criteria. Below, the plain English translation of this query says, "show me the total revenue for each book, but only show me books that have total revenue over $100"
End of explanation
"""
# HAVING challenge: blank editor + Run button, with a sample answer that
# uses the avg_cogs alias inside the HAVING clause.
having_chall = widgets.Textarea(value='', width='50em', height='10em')
having_chall_b = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(having_chall)
display(having_chall_b)
run_q(having_chall, having_chall_b)
having_chall_c = '''SELECT
    author,
    AVG(cogs) AS avg_cogs
FROM
    book_table
GROUP BY
    author HAVING avg_cogs > 10'''
cheat(having_chall_c)
"""
Explanation: Quick Exercises:
Change the > to <, rerun
Give the SUM(revenue) column an alias, and change the GROUP BY clause so that you're using the alias instead, rerun
Think about why this is different from WHERE. Take a moment to discuss this with your partner in class.
Challenge:
Write a query to see average COGs per author, but use HAVING to return authors whose average COGs is greater than $10. Assign the average COGs column an alias and use it in the GROUP BY clause.
End of explanation
"""
# HAVING-vs-WHERE demo 1: average cogs per author with a WHERE filter.
hw_q1 = widgets.Textarea(
    value='''SELECT
    author,
    AVG(cogs)
FROM
    book_table
WHERE
    author != 'Faulkner'
GROUP BY
    author''',
    width='50em', height='14em')
hw_b1 = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(hw_q1)
display(hw_b1)
run_q(hw_q1, hw_b1)
"""
Explanation: HAVING vs. WHERE
HAVING and WHERE both let you change the results you see in your result-set, but they operate quite differently. Take a look at the query below. It looks at the average cogs per author, but uses a WHERE clause to filter out 'Faulkner':
End of explanation
"""
# HAVING-vs-WHERE demo 2: same query plus a HAVING filter on the aggregate.
hw_q2 = widgets.Textarea(
    value='''SELECT
    author,
    AVG(cogs)
FROM
    book_table
WHERE
    author != 'Faulkner'
GROUP BY
    author HAVING AVG(cogs) > 11''',
    width='50em', height='14em')
hw_b2 = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(hw_q2)
display(hw_b2)
run_q(hw_q2, hw_b2)
"""
Explanation: Quick Exercise:
Now let's say you want to filter out Faulkner AND you only want to see authors whose average COGs are over $11. Your first thought might be to use the WHERE clause. Add AND AVG(cogs) > 11, rerun. Why do you think you're hitting an error?
You hit an error because the AVG(cogs) column was created by a function, and SQL doesn't let you put function-generated columns in the WHERE clause. You have to use HAVING instead. The query below will accomplish what we're trying to do, and returns a result-set that doesn't include Faulkner AND only shows authors whose average COGs are over $11.
End of explanation
"""
# HAVING-vs-WHERE demo 3: the deliberately WRONG version that filters
# individual rows (cogs > 11) in WHERE before grouping, skewing the averages.
hw_q3 = widgets.Textarea(
    value='''SELECT
    author,
    AVG(cogs)
FROM
    book_table
WHERE
    author != 'Faulkner'
    AND cogs > 11
GROUP BY
    author''',
    width='50em', height='15em')
hw_b3 = widgets.Button(
    description='Run', width='10em', height='2.5em',
    color='white', background_color='black', border_color='black')
display(hw_q3)
display(hw_b3)
run_q(hw_q3, hw_b3)
"""
Explanation: This seems relatively straightforward - but it's easy to forget and wind up with inaccurate results. Consider the following query. Why would this be wrong?
End of explanation
"""
sql_order_q = widgets.Textarea(value=
'''SELECT
author,
AVG(cogs)
FROM
book_table
WHERE
author != 'Faulkner'
GROUP BY
author HAVING AVG(cogs) > 11
ORDER BY
AVG(cogs)
LIMIT 3''',
width = '50em', height = '17.5em')
display(sql_order_q)
sql_order_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(sql_order_b)
run_q(sql_order_q, sql_order_b)
"""
Explanation: These results are inaccurate because instead of telling SQL to only return authors with average COGs over \$11, we've told SQL "only consider rows where COGs are over \$11". SQL dropped the rows with COGs under \$11 before it started grouping and averaging.
SQL's Order of Execution
When we read, we start at the top of a page and work our way to the bottom. That's not how SQL works. It actually starts with the FROM clause and jumps around. It's helpful to understand the order it follows to determine when to use HAVING and when to use WHERE.
<!-- This will also help clear up some other issues. Ever wonder why you can mention a column in the `WHERE` clause that you don't mention in the `SELECT` clause? Or why SQL knows the table alias that you're referring to in the `SELECT` clause even though you don't assign aliases to tables until later in the query? This is why.-->
We write the clauses in this order:
SELECT <br>
FROM <br>
JOIN...ON <br>
WHERE <br>
GROUP BY...HAVING <br>
ORDER BY <br>
LIMIT <br>
However, SQL reads and executes the clauses in this order:
FROM <br>
JOIN...ON <br>
WHERE <br>
SELECT <br>
GROUP BY...HAVING <br>
ORDER BY <br>
LIMIT <br>
Here's a query we've seen before, but now we've added a few more clauses so that we can see all of them in action:
End of explanation
"""
hw_q4 = widgets.Textarea(value=
'''SELECT
author,
AVG(cogs)
FROM
book_table
WHERE
author != 'Faulkner'
AND cogs > 11
GROUP BY
author''',
width = '50em', height = '15em')
display(hw_q4)
hw_b4 = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(hw_b4)
run_q(hw_q4, hw_b4)
"""
Explanation: The GIF below shows the order that SQL follows the steps:
Let's revisit the query that gave us the skewed average:
End of explanation
"""
have_chall2 = widgets.Textarea(value='',width = '50em', height = '12em')
display(have_chall2)
have_chall2_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(have_chall2_b)
run_q(have_chall2, have_chall2_b)
have_chall2_c ='''SELECT
B.author,
SUM(S.revenue) AS total_rev
FROM
book_table B
JOIN sales_table S ON B.book = S.book
GROUP BY
author HAVING total_rev > 200'''
cheat(have_chall2_c)
"""
Explanation: Now that we know the order in which SQL executes commands, we can see what went wrong. The rows with COGs under $11 were eliminated before SQL averaged COGs for each group:
Challenge:
Write a query to join book_table and sales_table. Select author and total revenue, but only return authors whose total revenue was over $200
End of explanation
"""
have_chall3 = widgets.Textarea(value='', width = '50em', height = '16em')
display(have_chall3)
have_chall3_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(have_chall3_b)
run_q(have_chall3, have_chall3_b)
have_chall3_c ='''SELECT
A.country,
COUNT(*) AS count_of_sales
FROM
sales_table S
JOIN book_table B ON S.book = B.book
JOIN auth_table A ON A.last_name = B.author
WHERE
A.last_name != 'Hemingway'
GROUP BY
A.country
NOTE: you can also use "B.author != 'Hemingway'" in the WHERE clause to get the same results'''
cheat(have_chall3_c)
"""
Explanation: Challenge:
Write a query to join the auth_table with the sales_table (remember that this requires multiple joins). Count the number of sales per country (author's country of origin in the auth_table), but don't include sales from Hemingway.
End of explanation
"""
case1_q = widgets.Textarea(value=
'''SELECT
last_name,
CASE WHEN last_name = 'Austen' THEN 'True'
ELSE 'False'
END
FROM
auth_table''',
width = '50em', height = '13em')
display(case1_q)
case1_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(case1_b)
run_q(case1_q, case1_b)
"""
Explanation: <a id='case_when'></a>
<center>
Previous | Table of Contents | Next
</center>
Conditional Type | <font color='#1f5fd6'>Microsoft SQL Server | <font color='#1f5fd6'>MySQL | <font color='#1f5fd6'>Oracle | <font color='#1f5fd6'>SQLite </font>
:--------------- |:------------------ |:--- |:---- |:----
IF | IF logical_test PRINT value_if_true | IF(logical_test, value_if_true, value_if_false) (same as Excel) | IF logical_test THEN value_if_true ELSIF...END IF | NOT SUPPORTED
CASE WHEN | ✓ | ✓ | ✓ | ✓
SELECT <br>
CASE WHEN some_column = x THEN value_if_true <br>
WHEN some_column = y THEN other_value_if_true <br>
ELSE value_if_false <br>
END <br>
FROM <br>
some_table <br>
Because SQLite doesn't support IF statements, we're going to focus on CASE WHEN. CASE WHEN lets you accomplish the same thing by setting logical tests and conditional values, but it has the added bonus of freeing you from ever needing to nest multiple IF statements.
Let's start very simple. The following query uses a logical test to create a column where the value is "true" if the author is Austen, and "false" if the author is not Austen:
End of explanation
"""
case2_q = widgets.Textarea(value=
'''SELECT
book,
revenue,
CASE WHEN revenue < 10 THEN "<$10"
WHEN revenue BETWEEN 10 AND 15 THEN "$10-15"
WHEN revenue > 15 THEN ">$15"
END AS revenue_category
FROM
sales_table
''',
width = '50em', height = '15em')
display(case2_q)
case2_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(case2_b)
run_q(case2_q, case2_b)
"""
Explanation: Quick Exercises:
Give the CASE WHEN column an alias (immediately after END), rerun
Change the query so that instead of "True", the query returns Austen's first name (use the first_name column), rerun
Add something to the CASE WHEN column so that the query returns Faulkner's first name as well (look at the example code above for help), rerun.
Using CASE WHEN to create categories
CASE WHEN allows you to set multiple logical tests, which can help you create buckets or categories. In Excel, you'd have to nest multiple logical tests in an IF statement (ie. IF(logical_test, value_if_true, IF(other_logical_test, value_if_true, value_if_false))...very messy). With CASE WHEN, you can add an infinite number very easily.
Let's say that rather than caring about the exact revenue for each transaction, you only cared whether it was under \$10, between \$10 and \$15, or over \$15. That's easy to do with CASE WHEN. We'll include the revenue column as well so you can more easily see what's going on:
End of explanation
"""
case3_q = widgets.Textarea(value=
'''SELECT
CASE WHEN revenue < 10 THEN "<$10"
WHEN revenue BETWEEN 10 AND 15 THEN "$10-15"
WHEN revenue > 15 THEN ">$15"
END AS revenue_category,
COUNT(*) AS total_sales,
SUM(revenue) AS total_revenue
FROM
sales_table
GROUP BY
revenue_category
''',
width = '50em', height = '18em')
display(case3_q)
case3_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(case3_b)
run_q(case3_q, case3_b)
"""
Explanation: This might not immediately seem useful, but when you start grouping by your newly created categories, you'll be able to do all kinds of new analysis. Consider the query below. We use CASE WHEN to create a column that we then use in the GROUP BY clause. We've essentially created new groups where there were none before, and now we can assess the number of sales and total revenue from each revenue group.
End of explanation
"""
case_chall = widgets.Textarea(value='', width = '50em', height = '16em')
display(case_chall)
case_chall_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(case_chall_b)
run_q(case_chall, case_chall_b)
case_chall_c ='''SELECT
CASE WHEN
B.author = 'Austen' THEN 'Female'
ELSE 'Male' --- or you can say, "WHEN B.author IN ('Faulkner', 'Shakespeare', 'Hemingway') THEN 'Male' "
END AS gender,
SUM(S.revenue) AS total_revenue
FROM
book_table B
JOIN sales_table S ON B.book = S.book
GROUP BY
gender'''
cheat(case_chall_c)
"""
Explanation: Challenge:
Suppose you want to see total revenues broken out by male vs. female authors. Use CASE WHEN to create these groups - with Austen in the "female" group and Faulkner, Hemingway, and Shakespeare in the "male" group.
End of explanation
"""
case_pivot_ex = widgets.Textarea(value='''SELECT
date,
CASE WHEN
B.author = 'Austen' THEN 'Female'
ELSE 'Male'
END AS gender,
SUM(S.revenue) AS total_revenue
FROM
book_table B
JOIN sales_table S ON B.book = S.book
GROUP BY
date, gender'''
, width = '50em', height = '18em')
display(case_pivot_ex)
case_pivot_ex_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(case_pivot_ex_b)
run_q(case_pivot_ex, case_pivot_ex_b)
"""
Explanation: Using CASE WHEN to create a pivot table
Say you want to see revenue broken out by gender and by date. Right now, the only way we know how to do this is to add "date" to the GROUP BY clause. The query below is the same as the one from your last challenge, only we've added date to both the SELECT clause and the GROUP BY clause.
End of explanation
"""
case_pivot = widgets.Textarea(value=
'''SELECT
date,
SUM(CASE WHEN B.author = 'Austen' THEN revenue END) AS Female_Rev,
SUM(CASE WHEN B.author != 'Austen' THEN revenue END) AS Male_Rev
FROM
book_table B
JOIN sales_table S ON B.book = S.book
GROUP BY
date''',
width = '50em', height = '14em')
display(case_pivot)
case_pivot_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(case_pivot_b)
run_q(case_pivot, case_pivot_b)
"""
Explanation: The result-set above is almost useless - it's impossible to do meaningful analysis when you group by multiple columns. Instead, we'll use CASE WHEN nested inside a function to essentially create a pivot table:
End of explanation
"""
case_pivot1 = widgets.Textarea(value=
'''SELECT
date,
SUM(CASE WHEN B.author = 'Austen' THEN revenue END) AS Female_Rev,
SUM(CASE WHEN B.author != 'Austen' THEN revenue END) AS Male_Rev
FROM
book_table B
JOIN sales_table S ON B.book = S.book
GROUP BY
date''',
width = '50em', height = '18em')
display(case_pivot1)
case_pivot_b1 = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(case_pivot_b1)
run_q(case_pivot1, case_pivot_b1)
"""
Explanation: Note that while this CASE WHEN method will work in other RDBMSs, it's more common to use IF when you are only using a single logical test. In MySQL, for instance, the line for Female_Rev would look like this instead, which would translate to "sum up the revenue for any row where the author is Austen, and the number 0 whenever the author is not Austen":
SUM(IF(B.author = 'Austen', revenue, 0))
Quick Exercises:
The query from above has been reproduced below for these exercises (so you don't have to keep scrolling up and down).
1. Change the query so that you have separate columns for each individual author's revenue, rerun
2. Change SUM to AVG, rerun
3. Change AVG to COUNT - note that with conditional statements, you don't use an asterisk with COUNT. You need to stick with a specific column name
End of explanation
"""
case_pivot_chall = widgets.Textarea(value='',width = '50em', height = '18em')
display(case_pivot_chall)
case_pivot_chall_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(case_pivot_chall_b)
run_q(case_pivot_chall, case_pivot_chall_b)
case_pivot_chall_c ='''SELECT
CASE WHEN A.birth_year < 1700 THEN "Before 1700"
WHEN A.birth_year BETWEEN 1700 AND 1800 THEN "1700-1800"
WHEN A.birth_year > 1800 THEN "After 1800"
END AS era,
COUNT(CASE WHEN A.country = 'USA' THEN book END) AS count_from_USA,
COUNT(CASE WHEN A.country = 'England' THEN book END) AS count_from_England
FROM
book_table B
JOIN auth_table A ON B.author = A.last_name
GROUP BY
era
'''
cheat(case_pivot_chall_c)
"""
Explanation: Challenge:
In the SELECT clause:
Use CASE WHEN to create a column that creates buckets for author's birth_year: "Before 1700", "1700-1800", "After 1800"
Use CASE WHEN to create a column that returns the count of books by authors from USA
Use CASE WHEN to create a column that returns the count of books by authors from England
GROUP BY the birth_year bucket column that you created
End of explanation
"""
case_pivot_chall2 = widgets.Textarea(value='', width = '50em', height = '14em')
display(case_pivot_chall2)
case_pivot_chall2_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(case_pivot_chall2_b)
run_q(case_pivot_chall2, case_pivot_chall2_b)
case_pivot_chall2_c ='''SELECT
date,
COUNT(CASE WHEN book = 'For Whom the Bell Tolls' THEN revenue END) Bell_Tolls_Count,
COUNT(CASE WHEN book = 'Emma' THEN revenue END) Emma_Count,
COUNT(CASE WHEN book IN ('Macbeth', 'Hamlet') THEN revenue END) Macbeth_Hamlet_Count
FROM
sales_table
GROUP BY
date'''
cheat(case_pivot_chall2_c)
"""
Explanation: Challenge:
Write a query that returns a daily count of the sales of:
<u>For Whom the Bell Tolls</u> (in its own column)
<u>Emma</u> (in its own column)
<u>Macbeth</u> and <u>Hamlet</u> (in a combined column)
End of explanation
"""
insert_b = widgets.Button(description="Read the paragraph above before clicking", width='25em', height='3em', color='white',background_color='#1f5fd6', border_color='#1f5fd6')
display(insert_b)
def insert_button(b):
    """Click handler: re-insert the extra demo rows into auth_table and book_table.

    The SQL literals must match the rows originally added during the JOIN
    exercises, so later nested-query examples find the expected data.
    """
    statements = (
        '''INSERT INTO auth_table VALUES ('Tolstoy', 'Leo', 'Russia', 1828)''',
        '''INSERT INTO auth_table VALUES ('Twain', 'Mark', 'USA', 1835)''',
        '''INSERT INTO book_table VALUES ('Jude the Obscure', '11.25', 'Hardy')''',
        '''INSERT INTO book_table VALUES ('The Age of Innocence', '14.20', 'Wharton')''',
    )
    # Execute each INSERT individually via the notebook's run() helper.
    for stmt in statements:
        run(stmt)
    print('New rows have been added to auth_table and book_table!')
insert_b.on_click(insert_button)
"""
Explanation: <a id='nesting'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
column_a <br>
FROM <br>
table_x <br>
WHERE <br>
column_a IN (SELECT column_b FROM table_y)
Read first: For this section, we'll need the extra rows in the auth_table and book_table that we added during the JOIN exercises. If you've closed the program or re-run it since you last added those rows, then click the button below to re-add them.
End of explanation
"""
nested_q1 = widgets.Textarea(value=
'''SELECT
COUNT(DISTINCT(book)) AS Count_of_Distinct_Books
FROM
sales_table''',
width = '50em', height = '7em')
display(nested_q1)
nested_b1 = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(nested_b1)
run_q(nested_q1, nested_b1)
"""
Explanation: To be totally honest, you likely won't be writing nested queries yourself until you've become much more comfortable with SQL. However, it's good to learn about them because you'll likely encounter them when coworkers share queries with you.
Start by looking at the two queries and their outputs below:
End of explanation
"""
nested_q2 = widgets.Textarea(value=
'''SELECT
COUNT(DISTINCT(book)) AS Count_of_Distinct_Books
FROM
book_table''',
width = '50em', height = '7em')
display(nested_q2)
nested_b2 = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(nested_b2)
run_q(nested_q2, nested_b2)
"""
Explanation: NOTE THAT YOU NEED TO HIT "RUN" AGAIN FOR THE QUERY BELOW
(It should return the number 13. If it doesn't, click the blue button above to update the book_table, then re-run the query below)
End of explanation
"""
nested_q3 = widgets.Textarea(value=
'''SELECT
book
FROM
book_table
WHERE
book NOT IN (SELECT book FROM sales_table)''',
width = '50em', height = '10em')
display(nested_q3)
nested_q3_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(nested_q3_b)
run_q(nested_q3, nested_q3_b)
"""
Explanation: From the count of distinct books in each table, we see that there are two books in our book_table (our inventory) that haven't made a single sale. Imagine if both tables had thousands of rows - it'd be a nightmare to try to figure out which were the books with no sales. However, a nested query can help us out.
The query below uses a nested query in the WHERE clause. In plain English, it says "Show me the books from the book_table, but not the ones that also show up in the sales_table":
End of explanation
"""
nest_chall = widgets.Textarea(value='', width = '50em', height = '12em')
display(nest_chall)
nest_chall_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(nest_chall_b)
run_q(nest_chall, nest_chall_b)
nest_chall_c ='''SELECT
last_name
FROM
auth_table
WHERE
last_name NOT IN (SELECT author FROM book_table)
'''
cheat(nest_chall_c)
"""
Explanation: Challenge:
Write a query to see authors who appear in the auth_table but don't show up in the book_table.
End of explanation
"""
nest_q3 = widgets.Textarea(value=
'''SELECT
SUM(revenue)
FROM
sales_table
WHERE
book IN (SELECT
book
FROM
book_table B
JOIN auth_table A ON B.author = A.last_name
WHERE
A.country = 'England')''',
width = '50em', height = '18em')
display(nest_q3)
nest_b3 = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(nest_b3)
run_q(nest_q3, nest_b3)
"""
Explanation: You can also use nested queries to avoid the need for multiple JOIN clauses. Suppose you wanted to see the total revenue for books by authors from England. Previously, we would have joined the sales_table to the book_table, and then the book_table to the auth_table in order to be able to work with both the revenue column and the country column:
End of explanation
"""
nest_q3_explained = widgets.Textarea(value='',width = '50em', height = '12em')
display(nest_q3_explained)
nest_b3_explained = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(nest_b3_explained)
run_q(nest_q3_explained, nest_b3_explained)
"""
Explanation: Let's break down what's going on here. First, copy and paste the nested query (the part in parentheses) in the cell below, then run it:
End of explanation
"""
nest_q3_explained2 = widgets.Textarea(value=
'''SELECT
SUM(revenue)
FROM
sales_table
WHERE
book IN ( )''',
width = '50em', height = '12em')
display(nest_q3_explained2)
nest_b3_explained2 = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(nest_b3_explained2)
run_q(nest_q3_explained2, nest_b3_explained2)
"""
Explanation: Next, take the output that you just produced and: <br>
• comma-separate each book <br>
• wrap each book in quotation marks <br>
• paste your list between the parentheses in the WHERE clause below: <br>
• rerun the query <br>
End of explanation
"""
union_q = widgets.Textarea(value=
'''SELECT
book AS selection
FROM
book_table
UNION
SELECT
first_name AS selection
FROM
auth_table''',
width = '50em', height = '16em')
display(union_q)
union_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(union_b)
run_q(union_q, union_b)
"""
Explanation: This is essentially the same process that SQL walks through when you run a nested query. It pulls the list of books from the nested query, then uses that list in the WHERE clause of the dominant query.
<img align="left" src="http://i.imgur.com/sRHO5xr.png"> <br>
The query above produces the same results as if you'd done a multiple join, but it's much more efficient. That's because SQL can just get the book titles it needs from the nested query and plug them into the dominant query, rather than needing to do all the work of duplicating rows to create joined tables. Use nested queries for more efficient joins whenever possible.
<a id='union'></a>
<center>
Previous | Table of Contents | Next
</center>
SELECT <br>
some_column <br>
FROM <br>
table_x <br>
UNION ➞ or use UNION ALL, see explanation below <br>
SELECT <br>
some_other_column <br>
FROM <br>
table_y <br>
UNION and UNION ALL allow you to attach two completely separate queries. UNION causes the combined output of the first and second queries to be de-duplicated and sorted by default (or you can add an ORDER BY clause). UNION ALL ensures that the results from the second query all appear after the results from the first query.
We'll start with a very, very simple illustration and work our way into more complex versions of UNION queries. First, consider the query below. We're pulling all books from the book table with the first query, and all the authors' first names from the auth_table with the second query. By using UNION, we're telling SQL to return the results of both these queries in the same column.
End of explanation
"""
union_chall = widgets.Textarea(value='',width = '50em', height = '25em')
display(union_chall)
union_chall_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(union_chall_b)
run_q(union_chall, union_chall_b)
union_chall_c ='''SELECT
book as Item,
cogs as COGs,
author as Creator
FROM
book_table
UNION
SELECT
film,
cogs,
director
FROM
movie_table
ORDER BY
Item'''
cheat(union_chall_c)
"""
Explanation: Quick Exercise:
Change the query above to use UNION ALL instead of UNION and re-run. Make sure you understand how the output changes.
Delete AS selection in the second query and rerun.
Rename selection to something else in the first query and rerun.
Add cogs in the SELECT clause in the first query and country to the SELECT clause in the second query, rerun.
Delete cogs in the first query and rerun. Can you think why you're hitting an error?
Useful applications for UNION
The above example is just a simple illustration of how UNION functions, but it's not very useful as a practical application. Now lets try UNION in a more useful way.
Let's say that our imaginary book store decides to start stocking a few movies, so we've created a new table to manage this new inventory:
Now let's say we wanted to view all of our store inventory, COGs, and the author or director of each item. We don't want to join the movie_table and the book_table - there's nothing to really join them on. However, it'd be useful to stack them.
Challenge:
Use UNION to write a query to view the contents of both movie_table and book_table in a single table. The column-headers should be: Item, COGs, and Creator. Order by item title (hint: with UNION, the ORDER BY clause can only go after the second query).
End of explanation
"""
union_q2 = widgets.Textarea(value=
'''SELECT
book,
cogs
FROM
book_table
UNION ALL
SELECT
'Average COGs',
ROUND(AVG(cogs), 2)
FROM
book_table''',
width = '50em', height = '19em')
display(union_q2)
union_b2 = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(union_b2)
run_q(union_q2, union_b2)
"""
Explanation: Using UNION to add totals and subtotals:
Take a look at the query below. You'll see it pulls the COGs and book title for each book. It also uses UNION ALL to add a final line - a summary row averaging all cogs:
End of explanation
"""
union_chall1 = widgets.Textarea(value='',
width = '50em', height = '22em')
display(union_chall1)
union_chall1_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(union_chall1_b)
run_q(union_chall1, union_chall1_b)
union_chall1_c ='''SELECT
book,
SUM(revenue) AS total_revenue
FROM
sales_table
GROUP BY
book
UNION ALL
SELECT
'NULL',
SUM(revenue)
FROM
sales_table
'''
cheat(union_chall1_c)
"""
Explanation: Quick Exercise:
Remove the ALL from UNION ALL and rerun. See how ALL can be useful?
Delete 'Average COGs', from the second query and rerun. Make sure you understand why there's an error. Fix it and rerun.
Challenge:
Write a query that totals revenue per book from the sales_table. Use UNION ALL to add a summary line that totals revenue for all books.
End of explanation
"""
HTML(run('''SELECT
B.author AS author_last_name,
B.book AS book_title,
SUM(S.revenue) AS sum_revenue
FROM
sales_table S
JOIN book_table B on S.book = B.book
GROUP BY
B.book
UNION
SELECT
UPPER(B.author),
'TOTAL REVENUE',
SUM(S.revenue)
FROM
sales_table S
JOIN book_table B on S.book = B.book
GROUP BY
B.author
''').to_html(index=False))
union_chall2 = widgets.Textarea(value='', width = '50em', height = '30em')
display(union_chall2)
union_chall2_b = widgets.Button(description='Run', width='10em', height='2.5em', color='white',background_color='black', border_color='black')
display(union_chall2_b)
run_q(union_chall2, union_chall2_b)
union_chall2_c ='''SELECT
B.author AS author_last_name,
B.book AS book_title,
SUM(S.revenue) AS sum_revenue
FROM
sales_table S
JOIN book_table B on S.book = B.book
GROUP BY
B.book
UNION
SELECT
UPPER(B.author),
'TOTAL REVENUE',
SUM(S.revenue)
FROM
sales_table S
JOIN book_table B on S.book = B.book
GROUP BY
B.author'''
cheat(union_chall2_c)
"""
Explanation: Extra Challenging Challenge:
Write a query that totals revenue per book. Add subtotal lines for each author's revenue above their books. The output should look like this (use Google to figure out how to capitalize the authors' names for the subtotal rows).
End of explanation
"""
HTML(run('''SELECT
book,
SUM(revenue) AS total_revenue,
COUNT(*) AS count_of_sales
FROM
sales_table
GROUP BY
book
UNION ALL
SELECT
'NULL',
SUM(revenue),
COUNT(*)
FROM
sales_table''').to_html(index=False))
"""
Explanation: <a id='rollup'></a>
<center>
Previous | Table of Contents | Next
</center>
<font color='#1f5fd6'>Microsoft SQL Server | <font color='#1f5fd6'>MySQL | <font color='#1f5fd6'>Oracle | <font color='#1f5fd6'>SQLite </font>
:------------------: | :---: | :----: | :----:
GROUP BY column_a WITH ROLLUP | GROUP BY column_a WITH ROLLUP | GROUP BY ROLLUP (column_a) | not supported
Unfortunately, SQLite doesn't have a simple way to do ROLLUP like the other RDBMSs, so we can't practice it here. However, the concept is very straightforward: it's exactly like using UNION to add a summary row, except way simpler. Below is what the query would look like if we were using Microsoft or MySQL. Take a look at the query and the output to understand what's going on, even if you can't practice it:
SELECT <br>
book, <br>
SUM(revenue) AS total_revenue, <br>
COUNT(*) AS count_of_sales <br>
FROM <br>
sales_table <br>
GROUP BY <br>
book WITH ROLLUP <br>
End of explanation
"""
|
tgsmith61591/pyramid | examples/quick_start_example.ipynb | mit | import numpy as np
import pmdarima as pm
print('numpy version: %r' % np.__version__)
print('pmdarima version: %r' % pm.__version__)
"""
Explanation: auto_arima
Pmdarima bring R's auto.arima functionality to Python by wrapping statsmodel ARIMA and SARIMAX models into a singular scikit-learn-esque estimator (pmdarima.arima.ARIMA) and adding several layers of degree and seasonal differencing tests to identify the optimal model parameters.
Pmdarima ARIMA models:
Are fully picklable for easy persistence and model deployment
Can handle seasonal terms (unlike statsmodels ARIMAs)
Follow sklearn model fit/predict conventions
End of explanation
"""
from pmdarima.datasets import load_wineind
# this is a dataset from R
wineind = load_wineind().astype(np.float64)
"""
Explanation: We'll start by defining an array of data from an R time-series, wineind:
```r
forecast::wineind
Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec
1980 15136 16733 20016 17708 18019 19227 22893 23739 21133 22591 26786 29740
1981 15028 17977 20008 21354 19498 22125 25817 28779 20960 22254 27392 29945
1982 16933 17892 20533 23569 22417 22084 26580 27454 24081 23451 28991 31386
1983 16896 20045 23471 21747 25621 23859 25500 30998 24475 23145 29701 34365
1984 17556 22077 25702 22214 26886 23191 27831 35406 23195 25110 30009 36242
1985 18450 21845 26488 22394 28057 25451 24872 33424 24052 28449 33533 37351
1986 19969 21701 26249 24493 24603 26485 30723 34569 26689 26157 32064 38870
1987 21337 19419 23166 28286 24570 24001 33151 24878 26804 28967 33311 40226
1988 20504 23060 23562 27562 23940 24584 34303 25517 23494 29095 32903 34379
1989 16991 21109 23740 25552 21752 20294 29009 25500 24166 26960 31222 38641
1990 14672 17543 25453 32683 22449 22316 27595 25451 25421 25288 32568 35110
1991 16052 22146 21198 19543 22084 23816 29961 26773 26635 26972 30207 38687
1992 16974 21697 24179 23757 25013 24019 30345 24488 25156 25650 30923 37240
1993 17466 19463 24352 26805 25236 24735 29356 31234 22724 28496 32857 37198
1994 13652 22784 23565 26323 23779 27549 29660 23356
```
Note that the frequency of the data is 12:
```r
frequency(forecast::wineind)
[1] 12
```
End of explanation
"""
from pmdarima.arima import ARIMA
fit = ARIMA(order=(1, 1, 1), seasonal_order=(0, 1, 1, 12)).fit(y=wineind)
"""
Explanation: Fitting an ARIMA
We will first fit a seasonal ARIMA. Note that you do not need to call auto_arima in order to fit a model—if you know the order and seasonality of your data, you can simply fit an ARIMA with the defined hyper-parameters:
End of explanation
"""
fit = ARIMA(order=(1, 1, 1), seasonal_order=None).fit(y=wineind)
"""
Explanation: Also note that your data does not have to exhibit seasonality to work with an ARIMA. We could fit an ARIMA against the same data with no seasonal terms whatsoever (but it is unlikely that it will perform better; quite the opposite, likely).
End of explanation
"""
# fitting a stepwise model:
stepwise_fit = pm.auto_arima(wineind, start_p=1, start_q=1, max_p=3, max_q=3, m=12,
start_P=0, seasonal=True, d=1, D=1, trace=True,
error_action='ignore', # don't want to know if an order does not work
suppress_warnings=True, # don't want convergence warnings
stepwise=True) # set to stepwise
stepwise_fit.summary()
"""
Explanation: Finding the optimal model hyper-parameters using auto_arima:
If you are unsure (as is common) of the best parameters for your model, let auto_arima figure it out for you. auto_arima is similar to an ARIMA-specific grid search, but (by default) uses a more intelligent stepwise algorithm laid out in a paper by Hyndman and Khandakar (2008). If stepwise is False, the models will be fit similar to a gridsearch. Note that it is possible for auto_arima not to find a model that will converge; if this is the case, it will raise a ValueError.
Fitting a stepwise search:
End of explanation
"""
rs_fit = pm.auto_arima(wineind, start_p=1, start_q=1, max_p=3, max_q=3, m=12,
start_P=0, seasonal=True, d=1, D=1, trace=True,
n_jobs=-1, # We can run this in parallel by controlling this option
error_action='ignore', # don't want to know if an order does not work
suppress_warnings=True, # don't want convergence warnings
stepwise=False, random=True, random_state=42, # we can fit a random search (not exhaustive)
n_fits=25)
rs_fit.summary()
"""
Explanation: Fitting a random search
If you don't want to use the stepwise search, auto_arima can fit a random search by enabling random=True. If your random search returns too many invalid (nan) models, you might try increasing n_fits or making it an exhaustive search (stepwise=False, random=False).
End of explanation
"""
from bokeh.plotting import figure, show, output_notebook
import pandas as pd
# init bokeh
output_notebook()
def plot_arima(truth, forecasts, title="ARIMA", xaxis_label='Time',
               yaxis_label='Value', c1='#A6CEE3', c2='#B2DF8A',
               forecast_start=None, **kwargs):
    """Plot an observed series and its forecasts on a single bokeh figure.

    Parameters
    ----------
    truth : array-like, the observed series
    forecasts : array-like, the forecasted values
    title : str, figure title
    xaxis_label : str, x-axis label
    yaxis_label : str, y-axis label
    c1 : str, line color for the observed series
    c2 : str, line color for the forecasted series
    forecast_start : int or None, x-position at which the forecasts begin;
        when None the forecasts are appended after the observed series
    **kwargs : passed through to ``bokeh.plotting.figure``

    Results
    -------
    p : bokeh figure (render with ``show(p)``)
    """
    # make truth and forecasts into pandas series
    n_truth = truth.shape[0]
    n_forecasts = forecasts.shape[0]

    # always plot truth the same
    truth = pd.Series(truth, index=np.arange(n_truth))

    # if no defined forecast start, start at the end
    if forecast_start is None:
        idx = np.arange(n_truth, n_truth + n_forecasts)
    else:
        # span the full forecast length starting at forecast_start
        # (np.arange(forecast_start, n_forecasts) produced an index of the
        # wrong length whenever forecast_start > 0)
        idx = np.arange(forecast_start, forecast_start + n_forecasts)
    forecasts = pd.Series(forecasts, index=idx)

    # set up the plot
    p = figure(title=title, plot_height=400, **kwargs)
    p.grid.grid_line_alpha = 0.3
    p.xaxis.axis_label = xaxis_label
    p.yaxis.axis_label = yaxis_label

    # add the lines
    p.line(truth.index, truth.values, color=c1, legend='Observed')
    p.line(forecasts.index, forecasts.values, color=c2, legend='Forecasted')

    return p
in_sample_preds = stepwise_fit.predict_in_sample()
in_sample_preds[:10]
show(plot_arima(wineind, in_sample_preds,
title="Original Series & In-sample Predictions",
c2='#FF0000', forecast_start=0))
"""
Explanation: Inspecting goodness of fit
We can look at how well the model fits in-sample data:
End of explanation
"""
next_25 = stepwise_fit.predict(n_periods=25)
next_25
# call the plotting func
show(plot_arima(wineind, next_25))
"""
Explanation: Predicting future values
After your model is fit, you can forecast future values using the predict function, just like in scikit-learn:
End of explanation
"""
stepwise_fit.update(next_25, maxiter=10) # take 10 more steps
stepwise_fit.summary()
updated_data = np.concatenate([wineind, next_25])
# visualize new forecasts
show(plot_arima(updated_data, stepwise_fit.predict(n_periods=10)))
"""
Explanation: Updating your model
ARIMAs create forecasts by using the latest observations. Over time, your forecasts will drift, and you'll need to update the model with the observed values. There are several solutions to this problem:
Fit a new ARIMA with the new data added to your training sample
You can either re-use the order discovered in the auto_arima function, or re-run auto_arima altogether.
Use the update method (preferred). This will allow your model to update its parameters by taking several more MLE steps on new observations (controlled by the maxiter arg) starting from the parameters it's already discovered. This approach will help you avoid over-fitting.
For this example, let's update our existing model with the next_25 we just computed, as if they were actually observed values.
End of explanation
"""
|
aflaxman/siaman16-va-minitutorial | 2-tutorial-notebook-solutions/4-va_csmf.ipynb | gpl-3.0 | import numpy as np, pandas as pd
"""
Explanation: We won't work through this notebook
We won't have time. But I thought I'd include it, in case you want to see exactly how I implement my population-level quality metric.
End of explanation
"""
def measure_prediction_quality(csmf_pred, y_test):
    """Calculate population-level prediction quality (CSMF Accuracy)

    Parameters
    ----------
    csmf_pred : pd.Series, predicted distribution of causes
    y_test : array-like, labels for test dataset

    Results
    -------
    csmf_acc : float, 1.0 for a perfect prediction; the chance-corrected
        version is (csmf_acc - .632) / (1 - .632)
    """
    # empirical cause distribution of the test labels
    csmf_true = pd.Series(y_test).value_counts() / float(len(y_test))
    # fix the denominator from the true distribution before any alignment
    denom = 2 * (1 - csmf_true.min())
    # align on the union of causes so a cause present on only one side
    # contributes its full mass to the error instead of propagating NaN
    # through the sum (which made the accuracy NaN)
    csmf_pred, csmf_true = csmf_pred.align(csmf_true, fill_value=0.0)
    csmf_acc = 1 - np.sum(np.absolute(csmf_pred - csmf_true)) / denom
    return csmf_acc
"""
Explanation: Let's put the CSMF Accuracy calculation right at the top
End of explanation
"""
csmf_pred = pd.Series({'cause_1': .5, 'cause_2': .5})
y_test = ['cause_1', 'cause_2']
measure_prediction_quality(csmf_pred, y_test)
csmf_pred = pd.Series({'cause_1': 0., 'cause_2': 1.})
y_test = ['cause_1']*1000 + ['cause_2']
measure_prediction_quality(csmf_pred, y_test)
"""
Explanation: How can I test this?
End of explanation
"""
val = {}
module = 'Adult'
val[module] = pd.read_csv('../3-data/phmrc_cleaned.csv')
def get_data(module):
    """Return (X, y, site) arrays for one VA module from the `val` tables."""
    df = val[module]
    # symptom indicators plus age/sex columns, with missing values coded 0
    X = np.array(df.filter(regex='(^s[0-9]+|age|sex)').fillna(0))
    y = np.array(df.gs_text34)   # gold-standard cause labels
    site = np.array(df.site)     # study site of each death
    return X, y, site
X, y, site = get_data(module)
X.shape
def my_resample(X, y, N2, csmf_new):
    """Randomly resample X and y so that resampled cause distribution follows
    csmf_new and there are N2 samples total

    Parameters
    ----------
    X : array-like, feature vectors
    y : array-like, corresponding labels
    N2 : int, number of samples in resampled results
    csmf_new : pd.Series, distribution of resampled data

    Results
    -------
    X_new : array-like, resampled feature vectors
    y_new : array-like, corresponding resampled labels
    """
    N, I = X.shape
    assert len(y) == N, 'X and y must have same length'

    causes = csmf_new.index
    J, = causes.shape  # trailing comma for sneaky numpy reasons

    # draw a count of examples for each cause according to csmf_new
    cnt_new = np.random.multinomial(N2, csmf_new)

    # expand the counts into an explicit label vector, cause by cause
    y_new = np.array([cause
                      for cnt, cause in zip(cnt_new, causes)
                      for _ in range(cnt)])

    # fill X_new by sampling (with replacement) rows of X with matching label
    X_new = np.zeros((len(y_new), I))
    for cause in causes:
        dst, = np.where(y_new == cause)   # rows of X_new to fill
        pool, = np.where(y == cause)      # candidate source rows in X
        assert len(pool) > 0, 'must have examples of each resampled cause'
        src = np.random.choice(pool, size=len(dst), replace=True)
        X_new[dst,] = X[src,]
    return X_new, y_new
def random_allocation(X_train, y_train):
    """Baseline predictor that spreads probability uniformly over causes.

    Returns a clf object whose ``my_predict(X_test)`` method returns the
    predicted cause distribution (a pd.Series over the causes in y_train).
    """
    clf = sklearn.base.BaseEstimator()
    causes = np.unique(y_train)
    # shape arguments to np.ones must be integers; the original
    # float(len(...)) raised TypeError under modern numpy
    J = len(causes)

    def my_predict(X_test):
        N = len(X_test)
        # every individual gets probability 1/J for every cause
        y_pred = np.ones((N, J)) / float(J)
        csmf_pred = pd.Series(y_pred.sum(axis=0), index=causes) / N
        return csmf_pred
    clf.my_predict = my_predict
    return clf
def my_key(module, clf):
    """Label used to group results by (module, classifier) pair."""
    return '%s-%s' % (module, clf)
import sklearn.model_selection
results = []
def measure_csmf_acc(my_fit_predictor, replicates=10):
    """Cross-validate a predictor and record its CSMF accuracy.

    my_fit_predictor : function that takes X,y returns clf object with my_predict method
          clf.my_predict takes X_test, return csmf_pred
    replicates : int, number of stratified train/test splits

    Results
    -------
    stores calculation in results dict,
    returns calc for adults
    """
    X, y, site = get_data(module)

    np.random.seed(12345)  # set seed for reproducibility

    # n_splits is the sklearn.model_selection spelling; n_iter was the
    # deprecated cross_validation module's name and raises TypeError here
    cv = sklearn.model_selection.StratifiedShuffleSplit(n_splits=replicates, test_size=0.25)
    for train_index, test_index in cv.split(X, y):
        # make train test split
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]

        # resample train set for equal class weights
        J = len(np.unique(y))
        csmf_flat = pd.Series(np.ones(J)/J, index=np.unique(y))
        X_train, y_train = my_resample(X_train, y_train, J*100, csmf_flat)

        clf = my_fit_predictor(X_train, y_train)

        # resample test set to have uninformative cause distribution
        csmf_rand = pd.Series(np.random.dirichlet(np.ones(J)), index=np.unique(y))
        X_test_resamp, y_test_resamp = my_resample(X_test, y_test, J*100, csmf_rand)

        # make predictions
        csmf_pred = clf.my_predict(X_test_resamp)

        # test predictions
        csmf_acc = measure_prediction_quality(csmf_pred, y_test_resamp)

        results.append({'csmf_acc':csmf_acc, 'key':my_key(module, clf)})

    df = pd.DataFrame(results)
    g = df.groupby('key')
    return g.csmf_acc.describe().unstack()
baseline_csmf_acc = measure_csmf_acc(random_allocation)
baseline_csmf_acc
import sklearn.naive_bayes
def nb_pr_allocation(X_train, y_train):
    """Fit a Bernoulli Naive Bayes classifier and attach a CSMF predictor."""
    clf = sklearn.naive_bayes.BernoulliNB()
    clf.fit(X_train, y_train)

    def my_predict(X_test):
        # average the individual-level posteriors to get a population CSMF
        probs = clf.predict_proba(X_test)
        csmf_pred = pd.Series(probs.sum(axis=0), index=clf.classes_)
        return csmf_pred / float(len(probs))
    clf.my_predict = my_predict
    return clf
measure_csmf_acc(nb_pr_allocation)
"""
Explanation: Things we don't have time for
An approach to really do the cross-validation out of sample:
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/messy-consortium/cmip6/models/emac-2-53-aerchem/land.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'messy-consortium', 'emac-2-53-aerchem', 'land')
"""
Explanation: ES-DOC CMIP6 Model Properties - Land
MIP Era: CMIP6
Institute: MESSY-CONSORTIUM
Source ID: EMAC-2-53-AERCHEM
Topic: Land
Sub-Topics: Soil, Snow, Vegetation, Energy Balance, Carbon Cycle, Nitrogen Cycle, River Routing, Lakes.
Properties: 154 (96 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:10
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Conservation Properties
3. Key Properties --> Timestepping Framework
4. Key Properties --> Software Properties
5. Grid
6. Grid --> Horizontal
7. Grid --> Vertical
8. Soil
9. Soil --> Soil Map
10. Soil --> Snow Free Albedo
11. Soil --> Hydrology
12. Soil --> Hydrology --> Freezing
13. Soil --> Hydrology --> Drainage
14. Soil --> Heat Treatment
15. Snow
16. Snow --> Snow Albedo
17. Vegetation
18. Energy Balance
19. Carbon Cycle
20. Carbon Cycle --> Vegetation
21. Carbon Cycle --> Vegetation --> Photosynthesis
22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
23. Carbon Cycle --> Vegetation --> Allocation
24. Carbon Cycle --> Vegetation --> Phenology
25. Carbon Cycle --> Vegetation --> Mortality
26. Carbon Cycle --> Litter
27. Carbon Cycle --> Soil
28. Carbon Cycle --> Permafrost Carbon
29. Nitrogen Cycle
30. River Routing
31. River Routing --> Oceanic Discharge
32. Lakes
33. Lakes --> Method
34. Lakes --> Wetlands
1. Key Properties
Land surface key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of land surface model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of land surface model code (e.g. MOSES2.2)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.3. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the processes modelled (e.g. dynamic vegetation, prognostic albedo, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_atmosphere_flux_exchanges')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "water"
# "energy"
# "carbon"
# "nitrogen"
# "phospherous"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Land Atmosphere Flux Exchanges
Is Required: FALSE Type: ENUM Cardinality: 0.N
Fluxes exchanged with the atmosphere.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.atmospheric_coupling_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Atmospheric Coupling Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of land surface coupling with the Atmosphere model component, which may be different for different quantities (e.g. dust: semi-implicit, water vapour: explicit)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "bare soil"
# "urban"
# "lake"
# "land ice"
# "lake ice"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.6. Land Cover
Is Required: TRUE Type: ENUM Cardinality: 1.N
Types of land cover defined in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.land_cover_change')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Land Cover Change
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how land cover change is managed (e.g. the use of net or gross transitions)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Tiling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general tiling procedure used in the land surface (if any). Include treatment of physiography, land/sea, (dynamic) vegetation coverage and orography/roughness
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.energy')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Conservation Properties
TODO
2.1. Energy
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how energy is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.water')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Water
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how water is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.conservation_properties.carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Carbon
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how carbon is conserved globally and to what level (e.g. within X [units]/year)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestep_dependent_on_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestepping Framework
TODO
3.1. Timestep Dependent On Atmosphere
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a time step dependent on the frequency of atmosphere coupling?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Overall timestep of land surface model (i.e. time between calls)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.timestepping_framework.timestepping_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Timestepping Method
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of time stepping method and associated time step(s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of land surface code
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Grid
Land surface grid
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the grid in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid --> Horizontal
The horizontal grid in the land surface
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the horizontal grid (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.horizontal.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the horizontal grid match the atmosphere?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Vertical
The vertical grid in the soil
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the vertical grid in the soil (not including any tiling)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.grid.vertical.total_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.2. Total Depth
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The total depth of the soil (in metres)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Soil
Land surface soil
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of soil in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_water_coupling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Heat Water Coupling
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the coupling between heat and water in the soil
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.number_of_soil layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 8.3. Number Of Soil layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the soil scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Soil --> Soil Map
Key properties of the land surface soil map
9.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of soil map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil structure map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.texture')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.3. Texture
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil texture map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.organic_matter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.4. Organic Matter
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil organic matter map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.5. Albedo
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil albedo map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.water_table')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.6. Water Table
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil water table map, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.continuously_varying_soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 9.7. Continuously Varying Soil Depth
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Does the soil properties vary continuously with depth?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.soil_map.soil_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.8. Soil Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil depth map
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10. Soil --> Snow Free Albedo
TODO
10.1. Prognostic
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is snow free albedo prognostic?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "soil humidity"
# "vegetation state"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, describe the dependencies of the snow free albedo calculations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.direct_diffuse')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "distinction between direct and diffuse albedo"
# "no distinction between direct and diffuse albedo"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Direct Diffuse
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe the distinction between direct and diffuse albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.snow_free_albedo.number_of_wavelength_bands')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 10.4. Number Of Wavelength Bands
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If prognostic, enter the number of wavelength bands used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Soil --> Hydrology
Key properties of the land surface soil hydrology
11.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of the soil hydrological model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river soil hydrology in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil hydrology tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.number_of_ground_water_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 11.5. Number Of Ground Water Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of soil layers that may contain water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.lateral_connectivity')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "perfect connectivity"
# "Darcian flow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.6. Lateral Connectivity
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe the lateral connectivity between tiles
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Bucket"
# "Force-restore"
# "Choisnel"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.7. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
The hydrological dynamics scheme in the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.number_of_ground_ice_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 12. Soil --> Hydrology --> Freezing
TODO
12.1. Number Of Ground Ice Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
How many soil layers may contain ground ice
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.ice_storage_method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Ice Storage Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the method of ice storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.freezing.permafrost')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.3. Permafrost
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the treatment of permafrost, if any, within the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Soil --> Hydrology --> Drainage
TODO
13.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General describe how drainage is included in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.hydrology.drainage.types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Gravity drainage"
# "Horton mechanism"
# "topmodel-based"
# "Dunne mechanism"
# "Lateral subsurface flow"
# "Baseflow from groundwater"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
Different types of runoff represented by the land surface model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Soil --> Heat Treatment
TODO
14.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General description of how heat treatment properties are defined
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of soil heat scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.3. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the soil heat treatment tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.vertical_discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.4. Vertical Discretisation
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the typical vertical discretisation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.heat_storage')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Force-restore"
# "Explicit diffusion"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.5. Heat Storage
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the method of heat storage
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.soil.heat_treatment.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "soil moisture freeze-thaw"
# "coupling with snow temperature"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.6. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe processes included in the treatment of soil heat
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Snow
Land surface snow
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of snow in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the snow tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.number_of_snow_layers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.3. Number Of Snow Layers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of snow levels used in the land surface scheme/model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.4. Density
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow density
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.water_equivalent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.5. Water Equivalent
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the snow water equivalent
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.heat_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.6. Heat Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of the heat content of snow
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.temperature')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.7. Temperature
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow temperature
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.liquid_water_content')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.8. Liquid Water Content
Is Required: TRUE Type: ENUM Cardinality: 1.1
Description of the treatment of snow liquid water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_cover_fractions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "ground snow fraction"
# "vegetation snow fraction"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.9. Snow Cover Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify cover fractions used in the surface snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "snow interception"
# "snow melting"
# "snow freezing"
# "blowing snow"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15.10. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Snow related processes in the land surface scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.11. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the snow scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "prescribed"
# "constant"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Snow --> Snow Albedo
TODO
16.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of snow-covered land albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.snow.snow_albedo.functions')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation type"
# "snow age"
# "snow density"
# "snow grain type"
# "aerosol deposition"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16.2. Functions
Is Required: FALSE Type: ENUM Cardinality: 0.N
*If prognostic, specify the functions on which snow albedo depends.*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Vegetation
Land surface vegetation
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vegetation in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 17.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of vegetation scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.dynamic_vegetation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.3. Dynamic Vegetation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there dynamic evolution of vegetation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.4. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vegetation tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_representation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "vegetation types"
# "biome types"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.5. Vegetation Representation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Vegetation classification used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "broadleaf tree"
# "needleleaf tree"
# "C3 grass"
# "C4 grass"
# "vegetated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.6. Vegetation Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of vegetation types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biome_types')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "evergreen needleleaf forest"
# "evergreen broadleaf forest"
# "deciduous needleleaf forest"
# "deciduous broadleaf forest"
# "mixed forest"
# "woodland"
# "wooded grassland"
# "closed shrubland"
# "opne shrubland"
# "grassland"
# "cropland"
# "wetlands"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.7. Biome Types
Is Required: FALSE Type: ENUM Cardinality: 0.N
List of biome types in the classification, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_time_variation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed (not varying)"
# "prescribed (varying from files)"
# "dynamical (varying from simulation)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.8. Vegetation Time Variation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How the vegetation fractions in each tile are varying with time
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.vegetation_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.9. Vegetation Map
Is Required: FALSE Type: STRING Cardinality: 0.1
If vegetation fractions are not dynamically updated, describe the vegetation map used (common name and reference, if possible)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.interception')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 17.10. Interception
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is vegetation interception of rainwater represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic (vegetation map)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.11. Phenology
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.phenology_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.12. Phenology Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation phenology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prescribed"
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.13. Leaf Area Index
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.leaf_area_index_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.14. Leaf Area Index Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of leaf area index
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.15. Biomass
Is Required: TRUE Type: ENUM Cardinality: 1.1
*Treatment of vegetation biomass *
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biomass_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.16. Biomass Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biomass
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.17. Biogeography
Is Required: TRUE Type: ENUM Cardinality: 1.1
Treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.biogeography_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.18. Biogeography Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation biogeography
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "light"
# "temperature"
# "water availability"
# "CO2"
# "O3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.19. Stomatal Resistance
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify what the vegetation stomatal resistance depends on
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.stomatal_resistance_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.20. Stomatal Resistance Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of the treatment of vegetation stomatal resistance
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.vegetation.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.21. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the vegetation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18. Energy Balance
Land surface energy balance
18.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of energy balance in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the energy balance tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.number_of_surface_temperatures')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 18.3. Number Of Surface Temperatures
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The maximum number of distinct surface temperatures in a grid cell (for example, each subgrid tile may have its own temperature)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "alpha"
# "beta"
# "combined"
# "Monteith potential evaporation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.4. Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
Specify the formulation method for land surface evaporation, from soil and vegetation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.energy_balance.processes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "transpiration"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18.5. Processes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Describe which processes are included in the energy balance scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19. Carbon Cycle
Land surface carbon cycle
19.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of carbon cycle in land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the carbon cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of carbon cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.anthropogenic_carbon')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "grand slam protocol"
# "residence time"
# "decay time"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.4. Anthropogenic Carbon
Is Required: FALSE Type: ENUM Cardinality: 0.N
Describe the treatment of the anthropogenic carbon pool
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.5. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the carbon scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 20. Carbon Cycle --> Vegetation
TODO
20.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.forest_stand_dynamics')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.3. Forest Stand Dynamics
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of forest stand dynamics
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.photosynthesis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Carbon Cycle --> Vegetation --> Photosynthesis
TODO
21.1. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for photosynthesis (e.g. type of photosynthesis, distinction between C3 and C4 grasses, Nitrogen depencence, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.maintainance_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22. Carbon Cycle --> Vegetation --> Autotrophic Respiration
TODO
22.1. Maintainance Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for maintenance respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.autotrophic_respiration.growth_respiration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.2. Growth Respiration
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the general method used for growth respiration
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23. Carbon Cycle --> Vegetation --> Allocation
TODO
23.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the allocation scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_bins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "leaves + stems + roots"
# "leaves + stems + roots (leafy + woody)"
# "leaves + fine roots + coarse roots + stems"
# "whole plant (no distinction)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.2. Allocation Bins
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify distinct carbon bins used in allocation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.allocation.allocation_fractions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "fixed"
# "function of vegetation type"
# "function of plant allometry"
# "explicitly calculated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23.3. Allocation Fractions
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how the fractions of allocation are calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.phenology.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24. Carbon Cycle --> Vegetation --> Phenology
TODO
24.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the phenology scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.vegetation.mortality.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25. Carbon Cycle --> Vegetation --> Mortality
TODO
25.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general principle behind the mortality scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26. Carbon Cycle --> Litter
TODO
26.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.litter.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.number_of_carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27. Carbon Cycle --> Soil
TODO
27.1. Number Of Carbon Pools
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.carbon_pools')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Carbon Pools
Is Required: FALSE Type: STRING Cardinality: 0.1
List the carbon pools used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.soil.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Method
Is Required: FALSE Type: STRING Cardinality: 0.1
List the general method used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.is_permafrost_included')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28. Carbon Cycle --> Permafrost Carbon
TODO
28.1. Is Permafrost Included
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is permafrost included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.emitted_greenhouse_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.2. Emitted Greenhouse Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
List the GHGs emitted
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.decomposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Decomposition
Is Required: FALSE Type: STRING Cardinality: 0.1
List the decomposition methods used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.carbon_cycle.permafrost_carbon.impact_on_soil_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.4. Impact On Soil Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the impact of permafrost on soil properties
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29. Nitrogen Cycle
Land surface nitrogen cycle
29.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of the nitrogen cycle in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the nitrogen cycle tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 29.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of nitrogen cycle in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.nitrogen_cycle.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.4. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the nitrogen scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30. River Routing
Land surface river routing
30.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of river routing in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.tiling')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.2. Tiling
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the river routing tiling, if any.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of river routing scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_inherited_from_land_surface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.4. Grid Inherited From Land Surface
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the grid inherited from land surface?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.grid_description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.5. Grid Description
Is Required: FALSE Type: STRING Cardinality: 0.1
General description of grid, if not inherited from land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.number_of_reservoirs')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.6. Number Of Reservoirs
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Enter the number of reservoirs
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.water_re_evaporation')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "flood plains"
# "irrigation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.7. Water Re Evaporation
Is Required: TRUE Type: ENUM Cardinality: 1.N
TODO
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_atmosphere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 30.8. Coupled To Atmosphere
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Is river routing coupled to the atmosphere model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.coupled_to_land')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.9. Coupled To Land
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the coupling between land and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.quantities_exchanged_with_atmosphere')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.10. Quantities Exchanged With Atmosphere
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupled to the atmosphere, which quantities are exchanged between river routing and the atmosphere model components?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.basin_flow_direction_map')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "present day"
# "adapted for other periods"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30.11. Basin Flow Direction Map
Is Required: TRUE Type: ENUM Cardinality: 1.1
What type of basin flow direction map is being used?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.flooding')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.12. Flooding
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the representation of flooding, if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.13. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the river routing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.discharge_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "direct (large rivers)"
# "diffuse"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. River Routing --> Oceanic Discharge
TODO
31.1. Discharge Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify how rivers are discharged to the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.river_routing.oceanic_discharge.quantities_transported')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31.2. Quantities Transported
Is Required: TRUE Type: ENUM Cardinality: 1.N
Quantities that are exchanged from river-routing to the ocean model component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32. Lakes
Land surface lakes
32.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lakes in the land surface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.coupling_with_rivers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.2. Coupling With Rivers
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are lakes coupled to the river routing model component?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 32.3. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Time step of lake scheme in seconds
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.quantities_exchanged_with_rivers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "heat"
# "water"
# "tracers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32.4. Quantities Exchanged With Rivers
Is Required: FALSE Type: ENUM Cardinality: 0.N
If coupling with rivers, which quantities are exchanged between the lakes and rivers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.vertical_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.5. Vertical Grid
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the vertical grid of lakes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.1
List the prognostic variables of the lake scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.ice_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33. Lakes --> Method
TODO
33.1. Ice Treatment
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is lake ice included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.albedo')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "prognostic"
# "diagnostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.2. Albedo
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe the treatment of lake albedo
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamics')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No lake dynamics"
# "vertical"
# "horizontal"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33.3. Dynamics
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which dynamics of lakes are treated? horizontal, vertical, etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.dynamic_lake_extent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.4. Dynamic Lake Extent
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is a dynamic lake extent scheme included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.method.endorheic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 33.5. Endorheic Basins
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Basins not flowing to ocean included?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.land.lakes.wetlands.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34. Lakes --> Wetlands
TODO
34.1. Description
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the treatment of wetlands, if any
End of explanation
"""
|
rjurney/Agile_Data_Code_2 | ch09/Improving_Predictions.ipynb | mit | import sys, os, re
import json
import datetime, iso8601
from tabulate import tabulate
# Initialize PySpark
APP_NAME = "Improving Predictions"

# If there is no SparkSession, create the environment
try:
    # Probe for an existing SparkContext/SparkSession (e.g. a preconfigured
    # notebook kernel); raises NameError when either name is undefined.
    sc and spark
except NameError as e:
    import findspark
    findspark.init()
    import pyspark
    import pyspark.sql

    sc = pyspark.SparkContext()
    # NOTE(review): SparkSession(sc) is constructed and then .builder (a class
    # attribute) is used to get-or-create the session — confirm the initial
    # SparkSession(sc) instance is intentional rather than leftover.
    spark = pyspark.sql.SparkSession(sc).builder.appName(APP_NAME).getOrCreate()

print("PySpark initialized...")
from pyspark.sql.types import StringType, IntegerType, FloatType, DoubleType, DateType, TimestampType
from pyspark.sql.types import StructType, StructField
from pyspark.sql.functions import udf

# (name, type) pairs for every field of a training record; the JSON Lines
# input holds one flight per line, e.g. {"ArrDelay":5.0, "Carrier":"WN", ...}
_field_types = [
    ("ArrDelay", DoubleType()),       # e.g. 5.0
    ("CRSArrTime", TimestampType()),  # e.g. "2015-12-31T03:20:00.000-08:00"
    ("CRSDepTime", TimestampType()),  # e.g. "2015-12-31T03:05:00.000-08:00"
    ("Carrier", StringType()),        # e.g. "WN"
    ("DayOfMonth", IntegerType()),    # e.g. 31
    ("DayOfWeek", IntegerType()),     # e.g. 4
    ("DayOfYear", IntegerType()),     # e.g. 365
    ("DepDelay", DoubleType()),       # e.g. 14.0
    ("Dest", StringType()),           # e.g. "SAN"
    ("Distance", DoubleType()),       # e.g. 368.0
    ("FlightDate", DateType()),       # e.g. "2015-12-30T16:00:00.000-08:00"
    ("FlightNum", StringType()),      # e.g. "6109"
    ("Origin", StringType()),         # e.g. "TUS"
]
schema = StructType(
    [StructField(name, field_type, True) for name, field_type in _field_types]
)

input_path = "../data/simple_flight_delay_features.jsonl"
features = spark.read.json(input_path, schema=schema)

# Sample 10% to make executable inside the notebook
features = features.sample(False, 0.1)

features.show(3)
features.first()
#
# Check for nulls in features before using Spark ML
#
# null_counts = [(column, features.where(features[column].isNull()).count()) for column in features.columns]
# cols_with_nulls = filter(lambda x: x[1] > 0, null_counts)
# print("Columns with nulls that need to be filtered: {}".format(
#   str(list(cols_with_nulls))
# ))
#
# Add a Route variable to replace FlightNum
#
from pyspark.sql.functions import lit, concat

# A route is the origin and destination airport codes joined by a dash,
# e.g. "TUS-SAN"
route_column = concat(features.Origin, lit('-'), features.Dest)
features_with_route = features.withColumn('Route', route_column)
features_with_route.show(3)
#
# Use pyspark.ml.feature.Bucketizer to discretize ArrDelay into delay
# severity buckets: early, on-time, slightly late, very late (labels 0.0-3.0)
#
from pyspark.ml.feature import Bucketizer

# Bucket boundaries: (-inf, -15), [-15, 0), [0, 30), [30, inf)
splits = [-float("inf"), -15.0, 0, 30.0, float("inf")]
arrival_bucketizer = Bucketizer(
    inputCol="ArrDelay",
    outputCol="ArrDelayBucket",
    splits=splits,
)

# Persist the bucketizer so the serving layer can apply identical bucketing
arrival_bucketizer_path = "../models/arrival_bucketizer_2.0.bin"
arrival_bucketizer.write().overwrite().save(arrival_bucketizer_path)

# Bucketize the arrival delays and eyeball the result
ml_bucketized_features = arrival_bucketizer.transform(features_with_route)
ml_bucketized_features.select("ArrDelay", "ArrDelayBucket").show(5)
#
# Extract features using the tools in pyspark.ml.feature
#
from pyspark.ml.feature import StringIndexer, VectorAssembler

# Turn category fields into numeric indexes (decision trees consume the
# raw indexes directly; no one-hot encoding is needed)
for column in ["Carrier", "DayOfMonth", "DayOfWeek", "DayOfYear",
               "Origin", "Dest", "Route"]:
    print("Indexing column \"{}\" ...".format(column))
    string_indexer = StringIndexer(
        inputCol=column,
        outputCol=column + "_index"
    )
    string_indexer_model = string_indexer.fit(ml_bucketized_features)
    ml_bucketized_features = string_indexer_model.transform(ml_bucketized_features)

    # Drop the original column
    ml_bucketized_features = ml_bucketized_features.drop(column)

    # Save the fitted StringIndexerModel so serving code can reuse it
    string_indexer_output_path = "../models/string_indexer_model_{}.bin".format(
        column
    )
    string_indexer_model.write().overwrite().save(string_indexer_output_path)
print("Indexed all string columns!")

# Handle continuous, numeric fields by combining them into one feature vector
numeric_columns = ["DepDelay", "Distance"]
# BUG FIX: "Origin_index" was listed twice, silently doubling that feature's
# weight in the assembled vector; each index column now appears exactly once.
index_columns = ["Carrier_index", "DayOfMonth_index",
                 "DayOfWeek_index", "DayOfYear_index", "Origin_index",
                 "Dest_index", "Route_index"]
vector_assembler = VectorAssembler(
    inputCols=numeric_columns + index_columns,
    outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)

# Save the numeric vector assembler
vector_assembler_path = "../models/numeric_vector_assembler.bin"
vector_assembler.write().overwrite().save(vector_assembler_path)

# Drop the index columns now that they are encoded into the feature vector
for column in index_columns:
    final_vectorized_features = final_vectorized_features.drop(column)

# Inspect the finalized features
final_vectorized_features.show(5)
"""
Explanation: Improving Predictions
Now that we have deployed working models predicting flight delays, it is time to “make believe” that our prediction has proven useful based on user feedback, and further that the prediction is valuable enough that prediction quality is important. In this case, it is time to iteratively improve the quality of our prediction. If a prediction is valuable enough, this becomes a full-time job for one or more people.
In this chapter we will tune our Spark ML classifier and also do additional feature engineering to improve prediction quality. In doing so, we will show you how to iteratively improve predictions.
Fixing Our Prediction Problem
At this point we realized that our model was always predicting one class, no matter the input. We began by investigating that in a Jupyter notebook at ch09/Debugging Prediction Problems.ipynb.
The notebook itself is very long, and we tried many things to fix our model. It turned out we had made a mistake. We were using OneHotEncoder on top of the output of StringIndexerModel when we were encoding our nominal/categorical string features. This is how you should encode features for models other than decision trees, but it turns out that for decision tree models, you are supposed to take the string indexes from StringIndexerModel and directly compose them with your continuous/numeric features in a VectorAssembler. Decision trees are able to infer the fact that indexes represent categories. One benefit of directly adding StringIndexes to your feature vectors is that you then get easily interpretable feature importances.
When we discovered this, we had to go back and edit the book so that we didn’t teach something that was wrong, and so this is now what you see. We thought it worthwhile to link to the notebook, though, to show how this really works in the wild: you build broken shit and then fix it.
When to Improve Predictions
Not all predictions should be improved. Often something fast and crude will work well enough as an MVP (minimum viable product). Only predictions that prove useful should be improved. It is possible to sink large volumes of time into improving the quality of a prediction, so it is essential that you connect with users before getting sucked into this task. This is why we’ve included the discussion of improving predictions in its own chapter.
Improving Prediction Performance
There are a few ways to improve an existing predictive model. The first is by tuning the parameters of the statistical model making your prediction. The second is feature engineering.
Tuning model hyperparameters to improve predictive model quality can be done by intuition, or by brute force through something called a grid or random search. We’re going to focus on feature engineering, as hyperparameter tuning is covered elsewhere. A good guide to hyperparameter tuning is available in the Spark documentation on model selection and tuning.
As we move through this chapter, we’ll be using the work we’ve done so far to perform feature engineering. Feature engineering is the most important part of making good predictions. It involves using what you’ve discovered about the data through exploratory data analysis in order to feed your machine learning algorithm better, more consequential data as input.
Experimental Adhesion Method: See What Sticks
There are several ways to decide which features to use, and Saurav Kaushik has written a post on Analytics Vidhya that introduces them well. The method we employ primarily, which we jokingly entitle the Experimental Adhesion Method, is to quickly select all the features that we can simply compute, and try them all using a random forest or gradient boosted decision tree model (note that even if our application requires another type of model, we still use decision trees to guide feature selection). Then we train the model and inspect the model’s feature importances to “see what sticks.” The most important variables are retained, and this forms the basic model we begin with.
Feature engineering is an iterative process. Based on the feature importances, we ponder what new things we might try using the data we have available. We start with the simplest idea, or the one that is easiest to implement. If the feature importances indicate one type of feature is important, and we can’t easily compute new features similar to this one, we think about how we might acquire new data to join to our training data to use as features.
The key is to be logical and systematic in our exploration of the feature space. You should think about how easy a potential feature is to compute, as well as what it would teach you if it turned out to be important. Are there other, similar features that you could try if this candidate worked? Develop hypotheses and test them in the form of new features. Evaluate each new feature in an experiment and reflect on what you’ve learned before engineering the next feature.
Establishing Rigorous Metrics for Experiments
In order to improve our classification model, we need to reliably determine its prediction quality in the first place. To do so, we need to beef up our cross-validation code, and then establish a baseline of quality for the original model. Check out ch09/baseline_spark_mllib_model.py, which we copied from ch09/train_spark_mllib_model.py and altered to improve its cross-validation code.
In order to evaluate the prediction quality of our classifier, we need to use more than one metric. Spark ML’s MulticlassClassificationEvaluator offers four metrics: accuracy, weighted precision, weighted recall, and f1.
Defining Our Classification Metrics
The raw accuracy is just what it sounds like: the number of correct predictions divided by the number of predictions. This is something to check first, but it isn’t adequate alone. Precision is a measure of how useful the result is. Recall describes how complete the results are. The f1 score incorporates both precision and recall to determine overall quality. Taken together, the changes to these metrics between consecutive runs of training our model can give us a clear picture of what is happening with our model in terms of prediction quality. We will use these metrics along with feature importance to guide our feature engineering efforts.
Feature Importance
Model quality metrics aren’t enough to guide the iterative improvements of our model. To understand what is going on with each new run, we need to employ a type of model called a decision tree.
In Spark ML, the best general-purpose multiclass classification model is an implementation of a random forest, the RandomForestClassificationModel, fit by the RandomForestClassifier. Random forests can classify or regress, and they have an important feature that helps us interrogate predictive models through a feature called feature importance.
The importance of a feature is what it sounds like: a measure of how important that feature was in contributing to the accuracy of the model. This information is incredibly useful, as it can serve as a guiding hand to feature engineering. In other words, if you know how important a feature is, you can use this clue to make changes that increase the accuracy of the model, such as removing unimportant features and trying to engineer features similar to those that are most important. Feature engineering is a major theme of Agile Data Science, and it is a big part of why we’ve been doing iterative visualization and exploration (the purpose of which is to shed light on and drive feature engineering).
Note that the state of the art for many classification and regression tasks is a gradient boosted decision tree, but as of version 2.1.0 Spark ML’s implementation—the GBTClassificationModel, which is fit by the GBTClassifier—can only do binary classification.
Getting Ready for Experiments
We need to run through the model's code from chapter 8 before we can set up and run an experiment.
End of explanation
"""
#
# Cross-validate, train and evaluate the classifier: loop over
# `split_count` random test/train splits, scoring 4 metrics each time
# (the original comment claimed 5 loops; split_count is 3)
#
from collections import defaultdict
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

scores = defaultdict(list)
metric_names = ["accuracy", "weightedPrecision", "weightedRecall", "f1"]

split_count = 3

for i in range(1, split_count + 1):
    print("Run {} out of {} of test/train splits in cross validation...".format(
        i,
        split_count,
    ))

    # Test/train split
    training_data, test_data = final_vectorized_features.randomSplit([0.8, 0.2])

    # Instantiate and fit a random forest classifier on the training data;
    # maxBins must exceed the largest category cardinality (Route)
    rfc = RandomForestClassifier(
        featuresCol="Features_vec",
        labelCol="ArrDelayBucket",
        predictionCol="Prediction",
        maxBins=4657,
        maxMemoryInMB=1024
    )
    model = rfc.fit(training_data)

    # Save the new model over the old one
    model_output_path = "../models/spark_random_forest_classifier.flight_delays.baseline.bin"
    model.write().overwrite().save(model_output_path)

    # Evaluate the model on the held-out test data
    predictions = model.transform(test_data)

    # Score this split's predictions on each metric
    for metric_name in metric_names:
        evaluator = MulticlassClassificationEvaluator(
            labelCol="ArrDelayBucket",
            predictionCol="Prediction",
            metricName=metric_name
        )
        score = evaluator.evaluate(predictions)
        scores[metric_name].append(score)
        print("{} = {}".format(metric_name, score))
"""
Explanation: Implementing A More Rigorous Experiment
In order to be confident in our experiment for each measure, we need to repeat it at least twice to see how it varies. This is the degree to which we cross-validate. In addition, we need to loop and run the measurement code once for each score. Once we’ve collected several scores for each metric, we look at both the average and standard deviation for each score. Taken together, these scores give us a picture of the quality of our classifier.
To begin, we need to iterate and repeat our experiment N times. For each experiment we need to compute a test/train split, then we need to train the model on the training data and apply it to the test data. Then we use MulticlassClassificationEvaluator to get a score, once for each metric. We gather the scores in a list for each metric, which we will evaluate at the end of the experiment:
End of explanation
"""
#
# Summarize each metric across the cross-validation runs: mean and
# standard deviation, displayed as a table
#
import numpy as np

score_averages = defaultdict(float)

# Build one (metric, mean, std) row per metric
average_stds = []  # ha
for name in metric_names:
    values = scores[name]
    mean_score = sum(values) / len(values)
    score_averages[name] = mean_score
    average_stds.append((name, mean_score, np.std(values)))

# Print the table
print("\nExperiment Log")
print("--------------")
print(tabulate(average_stds, headers=["Metric", "Average", "STD"]))
"""
Explanation: Processing Run Results
Our run leaves us with a defaultdict of scores, with one list for each metric. Now we need to compute the average and standard deviation of each list to give us the overall average and standard deviation of each metric:
Note that we need to compute both the average and standard deviation of each metric from our run. The average will tell us the approximate performance level, and the standard deviation will tell us how much a model's performance varies. Less variance is desirable. We'll use this information in tuning our model.
End of explanation
"""
# Inspect the raw per-run scores; the cell's bare expression displays them
scores
"""
Explanation: The standard deviations indicate that we might not even need to perform k-fold cross-validation, but an inspection of the underlying scores says otherwise:
End of explanation
"""
#
# Persist the scores to a score log that exists between runs
#
import pickle

score_log_filename = "../models/score_log.pickle"

# Load the score log or initialize an empty one; `with` guarantees the
# file handle is closed (the original `pickle.load(open(...))` leaked it)
try:
    with open(score_log_filename, "rb") as score_log_file:
        score_log = pickle.load(score_log_file)
    if not isinstance(score_log, list):
        score_log = []
except IOError:
    score_log = []

# Compute this run's log entry: the average of each metric
score_log_entry = {metric_name: score_averages[metric_name] for metric_name in metric_names}

# Compute and display the change in score for each metric vs. the last run
try:
    last_log = score_log[-1]
except (IndexError, TypeError, AttributeError):
    # First run ever: diff against this run's own entry so deltas are zero
    last_log = score_log_entry

experiment_report = []
for metric_name in metric_names:
    run_delta = score_log_entry[metric_name] - last_log[metric_name]
    experiment_report.append((metric_name, run_delta))

print("\nExperiment Report")
print("-----------------")
print(tabulate(experiment_report, headers=["Metric", "Score"]))

# Append the new average scores to the log
score_log.append(score_log_entry)

# Persist the log for the next run, closing the handle deterministically
with open(score_log_filename, "wb") as score_log_file:
    pickle.dump(score_log, score_log_file)
"""
Explanation: There is actually significant variation between runs, and this could obscure a small improvement (or degradation) in prediction quality.
The iterations take time, and this discourages experimentation. A middle ground should be found.
Comparing Experiments to Determine Improvements
Now that we have our baseline metrics, we can repeat this code as we improve the model and see what the effect is in terms of the four metrics available to us. So it seems we are done, that we can start playing with our model and features. However, we will quickly run into a problem. We will lose track of the score from the previous run, printed on the screen above many logs for each run, unless we write it down each time. And this is tedious. So, we need to automate this process.
What we need to do is load a score log from disk, evaluate the current score in terms of the previous one, and store a new entry to the log back to disk for the next run to access. The following code achieves this aim.
First we use pickle to load any existing score log. If this is not present, we initialize a new log, which is simply an empty Python list. Next we prepare the new log entry—a simple Python dict containing the average score for each of four metrics. Then we subtract the previous run’s score to determine the change in this run. This is the information we use to evaluate whether our change worked or not (along with any changes in feature importances, which we will address as well).
Finally, we append the new score entry to the log and store it back to disk:
End of explanation
"""
#
# Cross validate, train and evaluate classifier: loop 5 times for 4 metrics
#
from collections import defaultdict
scores = defaultdict(list)
feature_importances = defaultdict(list)
metric_names = ["accuracy", "weightedPrecision", "weightedRecall", "f1"]
split_count = 3
for i in range(1, split_count + 1):
print(f"\nRun {i} out of {split_count} of test/train splits in cross validation...")
# Test/train split
training_data, test_data = final_vectorized_features.randomSplit([0.8, 0.2])
# Instantiate and fit random forest classifier on all the data
from pyspark.ml.classification import RandomForestClassifier
rfc = RandomForestClassifier(
featuresCol="Features_vec",
labelCol="ArrDelayBucket",
predictionCol="Prediction",
maxBins=4657,
)
model = rfc.fit(training_data)
# Save the new model over the old one
model_output_path = "../models/spark_random_forest_classifier.flight_delays.baseline.bin"
model.write().overwrite().save(model_output_path)
# Evaluate model using test data
predictions = model.transform(test_data)
# Evaluate this split's results for each metric
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
for metric_name in metric_names:
evaluator = MulticlassClassificationEvaluator(
labelCol="ArrDelayBucket",
predictionCol="Prediction",
metricName=metric_name
)
score = evaluator.evaluate(predictions)
scores[metric_name].append(score)
print(f"{metric_name} = {score}")
#
# Collect feature importances
#
feature_names = vector_assembler.getInputCols()
feature_importance_list = model.featureImportances
for feature_name, feature_importance in zip(feature_names, feature_importance_list):
feature_importances[feature_name].append(feature_importance)
"""
Explanation: Now when we run our script, we will get a report that shows the change between this run and the last run. We can use this, along with our feature importances, to direct our efforts at improving the model. For instance, an example test run shows the model accuracy increase by .003:
```
Experiment Report
Metric Score
accuracy 0.00300548
weightedPrecision -0.00592227
weightedRecall 0.00300548
f1 -0.0105553
```
Jump back to the code for the model, the code under the section Implementing a More Rigorous Experiment. Re-run all the code between there and here, the last three code blocks. See how the score changed slightly? You will use these changes to guide you as you change the model!
Inspecting Changes in Feature Importance
We can use the list of columns given to our final VectorAssembler along with RandomForestClassificationModel.featureImportances to derive the importance of each named feature. This is extremely valuable, because like with our prediction quality scores, we can look at changes in feature importances for all features between runs. If a newly introduced feature turns out to be important, it is usually worth adding to the model, so long as it doesn’t hurt quality.
Logging Feature Importances
We begin by altering our experiment loop to record feature importances for each run. Check out the abbreviated content from ch09/improved_spark_mllib_model.py:
End of explanation
"""
#
# Analyze and report feature importance changes
#
# Compute averages for each feature
feature_importance_entry = defaultdict(float)
for feature_name, value_list in feature_importances.items():
average_importance = sum(value_list) / len(value_list)
feature_importance_entry[feature_name] = average_importance
# Sort the feature importances in descending order and print
import operator
sorted_feature_importances = sorted(
feature_importance_entry.items(),
key=operator.itemgetter(1),
reverse=True
)
print("\nFeature Importances")
print("-------------------")
print(tabulate(sorted_feature_importances, headers=['Name', 'Importance']))
"""
Explanation: Inspecting Feature Importances
Next, we need to compute the average of the importance for each feature. Note that we use a defaultdict(float) to ensure that accessing empty keys returns zero. This will be important when comparing entries in the log with different sets of features. In order to print the feature importances, we need to sort them first, by descending order of importance:
End of explanation
"""
#
# Compare this run's feature importances with the previous run's
#
# Load the feature importance log or initialize an empty one; `with`
# guarantees the file handle is closed (the original leaked it)
feature_log_filename = "../models/feature_log.pickle"
try:
    with open(feature_log_filename, "rb") as feature_log_file:
        feature_log = pickle.load(feature_log_file)
    if not isinstance(feature_log, list):
        feature_log = []
except IOError:
    feature_log = []

# Determine the baseline entry to diff against
try:
    last_feature_log = feature_log[-1]
except (IndexError, TypeError, AttributeError):
    # No previous entry: baseline on this run's own averages so every delta
    # is zero; defaultdict(float) still yields 0.0 for feature names that
    # appear in later runs but not here
    last_feature_log = defaultdict(float)
    for feature_name, importance in feature_importance_entry.items():
        last_feature_log[feature_name] = importance
"""
Explanation: Feature Importance Differences Between Runs
Next we need to perform the same housekeeping as we did for the model score log: load the model, create an entry for this experiment, load the last experiment and compute the change for each feature between that experiment and the current one, and then print a report on these deltas.
First we load the last feature log. If it isn’t available because it doesn’t exist, we initialize the last_feature_log with zeros for each feature, so that new features will have a positive score equal to their amount:
End of explanation
"""
# Per-feature change in average importance between the last run and this one
feature_deltas = {
    name: feature_importance_entry[name] - last_feature_log[name]
    for name in feature_importances
}
"""
Explanation: Next we compute the change between the last run and the current one:
End of explanation
"""
# Sort feature deltas, biggest change first
import operator
sorted_feature_deltas = sorted(
feature_deltas.items(),
key=operator.itemgetter(1),
reverse=True
)
"""
Explanation: In order to display them, we need to sort the feature importance changes in descending order, to show the biggest change first:
End of explanation
"""
# Display the sorted feature-importance deltas as a table, biggest change first
print("\nFeature Importance Delta Report")
print("-------------------------------")
print(tabulate(sorted_feature_deltas, headers=["Feature", "Delta"]))
"""
Explanation: Then we display the sorted feature deltas:
End of explanation
"""
# Append the current run's average importances to the log
feature_log.append(feature_importance_entry)

# Persist the log for the next run; `with` closes the handle that the
# original `pickle.dump(..., open(...))` call leaked
with open(feature_log_filename, "wb") as feature_log_file:
    pickle.dump(feature_log, feature_log_file)
"""
Explanation: Finally, as with the score log, we append our entry to the log and save it for the next run:
End of explanation
"""
# Register the DataFrame for SQL queries; createOrReplaceTempView replaces
# the registerTempTable API deprecated since Spark 2.0
features.createOrReplaceTempView("features")

features.show(5)

# Average and stddev of arrival delay grouped by scheduled departure hour
# (HOUR(...) + 1 labels hours 1-24 — presumably for display; confirm)
spark.sql("""
SELECT
  HOUR(CRSDepTime) + 1 AS Hour,
  AVG(ArrDelay),
  STD(ArrDelay)
FROM features
GROUP BY HOUR(CRSDepTime)
ORDER BY HOUR(CRSDepTime)
""").show(24)

# Average and stddev of arrival delay grouped by scheduled arrival hour
spark.sql("""
SELECT
  HOUR(CRSArrTime) + 1 AS Hour,
  AVG(ArrDelay),
  STD(ArrDelay)
FROM features
GROUP BY HOUR(CRSArrTime)
ORDER BY HOUR(CRSArrTime)
""").show(24)

# Add hour-of-day columns as candidate features
from pyspark.sql.functions import hour
features = features.withColumn('CRSDepHourOfDay', hour(features.CRSDepTime))
features = features.withColumn('CRSArrHourOfDay', hour(features.CRSArrTime))

# Covariance of each hour-of-day column with the arrival delay
departure_cov = features.stat.cov('CRSDepHourOfDay', 'ArrDelay')
arrival_cov = features.stat.cov('CRSArrHourOfDay', 'ArrDelay')
print("Departure delay covariance: {:,}".format(departure_cov))
print("Arrival delay covariance: {:,}".format(arrival_cov))

# Sanity-check the new columns alongside their source timestamps
features.select(
    "CRSDepTime",
    "CRSDepHourOfDay",
    "CRSArrTime",
    "CRSArrHourOfDay"
).show()
"""
Explanation: We’ll use the raw feature importances as well as the changes in feature importance to guide our creation or alteration of features as we improve the model.
Conclusion
Now that we have the ability to understand the effect of changes between experimental runs, we can detect changes that improve our model. We can start adding features to test their effect on the model’s prediction quality, and pursue related features that help improve quality! Without this setup, we would be hard put to make positive changes. With it, we are only bounded by our creativity in our efforts to improve the model.
Time of Day as a Feature
In examining our feature importances, it looks like the date/time fields have some impact. What if we extracted the hour/minute as an integer from the datetime for departure/arrival fields? This would inform the model about morning versus afternoon versus red-eye flights, which surely affects on-time performance, as there is more traffic in the morning than overnight.
Check out ch09/explore_delays.py. Let’s start by exploring the premise of this feature, that lateness varies by the time of day of the flight:
End of explanation
"""
# Confirm the new hour-of-day columns are present in the features
features.show(5)
"""
Explanation: Encoding Our New Features
Now we must repeat the feature encoding process such that it includes these new features. Lets take a look at what our features look like at this moment:
End of explanation
"""
#
# Add a Route variable (Origin-Dest, e.g. "SFO-JFK") to replace FlightNum
#
from pyspark.sql.functions import lit, concat
features_with_route = features.withColumn(
    'Route',
    concat(
        features.Origin,
        lit('-'),
        features.Dest
    )
)
features_with_route.show(6)
#
# Use pyspark.ml.feature.Bucketizer to bucketize ArrDelay into
# buckets 0-3: early, on-time, slightly late, very late
#
from pyspark.ml.feature import Bucketizer
# Setup the Bucketizer; 5 split points define 4 buckets
splits = [-float("inf"), -15.0, 0, 30.0, float("inf")]
arrival_bucketizer = Bucketizer(
    splits=splits,
    inputCol="ArrDelay",
    outputCol="ArrDelayBucket"
)
# Save the model so the prediction service can apply the same bucketing
arrival_bucketizer_path = "../models/arrival_bucketizer_2.0.bin"
arrival_bucketizer.write().overwrite().save(arrival_bucketizer_path)
# Apply the model
ml_bucketized_features = arrival_bucketizer.transform(features_with_route)
ml_bucketized_features.select("ArrDelay", "ArrDelayBucket").show(5)
#
# Extract features with tools in pyspark.ml.feature
#
from pyspark.ml.feature import StringIndexer, VectorAssembler
# Turn category fields into numeric indexes, one StringIndexer per column
for column in ["Carrier", "DayOfMonth", "DayOfWeek", "DayOfYear",
               "Origin", "Dest", "Route"]:
    string_indexer = StringIndexer(
        inputCol=column,
        outputCol=column + "_index"
    )
    string_indexer_model = string_indexer.fit(ml_bucketized_features)
    ml_bucketized_features = string_indexer_model.transform(ml_bucketized_features)
    # Save the fitted indexer so predictions can encode values identically
    string_indexer_output_path = "../models/string_indexer_model_3.0.{}.bin".format(
        column
    )
    string_indexer_model.write().overwrite().save(string_indexer_output_path)
ml_bucketized_features.show(5)
# Combine continuous, numeric fields with indexes of nominal ones
# ...into one feature vector
numeric_columns = [
    "DepDelay", "Distance",
    "CRSDepHourOfDay", "CRSArrHourOfDay"
]
# FIX: the original list repeated "Origin_index", which fed the same feature
# into the vector twice — inflating its apparent importance and wasting a
# vector slot. Each index column must appear exactly once.
index_columns = ["Carrier_index", "DayOfMonth_index",
                 "DayOfWeek_index", "DayOfYear_index", "Origin_index",
                 "Dest_index", "Route_index"]
vector_assembler = VectorAssembler(
    inputCols=numeric_columns + index_columns,
    outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)
# Save the numeric vector assembler for reuse at prediction time
vector_assembler_path = "../models/numeric_vector_assembler_3.0.bin"
vector_assembler.write().overwrite().save(vector_assembler_path)
# Drop the index columns now that they are encoded inside Features_vec
for column in index_columns:
    final_vectorized_features = final_vectorized_features.drop(column)
# Inspect the finalized features
final_vectorized_features.show(5)
"""
Explanation: So we're back at the beginning, and still have to add Route, bucketize the data, encode our string and numeric fields, and then combine them into a single vector. Let's get started!
End of explanation
"""
#
# Cross validate, train and evaluate classifier:
# loop split_count times, scoring 4 metrics per split
#
from collections import defaultdict
scores = defaultdict(list)
feature_importances = defaultdict(list)
metric_names = ["accuracy", "weightedPrecision", "weightedRecall", "f1"]
split_count = 3
for i in range(1, split_count + 1):
    print(f"\nRun {i} out of {split_count} of test/train splits in cross validation...")
    # Test/train split
    training_data, test_data = final_vectorized_features.randomSplit([0.8, 0.2])
    # Instantiate and fit random forest classifier on all the data
    from pyspark.ml.classification import RandomForestClassifier
    rfc = RandomForestClassifier(
        featuresCol="Features_vec",
        labelCol="ArrDelayBucket",
        predictionCol="Prediction",
        maxBins=4657,  # must be >= the max category cardinality of any indexed column
        maxMemoryInMB=1024
    )
    model = rfc.fit(training_data)
    # Save the new model over the old one
    model_output_path = "../models/spark_random_forest_classifier.flight_delays.baseline.bin"
    model.write().overwrite().save(model_output_path)
    # Evaluate model using test data
    predictions = model.transform(test_data)
    # Evaluate this split's results for each metric
    from pyspark.ml.evaluation import MulticlassClassificationEvaluator
    for metric_name in metric_names:
        evaluator = MulticlassClassificationEvaluator(
            labelCol="ArrDelayBucket",
            predictionCol="Prediction",
            metricName=metric_name
        )
        score = evaluator.evaluate(predictions)
        scores[metric_name].append(score)
        print(f"{metric_name} = {score}")
    #
    # Collect feature importances for this split (averaged after the loop)
    #
    feature_names = vector_assembler.getInputCols()
    feature_importance_list = model.featureImportances
    for feature_name, feature_importance in zip(feature_names, feature_importance_list):
        feature_importances[feature_name].append(feature_importance)
#
# Evaluate average and STD of each metric and print a table
#
import numpy as np
score_averages = defaultdict(float)
# Compute the table data
average_stds = []  # (metric, average, standard deviation) rows
for metric_name in metric_names:
    metric_scores = scores[metric_name]
    average_accuracy = sum(metric_scores) / len(metric_scores)
    score_averages[metric_name] = average_accuracy
    std_accuracy = np.std(metric_scores)
    average_stds.append((metric_name, average_accuracy, std_accuracy))
# Print the table
print("\nExperiment Log")
print("--------------")
print(tabulate(average_stds, headers=["Metric", "Average", "STD"]))
#
# Persist the score to a score log that exists between runs
#
import pickle
# Load the score log or initialize an empty one
# NOTE(review): open() without a context manager leaks the handle until GC
try:
    score_log_filename = "../models/score_log.pickle"
    score_log = pickle.load(open(score_log_filename, "rb"))
    if not isinstance(score_log, list):
        score_log = []
except IOError:
    score_log = []
# Compute the existing score log entry
score_log_entry = {metric_name: score_averages[metric_name] for metric_name in metric_names}
# Compute and display the change in score for each metric
# (first run: deltas are zero because last_log defaults to this run's entry)
try:
    last_log = score_log[-1]
except (IndexError, TypeError, AttributeError):
    last_log = score_log_entry
experiment_report = []
for metric_name in metric_names:
    run_delta = score_log_entry[metric_name] - last_log[metric_name]
    experiment_report.append((metric_name, run_delta))
print("\nExperiment Report")
print("-----------------")
print(tabulate(experiment_report, headers=["Metric", "Score"]))
# Append the existing average scores to the log
score_log.append(score_log_entry)
# Persist the log for next run
pickle.dump(score_log, open(score_log_filename, "wb"))
#
# Analyze and report feature importance changes
#
# Compute averages for each feature across the CV splits
feature_importance_entry = defaultdict(float)
for feature_name, value_list in feature_importances.items():
    average_importance = sum(value_list) / len(value_list)
    feature_importance_entry[feature_name] = average_importance
# Sort the feature importances in descending order and print
import operator
sorted_feature_importances = sorted(
    feature_importance_entry.items(),
    key=operator.itemgetter(1),
    reverse=True
)
print("\nFeature Importances")
print("-------------------")
print(tabulate(sorted_feature_importances, headers=['Name', 'Importance']))
#
# Compare this run's feature importances with the previous run's
#
# Load the feature importance log or initialize an empty one
try:
    feature_log_filename = "../models/feature_log.pickle"
    feature_log = pickle.load(open(feature_log_filename, "rb"))
    if not isinstance(feature_log, list):
        feature_log = []
except IOError:
    feature_log = []
# Compute and display the change in importance for each feature
try:
    last_feature_log = feature_log[-1]
except (IndexError, TypeError, AttributeError):
    # No previous run: seed with current values so all deltas come out zero
    last_feature_log = defaultdict(float)
    for feature_name, importance in feature_importance_entry.items():
        last_feature_log[feature_name] = importance
# Compute the deltas
feature_deltas = {}
for feature_name in feature_importances.keys():
    run_delta = feature_importance_entry[feature_name] - last_feature_log[feature_name]
    feature_deltas[feature_name] = run_delta
# Sort feature deltas, biggest change first
import operator
sorted_feature_deltas = sorted(
    feature_deltas.items(),
    key=operator.itemgetter(1),
    reverse=True
)
# Display sorted feature deltas
print("\nFeature Importance Delta Report")
print("-------------------------------")
print(tabulate(sorted_feature_deltas, headers=["Feature", "Delta"]))
# Append the existing average importances to the log
feature_log.append(feature_importance_entry)
# Persist the log for next run
pickle.dump(feature_log, open(feature_log_filename, "wb"))
"""
Explanation: Training with Our New Features
Now we will train the model again, noting the performance and feature importances as we did before. This allows us to see the change in performance owing to the introduction of these new fields, CRSDepHourOfDay and CRSArrHourOfDay.
End of explanation
"""
# Load the on-time parquet file
input_path = "../data/january_performance.parquet"
on_time_dataframe = spark.read.parquet(input_path)
# NOTE(review): registerTempTable is deprecated in Spark 2.x in favor of
# createOrReplaceTempView; behavior here is equivalent
on_time_dataframe.registerTempTable("on_time_performance")
# Select a few features of interest; TailNum is renamed FeatureTailNum so it
# will not collide with the airplane dataset's TailNum after the join below
simple_on_time_features = spark.sql("""
SELECT
FlightNum,
FlightDate,
DayOfWeek,
DayofMonth AS DayOfMonth,
CONCAT(Month, '-', DayofMonth) AS DayOfYear,
Carrier,
Origin,
Dest,
Distance,
DepDelay,
ArrDelay,
CRSDepTime,
CRSArrTime,
CONCAT(Origin, '-', Dest) AS Route,
TailNum AS FeatureTailNum
FROM on_time_performance
WHERE FlightDate < '2015-02-01'
""")
# Work on a 10% sample (without replacement) to keep iteration fast
simple_on_time_features = simple_on_time_features.sample(False, 0.1)
simple_on_time_features.select(
    "FlightNum",
    "FlightDate",
    "FeatureTailNum"
).show(10)
# Filter nulls, they can't help us: rows without a delay value are unusable
# as training labels/features
filled_on_time_features = simple_on_time_features.where(simple_on_time_features.ArrDelay.isNotNull())
filled_on_time_features = filled_on_time_features.where(filled_on_time_features.DepDelay.isNotNull())
filled_on_time_features.show(5)
# Turn the schedule's string/numeric time fields into real timestamps
def convert_hours(hours_minutes):
    """Convert an 'HHMM' schedule string into an 'HH:MM:00Z' time string.

    The source data encodes end-of-day midnight as hour '24', which is not
    a valid time; clamp it to '23:59'.
    """
    hh, mm = hours_minutes[:-2], hours_minutes[-2:]
    if hh == '24':
        hh, mm = '23', '59'
    return "{}:{}:00Z".format(hh, mm)
def compose_datetime(iso_date, time_string):
    """Join an ISO date and a time string with a single space."""
    return f"{iso_date} {time_string}"
def create_iso_string(iso_date, hours_minutes):
    """Build a full ISO datetime string from a date and an 'HHMM' time."""
    return compose_datetime(iso_date, convert_hours(hours_minutes))
def create_datetime(iso_string):
    """Parse an ISO 8601 string into a timezone-aware datetime."""
    parsed = iso8601.parse_date(iso_string)
    return parsed
def convert_datetime(iso_date, hours_minutes):
    """Convert an ISO date plus an 'HHMM' schedule time into a datetime."""
    return create_datetime(create_iso_string(iso_date, hours_minutes))
def day_of_year(iso_date_string):
    """Return the 1-based day of the year for an ISO date string."""
    parsed = iso8601.parse_date(iso_date_string)
    return parsed.timetuple().tm_yday
def alter_feature_datetimes(row):
    """Rebuild one feature row with real datetimes for the schedule fields.

    Casts the day fields to int, derives DayOfYear, and rolls the scheduled
    arrival forward one day when it precedes the departure (overnight flights).
    """
    dep = convert_datetime(row['FlightDate'], row['CRSDepTime'])
    arr = convert_datetime(row['FlightDate'], row['CRSArrTime'])
    if arr < dep:
        # Overnight flight: arrival falls on the next calendar day
        arr += datetime.timedelta(days=1)
    return {
        'FlightNum': row['FlightNum'],
        'FlightDate': iso8601.parse_date(row['FlightDate']),
        'DayOfWeek': int(row['DayOfWeek']),
        'DayOfMonth': int(row['DayOfMonth']),
        'DayOfYear': day_of_year(row['FlightDate']),
        'Carrier': row['Carrier'],
        'Origin': row['Origin'],
        'Dest': row['Dest'],
        'Distance': row['Distance'],
        'DepDelay': row['DepDelay'],
        'ArrDelay': row['ArrDelay'],
        'CRSDepTime': dep,
        'CRSArrTime': arr,
        'Route': row['Route'],
        'FeatureTailNum': row['FeatureTailNum'],
    }
# Apply the datetime conversion row-by-row via the RDD API, then convert
# back to a DataFrame (the schema is inferred from the returned dicts)
timestamp_features = filled_on_time_features.rdd.map(alter_feature_datetimes)
timestamp_df = timestamp_features.toDF()
timestamp_df.show(5)
"""
Explanation: Interpreting Our Results
Interpreting the output, it looks like the combined effect of these fields is to impact feature importance by about 1%, but the effect on accuracy is insignificant. We’ll leave the fields in, although they don’t help much. Without resorting to advanced time series analysis, it seems we’ve milked all we can from date/time-based features.
Incorporating Airplane Data
Recall from Investigating Airplanes (Entities) that we incorporated data on airplane manufacturers into our data model. For instance, we analyzed the distribution of manufacturers in the American commercial fleet. In this section, we’re going to join in airline data and see what impact this has on the model’s accuracy.
I wonder whether properties of the aircraft (called the “metal” of the flight) influence delays? For instance, bigger aircraft fly higher and can go over weather, while smaller aircraft may be less able to do so. I can’t honestly think of a reason why the engine manufacturer, airplane manufacturer, or manufacture year would have an impact on the model, but since we’re importing one field, we may as well try them all! Note that we can simply drop any features that don’t rank as very significant. The beauty of our experimental model with decision trees is that it doesn’t cost extra to try extra fields. Sometimes you can simply let the model decide what matters.
Note that when dealing with team members and with other teams who need an accounting of your time in order to coordinate with you, a description of the experiments you are running will help keep the teams in sync. For instance, “We are attempting to incorporate a new dataset which we scraped from the FAA website into our flight delay predictive model” would make a good experimental description during an agile sprint.
Extracting Airplane Features
To add airplane features to our model, we need to create a new feature extraction script, ch09/extract_features_with_airplanes.py. We can do this by copying and altering ch09/extract_features.py.
First we add TailNum to the fields we select from our training data. Because this column also appears in our airplane dataset, we need to name it differently or we won’t easily be able to access the column after the join. We’ll name it FeatureTailNum:
End of explanation
"""
# Load airplanes and left join on tail numbers
airplanes_path = "../data/airplanes.jsonl"
airplanes = spark.read.json(airplanes_path)
# Left outer join ensures all feature records remain, with nulls where airplane records are not available
features_with_airplanes = timestamp_df.join(
    airplanes,
    on=timestamp_df.FeatureTailNum == airplanes.TailNum,
    how="left_outer"
)
# Replace the nulls left by the outer join with 'Empty' via COALESCE, so
# StringIndexer (which cannot handle null) sees a real category; also
# rename FeatureTailNum back to TailNum for the final output
features_with_airplanes = features_with_airplanes.selectExpr(
    "FlightNum",
    "FlightDate",
    "DayOfWeek",
    "DayOfMonth",
    "DayOfYear",
    "Carrier",
    "Origin",
    "Dest",
    "Distance",
    "DepDelay",
    "ArrDelay",
    "CRSDepTime",
    "CRSArrTime",
    "Route",
    "FeatureTailNum AS TailNum",
    "COALESCE(EngineManufacturer, 'Empty') AS EngineManufacturer",
    "COALESCE(EngineModel, 'Empty') AS EngineModel",
    "COALESCE(Manufacturer, 'Empty') AS Manufacturer",
    "COALESCE(ManufacturerYear, 'Empty') AS ManufacturerYear",
    "COALESCE(Model, 'Empty') AS Model",
    "COALESCE(OwnerState, 'Empty') AS OwnerState"
)
features_with_airplanes.show(5)
# Explicitly sort the data and keep it sorted throughout. Leave nothing to chance.
sorted_features = features_with_airplanes.sort(
    timestamp_df.DayOfYear,
    timestamp_df.Carrier,
    timestamp_df.Origin,
    timestamp_df.Dest,
    timestamp_df.FlightNum,
    timestamp_df.CRSDepTime,
    timestamp_df.CRSArrTime,
)
"""
Explanation: Joining Airplane Data
Next, we load the airplane data and left join it to our features dataset. Note that null is a problematic value for our StringIndexer. But we don’t want to discard empty values or rows either, because whether a variable is present or not is something our decision tree model can use to learn. We use DataFrame.selectExpr to COALESCE our null values to the string 'Empty'. This will get its own index from StringIndexer and things will work out well. Also note that we rename FeatureTailNum back to TailNum for the final output:
End of explanation
"""
# Store as a single json file
# repartition(1) collapses the data to one partition so a single part file is written
output_path = "../data/simple_flight_delay_features_airplanes.json"
sorted_features.repartition(1).write.mode("overwrite").json(output_path)
"""
Explanation: Storing Our Features
Finally, we store the final output to a new path. We’ll have to remember to alter our model training script to point at this new path:
End of explanation
"""
from pyspark.sql.types import StringType, IntegerType, FloatType, DoubleType, DateType, TimestampType
from pyspark.sql.types import StructType, StructField
from pyspark.sql.functions import udf
# Explicit schema: loading JSON with a schema skips Spark's inference pass
# and pins the column types the pipeline below expects
schema = StructType([
    StructField("ArrDelay", DoubleType(), True),
    StructField("CRSArrTime", TimestampType(), True),
    StructField("CRSDepTime", TimestampType(), True),
    StructField("Carrier", StringType(), True),
    StructField("DayOfMonth", IntegerType(), True),
    StructField("DayOfWeek", IntegerType(), True),
    StructField("DayOfYear", IntegerType(), True),
    StructField("DepDelay", DoubleType(), True),
    StructField("Dest", StringType(), True),
    StructField("Distance", DoubleType(), True),
    StructField("FlightDate", DateType(), True),
    StructField("FlightNum", StringType(), True),
    StructField("Origin", StringType(), True),
    StructField("Route", StringType(), True),
    StructField("TailNum", StringType(), True),
    StructField("EngineManufacturer", StringType(), True),
    StructField("EngineModel", StringType(), True),
    StructField("Manufacturer", StringType(), True),
    StructField("ManufacturerYear", StringType(), True),
    StructField("OwnerState", StringType(), True),
])
input_path = "../data/simple_flight_delay_features_airplanes.json"
features = spark.read.json(input_path, schema=schema)
features.show(5)
#
# Add the hour of day (0-23) of scheduled arrival/departure
#
from pyspark.sql.functions import hour
features_with_hour = features.withColumn(
    "CRSDepHourOfDay",
    hour(features.CRSDepTime)
)
features_with_hour = features_with_hour.withColumn(
    "CRSArrHourOfDay",
    hour(features.CRSArrTime)
)
features_with_hour.select("CRSDepTime", "CRSDepHourOfDay", "CRSArrTime", "CRSArrHourOfDay").show(5)
"""
Explanation: Now we’re ready to incorporate the features into our model.
Incorporating Airplane Features into Our Classifier Model
Now we need to create a new script that incorporates our new airplane features into our classifier model. Check out ch09/spark_model_with_airplanes.py, which we copied from ch09/improved_spark_mllib_model.py and altered.
First we need to load the training data with the additional fields, including Route (which is now calculated in ch09/extract_features_with_airplanes.py):
End of explanation
"""
#
# Check for nulls in features before using Spark ML
# (StringIndexer crashes on null; this table should come out empty)
#
# NOTE: this triggers one count() action per column — fine for a one-off audit
null_counts = [(column, features_with_hour.where(features_with_hour[column].isNull()).count()) for column in features_with_hour.columns]
cols_with_nulls = filter(lambda x: x[1] > 0, null_counts)
print("\nNull Value Report")
print("-----------------")
print(tabulate(cols_with_nulls, headers=["Column", "Nulls"]))
"""
Explanation: Because we left joined our new features in, we need to know how many of the resulting training records have null values for their fields. Null values will crash the StringIndexer for a field, so we’ve explicitly altered our feature extraction code to remove them. There should be no nulls, so we’ll print a table with a warning if they are present:
End of explanation
"""
#
# Use pyspark.ml.feature.Bucketizer to bucketize ArrDelay into
# buckets 0-3: early, on-time, slightly late, very late
#
from pyspark.ml.feature import Bucketizer
# Setup the Bucketizer; 5 split points define 4 buckets
splits = [-float("inf"), -15.0, 0, 30.0, float("inf")]
arrival_bucketizer = Bucketizer(
    splits=splits,
    inputCol="ArrDelay",
    outputCol="ArrDelayBucket"
)
# Save the model
arrival_bucketizer_path = "../models/arrival_bucketizer_2.0.bin"
arrival_bucketizer.write().overwrite().save(arrival_bucketizer_path)
# Apply the model
ml_bucketized_features = arrival_bucketizer.transform(features_with_hour)
ml_bucketized_features.select("ArrDelay", "ArrDelayBucket").show(5)
"""
Explanation: There should be no nulls present!
Next we need to bucketize our data as per normal.
End of explanation
"""
#
# Extract features with tools in pyspark.ml.feature
#
from pyspark.ml.feature import StringIndexer, VectorAssembler
# Turn category fields into indexes — now including the airplane fields;
# the day-number fields are deliberately NOT indexed (they go in as numerics)
string_columns = ["Carrier", "Origin", "Dest", "Route",
                  "TailNum", "EngineManufacturer", "EngineModel",
                  "Manufacturer", "ManufacturerYear", "OwnerState"]
for column in string_columns:
    string_indexer = StringIndexer(
        inputCol=column,
        outputCol=column + "_index"
    )
    string_indexer_model = string_indexer.fit(ml_bucketized_features)
    ml_bucketized_features = string_indexer_model.transform(ml_bucketized_features)
    # Save the fitted indexer (bumped to version 4.0 for this model revision)
    string_indexer_output_path = "../models/string_indexer_model_4.0.{}.bin".format(
        column
    )
    string_indexer_model.write().overwrite().save(string_indexer_output_path)
ml_bucketized_features.show(5)
"""
Explanation: Next we add the hour of day fields as normal, and we bucketize the ArrDelay field to get the ArrDelayBucket. Then we need to index all our string columns, including our new airplane features.
Note that we are also making another change. We are moving the DayOfMonth, DayOfWeek and DayOfYear fields, along with CRSDepHourOfDay and CRSArrHourOfDay, directly into the numeric fields to be vectorized. This is because, on reflection, indexing these already-numeric fields adds nothing but a layer of abstraction: it merely re-encodes numbers as other numbers.
End of explanation
"""
# Combine continuous, numeric fields with indexes of nominal ones
# ...into one feature vector
numeric_columns = [
    "DepDelay",
    "Distance",
    "DayOfYear",
    "DayOfMonth",
    "DayOfWeek",
    "CRSDepHourOfDay",
    "CRSArrHourOfDay"
]
# Derive the index column names from string_columns so the lists cannot drift
index_columns = [column + "_index" for column in string_columns]
index_columns  # notebook cell echo for inspection
vector_assembler = VectorAssembler(
    inputCols=numeric_columns + index_columns,
    outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)
# Save the numeric vector assembler
vector_assembler_path = "../models/numeric_vector_assembler_5.0.bin"
vector_assembler.write().overwrite().save(vector_assembler_path)
# Drop the index columns now that they are encoded inside Features_vec
for column in index_columns:
    final_vectorized_features = final_vectorized_features.drop(column)
# Inspect the finalized features
final_vectorized_features.show(5)
"""
Explanation: Next, we need to create a new VectorAssembler to combine our features into one feature vector, the column Features_vec. As before, an index field name is the field name with _index appended. This time around, we use a list comprehension to compute the index columns:
End of explanation
"""
#
# Cross validate, train and evaluate classifier:
# loop split_count times, scoring 4 metrics per split
#
from collections import defaultdict
scores = defaultdict(list)
feature_importances = defaultdict(list)
metric_names = ["accuracy", "weightedPrecision", "weightedRecall", "f1"]
split_count = 3
for i in range(1, split_count + 1):
    print(f"\nRun {i} out of {split_count} of test/train splits in cross validation...")
    # Test/train split
    training_data, test_data = final_vectorized_features.randomSplit([0.8, 0.2])
    # Instantiate and fit random forest classifier on all the data
    from pyspark.ml.classification import RandomForestClassifier
    rfc = RandomForestClassifier(
        featuresCol="Features_vec",
        labelCol="ArrDelayBucket",
        predictionCol="Prediction",
        maxBins=4896,  # raised to cover TailNum's cardinality
        maxMemoryInMB=1024
    )
    model = rfc.fit(training_data)
    # Save the new model over the old one
    model_output_path = "../models/spark_random_forest_classifier.flight_delays.baseline.bin"
    model.write().overwrite().save(model_output_path)
    # Evaluate model using test data
    predictions = model.transform(test_data)
    # Evaluate this split's results for each metric
    from pyspark.ml.evaluation import MulticlassClassificationEvaluator
    for metric_name in metric_names:
        evaluator = MulticlassClassificationEvaluator(
            labelCol="ArrDelayBucket",
            predictionCol="Prediction",
            metricName=metric_name
        )
        score = evaluator.evaluate(predictions)
        scores[metric_name].append(score)
        print("{} = {}".format(metric_name, score))
    #
    # Collect feature importances for this split (averaged after the loop)
    #
    feature_names = vector_assembler.getInputCols()
    feature_importance_list = model.featureImportances
    for feature_name, feature_importance in zip(feature_names, feature_importance_list):
        feature_importances[feature_name].append(feature_importance)
#
# Evaluate average and STD of each metric and print a table
#
import numpy as np
score_averages = defaultdict(float)
# Compute the table data
average_stds = []  # (metric, average, standard deviation) rows
for metric_name in metric_names:
    metric_scores = scores[metric_name]
    average_accuracy = sum(metric_scores) / len(metric_scores)
    score_averages[metric_name] = average_accuracy
    std_accuracy = np.std(metric_scores)
    average_stds.append((metric_name, average_accuracy, std_accuracy))
# Print the table
print("\nExperiment Log")
print("--------------")
print(tabulate(average_stds, headers=["Metric", "Average", "STD"]))
#
# Persist the score to a score log that exists between runs
#
import pickle
# Load the score log or initialize an empty one
try:
    score_log_filename = "../models/score_log.pickle"
    score_log = pickle.load(open(score_log_filename, "rb"))
    if not isinstance(score_log, list):
        score_log = []
except IOError:
    score_log = []
# Compute the existing score log entry
score_log_entry = {
    metric_name: score_averages[metric_name] for metric_name in metric_names
}
# Compute and display the change in score for each metric
# (first run: deltas are zero because last_log defaults to this run's entry)
try:
    last_log = score_log[-1]
except (IndexError, TypeError, AttributeError):
    last_log = score_log_entry
experiment_report = []
for metric_name in metric_names:
    run_delta = score_log_entry[metric_name] - last_log[metric_name]
    experiment_report.append((metric_name, run_delta))
print("\nExperiment Report")
print("-----------------")
print(tabulate(experiment_report, headers=["Metric", "Score"]))
# Append the existing average scores to the log
score_log.append(score_log_entry)
# Persist the log for next run
pickle.dump(score_log, open(score_log_filename, "wb"))
#
# Analyze and report feature importance changes
#
# Compute averages for each feature across the CV splits
feature_importance_entry = defaultdict(float)
for feature_name, value_list in feature_importances.items():
    average_importance = sum(value_list) / len(value_list)
    feature_importance_entry[feature_name] = average_importance
# Sort the feature importances in descending order and print
import operator
sorted_feature_importances = sorted(
    feature_importance_entry.items(),
    key=operator.itemgetter(1),
    reverse=True
)
print("\nFeature Importances")
print("-------------------")
print(tabulate(sorted_feature_importances, headers=['Name', 'Importance']))
#
# Compare this run's feature importances with the previous run's
#
# Load the feature importance log or initialize an empty one
try:
    feature_log_filename = "../models/feature_log.pickle"
    feature_log = pickle.load(open(feature_log_filename, "rb"))
    if not isinstance(feature_log, list):
        feature_log = []
except IOError:
    feature_log = []
# Compute and display the change in importance for each feature
try:
    last_feature_log = feature_log[-1]
except (IndexError, TypeError, AttributeError):
    # No previous run: seed with current values so all deltas come out zero
    last_feature_log = defaultdict(float)
    for feature_name, importance in feature_importance_entry.items():
        last_feature_log[feature_name] = importance
# Compute the deltas
feature_deltas = {}
for feature_name in feature_importances.keys():
    run_delta = feature_importance_entry[feature_name] - last_feature_log[feature_name]
    feature_deltas[feature_name] = run_delta
# Sort feature deltas, biggest change first
import operator
sorted_feature_deltas = sorted(
    feature_deltas.items(),
    key=operator.itemgetter(1),
    reverse=True
)
# Display sorted feature deltas
print("\nFeature Importance Delta Report")
print("-------------------------------")
print(tabulate(sorted_feature_deltas, headers=["Feature", "Delta"]))
# Append the existing average importances to the log
feature_log.append(feature_importance_entry)
# Persist the log for next run
pickle.dump(feature_log, open(feature_log_filename, "wb"))
"""
Explanation: The rest of the code is identical to ch09/improved_spark_mllib_model.py:
End of explanation
"""
#
# Use pyspark.ml.feature.Bucketizer to bucketize ArrDelay into
# buckets 0-3: early, on-time, slightly late, very late
#
from pyspark.ml.feature import Bucketizer
# Setup the Bucketizer; 5 split points define 4 buckets
splits = [-float("inf"), -15.0, 0, 30.0, float("inf")]
arrival_bucketizer = Bucketizer(
    splits=splits,
    inputCol="ArrDelay",
    outputCol="ArrDelayBucket"
)
# Save the model
arrival_bucketizer_path = "../models/arrival_bucketizer_2.0.bin"
arrival_bucketizer.write().overwrite().save(arrival_bucketizer_path)
# Apply the model
ml_bucketized_features = arrival_bucketizer.transform(features_with_hour)
ml_bucketized_features.select("ArrDelay", "ArrDelayBucket").show(5)
#
# Extract features with tools in pyspark.ml.feature
#
from pyspark.ml.feature import StringIndexer, VectorAssembler
# Turn category fields into indexes
# NOTE(review): this variant drops the airplane columns from string_columns
# yet saves to the same "4.0" indexer paths as the airplane version above —
# rerunning overwrites those models; confirm this is intended
string_columns = ["Carrier", "Origin", "Dest", "Route", "TailNum"]
for column in string_columns:
    string_indexer = StringIndexer(
        inputCol=column,
        outputCol=column + "_index"
    )
    string_indexer_model = string_indexer.fit(ml_bucketized_features)
    ml_bucketized_features = string_indexer_model.transform(ml_bucketized_features)
    # Save the pipeline model
    string_indexer_output_path = "../models/string_indexer_model_4.0.{}.bin".format(
        column
    )
    string_indexer_model.write().overwrite().save(string_indexer_output_path)
ml_bucketized_features.show(5)
# Combine continuous, numeric fields with indexes of nominal ones
# ...into one feature vector
# NOTE(review): unlike the previous variant, DayOfWeek is absent here —
# verify whether it was dropped deliberately
numeric_columns = [
    "DepDelay",
    "Distance",
    "DayOfYear",
    "DayOfMonth",
    "CRSDepHourOfDay",
    "CRSArrHourOfDay"
]
index_columns = [column + "_index" for column in string_columns]
index_columns  # notebook cell echo for inspection
vector_assembler = VectorAssembler(
    inputCols=numeric_columns + index_columns,
    outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)
# Save the numeric vector assembler
vector_assembler_path = "../models/numeric_vector_assembler_5.0.bin"
vector_assembler.write().overwrite().save(vector_assembler_path)
# Drop the index columns now that they are encoded inside Features_vec
for column in index_columns:
    final_vectorized_features = final_vectorized_features.drop(column)
# Inspect the finalized features
final_vectorized_features.show(5)
#
# Cross validate, train and evaluate classifier:
# loop split_count times, scoring 4 metrics per split
#
from collections import defaultdict
scores = defaultdict(list)
feature_importances = defaultdict(list)
metric_names = ["accuracy", "weightedPrecision", "weightedRecall", "f1"]
split_count = 3
for i in range(1, split_count + 1):
    print(f"\nRun {i} out of {split_count} of test/train splits in cross validation...")
    # Test/train split
    training_data, test_data = final_vectorized_features.randomSplit([0.8, 0.2])
    # Instantiate and fit random forest classifier on all the data
    from pyspark.ml.classification import RandomForestClassifier
    rfc = RandomForestClassifier(
        featuresCol="Features_vec",
        labelCol="ArrDelayBucket",
        predictionCol="Prediction",
        maxBins=4896,  # must be >= the max category cardinality of any indexed column
        maxMemoryInMB=1024
    )
    model = rfc.fit(training_data)
    # Save the new model over the old one
    model_output_path = "../models/spark_random_forest_classifier.flight_delays.baseline.bin"
    model.write().overwrite().save(model_output_path)
    # Evaluate model using test data
    predictions = model.transform(test_data)
    # Evaluate this split's results for each metric
    from pyspark.ml.evaluation import MulticlassClassificationEvaluator
    for metric_name in metric_names:
        evaluator = MulticlassClassificationEvaluator(
            labelCol="ArrDelayBucket",
            predictionCol="Prediction",
            metricName=metric_name
        )
        score = evaluator.evaluate(predictions)
        scores[metric_name].append(score)
        print("{} = {}".format(metric_name, score))
    #
    # Collect feature importances for this split (averaged after the loop)
    #
    feature_names = vector_assembler.getInputCols()
    feature_importance_list = model.featureImportances
    for feature_name, feature_importance in zip(feature_names, feature_importance_list):
        feature_importances[feature_name].append(feature_importance)
#
# Evaluate average and STD of each metric and print a table
#
import numpy as np
score_averages = defaultdict(float)
# Compute the table data
average_stds = []  # (metric, average, standard deviation) rows
for metric_name in metric_names:
    metric_scores = scores[metric_name]
    average_accuracy = sum(metric_scores) / len(metric_scores)
    score_averages[metric_name] = average_accuracy
    std_accuracy = np.std(metric_scores)
    average_stds.append((metric_name, average_accuracy, std_accuracy))
# Print the table
print("\nExperiment Log")
print("--------------")
print(tabulate(average_stds, headers=["Metric", "Average", "STD"]))
#
# Persist the score to a score log that exists between runs
#
import pickle
# Load the score log or initialize an empty one
try:
    score_log_filename = "../models/score_log.pickle"
    score_log = pickle.load(open(score_log_filename, "rb"))
    if not isinstance(score_log, list):
        score_log = []
except IOError:
    score_log = []
# Compute the existing score log entry
score_log_entry = {
    metric_name: score_averages[metric_name] for metric_name in metric_names
}
# Compute and display the change in score for each metric
# (first run: deltas are zero because last_log defaults to this run's entry)
try:
    last_log = score_log[-1]
except (IndexError, TypeError, AttributeError):
    last_log = score_log_entry
experiment_report = []
for metric_name in metric_names:
    run_delta = score_log_entry[metric_name] - last_log[metric_name]
    experiment_report.append((metric_name, run_delta))
print("\nExperiment Report")
print("-----------------")
print(tabulate(experiment_report, headers=["Metric", "Score"]))
# Append the existing average scores to the log
score_log.append(score_log_entry)
# Persist the log for next run
pickle.dump(score_log, open(score_log_filename, "wb"))
#
# Analyze and report feature importance changes
#
# Compute averages for each feature across the CV splits
feature_importance_entry = defaultdict(float)
for feature_name, value_list in feature_importances.items():
    average_importance = sum(value_list) / len(value_list)
    feature_importance_entry[feature_name] = average_importance
# Sort the feature importances in descending order and print
import operator
sorted_feature_importances = sorted(
    feature_importance_entry.items(),
    key=operator.itemgetter(1),
    reverse=True
)
print("\nFeature Importances")
print("-------------------")
print(tabulate(sorted_feature_importances, headers=['Name', 'Importance']))
#
# Compare this run's feature importances with the previous run's
#
# Load the feature importance log or initialize an empty one
try:
    feature_log_filename = "../models/feature_log.pickle"
    feature_log = pickle.load(open(feature_log_filename, "rb"))
    if not isinstance(feature_log, list):
        feature_log = []
except IOError:
    feature_log = []
# Compute and display the change in score for each feature
try:
last_feature_log = feature_log[-1]
except (IndexError, TypeError, AttributeError):
last_feature_log = defaultdict(float)
for feature_name, importance in feature_importance_entry.items():
last_feature_log[feature_name] = importance
# Compute the deltas
feature_deltas = {}
for feature_name in feature_importances.keys():
run_delta = feature_importance_entry[feature_name] - last_feature_log[feature_name]
feature_deltas[feature_name] = run_delta
# Sort feature deltas, biggest change first
import operator
sorted_feature_deltas = sorted(
feature_deltas.items(),
key=operator.itemgetter(1),
reverse=True
)
# Display sorted feature deltas
print("\nFeature Importance Delta Report")
print("-------------------------------")
print(tabulate(sorted_feature_deltas, headers=["Feature", "Delta"]))
# Append the existing average deltas to the log
feature_log.append(feature_importance_entry)
# Persist the log for next run
pickle.dump(feature_log, open(feature_log_filename, "wb"))
"""
Explanation: Note that on the first go around, our model failed because we needed to increase the maxBins parameter to 4896 to accommodate our new fields. After that, the code runs without incident.
It looks like our efforts were mostly for naught—they actually hurt the quality of the model (although so little that it comes out about even)! The single exception is that adding the TailNum helps in terms of feature importance by 0.05 (your precise results may vary). Apparently some airplanes are more prone to delay than others, but this isn’t down to the properties of the airplane much... or at least those properties of the airplane are encoded mostly by the identity of the airplane itself.
Keep It Simple Stupid: KISS Your Models
The tail end of the scores for feature importance look like this (for me):
EngineModel_index 0.00221025
OwnerState_index 0.00181267
ManufacturerYear_index 0.00156983
Manufacturer_index 0.000969526
EngineManufacturer_index 0.000708076
DayOfWeek 0.00032268
It is better to have a simpler model as they tend not to overfit, that is to work well on training data by extracting patterns that only apply to it by chance but not to work well on test data or in the real world. They are also easier to deploy and maintain. If something doesn't help - remove it!
Model Simplification Experiment
Let's remove EngineModel, OwnerState, ManufacturerYear, Manufacturer, EngineManufacturer and DayOfWeek and run the model again, to see what this does to performance.
End of explanation
"""
# Load the on-time parquet file
input_path = "../data/january_performance.parquet"
on_time_dataframe = spark.read.parquet(input_path)
on_time_dataframe.registerTempTable("on_time_performance")
# Filtering on FlightDate > 2/1 and a 10% sample are for a training course.
# Feel free to use all the data if you have the RAM and the time!
features = spark.sql("""
SELECT
FlightNum,
FlightDate,
DayOfWeek,
DayofMonth AS DayOfMonth,
CONCAT(Month, '-', DayofMonth) AS DayOfYear,
Carrier,
Origin,
Dest,
Distance,
DepDelay,
ArrDelay,
CRSDepTime,
CRSArrTime,
CONCAT(Origin, '-', Dest) AS Route,
TailNum,
COALESCE(AirTime, 0.0) AS AirTime
FROM on_time_performance
WHERE FlightDate < '2015-02-01'
""")
features = features.sample(False, 0.1)
# Filter nulls, they can't help us
features = features.filter(
(features.ArrDelay.isNotNull())
&
(features.DepDelay.isNotNull())
)
features.show(10)
#
# Add the hour of day of scheduled arrival/departure
#
from pyspark.sql.functions import hour
# NOTE(review): CRSDepTime/CRSArrTime are HHMM strings at this point --
# confirm hour() extracts what is expected before the datetime conversion below.
features_with_hour = features.withColumn(
    "CRSDepHourOfDay",
    hour(features.CRSDepTime)
)
features_with_hour = features_with_hour.withColumn(
    "CRSArrHourOfDay",
    hour(features.CRSArrTime)
)
features_with_hour.select("CRSDepTime", "CRSDepHourOfDay", "CRSArrTime", "CRSArrHourOfDay").show(5)
# We need real timestamps for the scheduled times, not raw strings or numbers
def convert_hours(hours_minutes):
    """Turn an 'HHMM' scheduled-time string into an 'HH:MM:00Z' time string.

    The on-time data encodes end-of-day midnight as hour 24; ISO times stop
    at 23:59, so that case is clamped to '23:59:00Z'.
    """
    hh, mm = hours_minutes[:-2], hours_minutes[-2:]
    if hh == '24':
        hh, mm = '23', '59'
    return "{}:{}:00Z".format(hh, mm)
def compose_datetime(iso_date, time_string):
    """Combine an ISO date ('YYYY-MM-DD') and a time string into one string."""
    return f"{iso_date} {time_string}"
def create_iso_string(iso_date, hours_minutes):
    """Build a full 'YYYY-MM-DD HH:MM:00Z' string from a date and an 'HHMM' time."""
    return compose_datetime(iso_date, convert_hours(hours_minutes))
def create_datetime(iso_string):
    """Parse an ISO 8601 string into a datetime via the iso8601 library."""
    return iso8601.parse_date(iso_string)
def convert_datetime(iso_date, hours_minutes):
    """Parse a flight's date string plus 'HHMM' time into a datetime object."""
    return create_datetime(create_iso_string(iso_date, hours_minutes))
def day_of_year(iso_date_string):
    """Return the 1-based day of the year (1-366) for an ISO date string.

    Only the leading 'YYYY-MM-DD' portion is parsed, so both plain dates and
    full ISO datetimes (e.g. '2015-01-31T00:00:00Z') are accepted. Uses the
    standard library instead of the third-party iso8601 package -- no
    timezone handling is needed for a bare calendar date.
    """
    dt = datetime.datetime.strptime(iso_date_string[:10], "%Y-%m-%d")
    return dt.timetuple().tm_yday
def alter_feature_datetimes(row):
    """Convert one raw feature row into a dict with real date/datetime objects.

    CRSDepTime/CRSArrTime become datetimes anchored on the flight date,
    overnight arrivals are pushed to the next day, and DayOfYear is derived
    from FlightDate. All other fields pass through unchanged.
    """
    dep_dt = convert_datetime(row['FlightDate'], row['CRSDepTime'])
    arr_dt = convert_datetime(row['FlightDate'], row['CRSArrTime'])
    # An arrival scheduled "before" departure means the flight lands the next day
    if arr_dt < dep_dt:
        arr_dt += datetime.timedelta(days=1)
    return {
        'FlightNum': row['FlightNum'],
        'FlightDate': iso8601.parse_date(row['FlightDate']),
        'DayOfWeek': int(row['DayOfWeek']),
        'DayOfMonth': int(row['DayOfMonth']),
        'DayOfYear': day_of_year(row['FlightDate']),
        'Carrier': row['Carrier'],
        'Origin': row['Origin'],
        'Dest': row['Dest'],
        'Distance': row['Distance'],
        'DepDelay': row['DepDelay'],
        'ArrDelay': row['ArrDelay'],
        'CRSDepTime': dep_dt,
        'CRSArrTime': arr_dt,
        'Route': row['Route'],
        'TailNum': row['TailNum'],
        'AirTime': row['AirTime']
    }
# Apply the per-row datetime conversion on the RDD, then back to a DataFrame
timestamp_features = features_with_hour.rdd.map(alter_feature_datetimes)
timestamp_df = timestamp_features.toDF()
# Explicitly sort the data and keep it sorted throughout. Leave nothing to chance.
sorted_features = timestamp_df.sort(
    timestamp_df.DayOfYear,
    timestamp_df.Carrier,
    timestamp_df.Origin,
    timestamp_df.Dest,
    timestamp_df.FlightNum,
    timestamp_df.CRSDepTime,
    timestamp_df.CRSArrTime,
)
sorted_features.show(10)
# Store as a single json file -- repartition(1) forces one output part-file
output_path = "../data/simple_flight_delay_features_flight_times.json"
sorted_features.repartition(1).write.mode("overwrite").json(output_path)
print("Features with AirTime prepared!")
"""
Explanation: Interpreting Our Results
This impacts the score in a positive way (for me), but not in a significant way: an improvement of 0.00031884 in accuracy. However, at this point all our features are contributing significantly to the model’s prediction quality, which is where we want to be:
```
Feature Importances
Name Importance
DepDelay 0.775767
TailNum_index 0.0541045
Route_index 0.0401366
Origin_index 0.0290746
DayOfMonth 0.0287668
DayOfYear 0.0268546
Distance 0.0165887
Dest_index 0.0126576
CRSDepHourOfDay 0.00680116
Carrier_index 0.00542581
CRSArrHourOfDay 0.00382289
```
Remember: when it comes to predictive models, simpler is better. If a feature doesn’t sizably influence prediction accuracy, remove it. The model’s quality will increase, it will perform faster in production, and you will have an easier time understanding the impact of additional features on the model. A simpler model will be less susceptible to bias.
Incorporating Flight Time
One thing we haven’t considered yet is the flight time. We should be able to subtract the takeoff time from the landing time and get the duration of the flight. Since distance is a top-3 feature, and the hour of day matters, it seems like flight time might eke out a bit more prediction quality. Let’s try!
In the book, we computed this field by converting our ISO datetimes into unix times (seconds since 1970) and subtracted the takeoff time from the landing time. This gave us flight time in seconds. However, it turns out there is a field called AirTime which is the minutes in the air. Lets try adding AirTime and see how things work.
Availability of Feature at Runtime
It is easy to make the mistake of incorporating a feature into your model that you can't reliably retrieve in real time to make a prediction. Remember - if you can't get it in real time when you make a prediction, you can't incorporate it into the model... not if your model is going to work in the real world. This is a big difference between data science in Kaggle competitions and data science in practice.
AirTime would be available at runtime, if we compute an expected AirTime by subtracting the scheduled arrival time CRSArrTime from the scheduled departure time CRSDepTime, after converting both to unix time. We would then have to divide by 60 to get the expected AirTime. Would that work alright? One must reason about features... and in this case it seems like a valid feature, and a similar feature, Distance, has a 1.65% relative feature importance (for me).
Check out ch09/extract_features_with_flight_time.py, which we copied from ch09/extract_features_with_airplanes.py. We only need to change one line, our selectExpr, to add the date math for our FlightTime field:
Lets add AirTime to our model and see whether it helps or not. We'll have to go back and load the AirTime column, then proceed with our experiment. We're going to put all the code in one block this time, to give you a code block you can use to add your own features to the model later.
We simply add the AirTime column to our initial SELECT statement to Spark SQL. Note that sometimes AirTime is not present, resulting in null values. To address this, we will use Spark SQL's COALESCE function to impute (fill in) a 0.0 value in place of null. This way our model can use this missing data as evidence of timeliness, along with the values which are present.
End of explanation
"""
from pyspark.sql.types import StringType, IntegerType, FloatType, DoubleType, DateType, TimestampType
from pyspark.sql.types import StructType, StructField
from pyspark.sql.functions import udf
# Declare the schema explicitly rather than letting Spark infer it from JSON,
# so feature types are exactly what the model expects run after run.
schema = StructType([
    StructField("ArrDelay", DoubleType(), True),
    StructField("CRSArrTime", TimestampType(), True),
    StructField("CRSDepTime", TimestampType(), True),
    StructField("Carrier", StringType(), True),
    StructField("DayOfMonth", IntegerType(), True),
    StructField("DayOfWeek", IntegerType(), True),
    StructField("DayOfYear", IntegerType(), True),
    StructField("DepDelay", DoubleType(), True),
    StructField("Dest", StringType(), True),
    StructField("Distance", DoubleType(), True),
    StructField("FlightDate", DateType(), True),
    StructField("FlightNum", StringType(), True),
    StructField("Origin", StringType(), True),
    StructField("Route", StringType(), True),
    StructField("TailNum", StringType(), True),
    StructField("AirTime", FloatType(), True)
])
input_path = "../data/simple_flight_delay_features_flight_times.json"
features = spark.read.json(input_path, schema=schema)
features.show(5)
#
# Add the hour of day of scheduled arrival/departure
#
from pyspark.sql.functions import hour
features_with_hour = features.withColumn(
    "CRSDepHourOfDay",
    hour(features.CRSDepTime)
)
features_with_hour = features_with_hour.withColumn(
    "CRSArrHourOfDay",
    hour(features.CRSArrTime)
)
features_with_hour.select("CRSDepTime", "CRSDepHourOfDay", "CRSArrTime", "CRSArrHourOfDay").show(5)
#
# Check for nulls in features before using Spark ML
#
# NOTE(review): this triggers one Spark job per column; fine for a sample,
# slow on the full dataset.
null_counts = [(column, features_with_hour.where(features_with_hour[column].isNull()).count()) for column in features_with_hour.columns]
cols_with_nulls = filter(lambda x: x[1] > 0, null_counts)
print("\nNull Value Report")
print("-----------------")
print(tabulate(cols_with_nulls, headers=["Column", "Nulls"]))
#
# Use pyspark.ml.feature.Bucketizer to bucketize ArrDelay into delay classes:
# early, on-time, slightly late, very late (buckets 0-3)
#
from pyspark.ml.feature import Bucketizer
# Setup the Bucketizer
splits = [-float("inf"), -15.0, 0, 30.0, float("inf")]
arrival_bucketizer = Bucketizer(
    splits=splits,
    inputCol="ArrDelay",
    outputCol="ArrDelayBucket"
)
# Save the model
arrival_bucketizer_path = "../models/arrival_bucketizer_2.0.bin"
arrival_bucketizer.write().overwrite().save(arrival_bucketizer_path)
# Apply the model
ml_bucketized_features = arrival_bucketizer.transform(features_with_hour)
ml_bucketized_features.select("ArrDelay", "ArrDelayBucket").show(5)
ml_bucketized_features.show(5)
#
# Index categorical features with pyspark.ml.feature tools
#
from pyspark.ml.feature import StringIndexer, VectorAssembler
# Turn category fields into indexes
string_columns = ["Carrier", "Origin", "Dest", "Route", "TailNum"]
for column in string_columns:
    string_indexer = StringIndexer(
        inputCol=column,
        outputCol=column + "_index"
    )
    string_indexer_model = string_indexer.fit(ml_bucketized_features)
    ml_bucketized_features = string_indexer_model.transform(ml_bucketized_features)
    # Save the pipeline model so predictions can reuse the same index mapping
    string_indexer_output_path = "../models/string_indexer_model_4.0.{}.bin".format(
        column
    )
    string_indexer_model.write().overwrite().save(string_indexer_output_path)
ml_bucketized_features.show(5)
# Combine continuous, numeric fields with indexes of nominal ones
# ...into one feature vector
numeric_columns = [
    "DepDelay",
    "Distance",
    "DayOfYear",
    "DayOfMonth",
    "CRSDepHourOfDay",
    "CRSArrHourOfDay",
    "AirTime"
]
index_columns = [column + "_index" for column in string_columns]
input_columns = numeric_columns + index_columns
vector_assembler = VectorAssembler(
    inputCols=input_columns,
    outputCol="Features_vec"
)
final_vectorized_features = vector_assembler.transform(ml_bucketized_features)
# Save the numeric vector assembler
vector_assembler_path = "../models/numeric_vector_assembler_5.0.bin"
vector_assembler.write().overwrite().save(vector_assembler_path)
# Drop the index columns, the assembled vector is all the model needs
for column in index_columns:
    final_vectorized_features = final_vectorized_features.drop(column)
# Inspect the finalized features
final_vectorized_features.show(5)
#
# Cross validate, train and evaluate classifier: loop split_count times,
# collecting 4 metrics per split
#
from collections import defaultdict
scores = defaultdict(list)
feature_importances = defaultdict(list)
metric_names = ["accuracy", "weightedPrecision", "weightedRecall", "f1"]
split_count = 3
for i in range(1, split_count + 1):
    print("\nRun {} out of {} of test/train splits in cross validation...".format(
        i,
        split_count,
    ))
    # Test/train split
    training_data, test_data = final_vectorized_features.randomSplit([0.8, 0.2])
    # Instantiate and fit random forest classifier on all the data
    from pyspark.ml.classification import RandomForestClassifier
    rfc = RandomForestClassifier(
        featuresCol="Features_vec",
        labelCol="ArrDelayBucket",
        predictionCol="Prediction",
        maxBins=4896,  # must cover the cardinality of the indexed categoricals (TailNum)
        maxMemoryInMB=1024
    )
    model = rfc.fit(training_data)
    # Save the new model over the old one
    model_output_path = "../models/spark_random_forest_classifier.flight_delays.baseline.bin"
    model.write().overwrite().save(model_output_path)
    # Evaluate model using test data
    predictions = model.transform(test_data)
    # Evaluate this split's results for each metric
    from pyspark.ml.evaluation import MulticlassClassificationEvaluator
    for metric_name in metric_names:
        evaluator = MulticlassClassificationEvaluator(
            labelCol="ArrDelayBucket",
            predictionCol="Prediction",
            metricName=metric_name
        )
        score = evaluator.evaluate(predictions)
        scores[metric_name].append(score)
        print("{} = {}".format(metric_name, score))
    #
    # Collect feature importances for this split
    #
    feature_names = vector_assembler.getInputCols()
    feature_importance_list = model.featureImportances
    for feature_name, feature_importance in zip(feature_names, feature_importance_list):
        feature_importances[feature_name].append(feature_importance)
"""
Explanation: Training the Model
Note that we still store our data to disk and then load it explicitly. This ensures its provenance and formatting are exactly as we expect, and have not been inferred... inference that could change later and throw off our model. Model input must be as accurate and precise as possible!
Here we just add AirTime to the columns we load, then we add it to the list of numeric_columns. The rest is the same as before.
End of explanation
"""
#
# Evaluate average and STD of each metric and print a table
#
import numpy as np
score_averages = defaultdict(float)
# Compute the table data: one (metric, average, std) row per metric,
# aggregated over the cross-validation splits collected above.
average_stds = []  # rows of (metric, average, std) -- name is a pun
for metric_name in metric_names:
    metric_scores = scores[metric_name]
    average_accuracy = sum(metric_scores) / len(metric_scores)
    score_averages[metric_name] = average_accuracy
    std_accuracy = np.std(metric_scores)
    average_stds.append((metric_name, average_accuracy, std_accuracy))
# Print the table
print("\nExperiment Log")
print("--------------")
print(tabulate(average_stds, headers=["Metric", "Average", "STD"]))
#
# Persist the score to a score log that exists between runs
#
import pickle
# Load the score log or initialize an empty one
# NOTE(review): file handles from open() are never closed here; consider with-blocks
try:
    score_log_filename = "../models/score_log.pickle"
    score_log = pickle.load(open(score_log_filename, "rb"))
    if not isinstance(score_log, list):
        score_log = []
except IOError:
    score_log = []
# Compute the current score log entry
score_log_entry = {
    metric_name: score_averages[metric_name] for metric_name in metric_names
}
# Compute and display the change in score for each metric
try:
    last_log = score_log[-1]
except (IndexError, TypeError, AttributeError):
    # First run ever: diff against ourselves so every delta is 0.0
    last_log = score_log_entry
experiment_report = []
for metric_name in metric_names:
    run_delta = score_log_entry[metric_name] - last_log[metric_name]
    experiment_report.append((metric_name, run_delta))
print("\nExperiment Report")
print("-----------------")
print(tabulate(experiment_report, headers=["Metric", "Score"]))
# Append the existing average scores to the log
score_log.append(score_log_entry)
# Persist the log for next run
pickle.dump(score_log, open(score_log_filename, "wb"))
#
# Analyze and report feature importance changes
#
# Compute averages for each feature across the cross-validation splits
feature_importance_entry = defaultdict(float)
for feature_name, value_list in feature_importances.items():
    average_importance = sum(value_list) / len(value_list)
    feature_importance_entry[feature_name] = average_importance
# Sort the feature importances in descending order and print
import operator
sorted_feature_importances = sorted(
    feature_importance_entry.items(),
    key=operator.itemgetter(1),
    reverse=True
)
print("\nFeature Importances")
print("-------------------")
print(tabulate(sorted_feature_importances, headers=['Name', 'Importance']))
#
# Compare this run's feature importances with the previous run's
#
# Load the feature importance log or initialize an empty one
try:
    feature_log_filename = "../models/feature_log.pickle"
    feature_log = pickle.load(open(feature_log_filename, "rb"))
    if not isinstance(feature_log, list):
        feature_log = []
except IOError:
    feature_log = []
# Compute and display the change in importance for each feature
try:
    last_feature_log = feature_log[-1]
except (IndexError, TypeError, AttributeError):
    # First run: seed the baseline with current values so every delta is 0.0
    last_feature_log = defaultdict(float)
    for feature_name, importance in feature_importance_entry.items():
        last_feature_log[feature_name] = importance
# Compute the deltas
feature_deltas = {}
for feature_name in feature_importances.keys():
    run_delta = feature_importance_entry[feature_name] - last_feature_log[feature_name]
    feature_deltas[feature_name] = run_delta
# Sort feature deltas, biggest change first
import operator
sorted_feature_deltas = sorted(
    feature_deltas.items(),
    key=operator.itemgetter(1),
    reverse=True
)
# Display sorted feature deltas
print("\nFeature Importance Delta Report")
print("-------------------------------")
print(tabulate(sorted_feature_deltas, headers=["Feature", "Delta"]))
# Append the existing average deltas to the log
feature_log.append(feature_importance_entry)
# Persist the log for next run
pickle.dump(feature_log, open(feature_log_filename, "wb"))
"""
Explanation: Calculating AirTime Performance
Then we calculate performance again... this time we do everything in one block, again so you can easily copy/paste this below to add your own new features!
End of explanation
"""
|
google/learned_optimization | docs/notebooks/no_dependency_learned_optimizer.ipynb | apache-2.0 | import jax
import jax.numpy as jnp
import tensorflow_datasets as tfds
import matplotlib.pylab as plt
import numpy as onp
import functools
import os
"""
Explanation: No dependency introduction to learned optimizers in JAX
This notebook contains a self contained implementation of learned optimizers in JAX.
It is minimal in the hopes that it is easier to follow and gives readers a better understanding of what is involved. First we start with some background describing what learned optimizers are. We begin the implementation with a simple MLP and train it with a hand-designed optimizer. We then introduce a simple learned optimizer and discuss multiple ways to meta-train the weights of this learned optimizer, including gradients and evolution strategies.
The design ideas and patterns are the same as that used by learned_optimization, but greatly stripped down and simplified.
End of explanation
"""
import tensorflow as tf
# Build an infinite, shuffled iterator of 8x8 Fashion MNIST batches.
ds = tfds.load("fashion_mnist", split="train")
def resize_and_scale(batch):
    # Downsample to 8x8 and scale pixels from [0, 255] to [0, 1]
    batch["image"] = tf.image.resize(batch["image"], (8, 8)) / 255.
    return batch
ds = ds.map(resize_and_scale).cache().repeat(-1).shuffle(
    64 * 10).batch(128).prefetch(5)
data_iterator = ds.as_numpy_iterator()
batch = next(data_iterator)
# Visual sanity check: show a 4x4 grid of downsampled images
fig, axs = plt.subplots(4, 4, figsize=(10, 10))
for ai, a in enumerate(axs.ravel()):
    a.imshow(batch["image"][ai][:, :, 0], cmap="gray")
# Flattened pixel count (8 * 8 * 1 = 64), used as the MLP input width
input_size = onp.prod(batch["image"].shape[1:])
"""
Explanation: What is a learned optimizer?
Learned optimizers are machine learning models which themselves optimize other machine learning models.
To understand what exactly this means, consider first a simple hand designed optimizer: SGD. We can write the update equation as a single function of both parameter values, $x$, and gradients $\nabla l$ computed on some loss $l$.
$$U_{sgd}(x, \nabla l; \alpha) = - \alpha \nabla l $$
This update can be applied us our next iterate:
$$x' = x + U_{sgd}(x, \nabla l; \alpha)$$
This update rule is simple, effective, and widely used. Can we do better?
Framed in this way, this algorithm is simply a function. One idea to improve training is to switch out this hand designed function with a learned function parameterized by some set of weights, $\theta$:
$$U(x, \nabla l; \theta) = \text{NN}(x, \nabla l; \theta)$$
We call the weights of the optimizer, $\theta$, the meta-parameters, or outer-parameters. The weights this optimizer is optimizing we refer to as the inner-parameters, or simply parameters.
Now given this more flexible form, how do we set a particular value of the learned optimizer weights so that the learned optimizer "performs well"? To do this, we must first define what it means to perform well. In standard optimization, this could mean find some low loss solution after applying the optimizer many times. In machine learning, this could be finding a solution which generalizes. This objective / measurement of performance of the learned optimizer often goes by the name of a meta-loss, or outer loss.
With this metric in hand, we can optimize the weights of the learned optimizer with respect to this meta-loss. If we have a flexible enough set of weights, and can solve this optimization problem, we will be left with a performant optimizer!
In this notebook, we first start by defining the type of problem we seek our optimizer to perform well on. Next, we introduce optimizers, followed by learned optimizers. Next we define our meta-objective, or our measurement of how well our optimizers perform. Finally, we discuss a variety of techniques, and tricks for meta-training including gradient based, evolutionary strategies based, and by leveraging truncations.
The inner problem
We seek to train a learned optimizer to perform well on some task. In this demo notebook, we will define our task to be a single MLP trained on resized Fashion Mnist.
Data iterators
Data iterators are pretty standard, so we will not reinvent the wheel and use tensorflow datasets to create a python iterator which yields batches of data.
To keep meta-training fast, we will be working with with images resized to 8x8.
End of explanation
"""
class MLPTask:
    """The inner problem: a one-hidden-layer MLP classifier on 8x8 Fashion MNIST.

    Deliberately stateless -- parameters are created by `init` and passed
    explicitly into `loss`, following the functional JAX style.
    """

    def init(self, key):
        """Create initial parameters (w0, b0, w1, b1) for the MLP."""
        in_key, out_key = jax.random.split(key)
        hidden_bias = jnp.zeros([128])
        output_bias = jnp.ones([10])
        hidden_weights = jax.random.normal(in_key, [input_size, 128]) * 0.02
        output_weights = jax.random.normal(out_key, [128, 10]) * 0.02
        return (hidden_weights, hidden_bias, output_weights, output_bias)

    def loss(self, params, batch):
        """Mean softmax cross-entropy of the MLP over one batch."""
        w0, b0, w1, b1 = params
        images = batch["image"]
        flat = jnp.reshape(images, [images.shape[0], -1])
        hidden = jax.nn.relu(flat @ w0 + b0)
        logits = hidden @ w1 + b1
        targets = jax.nn.one_hot(batch["label"], 10)
        per_example = -jnp.sum(targets * jax.nn.log_softmax(logits), axis=-1)
        return jnp.mean(per_example)
# Sanity check: build one set of parameters and evaluate the loss on a batch
task = MLPTask()
key = jax.random.PRNGKey(0)
params = task.init(key)
task.loss(params, batch)
"""
Explanation: Inner problem loss function & initialization
Next, we must define the inner problem with which we seek to train.
One important note here is no parameters are stored in the task itself! See this jax tutorial for more information on this.
Our task will have 2 methods -- an init which constructs the initial values of the weights, and a loss which applies the MLP, and returns the average cross entropy loss.
End of explanation
"""
# Compile a function that returns both the loss and its gradient w.r.t. params
value_grad_fn = jax.jit(jax.value_and_grad(task.loss))
lr = 0.1
losses = []
params = task.init(key)
# get from environment variable so this notebook can be automatically tested.
num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 1000))
for i in range(num_steps):
    batch = next(data_iterator)
    loss, grads = value_grad_fn(params, batch)
    # Inline SGD: step each parameter against its gradient
    params = [p - lr * g for p, g in zip(params, grads)]
    losses.append(loss)
plt.plot(losses)
"""
Explanation: Inner training with SGD
With our newly defined model, let's train it with SGD.
End of explanation
"""
class SGD:
    """Plain stochastic gradient descent expressed as an optimizer object.

    The optimizer state is a 1-tuple holding just the parameters, so the
    interface matches stateful optimizers like Momentum and Adam.
    """

    def __init__(self, lr):
        self.lr = lr

    def init(self, params):
        """Wrap the initial parameters into an optimizer state."""
        return (params,)

    def update(self, opt_state, grads):
        """Take one SGD step: params <- params - lr * grads."""
        stepped = [p - self.lr * g for p, g in zip(opt_state[0], grads)]
        return (tuple(stepped),)
"""
Explanation: Optimizers
SGD is all fine and good, but it is often useful to abstract away the specific update rule. This abstraction has two methods: An init, which setups up the initial optimizer state, and an update which uses this state and gradients to produce some new state.
In the case of SGD, this state is just the parameter values.
End of explanation
"""
losses = []
opt = SGD(0.1)
opt_state = opt.init(task.init(key))
num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 1000))
for i in range(num_steps):
    batch = next(data_iterator)
    # Parameters live in slot 0 of the optimizer state
    loss, grads = value_grad_fn(opt_state[0], batch)
    opt_state = opt.update(opt_state, grads)
    losses.append(loss)
plt.plot(losses)
"""
Explanation: Instead of inlining SGD, we can now use our optimizer class.
End of explanation
"""
class Momentum:
    """Heavy-ball momentum optimizer.

    The state is (params, momentum) where momentum exponentially accumulates
    scaled gradients and parameters step against the accumulator.
    """

    def __init__(self, lr, decay=0.9):
        self.lr = lr
        self.decay = decay

    def init(self, params):
        """Pair the parameters with zero-initialized momentum buffers."""
        buffers = [jnp.zeros_like(p) for p in params]
        return (params, buffers)

    def update(self, state, grads):
        """Accumulate momentum, then step each parameter against it."""
        params, momentum = state
        new_momentum = [self.decay * m + self.lr * g for m, g in zip(momentum, grads)]
        new_params = [p - m for p, m in zip(params, new_momentum)]
        return (new_params, new_momentum)
"""
Explanation: Now, let's define some other optimizers. Momentum makes use of an additional accumulator variable. We can define it as follows.
End of explanation
"""
opt = Momentum(0.01)
params = task.init(key)
opt_state = opt.init(params)
# Delete the loose reference so all access goes through opt_state
del params
losses = []
num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 1000))
for i in range(num_steps):
    batch = next(data_iterator)
    loss, grads = value_grad_fn(opt_state[0], batch)
    opt_state = opt.update(opt_state, grads)
    losses.append(loss)
plt.plot(losses)
"""
Explanation: We can use this in our same training loop again. Here, the parameters are stored in the 0th entry of opt_state.
End of explanation
"""
class Adam:
    """The Adam optimizer (Kingma & Ba) with bias-corrected moment estimates.

    State layout: (params, step_count, first_moments, second_moments).
    """

    def __init__(self, lr, beta1=0.9, beta2=0.999, epsilon=1e-8):
        self.lr = lr
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def init(self, params):
        """Build the initial state: step 0 and zeroed moment accumulators."""
        first = tuple(jnp.zeros_like(p) for p in params)
        second = tuple(jnp.zeros_like(p) for p in params)
        return (tuple(params), jnp.asarray(0), first, second)

    @functools.partial(jax.jit, static_argnums=(0,))
    def update(self, state, grads):
        """Apply one bias-corrected Adam update."""
        params, step, first, second = state
        step = step + 1
        # Exponential moving averages of the gradient and its square
        first = tuple(
            self.beta1 * m + (1 - self.beta1) * g for m, g in zip(first, grads)
        )
        second = tuple(
            self.beta2 * v + (1 - self.beta2) * g * g for v, g in zip(second, grads)
        )
        # Bias correction undoes the zero initialization of the accumulators
        m_hat = [m / (1 - self.beta1**step) for m in first]
        v_hat = [v / (1 - self.beta2**step) for v in second]
        new_params = tuple(
            p - self.lr * m / (jnp.sqrt(v) + self.epsilon)
            for p, m, v in zip(params, m_hat, v_hat)
        )
        return (new_params, step, first, second)
"""
Explanation: And finally, we can implement Adam.
End of explanation
"""
class LOpt:
    """Per-parameter learned optimizer.

    The meta-parameters are the weights of a small 2-hidden-layer MLP. For every
    scalar parameter, the MLP maps the features (parameter value, momentum,
    gradient) to two outputs that are combined into a parameter update.
    """

    def __init__(self, decay=0.9):
        # Momentum decay used by the inner optimizer state.
        self.decay = decay
        self.hidden_size = 64

    def init_meta_params(self, key):
        """Initialize the learned optimizer weights -- in this case the weights of
        the per parameter mlp.
        """
        key1, key2 = jax.random.split(key)
        input_feats = 3  # parameter value, momentum value, and gradient value
        # the optimizer is a 2 hidden layer MLP.
        w0 = jax.random.normal(key1, [input_feats, self.hidden_size])
        b0 = jnp.zeros([self.hidden_size])
        w1 = jax.random.normal(key2, [self.hidden_size, 2])
        b1 = jnp.zeros([2])
        return (w0, b0, w1, b1)

    def initial_inner_opt_state(self, meta_params, params):
        """Return the inner optimizer state: (parameter values, momentum values)."""
        momentum = [jnp.zeros_like(p) for p in params]
        return tuple(params), tuple(momentum)

    @functools.partial(jax.jit, static_argnums=(0,))
    def update_inner_opt_state(self, meta_params, inner_opt_state, inner_grads):
        # FIX: the original wrote `"..." ""` (two adjacent string literals)
        # instead of a proper docstring; now a single docstring.
        """Perform 1 step of learning using the learned optimizer."""
        params, momentum = inner_opt_state
        # compute momentum
        momentum = [
            m * self.decay + (g * (1 - self.decay))
            for m, g in zip(momentum, inner_grads)
        ]

        def predict_step(features):
            """Predict the update for a single ndarray."""
            w0, b0, w1, b1 = meta_params
            outs = jax.nn.relu(features @ w0 + b0) @ w1 + b1
            # slice out the last 2 elements
            scale = outs[..., 0]
            mag = outs[..., 1]
            # Step = scale * 0.01 * exp(mag * 0.01).
            # NOTE(review): the accompanying text describes 0.001 scaling while
            # the code uses 0.01 -- kept as-is to preserve behavior; confirm.
            return scale * 0.01 * jnp.exp(mag * 0.01)

        out_params = []
        for p, m, g in zip(params, momentum, inner_grads):
            features = jnp.asarray([p, m, g])
            # transpose to have features dim last. The MLP will operate on this,
            # and treat the leading dimensions as a batch dimension.
            features = jnp.transpose(features,
                                     list(range(1, 1 + len(p.shape))) + [0])
            step = predict_step(features)
            out_params.append(p - step)
        return tuple(out_params), tuple(momentum)
"""
Explanation: Learned optimizers
A learned optimizer is simply an optimizer which is itself some function of meta-parameters. The actual function can be anything ranging from more fixed form, to more exotic with the meta-parameters encoding neural network weights.
Per parameter learned optimizers
The family of learned optimizer we will explore in this notebook is "per parameter". What this means, is that the update function operates on each parameter independently.
In our case, the learned optimizer will operate on the parameter value, the gradient value, and momentum. These values get fed into a neural network. This neural network produces 2 outputs: $a$, $b$. These outputs are combined to produce a change in the inner parameters:
$$\Delta w = 0.001 \cdot a \cdot \text{exp}(0.001 \cdot b)$$
We use this formulation, as opposed to simply outputting a direct value, as empirically it is easier to meta-train.
Choosing input parameterizations, and output parameterizations varies across learned optimizer architecture and paper.
End of explanation
"""
# Apply several randomly initialized learned optimizers to the target task.
# Most random inits are expected to train poorly or diverge.
lopt = LOpt()
for seed in range(5):
    losses = []
    key = jax.random.PRNGKey(seed)
    meta_params = lopt.init_meta_params(key)

    # Always train the same inner problem (seed 0) for comparability.
    key = jax.random.PRNGKey(0)
    params = task.init(key)
    opt_state = lopt.initial_inner_opt_state(meta_params, params)

    num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 1000))
    # FIX: use a distinct inner-loop variable; the original reused `i` for both
    # the outer and inner loops (shadowing, no behavior change).
    for step_idx in range(num_steps):
        batch = next(data_iterator)
        loss, grads = value_grad_fn(opt_state[0], batch)
        opt_state = lopt.update_inner_opt_state(meta_params, opt_state, grads)
        losses.append(loss)
    plt.plot(losses)
plt.ylim(0, 4)
plt.xlabel("inner step")
plt.ylabel("inner loss")
"""
Explanation: We can now randomly init the meta-parameters a few times and apply it to our target task and see what we get.
Unsurprisingly, our randomly initialized learned optimizer doesn't do all that well at training our target problem. Many of them even diverge / nan.
End of explanation
"""
# Meta-training setup: a fresh LOpt plus helpers that build unrolled batch
# sequences and score meta-parameters by average inner-training loss.
lopt = LOpt()

def get_batch_seq(seq_len):
    """Fetch `seq_len` batches and stack them along a new leading (time) dim."""
    batches = [next(data_iterator) for _ in range(seq_len)]
    # stack the data to add a leading dim.
    return {
        "image": jnp.asarray([b["image"] for b in batches]),
        "label": jnp.asarray([b["label"] for b in batches])
    }

@jax.jit
def meta_loss(meta_params, key, sequence_of_batches):
    """Mean inner-training loss over one unroll of the learned optimizer.

    Initializes a fresh inner problem, then applies the learned optimizer once
    per batch (scanned over the leading time dimension of the batch sequence).
    """
    def step(opt_state, batch):
        loss, grads = value_grad_fn(opt_state[0], batch)
        opt_state = lopt.update_inner_opt_state(meta_params, opt_state, grads)
        return opt_state, loss

    params = task.init(key)
    opt_state = lopt.initial_inner_opt_state(meta_params, params)
    # Iterate N times where N is the number of batches in sequence_of_batches
    opt_state, losses = jax.lax.scan(step, opt_state, sequence_of_batches)
    return jnp.mean(losses)

# Smoke-test the meta-loss on a 10-step unroll.
key = jax.random.PRNGKey(0)
meta_loss(meta_params, key, get_batch_seq(10))
"""
Explanation: Meta-loss: Measuring the performance of the learned optimizer.
Now we must define our measurement of performance for our learned optimizers. For this, we will define a meta_loss function. This function takes in as inputs the weights of the meta-parameters, initializes the weights of the inner-problem, and performs some number of steps of inner-training using a learned optimizer and the passed in meta-parameters. Each step we return the training loss, and use this average loss as the meta-loss. Depending on what we use, e.g. different unroll lengths, or different objectives (such as returning just loss at the end of training, or validation loss) we will get different behaving optimizers.
End of explanation
"""
# Compute meta-gradients: d(meta_loss)/d(meta_params) over a 10-step unroll.
key = jax.random.PRNGKey(0)
meta_value_grad_fn = jax.jit(jax.value_and_grad(meta_loss))
loss, meta_grad = meta_value_grad_fn(meta_params, key, get_batch_seq(10))
"""
Explanation: Meta-training with Gradients
Meta-training means training the weights of the learned optimizer to perform well in some setting. There are a lot of ways to do this optimization problem. We will run through a few different examples here.
One of the most conceptually simple way to meta-train is to do so with gradients. In particular, the gradients of the meta-loss with respect to the meta-parameters.
We will use our meta-loss and jax.value_and_grad to compute gradients. For this simple example, we will use the average training loss over 10 applications of the learned optimizer as our meta-loss.
End of explanation
"""
# Meta-train the learned optimizer with Adam on backprop meta-gradients.
meta_opt = Adam(0.001)
key = jax.random.PRNGKey(0)
meta_params = lopt.init_meta_params(key)
meta_opt_state = meta_opt.init(meta_params)
meta_losses = []
num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 300))
for i in range(num_steps):
    data = get_batch_seq(10)
    key1, key = jax.random.split(key)
    # meta_opt_state[0] holds the current meta-parameter values.
    loss, meta_grad = meta_value_grad_fn(meta_opt_state[0], key1, data)
    meta_losses.append(loss)
    if i % 20 == 0:
        print(onp.mean(meta_losses[-20:]))
    meta_opt_state = meta_opt.update(meta_opt_state, meta_grad)
plt.plot(meta_losses)
plt.xlabel("meta-iteration")
plt.ylabel("meta-loss")
"""
Explanation: We can use this meta-gradient, with Adam to update the weights of our learned optimizer.
End of explanation
"""
# Evaluate the meta-trained optimizer on 10 freshly initialized inner problems.
meta_params = meta_opt_state[0]
for j in range(10):
    losses = []
    key = jax.random.PRNGKey(j)
    params = task.init(key)
    opt_state = lopt.initial_inner_opt_state(meta_params, params)
    for i in range(10):
        batch = next(data_iterator)
        loss, grads = value_grad_fn(opt_state[0], batch)
        opt_state = lopt.update_inner_opt_state(meta_params, opt_state, grads)
        losses.append(loss)
    plt.plot(losses)
plt.ylim(1.0, 2.3)
plt.xlabel("inner-iteration")
plt.ylabel("loss")
"""
Explanation: Our meta-loss is decreasing which means our learned optimizer is learning to perform well on the meta-loss which means it is able to optimize our inner problem. Let's see what it learned to do by applying it to some target problem.
End of explanation
"""
def get_vec_batch_seq(vec_size, seq_len):
    """Build `vec_size` independent batch sequences, stacked on a new leading axis."""
    seqs = []
    for _ in range(vec_size):
        seqs.append(get_batch_seq(seq_len))
    stacked = {}
    for field in ("image", "label"):
        stacked[field] = jnp.asarray([s[field] for s in seqs])
    return stacked
def vectorized_meta_loss(meta_params, key, sequence_of_batches):
    """Average `meta_loss` over a batch of random keys and batch sequences."""
    vmapped = jax.vmap(meta_loss, in_axes=(None, 0, 0))
    per_sample_loss = vmapped(meta_params, key, sequence_of_batches)
    return jnp.mean(per_sample_loss)
# Sanity-check the vectorized meta-loss/gradient over 4 parallel inner problems.
vec_meta_loss_grad = jax.jit(jax.value_and_grad(vectorized_meta_loss))
vec_sec_batch = get_vec_batch_seq(4, 10)
keys = jax.random.split(key, 4)
loses, meta_grad = vec_meta_loss_grad(meta_params, keys, vec_sec_batch)
"""
Explanation: We can see our optimizer works, and is able to optimize for these first 10 steps.
Vectorization: Speeding up Meta-training
The above example, we are training a single problem instance for 10 iterations, and using this single training to compute meta-gradients. Oftentimes we seek to compute meta-gradients from more than one problem or to average over multiple random initializations / batches of data. To do this, we will leverage jax.vmap.
We will define a vectorized meta-loss, which computes the original meta_loss function in parallel, then averages the losses. We can then call jax.value_and_grad on this function to compute meta-gradients which are the average of these samples.
One big advantage to vectorizing in this way is to make better use of hardware accelerators. When training learned optimizers, we often apply them to small problems for speedy meta-training. These small problems can be a poor fit for the underlying hardware which often consists of big matrix multiplication units. What vectorization does is compute multiple of these small problems at the same time, which, depending on the details, can be considerably faster.
End of explanation
"""
# Meta-train as before, now averaging meta-gradients over 8 parallel problems.
meta_opt = Adam(0.001)
key = jax.random.PRNGKey(0)
meta_params = lopt.init_meta_params(key)
meta_opt_state = meta_opt.init(meta_params)
meta_losses = []
num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 200))
for i in range(num_steps):
    data = get_vec_batch_seq(8, 10)
    key1, key = jax.random.split(key)
    keys = jax.random.split(key1, 8)
    loss, meta_grad = vec_meta_loss_grad(meta_opt_state[0], keys, data)
    meta_losses.append(loss)
    if i % 20 == 0:
        print(onp.mean(meta_losses[-20:]))
    meta_opt_state = meta_opt.update(meta_opt_state, meta_grad)
plt.plot(meta_losses)
"""
Explanation: And now we can meta-train with this vectorized loss similarly to before.
End of explanation
"""
def antithetic_es_estimate(meta_params, key, seq_of_batches):
    """Estimate the ES gradient of `meta_loss` from one antithetic noise sample."""
    std = 0.001
    per_param_keys = jax.random.split(key, len(meta_params))
    noise = [
        std * jax.random.normal(k, p.shape)
        for k, p in zip(per_param_keys, meta_params)
    ]
    shifted_up = [p + eps for p, eps in zip(meta_params, noise)]
    shifted_down = [p - eps for p, eps in zip(meta_params, noise)]
    # Evaluate the meta-loss at both antithetic points with the same key/data.
    loss_up = meta_loss(shifted_up, key, seq_of_batches)
    loss_down = meta_loss(shifted_down, key, seq_of_batches)
    # Randomized finite difference: grad ~ eps * (L+ - L-) / (2 * sigma^2).
    scale = (loss_up - loss_down) / (2 * std**2)
    es_grads = [scale * eps for eps in noise]
    return (loss_up + loss_down) / 2.0, es_grads
@jax.jit
def vec_antithetic_es_estimate(meta_params, keys, vec_seq_batches):
    """Average antithetic ES gradient estimates over several random directions."""
    mapped = jax.vmap(antithetic_es_estimate, in_axes=(None, 0, 0))
    losses, grads = mapped(meta_params, keys, vec_seq_batches)
    mean_grads = [jnp.mean(g, axis=0) for g in grads]
    return jnp.mean(losses), mean_grads
# Smoke-test the ES estimator with 8 antithetic samples.
keys = jax.random.split(key, 8)
vec_sec_batch = get_vec_batch_seq(8, 10)
loss, es_grads = vec_antithetic_es_estimate(meta_params, keys, vec_sec_batch)
"""
Explanation: Evolutionary Strategies (ES): Meta-training without meta-gradients
Computing gradients through long optimization procedures can sometimes lead to chaotic dynamics, and result in exploding gradients. See https://arxiv.org/abs/1810.10180 and https://arxiv.org/abs/2111.05803 for more info.
An alternative is to leverage black box optimization techniques. A method we found that works well is evolutionary strategies with antithetic samples. This estimator can be thought of as a randomized finite difference. We sample a random direction in the meta-parameters, compute the meta-loss when shifting in this direction, and in the negative of this direction, and move in the direction which lowers the loss. The estimator can be written as:
$$\nabla_\theta = \mathbb{E}_{\epsilon \sim \mathcal{N}(0, I\sigma)} \dfrac{\epsilon}{2 \sigma ^2} (L(\theta + \epsilon) - L(\theta - \epsilon))$$
where $L$ is the meta-loss.
As before, we will construct a vectorized version of these estimators to average over a number of different random directions.
End of explanation
"""
# Meta-train with antithetic ES gradients (32 particles per meta-step).
meta_opt = Adam(0.003)
key = jax.random.PRNGKey(0)
meta_params = lopt.init_meta_params(key)
meta_opt_state = meta_opt.init(meta_params)
meta_losses = []
n_particles = 32
num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 200))
for i in range(num_steps):
    data = get_vec_batch_seq(n_particles, 10)
    key1, key = jax.random.split(key)
    keys = jax.random.split(key1, n_particles)
    loss, meta_grad = vec_antithetic_es_estimate(meta_opt_state[0], keys, data)
    meta_losses.append(loss)
    if i % 20 == 0:
        print(onp.mean(meta_losses[-20:]))
    meta_opt_state = meta_opt.update(meta_opt_state, meta_grad)
plt.plot(meta_losses)
"""
Explanation: We can use a similar meta-training procedure as before now with this new gradient estimator.
End of explanation
"""
def short_segment_unroll(meta_params,
                         key,
                         inner_opt_state,
                         on_iteration,
                         seq_of_batches,
                         inner_problem_length=100):
    """Run one truncated segment of inner training.

    Scans over `seq_of_batches`, resetting the inner problem whenever it has
    been trained for `inner_problem_length` steps. Returns the per-step losses
    plus the carried-over optimizer state and iteration counter, so the next
    segment can continue where this one stopped.
    """

    def step(scan_state, batch):
        opt_state, i, key = scan_state
        # If we have trained more than 100 steps, reset the inner problem.
        # NOTE(review): `key1` is unused; the reset branch consumes the freshly
        # split `key` passed as the cond operand.
        key1, key = jax.random.split(key)
        opt_state, i = jax.lax.cond(
            i >= inner_problem_length, lambda k:
            (lopt.initial_inner_opt_state(meta_params, task.init(k)), 0), lambda k:
            (opt_state, i + 1), key)
        loss, grads = value_grad_fn(opt_state[0], batch)
        opt_state = lopt.update_inner_opt_state(meta_params, opt_state, grads)
        # clip the loss to prevent diverging inner models
        loss = jax.lax.cond(
            jnp.isnan(loss), lambda loss: 3.0, lambda loss: jnp.minimum(loss, 3.0),
            loss)
        return (opt_state, i, key), loss

    (inner_opt_state, on_iteration,
     _), losses = jax.lax.scan(step, (inner_opt_state, on_iteration, key),
                               seq_of_batches)
    return losses, inner_opt_state, on_iteration
# Demo: run one 10-step segment from a fresh inner state; `on_iteration`
# reports how far into the (up to 100-step) inner problem we now are.
inner_opt_state = lopt.initial_inner_opt_state(meta_params, task.init(key))
batch = get_batch_seq(10)
loss, inner_opt_state, on_iteration = short_segment_unroll(
    meta_params, key, inner_opt_state, 0, batch)
on_iteration
"""
Explanation: Meta-training with Truncated backprop through time
In the previous meta-training examples, in the meta-loss we always initialized the inner-problem and apply the optimizer for some fixed number of steps.
This is fine for short inner-problem training times, it becomes costly for longer numbers of inner-iterations.
Truncated backprop through time, and more generally truncated meta-training techniques are one solution to this. The core idea is to split up one longer sequence into smaller chunks and compute meta-gradients only within a chunk. This allows one to compute gradients faster -- each chunk we get a gradient estimate, but these methods are generally biased as we ignore how the chunks interact with each other.
The code for this is a bit more involved. First, we need to keep track of each inner problem. In our case, this means keeping track of the inner problems optimizer state, as well as the current training iteration. Next, we must check if we are at the end of an inner-training. We fix the length of the inner training to be 100 for this example. We can then define a function (short_segment_unroll) which both progresses training by some number of steps,
and return the loss from that segment.
End of explanation
"""
def vec_short_segment_unroll(meta_params,
                             keys,
                             inner_opt_states,
                             on_iterations,
                             vec_seq_of_batches,
                             inner_problem_length=100):
    """Mean loss over a batch of truncated segments (vmapped unroll)."""
    unroll = jax.vmap(short_segment_unroll, in_axes=(None, 0, 0, 0, 0, None))
    losses, inner_opt_states, on_iterations = unroll(meta_params, keys,
                                                     inner_opt_states,
                                                     on_iterations,
                                                     vec_seq_of_batches,
                                                     inner_problem_length)
    return jnp.mean(losses), (inner_opt_states, on_iterations)
# Gradient of the truncated meta-loss; aux carries the updated inner states.
vec_short_segment_grad = jax.jit(
    jax.value_and_grad(vec_short_segment_unroll, has_aux=True))
"""
Explanation: Now with this function, we are free to estimate gradients over just this one short unroll rather than the full inner-training. We can use whatever gradient estimator we want -- either ES, or with backprop gradients -- but for now I will show an example with backprop gradients.
As before, we construct a vectorized version of this unroll function, and compute gradients with jax.value_and_grad.
End of explanation
"""
#num_tasks = 32
# Truncated backprop-through-time meta-training over 16 parallel tasks.
num_tasks = 16
key = jax.random.PRNGKey(1)
meta_params = lopt.init_meta_params(key)

def init_single_inner_opt_state(key):
    """Fresh inner optimizer state for one task (used under vmap)."""
    return lopt.initial_inner_opt_state(meta_params, task.init(key))

keys = jax.random.split(key, num_tasks)
inner_opt_states = jax.vmap(init_single_inner_opt_state)(keys)
# Randomly set the initial iteration to prevent the tasks from running in lock step.
on_iterations = jax.random.randint(key, [num_tasks], 0, 100)
meta_opt = Adam(0.0001)
meta_opt_state = meta_opt.init(meta_params)
meta_losses = []
num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 400))
for i in range(num_steps):
    data = get_vec_batch_seq(num_tasks, 10)
    key1, key = jax.random.split(key)
    keys = jax.random.split(key1, num_tasks)
    (loss, (inner_opt_states, on_iterations)), meta_grad = vec_short_segment_grad(
        meta_opt_state[0], keys, inner_opt_states, on_iterations, data)
    meta_losses.append(loss)
    if i % 20 == 0:
        print(i, onp.mean(meta_losses[-20:]))
    meta_opt_state = meta_opt.update(meta_opt_state, meta_grad)
plt.plot(meta_losses)
plt.xlabel("meta-iterations")
plt.ylabel("meta-loss")
"""
Explanation: We can then use this function to compute meta-gradients. Before doing that though, we must setup the initial state (parameter values and optimizer state) of the problems being trained.
End of explanation
"""
# Apply the truncation-meta-trained optimizer for a full 100 inner steps.
meta_params = meta_opt_state[0]
for j in range(10):
    losses = []
    key = jax.random.PRNGKey(j)
    params = task.init(key)
    opt_state = lopt.initial_inner_opt_state(meta_params, params)
    num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 100))
    for i in range(num_steps):
        batch = next(data_iterator)
        loss, grads = value_grad_fn(opt_state[0], batch)
        opt_state = lopt.update_inner_opt_state(meta_params, opt_state, grads)
        losses.append(loss)
    plt.plot(losses)
plt.ylim(0.0, 2.5)
"""
Explanation: Our meta-loss is going down which is great! There is a periodic behavior to the loss as we are averaging over different positions in inner-training.
For example, if we are averaging more samples from earlier in training, we will have higher loss.
We can now apply our optimizer for 100 steps. We can see that the resulting optimizer optimizes for ~50 steps, and then diverges. This is an indication that meta-training could have been more successful. One can improve this by meta-training for longer, or with different hyperparameters!
End of explanation
"""
@jax.jit
def vec_short_segment_es(meta_param,
                         keys,
                         inner_opt_state,
                         on_iterations,
                         vec_seq_of_batches,
                         std=0.01):
    """Truncated antithetic-ES gradient estimate, vmapped over inner problems.

    Each inner problem contributes one antithetic sample evaluated over a
    short truncated segment; losses and gradients are averaged across tasks.
    """

    # Compute an es estimate on a single inner-problem
    def do_one(meta_param, key, inner_opt_state, on_iteration, seq_of_batches):
        # Sample random noise of the same shape as meta-parameters
        flat_params, struct = jax.tree_flatten(meta_param)
        keys = [jax.random.fold_in(key, i) for i in range(len(flat_params))]
        keys = jax.tree_unflatten(struct, keys)
        perturbs = jax.tree_map(lambda k, v: jax.random.normal(k, v.shape) * std,
                                keys, meta_param)
        # compute positive and negative antithetic samples
        pos_theta = jax.tree_map(lambda eps, v: v + eps, perturbs, meta_param)
        neg_theta = jax.tree_map(lambda eps, v: v - eps, perturbs, meta_param)
        # Apply both of the antithetic samples
        p_losses, p_opt_state, p_on_iteration = short_segment_unroll(
            pos_theta,
            key,
            inner_opt_state,
            on_iteration,
            seq_of_batches,
            inner_problem_length=30)
        n_losses, n_opt_state, n_on_iteration = short_segment_unroll(
            neg_theta,
            key,
            inner_opt_state,
            on_iteration,
            seq_of_batches,
            inner_problem_length=30)
        p_loss = jnp.mean(p_losses)
        n_loss = jnp.mean(n_losses)
        # estimate gradient
        es_grad = jax.tree_map(lambda p: (p_loss - n_loss) * 1 / (2. * std) * p,
                               perturbs)
        # NOTE(review): only the positive-branch state is carried forward; the
        # negative-branch state is discarded. Presumably intentional -- confirm.
        return ((p_loss + n_loss) / 2.0, (p_opt_state, p_on_iteration)), es_grad

    (loss, inner_opt_state), es_grad = jax.vmap(
        do_one, in_axes=(None, 0, 0, 0, 0))(meta_param, keys, inner_opt_state,
                                            on_iterations, vec_seq_of_batches)
    # Gradient has an extra batch dimension here from the vmap -- reduce over this.
    return (jnp.mean(loss),
            inner_opt_state), jax.tree_map(lambda x: jnp.mean(x, axis=0), es_grad)
# Meta-train with truncated ES: 32 parallel inner problems, 30-step resets.
num_tasks = 32
key = jax.random.PRNGKey(1)
inner_opt_state = lopt.initial_inner_opt_state(meta_params, task.init(key))
batch = get_batch_seq(10)
meta_params = lopt.init_meta_params(key)

def init_single_inner_opt_state(key):
    """Fresh inner optimizer state for one task (used under vmap)."""
    return lopt.initial_inner_opt_state(meta_params, task.init(key))

keys = jax.random.split(key, num_tasks)
inner_opt_states = jax.vmap(init_single_inner_opt_state)(keys)
# Randomly set the initial iteration to prevent the tasks from running in lock step.
on_iterations = jax.random.randint(key, [num_tasks], 0, 30)
meta_opt = Adam(0.001)
meta_opt_state = meta_opt.init(meta_params)
meta_losses = []
num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 400))
for i in range(num_steps):
    data = get_vec_batch_seq(num_tasks, 10)
    key1, key = jax.random.split(key)
    keys = jax.random.split(key1, num_tasks)
    (loss, (inner_opt_states, on_iterations)), meta_grad = vec_short_segment_es(
        meta_opt_state[0], keys, inner_opt_states, on_iterations, data)
    meta_losses.append(loss)
    if i % 20 == 0:
        print(i, onp.mean(meta_losses[-20:]))
    meta_opt_state = meta_opt.update(meta_opt_state, meta_grad)
plt.plot(meta_losses)
plt.xlabel("meta-iterations")
plt.ylabel("meta-loss")
"""
Explanation: Meta-training truncated ES
Next, instead of meta-training with truncated gradients, we will meta-train with truncated evolution strategies.
End of explanation
"""
@jax.jit
def vec_short_segment_pes(meta_param,
                          keys,
                          pes_state,
                          vec_seq_of_batches,
                          std=0.01):
    """Persistent Evolution Strategies (PES) gradient estimate over a batch of
    truncated segments.

    `pes_state` carries, per task: the accumulated perturbations since the last
    inner-problem reset, the positive and negative antithetic optimizer states,
    and the current inner-iteration counter.
    """

    # Compute a pes estimate on a single inner-problem
    def do_one(meta_param, key, pes_state, seq_of_batches):
        accumulator, pos_opt_state, neg_opt_state, on_iteration = pes_state
        # Sample random noise of the same shape as meta-parameters
        flat_params, struct = jax.tree_flatten(meta_param)
        keys = [jax.random.fold_in(key, i) for i in range(len(flat_params))]
        keys = jax.tree_unflatten(struct, keys)
        perturbs = jax.tree_map(lambda k, v: jax.random.normal(k, v.shape) * std,
                                keys, meta_param)
        # compute positive and negative antithetic samples
        pos_theta = jax.tree_map(lambda eps, v: v + eps, perturbs, meta_param)
        neg_theta = jax.tree_map(lambda eps, v: v - eps, perturbs, meta_param)
        # Apply both of the antithetic samples
        p_losses, pos_opt_state, _ = short_segment_unroll(
            pos_theta,
            key,
            pos_opt_state,
            on_iteration,
            seq_of_batches,
            inner_problem_length=30)
        n_losses, neg_opt_state, next_on_iteration = short_segment_unroll(
            neg_theta,
            key,
            neg_opt_state,
            on_iteration,
            seq_of_batches,
            inner_problem_length=30)
        # estimate gradient. PES works by multiplying the loss difference by the
        # sum of previous perturbations.
        new_accum = jax.tree_map(lambda a, b: a + b, accumulator, perturbs)
        delta_losses = p_losses - n_losses
        unroll_length = p_losses.shape[0]
        # one unroll could span 2 problems, so we compute 2 different gradients --
        # one as if it was the previous trajectory, and one as if it was a previous
        # unroll and sum them.
        # NOTE(review): the 30 here must stay in sync with inner_problem_length.
        has_finished = (jnp.arange(unroll_length) + on_iteration) > 30
        last_unroll_losses = jnp.mean(delta_losses * (1.0 - has_finished), axis=0)
        new_unroll = jnp.mean(delta_losses * has_finished)
        es_grad_from_accum = jax.tree_map(
            lambda p: last_unroll_losses * 1 / (2. * std) * p, new_accum)
        es_grad_from_new_perturb = jax.tree_map(
            lambda p: new_unroll * 1 / (2. * std) * p, perturbs)
        es_grad = jax.tree_map(lambda a, b: a + b, es_grad_from_accum,
                               es_grad_from_new_perturb)

        # finally, we potentially reset the accumulator to the current perturbation
        # if we finished one trajectory.
        def _switch_one_accum(a, b):
            return jnp.where(has_finished[-1], a, b)

        # FIX: jax.tree_multimap was deprecated and removed from JAX;
        # jax.tree_map accepts multiple trees and is the drop-in replacement.
        new_accum = jax.tree_map(_switch_one_accum, perturbs, new_accum)
        next_pes_state = (new_accum, pos_opt_state, neg_opt_state,
                          next_on_iteration)
        return ((jnp.mean(p_losses) + jnp.mean(n_losses)) / 2.0,
                next_pes_state), es_grad

    (loss, pes_state), es_grad = jax.vmap(
        do_one, in_axes=(None, 0, 0, 0))(meta_param, keys, pes_state,
                                         vec_seq_of_batches)
    # Gradient has an extra batch dimension here from the vmap -- reduce over this.
    return (jnp.mean(loss),
            pes_state), jax.tree_map(lambda x: jnp.mean(x, axis=0), es_grad)
# Meta-train with PES. The PES state additionally carries the perturbation
# accumulator and separate positive/negative optimizer states per task.
num_tasks = 32
key = jax.random.PRNGKey(1)
inner_opt_state = lopt.initial_inner_opt_state(meta_params, task.init(key))
batch = get_batch_seq(10)
meta_params = lopt.init_meta_params(key)
# construct the initial PES state which is passed from iteration to iteration

def init_single_inner_opt_state(key):
    """Fresh inner optimizer state for one task (used under vmap)."""
    return lopt.initial_inner_opt_state(meta_params, task.init(key))

keys = jax.random.split(key, num_tasks)
inner_opt_states = jax.vmap(init_single_inner_opt_state)(keys)
accumulator = jax.tree_map(lambda x: jnp.zeros([num_tasks] + list(x.shape)),
                           meta_params)
# Randomly set the initial iteration to prevent the tasks from running in lock step.
on_iterations = jax.random.randint(key, [num_tasks], 0, 30)
pes_state = (accumulator, inner_opt_states, inner_opt_states, on_iterations)
meta_opt = Adam(0.0003)
meta_opt_state = meta_opt.init(meta_params)
meta_losses = []
num_steps = int(os.environ.get("LOPT_TRAIN_LENGTH", 400))
for i in range(num_steps):
    data = get_vec_batch_seq(num_tasks, 10)
    key1, key = jax.random.split(key)
    keys = jax.random.split(key1, num_tasks)
    (loss, pes_state), meta_grad = vec_short_segment_pes(meta_opt_state[0], keys,
                                                         pes_state, data)
    meta_losses.append(loss)
    if i % 20 == 0:
        print(i, onp.mean(meta_losses[-20:]))
    meta_opt_state = meta_opt.update(meta_opt_state, meta_grad)
plt.plot(meta_losses)
plt.xlabel("meta-iterations")
plt.ylabel("meta-loss")
"""
Explanation: Meta-training with truncations with less bias: Persistent Evolution Strategies (PES)
When training with truncated methods -- truncated backprop through time or truncated evolutionary strategies -- one cannot compute the effect of one truncated segment on other truncated segments. This introduces bias when working with longer sequences.
PES is one ES based algorithm to prevent such bias.
End of explanation
"""
|
ernestyalumni/cuBlackDream | examples/RModule.ipynb | mit | import numpy
import numpy as np
# Matrix dimensions for the demo: a is (m, k), b is (k, n), c is (m, n).
m=6
n=4
k=5
"""
Explanation: $R$-Module
Doing $R$-module (equipped with Hadamard operators) algebraic operations; addition, multiplication, and element-wise Hadamard operations
Certainly, we'd want to ensure, or at least evince, through examples that we can do the same algebraic operations, addition, multiplication, and some element-wise Hadamard operations (element-wise addition and multiplication).
End of explanation
"""
# Fill (k, m) row-major then transpose -> a has shape (m, k).
a = np.array(range(11,41)).reshape((k,m)).T
print(a)
# b has shape (k, n); c has shape (m, n).
b = np.array(range(11,31)).reshape((n,k)).T
print(b)
c = np.array(range(11,35)).reshape((n,m)).T
print(c)
# Matrix product: (m, k) @ (k, n) -> (m, n).
np.matmul(a,b)
np.diag(range(11,15))
np.ones(m*n)
# bias_broadcasted
np.matmul( np.ones(m*n).reshape((m,n)) , np.diag(range(11,15)) )
# a*b + bias_broadcasted
np.matmul(a,b) + np.matmul( np.ones(m*n).reshape((m,n)) , np.diag(range(11,15)) )
"""
Explanation: cf. Matrix computations
on the GPU
CUBLAS and MAGMA by example
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.21/_downloads/bc5044f9d3ef1d29067dd6b7d83ceed2/plot_20_visualize_epochs.ipynb | bsd-3-clause | import os
import mne
# Load the MNE sample dataset and keep only the first 120 s to save memory.
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                                    'sample_audvis_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False).crop(tmax=120)
"""
Explanation: Visualizing epoched data
This tutorial shows how to plot epoched data as time series, how to plot the
spectral density of epoched data, how to plot epochs as an imagemap, and how to
plot the sensor locations and projectors stored in :class:~mne.Epochs
objects.
:depth: 2
We'll start by importing the modules we need, loading the continuous (raw)
sample data, and cropping it to save memory:
End of explanation
"""
# Epoch the continuous data around each stimulus event (-0.2 s to +0.5 s).
events = mne.find_events(raw, stim_channel='STI 014')
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
              'visual/right': 4, 'face': 5, 'buttonpress': 32}
epochs = mne.Epochs(raw, events, tmin=-0.2, tmax=0.5, event_id=event_dict,
                    preload=True)
# The continuous data is no longer needed once epochs are extracted.
del raw
"""
Explanation: To create the :class:~mne.Epochs data structure, we'll extract the event
IDs stored in the :term:stim channel, map those integer event IDs to more
descriptive condition labels using an event dictionary, and pass those to the
:class:~mne.Epochs constructor, along with the :class:~mne.io.Raw data
and the desired temporal limits of our epochs, tmin and tmax (for a
detailed explanation of these steps, see tut-epochs-class).
End of explanation
"""
# Plot only "face" epochs, overlaying catch-trial (5) and button-press (32) events.
catch_trials_and_buttonpresses = mne.pick_events(events, include=[5, 32])
epochs['face'].plot(events=catch_trials_and_buttonpresses, event_id=event_dict,
                    event_colors=dict(buttonpress='red', face='blue'))
"""
Explanation: Plotting Epochs as time series
.. sidebar:: Interactivity in pipelines and scripts
To use the interactive features of the :meth:`~mne.Epochs.plot` method
when running your code non-interactively, pass the ``block=True``
parameter, which halts the Python interpreter until the figure window is
closed. That way, any channels or epochs that you mark as "bad" will be
taken into account in subsequent processing steps.
To visualize epoched data as time series (one time series per channel), the
:meth:mne.Epochs.plot method is available. It creates an interactive window
where you can scroll through epochs and channels, enable/disable any
unapplied :term:SSP projectors <projector> to see how they affect the
signal, and even manually mark bad channels (by clicking the channel name) or
bad epochs (by clicking the data) for later dropping. Channels marked "bad"
will be shown in light grey color and will be added to
epochs.info['bads']; epochs marked as bad will be indicated as 'USER'
in epochs.drop_log.
Here we'll plot only the "catch" trials from the sample dataset
<sample-dataset>, and pass in our events array so that the button press
responses also get marked (we'll plot them in red, and plot the "face" events
defining time zero for each epoch in blue). We also need to pass in
our event_dict so that the :meth:~mne.Epochs.plot method will know what
we mean by "buttonpress" — this is because subsetting the conditions by
calling epochs['face'] automatically purges the dropped entries from
epochs.event_id:
End of explanation
"""
# Load ECG projectors from disk and apply them to remove heartbeat artifacts.
ecg_proj_file = os.path.join(sample_data_folder, 'MEG', 'sample',
                             'sample_audvis_ecg-proj.fif')
ecg_projs = mne.read_proj(ecg_proj_file)
epochs.add_proj(ecg_projs)
epochs.apply_proj()
"""
Explanation: Plotting projectors from an Epochs object
In the plot above we can see heartbeat artifacts in the magnetometer
channels, so before we continue let's load ECG projectors from disk and apply
them to the data:
End of explanation
"""
# Show all projector topomaps with a shared color scale.
epochs.plot_projs_topomap(vlim='joint')
"""
Explanation: Just as we saw in the tut-section-raw-plot-proj section, we can plot
the projectors present in an :class:~mne.Epochs object using the same
:meth:~mne.Epochs.plot_projs_topomap method. Since the original three
empty-room magnetometer projectors were inherited from the
:class:~mne.io.Raw file, and we added two ECG projectors for each sensor
type, we should see nine projector topomaps:
End of explanation
"""
# Confirm every projector has already been applied ('active' == True).
print(all(proj['active'] for proj in epochs.info['projs']))
"""
Explanation: Note that these field maps illustrate aspects of the signal that have
already been removed (because projectors in :class:~mne.io.Raw data are
applied by default when epoching, and because we called
:meth:~mne.Epochs.apply_proj after adding additional ECG projectors from
file). You can check this by examining the 'active' field of the
projectors:
End of explanation
"""
# Visualize sensor positions, both in 3D and as a flat topomap.
epochs.plot_sensors(kind='3d', ch_type='all')
epochs.plot_sensors(kind='topomap', ch_type='all')
"""
Explanation: Plotting sensor locations
Just like :class:~mne.io.Raw objects, :class:~mne.Epochs objects
keep track of sensor locations, which can be visualized with the
:meth:~mne.Epochs.plot_sensors method:
End of explanation
"""
# Power spectral density of the EEG channels for auditory epochs.
epochs['auditory'].plot_psd(picks='eeg')
"""
Explanation: Plotting the power spectrum of Epochs
Again, just like :class:~mne.io.Raw objects, :class:~mne.Epochs objects
have a :meth:~mne.Epochs.plot_psd method for plotting the spectral
density_ of the data.
End of explanation
"""
# Scalp topography of band power (default bands, magnetometers, in dB).
epochs['visual/right'].plot_psd_topomap()
"""
Explanation: It is also possible to plot spectral estimates across sensors as a scalp
topography, using :meth:~mne.Epochs.plot_psd_topomap. The default
parameters will plot five frequency bands (δ, θ, α, β, γ), will compute power
based on magnetometer channels, and will plot the power estimates in
decibels:
End of explanation
"""
# Custom frequency bands: single frequencies or (low, high) ranges per subplot.
bands = [(10, '10 Hz'), (15, '15 Hz'), (20, '20 Hz'), (10, 20, '10-20 Hz')]
epochs['visual/right'].plot_psd_topomap(bands=bands, vlim='joint',
                                        ch_type='grad')
"""
Explanation: Just like :meth:~mne.Epochs.plot_projs_topomap,
:meth:~mne.Epochs.plot_psd_topomap has a vlim='joint' option for fixing
the colorbar limits jointly across all subplots, to give a better sense of
the relative magnitude in each band. You can change which channel type is
used via the ch_type parameter, and if you want to view different
frequency bands than the defaults, the bands parameter takes a list of
tuples, with each tuple containing either a single frequency and a subplot
title, or lower/upper frequency limits and a subplot title:
End of explanation
"""
epochs['auditory'].plot_image(picks='mag', combine='mean')
"""
Explanation: If you prefer untransformed power estimates, you can pass dB=False. It is
also possible to normalize the power estimates by dividing by the total power
across all frequencies, by passing normalize=True. See the docstring of
:meth:~mne.Epochs.plot_psd_topomap for details.
Plotting Epochs as an image map
A convenient way to visualize many epochs simultaneously is to plot them as
an image map, with each row of pixels in the image representing a single
epoch, the horizontal axis representing time, and each pixel's color
representing the signal value at that time sample for that epoch. Of course,
this requires either a separate image map for each channel, or some way of
combining information across channels. The latter is possible using the
combine parameter of the :meth:~mne.Epochs.plot_image method; the former
can be achieved with the :meth:~mne.Epochs.plot_image method (one channel
at a time) or with the :meth:~mne.Epochs.plot_topo_image method (all
sensors at once).
By default, the image map generated by :meth:~mne.Epochs.plot_image will be
accompanied by a scalebar indicating the range of the colormap, and a time
series showing the average signal across epochs and a bootstrapped 95%
confidence band around the mean. :meth:~mne.Epochs.plot_image is a highly
customizable method with many parameters, including customization of the
auxiliary colorbar and averaged time series subplots. See the docstrings of
:meth:~mne.Epochs.plot_image and mne.viz.plot_compare_evokeds (which is
used to plot the average time series) for full details. Here we'll show the
mean across magnetometers for all epochs with an auditory stimulus:
End of explanation
"""
epochs['auditory'].plot_image(picks=['MEG 0242', 'MEG 0243'])
epochs['auditory'].plot_image(picks=['MEG 0242', 'MEG 0243'], combine='gfp')
"""
Explanation: To plot image maps for individual sensors or a small group of sensors, use
the picks parameter. Passing combine=None (the default) will yield
separate plots for each sensor in picks; passing combine='gfp' will
plot the global field power (useful for combining sensors that respond with
opposite polarity).
End of explanation
"""
# Peak-to-peak rejection thresholds, one per channel type; epochs whose
# signal range exceeds any of these limits are dropped before plotting.
reject_criteria = dict(mag=3000e-15, # 3000 fT
                       grad=3000e-13, # 3000 fT/cm
                       eeg=150e-6) # 150 µV
epochs.drop_bad(reject=reject_criteria)
# One topo-image figure per MEG channel type (the two types are on different
# scales, so they get separate figures); white background with black text
# instead of the default dark theme.
for ch_type, title in dict(mag='Magnetometers', grad='Gradiometers').items():
    layout = mne.channels.find_layout(epochs.info, ch_type=ch_type)
    epochs['auditory/left'].plot_topo_image(layout=layout, fig_facecolor='w',
                                            font_color='k', title=title)
"""
Explanation: To plot an image map for all sensors, use
:meth:~mne.Epochs.plot_topo_image, which is optimized for plotting a large
number of image maps simultaneously, and (in interactive sessions) allows you
to click on each small image map to pop open a separate figure with the
full-sized image plot (as if you had called :meth:~mne.Epochs.plot_image on
just that sensor). At the small scale shown in this tutorial it's hard to see
much useful detail in these plots; it's often best when plotting
interactively to maximize the topo image plots to fullscreen. The default is
a figure with black background, so here we specify a white background and
black foreground text. By default :meth:~mne.Epochs.plot_topo_image will
show magnetometers and gradiometers on the same plot (and hence not show a
colorbar, since the sensors are on different scales) so we'll also pass a
:class:~mne.channels.Layout restricting each plot to one channel type.
First, however, we'll also drop any epochs that have unusually high signal
levels, because they can cause the colormap limits to be too extreme and
therefore mask smaller signal fluctuations of interest.
End of explanation
"""
layout = mne.channels.find_layout(epochs.info, ch_type='eeg')
epochs['auditory/left'].plot_topo_image(layout=layout, fig_facecolor='w',
font_color='k', sigma=1)
"""
Explanation: To plot image maps for all EEG sensors, pass an EEG layout as the layout
parameter of :meth:~mne.Epochs.plot_topo_image. Note also here the use of
the sigma parameter, which smooths each image map along the vertical
dimension (across epochs) which can make it easier to see patterns across the
small image maps (by smearing noisy epochs onto their neighbors, while
reinforcing parts of the image where adjacent epochs are similar). However,
sigma can also disguise epochs that have persistent extreme values and
maybe should have been excluded, so it should be used with caution.
End of explanation
"""
|
agile-geoscience/notebooks | To_build_a_better_wedge.ipynb | apache-2.0 | import numpy as np
% matplotlib inline
import matplotlib.pyplot as plt
"""
Explanation: To make a better wedge
This notebook is an update to the notebook entitled "To make a wedge" featured in the blog post, To make a wedge, on December 12, 2013.
Start by importing Numpy and Matplotlib's pyplot module in the usual way:
End of explanation
"""
from bruges.filters import ricker
"""
Explanation: Import the ricker wavelet function from bruges:
End of explanation
"""
from IPython.display import Image
"""
Explanation: Make a wedge
End of explanation
"""
Image('images/generic_wedge.png', width=600)
defaults = {'ta1':150, 'tb1':30, 'dta':50, 'dtb':50,
'xa1':100, 'xa2':100, 'dx':1,
'mint':0, 'maxt': 600, 'dt':1,
'minx':0, 'maxx': 500}
def make_upper_boundary(**kw):
    """Return the upper wedge interface as a 1-D profile.

    The profile is three concatenated segments: a flat shelf of width
    ``xa1``, a linear ramp that drops by ``dta`` over the remaining width,
    and a flat shelf of width ``xa2`` at the deeper level.  The whole
    profile is offset by ``ta1`` (the flat segments use np.ones, so they
    sit one unit below/above ``ta1``/``ta1 + dta``).
    """
    width = kw['maxx'] - kw['minx']
    top = kw['ta1']
    # Horizontal positions along the ramp (excludes the two flat shelves).
    ramp_x = np.arange(1, width - (kw['xa2'] + kw['xa1']), kw['dx'])
    slope = kw['dta'] / ramp_x[-1]
    flat_left = np.ones(int(kw['xa1'] / kw['dx']))
    flat_right = np.ones(int(kw['xa2'] / kw['dx']))
    ramp = slope * ramp_x
    return top + np.concatenate((flat_left, ramp, kw['dta'] + flat_right))
def make_lower_boundary(**kw):
    """Return the lower wedge interface as a 1-D profile.

    Mirrors make_upper_boundary but starts ``tb1`` deeper (at
    ``ta1 + tb1``) and ramps down by ``dta + dtb``, so the layer between
    the two interfaces thickens from ``tb1`` to ``tb1 + dtb`` across the
    ramp.
    """
    width = kw['maxx'] - kw['minx']
    top = kw['ta1'] + kw['tb1']
    # Horizontal positions along the ramp (excludes the two flat shelves).
    ramp_x = np.arange(1, width - (kw['xa2'] + kw['xa1']), kw['dx'])
    slope = (kw['dta'] + kw['dtb']) / ramp_x[-1]
    flat_left = np.ones(int(kw['xa1'] / kw['dx']))
    flat_right = np.ones(int(kw['xa2'] / kw['dx']))
    ramp = slope * ramp_x
    # ramp[-1] equals dta + dtb by construction of the slope.
    return top + np.concatenate((flat_left, ramp, ramp[-1] + flat_right))
def make_wedge(kwargs):
    """Return the (upper, lower) interface pair for one wedge geometry.

    ``kwargs`` is a parameter dict (see ``defaults``) forwarded unchanged
    to both boundary builders.
    """
    return make_upper_boundary(**kwargs), make_lower_boundary(**kwargs)
def plot_interfaces(ax, upper, lower, **kw):
    """Draw both wedge interfaces on *ax*: upper in red, lower in blue.

    The y axis is fixed to 0-600 and then inverted so depth/time increases
    downwards, as is conventional for seismic sections.  Only ``minx`` and
    ``maxx`` are read from the keyword parameters.
    """
    ax.plot(upper,'-r')
    ax.plot(lower,'-b')
    ax.set_ylim(0,600)
    ax.set_xlim(kw['minx'],kw['maxx'])
    ax.invert_yaxis()
upper, lower = make_wedge(defaults)
f = plt.figure()
ax = f.add_subplot(111)
plot_interfaces(ax, upper, lower, **defaults)
def make_meshgrid(**kw):
    """Return (xv, yv) coordinate grids spanning the model extent.

    The grids cover x in [minx, maxx-1) and t in [mint, maxt-1) with steps
    ``dx``/``dt``, using 'ij' indexing so both arrays have shape
    (n_x, n_t) with xv varying along axis 0 and yv along axis 1.
    """
    # NOTE: the original version also called make_wedge(defaults) here, but
    # the result was never used and the call read the module-level
    # ``defaults`` instead of the keyword arguments passed in, so the dead
    # call has been removed.
    t = np.arange(kw['mint'], kw['maxt']-1, kw['dt'])
    x = np.arange(kw['minx'], kw['maxx']-1, kw['dx'])
    xv, yv = np.meshgrid(x, t, sparse=False, indexing='ij')
    return xv, yv
xv, yv = make_meshgrid(**defaults)
conditions = {'upper': yv.T < upper,
'middle': (yv.T >= upper) & (yv.T <= lower),
'lower': yv.T > lower
}
labels = {'upper': 1, 'middle':2, 'lower': 3}
d = yv.T.copy()
for name, cond in conditions.items():
d[cond] = labels[name]
plt.imshow(d, cmap='copper')
vp = np.array([3300., 3200., 3300.])
rho = np.array([2600., 2550., 2650.])
AI = vp*rho
AI
model = d.copy()
model[model == 1] = AI[0]
model[model == 2] = AI[1]
model[model == 3] = AI[2]
def wvlt(f):
    """Return a Ricker wavelet with dominant frequency *f* Hz.

    Uses ``bruges.filters.ricker`` with a 0.512 s duration and a 1 ms
    sample interval (the two hard-coded constants below).
    """
    return ricker(0.512, 0.001, f)
def conv(a):
    """Convolve trace *a* with the current wavelet (same-length output).

    NOTE(review): this reads the module-level variable ``f`` (set in the
    plotting loop further down) rather than taking the frequency as a
    parameter, so the result depends on the value of ``f`` at call time.
    """
    return np.convolve(wvlt(f), a, mode='same')
plt.imshow(model, cmap='Spectral')
plt.colorbar()
plt.title('Impedances')
"""
Explanation: Let's make a more generic wedge that will handle any 3 layer case we want to make.
End of explanation
"""
# These are just some plotting parameters
rc_params = {'cmap':'RdBu',
'vmax':0.05,
'vmin':-0.05,
'aspect':0.75}
txt_params = {'fontsize':12, 'color':'black',
'horizontalalignment':'center',
'verticalalignment':'center'}
tx = [0.85*defaults['maxx'],0.85*defaults['maxx'],0.85*defaults['maxx']]
ty = [(defaults['ta1'] + defaults['dta'])/2,
defaults['ta1'] + defaults['dta'] + (defaults['dtb']/1.33),
defaults['maxt']-(defaults['maxt'] - defaults['ta1'] - defaults['dta'] - defaults['dtb'])/2]
rock_names = ['shale1', 'sand', 'shale2']
defaults['ta1'], defaults['dta'], defaults['dtb']/1.25
rc = (model[1:] - model[:-1]) / (model[1:] + model[:-1])
"""
Explanation: Plotting the synthetic
End of explanation
"""
freqs = np.array([7,14,21])
f, axs = plt.subplots(1,len(freqs), figsize=(len(freqs)*5,6))
for i, f in enumerate(freqs):
axs[i].imshow(np.apply_along_axis(conv, 0, rc), **rc_params)
[axs[i].text(tx[j], ty[j], rock_names[j], **txt_params) for j in range(3)]
plot_interfaces(axs[i], upper, lower, **defaults)
axs[i].set_ylim(defaults['maxt'],defaults['mint'])
axs[i].set_title( f'{f} Hz wavelet' )
axs[i].grid(alpha=0.5)
"""
Explanation: We can make use of the awesome apply_along_axis in Numpy to avoid looping over all the traces. https://docs.scipy.org/doc/numpy/reference/generated/numpy.apply_along_axis.html
End of explanation
"""
|
Caranarq/01_Dmine | Datasets/Pigoo/.ipynb_checkpoints/pigoo_desagregacion-checkpoint.ipynb | gpl-3.0 | # Librerias utilizadas
import pandas as pd
import sys
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from SUN.asignar_sun import asignar_sun
from SUN_integridad.SUN_integridad import SUN_integridad
from SUN.CargaSunPrincipal import getsun
# Configuracion del sistema
import sys; print('Python {} on {}'.format(sys.version, sys.platform))
print('Pandas version: {}'.format(pd.__version__))
import platform; print('Running on {} {}'.format(platform.system(), platform.release()))
"""
Explanation: AGREGACION DE DATOS DESDE EL DATASET DEL PROGRAMA DE INDICADORES DE GESTIÓN DE ORGANISMOS OPERADORES (PIGOO)
En este documento se describen las consideraciones que se tomaron para la agregación de datos desde este dataset para su uso en la PCCS.
End of explanation
"""
# Carga del dataset "CiudadesPIGOO_ClaveInegi.xlsx" al sistema
pigoo_inegi = r'D:\PCCS\01_Dmine\Datasets\Pigoo\CiudadesPIGOO_ClaveInegi.xlsx'
pigoo_inegi_df = pd.read_excel(pigoo_inegi, sheetname='OOAPAS-PIGOO', index_col=0,
dtype={'Clave-Estado-Inegi': str,
'Clave-Municipio-Inegi': str,
'Clave-Localidad-Inegi': str})
pigoo_inegi_df.head()
"""
Explanation: 1. Revisión y estandarización inicial al DataSet Pigoo
El 2 de octubre de 2017 se realizó por medio de correo electrónico, una solicitud de aclaracion al PIGOO del nivel de desagregación de los datos disponibles en su página (Ver archivo Aclaracion_desagregacion.pdf).
En respuesta a esta solicitud, el PIGOO proporcionó un archivo de excel (CiudadesPIGOO_ClaveInegi.xlsx) que incluye una clasificación de las ciudades del dataset PIGOO por clave geoestadística
End of explanation
"""
# Estandarizacion de clave geoestadistica municipal a 5 dígitos
pigoo_inegi_df['CVE_MUN'] = pigoo_inegi_df['Clave-Estado-Inegi'].map(str) + pigoo_inegi_df['Clave-Municipio-Inegi']
# Clasificación de acuerdo al Sistema Urbano Nacional
variables_SUN = ['CVE_MUN', 'NOM_MUN', 'CVE_SUN', 'NOM_SUN', 'TIPO_SUN', 'NOM_ENT']
pigoo_sun = asignar_sun(pigoo_inegi_df, vars=variables_SUN)
Columnas = ['Nombre- PIGOO', 'CVE_MUN', 'NOM_MUN', 'CVE_SUN', 'NOM_SUN', 'TIPO_SUN', 'NOM_ENT', 'Organismo Operador', 'Siglas', 'Loc-Inegi']
pigoo_sun[Columnas].head()
"""
Explanation: Gracias a que este dataset ya contiene etiquetas con claves geoestadísticas, es posible clasificarlas a su vez de acuerdo con el Sistema Urbano Nacional. Para hacer la clasificación, se utiliza un algoritmo elaborado previamente que identifica la clave geoestadística municipal de 5 dígitos ("CVE_MUN") en cada renglón y clasifica el municipio identificado asignándole la clave del Sistema Urbano Nacional ("CVE_SUN") a la que pertenece.
Es importante señalar las limitaciones de este algoritmo:
1. El algoritmo, además de clasificar los municipios, les asigna el nombre estándar como aparece en el marco geoestadístico del INEGI y el nombre de las ciudades como aparece en el Sistema Urbano Nacional.
2. Unicamente clasifica municipios que forma parte del Subsistema Principal del SUN. Desecha aquellos que no forman parte del Subsistema Principal.
3. Unicamente clasifica renglones cuya clave geoestadística municipal es de 5 dígitos ("CVE_MUN"). Esto es relevante pues en el dataset de entrada, la "Ciudad de México" únicamente cuentan con 2 dígitos de clave geoestadística
End of explanation
"""
sun = getsun()
sun.head()
"""
Explanation: 2. Ciudad de México
La ciudad de México está dividida en 16 delegaciones, cada una identificada por el INEGI con una clave geoestadística.
End of explanation
"""
pigoo_inegi_df.loc[42]
writer = pd.ExcelWriter(r'D:\PCCS\01_Dmine\Datasets\Pigoo\pigoo_notreviewd.xlsx')
pigoo_inegi_df[~ciudades_revisadas].to_excel(writer, sheet_name ='datos')
writer.close()
"""
Explanation: En el Dataset de PIGOO la Ciudad de México está considerada como una unidad monolítica
End of explanation
"""
|
Astrohackers-TW/IANCUPythonAdventure | notebooks/notebooks4HowtoSeries/how_to_cross-match_two_tables.ipynb | mit | table_a = pd.read_csv('files/table_A.csv')
table_b = pd.read_csv('files/table_B.csv')
table_a.head()
table_b.head()
print('Table A 有', len(table_a), '個樣本')
print('Table B 有', len(table_b), '個樣本')
ra_a = table_a['ra']
dec_a = table_a['dec']
ra_b = table_b['wise_ra']
dec_b = table_b['wise_dec']
"""
Explanation: 載入 tables
End of explanation
"""
t1_0 = time.time() # timer: start
idx1 = []
# Brute-force match: for each source in table A, compute the flat-sky
# angular distance (degrees) to every source in table B and keep the index
# of the nearest one if it lies within 1 arcsec; otherwise record -9999
# (no match).  Only the first row of table A is processed here, purely to
# extrapolate the total run time below.
for i in range(len(table_a[:1])):
    dist1 = []
    for j in range(len(table_b)):
        dist1.append(np.sqrt((ra_a[i] - ra_b[j])**2 + (dec_a[i] - dec_b[j])**2))
    if min(dist1) < (1. / 3600.):
        idx1.append(dist1.index(min(dist1)))
    else:
        idx1.append(-9999)
t1_1 = time.time() # timer: end
dt1 = t1_1 - t1_0
# Scale the one-row timing up to the full table.
dt1t = dt1 / (len(table_a[:1]) / len(table_a))
print('配對一個需要花費:', round(dt1, 2), '秒')
print('配對全部需要花費:', round(dt1t / 3600, 2), '小時')
print(idx1)
"""
Explanation: 方法一:簡單交叉配對 (Naive cross-match)
使用兩層 for 迴圈,並設定 1 角秒的配對距離
距離 $= \sqrt{(RA_{ai}-RA_{bi})^{2}+(Dec_{ai}-Dec_{bi})^{2}}$
idx 為 table_a 對應到 table_b 的 index
例如 table_a 的第1列配對到 table_b 的第53列,idx 就是 53
End of explanation
"""
t15_0 = time.time() # timer: start
idx15 = np.zeros_like(ra_a)
dist15 = np.zeros_like(ra_a)
# Vectorized over table B: one np.where per table-A row finds every B source
# within 1 arcsec.  The FIRST index inside the radius (ind[0], not
# necessarily the nearest) is recorded; -9999 marks "no match".  Only the
# first 5000 rows are processed, to extrapolate the total run time below.
for i in range(len(table_a[:5000])):
    ind = np.where(np.sqrt((ra_b - ra_a[i])**2 + (dec_b - dec_a[i])**2) < (1. / 3600.))[0]
    if ind.size == 0:
        idx15[i] = -9999
        dist15[i] = -9999
    else:
        idx15[i] = ind[0]
        dist15[i] = np.sqrt((ra_b[ind[0]] - ra_a[i])**2 + (dec_b[ind[0]] - dec_a[i])**2)
t15_1 = time.time() # timer: end
dt15 = t15_1 - t15_0
# Scale the 5000-row timing up to the full table.
dt15t = dt15 / (len(table_a[:5000]) / len(table_a))
print('配對5000個需要花費:', round(dt15, 2), '秒')
print('配對全部需要花費:', round(dt15t, 2), '秒')
print(idx15[:20])
"""
Explanation: 方法1.5:改用 Numpy array
End of explanation
"""
from astropy.coordinates import SkyCoord # astropy的座標套件
from astropy import units as u # astropy的單位套件
t2_0 = time.time() # timer: start
# Build SkyCoord objects from the RA/Dec columns (degrees).
ca = SkyCoord(ra_a*u.deg, dec_a*u.deg)
cb = SkyCoord(ra_b*u.deg, dec_b*u.deg)
# Nearest-neighbour match on the sphere, plus the RA/Dec offsets from each
# A source to its matched B source.
idx2, d2d, d3d = ca.match_to_catalog_sky(cb)
matches = cb[idx2]
dra, ddec = ca.spherical_offsets_to(matches)
# Keep only matches whose on-sky separation is below 5 arcsec.
idx2_new = idx2[d2d < 5 * u.arcsec]
t2_1 = time.time() # timer: end
dt2 = t2_1 - t2_0
print('配對全部需要花費:', round(dt2, 2), '秒')
print(idx2_new[:20])
"""
Explanation: 方法二:Astropy 交叉配對
End of explanation
"""
from astroML.crossmatch import crossmatch_angular
t3_0 = time.time() # 計時器_開始時間
# get imaging data
imX = np.empty((len(table_a), 2), dtype=np.float64)
imX[:, 0] = ra_a
imX[:, 1] = dec_a
# get standard stars
stX = np.empty((len(table_b), 2), dtype=np.float64)
stX[:, 0] = ra_b
stX[:, 1] = dec_b
# crossmatch catalogs
max_radius = 1. / 3600 # 1 arcsec
dist3, idx3 = crossmatch_angular(imX, stX, max_radius)
match = ~np.isinf(dist3)
# 將沒有 match 到的設為 -9999
idx3[~match] = -9999
# 配對距離 (單位:角秒)
dist_match = dist3[match]
dist_match *= 3600
t3_1 = time.time() # 計時器_結束時間
dt3 = t3_1 - t3_0
print('配對全部需要花費:', round(dt3, 2), '秒')
print(idx3[:20])
"""
Explanation: 方法三:Astroml 交叉配對
End of explanation
"""
print('方法一:簡單交叉配對 (Naive cross-match)\n\n', '\t需耗時:', round(dt1t / 3600, 2), '小時\n')
print('方法1.5:改用 numpy array\n\n', '\t需耗時:', round(dt15t, 2), '秒\n')
print('方法二:Astropy 交叉配對\n\n', '\t需耗時:', round(dt2, 2), '秒\n')
print('方法三:Astroml 交叉配對\n\n', '\t需耗時:', round(dt3, 2), '秒\n')
"""
Explanation: 運算時間
End of explanation
"""
|
adolfoguimaraes/machinelearning | Tensorflow/Tutorial02_MLP.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
def soma(x, y):
    """Return the sum of *x* and *y* ('soma' is Portuguese for 'sum')."""
    total = x + y
    return total
#Criando os dados de entrada (x = features e y = classes)
x_train = np.array([[2., 2.],[1., 3.],[2., 3.],[5., 3.],[7., 3.],[2., 4.],[3., 4.],[6., 4.],
[1., 5.],[2., .5],[5., 5.],[4., 6.],[6., 6.],[5., 7.]],dtype="float32")
y_train = np.array([[0., 0., 0., 1., 1., 0., 0., 1., 0., 0., 1., 1., 1., 1.]], dtype="float32")
#Mostrando o Gráfico
A = x_train[:, 0]
B = x_train[:, 1]
colormap = np.array(['r', 'k'])
# Plot the original data
plt.scatter(A, B, c=colormap[[0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1]], s=40)
plt.ylim([0,8]) # Limit the y axis size
plt.show()
soma(3, 2)
"""
Explanation: Tutorial 2 - MLP
O objetivo final deste tutorial é mostrar como podemos implementar uma MLP (Multilayer Perceptron) no Tensorflow. No entanto, para ajudar a entender um pouco como funciona o Tensorflow vamos implementar primeiro uma rede mais simples (a Perceptron, que possui uma camada apenas) e, em seguida, iremos implementar a MLP.
A implementção é baseada no Cap. 3 do livro do Redes Neurais Artificiais Para Engenharia e Ciências Aplicadas do professor Ivan Nunes e no tutorial Elementary Neural Networks with TensorFlow.
Rede Perceptron
A rede perceptron é a forma mais simples de configuração de uma rede neural artificial. A arquitetura da rede se aproxima daquela que foi apresentada no problema de regressão linear do Tutorial 1.
A imagem a seguir mostra a arquitetura da rede perceptron.
<img src="https://www.embarcados.com.br/wp-content/uploads/2016/09/Perceptron-01.png" width="50%" />
A rede é construída a partir de $n$ sinais de entrada e uma única saída, já que ela possui somente um neurônio. Mais detalhes de como a rede perceptron funciona, pode ser encontrado neste video:
A rede perceptron é utilizada em problemas que são ditos linearmente separáveis. Entende-se por esse tipo de problema aqueles que são compostos por dados que podem ser separados por uma função linear. Para isso, vamos criar um conjunto de dados que possuem tal característica. Como o propósito é só mostrar o funcionamento da rede, vamos criar um conjunto de dados sem nenhum próposito específico.
Os dados de entrada são constituídos de várias instâncias contendo duas variáveis cada ($x_1$ e $x_2$) e cada instância é classificada em 0 ou 1. Sendo assim, a tarefa da rede é aprender um modelo que seja capaz de separar estas duas classes. O código a seguir cria os dados e os exibem em um gráfico.
End of explanation
"""
# imports necessários
import tensorflow as tf
# Função de ativação
def output(u):
    """Step activation as a TensorFlow op: 1.0 where u > 0, else 0.0.

    NOTE(review): the surrounding text defines g(u) = 1 for u >= 0, but
    tf.greater is strict, so u == 0 maps to 0.0 here.
    """
    return tf.to_float(tf.greater(u, 0))
'''
Criação do array que representa o limiar. O limiar é inicializado com -1. Neste caso, o limiar representa um vetor
14x1, ou seja, é atribuído um limiar para cada valor de entrada (no caso, 14).
'''
limiar_array = np.ones([14,1], dtype="float32")*(-1)
limiar = tf.Variable(limiar_array, name="limiar")
'''
Criação da variável com pesos. Como estamos trabalhando com dois valores de entrada por instância, os pesos são
instanciados por um vetor 2x1
'''
weights = tf.Variable(tf.random_normal([2,1]), name="pesos")
# Placeholders para feed dos dados de entrada e saída
X = tf.placeholder(tf.float32, x_train.shape)
Y = tf.placeholder(tf.float32, y_train.shape)
# Modelo criado
u = tf.matmul(x_train,weights) + limiar
# Aplicação da função de ativação
output_value = output(u)
"""
Explanation: O próximo passo é criar a seguinte rede no Tensorflow.
<img src="https://www.embarcados.com.br/wp-content/uploads/2016/09/Perceptron-01.png" width="50%" />
Observe que a rede é composta por um conjunto de sinais de entrada ($x_{train} = [x_1, x_2, ..., x_n]$). Cada sinal é poderado por um peso w, dado por $weights = [w_1, w_2, ..., w_3]$ e somado por um limiar de ativação ($\theta$). Sendo assim, o neurônio é representado pela seguinte operação:
$u = \sum_{i=1}^{n}{w_i*x_i} + bias$
O valor inicial do $bias$ é dado por $-\theta$. Neste exemplo, $\theta = 1$.
O valor de $u$ é entrada para uma função de ativação ($g$) gerando o sinal de saída $y=g(u)$.
Nesse exemplo, a função de ativação é dada por:
$g(u) = 1$, se $u >= 0$
$g(u) = 0$, se $u < 0$
O código a seguir implementa esse modelo. Mais detalhes são dados nos comentários do código.
End of explanation
"""
error = tf.subtract(y_train.T, output_value)
mse = tf.reduce_sum(tf.square(error))
"""
Explanation: Modelo criado. A próxima etapa é definir como nosso modelo será treinado.
Este problema é uma tarefa de classificação. Cada instância vai ser classificada como 0 ou 1 de acordo com a classe que pertence. Sendo assim, o primeiro passo é comparar a saída com a classificação da base de treinamento. Para isso foi calculado o erro da seguinte forma:
$mse = \sum_{i = 1}^{N}{(y_i - output_i)^2}$
O objetivo do treinamento é reduzir esse erro. Isso é dado pelo código a seguir:
End of explanation
"""
learning_rate = 0.001
delta_w = tf.matmul(x_train, learning_rate*error, transpose_a=True)
delta_limiar = tf.matmul(limiar, learning_rate*error, transpose_a=True)
train_w = tf.assign(weights, tf.add(weights, delta_w))
train_limiar = tf.assign(limiar, tf.add(limiar, delta_limiar))
"""
Explanation: Um outro passo do treinamento é a atualização dos valores dos pesos e do limiar. Esses parâmetros são atualizados segundo fórmula descrita no livro do Ivan Nunes.
$w_{i}^{atual} = w_{i}^{anterior} + \eta (d^{(k)} - y).x_{i}^{(k)}$
$\theta_{i}^{atual} = \theta_{i}^{anterior} + \eta (d^{(k)} - y).(-1)$
onde:
$d^{(k)}$ é o valor desejado e $y$, o valor de saída produzido pela perceptron. Essa diferença é representada pelo que chamamos de erro no código anterior. $\eta$ é uma constante que define a taxa de aprendizagem da rede (no código, vamos referenciar $\eta$ por learning_rate).
End of explanation
"""
# Initialize the graph variables and run the training ops for 5000 steps;
# each sess.run applies one weight update and one threshold update, and also
# fetches the current MSE, weights and threshold.
init_op = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init_op)
for step in range(5000):
    _, _, a, b, c = sess.run([train_w, train_limiar, mse, weights, limiar], feed_dict={X: x_train, Y: y_train})
# b and c hold the values fetched on the final iteration.
print("Weights")
print(b)
print("Limiar", c[0][0])
"""
Explanation: Uma vez que criamos o modelo, vamos executar as operações para treina-lo.
End of explanation
"""
#Graphic display
import matplotlib.pyplot as plt
A = x_train[:, 0]
B = x_train[:, 1]
colormap = np.array(['r', 'k'])
# Plot the original data
plt.scatter(A, B, c=colormap[[0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1]], s=40)
ymin, ymax = plt.ylim()
# Calc the hyperplane (decision boundary)
ymin, ymax = plt.ylim()
w = b
a = -w[0] / w[1]
xx = np.linspace(ymin, ymax)
yy = a * xx - (c[0,0]) / w[1]
# Plot the hyperplane
plt.plot(xx,yy, 'k-')
plt.ylim([0,8]) # Limit the y axis size
plt.show()
"""
Explanation: O código a seguir apenas cria a função determinada pelos pesos e limiar achados pela rede e plota essa reta no gráfico dos dados mostrado anteriormente.
End of explanation
"""
# Carregando a base. Se a base não existir a pasta "dataset/MNIST" será criada e a base salva nesta pasta.
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("dataset/MNIST", one_hot=True)
"""
Explanation: Multilayer Perceptron
Uma rede perceptron multicamadas (Multilayer Perceptron - MLP) é caracterizada pela presença de pelo menos uma camada intermediária (escondida ou hidden layer) de neurônios, situada entre a camada de entrada e a respectiva camada neural de saída. Sendo assim, as MLP possuem pelo menos duas camadas de nurônios, o quais estarão distribuídos entre as camadas intermediárias e a camada de saída.
A figura a seguir ilustra este modelo.
<img src="https://elogeel.files.wordpress.com/2010/05/050510_1627_multilayerp1.png" />
Detalhes deste modelo podem ser encontrados no capítulo 6 do Deep Learning Book. Uma outra boa referência é o livro Redes Neurais Artificiais Para Engenharia e Ciências Aplicadas do professor Ivan Nunes. O tema é abordado no capítulo 5.
Para mostrar este modelo vamos utilizar o exemplo disponível em neste link com a base do MNIST para treinar o modelo criado.
Antes de começar a entrar em detalhes da rede, vamos baixar a base do MNIST que será utilizada. O MNIST é um dataset de dígitos escritos a mão. A tarefa consiste em dada uma imagem que representa um dígito escrito à mão classifica-la de acordo com o dígito que foi escrito. Detalhes da base podem ser encontrados neste link. Por ser uma base bastante utilizada, a API do tensorflow já possui a base em um pacote do framework.
End of explanation
"""
# Imports necessários
import tensorflow as tf
#Definição de parâmetros
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
#Parâmetros da rede
n_hidden_1 = 256 # Quantidade de features da primeira camada escondida
n_hidden_2 = 256 # Quantidade de features da segunda camada escondida
n_input = 784 # Dados de entrada no MNIST (28 * 28 = 784 (quantidade de pixels da imagem))
n_classes = 10 # Número total de classes no MNIST (dígitos de 0-9)
# Instanciação dos Input do Grafo no Tensorflow
x = tf.placeholder(tf.float32, [None, n_input]) # Irá armazenar os dados de entrada
y = tf.placeholder(tf.float32, [None, n_classes]) #Irá armazenar os dados de saída
"""
Explanation: Cada imagem do dataset possui o tamanho de 28x28 e representa um dígito escrito à mão. A imagem a seguir ilustra uma instância da base:
<img src="https://www.tensorflow.org/images/MNIST-Matrix.png" width="70%" />
As imagens vão ser transformadas em um vetor de 784 posições ($28*28$). A entrada da rede são vários vetores deste tipo. Cada vetor vai representar uma imagem. A saída da rede é definida por um vetor de 10 posições, onde cada posição representa uma possível classe do dígito (a base do MNIST trabalha com dígitos de 0 a 9).
Se considerarmos que a base de treinamento possui 55000 imagens, as imagens a seguir representam a entrada e saída da rede, respectivamente:
<img src="https://www.tensorflow.org/images/mnist-train-xs.png" width="50%" />
<img src="https://www.tensorflow.org/images/mnist-train-ys.png" width="50%"/>
A diferença desta representação para o modelo que será implementado aqui é que o nosso modelo será alimentado por batch. Uma breve explicação do que é batch no tensorflow pode ser encontrado neste link. Vamos utilizar um batch de 100.
Explicações dadas, vamos para o modelo que será implementado.
Jessica Yung em seu tutorial Explaining TensorFlow code for a Multilayer Perceptron faz uma imagem bem representativa do modelo que será implementado:
<img src="http://i0.wp.com/www.jessicayung.com/wp-content/uploads/2016/12/multilayer-perceptron-drawing.png" />
Uma questão importante no entendimento (e, consequentemente, na implementação) de qualquer modelo de rede neural é entender as dimensões dos dados ao passar por cada camada. A imagem anterior deixa isso bem claro. Por isso, vamos analisar camada por camada para que possamos entender como essas dimensões são modificadas. Na imagem, h1 e h2 são a quantidade de neurônios nas camadas intermediárias. A quantidade de neurônios de uma camada é que indica a dimensão da saída daquela camada. Outra informação importante é o tamanho do batch (já explicado anteriormente).
Com o batch igual a 100, a rede está recebendo como entrada uma matriz de 100x784, onde 784 é quantidade de pixel de cada imagem. Sendo assim, cada linha dessa matriz representa uma imagem da base de treinamento. Isso é passado para a primeira camada, onde será aplicada a seguinte operação $xW_1 + b_1$ onde, $W_1$ são os pesos de entrada e $b_1$, o bias. A imagem a seguir detalha esta operação juntamente com suas dimensões:
<img src="http://adolfo.data2learning.com/ludiico/images/mlp_dimensions1.png" width="70%" />
A saída da primeira camada é uma matriz 100x256, ou seja, 100 que representa a quantidade de instâncias que foram passadas na entrada e 256, a quantidade de neurônios. Ou seja, cada neurônio processou cada imagem e deu como resultado uma representação própria da entrada poderada pela operação definida. Ao resultado será aplicada uma função de ativação do tipo RELU (acesse o tutorial da Jessica Yung para ver detalhes do funcionamento deste tipo de função).
A entrada da segunda rede é uma matriz 100x256 (saída da camada anterior). As operações e dimensões da segunda camada são detalhadas na imagem a seguir:
<img src="http://adolfo.data2learning.com/ludiico/images/mlp_dimensions2.png" width="70%" />
Assim, como na primeira camada, a saída é uma matriz 100x256 que será aplicada uma função de atividação do tipo RELU. A camada de saída recebe os dados da segunda e gera como saída uma vetor que represente as 10 classes. Nesse caso, a saída será de 100x10, por conta do batch. Em outras palavras, estamos gerando um vetor que pesa cada possível classe para cada uma das 100 instâncias passadas como entrada. A imagem ilustra as operações e dimensões da camada de saída.
<img src="http://adolfo.data2learning.com/ludiico/images/mlp_dimensions3.png" width="70%" />
À saída da rede é aplicada a função Softmax que transforma os valores dos vetores em probabilidades. A posição que possuir o maior valor de probabilidade representa a classe à qual o dígito pertence.
Uma rápida explicação de como funciona a softmax pode ser encontrada neste vídeo.
Explicado o modelo, vamos para o código.
End of explanation
"""
def multilayer_perceptron(x, weights, biases):
    """Forward pass of a two-hidden-layer MLP.

    x: input batch of shape (batch, n_input).
    weights / biases: dicts keyed 'h1', 'h2', 'out' ('b1', 'b2', 'out')
    holding the per-layer parameters defined below this function.
    Returns the unscaled logits; softmax is applied later by the loss
    (tf.nn.softmax_cross_entropy_with_logits).
    """
    # First hidden layer with RELU activation
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    # Second hidden layer with RELU activation
    layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    # Output layer: linear (no activation) -- produces the class logits
    out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
    return out_layer
"""
Explanation: O modelo é implementado dentro da função multilayer_perceptron. Na função criamos cada camada de acordo com os dados passados. É muito importante que as dimensões das variáveis passadas tenham sido definidas corretamente.
End of explanation
"""
# Trainable parameters of the two-hidden-layer MLP, initialised from a
# standard normal distribution.
# NOTE(review): n_input, n_hidden_1, n_hidden_2 and n_classes are assumed
# to be defined in an earlier cell -- confirm their values before running.
# Weight matrices: one per hidden layer plus the output projection.
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
# Bias vectors, one per layer, same random initialisation.
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'b2': tf.Variable(tf.random_normal([n_hidden_2])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}
"""
Explanation: Os pesos e bias utilizados serão armazenados em dois dicionários: weights e biases.
End of explanation
"""
pred = multilayer_perceptron(x, weights, biases)
"""
Explanation: Construindo o modelo:
End of explanation
"""
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
"""
Explanation: Uma vez que o modelo foi criado, podemos treina-lo. O primeiro passo é definir como vai ser calculado o custo da solução e, em seguida, o método que será utilizado para otimizar o modelo. Três métodos são importantes nesta etapa:
tf.nn.softmax_cross_entropy_with_logits
tf.reduce_mean
tf.train.AdamOptimizer
Detalhes destes métodos podem ser encontrados nos links de cada método. Basicamente, a rede será executada e à saída será aplicada a função softmax para transformar a saída em um vetor de probabilidades. A posição do vetor com maior valor de probabilidade corresponde à classe que a entrada é classificada. Esse resultado é comparado com o resultado esperado em y (aprendizado supervisionado) e o custo é calculado. O treinamento será executado com o objetivo de minimizar este custo, ou seja, reduzir a taxa de erro.
End of explanation
"""
# Inicializa as variáveis
init = tf.global_variables_initializer()
# Executa o grafo que representa o modelo construído
with tf.Session() as sess:
sess.run(init)
'''
O ciclo de treinamento é chamado de épocas. Em cada época uma quantidade de dados de entrada (batch) é passada
como entrada para a rede. Ao final de cada época, os parâmetros são atualizados de acordo com o treinamento e novos
dados são dados como entrada.
'''
for epoch in range(training_epochs):
avg_cost = 0. #Armazena a média do custo calculado
total_batch = int(mnist.train.num_examples/batch_size) # Define o total de épocas: total da base / # batch
# Loop por cada batch
for i in range(total_batch):
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Executa as operações de otimização dos parâmetros (backprop) and custo (retorna o valor de erro)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x, y: batch_y})
# Calcula a média do erro
avg_cost += c / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
print("Fim do treinamento")
# Testa o modelo
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calcula a acurácia
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("Acurácia:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
"""
Explanation: O código a seguir executa a etapa de treinamento. Detalhes são dados ao decorrer do código.
End of explanation
"""
# Imports necessários
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
#Carrega a base de dados
pickle_file = 'dataset/notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# Formata os dados para as dimensões apropriadas (784)
image_size = 28
num_labels = 10
def reformat(dataset, labels, image_size=28, num_labels=10):
    """Flatten images and one-hot encode labels.

    dataset    -- array of shape (n, image_size, image_size)
    labels     -- integer class labels of shape (n,)
    image_size -- side length of each square image (default 28, as in
                  the notMNIST/MNIST data used in this notebook)
    num_labels -- number of classes for the one-hot encoding (default 10)

    Returns (dataset, labels) as float32 arrays of shapes
    (n, image_size**2) and (n, num_labels).

    The sizes used to be read from module-level globals; they are now
    keyword parameters with the same default values, so existing calls
    behave identically while other image/label sizes are supported.
    """
    dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
    # Map label 0 to [1.0, 0.0, 0.0, ...], 1 to [0.0, 1.0, 0.0, ...] (one-hot).
    labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
    return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
"""
Explanation: Base do notMNIST
A tarefa resolvida anteriormente é relativamente simples, já que a base é bem simples e já foi pré-processada com um propósito bem específico. Pensando nisso é que foi criada uma outra base (notMNIST) para o mesmo propósito: classificar dígitos, neste caso dígitos de A-Z. Apesar de parecer com o clássico dataset do MNIST, ele envolve uma tarefa mais complicada e os dados estão "menos limpos" do que os dados do MNIST. Para mais detalhes, acesse o link: http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html.
O primeiro passo é baixar o dataset. Detalhes de como baixa-lo estão disponíveis em:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/examples/udacity/1_notmnist.ipynb
Neste tutorial, assumimos que o dataset já foi baixado na pasta dataset/.
End of explanation
"""
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
train_dataset = train_dataset[:10000, :]
train_labels = train_labels[:10000]
new_pred = multilayer_perceptron(train_dataset, weights, biases)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=new_pred, labels=train_labels))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
"""
Explanation: Vamos utilizar o mesmo método definido anteriormente: multilayer_perceptron que recebe como parâmetro os dados de entrada e as variáveis para armazenar os bias e weights.
End of explanation
"""
# Inicializa as variáveis
training_epochs = 30
init = tf.global_variables_initializer()
# Executa o grafo que representa o modelo construído
with tf.Session() as sess:
sess.run(init)
'''
O ciclo de treinamento é chamado de épocas. Em cada época uma quantidade de dados de entrada (batch) é passada
como entrada para a rede. Ao final de cada época, os parâmetros são atualizados de acordo com o treinamento e novos
dados são dados como entrada.
'''
for epoch in range(training_epochs):
avg_cost = 0. #Armazena a média do custo calculado
total_batch = int(train_dataset.shape[0]/batch_size) # Define o total de épocas: total da base / # batch
# Loop por cada batch
for i in range(total_batch):
offset = (i * batch_size) % (train_labels.shape[0] - batch_size)
batch_x = train_dataset[offset:(offset+batch_size), :]
batch_y = train_labels[offset:(offset+batch_size), :]
# Executa as operações de otimização dos parâmetros (backprop) and custo (retorna o valor de erro)
_, c, prediction = sess.run([optimizer, cost, new_pred], feed_dict={x: batch_x, y: batch_y})
# Calcula a média do erro
avg_cost += c / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
# Acurácia de Treinamento
correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(train_labels, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("\tAcurácia Treinamento:", accuracy.eval({x: train_dataset, y: train_labels}))
# Acurácia de Validação
valid_prediction = multilayer_perceptron(valid_dataset, weights, biases)
correct_valid_prediction = tf.equal(tf.argmax(valid_prediction, 1), tf.argmax(valid_labels, 1))
accuracy_valid = tf.reduce_mean(tf.cast(correct_valid_prediction, "float"))
print("\tAcurácia Validação:", accuracy_valid.eval({x: valid_dataset, y: valid_labels}))
print("Fim do treinamento")
# Testa o modelo
test_prediction = multilayer_perceptron(test_dataset, weights, biases)
correct_test_prediction = tf.equal(tf.argmax(test_prediction, 1), tf.argmax(test_labels, 1))
accuracy_test = tf.reduce_mean(tf.cast(correct_test_prediction, "float"))
print("Acurácia:", accuracy_test.eval({x: test_dataset, y: test_labels}))
"""
Explanation: O passo seguinte é treinar o modelo. Observe que diferente do exemplo anterior, neste exemplo vamos trabalhar com a base de treinamento para treinar os dados, a base de validação para testar o modelo ao longo das iterações e ao final testa-lo na base de teste.
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.23/_downloads/775a4c9edcb81275d5a07fdad54343dc/channel_epochs_image.ipynb | bsd-3-clause | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
"""
Explanation: Visualize channel over epochs as an image
This will produce what is sometimes called an event related
potential / field (ERP/ERF) image.
Two images are produced, one with a good channel and one with a channel
that does not show any evoked field.
It is also demonstrated how to reorder the epochs using a 1D spectral
embedding as described in :footcite:GramfortEtAl2010.
End of explanation
"""
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
event_id, tmin, tmax = 1, -0.2, 0.4
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# Create epochs, here for gradiometers + EOG only for simplicity
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=('grad', 'eog'), baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
"""
Explanation: Set parameters
End of explanation
"""
# and order with spectral reordering
# If you don't have scikit-learn installed set order_func to None
from sklearn.manifold import spectral_embedding # noqa
from sklearn.metrics.pairwise import rbf_kernel # noqa
def order_func(times, data):
    """Order epochs by a 1-D spectral embedding of their 0-350 ms window.

    times -- 1-D array of sample times (seconds)
    data  -- 2-D array (n_epochs, n_times) for a single channel

    Returns the epoch indices sorted along the first spectral-embedding
    component computed on an RBF-kernel affinity of the L2-normalised
    post-stimulus responses.
    """
    # Restrict to the evoked window and L2-normalise each epoch.
    window = (times > 0.0) & (times < 0.350)
    selected = data[:, window]
    selected /= np.sqrt(np.sum(selected ** 2, axis=1))[:, np.newaxis]
    # Embed into one dimension and sort the epochs along it.
    affinity = rbf_kernel(selected, gamma=1.)
    embedding = spectral_embedding(affinity, n_components=1, random_state=0)
    return np.argsort(embedding.ravel())
good_pick = 97 # channel with a clear evoked response
bad_pick = 98 # channel with no evoked response
# We'll also plot a sample time onset for each trial
plt_times = np.linspace(0, .2, len(epochs))
plt.close('all')
mne.viz.plot_epochs_image(epochs, [good_pick, bad_pick], sigma=.5,
order=order_func, vmin=-250, vmax=250,
overlay_times=plt_times, show=True)
"""
Explanation: Show event-related fields images
End of explanation
"""
|
Upward-Spiral-Science/uhhh | code/Graph Analysis/Delaunay.ipynb | apache-2.0 | import csv
from scipy.stats import kurtosis
from scipy.stats import skew
from scipy.spatial import Delaunay
import numpy as np
import math
import skimage
import matplotlib.pyplot as plt
import seaborn as sns
from skimage import future
import networkx as nx
from ragGen import *
%matplotlib inline
sns.set_color_codes("pastel")
from scipy.signal import argrelextrema
# Read in the data
data = open('../../data/data.csv', 'r').readlines()
fieldnames = ['x', 'y', 'z', 'unmasked', 'synapses']
reader = csv.reader(data)
reader.next()
rows = [[int(col) for col in row] for row in reader]
# These will come in handy later
sorted_x = sorted(list(set([r[0] for r in rows])))
sorted_y = sorted(list(set([r[1] for r in rows])))
sorted_z = sorted(list(set([r[2] for r in rows])))
"""
Explanation: Delaunay
Here, we'll perform various analysis by constructing graphs and measure properties of those graphs to learn more about the data
End of explanation
"""
a = np.array(rows)
b = np.delete(a, np.s_[3::],1)
# Separate layers - have to do some wonky stuff to get this to work
b = sorted(b, key=lambda e: e[1])
b = np.array([v.tolist() for v in b])
b = np.split(b, np.where(np.diff(b[:,1]))[0]+1)
"""
Explanation: We'll start with just looking at analysis in euclidian space, then thinking about weighing by synaptic density later. Since we hypothesize that our data will show that tissue varies as we move down the y-axis (z-axis in brain) through cortical layers, an interesting thing to do would be compare properties of the graphs on each layer (ie how does graph connectivity vary as we move through layers).
Let's start by triangulating our data. We'll use Delaunay on each y layer first. Putting our data in the right format for doing graph analysis...
End of explanation
"""
graphs = []
centroid_list = []
for layer in b:
centroids = np.array(layer)
# get rid of the y value - not relevant anymore
centroids = np.delete(centroids, 1, 1)
centroid_list.append(centroids)
graph = Delaunay(centroids)
graphs.append(graph)
"""
Explanation: Now that our data is in the right format, we'll create 52 delaunay graphs. Then we'll perform analyses on these graphs. A simple but useful metric would be to analyze edge length distributions in each layer.
End of explanation
"""
def get_d_edge_length(edge):
    """Return the Euclidean length of a 2-D edge.

    edge -- pair of endpoints ((x1, y1), (x2, y2))

    Uses math.hypot instead of sqrt(dx**2 + dy**2): same result for
    ordinary inputs, but robust against intermediate overflow/underflow
    when the coordinate differences are very large or very small.
    """
    (x1, y1), (x2, y2) = edge
    return math.hypot(x2 - x1, y2 - y1)
# Per-layer distributions of Delaunay edge lengths and triangle areas.
# Index 0 is left as an empty placeholder, preserving the original layout
# where downstream cells index these lists starting from 1.
edge_length_list = [[]]
tri_area_list = [[]]
# Pair each triangulation with the centroids of ITS OWN layer.  Previously
# the loop indexed the leftover `centroids` variable from the construction
# cell, so every graph was measured against the LAST layer's points.
for layer_centroids, del_graph in zip(centroid_list, graphs):
    tri_areas = []
    edge_lengths = []
    triangles = []
    for t in layer_centroids[del_graph.simplices]:
        triangles.append(t)
        # Local names v1/v2/v3 avoid clobbering the module-level `b`
        # (the per-layer data) and `c` used by other cells.
        v1, v2, v3 = [tuple(map(int, list(v))) for v in t]
        edge_lengths.append(get_d_edge_length((v1, v2)))
        edge_lengths.append(get_d_edge_length((v1, v3)))
        edge_lengths.append(get_d_edge_length((v2, v3)))
        try:
            tri_areas.append(float(Triangle(v1, v2, v3).area))
        except Exception:
            # Degenerate (e.g. collinear) triangles have no usable area.
            continue
    edge_length_list.append(edge_lengths)
    tri_area_list.append(tri_areas)
"""
Explanation: We're going to need a method to get edge lengths from 2D centroid pairs
End of explanation
"""
np.subtract(centroid_list[0], centroid_list[1])
"""
Explanation: Realizing after all this that simply location is useless. We know the voxels are evenly spaced, which means our edge length data will be all the same. See that the "centroids" are no different:
End of explanation
"""
# Dense 3-D volume of synapse counts indexed by (x, y, z) grid position.
real_volume = np.zeros((len(sorted_x), len(sorted_y), len(sorted_z)))
for r in rows:
    # r = [x, y, z, unmasked, synapses]; r[-1] is the synapse count.
    real_volume[sorted_x.index(r[0]), sorted_y.index(r[1]), sorted_z.index(r[2])] = r[-1]
# Build one networkx graph per y-layer.
nx_graphs = []
for layer in b:
    # NOTE(review): `graph` here is the leftover loop variable from the
    # Delaunay construction cell (the LAST layer's triangulation), so every
    # layer receives the same graph -- this probably should be built from
    # `layer` or the matching entry of `graphs`; confirm intent.
    G = nx.Graph(graph)
    nx_graphs.append(G)
for graph in graphs:
    plt.figure()
    # NOTE(review): `graphs` holds scipy.spatial.Delaunay objects, not
    # networkx graphs -- verify nx.draw actually renders these as intended.
    nx.draw(graph, node_size=100)
"""
Explanation: There is no distance between the two. Therefore it is perhaps more useful to consider a graph that considers node weights. Voronoi is dual to Delaunay, so that's not much of an option. We want something that considers both spacial location and density similarity.
Drawing Graphs
First we look at the default networkx graph plotting:
End of explanation
"""
num_self_loops = []
for rag in y_rags:
num_self_loops.append(rag.number_of_selfloops())
num_self_loops
"""
Explanation: This is using the spring layout, so we're losing positional information. We can improve the plot by adding position information.
Self Loops
End of explanation
"""
# y_rags[0].adjacency_list()
"""
Explanation: Interesting. There are no self loops. Why would this be? Let's come back to this. In the meantime, I want to give some though to what it means to have a self loop, whether it should be theoretically possible given our data, and whether our graphs are formed properly.
The answer to this question is very simple. In a RAG, there are no self-loops by definition. Self loops are edges that form a connection between a node and itself.
<img src="../../docs/figures/selfloop.png" width="100">
To see whether the graphs are formed properly, let's look at an adjacency lists:
End of explanation
"""
# Test Data
test = np.array([[1,2],[3,4]])
test_rag = skimage.future.graph.RAG(test)
test_rag.adjacency_list()
"""
Explanation: Compare that to the test data:
End of explanation
"""
real_volume_x = np.zeros((len(sorted_x), len(sorted_y), len(sorted_z)))
for r in rows:
real_volume_x[ sorted_x.index(r[0]), sorted_y.index(r[1]), sorted_z.index(r[2])] = r[-1]
x_rags = []
count = 0;
for layer in real_volume_x:
count = count + 1
x_rags.append(skimage.future.graph.RAG(layer))
num_edges_x = []
for rag in x_rags:
num_edges_x.append(rag.number_of_edges())
sns.barplot(x=range(len(num_edges_x)), y=num_edges_x)
sns.plt.show()
"""
Explanation: X-Layers
End of explanation
"""
plt.imshow(np.amax(real_volume, axis=2), interpolation='nearest')
plt.show()
# edge_length_list[3]
# tri_area_list[3]
# triangles
# Note for future
# del_features['d_edge_length_mean'] = np.mean(edge_lengths)
# del_features['d_edge_length_std'] = np.std(edge_lengths)
# del_features['d_edge_length_skew'] = scipy.stats.skew(edge_lengths)
# del_features['d_edge_length_kurtosis'] = scipy.stats.kurtosis(edge_lengths)
"""
Explanation: We can see here the number of edges is low in that area that does not have many synapses. It, as expected, mirrors the distribution of synapses. It appears to be approximately uniform at the top, with buffers of very few synapses on the sides. Remember from here:
End of explanation
"""
|
rdhyee/diversity-census-calc | 03_01_Geographical_Hierarchies.ipynb | apache-2.0 | # YouTube video I made on how to use the American Factfinder site to look up addresses
from IPython.display import YouTubeVideo
YouTubeVideo('HeXcliUx96Y')
# standard numpy, pandas, matplotlib imports
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame, Series, Index
import pandas as pd
# check that CENSUS_KEY is defined
import census
import us
import requests
import settings
assert settings.CENSUS_KEY is not None
"""
Explanation: Goal
For background, see Mapping Census Data, including the
scan of the 10-question form. Keep in mind what people were asked and the range of data available in the census.
Using the census API to get an understanding of some of the geographic entities in the 2010 census. We'll specifically be using the variable P0010001, the total population.
What you will do in this notebook:
Sum the population of the states (or state-like entity like DC) to get the total population of the nation
Add up the counties for each state and validate the sums
Add up the census tracts for each county and validate the sums
We will make use of pandas in this notebook.
I often have the following diagram in mind to help understand the relationship among entities. Also use the list of example URLs -- it'll come in handy.
<a href="http://www.flickr.com/photos/raymondyee/12297467734/" title="Census Geographic Hierarchies by Raymond Yee, on Flickr"><img src="http://farm4.staticflickr.com/3702/12297467734_af8882d310_c.jpg" width="618" height="800" alt="Census Geographic Hierarchies"></a>
Working out the geographical hierarchy for Cafe Milano
It's helpful to have a concrete instance of a place to work with, especially when dealing with rather intangible entities like census tracts, block groups, and blocks. You can use the American FactFinder site to look up for any given US address the corresponding census geographies.
Let's use Cafe Milano in Berkeley as an example. You can verify the following results by typing in the address into http://factfinder2.census.gov/faces/nav/jsf/pages/searchresults.xhtml?refresh=t.
https://www.evernote.com/shard/s1/sh/dc0bfb96-4965-4fbf-bc28-c9d4d0080782/2bd8c92a045d62521723347d62fa2b9d
2522 Bancroft Way, BERKELEY, CA, 94704
State: California
County: Alameda County
County Subdivision: Berkeley CCD, Alameda County, California
Census Tract: Census Tract 4228, Alameda County, California
Block Group: Block Group 1, Census Tract 4228, Alameda County, California
Block: Block 1001, Block Group 1, Census Tract 4228, Alameda County, California
End of explanation
"""
c = census.Census(key=settings.CENSUS_KEY)
"""
Explanation: The census documentation has example URLs but needs your API key to work. In this notebook, we'll use the IPython notebook HTML display mechanism to help out.
End of explanation
"""
# call the API and instantiate `df`
df = DataFrame(c.sf1.get('NAME,P0010001', geo={'for':'state:*'}))
# convert the population column to integer -- use the builtin `int`:
# `np.int` was merely an alias for it, deprecated in NumPy 1.20 and
# removed in NumPy 1.24, so astype(np.int) breaks on modern NumPy.
df['P0010001'] = df['P0010001'].astype(int)
df.head()
states_df = df[df['NAME'] != 'Puerto Rico']
'a' in ['a', 'b']
"""
Explanation: Note: we can use c.sf1 to access 2010 census (SF1: Census Summary File 1 (2010, 2000, 1990) available in API -- 2010 is the default)
see documentation: sunlightlabs/census
Summing up populations by state
Let's make a DataFrame named states_df with columns NAME, P0010001 (for population), and state (to hold the FIPS code). Make sure to exclude Puerto Rico.
End of explanation
"""
states_fips = np.array([state.fips for state in us.states.STATES])
states_df = df[np.in1d(df.state,states_fips)]
"""
Explanation: You can filter Puerto Rico (PR) in a number of ways -- use the way you're most comfortable with.
Optional fun: filter PR in the following way
calculate a np.array holding the the fips of the states
then use numpy.in1d, which is a analogous to the in operator to test membership in a list
End of explanation
"""
# check that we have three columns
assert set(states_df.columns) == set((u'NAME', u'P0010001', u'state'))
# check that the total 2010 census population is correct
assert np.sum(states_df.P0010001) == 308745538
# check that the number of states+DC is 51
assert len(states_df) == 51
"""
Explanation: If states_df is calculated properly, the following asserts will pass silently.
End of explanation
"""
# Here's a way to use translate
# http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*
# into a call using the census.Census object
r = c.sf1.get('NAME,P0010001', geo={'for':'county:*'})
# ask yourself what len(r) means and what it should be
len(r)
# let's try out one of the `census` object convenience methods
# instead of using `c.sf1.get`
r = c.sf1.state_county('NAME,P0010001',census.ALL,census.ALL)
r
# convert the json from the API into a DataFrame
# coerce to integer the P0010001 column
df = DataFrame(r)
df['P0010001'] = df['P0010001'].astype('int')
# display the first records
df.head()
# calculate the total population
# what happens when you google the number you get?
np.sum(df['P0010001'])
# often you can use dot notation to access a DataFrame column
df.P0010001.head()
# let's filter out PR -- what's the total population now
sum(df[np.in1d(df.state, states_fips)].P0010001)
# fall back to non-Pandas solution if you need to
np.sum([int(county['P0010001']) for county in r if county['state'] in states_fips])
# construct counties_df with only 50 states + DC
#counties_df = df[np.in1d(df.state, states_fips)]
counties_df = df.loc[np.in1d(df.state, states_fips)].copy()
len(counties_df)
set(counties_df.columns) == set(df.columns)
"""
Explanation: Counties
Looking at http://api.census.gov/data/2010/sf1/geo.html, we see
state-county: http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*
if we want to grab all counties in one go, or you can grab counties state-by-state:
http://api.census.gov/data/2010/sf1?get=P0010001&for=county:*&in=state:06
for all counties in the state with FIPS code 06 (which is what state?)
End of explanation
"""
# number of counties
assert len(counties_df) == 3143 #3143 county/county-equivs in US
# check that the total population by adding all counties == population by adding all states
assert np.sum(counties_df['P0010001']) == np.sum(states_df.P0010001)
# check we have same columns between counties_df and df
set(counties_df.columns) == set(df.columns)
"""
Explanation: Check properties of counties_df
End of explanation
"""
# take a look at the current structure of counties_df
counties_df.head()
states_df.head()
# reindex states_df by state FIPS
# http://pandas.pydata.org/pandas-docs/dev/generated/pandas.DataFrame.set_index.html
states_df.set_index(keys='state', inplace=True)
states_df.head()
states_df.columns
# display the result of using set_index
counties_df.head()
# df.loc[np.in1d(df.state, states_fips), 'FIPS'] = counties_df.apply(lambda s:s['state']+s['county'], axis=1)
counties_df['FIPS'] = counties_df.apply(lambda s:s['state']+s['county'], axis=1)
df[np.in1d(df.state, states_fips)].head()
counties_df.head()
def double(x):
    """Return the input multiplied by two (demo callable for Series.apply)."""
    result = x * 2
    return result
counties_df.P0010001.apply(double)
# http://manishamde.github.io/blog/2013/03/07/pandas-and-python-top-10/#create
counties_df['FIPS'] = counties_df.apply(lambda s:s['state'] + s['county'], axis=1)
counties_df.set_index('FIPS', inplace=True)
counties_df.head()
counties_df.groupby('state').sum().head()
states_df.P0010001.head()
# now we're ready to compare for each state, if you add all the counties, do you get the same
# population?
# not that you can do .agg('sum') instead of .sum()
# look at http://pandas.pydata.org/pandas-docs/dev/groupby.html to learn more about agg
np.all(states_df.P0010001 == counties_df.groupby('state').agg('sum').P0010001)
"""
Explanation: Using FIPS code as the Index
From Mapping Census Data:
Each state (SUMLEV = 040) has a 2-digit FIPS ID; Delaware's is 10.
Each county (SUMLEV = 050) within a state has a 3-digit FIPS ID, appended to the 2-digit state ID. New Castle County, Delaware, has FIPS ID 10003.
Each Census Tract (SUMLEV = 140) within a county has a 6-digit ID, appended to the county code. The Tract in New Castle County DE that contains most of the the UD campus has FIPS ID 10003014502.
Each Block Group (SUMLEV = 150) within a Tract has a single digit ID appended to the Tract ID. The center of campus in the northwest corner of the tract is Block Group100030145022.
Each Block (SUMLEV = 750) within a Block Group is identified by three more digits appended to the Block Group ID. Pearson Hall is located in Block 100030145022009.
End of explanation
"""
# boolean indexing to pull up California
states_df[states_df.NAME == 'California']
# use .ix -- most general indexing
# http://pandas.pydata.org/pandas-docs/dev/indexing.html#different-choices-for-indexing-loc-iloc-and-ix
states_df.ix['06']
# California counties
counties_df[counties_df.state=='06']
counties_df[counties_df.NAME == 'Alameda County']
counties_df[counties_df.NAME == 'Alameda County']['P0010001']
"""
Explanation: Counties in California
Let's look at home: California state and Alameda County
End of explanation
"""
list(counties_df[counties_df.NAME == 'Alameda County']['P0010001'].to_dict().values())[0]
list(counties_df[counties_df.NAME == 'Alameda County']['P0010001'].iteritems())[0][1]
int(counties_df[counties_df.NAME == 'Alameda County']['P0010001'].values)
"""
Explanation: Different ways to read off the population of Alameda County -- still looking for the best way
End of explanation
"""
# this is like accessing a cell in a spreadsheet -- row, col
ALAMEDA_COUNTY_FIPS = '06001'
counties_df.ix[ALAMEDA_COUNTY_FIPS,'P0010001']
"""
Explanation: If you know the FIPS code for Alameda County, just read off the population using .ix
End of explanation
"""
counties_df.ix[ALAMEDA_COUNTY_FIPS,'county']
# http://api.census.gov/data/2010/sf1/geo.html
# state-county-tract
geo = {'for': 'tract:*',
'in': 'state:%s county:%s' % (us.states.CA.fips,
counties_df.ix[ALAMEDA_COUNTY_FIPS,'county'])}
r = c.sf1.get('NAME,P0010001', geo=geo)
#use state_county_tract to make a DataFrame
alameda_county_tracts_df = DataFrame(r)
alameda_county_tracts_df['P0010001'] = alameda_county_tracts_df['P0010001'].astype('int')
alameda_county_tracts_df['FIPS'] = alameda_county_tracts_df.apply(lambda s: s['state']+s['county']+s['tract'], axis=1)
alameda_county_tracts_df.head()
alameda_county_tracts_df.apply(lambda s: s['state']+s['county']+s['tract'], axis=1)
alameda_county_tracts_df.P0010001.sum()
# Cafe Milano is in tract 4228
MILANO_TRACT_ID = '422800'
alameda_county_tracts_df[alameda_county_tracts_df.tract==MILANO_TRACT_ID]
"""
Explanation: Reading off all the tracts in Alameda County
End of explanation
"""
import time
import us
from itertools import islice
def census_tracts(variable=('NAME','P0010001'), sleep_time=1.0):
    """Yield the requested SF1 variables for every census tract, state by state.

    variable   -- tuple of SF1 variable names to request
    sleep_time -- seconds to pause after each state's API call
    """
    for state in us.states.STATES:
        print(state)
        # One API call per state, asking for all tracts within it.
        geo = {'for': "tract:*",
               'in': 'state:{state_fips}'.format(state_fips=state.fips)}
        for tract in c.sf1.get(variable, geo=geo):
            yield tract
        # don't hit the API more than once a second
        time.sleep(sleep_time)
# limit the number of tracts we crawl for until we're ready to get all of them
tracts_df = DataFrame(list(islice(census_tracts(), 100)))
tracts_df['P0010001'] = tracts_df['P0010001'].astype('int')
tracts_df.head()
"""
Explanation: Using Generators to yield all the tracts in the country
http://www.jeffknupp.com/blog/2013/04/07/improve-your-python-yield-and-generators-explained/
End of explanation
"""
|
bollwyvl/dangerous-playgrounds | index.ipynb | bsd-3-clause | import traitlets
import ipywidgets as widgets
import types
"""
Explanation: Building Dangerous Live-Coding Playgrounds with Jupyter Widgets
Motivation
Playground applications, where each user keystroke in any number of source documents updates an output document, allow you to fail fast, and are a great way to learn new APIs.
Some examples:
- Online YAML Parser
- JSON-LD Playground
- PEG Grammar
- js2coffee
- babel REPL
Let's build some tools so we can build any kind of "playground" application.
Approach: Widgets and traitlets
traitlets offer an observable implementation that makes the state of a single value observable, to which your code can react... in Python.
Widgets, or bundles of traitlets, make the state of the those traitlets to be updated, and reacted to, simultaneously on the kernel (backend) and in the notebook (front-end) over a high-performance WebSocket connection.
Building our playgrounds out of widgets lets us use both backend modules and front-end modules to achieve transformations, without writing too much code per playground, and very possibly only Python!
What is Needed?
a live text source: CodeMirror is already available in the notebook, so let's widgetize that
a transformer: while traitlets has link, this doesn't let us supply arbitrary transformations
We'll implement both of these as widgets... and then the playgrounds themselves. Let's get going!
End of explanation
"""
%%javascript
// Front-end half of the CodeMirror widget: registers an AMD module holding
// a Backbone view (and model) that renders a CodeMirror editor and keeps
// value/theme/mode/description in sync with the kernel-side traitlets.
// Undefine first so re-running this cell performs a clean redefine
// (requirejs caches modules by name).
requirejs.undef("widget_codemirror");
define("widget_codemirror",
[
    "underscore",
    "jquery",
    "components/codemirror/lib/codemirror",
    "nbextensions/widgets/widgets/js/widget",
    "base/js/namespace",
    // silent upgrade
    "components/codemirror/mode/meta"
],
function(_, $, CodeMirror, widget, Jupyter){
    var CodeMirrorView = widget.DOMWidgetView.extend({
        render: function(){
            _.bindAll(this, "_init", "_cm_changed", "_mode_loaded");
            // defer DOM setup until the widget is actually on the page
            this.displayed.then(this._init);
        },
        _init: function(){
            this.$description = $("<div/>").appendTo(this.$el);
            this._cm = new CodeMirror($("<div/>").appendTo(this.$el)[0], {
                value: this.m("value")
            });
            this._cm.on("change", this._cm_changed);
            // apply the initial model state to the freshly created editor
            this._theme_changed();
            this._mode_changed();
            this._description_changed();
            // wire up magic functions
            _.map(
                ["value", "theme", "mode", "description"],
                function(name){
                    this.listenTo(
                        this.model,
                        "change:"+name,
                        this["_" + name + "_changed"]
                    );
                }, this);
        },
        // editor -> model: push every keystroke back to the kernel
        _cm_changed: function(){
            this.m("value", this.cmv());
            this.touch();
        },
        // model listeners
        _value_changed: function(){
            var value = this.m("value");
            // don't clobber the buffer while the user is typing in it
            this._cm.hasFocus() || value === this.cmv() || this.cmv(value);
        },
        _theme_changed: function(){
            var theme = this.m("theme"),
                href = Jupyter.notebook.base_url + "static/components/codemirror/theme/" + theme + ".css",
                style = $('link[href$="' + theme +'"]');
            // lazily inject the theme stylesheet the first time it is used
            if(theme && !style.length){
                $("<link/>", {rel: "stylesheet", href: href})
                    .appendTo($("head"));
            }
            this._cm.setOption("theme", theme);
        },
        _description_changed: function(){
            this.$description.text(this.m("description"));
        },
        _mode_changed: function(){
            var that = this,
                mode = this.m("mode"),
                // accept a mode name, MIME type or file name -- first match wins
                spec = _.reduce(["Name", "MIME", "FileName"],
                    function(spec, approach){
                        return spec || CodeMirror["findModeBy" + approach](mode);
                    }, null);
            if(!spec){ return; }
            // fetch the mode bundle on demand, then activate it
            require(["components/codemirror/mode/" + spec.mode + "/" + spec.mode], function(){
                that._cm.setOption("mode", spec.mime);
            })
        },
        _mode_loaded: function(){
            this._cm.setOption("mode", this.m("mode"));
        },
        // get/set the CodeMirror buffer; setter is a no-op when unchanged
        cmv: function(_new){
            var old = this._cm.getValue();
            if(arguments.length){
                old === _new || this._cm.setValue(_new);
                return this;
            }
            return old;
        },
        // d3-style (g|s)etter
        m: function(name, val){
            if(arguments.length == 2){
                this.model.set(name, val);
                return this;
            }
            return this.model.get(name);
        }
    });
    var CodeMirrorModel = widget.WidgetModel.extend({});
    return {
        CodeMirrorView: CodeMirrorView,
        CodeMirrorModel: CodeMirrorModel
    };
});
"""
Explanation: The CodeMirror Widget: Javascript
Writing and distributing widget frontend code is a bit of a pain right now: however, we can (ab)use the live Jupyter Notebook to just ram some named modules right into the runtime environment. If these ever become a real library, they'll move to a static folder, and be installed with something like jupyter nbextension install.
End of explanation
"""
class CodeMirror(widgets.DOMWidget):
    """A live CodeMirror editor widget, synced with the front end.

    `_view_module`/`_view_name` point the widget machinery at the
    "widget_codemirror" AMD module registered in the JavaScript cell.
    """
    _view_module = traitlets.Unicode("widget_codemirror").tag(sync=True)
    _view_name = traitlets.Unicode("CodeMirrorView").tag(sync=True)

    # Synced editor state: the text itself plus display options.
    value = traitlets.Unicode().tag(sync=True)
    description = traitlets.Unicode().tag(sync=True)
    mode = traitlets.Unicode().tag(sync=True)
    theme = traitlets.Unicode("monokai").tag(sync=True)

# Demo: a small Python editor (the trailing expression displays it).
cm = CodeMirror(value="print('hello world')",
                description="Yay, code!",
                mode="python",
                theme="material")
cm
"""
Explanation: The CodeMirror Widget: Python
With the JavaScript defined, we can start building stuff! The _view_* traitlets let us point to custom frontend code for the actual DOM elements and JavaScript we'll need.
End of explanation
"""
class PipeBase(widgets.Widget):
    """Base class for (sources...) -> dest transformation widgets."""
    # a tuple of (widget, "trait")s... or, if setting "value", just (widget)
    sources = traitlets.Tuple([]).tag(sync=True, **widgets.widget_serialization)
    # a single (widget, "trait")
    dest = traitlets.Tuple().tag(sync=True, **widgets.widget_serialization)
    # a place to put the last error
    error = traitlets.CUnicode().tag(sync=True)
"""
Explanation: The Pipe Widgets
The playground pattern is usually
(some stuff, some other stuff) → (yet more stuff)
These transformations could occur on the front- or backend, so let's make a base class to inherit from for the two variants.
End of explanation
"""
class Pipe(PipeBase):
    """Backend pipe: applies a Python callable to its source traits.

    Whenever any source trait changes, ``fn(*source_values)`` is computed
    and written to the destination trait.  Each entry of ``sources`` /
    ``dest`` is ``(widget, "trait")``, or just ``(widget,)`` to use the
    widget's ``value`` trait.  Failures are reported via ``error``.
    """

    # The transformation: any plain or builtin Python function.
    fn = traitlets.Union([
        traitlets.Instance(klass=_type)
        for _type in [types.FunctionType, types.BuiltinFunctionType]
    ])

    def _update(self):
        # (Re)subscribe to every source widget.  A plain loop, not a list
        # comprehension: the comprehension was run only for its side effects.
        for src in self.sources:
            src[0].observe(self._src_changed)

    # traitlets magic handlers: re-wire observers when the plumbing changes.
    def _sources_changed(self, old, new):
        self._update()

    def _dest_changed(self, old, new):
        self._update()

    def _src_changed(self, change):
        """Recompute fn(*sources) and push the result into dest."""
        args = [
            getattr(src[0], src[1] if len(src) == 2 else "value")
            for src in self.sources
        ]
        try:
            setattr(self.dest[0],
                    self.dest[1] if len(self.dest) == 2 else "value",
                    self.fn(*args))
        except Exception as err:
            # `error` is a CUnicode trait: store the message explicitly
            # rather than relying on implicit coercion of the exception.
            self.error = str(err)
"""
Explanation: The Backend Pipe Widget: Python
If the transformation you want is in Python, you can just use the imported function directly, or make a lambda.
End of explanation
"""
import operator

# Three sliders: z is kept equal to x * y by the backend pipe below.
x, y, z = [widgets.FloatSlider(value=1, description=it)
           for it in "xyz"]

# Keep a reference to the pipe so it is not garbage collected.
p = Pipe(fn=operator.mul,
         sources=[[x], [y]],
         dest=[z])

widgets.VBox(children=[x,y,z])
"""
Explanation: Some sliders
No widget demo would be complete without some sliders. Here is a very simple example of adding two numbers to update a third.
End of explanation
"""
%%javascript
requirejs.undef("widget_pipe");
define("widget_pipe",
[
"underscore",
"jquery",
"nbextensions/widgets/widgets/js/widget",
'base/js/namespace',
],
function(_, $, widget, Jupyter){
var JSPipeModel = widget.WidgetModel.extend({
initialize: function(){
this.on("change:sources", this._update);
this.on("change:dest", this._update);
this.on("change:fn_module", this._module_changed);
},
_update: function(){
_.map(this.m("sources"), function(src){
this.listenTo(src[0], "change", this._evaluate)
}, this);
},
_module_changed: function(){
var that = this;
require([this.m("fn_module")], function(_module){
that._module = _module;
that._evaluate();
}, function(error){
console.error(error);
that.m("error", error);
})
},
_evaluate: function(){
var args = _.map(this.m("sources"), function(src){
return src[0].get(src.length === 1 ? "value": src[1])
}),
dest = this.m("dest"),
dest_attr = dest.length === 1 ? "value" : dest[1];
console.log()
try{
dest[0].set(
dest_attr,
this._module[this.m("fn")].apply(null, args)
);
}catch(error){
console.error(error)
this.m("error", error);
}
},
// d3-style (g|s)etter
m: function(name, val){
if(arguments.length == 2){
this.set(name, val);
return this;
}
return this.get(name);
}
}, {
serializers: _.extend({
sources: {deserialize: widget.unpack_models},
dest: {deserialize: widget.unpack_models},
}, widget.WidgetModel.prototype.serializers)
});
return {
JSPipeModel: JSPipeModel
};
});
"""
Explanation: The Frontend Pipe Widget: JavaScript
Sometimes the transformation you want is available only from a JavaScript module. We'll need a different approach to providing that capability. As with the CodeMirror widget, we'll write the JavaScript first. Note that since the user in the browser doesn't interact with the widget, instead of a WidgetView, we're overloading the WidgetModel.
End of explanation
"""
class JSPipe(PipeBase):
    """Frontend pipe: applies a function from an AMD JavaScript module.

    `fn_module` is the (URL of the) AMD module to load in the browser,
    and `fn` the name of the function it exports.
    """
    _model_module = traitlets.Unicode("widget_pipe").tag(sync=True)
    _model_name = traitlets.Unicode("JSPipeModel").tag(sync=True)

    fn = traitlets.Unicode().tag(sync=True)
    fn_module = traitlets.Unicode().tag(sync=True)

    # Convenience CDN prefixes for common module hosts.
    CDNJS = "https://cdnjs.cloudflare.com/ajax/libs/"
    JSDELIVR = "https://cdn.jsdelivr.net/g/"
"""
Explanation: The Frontend Pipe Widget: Python
Unsurprisingly, there is very little here. For convenience, some common CDN prefixes are provided.
End of explanation
"""
# math.js exports a callable `multiply`; the work happens in the browser.
mathjs = JSPipe.CDNJS + "mathjs/2.6.0/math.js"
jx, jy, jz = [widgets.FloatSlider(value=1, description=it) for it in "xyz"]
p = JSPipe(fn="multiply", fn_module=mathjs, sources=[[jx], [jy]], dest=[jz])
widgets.VBox(children=[jx, jy, jz])
"""
Explanation: Return of Sliders
This is the same thing as above, but handled by the frontend. We'll use math.js to get a nice, callable multiply function, but you can use any AMD-compatible module. If you need complex behavior, you'll have to make your own module!
End of explanation
"""
import yaml
import json

class YamlPlayground(widgets.FlexBox):
    """Two side-by-side editors: YAML on the left, live JSON on the right."""
    # NOTE(review): this trait appears unused — the pipe below writes
    # straight into the JSON editor widget.
    json = traitlets.Any({})

    def __init__(self, *args, **kwargs):
        self._yaml = CodeMirror(
            value="x: 1",
            description="YAML",
            mode="yaml",
            width="50%")
        self._json = CodeMirror(
            description="JSON",
            mode="javascript",
            width="50%")
        # convert the YAML text to pretty-printed JSON
        Pipe(fn=lambda x: json.dumps(
                yaml.safe_load(x),
                indent=2),
            sources=[[self._yaml]],
            dest=[self._json])
        kwargs.update(
            children=[self._yaml, self._json],
            orientation="horizontal"
        )
        super(YamlPlayground, self).__init__(
            *args, **kwargs)

YamlPlayground()
"""
Explanation: A YAML Playground
YAML is a great language for writing data quickly.
End of explanation
"""
# Sample JSON-LD document and context for the playground below.
doc = """{
"@context": {
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"name" : "rdfs:label"
},
"name": "Jane Doe"
}"""
context = """{
"@context": {
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"name" : "rdfs:label"
}
}"""
from pyld import jsonld

class JSONLDPlayground(widgets.FlexBox):
    """Three editors: a JSON-LD document, a context, and the compacted result."""
    # Parsed (Python) versions of the three panes, kept in sync by pipes.
    doc = traitlets.Any({})
    context = traitlets.Any({})
    compacted = traitlets.Any({})

    def __init__(self, *args, **kwargs):
        self._doc = CodeMirror(
            value=doc,
            description="Document",
            mode="jsonld",
            width="33%")
        self._context = CodeMirror(
            value=context,
            description="Context",
            mode="json",
            width="33%")
        self._compacted = CodeMirror(
            value="{}",
            description="Compacted",
            mode="json",
            width="33%")
        kwargs.update(
            children=[self._doc, self._context, self._compacted],
            orientation="horizontal")
        super(JSONLDPlayground, self).__init__(*args, **kwargs)
        # parse the two editors' text into Python objects
        Pipe(fn=json.loads,
             sources=[[self._doc]], dest=[self, "doc"])
        Pipe(fn=json.loads,
             sources=[[self._context]], dest=[self, "context"])
        # pretty-print the compacted result back into its editor
        Pipe(fn=lambda x: json.dumps(x, indent=2),
             sources=[[self, "compacted"]], dest=[self._compacted])
        # finally, we can compact the JSON-LD
        Pipe(fn=jsonld.compact,
             sources=[[self, "doc"], [self, "context"]],
             dest=[self, "compacted"])

pg = JSONLDPlayground()
pg
"""
Explanation: [wip] JSON-LD Playground
We use the new text editor, the pipe and the standard layout widgets to make a re-creation of the JSON-LD Playground. First, we'll need some documents to start with.
End of explanation
"""
%%javascript
// A tiny AMD module wrapping Babel's ES2015 -> ES5 transform so that
// JSPipe can call it by name.
requirejs.undef("simple_babel");
define("simple_babel", [
    "https://cdnjs.cloudflare.com/ajax/libs/babel-standalone/6.4.4/babel.min.js"
], function(Babel){
    return {
        transform: function(input){
            return Babel.transform(
                input, {
                    presets: ['es2015']
                }).code;
        }
    }
})
class BabelPlayground(widgets.FlexBox):
    """Side-by-side ES2015 editor and its live ES5 transpilation."""
    def __init__(self, *args, **kwargs):
        self._es6 = CodeMirror(
            value="()=> console.log('hello world')",
            description="ES2015",
            mode="javascript",
            width="50%")
        self._es5 = CodeMirror(
            description="ES5",
            mode="javascript",
            width="50%")
        kwargs.update(
            children=[self._es6, self._es5],
            orientation="horizontal"
        )
        super(BabelPlayground, self).__init__(*args, **kwargs)
        # transpile ES2015 to ES5 in the browser via the simple_babel module
        JSPipe(
            fn="transform",
            fn_module="simple_babel",
            sources=[[self._es6]],
            dest=[self._es5])

BabelPlayground()
"""
Explanation: Babel Playground
ES2015 is great! But you need to transpile it. Before setting up a build chain, a playground can help you feel out the API.
Note: Babel's browser-based compilation support has since been discontinued.
End of explanation
"""
|
xR86/ml-stuff | kaggle/zmisc/Book Recommendations from Charles Darwin/notebook_bad.ipynb | mit | # Import library
import glob
# The books files are contained in this folder
folder = "datasets/"
# List all the .txt files and sort them alphabetically
files = glob.glob(folder + '*.txt')
# ... YOUR CODE FOR TASK 1 ...
files.sort()
"""
Explanation: 1. Darwin's bibliography
<p><img src="https://assets.datacamp.com/production/project_607/img/CharlesDarwin.jpg" alt="Charles Darwin" width="300px"></p>
<p>Charles Darwin is one of the few universal figures of science. His most renowned work is without a doubt his "<em>On the Origin of Species</em>" published in 1859 which introduced the concept of natural selection. But Darwin wrote many other books on a wide range of topics, including geology, plants or his personal life. In this notebook, we will automatically detect how closely related his books are to each other.</p>
<p>To this purpose, we will develop the bases of <strong>a content-based book recommendation system</strong>, which will determine which books are close to each other based on how similar the discussed topics are. The methods we will use are commonly used in text- or documents-heavy industries such as legal, tech or customer support to perform some common task such as text classification or handling search engine queries.</p>
<p>Let's take a look at the books we'll use in our recommendation system.</p>
End of explanation
"""
# Import libraries
import re, os

# Initialize the objects that will contain the texts and titles
txts = []
titles = []

for n in files:
    # Use a context manager so each file handle is closed promptly
    # (the original left every file open)
    with open(n, encoding='utf-8-sig') as f:
        raw = f.read()
    # Replace runs of non-alpha-numeric characters with a single space
    txts.append(re.sub(r'[\W_]+', ' ', raw))
    # The title is the file name without its extension
    titles.append(os.path.basename(n).replace(".txt", ""))

# Print the length, in characters, of each book
[len(t) for t in txts]
"""
Explanation: 2. Load the contents of each book into Python
<p>As a first step, we need to load the content of these books into Python and do some basic pre-processing to facilitate the downstream analyses. We call such a collection of texts <strong>a corpus</strong>. We will also store the titles for these books for future reference and print their respective length to get a gauge for their contents.</p>
End of explanation
"""
# Browse the titles, remembering the index of "OriginofSpecies"
for i, title in enumerate(titles):
    if title == 'OriginofSpecies':
        ori = i

# Print the stored index
print(ori)
"""
Explanation: 3. Find "On the Origin of Species"
<p>For the next parts of this analysis, we will often check the results returned by our method for a given book. For consistency, we will refer to Darwin's most famous book: "<em>On the Origin of Species</em>." Let's find to which index this book is associated.</p>
End of explanation
"""
# Define a list of stop words
stoplist = set('for a of the and to in to be which some is at that we i who whom show via may my our might as well'.split())

# Lower-case, tokenize and filter each book in a single pass, while
# keeping the same intermediate lists as before.
txts_lower_case = []
txts_split = []
texts = []
for txt in txts:
    lowered = txt.lower()
    tokens = lowered.split()
    txts_lower_case.append(lowered)
    txts_split.append(tokens)
    texts.append([word for word in tokens if word not in stoplist])

# Print the first 20 tokens for the "On the Origin of Species" book
print(texts[ori][:20])
"""
Explanation: 4. Tokenize the corpus
<p>As a next step, we need to transform the corpus into a format that is easier to deal with for the downstream analyses. We will tokenize our corpus, i.e., transform each text into a list of the individual words (called tokens) it is made of. To check the output of our process, we will print the first 20 tokens of "<em>On the Origin of Species</em>".</p>
End of explanation
"""
import pickle

# Load the stemmed tokens list from the pregenerated pickle file
# (stemming all 20 books live takes several minutes, so it was done offline)
texts_stem = pickle.load( open( 'datasets/texts_stem.p', 'rb' ) )

# Print the 20 first stemmed tokens from the "On the Origin of Species" book
print(texts_stem[ori][:20])
"""
Explanation: 5. Stemming of the tokenized corpus
<p>If you have read <em>On the Origin of Species</em>, you will have noticed that Charles Darwin can use different words to refer to a similar concept. For example, the concept of selection can be described by words such as <em>selection</em>, <em>selective</em>, <em>select</em> or <em>selects</em>. This will dilute the weight given to this concept in the book and potentially bias the results of the analysis.</p>
<p>To solve this issue, it is a common practice to use a <strong>stemming process</strong>, which will group together the inflected forms of a word so they can be analysed as a single item: <strong>the stem</strong>. In our <em>On the Origin of Species</em> example, the words related to the concept of selection would be gathered under the <em>select</em> stem.</p>
<p>As we are analysing 20 full books, the stemming algorithm can take several minutes to run and, in order to make the process faster, we will directly load the final results from a pickle file and review the method used to generate it.</p>
End of explanation
"""
# Load the functions allowing to create and use dictionaries
from gensim import corpora

# Create a dictionary (token -> integer id) from the stemmed tokens
dictionary = corpora.Dictionary(texts_stem)

# Create a bag-of-words model for each book: a list of
# (token id, occurrence count) pairs, using the dictionary above
bows = [dictionary.doc2bow(txt) for txt in texts_stem]

# Print the first five elements of the On the Origin of species' BoW model
print(bows[ori][:5])
"""
Explanation: 6. Building a bag-of-words model
<p>Now that we have transformed the texts into stemmed tokens, we need to build models that will be useable by downstream algorithms.</p>
<p>First, we will create a universe of all words contained in our corpus of Charles Darwin's books, which we call <em>a dictionary</em>. Then, using the stemmed tokens and the dictionary, we will create <strong>bag-of-words models</strong> (BoW) of each of our texts. The BoW models will represent our books as a list of all unique tokens they contain associated with their respective number of occurrences. </p>
<p>To better understand the structure of such a model, we will print the five first elements of one of the "<em>On the Origin of Species</em>" BoW model.</p>
End of explanation
"""
# Import pandas to create and manipulate DataFrames
import pandas as pd

# Convert the BoW model for "On the Origin of Species" into a DataFrame
df_bow_origin = pd.DataFrame(bows[ori], columns=['index', 'occurrences'])

# Map each BoW id back to its token through the gensim dictionary.
# (Indexing texts_stem[ori] fetched the i-th word *of the text*, which
# is unrelated to the dictionary id — that was a bug.)
df_bow_origin['token'] = df_bow_origin['index'].apply(lambda i: dictionary[i])

# Sort the DataFrame by descending number of occurrences and print the first 10 values
df_bow_origin.sort_values('occurrences', ascending=False).head(10)
"""
Explanation: 7. The most common words of a given book
<p>The results returned by the bag-of-words model is certainly easy to use for a computer but hard to interpret for a human. It is not straightforward to understand which stemmed tokens are present in a given book from Charles Darwin, and how many occurrences we can find.</p>
<p>In order to better understand how the model has been generated and visualize its content, we will transform it into a DataFrame and display the 10 most common stems for the book "<em>On the Origin of Species</em>".</p>
End of explanation
"""
# Load the gensim functions that will allow us to generate tf-idf models
from gensim.models import TfidfModel

# Generate the tf-idf model from the corpus of BoW vectors
model = TfidfModel(bows)

# Print the tf-idf weights for the "On the Origin of Species" book
print(model[bows[ori]])
"""
Explanation: 8. Build a tf-idf model
<p>If it wasn't for the presence of the stem "<em>speci</em>", we would have a hard time to guess this BoW model comes from the <em>On the Origin of Species</em> book. The most recurring words are, apart from few exceptions, very common and unlikely to carry any information peculiar to the given book. We need to use an additional step in order to determine which tokens are the most specific to a book.</p>
<p>To do so, we will use a <strong>tf-idf model</strong> (term frequency–inverse document frequency). This model defines the importance of each word depending on how frequent it is in this text and how infrequent it is in all the other documents. As a result, a high tf-idf score for a word will indicate that this word is specific to this text.</p>
<p>After computing those scores, we will print the 10 words most specific to the "<em>On the Origin of Species</em>" book (i.e., the 10 words with the highest tf-idf score).</p>
End of explanation
"""
# Convert the tf-idf model for "On the Origin of Species" into a DataFrame
df_tfidf = pd.DataFrame(model[bows[ori]], columns=['id', 'score'])

# Resolve each dictionary id to its token for readability.
# (texts_stem[ori][i] was the i-th word of the text, not the token the
# dictionary id refers to — same bug as in the BoW DataFrame.)
df_tfidf['token'] = df_tfidf['id'].apply(lambda i: dictionary[i])

# Sort the DataFrame by descending tf-idf score and print the first 10 rows.
df_tfidf.sort_values('score', ascending=False).head(10)
"""
Explanation: 9. The results of the tf-idf model
<p>Once again, the format of those results is hard to interpret for a human. Therefore, we will transform it into a more readable version and display the 10 most specific words for the "<em>On the Origin of Species</em>" book.</p>
End of explanation
"""
# Load the library allowing similarity computations
from gensim import similarities

# Compute the similarity matrix (pairwise cosine similarity between all texts)
sims = similarities.MatrixSimilarity(model[bows])

# Build the DataFrame directly with the book titles as rows and columns
sim_df = pd.DataFrame(list(sims), index=titles, columns=titles)

# Print the resulting matrix
print(sim_df)
"""
Explanation: 10. Compute distance between texts
<p>The results of the tf-idf algorithm now return stemmed tokens which are specific to each book. We can, for example, see that topics such as selection, breeding or domestication are defining "<em>On the Origin of Species</em>" (and yes, in this book, Charles Darwin talks quite a lot about pigeons too). Now that we have a model associating tokens to how specific they are to each book, we can measure how related two books are to each other.</p>
<p>To this purpose, we will use a measure of similarity called <strong>cosine similarity</strong> and we will visualize the results as a distance matrix, i.e., a matrix showing all pairwise distances between Darwin's books.</p>
End of explanation
"""
# This is needed to display plots in a notebook
%matplotlib inline

# Import libraries
import matplotlib.pyplot as plt

# Select the column corresponding to "On the Origin of Species"
v = sim_df.OriginofSpecies

# Sort by ascending scores
v_sorted = v.sort_values()
# (optionally drop the book's own self-similarity of 1.0)
# v_sorted = v_sorted[:-1]

# Plot this data as a horizontal bar plot
plt.barh(range(len(v_sorted)), v_sorted.values)

# Modify the axes labels and plot title for a better readability
plt.xlabel('Similarity')
plt.ylabel('Books')
plt.yticks(range(len(v_sorted)), v_sorted.index)
plt.xlim((0, 1))
plt.title('Books most similar to the "Origin of Species"')
plt.show()
"""
Explanation: 11. The book most similar to "On the Origin of Species"
<p>We now have a matrix containing all the similarity measures between any pair of books from Charles Darwin! We can now use this matrix to quickly extract the information we need, i.e., the distance between one book and one or several others. </p>
<p>As a first step, we will display which books are the most similar to "<em>On the Origin of Species</em>," more specifically we will produce a bar chart showing all books ranked by how similar they are to Darwin's landmark work.</p>
End of explanation
"""
# Import libraries
from scipy.cluster import hierarchy

# Compute the clusters from the similarity matrix,
# using the Ward variance minimization algorithm
Z = hierarchy.linkage(sim_df, method='ward')

# Display this result as a horizontal dendrogram, labelled by book title
a = hierarchy.dendrogram(
    Z,
    leaf_font_size=8,
    labels=sim_df.index,
    orientation="left"
)
"""
Explanation: 12. Which books have similar content?
<p>This turns out to be extremely useful if we want to determine a given book's most similar work. For example, we have just seen that if you enjoyed "<em>On the Origin of Species</em>," you can read books discussing similar concepts such as "<em>The Variation of Animals and Plants under Domestication</em>" or "<em>The Descent of Man, and Selection in Relation to Sex</em>." If you are familiar with Darwin's work, these suggestions will likely seem natural to you. Indeed, <em>On the Origin of Species</em> has a whole chapter about domestication and <em>The Descent of Man, and Selection in Relation to Sex</em> applies the theory of natural selection to human evolution. Hence, the results make sense.</p>
<p>However, we now want to have a better understanding of the big picture and see how Darwin's books are generally related to each other (in terms of topics discussed). To this purpose, we will represent the whole similarity matrix as a dendrogram, which is a standard tool to display such data. <strong>This last approach will display all the information about book similarities at once.</strong> For example, we can find a book's closest relative but, also, we can visualize which groups of books have similar topics (e.g., the cluster about Charles Darwin personal life with his autobiography and letters). If you are familiar with Darwin's bibliography, the results should not surprise you too much, which indicates the method gives good results. Otherwise, next time you read one of the author's book, you will know which other books to read next in order to learn more about the topics it addressed.</p>
End of explanation
"""
|
atulsingh0/MachineLearning | HandsOnML/code/16_reinforcement_learning.ipynb | gpl-3.0 | # To support both python 2 and python 3
from __future__ import division, print_function, unicode_literals
# Common imports
import numpy as np
import os
import sys
# to make this notebook's output stable across runs
def reset_graph(seed=42):
    """Reset the default TensorFlow graph and seed the TF and NumPy RNGs.

    NOTE(review): relies on `tf` (TensorFlow) being imported elsewhere —
    no `import tensorflow as tf` appears in this section; confirm it is
    imported before this is called.
    """
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
# To plot pretty figures and animations
%matplotlib nbagg
import matplotlib
import matplotlib.animation as animation
import matplotlib.pyplot as plt
# Larger label/tick fonts for readability
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12

# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "rl"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as images/rl/<fig_id>.png.

    Args:
        fig_id: base file name (without extension) for the figure.
        tight_layout: if True, apply plt.tight_layout() before saving.
    """
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    # Create the target directory if needed so savefig doesn't fail
    os.makedirs(os.path.dirname(path), exist_ok=True)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
"""
Explanation: Chapter 16 – Reinforcement Learning
This notebook contains all the sample code and solutions to the exercises in chapter 16.
Setup
First, let's make sure this notebook works well in both python 2 and 3, import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures:
End of explanation
"""
import gym
"""
Explanation: Note: there may be minor differences between the output of this notebook and the examples shown in the book. You can safely ignore these differences. They are mainly due to the fact that most of the environments provided by OpenAI gym have some randomness.
Introduction to OpenAI gym
In this notebook we will be using OpenAI gym, a great toolkit for developing and comparing Reinforcement Learning algorithms. It provides many environments for your learning agents to interact with. Let's start by importing gym:
End of explanation
"""
# Build the Atari Ms. Pac-Man environment (raw screen-frame observations)
env = gym.make('MsPacman-v0')
"""
Explanation: Next we will load the MsPacman environment, version 0.
End of explanation
"""
# Reset the environment; reset() returns the initial observation
obs = env.reset()
"""
Explanation: Let's initialize the environment by calling is reset() method. This returns an observation:
End of explanation
"""
obs.shape  # the Atari screen: (rows, columns, RGB channels)
"""
Explanation: Observations vary depending on the environment. In this case it is an RGB image represented as a 3D NumPy array of shape [height, width, channels] (with 3 channels: Red, Green and Blue). In other environments it may return different objects, as we will see later.
End of explanation
"""
# Render the environment off-screen as a NumPy RGB array
img = env.render(mode="rgb_array")
"""
Explanation: An environment can be visualized by calling its render() method, and you can pick the rendering mode (the rendering options depend on the environment). In this example we will set mode="rgb_array" to get an image of the environment as a NumPy array:
End of explanation
"""
# Display the rendered frame and save it to images/rl/MsPacman.png
plt.figure(figsize=(5,4))
plt.imshow(img)
plt.axis("off")
save_fig("MsPacman")
plt.show()
"""
Explanation: Let's plot this image:
End of explanation
"""
# In this environment the rendered frame equals the observation
(img == obs).all()
"""
Explanation: Welcome back to the 1980s! :)
In this environment, the rendered image is simply equal to the observation (but in many environments this is not the case):
End of explanation
"""
def plot_environment(env, figsize=(5,4)):
    """Render `env` as an RGB array and display it with matplotlib."""
    plt.close() # or else nbagg sometimes plots in the previous cell
    plt.figure(figsize=figsize)
    img = env.render(mode="rgb_array")
    plt.imshow(img)
    plt.axis("off")
    plt.show()
"""
Explanation: Let's create a little helper function to plot an environment:
End of explanation
"""
# Discrete(9): the nine possible joystick positions
env.action_space
"""
Explanation: Let's see how to interact with an environment. Your agent will need to select an action from an "action space" (the set of possible actions). Let's see what this environment's action space looks like:
End of explanation
"""
env.reset()
# Steer with fixed actions: 110 steps left, then 40 steps lower-left
for step in range(110):
    env.step(3) #left
for step in range(40):
    env.step(8) #lower-left
"""
Explanation: Discrete(9) means that the possible actions are integers 0 through 8, which represents the 9 possible positions of the joystick (0=center, 1=up, 2=right, 3=left, 4=down, 5=upper-right, 6=upper-left, 7=lower-right, 8=lower-left).
Next we need to tell the environment which action to play, and it will compute the next step of the game. Let's go left for 110 steps, then lower left for 40 steps:
End of explanation
"""
# Show where the agent ended up
plot_environment(env)
"""
Explanation: Where are we now?
End of explanation
"""
# Step once with action 0 (joystick centered) and unpack the 4-tuple
obs, reward, done, info = env.step(0)
"""
Explanation: The step() function actually returns several important objects:
End of explanation
"""
obs.shape  # still the raw Atari screen frame
"""
Explanation: The observation tells the agent what the environment looks like, as discussed earlier. This is a 210x160 RGB image:
End of explanation
"""
# Reward obtained during the last step
reward
"""
Explanation: The environment also tells the agent how much reward it got during the last step:
End of explanation
"""
# True once the episode (the game) is over
done
"""
Explanation: When the game is over, the environment returns done=True:
End of explanation
"""
# Environment-specific debug info; agents should not learn from it
info
"""
Explanation: Finally, info is an environment-specific dictionary that can provide some extra information about the internal state of the environment. This is useful for debugging, but your agent should not use this information for learning (it would be cheating).
End of explanation
"""
frames = []

n_max_steps = 1000    # hard cap on episode length
n_change_steps = 10   # pick a new random action every 10 steps

obs = env.reset()
for step in range(n_max_steps):
    # Record every frame so we can replay the episode afterwards
    img = env.render(mode="rgb_array")
    frames.append(img)
    if step % n_change_steps == 0:
        action = env.action_space.sample() # play randomly
    obs, reward, done, info = env.step(action)
    if done:
        break
"""
Explanation: Let's play one full game (with 3 lives), by moving in random directions for 10 steps at a time, recording each frame:
End of explanation
"""
def update_scene(num, frames, patch):
    """FuncAnimation callback: put frame number `num` onto `patch`."""
    frame = frames[num]
    patch.set_data(frame)
    return (patch,)
def plot_animation(frames, repeat=False, interval=40):
    """Build a matplotlib FuncAnimation that plays the recorded frames."""
    plt.close() # or else nbagg sometimes plots in the previous cell
    fig = plt.figure()
    patch = plt.imshow(frames[0])
    plt.axis('off')
    return animation.FuncAnimation(fig, update_scene, fargs=(frames, patch), frames=len(frames), repeat=repeat, interval=interval)

# Play back the recorded episode (a bit jittery inside Jupyter)
video = plot_animation(frames)
plt.show()
"""
Explanation: Now show the animation (it's a bit jittery within Jupyter):
End of explanation
"""
# Free the environment's resources when done playing with it
env.close()
"""
Explanation: Once you have finished playing with an environment, you should close it to free up resources:
End of explanation
"""
# Switch to the much simpler Cart-Pole environment
env = gym.make("CartPole-v0")
obs = env.reset()
# 1D observation: [cart position, cart velocity, pole angle, pole angular velocity]
obs
"""
Explanation: To code our first learning agent, we will be using a simpler environment: the Cart-Pole.
A simple environment: the Cart-Pole
The Cart-Pole is a very simple environment composed of a cart that can move left or right, and pole placed vertically on top of it. The agent must move the cart left or right to keep the pole upright.
End of explanation
"""
from PIL import Image, ImageDraw
try:
from pyglet.gl import gl_info
openai_cart_pole_rendering = True # no problem, let's use OpenAI gym's rendering function
except Exception:
openai_cart_pole_rendering = False # probably no X server available, let's use our own rendering function
def render_cart_pole(env, obs):
if openai_cart_pole_rendering:
# use OpenAI gym's rendering function
return env.render(mode="rgb_array")
else:
# rendering for the cart pole environment (in case OpenAI gym can't do it)
img_w = 600
img_h = 400
cart_w = img_w // 12
cart_h = img_h // 15
pole_len = img_h // 3.5
pole_w = img_w // 80 + 1
x_width = 2
max_ang = 0.2
bg_col = (255, 255, 255)
cart_col = 0x000000 # Blue Green Red
pole_col = 0x669acc # Blue Green Red
pos, vel, ang, ang_vel = obs
img = Image.new('RGB', (img_w, img_h), bg_col)
draw = ImageDraw.Draw(img)
cart_x = pos * img_w // x_width + img_w // x_width
cart_y = img_h * 95 // 100
top_pole_x = cart_x + pole_len * np.sin(ang)
top_pole_y = cart_y - cart_h // 2 - pole_len * np.cos(ang)
draw.line((0, cart_y, img_w, cart_y), fill=0)
draw.rectangle((cart_x - cart_w // 2, cart_y - cart_h // 2, cart_x + cart_w // 2, cart_y + cart_h // 2), fill=cart_col) # draw cart
draw.line((cart_x, cart_y - cart_h // 2, top_pole_x, top_pole_y), fill=pole_col, width=pole_w) # draw pole
return np.array(img)
def plot_cart_pole(env, obs):
    """Render the current CartPole state and show it inline with matplotlib."""
    plt.close()  # or else nbagg sometimes plots in the previous cell
    frame = render_cart_pole(env, obs)
    plt.imshow(frame)
    plt.axis("off")
    plt.show()
plot_cart_pole(env, obs)
"""
Explanation: The observation is a 1D NumPy array composed of 4 floats: they represent the cart's horizontal position, its velocity, the angle of the pole (0 = vertical), and the angular velocity. Let's render the environment... unfortunately we need to fix an annoying rendering issue first.
Fixing the rendering issue
Some environments (including the Cart-Pole) require access to your display, which opens up a separate window, even if you specify the rgb_array mode. In general you can safely ignore that window. However, if Jupyter is running on a headless server (ie. without a screen) it will raise an exception. One way to avoid this is to install a fake X server like Xvfb. You can start Jupyter using the xvfb-run command:
$ xvfb-run -s "-screen 0 1400x900x24" jupyter notebook
If Jupyter is running on a headless server but you don't want to worry about Xvfb, then you can just use the following rendering function for the Cart-Pole:
End of explanation
"""
env.action_space
"""
Explanation: Now let's look at the action space:
End of explanation
"""
obs = env.reset()
while True:
obs, reward, done, info = env.step(0)
if done:
break
plt.close() # or else nbagg sometimes plots in the previous cell
img = render_cart_pole(env, obs)
plt.imshow(img)
plt.axis("off")
save_fig("cart_pole_plot")
img.shape
"""
Explanation: Yep, just two possible actions: accelerate towards the left or towards the right. Let's push the cart left until the pole falls:
End of explanation
"""
obs = env.reset()
while True:
obs, reward, done, info = env.step(1)
if done:
break
plot_cart_pole(env, obs)
"""
Explanation: Notice that the game is over when the pole tilts too much, not when it actually falls. Now let's reset the environment and push the cart to right instead:
End of explanation
"""
frames = []
n_max_steps = 1000
n_change_steps = 10
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
# hard-coded policy
position, velocity, angle, angular_velocity = obs
if angle < 0:
action = 0
else:
action = 1
obs, reward, done, info = env.step(action)
if done:
break
video = plot_animation(frames)
plt.show()
"""
Explanation: Looks like it's doing what we're telling it to do. Now how can we make the poll remain upright? We will need to define a policy for that. This is the strategy that the agent will use to select an action at each step. It can use all the past actions and observations to decide what to do.
A simple hard-coded policy
Let's hard code a simple strategy: if the pole is tilting to the left, then push the cart to the left, and vice versa. Let's see if that works:
End of explanation
"""
import tensorflow as tf
# 1. Specify the network architecture
n_inputs = 4 # == env.observation_space.shape[0]
n_hidden = 4 # it's a simple task, we don't need more than this
n_outputs = 1 # only outputs the probability of accelerating left
initializer = tf.contrib.layers.variance_scaling_initializer()
# 2. Build the neural network
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu,
kernel_initializer=initializer)
outputs = tf.layers.dense(hidden, n_outputs, activation=tf.nn.sigmoid,
kernel_initializer=initializer)
# 3. Select a random action based on the estimated probabilities
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
init = tf.global_variables_initializer()
"""
Explanation: Nope, the system is unstable and after just a few wobbles, the pole ends up too tilted: game over. We will need to be smarter than that!
Neural Network Policies
Let's create a neural network that will take observations as inputs, and output the action to take for each observation. To choose an action, the network will first estimate a probability for each action, then select an action randomly according to the estimated probabilities. In the case of the Cart-Pole environment, there are just two possible actions (left or right), so we only need one output neuron: it will output the probability p of the action 0 (left), and of course the probability of action 1 (right) will be 1 - p.
Note: instead of using the fully_connected() function from the tensorflow.contrib.layers module (as in the book), we now use the dense() function from the tf.layers module, which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while tf.layers is part of the official API. As you will see, the code is mostly the same.
The main differences relevant to this chapter are:
* the _fn suffix was removed in all the parameters that had it (for example the activation_fn parameter was renamed to activation).
* the weights parameter was renamed to kernel,
* the default activation is None instead of tf.nn.relu
End of explanation
"""
n_max_steps = 1000
frames = []
with tf.Session() as sess:
init.run()
obs = env.reset()
for step in range(n_max_steps):
img = render_cart_pole(env, obs)
frames.append(img)
action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
if done:
break
env.close()
"""
Explanation: In this particular environment, the past actions and observations can safely be ignored, since each observation contains the environment's full state. If there were some hidden state then you may need to consider past actions and observations in order to try to infer the hidden state of the environment. For example, if the environment only revealed the position of the cart but not its velocity, you would have to consider not only the current observation but also the previous observation in order to estimate the current velocity. Another example is if the observations are noisy: you may want to use the past few observations to estimate the most likely current state. Our problem is thus as simple as can be: the current observation is noise-free and contains the environment's full state.
You may wonder why we are picking a random action based on the probability given by the policy network, rather than just picking the action with the highest probability. This approach lets the agent find the right balance between exploring new actions and exploiting the actions that are known to work well. Here's an analogy: suppose you go to a restaurant for the first time, and all the dishes look equally appealing so you randomly pick one. If it turns out to be good, you can increase the probability to order it next time, but you shouldn't increase that probability to 100%, or else you will never try out the other dishes, some of which may be even better than the one you tried.
Let's randomly initialize this policy neural network and use it to play one game:
End of explanation
"""
video = plot_animation(frames)
plt.show()
"""
Explanation: Now let's look at how well this randomly initialized policy network performed:
End of explanation
"""
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
y = tf.placeholder(tf.float32, shape=[None, n_outputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cross_entropy)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
"""
Explanation: Yeah... pretty bad. The neural network will have to learn to do better. First let's see if it is capable of learning the basic policy we used earlier: go left if the pole is tilting left, and go right if it is tilting right. The following code defines the same neural network but we add the target probabilities y, and the training operations (cross_entropy, optimizer and training_op):
End of explanation
"""
n_environments = 10
n_iterations = 1000
envs = [gym.make("CartPole-v0") for _ in range(n_environments)]
observations = [env.reset() for env in envs]
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
target_probas = np.array([([1.] if obs[2] < 0 else [0.]) for obs in observations]) # if angle<0 we want proba(left)=1., or else proba(left)=0.
action_val, _ = sess.run([action, training_op], feed_dict={X: np.array(observations), y: target_probas})
for env_index, env in enumerate(envs):
obs, reward, done, info = env.step(action_val[env_index][0])
observations[env_index] = obs if not done else env.reset()
saver.save(sess, "./my_policy_net_basic.ckpt")
for env in envs:
env.close()
def render_policy_net(model_path, action, X, n_max_steps = 1000):
    """Restore a saved policy network and let it play one CartPole episode.

    Parameters:
        model_path: checkpoint path to restore (uses the module-level ``saver``).
        action: the action-sampling tensor of the policy graph.
        X: the observation placeholder of the policy graph.
        n_max_steps: safety cap on the episode length.

    Returns a list of rendered RGB frames (one per step) for animation.
    """
    frames = []
    env = gym.make("CartPole-v0")
    obs = env.reset()
    with tf.Session() as sess:
        saver.restore(sess, model_path)
        for step in range(n_max_steps):
            # Capture the frame before acting so the first state is included.
            img = render_cart_pole(env, obs)
            frames.append(img)
            # Sample an action from the policy for the current observation.
            action_val = action.eval(feed_dict={X: obs.reshape(1, n_inputs)})
            obs, reward, done, info = env.step(action_val[0][0])
            if done:
                break
    env.close()
    return frames
frames = render_policy_net("./my_policy_net_basic.ckpt", action, X)
video = plot_animation(frames)
plt.show()
"""
Explanation: We can make the same net play in 10 different environments in parallel, and train for 1000 iterations. We also reset environments when they are done.
End of explanation
"""
import tensorflow as tf
reset_graph()
n_inputs = 4
n_hidden = 4
n_outputs = 1
learning_rate = 0.01
initializer = tf.contrib.layers.variance_scaling_initializer()
X = tf.placeholder(tf.float32, shape=[None, n_inputs])
hidden = tf.layers.dense(X, n_hidden, activation=tf.nn.elu, kernel_initializer=initializer)
logits = tf.layers.dense(hidden, n_outputs)
outputs = tf.nn.sigmoid(logits) # probability of action 0 (left)
p_left_and_right = tf.concat(axis=1, values=[outputs, 1 - outputs])
action = tf.multinomial(tf.log(p_left_and_right), num_samples=1)
y = 1. - tf.to_float(action)
cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)
optimizer = tf.train.AdamOptimizer(learning_rate)
grads_and_vars = optimizer.compute_gradients(cross_entropy)
gradients = [grad for grad, variable in grads_and_vars]
gradient_placeholders = []
grads_and_vars_feed = []
for grad, variable in grads_and_vars:
gradient_placeholder = tf.placeholder(tf.float32, shape=grad.get_shape())
gradient_placeholders.append(gradient_placeholder)
grads_and_vars_feed.append((gradient_placeholder, variable))
training_op = optimizer.apply_gradients(grads_and_vars_feed)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
def discount_rewards(rewards, discount_rate):
    """Return the discounted return at every step (right-to-left cumulative sum)."""
    discounted = np.empty(len(rewards))
    running = 0
    for i in reversed(range(len(rewards))):
        running = rewards[i] + discount_rate * running
        discounted[i] = running
    return discounted

def discount_and_normalize_rewards(all_rewards, discount_rate):
    """Discount each episode's rewards, then standardize across all episodes.

    The mean and standard deviation are computed over the concatenation of all
    discounted episodes, so episodes are normalized on a common scale.
    """
    discounted = [discount_rewards(episode, discount_rate) for episode in all_rewards]
    flat = np.concatenate(discounted)
    mean, std = flat.mean(), flat.std()
    return [(episode - mean) / std for episode in discounted]
discount_rewards([10, 0, -50], discount_rate=0.8)
discount_and_normalize_rewards([[10, 0, -50], [10, 20]], discount_rate=0.8)
env = gym.make("CartPole-v0")
n_games_per_update = 10
n_max_steps = 1000
n_iterations = 250
save_iterations = 10
discount_rate = 0.95
with tf.Session() as sess:
init.run()
for iteration in range(n_iterations):
print("\rIteration: {}".format(iteration), end="")
all_rewards = []
all_gradients = []
for game in range(n_games_per_update):
current_rewards = []
current_gradients = []
obs = env.reset()
for step in range(n_max_steps):
action_val, gradients_val = sess.run([action, gradients], feed_dict={X: obs.reshape(1, n_inputs)})
obs, reward, done, info = env.step(action_val[0][0])
current_rewards.append(reward)
current_gradients.append(gradients_val)
if done:
break
all_rewards.append(current_rewards)
all_gradients.append(current_gradients)
all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate=discount_rate)
feed_dict = {}
for var_index, gradient_placeholder in enumerate(gradient_placeholders):
mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
for game_index, rewards in enumerate(all_rewards)
for step, reward in enumerate(rewards)], axis=0)
feed_dict[gradient_placeholder] = mean_gradients
sess.run(training_op, feed_dict=feed_dict)
if iteration % save_iterations == 0:
saver.save(sess, "./my_policy_net_pg.ckpt")
env.close()
frames = render_policy_net("./my_policy_net_pg.ckpt", action, X, n_max_steps=1000)
video = plot_animation(frames)
plt.show()
"""
Explanation: Looks like it learned the policy correctly. Now let's see if it can learn a better policy on its own.
Policy Gradients
To train this neural network we will need to define the target probabilities y. If an action is good we should increase its probability, and conversely if it is bad we should reduce it. But how do we know whether an action is good or bad? The problem is that most actions have delayed effects, so when you win or lose points in a game, it is not clear which actions contributed to this result: was it just the last action? Or the last 10? Or just one action 50 steps earlier? This is called the credit assignment problem.
The Policy Gradients algorithm tackles this problem by first playing multiple games, then making the actions in good games slightly more likely, while actions in bad games are made slightly less likely. First we play, then we go back and think about what we did.
End of explanation
"""
transition_probabilities = [
[0.7, 0.2, 0.0, 0.1], # from s0 to s0, s1, s2, s3
[0.0, 0.0, 0.9, 0.1], # from s1 to ...
[0.0, 1.0, 0.0, 0.0], # from s2 to ...
[0.0, 0.0, 0.0, 1.0], # from s3 to ...
]
n_max_steps = 50
def print_sequence(start_state=0):
    """Print one sampled trajectory of the Markov chain, stopping at state 3."""
    state = start_state
    print("States:", end=" ")
    step = 0
    while step < n_max_steps:
        print(state, end=" ")
        if state == 3:  # s3 is absorbing: end the walk
            break
        state = rnd.choice(range(4), p=transition_probabilities[state])
        step += 1
    else:
        print("...", end="")  # ran out of steps without reaching s3
    print()
for _ in range(10):
print_sequence()
"""
Explanation: Markov Chains
End of explanation
"""
transition_probabilities = [
[[0.7, 0.3, 0.0], [1.0, 0.0, 0.0], [0.8, 0.2, 0.0]], # in s0, if action a0 then proba 0.7 to state s0 and 0.3 to state s1, etc.
[[0.0, 1.0, 0.0], None, [0.0, 0.0, 1.0]],
[None, [0.8, 0.1, 0.1], None],
]
rewards = [
[[+10, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 0, 0], [0, 0, -50]],
[[0, 0, 0], [+40, 0, 0], [0, 0, 0]],
]
possible_actions = [[0, 1, 2], [0, 2], [1]]
def policy_fire(state):
    """Deterministic policy: action a0 in s0, a2 in s1, a1 in s2."""
    return (0, 2, 1)[state]

def policy_random(state):
    """Pick one of the legal actions for *state* uniformly at random."""
    return rnd.choice(possible_actions[state])

def policy_safe(state):
    """Conservative policy: a0 in s0 and s1, a1 in s2 (the only legal action)."""
    return (0, 0, 1)[state]
class MDPEnvironment(object):
    """Tiny 3-state Markov Decision Process driven by the module-level
    transition_probabilities and rewards tables."""

    def __init__(self, start_state=0):
        self.start_state = start_state
        self.reset()

    def reset(self):
        """Restart the episode: back to the start state with zero reward."""
        self.state = self.start_state
        self.total_rewards = 0

    def step(self, action):
        """Apply *action*: sample the next state, accumulate the reward.

        Returns (new_state, reward) for this transition.
        """
        new_state = rnd.choice(range(3), p=transition_probabilities[self.state][action])
        gained = rewards[self.state][action][new_state]
        self.state = new_state
        self.total_rewards += gained
        return self.state, gained
def run_episode(policy, n_steps, start_state=0, display=True):
    """Run *policy* for *n_steps* in the MDP and return the total reward.

    Parameters:
        policy: callable mapping a state to an action.
        n_steps: number of steps to play.
        start_state: initial MDP state. (Bug fix: this argument was previously
            accepted but silently ignored -- the env always started at state 0.)
        display: if True, print the first 10 states and rewards plus the total.
    """
    env = MDPEnvironment(start_state)  # was MDPEnvironment(): start_state ignored
    if display:
        print("States (+rewards):", end=" ")
    for step in range(n_steps):
        if display:
            if step == 10:
                print("...", end=" ")  # elide everything after the first 10 steps
            elif step < 10:
                print(env.state, end=" ")
        action = policy(env.state)
        state, reward = env.step(action)
        # Only non-zero rewards are shown, in parentheses after the state.
        if display and step < 10 and reward:
            print("({})".format(reward), end=" ")
    if display:
        print("Total rewards =", env.total_rewards)
    return env.total_rewards
# Compare the three hand-written policies over 1,000 episodes each.
for policy in (policy_fire, policy_random, policy_safe):
    all_totals = []
    print(policy.__name__)
    for episode in range(1000):
        all_totals.append(run_episode(policy, n_steps=100, display=(episode<5)))
    # Bug fix: the std format spec was "{:1f}" (min width 1, default 6 decimals);
    # ".1f" matches the precision used for the mean.
    print("Summary: mean={:.1f}, std={:.1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
    print()
"""
Explanation: Markov Decision Process
End of explanation
"""
# Q-Learning hyperparameters.
n_states = 3
n_actions = 3
n_steps = 20000
alpha = 0.01  # learning rate
gamma = 0.99  # discount rate
exploration_policy = policy_random  # behavior policy: explore uniformly at random
# Q-values start at -inf for illegal actions (so argmax never selects them)
# and at 0 for legal ones.
q_values = np.full((n_states, n_actions), -np.inf)
for state, actions in enumerate(possible_actions):
    q_values[state][actions]=0
env = MDPEnvironment()
# Off-policy TD learning: follow the random policy, but update each Q-value
# towards the greedy (max) value of the resulting next state.
for step in range(n_steps):
    action = exploration_policy(env.state)
    state = env.state
    next_state, reward = env.step(action)
    next_value = np.max(q_values[next_state]) # greedy policy
    q_values[state, action] = (1-alpha)*q_values[state, action] + alpha*(reward + gamma * next_value)
def optimal_policy(state):
    """Greedy policy w.r.t. the learned Q-values: best action for *state*."""
    return q_values[state].argmax()
q_values
# Evaluate the learned greedy policy over 1,000 episodes.
all_totals = []
for episode in range(1000):
    all_totals.append(run_episode(optimal_policy, n_steps=100, display=(episode<5)))
# Bug fix: "{:1f}" (min width 1, default 6 decimals) -> "{:.1f}" to match the mean.
print("Summary: mean={:.1f}, std={:.1f}, min={}, max={}".format(np.mean(all_totals), np.std(all_totals), np.min(all_totals), np.max(all_totals)))
print()
"""
Explanation: Q-Learning
Q-Learning will learn the optimal policy by watching the random policy play.
End of explanation
"""
env = gym.make("MsPacman-v0")
obs = env.reset()
obs.shape
env.action_space
"""
Explanation: Learning to play MsPacman using Deep Q-Learning
End of explanation
"""
# Mean intensity of Ms. Pacman's sprite color (210, 164, 74) in the raw frame.
mspacman_color = np.array([210, 164, 74]).mean()

def preprocess_observation(obs):
    """Crop, downsample, greyscale and rescale a raw Atari frame to 88x80x1."""
    cropped = obs[1:176:2, ::2]        # crop the playfield, keep every other row/col
    grey = cropped.mean(axis=2)        # average RGB channels to greyscale
    grey[grey == mspacman_color] = 0   # blank Ms. Pacman's color to improve contrast
    scaled = (grey - 128) / 128 - 1    # shift/scale pixels (values end up in [-2, 0))
    return scaled.reshape(88, 80, 1)
img = preprocess_observation(obs)
plt.figure(figsize=(11, 7))
plt.subplot(121)
plt.title("Original observation (160×210 RGB)")
plt.imshow(obs)
plt.axis("off")
plt.subplot(122)
plt.title("Preprocessed observation (88×80 greyscale)")
plt.imshow(img.reshape(88, 80), interpolation="nearest", cmap="gray")
plt.axis("off")
save_fig("preprocessing_plot")
plt.show()
"""
Explanation: Preprocessing
Preprocessing the images is optional but greatly speeds up training.
End of explanation
"""
reset_graph()
input_height = 88
input_width = 80
input_channels = 1
conv_n_maps = [32, 64, 64]
conv_kernel_sizes = [(8,8), (4,4), (3,3)]
conv_strides = [4, 2, 1]
conv_paddings = ["SAME"]*3
conv_activation = [tf.nn.relu]*3
n_hidden_inputs = 64 * 11 * 10 # conv3 has 64 maps of 11x10 each
n_hidden = 512
hidden_activation = tf.nn.relu
n_outputs = env.action_space.n
initializer = tf.contrib.layers.variance_scaling_initializer()
learning_rate = 0.01
def q_network(X_state, scope):
    """Build a DQN: 3 conv layers + 1 hidden dense layer + linear Q outputs.

    Layer hyperparameters come from the module-level conv_* / n_hidden* globals.
    Returns the output tensor (one Q-value per action) and a dict mapping each
    trainable variable's name, relative to *scope*, to the variable -- used to
    copy the critic network's weights into the actor network.
    """
    prev_layer = X_state
    conv_layers = []
    with tf.variable_scope(scope) as scope:
        # Stack the convolutional layers described by the module-level lists.
        for n_maps, kernel_size, strides, padding, activation in zip(conv_n_maps, conv_kernel_sizes, conv_strides, conv_paddings, conv_activation):
            prev_layer = tf.layers.conv2d(prev_layer, filters=n_maps, kernel_size=kernel_size, strides=strides, padding=padding, activation=activation, kernel_initializer=initializer)
            conv_layers.append(prev_layer)
        # Flatten the last conv feature maps, then the fully-connected head.
        last_conv_layer_flat = tf.reshape(prev_layer, shape=[-1, n_hidden_inputs])
        hidden = tf.layers.dense(last_conv_layer_flat, n_hidden, activation=hidden_activation, kernel_initializer=initializer)
        # Linear output layer: one Q-value estimate per possible action.
        outputs = tf.layers.dense(hidden, n_outputs)
        # Key variables by their name relative to the scope so actor and critic
        # networks can be matched variable-for-variable when copying weights.
        trainable_vars = {var.name[len(scope.name):]: var for var in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope.name)}
    return outputs, trainable_vars
X_state = tf.placeholder(tf.float32, shape=[None, input_height, input_width, input_channels])
actor_q_values, actor_vars = q_network(X_state, scope="q_networks/actor") # acts
critic_q_values, critic_vars = q_network(X_state, scope="q_networks/critic") # learns
copy_ops = [actor_var.assign(critic_vars[var_name])
for var_name, actor_var in actor_vars.items()]
copy_critic_to_actor = tf.group(*copy_ops)
with tf.variable_scope("train"):
X_action = tf.placeholder(tf.int32, shape=[None])
y = tf.placeholder(tf.float32, shape=[None, 1])
q_value = tf.reduce_sum(critic_q_values * tf.one_hot(X_action, n_outputs),
axis=1, keep_dims=True)
cost = tf.reduce_mean(tf.square(y - q_value))
global_step = tf.Variable(0, trainable=False, name='global_step')
optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(cost, global_step=global_step)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
actor_vars
from collections import deque
replay_memory_size = 10000
replay_memory = deque([], maxlen=replay_memory_size)
def sample_memories(batch_size):
    """Draw *batch_size* random transitions from the replay memory.

    Returns (states, actions, rewards, next_states, continues); rewards and
    continues are reshaped to column vectors for the training feed. The
    continue flag is 0.0 where the episode ended, 1.0 otherwise.
    """
    chosen = rnd.permutation(len(replay_memory))[:batch_size]
    columns = [[], [], [], [], []]  # state, action, reward, next_state, continue
    for idx in chosen:
        for column, field in zip(columns, replay_memory[idx]):
            column.append(field)
    state, action, reward, next_state, keep_going = (np.array(c) for c in columns)
    return state, action, reward.reshape(-1, 1), next_state, keep_going.reshape(-1, 1)
eps_min = 0.05
eps_max = 1.0
eps_decay_steps = 50000
import sys
def epsilon_greedy(q_values, step):
    """Pick a random action with probability epsilon, else the greedy one.

    Epsilon is annealed linearly from eps_max to eps_min over eps_decay_steps
    training steps (module-level constants).
    """
    epsilon = max(eps_min, eps_max - (eps_max - eps_min) * step / eps_decay_steps)
    if rnd.rand() >= epsilon:
        return np.argmax(q_values)      # exploit: best estimated action
    return rnd.randint(n_outputs)       # explore: uniform random action
n_steps = 100000 # total number of training steps
training_start = 1000 # start training after 1,000 game iterations
training_interval = 3 # run a training step every 3 game iterations
save_steps = 50 # save the model every 50 training steps
copy_steps = 25 # copy the critic to the actor every 25 training steps
discount_rate = 0.95
skip_start = 90 # Skip the start of every game (it's just waiting time).
batch_size = 50
iteration = 0 # game iterations
checkpoint_path = "./my_dqn.ckpt"
done = True # env needs to be reset
with tf.Session() as sess:
if os.path.isfile(checkpoint_path):
saver.restore(sess, checkpoint_path)
else:
init.run()
while True:
step = global_step.eval()
if step >= n_steps:
break
iteration += 1
print("\rIteration {}\tTraining step {}/{} ({:.1f}%)".format(iteration, step, n_steps, step * 100 / n_steps), end="")
if done: # game over, start again
obs = env.reset()
for skip in range(skip_start): # skip boring game iterations at the start of each game
obs, reward, done, info = env.step(0)
state = preprocess_observation(obs)
# Actor evaluates what to do
q_values = actor_q_values.eval(feed_dict={X_state: [state]})
action = epsilon_greedy(q_values, step)
# Actor plays
obs, reward, done, info = env.step(action)
next_state = preprocess_observation(obs)
# Let's memorize what happened
replay_memory.append((state, action, reward, next_state, 1.0 - done))
state = next_state
if iteration < training_start or iteration % training_interval != 0:
continue
# Critic learns
X_state_val, X_action_val, rewards, X_next_state_val, continues = sample_memories(batch_size)
next_q_values = actor_q_values.eval(feed_dict={X_state: X_next_state_val})
y_val = rewards + continues * discount_rate * np.max(next_q_values, axis=1, keepdims=True)
training_op.run(feed_dict={X_state: X_state_val, X_action: X_action_val, y: y_val})
# Regularly copy critic to actor
if step % copy_steps == 0:
copy_critic_to_actor.run()
# And save regularly
if step % save_steps == 0:
saver.save(sess, checkpoint_path)
"""
Explanation: Build DQN
Note: instead of using tf.contrib.layers.convolution2d() or tf.contrib.layers.conv2d() (as in the book), we now use the tf.layers.conv2d(), which did not exist when this chapter was written. This is preferable because anything in contrib may change or be deleted without notice, while tf.layers is part of the official API. As you will see, the code is mostly the same, except that the parameter names have changed slightly:
* the num_outputs parameter was renamed to filters,
* the stride parameter was renamed to strides,
* the _fn suffix was removed from parameter names that had it (e.g., activation_fn was renamed to activation),
* the weights_initializer parameter was renamed to kernel_initializer,
* the weights variable was renamed to "kernel" (instead of "weights"), and the biases variable was renamed from "biases" to "bias",
* and the default activation is now None instead of tf.nn.relu.
End of explanation
"""
|
amueller/scipy-2017-sklearn | notebooks/19.Feature_Selection.ipynb | cc0-1.0 | from sklearn.datasets import load_breast_cancer, load_digits
from sklearn.model_selection import train_test_split
cancer = load_breast_cancer()
# get deterministic random numbers
rng = np.random.RandomState(42)
noise = rng.normal(size=(len(cancer.data), 50))
# add noise features to the data
# the first 30 features are from the dataset, the next 50 are noise
X_w_noise = np.hstack([cancer.data, noise])
X_train, X_test, y_train, y_test = train_test_split(X_w_noise, cancer.target,
random_state=0, test_size=.5)
"""
Explanation: Automatic Feature Selection
Explanation: Automatic Feature Selection
Often we have collected many features that might be related to a supervised prediction task, but we don't know which of them are actually predictive. To improve interpretability, and sometimes also generalization performance, we can use automatic feature selection to select a subset of the original features. There are several types of feature selection methods available, which we'll explain in order of increasing complexity.
For a given supervised model, the best feature selection strategy would be to try out each possible subset of the features, and evaluate generalization performance using this subset. However, there are exponentially many subsets of features, so this exhaustive search is generally infeasible. The strategies discussed below can be thought of as proxies for this infeasible computation.
Univariate statistics
The simplest method to select features is using univariate statistics, that is by looking at each feature individually and running a statistical test to see whether it is related to the target. This kind of test is also known as analysis of variance (ANOVA).
We create a synthetic dataset that consists of the breast cancer data with an additional 50 completely random features.
End of explanation
"""
from sklearn.feature_selection import SelectPercentile
# use f_classif (the default) and SelectPercentile to select 50% of features:
select = SelectPercentile(percentile=50)
select.fit(X_train, y_train)
# transform training set:
X_train_selected = select.transform(X_train)
print(X_train.shape)
print(X_train_selected.shape)
"""
Explanation: We have to define a threshold on the p-value of the statistical test to decide how many features to keep. There are several strategies implemented in scikit-learn, a straight-forward one being SelectPercentile, which selects a percentile of the original features (we select 50% below):
End of explanation
"""
from sklearn.feature_selection import f_classif, f_regression, chi2
F, p = f_classif(X_train, y_train)
plt.figure()
plt.plot(p, 'o')
"""
Explanation: We can also use the test statistic directly to see how relevant each feature is. As the breast cancer dataset is a classification task, we use f_classif, the F-test for classification. Below we plot the p-values associated with each of the 80 features (30 original features + 50 noise features). Low p-values indicate informative features.
End of explanation
"""
mask = select.get_support()
print(mask)
# visualize the mask. black is True, white is False
plt.matshow(mask.reshape(1, -1), cmap='gray_r')
"""
Explanation: Clearly most of the first 30 features have very small p-values.
Going back to the SelectPercentile transformer, we can obtain the features that are selected using the get_support method:
End of explanation
"""
from sklearn.linear_model import LogisticRegression
# transform test data:
X_test_selected = select.transform(X_test)
lr = LogisticRegression()
lr.fit(X_train, y_train)
print("Score with all features: %f" % lr.score(X_test, y_test))
lr.fit(X_train_selected, y_train)
print("Score with only selected features: %f" % lr.score(X_test_selected, y_test))
"""
Explanation: Nearly all of the original 30 features were recovered.
We can also analize the utility of the feature selection by training a supervised model on the data.
It's important to learn the feature selection only on the training set!
End of explanation
"""
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier
select = SelectFromModel(RandomForestClassifier(n_estimators=100, random_state=42), threshold="median")
select.fit(X_train, y_train)
X_train_rf = select.transform(X_train)
print(X_train.shape)
print(X_train_rf.shape)
mask = select.get_support()
# visualize the mask. black is True, white is False
plt.matshow(mask.reshape(1, -1), cmap='gray_r')
X_test_rf = select.transform(X_test)
LogisticRegression().fit(X_train_rf, y_train).score(X_test_rf, y_test)
"""
Explanation: Model-based Feature Selection
A somewhat more sophisticated method for feature selection is using a supervised machine learning model and selecting features based on how important they were deemed by the model. This requires the model to provide some way to rank the features by importance. This can be done for all tree-based models (which implement get_feature_importances) and all linear models, for which the coefficients can be used to determine how much influence a feature has on the outcome.
Any of these models can be made into a transformer that does feature selection by wrapping it with the SelectFromModel class:
End of explanation
"""
from sklearn.feature_selection import RFE
select = RFE(RandomForestClassifier(n_estimators=100, random_state=42), n_features_to_select=40)
select.fit(X_train, y_train)
# visualize the selected features:
mask = select.get_support()
plt.matshow(mask.reshape(1, -1), cmap='gray_r')
X_train_rfe = select.transform(X_train)
X_test_rfe = select.transform(X_test)
LogisticRegression().fit(X_train_rfe, y_train).score(X_test_rfe, y_test)
select.score(X_test, y_test)
"""
Explanation: This method builds a single model (in this case a random forest) and uses the feature importances from this model.
We can do a somewhat more elaborate search by training multiple models on subsets of the data. One particular strategy is recursive feature elimination:
Recursive Feature Elimination
Recursive feature elimination builds a model on the full set of features, and similar to the method above selects a subset of features that are deemed most important by the model. However, usually only a single feature is dropped from the dataset, and a new model is built with the remaining features. The process of dropping features and model building is repeated until there are only a pre-specified number of features left:
End of explanation
"""
import numpy as np
rng = np.random.RandomState(1)
# Generate 400 random integers in the range [0, 1]
X = rng.randint(0, 2, (200, 2))
y = np.logical_xor(X[:, 0] > 0, X[:, 1] > 0) # XOR creation
plt.scatter(X[:, 0], X[:, 1], c=plt.cm.spectral(y.astype(float)))
# %load solutions/19_univariate_vs_mb_selection.py
"""
Explanation: <div class="alert alert-success">
<b>EXERCISE</b>:
<ul>
<li>
Create the "XOR" dataset as in the first cell below:
</li>
<li>
Add random features to it and compare how univariate selection and model-based selection using a Random Forest perform at recovering the original features.
</li>
</ul>
</div>
End of explanation
"""
|
david4096/bioapi-examples | python_notebooks/regionSearch.ipynb | apache-2.0 | from ga4gh.client import client
c = client.HttpClient("http://1kgenomes.ga4gh.org")
import sys
import collections
import math
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
from ipywidgets import interact, interactive, fixed
from IPython.display import display
import ipywidgets as widgets
"""
Explanation: Program Overview
This program allows the user to see how many transcript effects are present in a specific location of the genome. Large regions of the genome can be queried using the "bucket" feature, which takes the count of effects and condenses it into one bar so that the user can see more information. This information could be very helpful in a clinical setting.
Initialize the client
As seen in the "1kg.ipynb" example, we take the following steps to create the client object that will be used to obtain the information we desire and query the server. Plotting tools are also imported here.
End of explanation
"""
dataset = c.search_datasets().next()
for variantSet in c.search_variant_sets(dataset.id):
if variantSet.name == "functional-annotation":
annotation = variantSet
annotationSet = c.search_variant_annotation_sets(variant_set_id=annotation.id).next()
"""
Explanation: Acquiring annotation set
We query the server for the dataset, which is the 1k-genomes dataset. The dataset has a unique ID that can be used to acquire each variant set within the dataset.
To access the different kinds of annotations, we access the annotation set using the unique variant set ID.
End of explanation
"""
def runSearch(startPos, endPos, chromosome, searchTerms, buckets):
    """Query variant annotations bucket-by-bucket and collect counts for plotting.

    Splits the region [startPos, endPos) on `chromosome` into `buckets`
    equal-sized windows and, for each requested effect ID in `searchTerms`,
    counts the variant annotations per window.  Results accumulate in
    module-level globals (yList, count, startPoint, ...) that
    countingStats() and plotWindowHistogram() consume.

    Args:
        startPos/endPos: region bounds as strings (from ipywidgets text boxes).
        chromosome: reference name, e.g. '1'.
        searchTerms: sequence of sequence-ontology IDs, e.g. ['SO:0001627'].
        buckets: number of windows, as a string.
    """
    global formatSearch
    # Each search term is wrapped as {"id": ...} for the effects= parameter.
    formatSearch = []
    for i in range(0,len(searchTerms)):
        formatSearch.append({"id":searchTerms[i]})
    global windowCount
    windowCount = int(buckets)
    global initStart
    global initEnd
    initStart = startPos
    initEnd = endPos
    global startPoint
    global endPoint
    # First window: [startPos, startPos + bucketWidth).
    startPoint = int(startPos)
    endPoint = (int(startPos)+(int(endPos)-int(startPos))/int(buckets))
    global yList
    global xTickList
    yList=[]
    xTickList=[]
    global allGraphData
    allGraphData = []
    global count
    count=0
    # formatSearch loop breaks up the search by different search terms
    for soTerms in formatSearch:
        # windowCount/bucket loop breaks up the search into multiple smaller searches from region to region
        for i in range(0,windowCount):
            searchedVarAnns=c.search_variant_annotations(variant_annotation_set_id=annotationSet.id, start=startPoint, end=endPoint, reference_name=chromosome, effects=[soTerms])
            idList = []
            startEndList = []
            for annotation in searchedVarAnns:
                idList.append(annotation.variant_id)
            # countingStats records len(idList) and, after the last window of
            # a term, rewinds startPoint/endPoint (globals) for the next term.
            countingStats(idList=idList, windowValue=windowCount, yValList=yList, startPos=startPoint, endPos=endPoint)
            # Slide the window forward by one bucket width.
            startPoint+=(int(endPos)-int(startPos))/int(buckets)
            endPoint+=(int(endPos)-int(startPos))/int(buckets)
            del idList[:]
"""
Explanation: Running Search
runSearch is called below using ipywidgets. Global variables are initialized for other functions to use and the start and end points are set based on how many buckets the user wants. The function then searches for the transcript effects, given the transcript effect the user is looking for.
The function consists of a loop that separates the search into the number of buckets (windowCount) the user wants. This makes it easier to visualize large portions of data in a succinct way.
The results are sent to countingStatistics for further processing.
End of explanation
"""
def countingStats(idList, windowValue, yValList,startPos, endPos):
    """Record the per-window hit count and trigger plotting when all rows fill.

    Appends len(idList) to the row of the global yList belonging to the
    search term currently being processed (tracked by the global `count`).
    When a row is full (one value per window), the global
    startPoint/endPoint are rewound so runSearch's next term starts back at
    the beginning of the region, and `count` advances.  Once every term has
    a full row, the histogram is drawn.

    Args:
        idList: variant IDs found in the current window.
        windowValue: total number of windows (buckets).
        yValList: unused here; the global yList is mutated directly.
        startPos/endPos: bounds of the current window (forwarded to the plot).
    """
    if len(yList)==0:
        yList.append([])
    yList[count].append(len(idList))
    # Row for the current search term is complete -> rewind the window
    # pointers for the next term and move on.
    if len(yList[count])==windowValue:
        global startPoint
        # One bucket width is subtracted because runSearch advances the
        # pointers once more after this call returns.
        startPoint = int(initStart)-(int(initEnd)-int(initStart))/windowCount
        global endPoint
        endPoint = (int(initStart)+(int(initEnd)-int(initStart))/windowCount)-(int(initEnd)-int(initStart))/windowCount
        global count
        count+=1
        if count!=len(formatSearch):
            yList.append([])
    # All terms processed and every row full -> draw the histogram.
    if len(yList)==len(formatSearch) and len(yList[count-1])==windowValue and count==len(formatSearch):
        plotWindowHistogram(xTickList, yList, windowValue, startPos, endPos)
"""
Explanation: Counting Statistics
countingStats arranges the data in a way that will work nicely with matplotlib. The values are then passed onto the plotting function.
End of explanation
"""
def plotWindowHistogram(xAxisTicks, yAxisValues, windowVals, startPos, endPos):
    """Draw a grouped bar chart of per-window annotation counts.

    Args:
        xAxisTicks: unused (kept for interface compatibility).
        yAxisValues: one list of window counts per search term (global yList).
        windowVals: number of windows per term.
        startPos/endPos: bounds of the last processed window (unused except
            via the globals initStart/initEnd in the title).
    """
    fig, ax = plt.subplots()
    endValues = np.empty([1,2], dtype=np.int32)
    endValues[0][0] = startPos
    endValues[0][1] = endPos
    # Fixed palette: one distinct color per search term (up to 15 defined).
    colors = [str]*20
    colors[0] = '#8B0000'
    colors[1] = '#FF8C00'
    colors[2] = '#8B008B'
    colors[3] = '#556B2F'
    colors[4] = '#006400'
    colors[5] = '#9932CC'
    colors[6] = '#BDB76B'
    colors[7] = '#707B7C'
    colors[8] = '#76D7C4'
    colors[9] = '#F5B7B1'
    colors[10] = '#1A5276'
    colors[11] = '#BA4A00'
    colors[12] = '#AED6F1'
    colors[13] = '#F9E79F'
    colors[14] = '#6E2C00'
    # title and graph size formatting
    titleEffects=[]
    # Map the searched SO IDs back to their human-readable names.
    # NOTE: .items() replaces the Python-2-only .iteritems() and works on
    # both Python 2 and 3.
    for key, value in searchOntologyDict.items():
        for i in range(len(formatSearch)):
            if searchOntologyDict[key]==formatSearch[i]['id']:
                titleEffects.append(key)
    index=0
    # Interleave the bars of all terms, window by window; only the first
    # window's bars carry a legend label.
    for j in range(0,len(yAxisValues[index])):
        for i in range(0,count):
            if j==0:
                plt.bar(index, yAxisValues[i][j], width=1, color=colors[i], label=titleEffects[i])
            else:
                plt.bar(index, yAxisValues[i][j], width=1, color=colors[i])
            index+=1
    title=""
    if len(titleEffects)==1:
        ax.set_title(titleEffects[0]+"s"+" from "+str(initStart)+" to "+str(initEnd))
    else:
        if len(formatSearch)==2:
            title+=titleEffects[0]+"s"+" and "+titleEffects[1]+"s"+" "
        else:
            # Oxford-comma style list for three or more effects.
            for i in range(0,len(titleEffects)):
                if i!=(len(titleEffects)-1):
                    title+=titleEffects[i]+"s"+", "
                else:
                    title+="and "+titleEffects[i]+"s"+" "
        ax.set_title(title+"from "+str(initStart)+" to "+str(initEnd))
    plt.legend(loc='upper right')
    plt.rcParams["figure.figsize"] = [15,15]
    plt.show()
"""
Explanation: Plotting
All the statistics are gathered and then graphed. The function also sets the appropriate title, x-axis labels, and different bar colors depending on the search results.
End of explanation
"""
shortDict = {'intron_variant' : 'SO:0001627', 'feature_truncation' : 'SO:0001906' , 'non_coding_transcript_exon_variant' : 'SO:0001792' , 'non_coding_transcript_variant' : 'SO:0001619', 'transcript_ablation' : 'SO:0001893'}
chromList = ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18')
global searchOntologyDict
searchOntologyDict = {
'stop_retained_variant' : 'SO:0001567',
'regulatory_region_variant' : 'SO:0001566',
'splice_acceptor_variant' : 'SO:0001574',
'splice_donor_variant' : 'SO:0001575',
'missense_variant' : 'SO:0001583',
'stop_gained' : 'SO:0001587',
'stop_lost' : 'SO:0001578',
'frameshift_variant' : 'SO:0001589',
'coding_sequence_variant' : 'SO:0001580',
'non_coding_transcript_variant' : 'SO:0001619',
'mature_miRNA_variant' : 'SO:0001620',
'NMD_transcript_variant' : 'SO:0001621',
'5_prime_UTR_variant' : 'SO:0001623',
'3_prime_UTR_variant' : 'SO:0001624',
'incomplete_terminal_codon_variant' : 'SO:0001626',
'intron_variant' : 'SO:0001627',
'intergenic_variant' : 'SO:0001628',
'splice_region_variant' : 'SO:0001630',
'upstream_gene_variant' : 'SO:0001631',
'downstream_gene_variant' : 'SO:0001632',
'TF_binding_site_variant' : 'SO:0001782',
'non_coding_transcript_exon_variant' : 'SO:0001792',
'protein_altering_variant' : 'SO:0001818',
'synonymous_variant' : 'SO:0001819',
'inframe_insertion' : 'SO:0001821',
'inframe_deletion' : 'SO:0001822',
'transcript_amplification' : 'SO:0001889',
'regulatory_region_amplification' : 'SO:0001891',
'TFBS_ablation' : 'SO:0001892',
'TFBS_amplification' : 'SO:0001892',
'regulatory_region_ablation' : 'SO:0001894',
'feature_truncation' : 'SO:0001906',
'feature_elongation' : 'SO:0001907',
'start_lost' : 'SO:0002012',
}
multiSelect = widgets.SelectMultiple(
description="Transcript Effects",
options=searchOntologyDict
)
interact(runSearch,
startPos="0",
endPos="100000",
chromosome=chromList,
searchTerms=multiSelect,
buckets="20",
__manual="True"
)
"""
Explanation: Interact Widgets
A dictionary of transcript effects and their search ontology ID's is made so that the user doesn't have to remember the ID's. runSearch is then called and the interact function gives" runSearch all of the necessary parameters based on what the user has chosen.
End of explanation
"""
|
hmenke/pairinteraction | doc/sphinx/examples_python/comparison_to_saffman_fig13.ipynb | gpl-3.0 | %matplotlib inline
# Arrays
import numpy as np
# Plotting
import matplotlib.pyplot as plt
# Operating system interfaces
import os, sys
# Parallel computing
if sys.platform != "win32": from multiprocessing import Pool
from functools import partial
# pairinteraction :-)
from pairinteraction import pireal as pi
# Create cache for matrix elements
if not os.path.exists("./cache"):
os.makedirs("./cache")
cache = pi.MatrixElementCache("./cache")
"""
Explanation: Blockade Interaction in a Magnetic Field
The interaction between Rydberg atoms is strongly influenced by external electric and magnetic fields. A small magnetic field for instance lifts the Zeeman degeneracy and thus strengthens the Rydberg blockade, especially if there is a non-zero angle between the interatomic and the quantization axis. This has been discussed in M. Saffman, T. G. Walker, and K. Mølmer, “Quantum information with Rydberg atoms”, Rev. Mod. Phys. 82, 2313 (2010). Here we show how to reproduce Fig. 13 using pairinteraction. This Jupyter notebook and the final Python script are available on GitHub.
As described in the introduction, we start our code with some preparations. We will make use of pairinteraction's parallel capacities which is why we load the multiprocessing module if supported by the operating system (in Windows, the module only works with methods defined outside an IPython notebook).
End of explanation
"""
distance = 10 # µm
bfields = np.linspace(0, 20, 200) # Gauss
"""
Explanation: We begin by defining some constants of our calculation: the spatial separation of the Rydberg atoms and a range of magnetic field we want to iterate over. The units of the respective quantities are given as comments.
End of explanation
"""
state_one = pi.StateOne("Rb", 43, 2, 2.5, 0.5)
"""
Explanation: Now, we use pairinteraction's StateOne class to define the single-atom state $\left|43d_{5/2},m_j=1/2\right\rangle$ of a Rubudium atom.
End of explanation
"""
def setup_system_one(bfield):
    """Construct the single-atom Rydberg system for a given magnetic field.

    The basis is restricted to states within 100 GHz in energy and within
    two quantum numbers (n, l) of ``state_one``; the field points along z.

    Args:
        bfield: magnetic field magnitude in Gauss.

    Returns:
        A pi.SystemOne ready to be diagonalized.
    """
    system = pi.SystemOne(state_one.getSpecies(), cache)
    energy = state_one.getEnergy()
    system.restrictEnergy(energy - 100, energy + 100)
    n = state_one.getN()
    system.restrictN(n - 2, n + 2)
    l = state_one.getL()
    system.restrictL(l - 2, l + 2)
    system.setBfield([0, 0, bfield])
    return system
"""
Explanation: Next, we define how to set up the single atom system. We do this using a function, so we can easily create systems with the magnetic field as a parameter. Inside the function we create a new system by passing the state_one and the cache directory we created to SystemOne.
To limit the size of the basis, we have to choose cutoffs on states which can couple to state_one. This is done by means of the restrict... functions in SystemOne.
Finally, we set the magnetic field to point in $z$-direction with the magnitude given by the argument.
End of explanation
"""
state_two = pi.StateTwo(state_one, state_one)
"""
Explanation: To investigate the $\left|43d_{5/2},m_j=1/2;43d_{5/2},m_j=1/2\right\rangle$ pair state, we easily combine the same single-atom state twice into a pair state using StateTwo.
End of explanation
"""
def setup_system_two(system_one, angle):
    """Combine two copies of *system_one* into an interacting pair system.

    Args:
        system_one: single-atom system built by setup_system_one().
        angle: angle between the interatomic axis and the quantization axis
            (radians).

    Returns:
        pi.SystemTwo restricted around the pair-state energy, with the
        problem's symmetries imposed to shrink the basis.
    """
    system_two = pi.SystemTwo(system_one, system_one, cache)
    system_two.restrictEnergy(state_two.getEnergy()-5, state_two.getEnergy()+5)
    # Use the module-level `distance` constant (defined next to `bfields`)
    # instead of a hard-coded 10, so changing the interatomic distance in
    # one place is respected here as well.
    system_two.setDistance(distance)
    system_two.setAngle(angle)
    # For theta = 0 the angular-momentum projection 2*m is conserved.
    if angle == 0: system_two.setConservedMomentaUnderRotation([int(2*state_one.getM())])
    system_two.setConservedParityUnderInversion(pi.ODD)
    system_two.setConservedParityUnderPermutation(pi.ODD)
    return system_two
"""
Explanation: Akin to the single atom system, we now define how to create a two atom system. We want to parametrize this in terms of the single atom system and the interaction angle.
We compose a SystemTwo from two system_one because we are looking at two identical atoms. Again we have to restrict the energy range for coupling. Then we proceed to set the distance between the two atoms and the interaction angle.
To speed up the calculation, we can tell pairinteraction that this system will have some symmetries.
End of explanation
"""
def getEnergies(bfield, angle):
    """Compute the Rydberg blockade shift for one magnetic-field value.

    Args:
        bfield: magnetic field magnitude in Gauss (along z).
        angle: angle between the interatomic and quantization axes (radians).

    Returns:
        Blockade interaction strength in MHz, computed from the
        overlap-weighted pair eigenenergies measured relative to twice the
        Zeeman-shifted single-atom energy.
    """
    # Set up one atom system
    system_one = setup_system_one(bfield)
    system_one.diagonalize(1e-3)
    # Calculate Zeeman shift
    zeemanshift = 2*system_one.getHamiltonian().diagonal()[system_one.getBasisvectorIndex(state_one)] # GHz
    # Set up two atom system
    system_two = setup_system_two(system_one,angle)
    system_two.diagonalize(1e-3)
    # Calculate blockade interaction
    eigenenergies = (system_two.getHamiltonian().diagonal()-zeemanshift)*1e3 # MHz
    overlaps = system_two.getOverlap(state_two)
    # Blockade B is defined here via 1/B^2 = sum_i |<pair|i>|^2 / E_i^2.
    blockade = 1/np.sqrt(np.sum(overlaps/eigenenergies**2))
    return blockade
"""
Explanation: Now, we can use the definitions from above to compose our calculation.
End of explanation
"""
plt.xlabel(r"$B$ (Gauss)")
plt.ylabel(r"Blockade (MHz)")
plt.xlim(-0.4,20.4)
plt.ylim(0,0.4)
if sys.platform != "win32":
with Pool() as pool:
energies1 = pool.map(partial(getEnergies, angle=0), bfields)
energies2 = pool.map(partial(getEnergies, angle=np.pi/2), bfields)
else:
energies1 = list(map(partial(getEnergies, angle=0), bfields))
energies2 = list(map(partial(getEnergies, angle=np.pi/2), bfields))
plt.plot(bfields, energies1, 'b-', label=r"$\theta = 0$")
plt.plot(bfields, energies2, 'g-', label=r"$\theta = \pi/2$")
plt.legend(loc=2, bbox_to_anchor=(1.02, 1), borderaxespad=0);
"""
Explanation: With a little boiler-plate, we can then calculate and plot the result with matplotlib.
End of explanation
"""
|
MegaShow/college-programming | Homework/Principles of Artificial Neural Networks/Week 3 Backpropagation/week_3_numpy.ipynb | mit | # set some inputs
x1 = -2; x2 = 5;
# perform the forward pass
f = x1 * x2 # f becomes -10
# perform the backward pass (backpropagation) in reverse order:
# backprop through f = x * y
dfdx1 = x2 # df/dx = y, so gradient on x becomes 5
print("gradient on x is {:2}".format(dfdx1))
dfdx2 = x1 # df/dy = x, so gradient on y becomes -2
print('gradient on y is {:2}'.format(dfdx2))
"""
Explanation: Week 3 Back Propagation
We introduce back propagation in numpy and pytorch respectively.
If you have some questions or suggestion about BackPropagation with Numpy, contact Jiaxin Zhuang or email(zhuangjx5@mail2.sysu.edu.cn)
1. Simple expressions and interpretation of the gradient
1.1 Simple expressions
Lets start simple so that we can develop the notation and conventions for more complex expressions. Consider a simple multiplication function of two numbers $f(x,y)=xy$. It is a matter of simple calculus to derive the partial derivative for either input:
$$f(x,y) = x y \hspace{0.5in} \rightarrow \hspace{0.5in} \frac{\partial f}{\partial x} = y \hspace{0.5in} \frac{\partial f}{\partial y} = x$$
End of explanation
"""
# set some inputs
x = -2; y = 5; z = -4
# perform the forward pass
q = 2*x + y # q becomes 1
f = q * z # f becomes -4
print(q, f)
# perform the backward pass (backpropagation) in reverse order:
# first backprop through f = q * z = (2*x+y) * z
dfdz = q # df/dz = q, so gradient on z becomes 3
dfdq = z # df/dq = z, so gradient on q becomes -4
# now backprop through q = x + y
dfdx = 2.0 * dfdq # dq/dx = 1. And the multiplication here is the chain rule!
dfdy = 1.0 * dfdq # dq/dy = 1
print('df/dx is {:2}'.format(dfdx))
print('df/dy is {:2}'.format(dfdy))
"""
Explanation: 1.2 interpretation of the gradient
Interpretation:The derivatives indicate the rate of change of a function with respect to that variable surrounding an infinitesimally small region near a particular point:
$$\frac{df(x)}{dx} = \lim_{h\ \to 0} \frac{f(x + h) - f(x)}{h}$$
In other words, the derivative on each variable tells you the sensitivity of the whole expression on its value.As mentioned, the gradient $\nabla f$ is the vector of partial derivatives, so we have that $\nabla f = [\frac{\partial f}{\partial x}, \frac{\partial f}{\partial y}] = [y, x]$.
2. Compound expressions with chain rule
2.1 Simple examples for chain rule
Lets now start to consider more complicated expressions that involve multiple composed functions, such as $f(x,y,z) = (x + y) z$.
This expression is still simple enough to differentiate directly, but we’ll take a particular approach to it that will be helpful with understanding the intuition behind backpropagation.
In particular, note that this expression can be broken down into two expressions: $q=x+y$ and $f=qz$. As seen in the previous section,$f$ is just multiplication of $q$ and $z$, so $\frac{\partial f}{\partial q} = z, \frac{\partial f}{\partial z} = q$,and $q$ is addition of $x$ and $y$ so $\frac{\partial q}{\partial x} = 1, \frac{\partial q}{\partial y} = 1$.
However, we don’t necessarily care about the gradient on the intermediate value $q$ - the value of $\frac{\partial f}{\partial q}$ is not useful. Instead, we are ultimately interested in the gradient of $f$ with respect to its inputs $x$,$y$,$z$.
The chain rule tells us that the correct way to “chain” these gradient expressions together is through multiplication. For example, $\frac{\partial f}{\partial x} = \frac{\partial f}{\partial q} \frac{\partial q}{\partial x}$. In practice this is simply a multiplication of the two numbers that hold the two gradients. Lets see this with an example:
End of explanation
"""
# Load necessary module for later
import numpy as np
import pandas as pd
np.random.seed(1024)
"""
Explanation: 2.2 Intuitive understanding of backpropagation
Notice that backpropagation is a beautifully local process.
Every gate in a circuit diagram gets some inputs and can right away compute two things:
1. its output value and
2. the local gradient of its inputs with respect to its output value.
3. Practice: Writing a simple Feedforward Neural Network
3.1 Outline
We would implement a simple feedforward neural network by using numpy. Thus, we need to define network and implement the forward pass as well as the backword propagation.
Define a simpel feedforward neural netork, with 1 hidden layer. Implement forward and backward
Load data from local csv file with pandas, which contains some training and testing dots, generated by 3 different gaussian distribution.(different mean and std).
Define some functions for visualization and training
Training and predicting every epoch
plot the distribution of the points' label and the predictions
End of explanation
"""
w1_initialization = np.random.randn(2, 5)
w2_initialization = np.random.randn(5, 3)
w2_initialization
class FeedForward_Neural_Network(object):
    """A 2-5-3 fully connected network with sigmoid activations.

    Trained with plain gradient steps on a mean-squared-error objective;
    the update is applied in place inside backward().  The weights are
    fixed constant arrays (instead of random draws) so runs reproduce.
    """
    def __init__(self, learning_rate):
        self.input_channel = 2 # number of input neurons
        self.output_channel = 3 # number of output neurons
        self.hidden_channel = 5 # number of hidden neurons
        self.learning_rate = learning_rate
        # weights initialization
        # Usually, we use random or uniform initialization to initialize weights.
        # For simplicity, here we use the same fixed arrays every run.
        # np.random.randn(self.input_channel, self.hidden_channel)
        # (2x5) weight matrix from input to hidden layer
        self.weight1 = np.array([[ 2.12444863, 0.25264613, 1.45417876, 0.56923979, 0.45822365],
                                 [-0.80933344, 0.86407349, 0.20170137, -1.87529904, -0.56850693]])
        # (5x3) weight matrix from hidden to output layer
        # np.random.randn(self.hidden_channel, self.output_channel)
        self.weight2 = np.array([ [-0.06510141, 0.80681666, -0.5778176 ],
                                  [ 0.57306064, -0.33667496, 0.29700734],
                                  [-0.37480416, 0.15510474, 0.70485719],
                                  [ 0.8452178 , -0.65818079, 0.56810558],
                                  [ 0.51538125, -0.61564998, 0.92611427]])

    def forward(self, X):
        """Forward propagation through our network.

        Args:
            X: input batch, shape [batch_size, 2].

        Returns:
            o: sigmoid outputs, shape [batch_size, 3].
        """
        # dot product of X (input) and the first set of (2x5) weights
        self.h1 = np.dot(X, self.weight1)
        # activation function
        self.z1 = self.sigmoid(self.h1)
        # dot product of hidden layer (z1) and the second set of (5x3) weights
        self.h2 = np.dot(self.z1, self.weight2)
        # final activation function
        o = self.sigmoid(self.h2)
        return o

    def backward(self, X, y, o):
        """Backward pass: compute gradients and update the weights in place.

        Must be called right after forward() since it reuses self.z1.

        Inputs:
            X: data, [batch_size, 2]
            y: label, one-hot vector, [batch_size, 3]
            o: predictions returned by forward(), [batch_size, 3]
        """
        # backward propagate through the network
        self.o_error = y - o # error in output
        # applying derivative of sigmoid to the output error (output delta)
        self.o_delta = self.o_error * self.sigmoid_prime(o)
        # z1 error: how much our hidden layer weights contributed to output error
        self.z1_error = self.o_delta.dot(self.weight2.T)
        # applying derivative of sigmoid to z1 error
        self.z1_delta = self.z1_error * self.sigmoid_prime(self.z1)
        # adjusting first set (input --> hidden) weights
        self.weight1 += X.T.dot(self.z1_delta) * self.learning_rate
        # adjusting second set (hidden --> output) weights
        self.weight2 += self.z1.T.dot(self.o_delta) * self.learning_rate

    def sigmoid(self, s):
        """Logistic activation function: 1 / (1 + exp(-s))."""
        return 1 / (1 + np.exp(-s))

    def sigmoid_prime(self, s):
        """Derivative of the sigmoid, expressed in terms of its *output* s."""
        return s * (1 - s)
"""
Explanation: 3.2 Define a Feedforward Neural Netowk, implement forward and backward
A simple Neural Network with 1 hidden layer.
```
Networks Structure
Input Weights Output
Hidden Layer [batch_size, 2] x [2,5] -> [batch_size, 5]
activation function(sigmoid) [batch_size, 5] -> [batch_size, 5]
Classification Layer [batch_size, 5] x [5,3] -> [batch_size, 3]
activation function(sigmoid) [batch_size, 3] -> [batch_size, 3]
```
According to training and testing data. Each points is in two-dimension space, and there is three categories. And predictions would be a one-hot vector, like [0 0 1] , [1 0 0], [0 1 0]
End of explanation
"""
# Import Module
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import math
train_csv_file = './labels/train.csv'
test_csv_file = './labels/test.csv'
# Load data from csv file, without header
train_frame = pd.read_csv(train_csv_file, encoding='utf-8', header=None)
test_frame = pd.read_csv(test_csv_file, encoding='utf-8', header=None)
# show data in Dataframe format (defined in pandas)
train_frame
# obtain data from specific columns
# obtain data from first and second columns and convert into narray
train_data = train_frame.iloc[:,0:2].values
# obtain labels from third columns and convert into narray
train_labels = train_frame.iloc[:,2].values
# obtain data from first and second columns and convert into narray
test_data = test_frame.iloc[:,0:2].values
# obtain labels from third columns and convert into narray
test_labels = test_frame.iloc[:,2].values
# train & test data shape
print(train_data.shape)
print(test_data.shape)
# train & test labels shape
print(train_labels.shape)
print(test_labels.shape)
"""
Explanation: 3.3 Loading data from a local CSV file using Pandas
End of explanation
"""
def plot(data, labels, caption):
    """Scatter-plot 2-D points, one color per class label.

    Args:
        data: sequence of [x, y] points (indexable as data[i][0], data[i][1]).
        labels: per-point class labels (small non-negative integers).
        caption: plot title.
    """
    colors = cm.rainbow(np.linspace(0, 1, len(set(labels))))
    for i in set(labels):
        xs = [data[index][0] for index, label in enumerate(labels) if label == i]
        ys = [data[index][1] for index, label in enumerate(labels) if label == i]
        # Pass the color via `c=`: the third positional argument of
        # plt.scatter is the marker *size* (s), so the original call was
        # feeding the RGBA color row into the size parameter.
        plt.scatter(xs, ys, c=colors[int(i)])
    plt.title(caption)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.show()
plot(train_data, train_labels, 'train_dataset')
plot(test_data, test_labels, 'test_dataset')
def int2onehot(label):
    """Convert integer class labels into a one-hot matrix.

    Args:
        label: sequence of class indices, shape [batch_size].

    Returns:
        float array of shape [batch_size, num_classes] with a single 1 per row.
    """
    num_classes = len(set(label))
    num_samples = len(label)
    encoded = np.zeros((num_samples, num_classes))
    rows = np.arange(num_samples)
    # Fancy indexing sets exactly one entry per row.
    encoded[rows, label] = 1
    return encoded
# convert labels into one hot vector
train_labels_onehot = int2onehot(train_labels)
test_labels_onehot = int2onehot(test_labels)
print(train_labels_onehot.shape)
print(train_labels_onehot.shape)
def get_accuracy(predictions, labels):
    """Fraction of samples whose argmax prediction matches the argmax label.

    Inputs:
        predictions: [batch_size, categories] scores or one-hot rows.
        labels: [batch_size, categories] one-hot ground truth.

    Returns:
        Accuracy in [0, 1].
    """
    pred_classes = np.argmax(predictions, axis=1)
    true_classes = np.argmax(labels, axis=1)
    correct = np.sum(pred_classes == true_classes)
    return correct / len(true_classes)
# Please read this function carefully, related to implementation of GD, SGD, and mini-batch
def generate_batch(train_data, train_labels, batch_size):
    """Yield (data, labels) slices of at most ``batch_size`` samples.

    batch_size == len(train_data) -> gradient descent (one batch);
    batch_size == 1              -> stochastic gradient descent;
    otherwise                    -> mini-batch (commonly 2, 4, 8, 16, ...).
    The final batch may be smaller when the sizes do not divide evenly.
    """
    num_batches = math.ceil(len(train_data) / batch_size)
    for batch_idx in range(num_batches):
        lo = batch_idx * batch_size
        hi = lo + batch_size
        yield (train_data[lo:hi], train_labels[lo:hi])
def show_curve(ys, title):
    """Plot a per-epoch training curve (loss or accuracy).

    Args:
        ys: list of values, one per epoch.
        title: 'Loss' or 'Accuracy' -- used in the plot title and y label.
    """
    epoch_axis = np.arange(len(ys))
    values = np.array(ys)
    plt.plot(epoch_axis, values, c='b')
    plt.axis()
    plt.title('{} Curve:'.format(title))
    plt.xlabel('Epoch')
    plt.ylabel('{} Value'.format(title))
    plt.show()
"""
Explanation: 3.4 Define some function for visualization and training
End of explanation
"""
learning_rate = 0.1
epochs = 400 # training epoch
batch_size = len(train_data) # GD
# batch_size = 1 # SGD
# batch_size = 8 # mini-batch
model = FeedForward_Neural_Network(learning_rate) # declare a simple feedforward neural model
losses = []
accuracies = []
for i in range(epochs):
loss = 0
for index, (xs, ys) in enumerate(generate_batch(train_data, train_labels_onehot, batch_size)):
predictions = model.forward(xs) # forward phase
loss += 1/2 * np.mean(np.sum(np.square(ys-predictions), axis=1)) # Mean square error
model.backward(xs, ys, predictions) # backward phase
losses.append(loss)
# train dataset acc computation
predictions = model.forward(train_data)
# compute acc on train dataset
accuracy = get_accuracy(predictions, train_labels_onehot)
accuracies.append(accuracy)
if i % 50 == 0:
print('Epoch: {}, has {} iterations'.format(i, index+1))
print('\tLoss: {:.4f}, \tAccuracy: {:.4f}'.format(loss, accuracy))
test_predictions = model.forward(test_data)
# compute acc on test dataset
test_accuracy = get_accuracy(test_predictions, test_labels_onehot)
print('Test Accuracy: {:.4f}'.format(test_accuracy))
"""
Explanation: 3.5 Training model and make predictions
End of explanation
"""
# Draw losses curve using losses
show_curve(losses, 'Loss')
# Draw Accuracy curve using accuracies
show_curve(accuracies, 'Accuracy')
"""
Explanation: !!! Homework 1. Describe the training procedure, based on codes above.
BP神经网络的训练流程主要分为神经网络的初始化、信号的前向传播、误差的反向传播、权重的更新四个步骤。
神经网络的初始化:初始化输入层、隐层、输出层的节点,同时初始化各层之间的权重,选择激励函数、学习速率等参数。这里各层节点分别为3、5、2,权重固定一个值,而不是均匀随机生成,激励函数选择Sigmoid函数。
信号的前向传播:输入训练集样例,通过权重的选择,以及激励函数的作用,最终得到一个输出值。
误差的反向传播:比较输出值和训练集样例的标签,计算出误差,并逐步反向传播至隐层、输入层。
权重的更新:根据下一层传回的误差和学习速率,对当前权重进行更新。
BP神经网络的训练首先初始化神经网络,然后循环训练代数次。
在训练循环中,会根据预先选择的梯度下降法对数据进行分割,每份数据都依次进行前向传播、反向传播、更新权重过程。其中,梯度下降将不分割数据,而随机梯度下降法每次只使用一个样例,mini批梯度下降法根据参数决定每份数据的样例数。
训练结束后,可使用测试集样例来检测当前BP神经网络的正确率。
End of explanation
"""
def training(learning_rate, batch_size):
    """Train a fresh FeedForward_Neural_Network and report train/test accuracy.

    Relies on module-level globals: epochs, train_data, train_labels_onehot,
    test_data, test_labels_onehot.

    Args:
        learning_rate: step size for the model's gradient updates.
        batch_size: len(train_data) -> GD, 1 -> SGD, otherwise mini-batch.

    Returns:
        The trained model.
    """
    model = FeedForward_Neural_Network(learning_rate) # declare a simple feedforward neural model
    losses = []
    accuracies = []
    for i in range(epochs):
        loss = 0
        for index, (xs, ys) in enumerate(generate_batch(train_data, train_labels_onehot, batch_size)):
            predictions = model.forward(xs) # forward phase
            loss += 1/2 * np.mean(np.sum(np.square(ys-predictions), axis=1)) # Mean square error
            model.backward(xs, ys, predictions) # backward phase
        losses.append(loss)
        # train dataset acc computation
        predictions = model.forward(train_data)
        # compute acc on train dataset
        accuracy = get_accuracy(predictions, train_labels_onehot)
        accuracies.append(accuracy)
        if i % 50 == 0:
            print('Epoch: {}, has {} iterations'.format(i, index+1))
            print('\tLoss: {:.4f}, \tAccuracy: {:.4f}'.format(loss, accuracy))
    test_predictions = model.forward(test_data)
    # compute acc on test dataset
    test_accuracy = get_accuracy(test_predictions, test_labels_onehot)
    print('Test Accuracy: {:.4f}'.format(test_accuracy))
    # Draw losses curve using losses
    show_curve(losses, 'Loss')
    # Draw Accuracy curve using accuracies
    show_curve(accuracies, 'Accuracy')
    return model
learning_rate = 0.01
training(learning_rate, batch_size)
"""
Explanation: !!! Howework 2
set learning rate = 0.01 to train the model and show two curve below
End of explanation
"""
# SGD
learning_rate = 0.1
batch_size = 1
training(learning_rate, batch_size)
# mini-batch
learning_rate = 0.1
batch_size = 8
training(learning_rate, batch_size)
"""
Explanation: !!! Howework 3
Use SGD and mini-batch to train model and show four curve below
End of explanation
"""
|
skkandrach/foundations-homework | Homework 11 Soma.ipynb | mit | plate_info = {'Plate ID': 'str'}
df = pd.read_csv("small-violations.csv", dtype=plate_info)
df
df.head()
df.head(10)
df.tail()
"""
Explanation: 1. I want to make sure my Plate ID is a string. Can't lose the leading zeroes!
End of explanation
"""
plate_info = {'Plate ID': 'str'}
df = pd.read_csv("small-violations.csv", dtype=plate_info, na_values={'Vehicle Year': '0', 'Date First Observed': '0'})
df.head()
"""
Explanation: 2. I don't think anyone's car was built in 0AD. Discard the '0's as NaN.
End of explanation
"""
import dateutil
def date_to_date(date):
    """Parse any date-like value into a datetime using dateutil.

    Args:
        date: value whose str() form is a parseable date (e.g. '06/14/2016').

    Returns:
        The corresponding datetime.datetime object.
    """
    return dateutil.parser.parse(str(date))
df.columns
df['New Issue Date']= df['Issue Date'].apply(date_to_date)
import datetime
def convert_to_time(time):
    """Parse a YYYYMMDD-style value into a datetime.

    Args:
        time: a value whose str() form should look like '20140324'
            (ints are fine; 0s were already read in as NaN).

    Returns:
        datetime.datetime on success, or None when the value is missing or
        malformed (e.g. NaN -> 'nan', wrong length, non-digits).
    """
    try:
        return datetime.datetime.strptime(str(time), "%Y%m%d")
    except (ValueError, TypeError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit and
        # genuine bugs are no longer silently swallowed.
        return None
other_df = df[df['Vehicle Expiration Date'] != 0]
other_df.head()
other_df['New Vehicle Expiration Date']= other_df['Vehicle Expiration Date'].apply(convert_to_time)
other_df.head()
"""
Explanation: 3. I want the dates to be dates! Read the read_csv documentation to find out how to make pandas automatically parse dates.
End of explanation
"""
other_df.columns
other_df['Date First Observed'].dtypes
other_df['Date First Observed'].tail()
import dateutil
other_df['Date First Observed']
other_df['Violation Time'].head()
other_df['Violation Time'].tail()
def int_to_date(integer):
    """Convert a YYYYMMDD number (e.g. 20140324.0) into a 'YYYY-M-DD' string.

    Implicitly returns None for NaN values (the 0s were already read as NaN).

    NOTE(review): '%-m' (month without zero padding) is a glibc strftime
    extension that fails on Windows -- confirm the unpadded month is
    intentional, given the day keeps its zero padding via '%d'.
    """
    if not pd.isnull(integer):
        # Drop the float '.0' before parsing: 20140324.0 -> '20140324'.
        date = str(int(integer))
        parsed_date = dateutil.parser.parse(date)
        return parsed_date.strftime("%Y-%-m-%d")
other_df['Date First Observed'].apply(int_to_date)
"""
Explanation: 4. "Date first observed" is a pretty weird column, but it seems like it has a date hiding inside. Using a function with .apply, transform the string (e.g. "20140324") into a Python date. Make the 0's show up as NaN.
End of explanation
"""
def violation_time_to_time(time):
    """Convert an NYC violation time like '0752A' into an 'HH:MM(AM|PM)' string.

    Args:
        time: 5-character string -- 2-digit hour, 2-digit minute, then
            'A' or 'P' marking am/pm.

    Returns:
        24-hour formatted string such as '07:52AM' / '19:52PM', or None when
        the value is missing or malformed (not a string, too short, or not a
        parseable time).
    """
    try:
        hour = time[0:2]
        minutes = time[2:4]
        am_pm = time[4]
        # Build e.g. '07:52 am' so dateutil can parse it.
        regular_time = hour + ":" + minutes + " " + am_pm + 'm'
        violation_time_fixed = dateutil.parser.parse(regular_time)
        return violation_time_fixed.strftime("%H:%M%p")
    except (TypeError, IndexError, ValueError, OverflowError):
        # Narrowed from a bare `except:`: these are the failures raised by
        # slicing a non-string / short string and by dateutil parsing, so
        # real bugs and KeyboardInterrupt are no longer swallowed.
        return None
other_df['Violation Time'].apply(violation_time_to_time)
"""
Explanation: 5. "Violation time" is... not a time. Make it a time.
End of explanation
"""
other_df['Vehicle Color'].value_counts()
def color_rename(color):
    """Normalize common abbreviations of black and white to canonical names.

    Any value that is not a recognized abbreviation is returned unchanged,
    so the other colors (e.g. 'GY', 'RD') are preserved -- previously the
    function fell through and returned None for them, wiping the column.

    Args:
        color: raw vehicle-color string (may be NaN/None).

    Returns:
        'BLACK' or 'WHITE' for known abbreviations, otherwise the input.
    """
    if color in ('BLACK', 'BLK', 'BK'):
        return 'BLACK'
    if color in ('WHITE', 'WHT', 'WH', 'WT', 'W'):
        return 'WHITE'
    return color
"""
Explanation: 6. There sure are a lot of colors of cars, too bad so many of them are the same. Make "BLK" and "BLACK", "WT" and "WHITE", and any other combinations that you notice.
End of explanation
"""
parking_violations_df = pd.read_csv("DOF_Parking_Violation_Codes.csv", encoding="mac_roman", error_bad_lines=False)
parking_violations_df.head()
parking_violations_df['CODE'].describe()
other_df['Violation Code'].describe()
def convert_to_str(n):
    """Render *n* as a string so violation codes can be merged as text keys."""
    return "{}".format(n)
parking_violations_df['Code'] = parking_violations_df['CODE'].apply(convert_to_str)
other_df['Violation code'] = other_df['Violation Code'].apply(convert_to_str)
parking_violations_df.head()
updated_parking_violations_df = parking_violations_df.rename(columns={'Manhattan  96th St. & below': 'Manhattan 96th & below', 'All Other Areas': 'All other areas'})
updated_parking_violations_df.head()
other_df.head()
diff_violations_df = pd.merge(other_df, updated_parking_violations_df, left_on='Violation code', right_on='Code')
diff_violations_df.head()
"""
Explanation: 7. Join the data with the Parking Violations Code dataset from the NYC Open Data site.
End of explanation
"""
diff_violations_df['Manhattan 96th & below'].describe()
diff_violations_df['All other areas'].describe()
diff_violations_df['Manhattan 96th & below'].apply(convert_to_str).head()
diff_violations_df['All other areas'].apply(convert_to_str).head()
# Drop rows whose Manhattan fine is the literal string 'vary' so the fine
# columns can be converted to integers below.
# NOTE(review): the right-hand side reads `new_violations_df`, which is not
# defined anywhere in this part of the notebook -- presumably a stale name
# for `diff_violations_df`; confirm before re-running this cell.
diff_violations_df = new_violations_df[new_violations_df['Manhattan 96th & below'] != 'vary']
diff_violations_df.head()
import re
def strip_and_convert_to_int(string):
    r"""Parse a leading dollar amount such as '$65' or '65 (plus fees)' into an int.

    Returns None when the string does not start with a (possibly $-prefixed)
    number.  The original pattern r"^\$?\d*" matched the empty string, so its
    else-branch was unreachable and digit-less rows raised a ValueError
    instead of returning None.
    """
    match = re.match(r"^\$?(\d+)", string)
    if match:
        return int(match.group(1))
    else:
        return None
diff_violations_df['Manhattan 96th and below'] = diff_violations_df['Manhattan 96th & below'].apply(strip_and_convert_to_int)
diff_violations_df.head()
diff_violations_df['All Other Areas'] = diff_violations_df['All other areas'].apply(strip_and_convert_to_int)
diff_violations_df.tail()
diff_violations_df['All Other Areas'].value_counts().head()
manhattan_violations = diff_violations_df.groupby('Violation code')['All Other Areas'].sum()
manhattan_violations.sum()
violations_not_man = diff_violations_df.groupby('Violation code')['Manhattan 96th and below'].sum()
violations_not_man.sum()
violations_revenue = violations_not_man.sum() + manhattan_violations.sum()
violations_revenue
"""
Explanation: 8. How much money did NYC make off of parking violations?
End of explanation
"""
# Rank violation codes by the revenue they generated in each fee zone.
manhattan_violations.sort_values(ascending=False)
violations_not_man.sort_values(ascending=False)
# NOTE(review): `new_violations_df` is not defined anywhere in this part of
# the notebook -- presumably it was created in an earlier cell; if not, this
# was probably meant to be `diff_violations_df`.  Confirm before re-running.
new_violations_df['Violation code'].value_counts()
"""
Explanation: 9. What's the most lucrative kind of parking violation? The most frequent?
End of explanation
"""
out_of_staters_df = diff_violations_df[diff_violations_df['Registration State'] != 'NY']
out_of_staters_df.head()
out_of_staters_other = out_of_staters_df.groupby('Violation code')['All Other Areas'].sum()
out_of_staters_other.sum()
out_of_staters_manhattan= out_of_staters_df.groupby('Violation code')['Manhattan 96th and below'].sum()
out_of_staters_manhattan.sum()
total_out_of_staters_violations = out_of_staters_other.sum()+ out_of_staters_manhattan.sum()
total_out_of_staters_violations
"""
Explanation: 10. New Jersey has bad drivers, but does it have bad parkers, too? How much money does NYC make off of all non-New York vehicles?
End of explanation
"""
%matplotlib inline
out_of_staters_other.sort_values(ascending=False).plot(kind='bar', x='Violation code')
out_of_staters_manhattan.sort_values(ascending=False).plot(kind='bar', x='Violation code')
"""
Explanation: 11. Make a chart of the top few.
End of explanation
"""
average_tix_price = total_out_of_staters_violations / diff_violations_df['Violation code'].value_counts().sum()
average_tix_price
"""
Explanation: 12. What time of day do people usually get their tickets? You can break the day up into several blocks - for example 12am-6am, 6am-12pm, 12pm-6pm, 6pm-12am.
13. What's the average ticket cost in NYC?
End of explanation
"""
diff_violations_df['Issue Date'].value_counts().head(10).plot(kind='barh')
"""
Explanation: 14. Make a graph of the number of tickets per day.
End of explanation
"""
# Revenue collected per day (out-of-state total divided by tickets per day).
# NOTE(review): `new_violations_df` and the 'New Issue Date' column do not
# appear earlier in this chunk; if they were not built in a previous cell,
# this likely should read `diff_violations_df['Issue Date']` instead.
daily_revenue = total_out_of_staters_violations / new_violations_df['New Issue Date'].value_counts()
daily_revenue.sort_values(ascending=False).head(20).plot(kind='bar')
"""
Explanation: 15. Make a graph of the amount of revenue collected per day.
End of explanation
"""
nyc_licenses = pd.read_excel("NYC.xlsx")
nyc_licenses
"""
Explanation: 16. Manually construct a dataframe out of https://dmv.ny.gov/statistic/2015licinforce-web.pdf (only NYC boroughts - bronx, queens, manhattan, staten island, brooklyn), having columns for borough name, abbreviation, and number of licensed drivers.
End of explanation
"""
diff_violations_df.columns
diff_violations_df['Violation County'].value_counts()
bronx_violations = diff_violations_df[diff_violations_df['Violation County'] == 'BX']
bronx_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'BX']
bronx_tix = bronx_violations.groupby('Violation code')['All Other Areas'].sum()
driver_bronx_tix = bronx_licenses / bronx_tix.sum()
driver_bronx_tix
queens_violations = diff_violations_df[diff_violations_df['Violation County'] == 'Q']
queens_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'Q']
queens_tix = queens_violations.groupby('Violation code')['All Other Areas'].sum()
driver_queens_tix = queens_licenses / queens_tix.sum()
driver_queens_tix
ny_violations = diff_violations_df[diff_violations_df['Violation County'] == 'NY']
ny_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'NY']
ny_tix = ny_violations.groupby('Violation code')['All Other Areas'].sum()
driver_ny_tix = ny_licenses / ny_tix.sum()
driver_ny_tix
brooklyn_violations = diff_violations_df[diff_violations_df['Violation County'] == 'R']
brooklyn_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'R']
brooklyn_tix = brooklyn_violations.groupby('Violation code')['All Other Areas'].sum()
driver_brooklyn_tix = brooklyn_licenses / brooklyn_tix.sum()
driver_brooklyn_tix
# Parking-ticket revenue per licensed driver for Staten Island.
staten_is_violations = diff_violations_df[diff_violations_df['Violation County'] == 'K']
staten_is_licenses = nyc_licenses['Total'][nyc_licenses['Abbreviation'] == 'K']
# NOTE(review): `violations_kings` is not defined in this chunk; this line
# presumably meant `staten_is_violations` (copy/paste slip from the Brooklyn
# cell above).  Also double-check the county codes: 'K' is normally Kings
# County (Brooklyn) and 'R' Richmond (Staten Island), i.e. the opposite of
# how this cell and the previous one label them -- verify against the data.
staten_is_tix = violations_kings.groupby('Violation code')['All Other Areas'].sum()
driver_staten_is_tix = staten_is_licenses / staten_is_tix.sum()
driver_staten_is_tix
"""
Explanation: 17. What's the parking-ticket-$-per-licensed-driver in each borough of NYC? Do this with pandas and the dataframe you just made, not with your head!
End of explanation
"""
|
LSSTC-DSFP/LSSTC-DSFP-Sessions | Sessions/Session05/Day4/stackdiff_Narayan/02_Reprojection/Reproject_images_exercise.ipynb | mit | import numpy as np
import matplotlib
import astropy.io.fits as afits
from astropy.wcs import WCS
import reproject
from astropy.visualization import ZScaleInterval
import astropy.table as at
import astropy.coordinates as coords
import astropy.units as u
from astropy.visualization.wcsaxes import WCSAxes
import astropy.visualization.wcsaxes.frame as frame
%matplotlib notebook
%pylab
"""
Explanation: Image Reprojection Exercise
Written by Gautham Narayan (gnarayan@stsci.edu) for LSST DSFP #5
We've already determined the WCS solution for two misaligned images in Exercise 1 using astrometry.net (Register_images_exercise.ipynb). Now we're going to reproject the images onto the same grid.
End of explanation
"""
!ls ../01_Registration/out/
"""
Explanation: If you didn't get through Exercise 1, that's OK! I saved my output! You can just use that if you'd like and press on!
End of explanation
"""
#### You get to do this ####
"""
Explanation: Open the two images and load the wcs solution that you created in the tangent plane (I used wcs.tan as my file extension)
End of explanation
"""
#### You get to do this ####
"""
Explanation: OK, now reproject the data of the 2008 image on to the 2004 image. Look for functions in the reproject module. Remember to keep flux conservation in mind!
End of explanation
"""
#### You get to do this ####
"""
Explanation: Now display the two images as you did in the last exercise - you should be able to copy and paste the same code even
End of explanation
"""
#### You get to do this. You can look for NaNs in the image, or the reproject functions should return a footprint ####
"""
Explanation: Hopefully that's halfway reasonable looking compared to what we started from! For funsies, scale both images to have mean 1 and attempt to subtract them. Remember than there are NaNs in the reprojected image!
End of explanation
"""
#### You get to do this ####
"""
Explanation: Now construct a simple difference image from the original data
End of explanation
"""
#### You get to do this ####
"""
Explanation: Create a figure instance, zscale the differences and see what the residuals look like
End of explanation
"""
|
asazo/ANN | tarea3/Pregunta 2.ipynb | mit | import numpy as np
from theano.tensor.shared_randomstreams import RandomStreams
from matplotlib import pyplot
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers.embeddings import Embedding
from keras.layers import Dropout
from keras.models import load_model
"""
Explanation: Pregunta 2: Análisis de sentimientos usando RNN
End of explanation
"""
from keras.datasets import imdb
np.random.seed(3)
srng = RandomStreams(8)
(X_train, y_train), (X_test, y_test) = imdb.load_data(seed=15)
"""
Explanation: a) Cargar el dataset completo
End of explanation
"""
# Concatenamiento de conjuntos de entrenamiento
X = np.concatenate((X_train, X_test), axis=0)
y = np.concatenate((y_train, y_test), axis=0)
print "Palabras en el dataset: ", X.size
result = map(len, X)
pyplot.boxplot(result)
pyplot.show()
"""
Explanation: b) Cantidad de palabras en el dataset, boxplot de distribución de largo de palabras
End of explanation
"""
# Se cargan las 3000 palabras más relevantes
top_words = 3000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
# Se acotan los comentarios a un máximo de 500 palabras
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
"""
Explanation: c) Cargar palabras más relevantes y acotar largo de comentarios
End of explanation
"""
# Tamaño vector generado por embedding
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
"""
Explanation: d) Entrenamiento red LSTM con capa de embedding
End of explanation
"""
model = load_model('Pregunta2/LSTM-32.h5')
scores = model.evaluate(X_test, y_test, verbose=0)
print('Accuracy: ', scores[1])
"""
Explanation: ```python
Se entrena el modelo en servidor GPU y se guarda para luego ser evaluado
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=3, batch_size=64)
model.save('Pregunta2/LSTM-32.h5')
```
End of explanation
"""
# Tamaño vector generado por embedding largo 16
embedding_vector_length = 16
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
"""
Explanation: e) Entrenamiento red LSTM con distintos tamaños de vector de embedding, se prueba con valores 16 y 64 además del 32 de la pregunta anterior
End of explanation
"""
# Tamaño vector generado por embedding largo 64
embedding_vector_length = 64
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
"""
Explanation: ```python
Se entrena el modelo en servidor GPU y se guarda para luego ser evaluado
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=3, batch_size=64)
model.save('pregunta2/LSTM-16.h5')
```
End of explanation
"""
# Se cargan los modelos obtenidos
model16 = load_model('Pregunta2/LSTM-16.h5')
model32 = load_model('Pregunta2/LSTM-32.h5')
model64 = load_model('Pregunta2/LSTM-64.h5')
# Se obtiene el accuracy de cada modelo
scores16 = model16.evaluate(X_test, y_test, verbose=0)
scores32 = model32.evaluate(X_test, y_test, verbose=0)
scores64 = model64.evaluate(X_test, y_test, verbose=0)
print('Accuracy tamaño de vector 16: ', scores16[1])
print('Accuracy tamaño de vector 32: ', scores32[1])
print('Accuracy tamaño de vector 64: ', scores64[1])
print('Pérdida tamaño de vector 16: ', scores16[0])
print('Pérdida tamaño de vector 32: ', scores32[0])
print('Pérdida tamaño de vector 64: ', scores64[0])
"""
Explanation: ```python
Se entrena el modelo en servidor GPU y se guarda para luego ser evaluado
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=3, batch_size=64)
model.save('pregunta2/LSTM-64.h5')
```
End of explanation
"""
# Se cargan las 3000 palabras más relevantes
top_words = 3000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
# Se acotan los comentarios a un máximo de 500 palabras
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
# Tamaño vector generado por embedding largo 32
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
"""
Explanation: f) Entrenamiento cambiando el tamaño de las palabras seleccionadas
Se probará con 3000, 5000 y 8000 palabras top
End of explanation
"""
# Se cargan las 5000 palabras más relevantes
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
# Se acotan los comentarios a un máximo de 500 palabras
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
# Tamaño vector generado por embedding largo 32
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
"""
Explanation: ```python
Se entrena el modelo en servidor GPU y se guarda para luego ser evaluado
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=3, batch_size=64)
model.save('pregunta2/LSTM-words-3000.h5')
```
End of explanation
"""
# Se cargan las 8000 palabras más relevantes
top_words = 8000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
# Se acotan los comentarios a un máximo de 500 palabras
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
# Tamaño vector generado por embedding largo 32
embedding_vector_length = 32
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
"""
Explanation: ```python
Se entrena el modelo en servidor GPU y se guarda para luego ser evaluado
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=3, batch_size=64)
model.save('pregunta2/LSTM-words-5000.h5')
```
End of explanation
"""
# Se cargan los modelos opbtenidos
model_3000 = load_model('Pregunta2/LSTM-words-3000.h5')
model_5000 = load_model('Pregunta2/LSTM-words-5000.h5')
model_8000 = load_model('Pregunta2/LSTM-words-8000.h5')
# Se obtiene el accuracy de cada modelo
# Se cargan las 8000 palabras más relevantes
top_words = 3000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
scores3000 = model_3000.evaluate(X_test, y_test, verbose=0)
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
scores5000 = model_5000.evaluate(X_test, y_test, verbose=0)
top_words = 8000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
scores8000 = model_8000.evaluate(X_test, y_test, verbose=0)
print('Accuracy cantidad de palabras 3000: ', scores3000[1])
print('Accuracy cantidad de palabras 5000: ', scores5000[1])
print('Accuracy cantidad de palabras 8000: ', scores8000[1])
print('Perdida cantidad de palabras 3000: ', scores3000[0])
print('Perdida cantidad de palabras 5000: ', scores5000[0])
print('Perdida cantidad de palabras 8000: ', scores8000[0])
"""
Explanation: ```python
Se entrena el modelo en servidor GPU y se guarda para luego ser evaluado
model.fit(X_train, y_train, validation_data=(X_test, y_test), nb_epoch=3, batch_size=64)
model.save('pregunta2/LSTM-words-8000.h5')
```
End of explanation
"""
embedding_vector_length = 32
top_words = 5000
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(Dropout(0.2))
model.add(LSTM(100))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
"""
Explanation: g) Usar Dropout para entrenar
End of explanation
"""
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
model = load_model('Pregunta2/LSTM-dropout.h5')
scores = model.evaluate(X_test, y_test, verbose=0)
print('Accuracy: ', scores[1])
print('Pérdida: ', scores[0])
"""
Explanation: python
model.fit(X_train, y_train, nb_epoch=3, batch_size=64)
model.save('pregunta2/LSTM-dropout.h5')
End of explanation
"""
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
embedding_vector_length = 64
top_words = 5000
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(Dropout(0.2))
model.add(LSTM(100))
model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
"""
Explanation: h) Propuesta nuevo modelo
End of explanation
"""
model = load_model('Pregunta2/LSTM-dropout-64.h5')
scores = model.evaluate(X_test, y_test, verbose=0)
print('Accuracy: ', scores[1])
print('Pérdida: ', scores[0])
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
embedding_vector_length = 64
top_words = 5000
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(Dropout(0.1))
model.add(LSTM(100))
model.add(Dropout(0.1))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
"""
Explanation: python
model.fit(X_train, y_train, nb_epoch=3, batch_size=64)
model.save('pregunta2/LSTM-dropout-64.h5')
End of explanation
"""
model = load_model('Pregunta2/LSTM-dropout-64-2.h5')
scores = model.evaluate(X_test, y_test, verbose=0)
print('Accuracy: ', scores[1])
print('Perdida: ', scores[0])
top_words = 5000
(X_train, y_train), (X_test, y_test) = imdb.load_data(nb_words=top_words, seed=15)
X_train = sequence.pad_sequences(X_train, maxlen=500)
X_test = sequence.pad_sequences(X_test, maxlen=500)
embedding_vector_length = 32
top_words = 5000
model = Sequential()
model.add(Embedding(top_words, embedding_vector_length, input_length=500))
model.add(Dropout(0.1))
model.add(LSTM(100))
model.add(Dropout(0.1))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
"""
Explanation: python
model.fit(X_train, y_train, nb_epoch=3, batch_size=64)
model.save('pregunta2/LSTM-dropout-64.h5')
End of explanation
"""
model = load_model('Pregunta2/LSTM-dropout-64-3.h5')
scores = model.evaluate(X_test, y_test, verbose=0)
print('Accuracy: ', scores[1])
print('Perdida: ', scores[0])
"""
Explanation: python
model.fit(X_train, y_train, nb_epoch=3, batch_size=64)
model.save('pregunta2/LSTM-dropout-64-3.h5')
End of explanation
"""
|
Oli4/lsi-material | Algorithmic_Basics_of_Bioinformatics/Algorithmic Basics of Bioinformatics Tutorial Sheet 6.ipynb | mit | def DPChange(M,c,d):
import math
best_num_coins = [0]
for m in range(1,M+1):
best_num_coins.append(math.inf)
for i in range(0,d):
if m >= c[i]:
if best_num_coins[m-c[i]] +1 < best_num_coins[m]:
best_num_coins[m] = best_num_coins[m-c[i]] + 1
return best_num_coins[M]
DPChange(11,[1,3,4], 3)
"""
Explanation: Question I: Coin Change Problem
This question illustrates the Change problem and the use of
Dynamic programming.
(a) Write down the running complexity ($O$ -notation) in terms of M, d of the two coin change algorithms RecursiveChange(M,c,d) and DPChange(M,c,d) as mentioned in the lecture slides. Explain your answer.
The running complexity of RecursiveChange(M,c,d) is in $O(d^M)$ The running complexity of DPChange(M,c,d) is in $O(Md)$.
(b) Given the denominations 1, 3, and 4, what is the minimum number of coins needed to make change for a given value of 11? Find this out by writing down each step of the dynamic programming algorithm introduced in the lecture.
For the dynamic programming algorithm, the minimum number of coins needed is computed for every value up to the value which is asked for. The results are stored in a list and used for the computation of higher values. You therefore first initialize the list of needed coins with the single element 0, since no coins are needed to make change for the value 0. Then for every value up to the value which has been asked for, you append a new element to the list which is set to infinity. Next you go through all your denominations and check whether the denomination is smaller than or equal to your value. If it is, you test whether the number of coins needed using that denomination would be smaller than with the denominations tested so far. If so, you update the number of needed coins for the value being computed at this step.
For the value 11 and the denominations 1,3,4 the algorithm does the following with the list of needed coins.
[0]
[0, inf]
[0, 1]
[0, 1, inf]
[0, 1, 2]
[0, 1, 2, inf]
[0, 1, 2, 3]
[0, 1, 2, 1]
[0, 1, 2, 1, inf]
[0, 1, 2, 1, 2]
[0, 1, 2, 1, 1]
[0, 1, 2, 1, 1, inf]
[0, 1, 2, 1, 1, 2]
[0, 1, 2, 1, 1, 2, inf]
[0, 1, 2, 1, 1, 2, 3]
[0, 1, 2, 1, 1, 2, 2]
[0, 1, 2, 1, 1, 2, 2, inf]
[0, 1, 2, 1, 1, 2, 2, 3]
[0, 1, 2, 1, 1, 2, 2, 2]
[0, 1, 2, 1, 1, 2, 2, 2, inf]
[0, 1, 2, 1, 1, 2, 2, 2, 3]
[0, 1, 2, 1, 1, 2, 2, 2, 2]
[0, 1, 2, 1, 1, 2, 2, 2, 2, inf]
[0, 1, 2, 1, 1, 2, 2, 2, 2, 3]
[0, 1, 2, 1, 1, 2, 2, 2, 2, 3, inf]
[0, 1, 2, 1, 1, 2, 2, 2, 2, 3, 4]
[0, 1, 2, 1, 1, 2, 2, 2, 2, 3, 3]
[0, 1, 2, 1, 1, 2, 2, 2, 2, 3, 3, inf]
[0, 1, 2, 1, 1, 2, 2, 2, 2, 3, 3, 4]
[0, 1, 2, 1, 1, 2, 2, 2, 2, 3, 3, 3]
End of explanation
"""
import numpy as np
n = -np.inf
# E1 M1 G1 D1 D2 M2 G2 D3 M3 G3 D4 E2 E3 D5 D6
adjacency = np.matrix([[n, 0, 0, 0, n, n, n, n, n, n, n, n, n, n, n], #E
[n, n, 0, 20, n, 20, 20, n, n, n, n, n, n, n, n],#M1
[n, n, n, n, n, 15, 15, n, n, n, n, n, n, n, n], #G1
[n, n, n, n, 10, n, 10, n, n, n, n, n, n, n, n], #D1
[n, n, n, n, n, n, n, 15, n, n, n, n, n, n, n], #D2
[n, n, n, n, n, n, n, n, 20, 20, 20, n, n, n, n],#M2
[n, n, n, n, n, n, n, n, n, 20, n, n, n, n, n], #G2
[n, n, n, n, n, n, n, n, 20, n, 20, n, n, n, n], #D3
[n, n, n, n, n, n, n, n, n, n, n, 15, n, n, n], #M3
[n, n, n, n, n, n, n, n, n, n, n, 5, n, n, n], #G3
[n, n, n, n, n, n, n, n, n, n, n, 10, n, n, n], #D4
[n, n, n, n, n, n, n, n, n, n, n, n, 45, n, n], #E2
[n, n, n, n, n, n, n, n, n, n, n, n, n, 10, n], #E3
[n, n, n, n, n, n, n, n, n, n, n, n, n, n, 5], #D5
[n, n, n, n, n, n, n, n, n, n, n, n, n, n, n]]) #D6
def MTP(adj):
    '''
    Return the length of the longest path through a directed acyclic graph,
    solving the Manhattan Tourist Problem.

    `adj` is a square adjacency matrix whose nodes are ordered
    topologically: adj[i, j] >= 0 is the weight of edge i -> j and -inf
    marks "no edge".  Node 0 is the source; the last node is the sink.
    '''
    # (Removed an unused `import numpy as np` -- the function only indexes
    # the matrix it is given.)
    # score[node] = length of the longest path from the source to `node`.
    score = {0: 0}
    for node in range(1, adj.shape[0]):
        possibilities = []
        for in_edge in range(adj.shape[0]):
            if adj[in_edge, node] >= 0:
                possibilities.append(adj[in_edge, node] + score[in_edge])
        # Assumes every node past the source has at least one predecessor,
        # which holds for topologically ordered DAGs with a single source.
        score[node] = max(possibilities)
    # Use shape[0] throughout: the matrix is square, so mixing shape[1] in
    # (as the original did) only worked by accident.
    return score[adj.shape[0] - 1]
MTP(adjacency)
"""
Explanation: Question II: Manhattan Tourist Problem
Longest Path Problem : The goal of this problem is to use the
directed graph as in Fig. 1 and to find the longest path from E1
to D6.
(a) Using the approach of the Manhattan Tourist Problem, use dynamic programming to find the length of the longest path
End of explanation
"""
|
nonotone79/investigativ | 02 Jupyter Notebook & Python Intro.ipynb | mit | #Mit einem Hashtag vor einer Zeile können wir Code kommentieren, auch das ist sehr wichtig.
#Immer, wirklich, immer den eigenen Code zu kommentieren. Vor allem am Anfang.
print('hello world')
#Der Printbefehl druckt einfach alles aus. Nicht wirklich wahnsinnig toll.
#Doch er ist später sehr nützlich. Vorallem wenn es darum geht Fehler im eigenn Code zu finden.
#Mit dem Inputbefehl kannst Du Den Nutzer mit dem intergieren.
input('wie alt bis Du?')
"""
Explanation: Jupyter Notebook & Python Intro
Zuerst navigieren wir mit der Kommandozeile in den Folder, wo wir das Jupyter Notebook abspeichern wollen. Dann gehen wir in unser virtual environment und starten mit "jupyter notebook" unser Notebook auf. Jupyter Notebook ist eine Arbeitsoberfläche, der für Coding-Anfänger sehr einfach zu bedienen ist, denn es lassen sich Code-Teile einzelnen abspielen.
Es gibt zwei Formate der Zellen. Code-Format und sogenanntes Markdown. Letzteres ist ein Textformat, das dem Text möglichst wenige Formatinfos anhängt. Nicht wie Word zum Beispiel. Wenn man grosse Notebooks entwickelt, ist es sehr hilfreich damit zu arbeiten. Zum Beispiel
Titel
Titel
Titel
Titel
Titel
Oder Aufzählungen, Fetten. Das geht alles mit Markdown. Man kann sogar Tabellen bauen oder Hyper Links setzen. Wie zum Beispiel auf dieses Markdown Cheatsheet. Hier sind weitere sehr praktische Format. In der Regel benutzten wir Jupyter Notebooks aber nicht, um zu texten, sondern zu coden. Legen wir los.
Print und Input
Datentypen
Aktionen
Variablen und Zuordnungen
If, elif, else
Lists
Dictionaries
Tuples
Simple Funktionen: len, sort, sorted
For Loop
Python
Print und Input
End of explanation
"""
#Strings
'Hallo wie geht es Dir'
"12345" #versteht der computer als text
str(124) #soll es als text verstehen
#Integer
567
int('1234') #hier versteht es der computer als zahl
#Floats
4.542323 #hier steht es der computer als zahlen mit kommastellen
float(12)
#Dates, eigentlich Strings
'15-11-2019'
"""
Explanation: Datentypen
End of explanation
"""
type("4.56") #mit type kann ich überprüfen, wie es der computer versteht, ob es float, integer und dates
print('Hallo' + 'wie' + 'geht' + 'es') #reine addition
print('Hallo','wie','geht','es') #mit kommas ist es etwas leserlicher
#Alle anderen gängigen:
#minus -
#Mal *
#geteilt durch /
#Spezial: Modulo. %, geteilt durch und der Rest, der übrigbleibt
13 % 5
"""
Explanation: Aktionen
End of explanation
"""
#Grösser und kleiner als:
#< >
#Gleich == (wichtig, doppelte Gleichzeichen) #doppeltes Gleichzeichen -> ist der Vergleich
#Denn das einfache Gleichzeichen definiert eine Variable #einfaches Gleichzeichen -> ist die Zuordnung (Variable)
'Schweiz' == 'reich'
Schweiz = 'reich'
Schweiz == 'reich'
'Schweiz' = 'reich'
1 = 6
a = b
a = 'b' #hier ordne ich zu
a == "b" #hier vergleiche ich a mit b und er sagt, es sei richtig, weil ich vorher zugeordnet habe
a
"""
Explanation: Variablen, Vergleiche und Zuordnungen von Variablen
End of explanation
"""
elem = int(input('Wie alt bist Du?')) #elem ist die variable
if elem < 0:
print('Das ist unmöglich')
else:
print('Du bist aber alt')
elem = int(input('Wie alt bist Du?'))
if elem < 0:
print('Das ist unmöglich')
elif elem < 25: #ich kann unendlich elif einfügen, dann werden beide parameter verwendet / ansonsten if, ausschliessend
print('Du bist aber jung')
else:
print('Du bist aber alt')
"""
Explanation: if - else - (elif)
End of explanation
"""
#Eckige Klammern
[1,2,3,4,5,6,7] #der variable lst zugeordnet dazu
lst = [1, 2, 3, 4, 5, 6, 7]
lst
#Einzelene Elemente #computer fängt immer bei null an
lst[0]
#Ganze Abschnitte
lst[:4]
#Komplexere Schnitte
lst[::2]
#Append, Pop, etc. #nimmt das entsprechende element raus
lst.pop(0)
lst
lst.append(25) #hier füge ich eine zahle hinzu
lst
#Aufpassen mit Befehl: list weil das macht aus etwas eine Liste. Auch aus Strings:
list('hallo wie geht') #aus den strings eine liste machen
#Elegantester Weg, eine Liste zu schreiben. Und ganz wichtig,
#der Computer beginnt immer bei 0.
list(range(10))
"""
Explanation: Lists
End of explanation
"""
#Komische, geschwungene Klammern #dictionary sind mächtigere listen
{'Tier'#schlüssel: 'Hund', 'Grösse': 124, 'Alter': 10} #geschwungene klammern alt 8 + 9 eckige klammern alt 5 + 6
dct = {'Tier': 'Hund', 'Grösse': 124, 'Alter': 10} #dct ist die variable
dct['Tier']
#List of Dictionaires #sind
dct_lst = [{'Tier': 'Hund', 'Grösse': 124, 'Alter': 10}, {'Tier': 'Katze', 'Grösse': 130, 'Alter': 8}]
dct_lst[0]
dct_lst[0]['Alter']
"""
Explanation: Dictionaries
Verwende hier die geschwungene Klammern
End of explanation
"""
tuple(lst)
#Unveränderbar. Also gutes Format, um Sachen abzuspeichern.
#Aber das wirklich nur der Vollständigkeitshalber.
"""
Explanation: Tuples
Hier sind runde Klammern König.
End of explanation
"""
#len mit Strings #inklusive leerschlag
len('hallo wie geht es Dir')
#len mit Lists
len([1,2,3,4,4,5])
#len mit dictionaries #zählt die schlüssel
len({'Tier': 'Hund', 'Alter': 345})
#len mit Tuples
len((1,1,1,2,2,1))
#sorted für momentane Sortierung
sorted('hallo wie geht es Dir')
a = 'hallo wie geht es Dir'
sorted(a)
a
#Sort funktioniert allerdings "nur" mit lists
lst = [1, 5, 9, 10, 34, 12, 12, 14]
lst.sort()
lst
dic = {'Tier': 'Hund', 'Alter': 345}
dic.sort()
"""
Explanation: Simple Funktionen - len und sort
Beachte wie man die aufruft. Nämlich mit runden Klammern
End of explanation
"""
lst
for x in lst: #x ist stellvertretend für jedes element in der liste
print(x)
dic = {'Tier': 'Hund', 'Alter': 345}
for key, value in dic.items():
print(key, value)
#for loop to make new lists #wir definieren eine neue liste
lst
#Nehmen wir einmal an, wir wollen nur die geraden Zahlen in der Liste #oder anders gesagt: durch zwei teilbar
new_lst = []
for elem in lst:
if elem % 2 == 0:
new_lst.append(elem)
else:
continue
new_lst
"""
Explanation: For Loop
End of explanation
"""
dic_lst = [{'Animal': 'Dog', 'Size': 45},
{'Animal': 'Cat', 'Size': 23},
{'Animal': 'Bird', 'Size': 121212}]
for dic in dic_lst:
print(dic)
for dic in dic_lst:
print(dic['Animal'])
for dic in dic_lst:
print(dic['Animal'] + ': '+ str(dic['Size']))
"""
Explanation: For loop with list of dictionaries
End of explanation
"""
|
datascienceguide/datascienceguide.github.io | tutorials/Exploratory-Data-Analysis-Tutorial.ipynb | mit | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
%matplotlib inline
anscombe_i = pd.read_csv('../datasets/anscombe_i.csv')
anscombe_ii = pd.read_csv('../datasets/anscombe_ii.csv')
anscombe_iii = pd.read_csv('../datasets/anscombe_iii.csv')
anscombe_iv = pd.read_csv('../datasets/anscombe_iv.csv')
"""
Explanation: Exploratory Data Analysis
Author: Andrew Andrade (andrew@andrewandrade.ca)
This is complimentory tutorial for datascienceguide.github.io outlining the basics of exploratory data analysis
In this tutorial, we will learn to open a comma separated value (CSV) data file and find summary statistics and basic visualizations on the variables in the Anscombe dataset (to see the importance of visualization). Next we will investigate Fisher's Iris data set using more powerful visualizations.
These tutorials assumes a basic understanding of python so for those new to python, understanding basic syntax will be very helpful. I recommend writing python code in Jupyter notebook as it allows you to rapidly prototype and annotate your code.
Python is a very easy language to get started with and there are many guides:
Full list:
http://docs.python-guide.org/en/latest/intro/learning/
My favourite resources:
https://docs.python.org/2/tutorial/introduction.html
https://docs.python.org/2/tutorial/
http://learnpythonthehardway.org/book/
https://www.udacity.com/wiki/cs101/%3A-python-reference
http://rosettacode.org/wiki/Category:Python
Once you are familiar with python, the first part of this guide is useful in learning some of the libraries we will be using:
http://cs231n.github.io/python-numpy-tutorial
In addition, the following post helps teach the basics for data analysis in python:
http://www.analyticsvidhya.com/blog/2014/07/baby-steps-libraries-data-structure/
http://www.gregreda.com/2013/10/26/intro-to-pandas-data-structures/
Downloading csvs
We should store this in a known location on our local computer or server. The simplest way is to download and save it in the same folder you launch Jupyter notebook from, but I prefer to save my datasets in a datasets folder one directory up from my tutorial code (../datasets/).
You should dowload the following CSVs:
http://datascienceguide.github.io/datasets/anscombe_i.csv
http://datascienceguide.github.io/datasets/anscombe_ii.csv
http://datascienceguide.github.io/datasets/anscombe_iii.csv
http://datascienceguide.github.io/datasets/anscombe_iv.csv
http://datascienceguide.github.io/datasets/iris.csv
If using a server, you can download the file by using the following command:
bash
wget http://datascienceguide.github.io/datasets/iris.csv
Now we can run the following code to open the csv.
End of explanation
"""
print anscombe_i[0:5]
"""
Explanation: The first three lines of code import libraries we are using and renames to shorter names.
Matplotlib is a python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. We will use it for basic graphics
Numpy is the fundamental package for scientific computing with Python. It contains among other things:
a powerful N-dimensional array object
sophisticated (broadcasting) functions
tools for integrating C/C++ and Fortran code
useful linear algebra, Fourier transform, and random number capabilities
Pandas is open source, BSD-licensed library providing high-performance, easy-to-use data structures and data analysis tools for the Python programming language.
It extends the numpy array to allow for columns of different variable types.
Since we are using Jupyter notebook we use the line %matplotlib inline to tell python to put the figures inline with the notebook (instead of a popup)
pd.read_csv opens a .csv file and stores it into a dataframe object which we call anscombe_i, anscombe_ii, etc.
Next, let us see the structure of the data by printing the first 5 rows (using [:5]) data set:
End of explanation
"""
print "Data Set I"
print anscombe_i.describe()[:3]
print "Data Set II"
print anscombe_ii.describe()[:3]
print "Data Set III"
print anscombe_iii.describe()[:3]
print "Data Set IV"
print anscombe_iv.describe()[:3]
"""
Explanation: Now let us use the describe function to see the 3 most basic summary statistics
End of explanation
"""
plt.figure(1)
plt.scatter(anscombe_i.x, anscombe_i.y, color='black')
plt.title("anscombe_i")
plt.xlabel("x")
plt.ylabel("y")
plt.figure(2)
plt.scatter(anscombe_ii.x, anscombe_ii.y, color='black')
plt.title("anscombe_ii")
plt.xlabel("x")
plt.ylabel("y")
plt.figure(3)
plt.scatter(anscombe_iii.x, anscombe_iii.y, color='black')
plt.title("anscombe_iii")
plt.xlabel("x")
plt.ylabel("y")
plt.figure(4)
plt.scatter(anscombe_iv.x, anscombe_iv.y, color='black')
plt.title("anscombe_iv")
plt.xlabel("x")
plt.ylabel("y")
"""
Explanation: It appears that the datasets are almost identical by looking only at the mean and the standard deviation. Instead, let us make a scatter plot for each of the data sets.
Since the data is stored in a data frame (similar to an excel sheet), we can see the column names on top and we can access the columns using the following syntax
anscombe_i.x
anscombe_i.y
or
anscombe_i['x']
anscombe_i['y']
End of explanation
"""
# basic box plot
plt.figure(1)
plt.boxplot(anscombe_i.y)
plt.title("anscombe_i y box plot")
"""
Explanation: Shockingly, we can clearly see that the datasets are quite different! The first data set has pure irreducible error, the second data set is not linear, the third dataset has an outlier, and the fourth dataset all of x values are the same except for an outlier. If you do not believe me, I uploaded an excel worksheet with the full datasets and summary statistics here
Now let us learn how to make a box plot. Before writing this tutorial I didn't know how to make a box plot in matplotlib (I usually use seaborn which we will learn soon). I did a quick google search for "box plot matplotlib) and found an example here which outlines a couple of styling options.
End of explanation
"""
import seaborn as sns
sns.set(style="ticks")
# Load the example dataset for Anscombe's quartet
df = sns.load_dataset("anscombe")
# Show the results of a linear regression within each dataset
sns.lmplot(x="x", y="y", col="dataset", hue="dataset", data=df,
col_wrap=2, ci=None, palette="muted", size=4,
scatter_kws={"s": 50, "alpha": 1})
"""
Explanation: Trying reading the documentation for the box plot above and make your own visuaizations.
Next we are going to learn how to use Seaborn which is a very powerful visualization library. Matplotlib is a great library and has many examples of different plots, but seaborn is built on top of matplot lib and offers better plots for statistical analysis. If you do not have seaborn installed, you can follow the instructions here: http://stanford.edu/~mwaskom/software/seaborn/installing.html#installing . Seaborn also has many examples and also has a tutorial.
To show the power of the library we are going to plot the anscombe datasets in 1 plot following this example: http://stanford.edu/~mwaskom/software/seaborn/examples/anscombes_quartet.html . Do not worry too much about what the code does (it loads the same dataset and changes settings to make the visualization clearer), we will get more experience with seaborn soon.
End of explanation
"""
iris = pd.read_csv('../datasets/iris.csv')
print iris[0:5]
print iris.describe()
"""
Explanation: Seaborn does linear regression automatically (which we will learn soon). We can also see that the linear regression is the same for each dataset even though they are quite different.
The big takeway here is that summary statistics can be deceptive! Always make visualizations of your data before making any models.
Iris Dataset
Next we are going to visualize the Iris dataset. Let us first read the .csv and print the first elements of the dataframe. We also get the basic summary statistics.
End of explanation
"""
#select all Iris-setosa
iris_setosa = iris[iris.iris == "Iris-setosa"]
plt.figure(1)
#make histogram of sepal lenth
plt.hist(iris_setosa["sepal length"])
plt.xlabel("sepal length")
plt.figure(2)
plt.scatter(iris_setosa["sepal width"], iris_setosa["sepal length"] )
plt.xlabel("sepal width")
plt.ylabel("sepal lenth")
"""
Explanation: As we can see, it is difficult to interpret the results. We can see that sepal length, sepal width, petal length and petal width are all numeric features, and the iris variable is the specific type of iris (or categorical variable). To better understand the data, we can split the data based on each type of iris, make a histogram for each numeric feature, scatter plot between features and make many visualizations. I will demonstrate the process for generating a histogram for sepal length of Iris-setosa and a scatter plot for sepal length vs width for Iris-setosa
End of explanation
"""
sns.pairplot(iris, hue="iris")
"""
Explanation: This would help us to better undestand the data and is necessary for good analysis, but to do this for all the features and iris types (classes) would take a significant amount of time. Seaborn has a function called the pairplot which will do all of that for us!
End of explanation
"""
|
google/starthinker | colabs/anonymize_query.ipynb | apache-2.0 | !pip install git+https://github.com/google/starthinker
"""
Explanation: BigQuery Anonymize Query
Runs a query and anynonamizes all rows. Used to create sample table for dashboards.
License
Copyright 2020 Google LLC,
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Disclaimer
This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team.
This code generated (see starthinker/scripts for possible source):
- Command: "python starthinker_ui/manage.py colab"
- Command: "python starthinker/tools/colab.py [JSON RECIPE]"
1. Install Dependencies
First install the libraries needed to execute recipes, this only needs to be done once, then click play.
End of explanation
"""
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
"""
Explanation: 2. Set Configuration
This code is required to initialize the project. Fill in required fields and press play.
If the recipe uses a Google Cloud Project:
Set the configuration project value to the project identifier from these instructions.
If the recipe has auth set to user:
If you have user credentials:
Set the configuration user value to your user credentials JSON.
If you DO NOT have user credentials:
Set the configuration client value to downloaded client credentials.
If the recipe has auth set to service:
Set the configuration service value to downloaded service credentials.
End of explanation
"""
FIELDS = {
'auth_read':'service', # Credentials used.
'from_project':'', # Original project to read from.
'from_dataset':'', # Original dataset to read from.
'from_query':'', # Query to read data.
'to_project':None, # Anonymous data will be writen to.
'to_dataset':'', # Anonymous data will be writen to.
'to_table':'', # Anonymous data will be writen to.
}
print("Parameters Set To: %s" % FIELDS)
"""
Explanation: 3. Enter BigQuery Anonymize Query Recipe Parameters
Ensure you have user access to both datasets.
Provide the source project, dataset and query.
Provide the destination project, dataset, and table.
Modify the values below for your use case, can be done multiple times, then click play.
End of explanation
"""
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
TASKS = [
{
'anonymize':{
'auth':{'field':{'name':'auth_read','kind':'authentication','order':0,'default':'service','description':'Credentials used.'}},
'bigquery':{
'from':{
'project':{'field':{'name':'from_project','kind':'string','order':1,'description':'Original project to read from.'}},
'dataset':{'field':{'name':'from_dataset','kind':'string','order':2,'description':'Original dataset to read from.'}},
'query':{'field':{'name':'from_query','kind':'string','order':3,'description':'Query to read data.'}}
},
'to':{
'project':{'field':{'name':'to_project','kind':'string','order':4,'default':None,'description':'Anonymous data will be writen to.'}},
'dataset':{'field':{'name':'to_dataset','kind':'string','order':5,'description':'Anonymous data will be writen to.'}},
'table':{'field':{'name':'to_table','kind':'string','order':6,'description':'Anonymous data will be writen to.'}}
}
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
"""
Explanation: 4. Execute BigQuery Anonymize Query
This does NOT need to be modified unless you are changing the recipe, click play.
End of explanation
"""
|
ericmjl/Network-Analysis-Made-Simple | archive/8-US-airports-case-study-student.ipynb | mit | %matplotlib inline
import networkx as nx
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import warnings
warnings.filterwarnings('ignore')
pass_air_data = pd.read_csv('datasets/passengers.csv')
"""
Explanation: Exploratory analysis of the US Airport Dataset
This dataset contains data for 25 years[1995-2015] of flights between various US airports and metadata about these routes. Taken from Bureau of Transportation Statistics, United States Department of Transportation.
Let's see what can we make out of this!
End of explanation
"""
pass_air_data.head()
# Create a MultiDiGraph from this dataset
passenger_graph = nx.from_pandas_edgelist(pass_air_data, source='ORIGIN', target='DEST', edge_attr=['YEAR', 'PASSENGERS', 'UNIQUE_CARRIER_NAME'], create_using=nx.MultiDiGraph())
"""
Explanation: In the pass_air_data dataframe we have the information of number of people that fly every year on a particular route.
End of explanation
"""
passenger_graph['CLE']['ORD']
temp = [(i['YEAR'], i['PASSENGERS'])for i in dict(passenger_graph['CLE']['ORD']).values()]
x, y = zip(*temp)
plt.plot(x, y)
plt.show()
"""
Explanation: Cleveland to Chicago, how many people fly this route?
End of explanation
"""
nx.pagerank(passenger_graph)
def year_network(G, year):
    """Extract a weighted DiGraph for a single year from the multi-edge graph.

    Keeps only the edges of *G* whose 'YEAR' attribute equals *year*;
    each kept edge gets its 'PASSENGERS' count as the 'weight' attribute.
    """
    annual = nx.DiGraph()
    for origin, dest, attrs in G.edges(data=True):
        if attrs['YEAR'] == year:
            annual.add_edge(origin, dest, weight=attrs['PASSENGERS'])
    return annual
pass_2015 = year_network(passenger_graph, 2015)
len(pass_2015)
len(pass_2015.edges())
# Load in the GPS coordinates of all the airports
lat_long = pd.read_csv('datasets/GlobalAirportDatabase.txt', delimiter=':', header=None)
lat_long[lat_long[1].isin(list(pass_2015.nodes()))]
pos_dict = {}
for airport in lat_long[lat_long[1].isin(list(pass_2015.nodes()))].iterrows():
pos_dict[airport[1][1]] = (airport[1][15], airport[1][14])
pos_dict
"""
Explanation: Exercise
Find the busiest route in 1990 and in 2015 according to number of passengers, and plot the time series of number of passengers on these routes.
You can use the DataFrame instead of working with the network. It will be faster ;)
[5 mins]
So let's have a look at the important nodes in this network, i.e. important airports in this network. We'll use pagerank, betweenness centrality and degree centrality.
End of explanation
"""
plt.hist(list(nx.degree_centrality(pass_2015).values()))
plt.show()
"""
Explanation: Exercise
Using the position dictionary pos_dict create a plot of the airports, only the nodes not the edges.
As we don't have coordinates for all the airports we have to create a subgraph first.
Use nx.subgraph(Graph, iterable of nodes) to create the subgraph
Use nx.draw_networkx_nodes(G, pos) to map the nodes.
or
Just use a scatter plot :)
What about degree distribution of this network?
End of explanation
"""
d = {}
for i, j in dict(nx.degree(pass_2015)).items():
if j in d:
d[j] += 1
else:
d[j] = 1
x = np.log2(list((d.keys())))
y = np.log2(list(d.values()))
plt.scatter(x, y, alpha=0.4)
plt.show()
"""
Explanation: Let's plot a log log plot to get a better overview of this.
End of explanation
"""
G = nx.DiGraph()
G.add_edge(1, 2, weight=1)
# print(G.edges())
# G[1][2]
# G[2][1]
# G.is_directed()
# type(G)
G.add_edges_from([(1, 2), (3, 2), (4, 2), (5, 2), (6, 2), (7, 2)])
nx.draw_circular(G, with_labels=True)
G.in_degree()
nx.pagerank(G)
G.add_edge(5, 6)
nx.draw_circular(G, with_labels=True)
nx.pagerank(G)
G.add_edge(2, 8)
nx.draw_circular(G, with_labels=True)
nx.pagerank(G)
"""
Explanation: Directed Graphs
End of explanation
"""
sorted(nx.pagerank(pass_2015, weight=None).items(), key=lambda x:x[1], reverse=True)[:10]
sorted(nx.betweenness_centrality(pass_2015).items(), key=lambda x:x[1], reverse=True)[0:10]
sorted(nx.degree_centrality(pass_2015).items(), key=lambda x:x[1], reverse=True)[0:10]
"""
Explanation: Moving back to Airports
End of explanation
"""
sorted(nx.betweenness_centrality(pass_2015, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]
sorted(nx.pagerank(pass_2015, weight='weight').items(), key=lambda x:x[1], reverse=True)[0:10]
"""
Explanation: 'ANC' is the airport code of Anchorage airport, a place in Alaska, and according to pagerank and betweenness centrality it is the most important airport in this network. Isn't that weird? Thoughts?
related blog post: https://toreopsahl.com/2011/08/12/why-anchorage-is-not-that-important-binary-ties-and-sample-selection/
Let's look at weighted version, i.e taking into account the number of people flying to these places.
End of explanation
"""
nx.average_shortest_path_length(pass_2015)
"""
Explanation: How reachable is this network?
We calculate the average shortest path length of this network, it gives us an idea about the number of jumps we need to make around the network to go from one airport to any other airport in this network.
End of explanation
"""
list(nx.weakly_connected_components(pass_2015))
"""
Explanation: Wait, What??? This network is not connected. That seems like a really stupid thing to do.
End of explanation
"""
pass_air_data[(pass_air_data['YEAR'] == 2015) & (pass_air_data['ORIGIN'] == 'AIK')]
pass_2015.remove_nodes_from(['SPB', 'SSB', 'AIK'])
nx.is_weakly_connected(pass_2015)
nx.is_strongly_connected(pass_2015)
"""
Explanation: SPB, SSB, AIK anyone?
End of explanation
"""
G = nx.DiGraph()
G.add_edge(1, 2)
G.add_edge(2, 3)
G.add_edge(3, 1)
nx.draw(G)
G.add_edge(3, 4)
nx.draw(G)
nx.is_strongly_connected(G)
list(nx.strongly_connected_components(pass_2015))
pass_air_data[(pass_air_data['YEAR'] == 2015) & (pass_air_data['DEST'] == 'TSP')]
pass_2015_strong = max(nx.strongly_connected_component_subgraphs(pass_2015), key=len)
len(pass_2015_strong)
nx.average_shortest_path_length(pass_2015_strong)
"""
Explanation: Strongly vs weakly connected graphs.
End of explanation
"""
passenger_graph['CLE']['SFO'][25]
def str_to_list(a):
    """Convert a string representation of a list, e.g. "['x', 'y']",
    back into a list of its comma-separated items.

    The surrounding quote characters of each item are kept intact on
    purpose: callers compare against values such as
    "'United Air Lines Inc.'".  An empty representation "[]" yields an
    empty list (the naive split would have produced ['']).
    """
    inner = a[1:-1]
    if not inner:
        return []
    return inner.split(', ')
for i in str_to_list(passenger_graph['JFK']['SFO'][25]['UNIQUE_CARRIER_NAME']):
print(i)
%%time
# Pre-split the stringified airline list once per multi-edge so that later
# cells can test airline membership directly; %%time reports the cell runtime.
for origin, dest in passenger_graph.edges():
    for key in passenger_graph[origin][dest]:
        passenger_graph[origin][dest][key]['airlines'] = str_to_list(passenger_graph[origin][dest][key]['UNIQUE_CARRIER_NAME'])
"""
Explanation: Exercise! (Actually this is a game :D)
How can we decrease the avg shortest path length of this network?
Think of an effective way to add new edges to decrease the avg shortest path length.
Let's see if we can come up with a nice way to do this, and the one who gets the highest decrease wins!!!
The rules are simple:
- You can't add more than 2% of the current edges( ~500 edges)
[10 mins]
What about airlines? Can we find airline specific reachability?
End of explanation
"""
united_network = nx._________
for _______, _______ in passenger_graph.edges():
if 25 in passenger_graph[______][_______]: # 25 key is for the year 2015
if "'United Air Lines Inc.'" in ____________________:
united_network.add_edge(_____, ______, weight= __________)
# number of nodes
# number of edges
# top 10 according to pagerank
# top 10 according to degree centrality
"""
Explanation: Exercise
Play around with United Airlines network.
Extract a network for United Airlines flights from the metagraph passenger_graph for the year 2015
Make sure it's a weighted network, where weight is the number of passengers.
Find the number of airports and connections in this network
Find the most important airport, according to PageRank and degree centrality.
End of explanation
"""
|
hektor-monteiro/curso-python | erros-velocidade.ipynb | gpl-2.0 | x = 1.e308
y = x * 10.
print x,y
"""
Explanation: Acurácia e velocidade
Agora já temos os componentes básicos da linguagem Python para poder atacar os problemas de física
no entanto, precisamos explorar ainda as limitações do computador visto que não pode guardar números com precisão infinita
existe um limite superior e inferior ao que se pode guardar e também de velocidade em que operações podem ser efetuadas
variáveis e intervalos
o maior numero float que pode ser armazenado é em módulo $10^{308}$
o mesmo vale para numeros imaginários
usualmente escrevemos os numeros de ponto flutuante grandes com notação científica na forma : 3.14e10 o que equivale a $3.14 \times 10^{10}$
caso uma operação gere um numero maior que o máximo aceito pelo computador o valor inf será alocado e em geral o programa não é interrompido.
End of explanation
"""
# Integer version: Python promotes ints to arbitrary precision,
# so 200! is computed exactly, digit for digit.
factorial = 1
for k in range(1,201):
    factorial *= k
print factorial
# Float version: doubles top out around 1e308, and 200! (~8e374) is far
# bigger, so the running product overflows to inf.
factorial = 1.0
for k in range(1,201):
    factorial *= k
print factorial
"""
Explanation: em Python inteiros podem ter tantos algarismos quanto a memória do computador permitir
End of explanation
"""
import numpy as np
import math as math
print np.pi
print math.pi
np.pi
"""
Explanation: Erro numérico
números de ponto flutuante só podem ser representados com uma certa precisão
Em Python o padrão de representação é de 16 algarismos significativos
Veja o caso de $\pi$:
valor verdadeiro: 3.1415926535897932384626 . . .
valor no Python: 3.141592653589793
diferença: 0.0000000000000002384626 . . .
End of explanation
"""
x = 1.1+2.2
print "%10.20e"% x
if x == 3.3:
print "o valor é:", x
# a melhor maneira de testar igualdade de floats, caso seja necessária seria:
x = 1.1+2.2
epsilon = 1e-12
if abs(x-3.3)<epsilon:
print "o valor é:", x
"""
Explanation: Uma lição importante é que floats não devem ser usados na avaliação de igualdades
veja abaixo:
End of explanation
"""
# o caso abaixo mostra uma situação onde o erro relativo é bem grande
x = 1000000000000000
y = 1000000000000001.2345678901234
print "%10.20f"% x, '\n',"%10.20f"% y,'\n',"%10.20f"% (y-x)
print (1.2345678901234 - (y-x) ) / 1.2345678901234
# outro exemplo envolvendo subtração de numeros
from math import sqrt
x = 1.0
y = 1.0 + (1e-14)*sqrt(2)
print((1e14)*(y-x))
print(sqrt(2))
"""
Explanation: As mesmas regras de propagação de erros são válidas no tratamento de erros no computador
pode-se mostrar que o erro de uma soma de N números é dada por:
$\sigma = C\sqrt{N}\sqrt{\bar{x}^2}$
Veja que o erro cresce com o numero de termos somados (para mais detalhes veja: http://www.umich.edu/~mejn/cp/chapters/errors.pdf)
De modo geral esse comportamento é razoável e o erro relativo tende a diminuir com o aumento de termos somados.
Problemas em geral acontecem quando se somam números de tamanhos muito diferentes. Nesses casos os números muito menores podem se perder no erro.
Os maiores problemas acontecem quando fazemos subtração de numeros. Veja o exemplo abaixo:
End of explanation
"""
# The map x -> 11x - 2 has x = 0.2 as a fixed point, so mathematically x
# should stay at 0.2 forever.  But 0.2 has no exact binary representation,
# and each iteration multiplies the tiny initial rounding error by 11 -
# after a dozen or so iterations the error completely swamps the result.
niter = 20
x = 0.2
for i in range(niter):
    x = (11.0*x - 2.0)
    print i, '->', "%10.20f"% x
"""
Explanation: Veja que o erro cometido está na segunda casa decimal!
problemas de erros ao subtrair numeros muito próximos podem ocorrer em cálculos de física e devemos ficar atentos a eles
End of explanation
"""
from math import exp
import time
# para obter o tempo de execução
start_time = time.time()
# variáveis definidas no início para clareza
terms = 1000 # número de termos a serem usados na soma
beta = 1/100.
S = 0.0
Z = 0.0
# note o uso de um looping para calculo das duas somas e o exponencial calculado uma só vez
for n in range(terms):
E = n + 0.5
weight = exp(-beta*E)
S += weight*E
Z += weight
print(S/Z)
print("tempo de execução de %s secundos" % (time.time() - start_time))
from math import exp
import time
# para obter o tempo de execução
start_time = time.time()
# variáveis definidas no início para clareza
terms = 1000*1000 # número de termos a serem usados na soma
beta = 1/100.
S = 0.0
Z = 0.0
# note o uso de um looping para calculo das duas somas e o exponencial calculado uma só vez
for n in range(terms):
E = n + 0.5
weight = exp(-beta*E)
S += weight*E
Z += weight
print(S/Z)
print("tempo de execução de %s secundos" % (time.time() - start_time))
from math import exp
import time
# para obter o tempo de execução
start_time = time.time()
# variáveis definidas no início para clareza
terms = 100*1000*1000 # número de termos a serem usados na soma
beta = 1/100.
S = 0.0
Z = 0.0
# note o uso de um looping para calculo das duas somas e o exponencial calculado uma só vez
for n in range(terms):
E = n + 0.5
weight = exp(-beta*E)
S += weight*E
Z += weight
print(S/Z)
print("tempo de execução de %s secundos" % (time.time() - start_time))
"""
Explanation: velocidade
é interessante ter uma noção sobre os limites de velocidade de computação
esses aspectos podem ser importantes na tomada de decisões de programação
Exemplo
O oscilador harmonico simples quantico tem niveis de energia dados por:
$E_n = \hbar \omega (n+\frac{1}{2})$
A energia média para o oscilador a uma temperatura T é:
$\left\langle E\right\rangle =\dfrac {1} {Z}\displaystyle\sum {n=0}^{\infty }E{n}e^{-\beta E_{n}}$
onde $\beta =1 / k_{B}T$ com $K_B$ a constante de Boltzmann e $Z=\displaystyle\sum {n=0}^{\infty }e^{-\beta E{n}}$
Suponha que se queira calcular o valor de $\left\langle E\right\rangle$ quando $k_{B}T = 100$
Usando unidades de $\hbar =\omega =1$ temos o programa abaixo:
End of explanation
"""
|
cuemacro/chartpy | chartpy_examples/notebooks/web_page_examples.ipynb | apache-2.0 | import sys
try:
sys.path.append('E:/Remote/chartpy')
except:
pass
"""
Explanation: Creating charts (& webpages!) with chartpy
By Saeed Amen (@saeedamenfx) - saeed@cuemacro.com
A great way to present a group of charts is via a webpage. How can we do this in a quick and easy way in Python? Furthemore, how can we change the plotting engine, from say bokeh to matplotlib and vice-versa, without totally rewriting our code? I've designed [chartpy]http://www.github.com/cuemacro/chartpy) to solve these problems and much more. chartpy has a common API for a number of underlying Python libraries, bokeh, plotly and matplotlib (I'm hoping to add more soon!) too. Furthermore, once charts are generated in this way, they can easily be grouped together into webpages, all within Python, without having to mess around with HTML or CSS.
As a first step let's add chartpy to our path (you don't have to do this, if you have installed it via pip).
End of explanation
"""
# support Quandl 3.x.x
try:
import quandl as Quandl
except:
# if import fails use Quandl 2.x.x
import Quandl
from chartpy import Chart, Style, Canvas
# get your own free Quandl API key from https://www.quandl.com/ (i've used another class for this)
try:
from chartpy.chartcred import ChartCred
cred = ChartCred()
quandl_api_key = cred.quandl_api_key
except:
quandl_api_key = "x"
"""
Explanation: We need to download the data in order to plot. Let's use Quandl for this (although, I'd very much recommend using my library findatapy which provides an easy way to download data from many data sources including Bloomberg, Quandl, FRED, Yahoo, Google etc. Let's also import all the classes we need from chartpy too
End of explanation
"""
df = Quandl.get(["FRED/A191RL1Q225SBEA"], authtoken=quandl_api_key)
df.columns = ["Real QoQ"]
"""
Explanation: Let's download the data from Quandl for US Real GDP QoQ
End of explanation
"""
import copy
style = Style(title="US GDP", source="Quandl/Fred", scale_factor=-1, width=400, height=300, silent_display=True, thin_margin=True)
style_bokeh = copy.copy(style); style_bokeh.html_file_output = 's_bokeh.html'
style_plotly = copy.copy(style); style_plotly.html_file_output = 's_plotly.html'
style_matplotlib = copy.copy(style); style_matplotlib.file_output = 's_matplotlib.png'
# Chart object is initialised with the dataframe and our chart style
chart_bokeh = Chart(df=df, chart_type='line', engine='bokeh', style=style_bokeh)
chart_plotly = Chart(df=df, chart_type='line', engine='plotly', style=style_plotly)
chart_matplotlib = Chart(df=df, chart_type='line', engine='matplotlib', style=style_matplotlib)
"""
Explanation: We now create style objects, which give us the property setting for the charts. We can then create Chart objects with these styles. We just change the engine variable to switch from using bokeh to matplotlib etc. No need to use different calls for each plotting engine! We specify filenames for each plot (otherwise, the filename will be automatically created from the timestamp).
End of explanation
"""
%matplotlib inline
chart_matplotlib.plot()
"""
Explanation: We can if we choose plot individual charts like this (we need the inline statement for matplotlib to plot in the Jupyter notebook), using the following syntax.
End of explanation
"""
text = "A demo of chartpy canvas!!"
# using plain template
canvas = Canvas([[text, chart_bokeh], [chart_plotly, df.tail(n=5)]])
canvas.generate_canvas(jupyter_notebook=True, silent_display=True, canvas_plotter='plain', output_filename='s_canvas_plain.html')
"""
Explanation: Creating a Canvas for a webpage
We now create a canvas object, combining our various chart objects. We can also include text or pandas dataframes (which will be printed as tables. We we use the 'plain' canvas plotter, which has minimal formatting. We need a flag for Jupyter notebook, so it will link the final HTML file inside this notebook. If we turn silent_display to False, it would also open the HTML file in a web browser.
End of explanation
"""
# using the Keen template (needs static folder in the same place as final HTML file)
canvas = Canvas([[chart_bokeh, chart_plotly], [chart_plotly, chart_matplotlib]])
canvas.generate_canvas(jupyter_notebook=True, silent_display=True, canvas_plotter='keen', output_filename = 's_canvas_keen.html')
"""
Explanation: We create another canvas object and then use the Keen IO based template, which looks a bit neater.
End of explanation
"""
|
yashdeeph709/Algorithms | PythonBootCamp/Complete-Python-Bootcamp-master/.ipynb_checkpoints/While loops -checkpoint.ipynb | apache-2.0 | x = 0
while x < 10:
print 'x is currently: ',x
print ' x is still less than 10, adding 1 to x'
x+=1
"""
Explanation: while loops
The while statement in Python is one of most general ways to perform iteration. A while statement will repeatedly execute a single statement or group of statements as long as the condition is true. The reason it is called a 'loop' is because the code statements are looped through over and over again until the condition is met.
The general format of a while loop is:
while test:
code statement
else:
final code statements
Let’s look at a few simple while loops in action.
End of explanation
"""
x = 0
while x < 10:
print 'x is currently: ',x
print ' x is still less than 10, adding 1 to x'
x+=1
else:
print 'All Done!'
"""
Explanation: Notice how many times the print statements occurred and how the while loop kept going until the True condition was met, which occurred once x==10. It's important to note that once this occurred the code stopped. Lets see how we could add an else statement:
End of explanation
"""
x = 0
while x < 10:
print 'x is currently: ',x
print ' x is still less than 10, adding 1 to x'
x+=1
if x ==3:
print 'x==3'
else:
print 'continuing...'
continue
"""
Explanation: break, continue, pass
We can use break, continue, and pass statements in our loops to add additional functionality for various cases. The three statements are defined by:
break: Breaks out of the current closest enclosing loop.
continue: Goes to the top of the closest enclosing loop.
pass: Does nothing at all.
Thinking about break and continue statements, the general format of the while loop looks like this:
while test:
code statement
if test:
break
if test:
continue
else:
break and continue statements can appear anywhere inside the loop’s body,but we will usually put them furhter nested in conjunction with an if statement to perform an action based on some condition.
Lets go ahead and look at some examples!
End of explanation
"""
x = 0
while x < 10:
    print 'x is currently: ',x
    print ' x is still less than 10, adding 1 to x'
    x+=1
    if x ==3:
        print 'Breaking because x==3'
        break  # exits the while loop entirely, skipping any remaining iterations
    else:
        print 'continuing...'
        continue  # jumps straight back to the while test at the top
"""
Explanation: Note how we have a printed statement when x==3, and a continue being printed out as we continue through the outer while loop. Let's put in a break once x ==3 and see if the result makes sense:
End of explanation
"""
# DO NOT RUN THIS CODE!!!!
while True:
print 'Uh Oh infinite Loop!'
"""
Explanation: Note how the other else statement wasn't reached and continuing was never printed!
After these brief but simple examples, you should feel comfortable using while statements in you code.
A word of caution however! It is possible to create an infinitely running loop with while statements. For example:
End of explanation
"""
|
AntArch/Presentations_Github | 20150916_OGC_Reuse_under_licence/.ipynb_checkpoints/20150916_OGC_Reuse_under_licence-checkpoint_conflict-20150910-195436.ipynb | cc0-1.0 | from IPython.display import YouTubeVideo
YouTubeVideo('F4rFuIb1Ie4')
## PDF output using pandoc
# NOTE(review): requires `ipython`, `pandoc` (with pandoc-citeproc) and
# `pdftk` on the PATH; os.system return codes are ignored, so any failure
# in the pipeline is silent.
import os
### Export this notebook as markdown
commandLineSyntax = 'ipython nbconvert --to markdown 20150916_OGC_Reuse_under_licence.ipynb'
print (commandLineSyntax)
os.system(commandLineSyntax)
### Export this notebook and the document header as PDF using Pandoc
commandLineSyntax = 'pandoc -f markdown -t latex -N -V geometry:margin=1in DocumentHeader.md 20150916_OGC_Reuse_under_licence.md --filter pandoc-citeproc --latex-engine=xelatex --toc -o 20150916_OGC_Reuse_under_licence.pdf '
os.system(commandLineSyntax)
### Remove cruft from the pdf
commandLineSyntax = 'pdftk 20150916_OGC_Reuse_under_licence.pdf cat 1-4 15-end output outfile_p12-15.pdf'
os.system(commandLineSyntax)
"""
Explanation: Go down for licence and other metadata about this presentation
\newpage
Preamble
Licence
Unless stated otherwise all content is released under a [CC0]+BY licence. I'd appreciate it if you reference this but it is not necessary.
\newpage
Using Ipython for presentations
A short video showing how to use Ipython for presentations
End of explanation
"""
# IPython magics / shell escapes (not plain Python): fetch and load the
# watermark extension, then record author, date, versions and machine info
# so the environment can be reproduced.
# NOTE(review): %install_ext was deprecated in later IPython releases.
%install_ext https://raw.githubusercontent.com/rasbt/python_reference/master/ipython_magic/watermark.py
%load_ext watermark
%watermark -a "Anthony Beck" -d -v -m -g
#List of installed conda packages
!conda list
#List of installed pip packages
!pip list
"""
Explanation: The environment
In order to replicate my environment you need to know what I have installed!
Set up watermark
This describes the versions of software used during the creation.
Please note that critical libraries can also be watermarked as follows:
python
%watermark -v -m -p numpy,scipy
End of explanation
"""
# Shell escape: serve this notebook as a live reveal.js slideshow
# (stop the server with Ctrl-C in the terminal, as the next cell explains).
!ipython nbconvert 20150916_OGC_Reuse_under_licence.ipynb --to slides --post serve
"""
Explanation: Running dynamic presentations
You need to install the RISE Ipython Library from Damián Avila for dynamic presentations
To convert and run this as a static presentation run the following command:
End of explanation
"""
#Future proof python 2
from __future__ import print_function #For python3 print syntax
from __future__ import division
# def
import IPython.core.display
# A function to collect user input - ipynb_input(varname='username', prompt='What is your username')
def ipynb_input(varname, prompt=''):
    """Prompt the notebook user for input and bind the value to a variable.

    Renders a browser ``prompt()`` dialog; the string the user types is
    assigned, inside the running kernel, to the variable named *varname*.

    Parameters
    ----------
    varname : str
        Name of the Python variable to create in the kernel namespace.
    prompt : str, optional
        Message shown in the browser dialog.

    Returns
    -------
    IPython.core.display.Javascript
        Display object that triggers the prompt when rendered.
    """
    import json  # stdlib; embeds strings in JS without quote-injection

    # BUGFIX: the original interpolated prompt/value with str.format and raw
    # single quotes, so any quote character in the prompt or the typed value
    # produced broken JavaScript / Python source.  json.dumps emits valid JS
    # string literals, and JSON.stringify safely quotes the user's value
    # (a JSON string literal is also a valid Python string literal).
    js_code = ("""
        var value = prompt({prompt}, "");
        var py_code = {varname} + " = " + JSON.stringify(value);
        IPython.notebook.kernel.execute(py_code);
    """).format(prompt=json.dumps(prompt), varname=json.dumps(varname))
    return IPython.core.display.Javascript(js_code)
# inline
%pylab inline
"""
Explanation: To close this instances press control 'c' in the ipython notebook terminal console
Static presentations allow the presenter to see speakers notes (use the 's' key)
If running dynamically run the scripts below
Pre load some useful libraries
End of explanation
"""
from IPython.display import YouTubeVideo
# Embed the "About me" video in the notebook output.
YouTubeVideo('jUzGF401vLc')
"""
Explanation: About me
Research Fellow, University of Nottingham: orcid
Director, Geolytics Limited - A spatial data analytics consultancy
\newpage
In the beginning was the geoword
and the word was cartography
\newpage
Cartography was king. Static representations of spatial knowledge with the cartographer deciding what to represent.
\newpage
And then there was data .........
\newpage
Restrictive data
\newpage
Making data interoperable and open
\newpage
Technical interoperability - levelling the field
\newpage
Facilitating data driven visualization
From Map to Model The changing paradigm of map creation from cartography to data driven visualization
\newpage
\newpage
\newpage
\newpage
What about non-technical interoperability issues?
Issues surrounding non-technical interoperability include:
Policy interoperabilty
Licence interoperability
Legal interoperability
Social interoperability
We will focus on licence interoperability
\newpage
There is a multitude of formal and informal data.
\newpage
Each of these data objects can be licenced in a different way. This shows some of the licences described by the RDFLicence ontology
\newpage
What is a licence?
Wikipedia state:
A license may be granted by a party ("licensor") to another party ("licensee") as an element of an agreement between those parties.
A shorthand definition of a license is "an authorization (by the licensor) to use the licensed material (by the licensee)."
\newpage
Two lead organisations have developed legal frameworks for content licensing:
Creative Commons (CC) and
Open Data Commons (ODC).
Until the release of CC version 4, published in November 2013, the CC licence did not cover data. Between them, CC and ODC licences can cover all forms of digital work.
There are many others - many bespoke.
I'll describe CC in more detail
\newpage
Creative Commons Zero
Creative Commons Zero (CC0) is essentially public domain which allows:
Reproduction
Distribution
Derivations
Constraints on CC0
The following clauses constrain CC0:
Permissions
ND – No derivatives: the licensee can not derive new content from the resource.
Requirements
BY – By attribution: the licensee must attribute the source.
SA – Share-alike: if the licensee adapts the resource, it must be released under the same licence.
Prohibitions
NC – Non commercial: the licensee must not use the work commercially without prior approval.
CC license combinations
License|Reproduction|Distribution|Derivation|ND|BY|SA|NC
----|----|----|----|----|----|----|----
CC0|X|X|X||||
CC-BY-ND|X|X||X|X||
CC-BY-NC-ND|X|X||X|X||X
CC-BY|X|X|X||X||
CC-BY-SA|X|X|X||X|X|
CC-BY-NC|X|X|X||X||X
CC-BY-NC-SA|X|X|X||X|X|X
Table: Creative Commons license combinations
\newpage
Why are licenses important?
It's all about license compatibility!
\newpage
Which is important when we mash up data
Certain licences when combined:
Are incompatible
Creating data islands
Inhibit commercial exploitation (NC)
Force the adoption of certain licences
If you want people to commercially exploit your stuff don't incorporate CC-BY-NC-SA data!
Stops the derivation of new works
A conceptual licence processing workflow. The licence processing service analyses the incoming licence metadata and determines if the data can be legally integrated and any resulting licence implications for the derived product.
\newpage
A rudimentary logic example
```text
Data1 isPartOf NewThing.
Data1 hasLicence a cc-by-sa.
What hasLicence a cc-by-sa? #reason here
If X isPartOf Y and hasLicence Z then Y hasLicence Z. #reason here
Data2 isPartOf NewThing.
Data2 hasLicence a cc-by-nc-sa. #reason here
Nothing hasLicence a cc-by-nc-sa and hasLicence a cc-by-sa. #reason here
```
And processing this within the Protege reasoning environment
End of explanation
"""
from IPython.display import YouTubeVideo
# Embed the pre-recorded run-through of the live presentation.
YouTubeVideo('tkRB5Rp1_W4')
"""
Explanation: \newpage
Here's something I prepared earlier
A live presentation.....
End of explanation
"""
|
harish-garg/Machine-Learning | udacity/enron/ud120-projects-master/final_project/Data Exploration and Cleanup.ipynb | mit | print "print out some values of the observation 'TOTAL'"
for name, person in data_dict.iteritems():
if name == 'TOTAL':
print person
salary = []
for name, person in data_dict.iteritems():
if float(person['salary']) > 0:
salary.append(float(person['salary']))
print "the sum of salary of all other persons is: ",np.sum(salary)/2
"""
Explanation: We see above that there is an entry called "TOTAL". That obviously cannot be a name. We would need to remove that from the dataset. Before we do, let's confirm it is what the name suggests it is.
End of explanation
"""
# Let's remove this TOTAL record.
data_dict.pop('TOTAL')
# There is also a record which belongs to "THE TRAVEL AGENCY IN THE PARK".
# This is not a person and hence should be removed.
data_dict.pop("THE TRAVEL AGENCY IN THE PARK")
# No of records after removal of TOTAL & THE TRAVEL AGENCY IN THE PARK
print "No of records after removal of TOTAL: ", len(data_dict)
### Task 3: Create new feature(s)
### Store to my_dataset for easy export below.
# NOTE(review): my_dataset is an alias of data_dict (not a copy) — the loop
# below mutates the original dict too.
my_dataset = data_dict
print "we create two new features here 'to_poi_message_ratio' and 'from_poi_message_ratio' "
# Fraction of messages sent to / received from persons of interest.
# 'NaN' message counts leave both ratios at 0 because float('NaN') > 0 is False.
for person in my_dataset.values():
    person['to_poi_message_ratio'] = 0
    person['from_poi_message_ratio'] = 0
    if float(person['from_messages']) > 0:
        person['to_poi_message_ratio'] = float(person['from_this_person_to_poi'])/float(person['from_messages'])
    if float(person['to_messages']) > 0:
        person['from_poi_message_ratio'] = float(person['from_poi_to_this_person'])/float(person['to_messages'])
features_list.extend(['to_poi_message_ratio', 'from_poi_message_ratio'])
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list)
labels, features = targetFeatureSplit(data)
### Task 4: Try a variety of classifiers
### Please name your classifier clf for easy export below.
### Note that if you want to do PCA or other multi-stage operations,
### you'll need to use Pipelines. For more info:
### http://scikit-learn.org/stable/modules/pipeline.html
# Provided to give you a starting point. Try a variety of classifiers.
from sklearn.naive_bayes import GaussianNB
# NOTE(review): the GaussianNB instance is immediately overwritten by the
# decision tree on the next line, so it is never evaluated.
clf = GaussianNB()
clf = DecisionTreeClassifier(min_samples_split=6, random_state=10)
test_classifier(clf, my_dataset, features_list)
#clf = ensemble.RandomForestClassifier(criterion='gini', n_estimators=14, max_depth=7,
# max_features=None, random_state=42, min_samples_split=1)
#clf = AdaBoostClassifier(algorithm='SAMME')
#params = dict(reduce_dim__n_components=[1, 2, 3], tree__min_samples_split=[2, 4, 6, 8 10])
#clf = GridSearchCV(clf, param_grid=params, n_jobs=-1, scoring='recall')
#test_classifier(clf, my_dataset, features_list)
### Task 5: Tune your classifier to achieve better than .3 precision and recall
### using our testing script. Check the tester.py script in the final project
### folder for details on the evaluation method, especially the test_classifier
### function. Because of the small size of the dataset, the script uses
### stratified shuffle split cross validation. For more info:
### http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.StratifiedShuffleSplit.html
# Example starting point. Try investigating other evaluation techniques!
# Hold out 30% of the data; a fixed random_state keeps the split reproducible.
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
features_train, features_test, labels_train, labels_test = \
    train_test_split(features, labels, test_size=0.3, random_state=42)
### Task 6: Dump your classifier, dataset, and features_list so anyone can
### check your results. You do not need to change anything below, but make sure
### that the version of poi_id.py that you submit can be run on its own and
### generates the necessary .pkl files for validating your results.
# Serialise the classifier, dataset and feature list for the grading scripts.
dump_classifier_and_data(clf, my_dataset, features_list)
"""
Explanation: We see that the total salary matches to the salary against the "TOTAL" record in the dataset.
End of explanation
"""
|
BL-Labs/poetryhunt | WordFrequencyClassifier.ipynb | mit | from newspaperaccess import *
# Get the connection set up to get access to the newspaper text
n = NewspaperArchive()
# Load up the references to the pages that we know reference Abolitionists
import csv
# Lookup table mapping lower-case month names onto zero-padded month numbers.
MONTHS = {"january": "01", "february": "02", "march": "03", "april": "04", "may": "05", "june": "06",
          "july": "07", "august": "08", "september": "09", "october": "10", "november": "11", "december": "12"}
# Read a reference spreadsheet (CSV export) into a list of dicts.
def get_references(filename):
    """Load newspaper references from a CSV file.

    Each row becomes a dict keyed by the CSV column headers, with the
    "month" value converted from a name (e.g. "January") to a two-digit
    number (e.g. "01"), ready to be passed to the archive accessor.
    """
    with open(filename, "r") as handle:
        # DictReader maps every row onto {header: value} automatically.
        rows = list(csv.DictReader(handle))
    for entry in rows:
        # Normalise the month name to its numeric form in place.
        entry["month"] = MONTHS[entry["month"].lower()]
    return rows
# Load the positive (abolitionist) references from the spreadsheet export.
positivereferences = get_references("positives.csv")
# There, we should have a big list draw from that spreadsheet
# Let's see what the 100th item is: (computers count from 0!)
positivereferences[100]
# Can we get the text for this reference?
doc = n.get(**positivereferences[100])
doc.keys()
# What is on page 3? Just the first 500 characters anyway
# NOTE(review): the expression below prints titles, not the first 500
# characters, so the comment above no longer matches the code — confirm
# which version was intended.
print([title for title, _ in doc['0003'].values()])
"""
Explanation: Classifier
We'll need to do the following:
Load up the list of references
Make sure that we can get the text for each one (or close to all of them)
Load up the list of negative references (ie the things that aren't what we are looking for)
Make sure that these are fine as well
We need to decide what 'features' we want the classifier to understand and write some methods that take in the text and return features accompanied by a number that represents that features 'strength'. For example, we could make features out of the word frequency. In this case, the feature would be the word itself, and the number is how often it occurred.
Finally we need to load the text for these and pass them through the Naive Bayes classifier, tagging them
End of explanation
"""
# Load references to pages that are NOT abolitionist speeches (negatives).
nonabospeechesreferences = get_references("nonabospeeches.csv")
# NOTE(review): later cells use a variable `negativereferences` that is never
# assigned anywhere in this export — presumably it should be this list (or a
# sibling load); confirm and align the names to avoid NameError.
nonabospeechesreferences[10]
print(n.get(**nonabospeechesreferences[10]))
"""
Explanation: Wow! Terrible OCR! Nevermind, we shall try to continue. The quality is better in other newspapers.
Let's load in the negatives now as well:
End of explanation
"""
nonabospeechesreferences[16]
# NOTE(review): `negativereferences` is not defined anywhere in this export;
# as written this line raises NameError.  It probably should read
# `nonabospeechesreferences` — confirm against the original notebook.
print(n.get(**negativereferences[16])['0008'])
"""
Explanation: The error here (the final line tends to be the imporant one) says "No Such Newspaper: The Manchester Times and Gazette. Looking at the list of newspapers that the newspaperaccess file knows about, there isn't a clear match here. Let's try another:
End of explanation
"""
from feature import get_common_wordlist
# create the word frequency distribution from the newspaper references we have for positive matches
pos_worddist, nn = get_common_wordlist([(x['newspaper'], x['year'], x['month'], x['day']) for x in positivereferences])
print(nn)
# NOTE(review): the label says "Top 10" but the slice prints the top 100.
print("Wordlist length: {0}\n Top 10 words:".format(len(pos_worddist)))
print(pos_worddist.most_common()[:100])
# create the word frequency distribution from the newspaper references we have for negative matches
# NOTE(review): `negativereferences` is never assigned in this export (the
# negatives were loaded as `nonabospeechesreferences`) — NameError as written.
neg_worddist, negn = get_common_wordlist([(x['newspaper'], x['year'], x['month'], x['day']) for x in negativereferences])
print(negn)
print("Wordlist length: {0}\n Top 10 words:".format(len(neg_worddist)))
print(neg_worddist.most_common()[:10])
# store these wordlists so we don't have to recreate them later on
import json
with open("pos_worddist.json", "w") as pfp:
    json.dump(pos_worddist, pfp)
with open("neg_worddist.json", "w") as nfp:
    json.dump(neg_worddist, nfp)
# create a wordlist from the positive set of words that do not appear in the negative set
# Copy first, then delete every word shared with the negative distribution.
only_pos_worddist = pos_worddist.copy()
for item in set(pos_worddist).intersection(set(neg_worddist)):
    del only_pos_worddist[item]
# Most common positive only words?
print(only_pos_worddist.most_common()[:100])
# let's store this word dist too, just because
with open("only_pos_worddist.json", "w") as ofp:
    json.dump(only_pos_worddist, ofp)
# So we have some basic feature sets we could use (common words overall, top 1000 words from just the positives, and so on)
# We also have the set of documents to train on.
import nltk
# Let's make a little method to return a featureset created a newpaper reference and a word dist
# First draft of the feature extractor; redefined with a vocabulary cap a few
# cells below.  Builds {"has(word)": bool, "count(word)": int} for one issue,
# returning None implicitly when the issue is absent from the archive.
def get_features(worddist, **newspaper_ref):
    if n.exists(newspaper_ref['newspaper'], newspaper_ref['year'], newspaper_ref['month'], newspaper_ref['day']):
        # NOTE(review): `for fword,_ in worddist` assumes an iterable of
        # (word, count) pairs; the call site below passes a FreqDist/Counter,
        # whose iteration yields bare words, so these unpacks misbehave —
        # presumably a .most_common() list was intended.  Confirm.
        features = {"has({0})".format(fword): False for fword,_ in worddist}
        features.update({"count({0})".format(fword): 0 for fword,_ in worddist})
        # create freq dist for the newspaper but only for the words we care about
        doc = n.get(**newspaper_ref)
        fdoc = nltk.FreqDist(w.lower() for w in " ".join(doc.values()).split(" ") if w in worddist)
        features.update({"has({0})".format(fword): True for fword, _ in fdoc})
        # NOTE(review): counts come from `worddist` (corpus-wide), not from
        # the document's own distribution `fdoc` — looks like a bug.
        features.update({"count({0})".format(fword): c for fword, c in worddist})
        return features
# n.get(**positivereferences[100]) -> is in JISC1
# Build the feature dict for one known-positive page and inspect its size.
featureset = get_features(only_pos_worddist, **positivereferences[100])
len(featureset)
# Too many features is often a key step in over-fitting our model to the training data
# This is a bad thing! Let's alter that features method to only use the 2000 most common words
def get_features(total_worddist, features_to_take=2000, **newspaper_ref):
    """Build a has/count feature dict for one newspaper issue.

    Only the `features_to_take` most common words of `total_worddist` are
    used as features, to limit over-fitting.

    Parameters
    ----------
    total_worddist : nltk.FreqDist or collections.Counter
        Corpus-wide word frequencies.
    features_to_take : int, optional
        Number of top words to turn into features (default 2000).
    **newspaper_ref : dict
        Keys identifying one issue (newspaper, year, month, day, ...),
        passed straight through to the archive accessor `n`.

    Returns
    -------
    dict or None
        {"has(word)": bool, "count(word)": int} for every feature word, or
        None (implicitly) when the issue is not in the archive.
    """
    if n.exists(newspaper_ref['newspaper'], newspaper_ref['year'], newspaper_ref['month'], newspaper_ref['day']):
        # BUGFIX: the original tested `w in worddist`, i.e. membership in a
        # list of (word, count) tuples, which never matched a bare word —
        # every feature stayed False/0.  Test against a set of words instead.
        vocab = {word for word, _ in total_worddist.most_common()[:features_to_take]}
        features = {"has({0})".format(word): False for word in vocab}
        features.update({"count({0})".format(word): 0 for word in vocab})
        # create freq dist for the newspaper but only for the words we care about
        doc = n.get(**newspaper_ref)
        fdoc = nltk.FreqDist(w.lower() for w in " ".join(doc.values()).split(" ") if w in vocab)
        features.update({"has({0})".format(word): True for word in fdoc})
        # BUGFIX: counts must come from the document's own distribution
        # (fdoc), not from the corpus-wide counts as in the original.
        features.update({"count({0})".format(word): c for word, c in fdoc.items()})
        return features
return features
# Let's try that again
featureset = get_features(only_pos_worddist, **positivereferences[100])
# Inspect the capped feature dictionary: size, a membership test, a sample.
len(featureset)
'has(slaveholder)' in featureset
list(featureset.items())[:10]
# let's make a teeny tiny classifier now..
# Collect ~11 positive ('p') and ~11 negative ('n') references whose pages
# actually exist in the archive.
refs = {'p': [], 'n': []}
for label, source in (('p', positivereferences), ('n', negativereferences)):
    for item in source:
        if len(refs[label]) > 10:
            # BUGFIX: the original while/for pair re-scanned the whole list
            # per pass and looped forever if fewer than 11 pages existed.
            break
        if n.exists(item['newspaper'], item['year'], item['month'], item['day']):
            refs[label].append(item)
# Build one (features, label) pair per individual reference.
# BUGFIX: the original iterated refs.items() directly, passing a whole *list*
# of references to get_features (TypeError), and misspelled both
# `only_pos_worddist` and `featuresets` (NameError on the next lines).
featuresets = [(get_features(only_pos_worddist, **ref), label)
               for label, reflist in refs.items()
               for ref in reflist]
# split the set in half to train and test
train_set, test_set = featuresets[:10], featuresets[-10:]
# Train on one half
classifier = nltk.NaiveBayesClassifier.train(train_set)
# test with 2nd half
print(nltk.classify.accuracy(classifier, test_set))
# Peek at the 100 most common positive-only words used as the vocabulary.
print(only_pos_worddist.most_common()[:100])
"""
Explanation: {'day': '5',
'month': '05',
'newspaper': 'The Dundee Courier',
'page': '',
'year': '1846'}
from newspaperaccess import NewspaperArchive
n = NewspaperArchive()
doc = n.get(newspaper = "ANJO", year = "1846", month = "05", day = "31")
len(doc)
print(doc["0001"][:200])
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.