repo_name stringlengths 6 77 | path stringlengths 8 215 | license stringclasses 15
values | content stringlengths 335 154k |
|---|---|---|---|
recepkabatas/Spark | 2_fullyconnected.ipynb | apache-2.0 | # These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
import cPickle as pickle
import numpy as np
import tensorflow as tf
"""
Explanation: Deep Learning with TensorFlow
Credits: Forked from TensorFlow by Google
Setup
Refer to the setup instructions.
Exercise 2
Previously in 1_notmnist.ipynb, we created a pickle with formatted datasets for training, development and testing on the notMNIST dataset.
The goal of this exercise is to progressively train deeper and more accurate models using TensorFlow.
End of explanation
"""
# Reload the formatted datasets created in 1_notmnist.ipynb from one pickle.
# NOTE(review): Python 2 cell (print statements; cPickle imported above);
# it will not run unmodified under Python 3.
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print 'Training set', train_dataset.shape, train_labels.shape
print 'Validation set', valid_dataset.shape, valid_labels.shape
print 'Test set', test_dataset.shape, test_labels.shape
"""
Explanation: First reload the data we generated in 1_notmnist.ipynb.
End of explanation
"""
image_size = 28
num_labels = 10

def reformat(dataset, labels):
    """Flatten each image into a row vector and one-hot encode the labels.

    Returns (dataset, labels) as float32 arrays of shape
    (n, image_size**2) and (n, num_labels) respectively.
    """
    flat = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
    # 0 -> [1.0, 0.0, 0.0, ...], 1 -> [0.0, 1.0, 0.0, ...], etc.
    one_hot = (labels[:, None] == np.arange(num_labels)).astype(np.float32)
    return flat, one_hot
# Apply reformat (flatten + one-hot) to all three splits and report shapes.
# NOTE(review): Python 2 print statements.
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print 'Training set', train_dataset.shape, train_labels.shape
print 'Validation set', valid_dataset.shape, valid_labels.shape
print 'Test set', test_dataset.shape, test_labels.shape
"""
Explanation: Reformat into a shape that's more adapted to the models we're going to train:
- data as a flat matrix,
- labels as float 1-hot encodings.
End of explanation
"""
# Build a TF1-style computation graph: multinomial logistic regression
# trained with full-batch gradient descent on a 10k-example subset.
# NOTE(review): targets the TensorFlow 1.x API; newer TF requires keyword
# arguments (labels=/logits=) for softmax_cross_entropy_with_logits.
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random values following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
"""
Explanation: We're first going to train a multinomial logistic regression using simple gradient descent.
TensorFlow works like this:
* First you describe the computation that you want to see performed: what the inputs, the variables, and the operations look like. These get created as nodes over a computation graph. This description is all contained within the block below:
with graph.as_default():
...
Then you can run the operations on this graph as many times as you want by calling session.run(), providing it outputs to fetch from the graph that get returned. This runtime operation is all contained in the block below:
with tf.Session(graph=graph) as session:
...
Let's load all the data into TensorFlow and build the computation graph corresponding to our training:
End of explanation
"""
num_steps = 801

def accuracy(predictions, labels):
    """Percentage of rows whose predicted argmax matches the one-hot label."""
    correct = np.argmax(predictions, 1) == np.argmax(labels, 1)
    return 100.0 * np.sum(correct) / predictions.shape[0]
# Run the full-batch training loop for num_steps iterations.
# NOTE(review): Python 2 cell (print statements, xrange) using the TF1
# Session API.
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.global_variables_initializer().run()
print 'Initialized'
for step in xrange(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print 'Loss at step', step, ':', l
print 'Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :])
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print 'Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels)
print 'Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels)
"""
Explanation: Let's run this computation and iterate:
End of explanation
"""
# Same logistic-regression graph, but the training inputs are placeholders
# so each session.run() can be fed a fresh minibatch (SGD).
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
"""
Explanation: Let's now switch to stochastic gradient descent training instead, which is much faster.
The graph will be similar, except that instead of holding all the training data into a constant node, we create a Placeholder node which will be fed actual data at every call of session.run().
End of explanation
"""
# Minibatch SGD training loop: slide a window over the (pre-shuffled)
# training set and feed one batch per step via feed_dict.
# NOTE(review): Python 2 cell (print statements, xrange).
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.global_variables_initializer().run()
print "Initialized"
for step in xrange(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print "Minibatch loss at step", step, ":", l
print "Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels)
print "Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels)
print "Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels)
"""
Explanation: Let's run it:
End of explanation
"""
|
jamesfolberth/NGC_STEM_camp_AWS | notebooks/machineLearning_notebooks/03_Optimization/04_Stochastic_Gradient_Ascent.ipynb | bsd-3-clause | plotsurface()
"""
Explanation: Lecture 4: Stochastic Gradient Ascent
<img src="figs/mountains.jpg" width=1100 height=50>
Note: There are several large Helper Functions at the bottom of the notebook. Scroll down and execute those cells before you continue.
<br>
<br>
The Learning Rate Schedule Game
In the case when your log-likelihood function is convex, the choice of learning rate mainly affects the convergence of your SGA routine. In a nonconvex problem, the choice of learning rate can determine whether you find the global maximum, or get stuck forever in a local maximum. In most sophisticated optimization routines, the learning rate is adapted over time. Varying learning rate schedules allow you to explore local maximums but still be able to make it out and eventually find the global maximum.
The following game is a cheap facsimile of stochastic gradient ascent. There is no log-likelihood function, or training set. You just have a simple function that you would like to maximize, namely
$$
f(x,y) = \sin(3 \pi x) ~ \sin(3 \pi y) + 3~\textrm{exp}\left[{-\left(x-\frac{1}{2}\right)^2 - \left(y-\frac{1}{2}\right)^2}\right]
$$
The surface looks as follows. Notice that there is a global maximum at $(1/2,1/2)$ and several local maxima and minima surrounding it.
End of explanation
"""
def schedule(k, n, eta0):
'''
Learning-rate schedule used by SGA below.

:param k: The current iteration
:param n: The max number of iterations
:param eta0: The original learning rate
:returns: the step size for iteration k (currently constant)
'''
# Constant on purpose: the exercise asks the reader to edit this function
# (e.g. eta0 / (1 + alpha * k / n)) to reach the global maximum.
return eta0
# Play one round: start at (0.15, 0.0), 150 steps, initial rate 0.01.
playgame(np.array([0.15,0.0]), 150, .01)
"""
Explanation: I've given you a starting point and a basic gradient ascent algorithm (located in the Helper Functions section below). Below this text there is a learning rate scheduling function that currently just returns the initial learning rate that you prescribe. The goal of this game is for you to adjust the initial learning rate and the scheduling function so that the iterate makes it to the global maximum. The only things you're allowed to change are the initial learning rate and the schedule function. Before you can play you need to evaluate the code-blocks at the bottom of the page. Then come back and evaluate the $\texttt{playgame}$ function with its current inputs and see what happens!
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
def myfun(x, y):
    """Game surface: sin(3*pi*x)*sin(3*pi*y) plus a Gaussian bump of
    height 3 centred at (1/2, 1/2)."""
    ripple = np.sin(3 * np.pi * x) * np.sin(3 * np.pi * y)
    bump = 3 * np.exp(-(x - 0.5) ** 2 - (y - 0.5) ** 2)
    return ripple + bump
def mygrad(x):
    """Analytic gradient of myfun at the point x = (x[0], x[1]).

    Bug fix: the bump term of myfun is 3*exp(-(x-1/2)**2 - (y-1/2)**2), so
    its partial derivative carries a factor -6*(x-1/2)*exp(...); the
    original used -4, which did not match myfun (verified against finite
    differences).
    """
    bump = np.exp(-(x[0] - .5) ** 2 - (x[1] - .5) ** 2)
    g1 = 3 * np.pi * np.cos(3 * np.pi * x[0]) * np.sin(3 * np.pi * x[1]) - 6 * (x[0] - .5) * bump
    g2 = 3 * np.pi * np.sin(3 * np.pi * x[0]) * np.cos(3 * np.pi * x[1]) - 6 * (x[1] - .5) * bump
    return np.array([g1, g2])
def SGA(x, numstep, eta0):
    """Plain gradient ascent on myfun, returning the full iterate history.

    :param x: starting point (length-2 array)
    :param numstep: total number of ascent steps to take
    :param eta0: initial learning rate handed to schedule()
    :returns: (numstep+1, 2) array; row 0 is the start, row k the k-th iterate
    """
    history = np.zeros((numstep + 1, 2))
    history[0, :] = x
    for step in range(numstep):
        # Step size comes from the (user-editable) schedule function above.
        x = x + schedule(step, numstep, eta0) * mygrad(x)
        history[step + 1, :] = x
    return history
def playgame(x0, numstep, eta0):
    """Run gradient ascent from x0 and visualise the trajectory.

    Left panel: contour plot of myfun with the iterate path overlaid.
    Right panel: function value per iteration against the global maximum.

    :param x0: The starting point
    :param numstep: The total number of iterations to do
    :param eta0: The original learning rate

    Bug fix: fvals[ii+1] previously stored myfun at the *previous* iterate
    (xhist[ii]) instead of the new one (xhist[ii+1]); fvals[0] already holds
    the starting value, so the whole curve was shifted by one step and the
    final iterate's value was never shown.
    """
    xx, yy = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 200))
    Z = myfun(xx, yy)
    fig = plt.figure(figsize=(20, 10))
    ax1 = fig.add_subplot(121)
    CS = plt.contour(xx, yy, Z)
    plt.clabel(CS, inline=1, fontsize=10)
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    xhist = SGA(x0, numstep, eta0)
    fvals = np.zeros(numstep + 1)
    fvals[0] = myfun(x0[0], x0[1])
    for ii in range(xhist.shape[0] - 1):
        # Segment from iterate ii to iterate ii+1; distinct local names so
        # the x0 parameter is not shadowed (it was reassigned before).
        xa, ya = xhist[ii][0], xhist[ii][1]
        xb, yb = xhist[ii + 1][0], xhist[ii + 1][1]
        ax1.plot([xa, xb], [ya, yb], color="black", marker="o", lw=1.5, markersize=5)
        fvals[ii + 1] = myfun(xb, yb)  # value at the *new* iterate
    plt.xlabel("x1", fontsize=16)
    plt.ylabel("x2", fontsize=16)
    maxval = myfun(0.5, 0.5)  # known global maximum value (= 4)
    ax2 = fig.add_subplot(122)
    ax2.plot(fvals, 'r--', marker="o")
    ax2.plot([0, numstep + 1], [maxval, maxval], 'k--', lw=2, alpha=0.5)
    plt.xlim([0, numstep + 1])
    plt.ylim([0, 1.25 * maxval])
    plt.xlabel("iteration", fontsize=16)
    plt.ylabel("function value", fontsize=16);
def plotsurface():
# Contour plot of myfun over the unit square: shows the global maximum at
# (1/2, 1/2) and the surrounding local maxima/minima of the game surface.
xx, yy = np.meshgrid(np.linspace(0, 1, 100), np.linspace(0, 1, 200))
Z = myfun(xx, yy)
fig = plt.figure(figsize=(20,10))
ax1 = fig.add_subplot(121)
CS = plt.contour(xx, yy, Z)
plt.clabel(CS, inline=1, fontsize=10)
plt.xlim([0,1])
plt.ylim([0,1])
plt.xlabel("x1", fontsize=16)
plt.ylabel("x2", fontsize=16)
from IPython.core.display import HTML
HTML("""
<style>
.MathJax nobr>span.math>span{border-left-width:0 !important};
</style>
""")
"""
Explanation: Hint: If you're not having much luck, try implementing a schedule of the form
$
\eta_k = \dfrac{\eta_0}{ 1 + \alpha ~ k~/~n}
$ where here $\alpha$ is a tuning parameter. You'll probably also have to make your initial learning rate bigger.
<br><br><br><br><br>
<br><br><br><br><br>
<br><br><br><br><br>
<br><br><br><br><br>
Helper Functions
End of explanation
"""
|
saashimi/code_guild | interactive-coding-challenges/graphs_trees/bst_validate/bst_validate_challenge.ipynb | mit | %run ../bst/bst.py
%load ../bst/bst.py
def validate_bst(node):
"""Return True iff the tree rooted at *node* is a valid binary search
tree (duplicates allowed, per the constraints below).

Challenge stub: intentionally unimplemented; see the solution notebook."""
# TODO: Implement me
pass
"""
Explanation: <small><i>This notebook was prepared by Donne Martin. Source and license info is on GitHub.</i></small>
Challenge Notebook
Problem: Determine if a tree is a valid binary search tree.
Constraints
Test Cases
Algorithm
Code
Unit Test
Solution Notebook
Constraints
Can the tree have duplicates?
Yes
Can we assume we already have a Node class?
Yes
Test Cases
<pre>
Valid:
5
/ \
5 8
/ \ /
4 6 7
Invalid:
5
/ \
5 8
/ \ /
4 9 7
</pre>
Algorithm
Refer to the Solution Notebook. If you are stuck and need a hint, the solution notebook's algorithm discussion might be a good place to start.
Code
End of explanation
"""
# %load test_bst_validate.py
from nose.tools import assert_equal
class TestBstValidate(object):
# Unit tests for validate_bst; relies on Node/insert loaded from
# ../bst/bst.py by the %run/%load magics above.
def test_bst_validate(self):
# Valid tree built via insert(): duplicates are allowed (5 appears twice).
node = Node(5)
insert(node, 8)
insert(node, 5)
insert(node, 6)
insert(node, 4)
insert(node, 7)
assert_equal(validate_bst(node), True)
# Invalid tree wired up by hand: 20 sits in the left subtree of root 5,
# violating the BST property even though each parent/child pair is
# locally consistent.
root = Node(5)
left = Node(5)
right = Node(8)
invalid = Node(20)
root.left = left
root.right = right
root.left.right = invalid
assert_equal(validate_bst(root), False)
print('Success: test_bst_validate')
def main():
# Run the single test case; it is expected to fail until validate_bst
# above is implemented.
test = TestBstValidate()
test.test_bst_validate()
if __name__ == '__main__':
main()
"""
Explanation: Unit Test
The following unit test is expected to fail until you solve the challenge.
End of explanation
"""
|
davidthomas5412/PanglossNotebooks | MassInferencePanglossPerformance.ipynb | mit | from pangloss import BackgroundCatalog, ForegroundCatalog, \
TrueHaloMassDistribution, Kappamap, Shearmap
# Benchmark configuration and fixtures for the original pangloss pipeline.
ITERATIONS = 4
RADIUS = 2.0
# initialize background and foreground
B = BackgroundCatalog(N=10.0, domain=[1.5, 1.4, -1.5, -1.4], field=[0, 0, 0, 0])
F = ForegroundCatalog.guo()
F.set_mass_prior(TrueHaloMassDistribution())
# initialize maps from Hilbert et al 2009
K = Kappamap.example()
S = Shearmap.example()
# run monte carlo samples
def pangloss_benchmark():
# One full inference pass per iteration: sample halo masses, rebuild the
# lightcones, lens by maps and by halos, then score the log-likelihood.
# NOTE(review): Python 2 (xrange); mutates the module-level F and B above.
for _ in xrange(ITERATIONS):
F.draw_halo_masses()
B.drill_lightcones(radius=RADIUS, foreground=F, smooth_corr=False)
B.lens_by_map(K, S)
B.lens_by_halos(lookup_table=False, smooth_corr=False, relevance_lim=0)
B.halo_mass_log_likelihood()
"""
Explanation: Pangloss Performance Benchmark
End of explanation
"""
from massinference.angle import Angle
from massinference.catalog import SourceCatalogFactory, FastSampleHaloCatalogFactory, \
MutableHaloMassCatalog, SourceCatalog, HaloCatalog
from massinference.distribution import MassPrior
from massinference.inference import log_likelihood
from massinference.lenser import MapLenser
from massinference.lightcone import LightconeManager
from massinference.map import KappaMap, ShearMap
from massinference.plot import Limits
import cProfile
ITERATIONS = 4
RADIUS = 2.0
# run parameters
sigma_e = 0.2
random_seed = 1
source_density = 10.0
limits = Limits(Angle.from_degree(1.5), Angle.from_degree(1.4),
Angle.from_degree(-1.5), Angle.from_degree(-1.4))
limits_with_perimeter = limits.add_perimeter(Angle.from_arcmin(RADIUS))
# make a mock WL catalog, of observed, lensed, galaxy ellipticities:
source_factory = SourceCatalogFactory(limits, source_density, sigma_e)
source_catalog = source_factory.generate()
max_z = source_catalog.dataframe[SourceCatalog.Z].max()
e1, e2 = MapLenser(KappaMap.default(), ShearMap.default()).lens(source_catalog)
base_halo_catalog = MutableHaloMassCatalog.default(limits, max_z)
mass_prior = MassPrior(base_halo_catalog.dataframe[HaloCatalog.HALO_MASS].as_matrix())
halo_catalog_factory = FastSampleHaloCatalogFactory(base_halo_catalog,
mass_prior, random_seed)
lightcone_manager = LightconeManager(source_catalog, halo_catalog_factory, RADIUS)
def mass_inference_benchmark():
# Draw ITERATIONS halo-mass samples through the lightcone manager and
# score them against the map-lensed ellipticities (e1, e2) defined above.
predictions = lightcone_manager.run(ITERATIONS)
log_likelihood(predictions, e1, e2, sigma_e)
# Profile the whole benchmark to find the hot spots.
cProfile.run('mass_inference_benchmark()')
"""
Explanation: MassInference Performance Benchmark
End of explanation
"""
|
yashdeeph709/Algorithms | PythonBootCamp/Complete-Python-Bootcamp-master/Filter.ipynb | apache-2.0 | #First let's make a function
def even_check(num):
if num%2 ==0:
return True
"""
Explanation: filter
The function filter(function, list) offers a convenient way to filter out all the elements of an iterable, for which the function returns True.
The function filter(function, l) needs a function as its first argument. The function needs to return a Boolean value (either True or False). This function will be applied to every element of the iterable. Only if the function returns True will the element of the iterable be included in the result.
Lets see some examples:
End of explanation
"""
# Keep only the even numbers in 0..19 using the named predicate above.
# NOTE(review): Python 2 semantics -- range() and filter() both return
# lists here; under Python 3 wrap with list(...) to display the result.
lst =range(20)
filter(even_check,lst)
"""
Explanation: Now let's filter a list of numbers. Note: putting the function into filter without any parenthesis might feel strange, but keep in mind that functions are objects as well.
End of explanation
"""
# Same filtering, written with an inline lambda instead of a named function.
filter(lambda x: x%2==0,lst)
"""
Explanation: filter() is more commonly used with lambda functions, this because we usually use filter for a quick job where we don't want to write an entire function. Lets repeat the example above using a lambda expression:
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.23/_downloads/09baca5bff98c3be2834792aebba565c/montage_sgskip.ipynb | bsd-3-clause | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Joan Massich <mailsik@gmail.com>
#
# License: BSD Style.
import os.path as op
import mne
from mne.channels.montage import get_builtin_montages
from mne.datasets import fetch_fsaverage
from mne.viz import set_3d_title, set_3d_view
"""
Explanation: Plotting sensor layouts of EEG systems
This example illustrates how to load all the EEG system montages
shipped in MNE-python, and display it on the fsaverage template subject.
End of explanation
"""
# For every built-in montage, attach it to a dummy EEG Info object and plot
# the sensor positions against a best-fit sphere head model.
for current_montage in get_builtin_montages():
montage = mne.channels.make_standard_montage(current_montage)
info = mne.create_info(
ch_names=montage.ch_names, sfreq=100., ch_types='eeg')
info.set_montage(montage)
sphere = mne.make_sphere_model(r0='auto', head_radius='auto', info=info)
fig = mne.viz.plot_alignment(
# Plot options
show_axes=True, dig='fiducials', surfaces='head',
bem=sphere, info=info)
set_3d_view(figure=fig, azimuth=135, elevation=80)
set_3d_title(figure=fig, title=current_montage)
"""
Explanation: Check all montages against a sphere
End of explanation
"""
# Repeat the check against the fsaverage template head instead of a sphere;
# fetch_fsaverage() downloads the template MRI on first use.
subjects_dir = op.dirname(fetch_fsaverage())
for current_montage in get_builtin_montages():
montage = mne.channels.make_standard_montage(current_montage)
# Create dummy info
info = mne.create_info(
ch_names=montage.ch_names, sfreq=100., ch_types='eeg')
info.set_montage(montage)
fig = mne.viz.plot_alignment(
# Plot options
show_axes=True, dig='fiducials', surfaces='head', mri_fiducials=True,
subject='fsaverage', subjects_dir=subjects_dir, info=info,
coord_frame='mri',
trans='fsaverage', # transform from head coords to fsaverage's MRI
)
set_3d_view(figure=fig, azimuth=135, elevation=80)
set_3d_title(figure=fig, title=current_montage)
"""
Explanation: Check all montages against fsaverage
End of explanation
"""
|
atcemgil/notes | matkoy2021-1.ipynb | mit | import matplotlib.pyplot as plt
import numpy as np
%matplotlib inline
from __future__ import print_function
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
import matplotlib.pylab as plt
from IPython.display import clear_output, display, HTML
# Toy regression data used throughout the line-fitting demos below.
x = np.array([8.0 , 6.1 , 11., 7., 9., 12. , 4., 2., 10, 5, 3])
y = np.array([6.04, 4.95, 5.58, 6.81, 6.33, 7.96, 5.24, 2.26, 8.84, 2.82, 3.68])
def plot_fit(w1, w0):
# Plot the data, the line f = w0 + w1*x, the vertical residual segments,
# and a bar chart of the per-point squared errors; the title shows the
# total squared error ("Toplam kare hata").
f = w0 + w1*x
plt.figure(figsize=(4,3))
plt.plot(x,y,'sk')
plt.plot(x,f,'o-r')
#plt.axis('equal')
plt.xlim((0,15))
plt.ylim((0,10))
for i in range(len(x)):
plt.plot((x[i],x[i]),(f[i],y[i]),'b')
# plt.show()
# plt.figure(figsize=(4,1))
plt.bar(x,(f-y)**2/2)
plt.title('Toplam kare hata = '+str(np.sum((f-y)**2/2)))
plt.ylim((0,10))
plt.xlim((0,15))
plt.show()
# Static example first, then an interactive slider version of the same plot.
plot_fit(0.0,3.79)
interact(plot_fit, w1=(-2, 2, 0.01), w0=(-5, 5, 0.01));
"""
Explanation: Yapay Öğrenmeye Giriş I
Ali Taylan Cemgil
Parametrik Regresyon, Parametrik Fonksyon Oturtma Problemi (Parametric Regression, Function Fitting)
Verilen girdi ve çıktı ikilileri $x, y$ için parametrik bir fonksyon $f$ oturtma problemi.
Parametre $w$ değerlerini öyle bir seçelim ki
$$
y \approx f(x; w)
$$
$x$: Girdi (Input)
$y$: Çıktı (Output)
$w$: Parametre (Weight, ağırlık)
$e$: Hata
Örnek 1:
$$
e = y - f(x)
$$
Örnek 2:
$$
e = \frac{y}{f(x)}-1
$$
$E$, $D$: Hata fonksyonu (Error function), Iraksay (Divergence)
Doğrusal Regresyon (Linear Regression)
Oturtulacak $f$ fonksyonun model parametreleri $w$ cinsinden doğrusal olduğu durum (Girdiler $x$ cinsinden doğrusal olması gerekmez).
Tanım: Doğrusallık
Bir $g$ fonksyonu doğrusaldır demek, herhangi skalar $a$ ve $b$ içn
$$
g(aw_1 + b w_2) = a g(w_1) + b g(w_2)
$$
olması demektir.
Örnek: Doğru oturtmak (Line Fitting)
Girdi-Çıktı ikilileri
$$
(x_i, y_i)
$$
$i=1\dots N$
Model
$$
y_i \approx f(x; w_1, w_0) = w_0 + w_1 x
$$
$x$ : Girdi
$w_1$: Eğim
$w_0$: Kesişme
$f_i \equiv f(x_i; w_1, w_0)$
Örnek 2: Parabol Oturtma
Girdi-Çıktı ikilileri
$$
(x_i, y_i)
$$
$i=1\dots N$
Model
$$
y_i \approx f(x_i; w_2, w_1, w_0) = w_0 + w_1 x_i + w_2 x_i^2
$$
$x$ : Girdi
$w_2$: Karesel terimin katsayısı
$w_1$: Doğrusal terimin katsayısı
$w_0$: Sabit terim katsayısı
$f_i \equiv f(x_i; w_2, w_1, w_0)$
Bir parabol $x$'in doğrusal fonksyonu değil ama $w_2, w_1, w_0$ parametrelerinin doğrusal fonksyonu.
End of explanation
"""
x = np.array([8.0 , 6.1 , 11., 7., 9., 12. , 4., 2., 10, 5, 3])
y = np.array([6.04, 4.95, 5.58, 6.81, 6.33, 7.96, 5.24, 2.26, 8.84, 2.82, 3.68])

def hata(y, x, w):
    """Least-squares objective ("hata" = error): half the sum of squared
    residuals of the line w[0] + w[1]*x against the targets y."""
    residuals = y - (w[0] + w[1] * x)
    return 0.5 * np.sum(residuals ** 2)
# Random search: propose Gaussian perturbations of w and keep a proposal
# only when it lowers the squared-error objective hata().
w = np.array([0, 0])
E = hata(y, x, w)
for e in range(1000):
g = 0.1*np.random.randn(2)
w_temp = w + g
E_temp = hata(y, x, w_temp)
if E_temp<E:
E = E_temp
w = w_temp
#print(e, E)
print(e, E)
w
"""
Explanation: Rasgele Arama
End of explanation
"""
%matplotlib inline
import scipy as sc
import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pylab as plt
df_arac = pd.read_csv(u'data/arac.csv',sep=';')
df_arac[['Year','Car']]
#df_arac
BaseYear = 1995
x = np.matrix(df_arac.Year[0:]).T-BaseYear
y = np.matrix(df_arac.Car[0:]).T/1000000.
plt.plot(x+BaseYear, y, 'o-')
plt.xlabel('Yil')
plt.ylabel('Araba (Milyon)')
plt.show()
%matplotlib inline
from __future__ import print_function
from ipywidgets import interact, interactive, fixed
import ipywidgets as widgets
import matplotlib.pylab as plt
from IPython.display import clear_output, display, HTML
w_0 = 0.27150786
w_1 = 0.37332256
BaseYear = 1995
x = np.matrix(df_arac.Year[0:]).T-BaseYear
y = np.matrix(df_arac.Car[0:]).T/1000000.
fig, ax = plt.subplots()
f = w_1*x + w_0
plt.plot(x+BaseYear, y, 'o-')
ln, = plt.plot(x+BaseYear, f, 'r')
plt.xlabel('Years')
plt.ylabel('Number of Cars (Millions)')
ax.set_ylim((-2,13))
plt.close(fig)
def set_line(w_1, w_0):
f = w_1*x + w_0
e = y - f
ln.set_ydata(f)
ax.set_title('Total Error = {} '.format(np.asscalar(e.T*e/2)))
display(fig)
set_line(0.32,3)
interact(set_line, w_1=(-2, 2, 0.01), w_0=(-5, 5, 0.01));
w_0 = 0.27150786
w_1 = 0.37332256
w_2 = 0.1
BaseYear = 1995
x = np.array(df_arac.Year[0:]).T-BaseYear
y = np.array(df_arac.Car[0:]).T/1000000.
fig, ax = plt.subplots()
f = w_2*x**2 + w_1*x + w_0
plt.plot(x+BaseYear, y, 'o-')
ln, = plt.plot(x+BaseYear, f, 'r')
plt.xlabel('Yıl')
plt.ylabel('Araba Sayısı (Milyon)')
ax.set_ylim((-2,13))
plt.close(fig)
def set_line(w_2, w_1, w_0):
f = w_2*x**2 + w_1*x + w_0
e = y - f
ln.set_ydata(f)
ax.set_title('Ortalama Kare Hata = {} '.format(np.sum(e*e/len(e))))
display(fig)
set_line(w_2, w_1, w_0)
interact(set_line, w_2=(-0.1,0.1,0.001), w_1=(-2, 2, 0.01), w_0=(-5, 5, 0.01))
"""
Explanation: Gerçek veri: Türkiyedeki araç sayıları
End of explanation
"""
from itertools import product
BaseYear = 1995
x = np.matrix(df_arac.Year[0:]).T-BaseYear
y = np.matrix(df_arac.Car[0:]).T/1000000.
# Setup the vandermonde matrix
# NOTE(review): np.matrix is deprecated in favour of plain 2-D ndarrays.
N = len(x)
A = np.hstack((np.ones((N,1)), x))
# Grid of (w0, w1) values over which the error surface is evaluated.
left = -5
right = 15
bottom = -4
top = 6
step = 0.05
W0 = np.arange(left,right, step)
W1 = np.arange(bottom,top, step)
ErrSurf = np.zeros((len(W1),len(W0)))
# Evaluate the total squared error E(w) = e'e/2 at every grid point.
for i,j in product(range(len(W1)), range(len(W0))):
e = y - A*np.matrix([W0[j], W1[i]]).T
ErrSurf[i,j] = e.T*e/2
plt.figure(figsize=(7,7))
plt.imshow(ErrSurf, interpolation='nearest',
vmin=0, vmax=1000,origin='lower',
extent=(left,right,bottom,top),cmap='Blues_r')
plt.xlabel('w0')
plt.ylabel('w1')
plt.title('Error Surface')
plt.colorbar(orientation='horizontal')
plt.show()
"""
Explanation: Örnek 1, devam: Modeli Öğrenmek
Öğrenmek: parametre kestirimi $w = [w_0, w_1]$
Genelde model veriyi hatasız açıklayamayacağı için her veri noktası için bir hata tanımlıyoruz:
$$e_i = y_i - f(x_i; w)$$
Toplam kare hata
$$
E(w) = \frac{1}{2} \sum_i (y_i - f(x_i; w))^2 = \frac{1}{2} \sum_i e_i^2
$$
Toplam kare hatayı $w_0$ ve $w_1$ parametrelerini değiştirerek azaltmaya çalışabiliriz.
Hata yüzeyi
End of explanation
"""
# Solving the Normal Equations
# Setup the Design matrix
N = len(x)
A = np.hstack((np.ones((N,1)), x))
#plt.imshow(A, interpolation='nearest')
# Solve the least squares problem
# NOTE(review): newer NumPy warns unless rcond= is passed to lstsq, and
# np.asscalar was removed in NumPy 1.23 (use float(...) instead).
w_ls,E,rank,sigma = np.linalg.lstsq(A, y)
print('Parametreler: \nw0 = ', w_ls[0],'\nw1 = ', w_ls[1] )
print('Toplam Kare Hata:', E/2)
# Fitted line evaluated at the observed years, for plotting.
f = np.asscalar(w_ls[1])*x + np.asscalar(w_ls[0])
plt.plot(x+BaseYear, y, 'o-')
plt.plot(x+BaseYear, f, 'r')
plt.xlabel('Yıl')
plt.ylabel('Araba sayısı (Milyon)')
plt.show()
"""
Explanation: Modeli Nasıl Kestirebiliriz?
Fikir: En küçük kare hata
(Gauss 1795, Legendre 1805)
Toplam hatanın $w_0$ ve $w_1$'e göre türevini hesapla, sıfıra eşitle ve çıkan denklemleri çöz
\begin{eqnarray}
\left(
\begin{array}{c}
y_0 \ y_1 \ \vdots \ y_{N-1}
\end{array}
\right)
\approx
\left(
\begin{array}{cc}
1 & x_0 \ 1 & x_1 \ \vdots \ 1 & x_{N-1}
\end{array}
\right)
\left(
\begin{array}{c}
w_0 \ w_1
\end{array}
\right)
\end{eqnarray}
\begin{eqnarray}
y \approx A w
\end{eqnarray}
$A = A(x)$: Model Matrisi
$w$: Model Parametreleri
$y$: Gözlemler
Hata vektörü: $$e = y - Aw$$
\begin{eqnarray}
E(w) & = & \frac{1}{2}e^\top e = \frac{1}{2}(y - Aw)^\top (y - Aw)\
& = & \frac{1}{2}y^\top y - \frac{1}{2} y^\top Aw - \frac{1}{2} w^\top A^\top y + \frac{1}{2} w^\top A^\top Aw \
& = & \frac{1}{2} y^\top y - y^\top Aw + \frac{1}{2} w^\top A^\top Aw \
\end{eqnarray}
Gradyan
https://tr.khanacademy.org/math/multivariable-calculus/multivariable-derivatives/partial-derivative-and-gradient-articles/a/the-gradient
\begin{eqnarray}
\frac{d E}{d w } & = & \left(\begin{array}{c}
\partial E/\partial w_0 \ \partial E/\partial w_1 \ \vdots \ \partial E/\partial w_{K-1}
\end{array}\right)
\end{eqnarray}
Toplam hatanın gradyanı
\begin{eqnarray}
\frac{d}{d w }E(w) & = & \frac{d}{d w }(\frac{1}{2} y^\top y) &+ \frac{d}{d w }(- y^\top Aw) &+ \frac{d}{d w }(\frac{1}{2} w^\top A^\top Aw) \
& = & 0 &- A^\top y &+ A^\top A w \
& = & - A^\top (y - Aw) \
& = & - A^\top e \
& \equiv & \nabla E(w)
\end{eqnarray}
Yapay zekaya gönül veren herkesin bilmesi gereken eşitlikler
Vektör iç çarpımının gradyeni
\begin{eqnarray}
\frac{d}{d w }(h^\top w) & = & h
\end{eqnarray}
Karesel bir ifadenin gradyeni
\begin{eqnarray}
\frac{d}{d w }(w^\top K w) & = & (K+K^\top) w
\end{eqnarray}
En küçük kare hata çözümü doğrusal modellerde doğrusal denklemlerin çözümü ile bulunabiliyor
\begin{eqnarray}
w^* & = & \arg\min_{w} E(w)
\end{eqnarray}
Eniyileme Şartı (gradyan sıfır olmalı )
\begin{eqnarray}
\nabla E(w^*) & = & 0
\end{eqnarray}
\begin{eqnarray}
0 & = & - A^\top y + A^\top A w^* \
A^\top y & = & A^\top A w^* \
w^* & = & (A^\top A)^{-1} A^\top y
\end{eqnarray}
Geometrik (Projeksyon) yorumu:
\begin{eqnarray}
f & = & A w^* = A (A^\top A)^{-1} A^\top y
\end{eqnarray}
End of explanation
"""
# Anscombe-quartet-style data: one x column and three candidate y columns
# (two commented out), used to fit polynomials of increasing degree.
x = np.array([10, 8, 13, 9, 11, 14, 6, 4, 12, 7, 5])
N = len(x)
x = x.reshape((N,1))
y = np.array([8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68]).reshape((N,1))
#y = np.array([9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74]).reshape((N,1))
#y = np.array([7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73]).reshape((N,1))
def fit_and_plot_poly(degree):
# Fit a polynomial of the given degree by least squares and plot the data,
# the fitted curve on a fine grid, and the residual error in the title.
# NOTE(review): passing a generator to np.hstack is deprecated in newer
# NumPy (wrap it in a list), and np.asscalar was removed in NumPy 1.23.
#A = np.hstack((np.power(x,0), np.power(x,1), np.power(x,2)))
A = np.hstack((np.power(x,i) for i in range(degree+1)))
# Setup the vandermonde matrix
xx = np.matrix(np.linspace(np.asscalar(min(x))-1,np.asscalar(max(x))+1,300)).T
A2 = np.hstack((np.power(xx,i) for i in range(degree+1)))
#plt.imshow(A, interpolation='nearest')
# Solve the least squares problem
w_ls,E,rank,sigma = np.linalg.lstsq(A, y)
f = A2*w_ls
plt.plot(x, y, 'o')
plt.plot(xx, f, 'r')
plt.xlabel('x')
plt.ylabel('y')
plt.gca().set_ylim((0,20))
#plt.gca().set_xlim((1950,2025))
# lstsq returns an empty residual array for an exact/rank-deficient fit,
# which is falsy -- hence the branch below.
if E:
plt.title('Mertebe = '+str(degree)+' Hata='+str(E[0]))
else:
plt.title('Mertebe = '+str(degree)+' Hata= 0')
plt.show()
fit_and_plot_poly(0)
interact(fit_and_plot_poly, degree=(0,10))
"""
Explanation: Polinomlar
Parabol
\begin{eqnarray}
\left(
\begin{array}{c}
y_0 \ y_1 \ \vdots \ y_{N-1}
\end{array}
\right)
\approx
\left(
\begin{array}{ccc}
1 & x_0 & x_0^2 \ 1 & x_1 & x_1^2 \ \vdots \ 1 & x_{N-1} & x_{N-1}^2
\end{array}
\right)
\left(
\begin{array}{c}
w_0 \ w_1 \ w_2
\end{array}
\right)
\end{eqnarray}
$K$ derecesinde polinom
\begin{eqnarray}
\left(
\begin{array}{c}
y_0 \ y_1 \ \vdots \ y_{N-1}
\end{array}
\right)
\approx
\left(
\begin{array}{ccccc}
1 & x_0 & x_0^2 & \dots & x_0^K \ 1 & x_1 & x_1^2 & \dots & x_1^K\ \vdots \ 1 & x_{N-1} & x_{N-1}^2 & \dots & x_{N-1}^K
\end{array}
\right)
\left(
\begin{array}{c}
w_0 \ w_1 \ w_2 \ \vdots \ w_K
\end{array}
\right)
\end{eqnarray}
\begin{eqnarray}
y \approx A w
\end{eqnarray}
$A = A(x)$: Model matrisi
$w$: Model Parametreleri
$y$: Gözlemler
Polinom oturtmada ortaya çıkan özel yapılı matrislere Vandermonde matrisleri de denmektedir.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/cccma/cmip6/models/sandbox-1/toplevel.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'cccma', 'sandbox-1', 'toplevel')
"""
Explanation: ES-DOC CMIP6 Model Properties - Toplevel
MIP Era: CMIP6
Institute: CCCMA
Source ID: SANDBOX-1
Sub-Topics: Radiative Forcings.
Properties: 85 (42 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:46
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
# Kept at 0 (unpublished) until the document content has been reviewed.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Flux Correction
3. Key Properties --> Genealogy
4. Key Properties --> Software Properties
5. Key Properties --> Coupling
6. Key Properties --> Tuning Applied
7. Key Properties --> Conservation --> Heat
8. Key Properties --> Conservation --> Fresh Water
9. Key Properties --> Conservation --> Salt
10. Key Properties --> Conservation --> Momentum
11. Radiative Forcings
12. Radiative Forcings --> Greenhouse Gases --> CO2
13. Radiative Forcings --> Greenhouse Gases --> CH4
14. Radiative Forcings --> Greenhouse Gases --> N2O
15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
17. Radiative Forcings --> Greenhouse Gases --> CFC
18. Radiative Forcings --> Aerosols --> SO4
19. Radiative Forcings --> Aerosols --> Black Carbon
20. Radiative Forcings --> Aerosols --> Organic Carbon
21. Radiative Forcings --> Aerosols --> Nitrate
22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
24. Radiative Forcings --> Aerosols --> Dust
25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
27. Radiative Forcings --> Aerosols --> Sea Salt
28. Radiative Forcings --> Other --> Land Use
29. Radiative Forcings --> Other --> Solar
1. Key Properties
Key properties of the model
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Top level overview of coupled model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of coupled model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.flux_correction.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Flux Correction
Flux correction properties of the model
2.1. Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how flux corrections are applied in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.year_released')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Genealogy
Genealogy and history of the model
3.1. Year Released
Is Required: TRUE Type: STRING Cardinality: 1.1
Year the model was released
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP3_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.2. CMIP3 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP3 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.CMIP5_parent')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. CMIP5 Parent
Is Required: FALSE Type: STRING Cardinality: 0.1
CMIP5 parent if any
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.genealogy.previous_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Previous Name
Is Required: FALSE Type: STRING Cardinality: 0.1
Previously known as
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Software Properties
Software properties of model
4.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.components_structure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.4. Components Structure
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how model realms are structured into independent software components (coupled via a coupler) and internal software components.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.software_properties.coupler')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OASIS"
# "OASIS3-MCT"
# "ESMF"
# "NUOPC"
# "Bespoke"
# "Unknown"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.5. Coupler
Is Required: FALSE Type: ENUM Cardinality: 0.1
Overarching coupling framework for model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Coupling
**
5.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of coupling in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_double_flux')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.2. Atmosphere Double Flux
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the atmosphere passing a double flux to the ocean and sea ice (as opposed to a single one)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_fluxes_calculation_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Atmosphere grid"
# "Ocean grid"
# "Specific coupler grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 5.3. Atmosphere Fluxes Calculation Grid
Is Required: FALSE Type: ENUM Cardinality: 0.1
Where are the air-sea fluxes calculated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.coupling.atmosphere_relative_winds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 5.4. Atmosphere Relative Winds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Are relative or absolute winds used to compute the flux? I.e. do ocean surface currents enter the wind stress calculation?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Tuning Applied
Tuning methodology for model
6.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics/diagnostics retained. Document the relative weight given to climate performance metrics/diagnostics versus process oriented metrics/diagnostics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics/diagnostics of the global mean state used in tuning model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics/diagnostics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics/diagnostics used in tuning model/component (such as 20th century)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.energy_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.5. Energy Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how energy balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.tuning_applied.fresh_water_balance')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. Fresh Water Balance
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how fresh_water balance was obtained in the full system: in the various components independently or at the components coupling stage?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Conservation --> Heat
Global heat convervation properties of the model
7.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how heat is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.heat.land_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.6. Land Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how heat is conserved at the land/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.global')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation --> Fresh Water
Global fresh water convervation properties of the model
8.1. Global
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh_water is conserved globally
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_ocean_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.2. Atmos Ocean Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh_water is conserved at the atmosphere/ocean coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_land_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Atmos Land Interface
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how fresh water is conserved at the atmosphere/land coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.atmos_sea-ice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Atmos Sea-ice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the atmosphere/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.5. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how fresh water is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.runoff')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.6. Runoff
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how runoff is distributed and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.iceberg_calving')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.7. Iceberg Calving
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how iceberg calving is modeled and conserved
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.endoreic_basins')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.8. Endoreic Basins
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how endoreic basins (no ocean access) are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.fresh_water.snow_accumulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.9. Snow Accumulation
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe how snow accumulation over land and over sea-ice is treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.salt.ocean_seaice_interface')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Key Properties --> Conservation --> Salt
Global salt convervation properties of the model
9.1. Ocean Seaice Interface
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how salt is conserved at the ocean/sea-ice coupling interface
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.key_properties.conservation.momentum.details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10. Key Properties --> Conservation --> Momentum
Global momentum convervation properties of the model
10.1. Details
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how momentum is conserved in the model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11. Radiative Forcings
Radiative forcings of the model for historical and scenario (aka Table 12.1 IPCC AR5)
11.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of radiative forcings (GHG and aerosols) implementation in model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12. Radiative Forcings --> Greenhouse Gases --> CO2
Carbon dioxide forcing
12.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CO2.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Radiative Forcings --> Greenhouse Gases --> CH4
Methane forcing
13.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CH4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Radiative Forcings --> Greenhouse Gases --> N2O
Nitrous oxide forcing
14.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.N2O.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Radiative Forcings --> Greenhouse Gases --> Tropospheric O3
Troposheric ozone forcing
15.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.tropospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 16. Radiative Forcings --> Greenhouse Gases --> Stratospheric O3
Stratospheric ozone forcing
16.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.stratospheric_O3.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17. Radiative Forcings --> Greenhouse Gases --> CFC
Ozone-depleting and non-ozone-depleting fluorinated gases forcing
17.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.equivalence_concentration')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "Option 1"
# "Option 2"
# "Option 3"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 17.2. Equivalence Concentration
Is Required: TRUE Type: ENUM Cardinality: 1.1
Details of any equivalence concentrations used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.greenhouse_gases.CFC.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 18. Radiative Forcings --> Aerosols --> SO4
SO4 aerosol forcing
18.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.SO4.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19. Radiative Forcings --> Aerosols --> Black Carbon
Black carbon aerosol forcing
19.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.black_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 20. Radiative Forcings --> Aerosols --> Organic Carbon
Organic carbon aerosol forcing
20.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.organic_carbon.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 21. Radiative Forcings --> Aerosols --> Nitrate
Nitrate forcing
21.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.nitrate.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Radiative Forcings --> Aerosols --> Cloud Albedo Effect
Cloud albedo effect forcing (RFaci)
22.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 22.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_albedo_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 22.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Radiative Forcings --> Aerosols --> Cloud Lifetime Effect
Cloud lifetime effect forcing (ERFaci)
23.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.aerosol_effect_on_ice_clouds')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.2. Aerosol Effect On Ice Clouds
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative effects of aerosols on ice clouds are represented?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.RFaci_from_sulfate_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.3. RFaci From Sulfate Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Radiative forcing from aerosol cloud interactions from sulfate aerosol only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.cloud_lifetime_effect.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 24. Radiative Forcings --> Aerosols --> Dust
Dust forcing
24.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.dust.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 24.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Radiative Forcings --> Aerosols --> Tropospheric Volcanic
Tropospheric volcanic forcing
25.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.tropospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 25.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Radiative Forcings --> Aerosols --> Stratospheric Volcanic
Stratospheric volcanic forcing
26.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.historical_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.2. Historical Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in historical simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.future_explosive_volcanic_aerosol_implementation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Type A"
# "Type B"
# "Type C"
# "Type D"
# "Type E"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26.3. Future Explosive Volcanic Aerosol Implementation
Is Required: TRUE Type: ENUM Cardinality: 1.1
How explosive volcanic aerosol is implemented in future simulations
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.stratospheric_volcanic.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.4. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Radiative Forcings --> Aerosols --> Sea Salt
Sea salt forcing
27.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.aerosols.sea_salt.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "M"
# "Y"
# "E"
# "ES"
# "C"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 28. Radiative Forcings --> Other --> Land Use
Land use forcing
28.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How this forcing agent is provided (e.g. via concentrations, emission precursors, prognostically derived, etc.)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.crop_change_only')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 28.2. Crop Change Only
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Land use change represented via crop change only?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.land_use.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28.3. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.provision')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "N/A"
# "irradiance"
# "proton"
# "electron"
# "cosmic ray"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 29. Radiative Forcings --> Other --> Solar
Solar forcing
29.1. Provision
Is Required: TRUE Type: ENUM Cardinality: 1.N
How solar forcing is provided
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.toplevel.radiative_forcings.other.solar.additional_information')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 29.2. Additional Information
Is Required: FALSE Type: STRING Cardinality: 0.1
Additional information relating to the provision and implementation of this forcing agent (e.g. citations, use of non-standard datasets, explaining how multiple provisions are used, etc.).
End of explanation
"""
|
pfschus/fission_bicorrelation | analysis/Cf072115_to_Cf072215b/create_bhp_nn_1ns.ipynb | mit | import os
import sys
import matplotlib.pyplot as plt
import matplotlib.colors
import numpy as np
import imageio
import scipy.io as sio
sys.path.append('../../scripts/')
import bicorr as bicorr
import bicorr_plot as bicorr_plot
%load_ext autoreload
%autoreload 2
"""
Explanation: Analysis of combined data sets Cf072115 - Cf072215b
Patricia Schuster
University of Michigan
2/9/2018
We are expecting 8.5" of snow today. ..........
I am combining four data sets:
Cf072115
Cf072115b
Cf072215a
Cf072215b
I have combined the sparse_bhm.npz, sparse_bhm_neg.npz, and singles_hist.npz files on flux and downloaded to my local machine.
Now I will revive those files and produce bhp_nn for positive and negative time ranges. This is so that I don't have to keep importing the entire bhm files each time because it takes forever and a ton of memory.
I'm going to use 1 ns time binning for this to save 16x space in the time dimensions.
End of explanation
"""
import seaborn as sns
sns.set(style='ticks')
"""
Explanation: Use seaborn to make plots prettier
End of explanation
"""
os.listdir('../../meas_info/')
det_df = bicorr.load_det_df('../../meas_info/det_df_pairs_angles.csv',plot_flag=True)
chList, fcList, detList, num_dets, num_det_pairs = bicorr.build_ch_lists(print_flag=True)
"""
Explanation: Load det_df, channel lists
End of explanation
"""
num_fissions = int(int(sio.loadmat('datap/num_fissions.mat')['num_fissions'])*float(sio.loadmat('datap/fc_efficiency.mat')['fc_efficiency']))
num_fissions
"""
Explanation: Calculate num_fissions
Sometime it would be convenient to store all of the measurement in a database, and then load it according to which datasets are specified in note. For now, input the data manually.
(Calculated in excel file analysis_status.xlsx)
End of explanation
"""
os.listdir()
sparse_bhm, dt_bin_edges, note = bicorr.load_sparse_bhm(filepath='datap')
sparse_bhm.nbytes
bhm_pos = bicorr.revive_sparse_bhm(sparse_bhm, det_df, dt_bin_edges)
(bhm_pos.nbytes)/16 # .5 ns bins
"""
Explanation: Load data
Load sparse_bhm.npz, revive bhm
End of explanation
"""
sparse_bhm_neg, dt_bin_edges_neg, note_neg = bicorr.load_sparse_bhm(filename = 'sparse_bhm_neg.npz', filepath='datap')
bhm_neg = bicorr.revive_sparse_bhm(sparse_bhm_neg, det_df, dt_bin_edges_neg)
"""
Explanation: I'm going to perform the background subtraction, then store bhp_nn_diff for all 990 pairs to disk so I can reload it later.
Load sparse_bhm_neg.npz, revive bhm_neg
End of explanation
"""
singles_hist, dt_bin_edges_sh, dict_det_to_index, dict_index_to_det = bicorr.load_singles_hist(filepath='datap')
help(bicorr.load_singles_hist)
plt.figure(figsize=(4,3))
dt_bin_centers_sh = (dt_bin_edges_sh[:-1]+dt_bin_edges_sh[1:])/2
plt.plot(dt_bin_centers_sh,np.sum(singles_hist,axis=(0,1)))
plt.xlabel('Time (ns)')
plt.ylabel('Number of events')
plt.title('TOF distribution, all events')
plt.yscale('log')
sns.despine(right=False)
bicorr_plot.save_fig_to_folder('singles_hist_allt_allp',extensions=['png','pdf'])
plt.show()
plt.figure(figsize=(4,3))
plt.plot(dt_bin_centers_sh,np.sum(singles_hist[0,:,:],axis=(0)))
plt.plot(dt_bin_centers_sh,np.sum(singles_hist[1,:,:],axis=(0)))
plt.xlabel('Time (ns)')
plt.ylabel('Number of events')
plt.title('TOF distribution, all detectors')
plt.legend(['N','G'])
plt.yscale('log')
sns.despine(right=False)
bicorr_plot.save_fig_to_folder('singles_hist_ng_allp',extensions=['png','pdf'])
plt.show()
plt.figure(figsize=(4,3))
plt.plot(dt_bin_centers_sh,singles_hist[0,dict_det_to_index[2],:])
plt.plot(dt_bin_centers_sh,singles_hist[1,dict_det_to_index[2],:])
plt.xlabel('Time (ns)')
plt.ylabel('Number of events')
plt.title('TOF distribution, channel 2')
plt.legend(['N','G'])
plt.yscale('log')
sns.despine(right=False)
bicorr_plot.save_fig_to_folder('singles_hist_ng_ch2',extensions=['png','pdf'])
plt.show()
"""
Explanation: Load singles_hist.npz
End of explanation
"""
print(bhm_pos.shape)
print(bhm_neg.shape)
bhm_pos, dt_bin_edges = bicorr.coarsen_bhm(bhm_pos,dt_bin_edges, 4,True)
bhm_neg, dt_bin_edges_neg = bicorr.coarsen_bhm(bhm_neg,dt_bin_edges_neg,4,True)
print(bhm_pos.shape)
print(bhm_neg.shape)
"""
Explanation: Coarsen bhm to 1 ns. time binning.
End of explanation
"""
pair_is = bicorr.generate_pair_is(det_df, ignore_fc_neighbors_flag=True)
len(pair_is)
"""
Explanation: Produce bhp for $nn$ events
One key piece of data that I am going to work with for producing multiple plots is the bhp for $nn$ events across all detector pairs. (Actually, only the pairs not next to the fission chambers)
So I am going to produce that for future use. This will be copied into another notebook, but the process of loading all of the data is the same so I'm doing that here since all the data is loaded.
I'm going to make this with 1 ns time binning to keep the file size manageable.
Produce pair_is for pairs not next to fission chamber
End of explanation
"""
plt.figure(figsize=(6,6))
plt.plot(det_df.iloc[pair_is]['d1'],det_df.iloc[pair_is]['d2'],'sk')
for i in [1,17,33]:
plt.axvline(i,c='r')
plt.axhline(i,c='r')
plt.xlabel('Detector 1 channel')
plt.ylabel('Detector 2 channel')
plt.title('Included detector pairs')
sns.despine(right=False)
bicorr_plot.save_fig_to_folder(fig_filename='pair_is_without_fc_neighbors',extensions=['png','pdf'])
plt.show()
"""
Explanation: Look at this distribution.
End of explanation
"""
bhm_pos.shape
bhm_pos_shape = bhm_pos[pair_is,:,:,:].shape
print(bhm_pos_shape)
"""
Explanation: Create bhp_nn_pos, bhp_nn_neg, bhp_nn_diff
Following instructions from bicorr > methods > nn_sum_and_br_subtraction.
I'm going to create arrays with 1 ns time binning and save them to disk, so I can easily reload them in the future.
End of explanation
"""
bhp_nn_pos = np.zeros((bhm_pos_shape[0],bhm_pos_shape[2],bhm_pos_shape[3]))
bhp_nn_neg = np.zeros((bhm_pos_shape[0],bhm_pos_shape[2],bhm_pos_shape[3]))
bhp_nn_neg.shape
for i in np.arange(len(pair_is)):
pair_i = pair_is[i]
bhp_nn_pos[i,:,:] = bicorr.build_bhp(bhm_pos,dt_bin_edges,pair_is=[pair_i],type_is=[0])[0]
bhp_nn_neg[i,:,:] = bicorr.build_bhp(bhm_neg,dt_bin_edges_neg,pair_is=[pair_i],type_is=[0])[0]
print(bhp_nn_pos.shape)
print(bhp_nn_neg.shape)
"""
Explanation: The challenge here is that I want to preserve the dimension of pair_is (I don't want to sum across all pairs in pair_is). How can I do this without significantly modifying my code base?
Set up arrays to fill
End of explanation
"""
i = 500
bicorr_plot.bhp_plot(bhp_nn_pos[i,:,:],dt_bin_edges,show_flag=True,title='bhp_nn_pos at i={}'.format(i))
bicorr_plot.bhp_plot(bhp_nn_neg[i,:,:],dt_bin_edges_neg,show_flag=True,title='bhp_nn_neg at i={}'.format(i))
"""
Explanation: Plot a few to make sure they look good.
End of explanation
"""
bicorr_plot.bhp_plot(np.sum(bhp_nn_pos,axis=0),dt_bin_edges,show_flag=True,title='bhp_nn_pos')
bicorr_plot.bhp_plot(np.sum(bhp_nn_neg,axis=0),dt_bin_edges_neg,show_flag=True,title='bhp_nn_neg')
"""
Explanation: Plot them now as sums across all pairs.
End of explanation
"""
# Background subtraction: subtract the negative-time (accidentals) histogram
# from the positive-time one.  bhp_nn_neg is reversed along both time axes
# ([:,::-1,::-1]) so its (t1, t2) bins line up with the positive quadrant.
# Cast to int32 first so the difference can hold negative counts
# (the raw histograms are presumably unsigned -- TODO confirm dtype).
bhp_nn_diff = np.subtract(bhp_nn_pos.astype(np.int32),bhp_nn_neg[:,::-1,::-1].astype(np.int32))
# Background-subtracted distribution summed over all detector pairs.
bicorr_plot.bhp_plot(np.sum(bhp_nn_diff,axis=0),dt_bin_edges,show_flag=True,title='bhp_nn_diff')
# Same quantity for a single detector pair, for comparison.
i = 4
bicorr_plot.bhp_plot(bhp_nn_diff[i,:,:],dt_bin_edges,show_flag=True,title='bhp_nn_diff')
"""
Explanation: Now create bhp_nn_diff.
Question: Should I create bhp_nn_diff here, or work with bhp_nn_pos and bhp_nn_neg? The data is still pretty sparse, so bhp_nn_diff would end up with a lot of negative values in it. Mathematically, once I start taking sums, it would be the same. But I will always have to load bhp_nn_pos and bhp_nn_neg anyway, so I could just create bhp_nn_diff whenever I load them. Yeah. Do that.
End of explanation
"""
bhp_nn_diff_pair = np.zeros((861, 200, 200))
for i in np.arange(len(pair_is)):
pair_i = pair_is[i]
bhp_nn_diff_pair[i,:,:] = np.subtract(bhp_nn_pos[i,:,:].astype(np.int32),bhp_nn_neg[i,::-1,::-1].astype(np.int32))
bhp_nn_diff_pair.shape
np.array_equal(bhp_nn_diff,bhp_nn_diff_pair)
"""
Explanation: One thing to keep in mind is that bicorr.bicorr_plot does not show negative values, so the background subtraction makes it look "cleaner" than it is in reality.
Verify bhp_nn_diff
Calculate bhp_nn_diff pair by pair and make sure it matches what I've already done.
End of explanation
"""
note = 'Stored from Cf072115_to_Cf072215b with 1 ns time binning. Pairs are without fc neighbors. -PFS, 2/9/18'
save_filename = 'datap/bhp_nn_by_pair_1ns'
np.savez(save_filename, bhp_nn_neg = bhp_nn_neg, bhp_nn_pos = bhp_nn_pos,
dt_bin_edges = dt_bin_edges, pair_is = pair_is, note = note)
"""
Explanation: Store bhp_nn_pos, bhp_nn_neg
bhp_nn_neg, bhp_nn_pos
dt_bin_edges
pair_is
note
Save these four arrays and the note to the same .npz file
End of explanation
"""
whos
"""
Explanation: Reload instructions
In my other analysis files, I'm going to load these variables from disk.
Restart the notebook so I start fresh and start from here.
End of explanation
"""
whos
load_filename = 'datap/bhp_nn_by_pair_1ns.npz'
npzfile = np.load(load_filename)
print(npzfile.files)
print(npzfile['note'])
pair_is = npzfile['pair_is']
bhp_nn_pos = npzfile['bhp_nn_pos']
bhp_nn_neg = npzfile['bhp_nn_neg']
dt_bin_edges = npzfile['dt_bin_edges']
"""
Explanation: Go back and import all the packages.
End of explanation
"""
bhp_nn_diff = np.subtract(bhp_nn_pos.astype(np.int32),bhp_nn_neg[:,::-1,::-1].astype(np.int32))
bhp_nn_diff.shape
"""
Explanation: Calculate bhp_nn_diff.
End of explanation
"""
bicorr.bicorr_plot(np.sum(bhp_nn_diff,axis=0),dt_bin_edges,show_flag=True,title='bhp_nn_diff')
"""
Explanation: Plot them to make sure they look good.
End of explanation
"""
|
4dsolutions/Python5 | Euler's Formula Using Tau.ipynb | mit | from math import e, pi as π
τ = 2 * π
i = 1j
result = e ** (i * τ)
print ("{:1.5f}".format(result.real))
"""
Explanation: Thanks to unicode, we may use Greek letters directly in our code.
In this Jupyter Notebook, lets use θ (theta), π (pi) and τ (tau) with τ = 2 * π.
Then we'll plot the graph of Euler's Formula, e to the i θ over the range 0 to τ.
In Python we signify i, the root of -1, as 1j for readability, so lets bind i to 1j as well.
End of explanation
"""
import matplotlib.pyplot as plt
import numpy as np
# Domain: angles from 0 to tau in 0.01-radian steps (τ and i are bound in
# the previous cell).
t = np.arange(0.0, τ, 0.01)
# Range: the real part of e**(i*θ), i.e. cos(θ), evaluated point by point.
s = np.array([(np.e ** (i * θ)).real for θ in t])
plt.plot(t, s)
plt.xlabel('radians')
plt.ylabel('real part')
plt.title('Euler\'s Formula from 0 to tau')
plt.grid(True)
plt.savefig("euler_test.png") # uploaded to Flickr for display below
"""
Explanation: Below we import some industrial grade tools used for plotting with Python. The same greek letter names remain active and guide the construction of a domain t and range s. Then we label the graph and generate a picture. plt.show(), if used, produces a plot in its own window.
End of explanation
"""
|
t-silvers/supreme-robot | TCGA_OV_exp-cn_jointplot.ipynb | mit | import xenaPython as xena
import seaborn as sns
import numpy as np
import scipy as scipy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
plt.rcParams.update({'figure.max_open_warning': 0})
import rpy2
%matplotlib inline
%load_ext rpy2.ipython
def accessXenaData(hub, data_set):
    """Fetch the sample IDs and gene/field names of one Xena dataset.

    Parameters
    ----------
    hub : str
        URL of the Xena hub hosting the dataset.
    data_set : str
        Name of the dataset on that hub.

    Returns
    -------
    tuple of (list, list)
        UTF-8 encoded sample identifiers and gene/field identifiers.
    """
    def _encode(names):
        # The Xena API returns text; encode each name to bytes.
        return [name.encode('UTF8') for name in names]

    sample_ids = xena.xenaAPI.dataset_samples(hub, data_set)
    field_ids = xena.xenaAPI.dataset_fields(hub, data_set)
    return _encode(sample_ids), _encode(field_ids)
def matchEnsemblData(biomart_df, xena_genes):
    """Keep only the Xena genes that have a Biomart annotation.

    Builds a one-column frame from *xena_genes*, inner-joins it against
    *biomart_df* on the HGNC symbol (preserving the Xena gene order), and
    keeps the first annotation row for each symbol so every gene appears
    exactly once.

    Returns the matched annotation DataFrame (Biomart columns only).
    """
    lookup = pd.DataFrame(xena_genes, columns=['Identifiers'])
    matched = pd.merge(lookup, biomart_df, left_on='Identifiers', right_on='hgnc_symbol')
    matched = matched.drop('Identifiers', axis=1)
    # Deduplicate by symbol, keeping the first annotation row per gene.
    matched = matched.drop_duplicates(subset='hgnc_symbol', keep='first')
    return matched
def analysisMultiIndexDataFrame(ensembl_df, type_of_analysis, analysis_samples, analysis_dataset, analysis_genes):
    """Download one Xena dataset and wrap it in a gene-annotated DataFrame.

    Parameters
    ----------
    ensembl_df : pandas.DataFrame
        Biomart annotations with 'hgnc_symbol', 'chromosome_name',
        'start_position' and 'end_position' columns, one row per gene.
    type_of_analysis : str
        Label for this dataset ('CNV' or 'RNA' in this notebook); stored
        as an extra column level so the two datasets can be merged later.
    analysis_samples : list
        Sample identifiers; become the rows of the result.
    analysis_dataset : str
        Xena dataset name passed to the API.
    analysis_genes : list
        Gene identifiers to download (columns before orphan filtering).

    Returns
    -------
    pandas.DataFrame
        samples x genes values with a 5-level column MultiIndex
        (hgnc_symbol, chromosome_name, start_position, end_position,
        analysis_type).

    NOTE(review): relies on the module-level ``hub`` URL and the ``xena``
    API.  Assumes ensembl_df row order matches the downloaded gene column
    order once orphan genes are dropped -- confirm when reusing.
    """
    # Per-gene metadata arrays used as MultiIndex column levels.
    hgnc_symbol = np.asarray(ensembl_df['hgnc_symbol'])
    chromosome_name = np.asarray(ensembl_df['chromosome_name'])
    start_position = np.asarray(ensembl_df['start_position'])
    end_position = np.asarray(ensembl_df['end_position'])
    # Repeat the dataset label once per gene so it forms a column level.
    analysis_type = np.array(len(hgnc_symbol) * [type_of_analysis])
    columns = pd.MultiIndex.from_arrays(
        [hgnc_symbol, chromosome_name, start_position, end_position, analysis_type],
        names=['hgnc_symbol','chromosome_name', 'start_position','end_position','analysis_type'])
    index = analysis_samples
    # Download values; transpose so rows are samples and columns are genes.
    data = pd.DataFrame(xena.xenaAPI.Probes_values (hub, analysis_dataset, analysis_samples, analysis_genes)).T
    data = pd.DataFrame(data.values.astype(float), columns = analysis_genes)
    # Forward-fill missing values down each column.
    data.fillna(method='ffill', inplace=True)
    data_columns = data.columns.tolist()
    # Drop genes that have no Biomart annotation (no hgnc_symbol match).
    orphan_genes = list(set(analysis_genes) - set(hgnc_symbol))
    data_2 = data.drop(orphan_genes, axis=1)
    values_plus_metadata = pd.DataFrame(data_2.values, index=index, columns=columns)
    return values_plus_metadata
hub = "https://tcga.xenahubs.net"
CNV_dataset = "TCGA.OV.sampleMap/Gistic2_CopyNumber_Gistic2_all_data_by_genes"
RNA_dataset = "TCGA.OV.sampleMap/AgilentG4502A_07_3"
"""
Explanation: Jointplot correlation of expression~copy_number
Access gene expression and gene copy number data using UCSC Xena python API and generate jointplots showing expression~CN.
=====================================
Author: Thomas Silvers
Date: 20170917
Parameters
----------
hub : "https://..." Xena hub where data located
CNV_dataset : xena data set with copy number data
RNA_dataset : xena data set with gene expression data
Returns
----------
*_samples : List of patient IDs in data set.
*_probes : List of gene names in data set.
*_jointplot : Jointplot figure.
End of explanation
"""
%%R
library(biomaRt)
library(plyr)
ensembl = useEnsembl(biomart="ensembl", dataset="hsapiens_gene_ensembl")
#=======================================================================#
# define biomart object
mart <- useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl")
# query biomart
get_chr_genes <- function(chr_num, mart_server){
subset(
unique(
getBM(
attributes=c(
'ensembl_gene_id',
'ensembl_transcript_id',
'hgnc_symbol',
'chromosome_name',
'start_position',
'end_position'
),
filters = 'chromosome_name', values =chr_num, mart = mart_server
)[-c(1:2)]
),
hgnc_symbol != ""
)
}
N <- 23
chr_L <- vector("list", N)
for( i in 1:21) {
chr <- get_chr_genes(i, ensembl)
chr_L[[i]] <- chr
}
chr_X <- get_chr_genes("X", ensembl)
chr_Y <- get_chr_genes("Y", ensembl)
chr_L[[22]] <- chr_X
chr_L[[23]] <- chr_Y
#=======================================================================#
chr_df <- ldply(chr_L, data.frame)
%Rpull chr_df
CNV_samples, CNV_probes = accessXenaData(hub, CNV_dataset)
RNA_samples, RNA_probes = accessXenaData(hub, RNA_dataset)
CNV_L_df = matchEnsemblData(chr_df, CNV_probes)
RNA_L_df = matchEnsemblData(chr_df, RNA_probes)
CNV_df = analysisMultiIndexDataFrame(CNV_L_df, 'CNV', CNV_samples, CNV_dataset, CNV_probes)
RNA_df = analysisMultiIndexDataFrame(RNA_L_df, 'RNA', RNA_samples, RNA_dataset, RNA_probes)
all_data_df = pd.merge(CNV_df, RNA_df, left_index=True, right_index=True)
all_data_df.index.names=['samples']
all_data_df.columns=all_data_df.columns.droplevel([u'chromosome_name', u'start_position', u'end_position'])
all_data_df=all_data_df.T.unstack([u'hgnc_symbol']).T
all_data_df=all_data_df.dropna()
sns.axes_style('white')
"""
Explanation: Using Rmagic to query Biomart in R and return dataframe with genomic coordinate data.
End of explanation
"""
Fig1C_dist_hex=sns.jointplot("CNV", "RNA", data=all_data_df, kind='hex', color='g')
"""
Explanation: Jointplot of all data points as hex to avoid overplotting
Used over scatter to avoid overplotting
End of explanation
"""
Fig1C_dist_reg=sns.jointplot("CNV", "RNA", data=all_data_df, kind='scatter', color='k')
overplot_both_df=all_data_df[((all_data_df.CNV < -0.5) | (all_data_df.CNV > 0.5)) & ((all_data_df.RNA < -1.0) | (all_data_df.RNA > 1.0))]
overplot_both_df=sns.jointplot("CNV", "RNA", data=overplot_both_df, kind='hex', color='g')
overplot_normal_copy_df=all_data_df[((all_data_df.CNV > -0.5) & (all_data_df.CNV < 0.5)) & ((all_data_df.RNA < -2.0) | (all_data_df.RNA > 2.0))]
overplot_normal_copy_df=sns.jointplot("CNV", "RNA", data=overplot_normal_copy_df, kind='hex', color='g')
"""
Explanation: Jointplot of all data points as scatter
Used under hex plot to to show variation
End of explanation
"""
|
nproctor/phys202-project | project/Morse Net Part 4.ipynb | mit | import NeuralNetImport as NN
import numpy as np
import NNpix as npx
from IPython.display import Image
"""
Explanation: Morse Code Neural Net
I created a text file that has the entire alphabet of numerical morse code. Meaning, "." is represented by the number "0.5" and "-" is represented by "1.0". This neural net is trained through that set until it's accuracy is 100%. Then, it is "tested" by user generated input. Once the weights are able to give the training set 100%, it will have 100% accuracy for all tested data, since the inputs do not change. Seeing if the neural network can determine similar but not exact test data requires the neural network to find the best fit function for data. In this neural network, it is not finding the "best-fit" function, but rather finding an exact function that satisfies the data. This isn't a question of how accurate the neural network can predict future data, but rather a question of how much does it take for a neural network to memorize perfect data.
I will be importing the neural net base code for this neural network.
End of explanation
"""
npx.morse1
"""
Explanation: Visualize Morse Code
Notice that numbes are represented by differing numbers of components. Some letters have 4 components and some have as little as one.
End of explanation
"""
enter = input("Enter Your Morse: ")
"""
Explanation: Enter Morse Cord Sentence
Note, each letter is separated by a single space. And each word is separated by four spaces. To test every letter of the alphabet, use the sentence below:
"- .... . --.- ..- .. -.-. -.- -... .-. --- .-- -. ..-. --- -..- .--- ..- -- .--. . -.. --- ...- . .-. - .... . .-.. .- --.. -.-- -.. --- --."
FOR THIS NOTEBOOK NOT TO RAISE ERRORS, THERE MUST BE AN INPUT BELOW
End of explanation
"""
def morse_to_num_str(morse):
    """Split a morse-code sentence into a 3D nested list of number strings.

    Dots become "0.5" and dashes become "1.0".  The result is nested as
    sentence -> words -> letters -> components.  Letters are NOT yet padded
    to a fixed length; morse_str_to_float does that.
    """
    # Encode each symbol as a number string with a trailing comma separator.
    morse = morse.replace(".", "0.5,")
    morse = morse.replace("-", "1.0,")
    new = list(morse)
    for i in range(len(new)):
        # A comma directly before a space marks a letter boundary; turn the
        # comma into a space so the separators below are pure whitespace.
        if i > 1 and new[i-1] == "," and new[i] == " ":
            new[i-1] = " "
        # Blank out the final character (the trailing comma left after the
        # last letter's replacement).
        if i == (len(new)-1):
            new[i] = ""
    new = "".join(new)
    # Split into words, then letters, then per-symbol components.
    # NOTE(review): the separators below are runs of spaces whose exact
    # widths must match the input convention (multiple spaces between words,
    # one between letters) -- confirm the literal whitespace if editing.
    a = new.split(" ")
    for i in range(len(a)):
        a[i] = a[i].split(" ")
    for h in range(len(a)):
        for j in range(len(a[h])):
            a[h][j] = a[h][j].split(",")
    return a
assert morse_to_num_str("-. -- -- ..") == [[['1.0', '0.5'], ['1.0', '1.0']], [['1.0', '1.0'], ['0.5', '0.5']]]
"""
Explanation: Morse Code to 3D Number Array
Use two functions to turn a morse code sentence into a 3D array of numbers. This first function turns the morse code into string numbers. Each dot is "0.5" and each dash is "1.0". Each letter is an array, each word is an array of arrays, and each sentence is an array of arrays of arrays.
End of explanation
"""
def morse_str_to_float(morse, width=4):
    """Convert the 3D string array from ``morse_to_num_str`` into floats.

    Pads every letter (innermost list) with "0.0" entries up to ``width``
    components, so the network always sees a fixed-size input vector, then
    converts each component to a float.

    Note: ``morse`` is modified in place; the padded float data is also
    returned as a numpy array.

    @param morse: 3D nested list of number strings (sentence/word/letter)
    @param width: components per letter (default 4, the longest morse
        letter in the alphabet; pass a larger value for digits etc.)
    @rtype: C{numpy.ndarray}
    """
    for i in range(len(morse)):
        for j in range(len(morse[i])):
            # "<" rather than "!=": a letter already longer than ``width``
            # must not send this loop spinning forever appending padding.
            while len(morse[i][j]) < width:
                morse[i][j].append("0.0")
            for k in range(len(morse[i][j])):
                morse[i][j][k] = float(morse[i][j][k])
    return np.array(morse)
assert np.all(morse_str_to_float([[['1.0', '0.5'], ['1.0', '1.0']], [['1.0', '1.0'], ['0.5', '0.5']]]) == np.array(([[[ 1. , 0.5, 0. , 0. ],
[ 1. , 1. , 0. , 0. ]],[[ 1. , 1. , 0. , 0. ],[ 0.5, 0.5, 0. , 0. ]]])))
"""
Explanation: This second function turns each string number into a float. Because our neural net needs a constant number of inputs, and morse letters have 1 to 4 components, "0.0" is appended to the end of each letter array that has fewer than four components.
End of explanation
"""
""" The entire morse alphabet in numerical morse"""
all_in = np.genfromtxt("MorseTxt.txt", delimiter=",", usecols=(1,2,3,4))
"""
Explanation: Create Input Array
This input array is the entire morse alphabet. Each letter has four number components that correspond to its dots and dashes. There are 4 inputs in the input layer.
End of explanation
"""
""" The letters that correspond with all-in above"""
real_letters = np.genfromtxt("MorseTxt.txt", dtype=str, delimiter=",", usecols=(0))
""" 26 element array of all the ouputs"""
all_out = NN.create_training_soln(np.genfromtxt("MorseTxt.txt", dtype=str, delimiter=",", usecols=(0)),26)
"""
Explanation: Create Solution Array
There are 26 possible solutions and therefore 26 neurons in the output layer. Different letters are represented by their placement in the alphabet. A is 0. The 0th node "firing" represents an A.
End of explanation
"""
morse_net = NN.NN_training(all_in, all_out, 4, 26, 30, 400, 0.7)
"""
Explanation: Training the Neural Network
This Neural Network has a input for every output and a unique output for every input. Because of this, the neural network must be trained to 100% accuracy on the training set to get a correct translation. For this, the neural net requires 30 neurons in the hidden layer and 400 iterations with a learning rate of 0.7.
End of explanation
"""
# x,y = morse_net.train()
f = np.load("MorseWeights.npz")
x = f['arr_0']
y = f['arr_1']
assert len(x) == 30
assert len(y) == 26
"""
Explanation: I am commenting out the cell below. This is how you would calculate weights, but for the demonstration, I will load weights from a previous training.
End of explanation
"""
morse_ask = NN.NN_ask(all_in, x, y)
comp_vals = [chr(morse_ask.get_ans()[i]+65) for i in range(26)]
assert np.all(comp_vals == real_letters)
"""
Explanation: Assert 100% Accuracy
End of explanation
"""
new_net = NN.NN_ask_morse(morse_str_to_float(morse_to_num_str(enter)), x, y)
ans = new_net.get_ans()
print("".join([chr(ans[i]) for i in range(len(ans))]))
"""
Explanation: Translate Morse
Because the Neural Network is perfectly trained, the accuracy of the "test data" will be 100%. Giving the neural net and morse code sentence will receive a perfect translation.
End of explanation
"""
|
anhquan0412/deeplearning_fastai | deeplearning1/nbs/lesson4.ipynb | apache-2.0 | ratings = pd.read_csv(path+'ratings.csv')
ratings.head()
len(ratings)
"""
Explanation: Set up data
We're working with the movielens data, which contains one rating per row, like this:
End of explanation
"""
movie_names = pd.read_csv(path+'movies.csv').set_index('movieId')['title'].to_dict
users = ratings.userId.unique()
movies = ratings.movieId.unique()
# userId and movieId become dictionary elements with values ranging from 0 to max len
userid2idx = {o:i for i,o in enumerate(users)}
movieid2idx = {o:i for i,o in enumerate(movies)}
"""
Explanation: Just for display purposes, let's read in the movie names too.
End of explanation
"""
ratings.movieId = ratings.movieId.apply(lambda x: movieid2idx[x])
ratings.userId = ratings.userId.apply(lambda x: userid2idx[x])
user_min, user_max, movie_min, movie_max = (ratings.userId.min(),
ratings.userId.max(), ratings.movieId.min(), ratings.movieId.max())
user_min, user_max, movie_min, movie_max
n_users = ratings.userId.nunique()
n_movies = ratings.movieId.nunique()
n_users, n_movies
"""
Explanation: We update the movie and user ids so that they are contiguous integers, which we want when using embeddings.
End of explanation
"""
n_factors = 50
np.random.seed = 42
"""
Explanation: This is the number of latent factors in each embedding.
End of explanation
"""
msk = np.random.rand(len(ratings)) < 0.8
trn = ratings[msk]
val = ratings[~msk]
"""
Explanation: Randomly split into training and validation.
End of explanation
"""
g=ratings.groupby('userId')['rating'].count()
topUsers=g.sort_values(ascending=False)[:15]
g=ratings.groupby('movieId')['rating'].count()
topMovies=g.sort_values(ascending=False)[:15]
top_r = ratings.join(topUsers, rsuffix='_r', how='inner', on='userId')
top_r = top_r.join(topMovies, rsuffix='_r', how='inner', on='movieId')
pd.crosstab(top_r.userId, top_r.movieId, top_r.rating, aggfunc=np.sum)
"""
Explanation: Create subset for Excel
We create a crosstab of the most popular movies and most movie-addicted users which we'll copy into Excel for creating a simple example. This isn't necessary for any of the modeling below however.
End of explanation
"""
user_in = Input(shape=(1,), dtype='int64', name='user_in')
u = Embedding(input_dim=n_users, output_dim=n_factors, input_length=1, embeddings_regularizer=l2(1e-4))(user_in)
movie_in = Input(shape=(1,), dtype='int64', name='movie_in')
m = Embedding(input_dim=n_movies, output_dim=n_factors, input_length=1, embeddings_regularizer=l2(1e-4))(movie_in)
x = dot([u, m], axes=2)
x = Flatten()(x)
model = Model([user_in, movie_in], x)
model.compile(Adam(0.001), loss='mse')
model.summary()
user_in
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=1,
validation_data=([val.userId, val.movieId], val.rating))
model.optimizer.lr=0.01
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=3,
validation_data=([val.userId, val.movieId], val.rating))
model.optimizer.lr=0.001
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=6,
validation_data=([val.userId, val.movieId], val.rating))
"""
Explanation: Dot product
The most basic model is a dot product of a movie embedding and a user embedding. Let's see how well that works:
End of explanation
"""
def embedding_input(name, n_in, n_out, reg):
    """Build an int64 scalar Input layer plus an L2-regularised Embedding on top of it.

    Returns the (input_layer, embedded_output) pair so callers can wire the
    input into a Model and the embedding into downstream layers.
    """
    input_layer = Input(shape=(1,), dtype='int64', name=name)
    embedded = Embedding(input_dim=n_in, output_dim=n_out, input_length=1,
                         embeddings_regularizer=l2(reg))(input_layer)
    return input_layer, embedded
user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4)
movie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4)
def create_bias(inp, n_in):
    """Attach a one-dimensional embedding to ``inp`` and flatten it to a scalar bias term."""
    bias_embedding = Embedding(input_dim=n_in, output_dim=1, input_length=1)
    return Flatten()(bias_embedding(inp))
ub = create_bias(user_in, n_users)
mb = create_bias(movie_in, n_movies)
x = dot([u, m], axes=2)
x = Flatten()(x)
x = add([x, ub])
x = add([x, mb])
model = Model([user_in, movie_in], x)
model.compile(Adam(0.001), loss='mse')
model.summary()
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=1,
validation_data=([val.userId, val.movieId], val.rating))
model.optimizer.lr=0.01
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=6,
validation_data=([val.userId, val.movieId], val.rating))
model.optimizer.lr=0.001
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=10,
validation_data=([val.userId, val.movieId], val.rating))
model.fit([trn.userId, trn.movieId], trn.rating, batch_size=batch_size, epochs=5,
validation_data=([val.userId, val.movieId], val.rating))
"""
Explanation: The best benchmarks are a bit over 0.9, so this model doesn't seem to be working that well...
Bias
The problem is likely to be that we don't have bias terms - that is, a single bias for each user and each movie representing how positive or negative each user is, and how good each movie is. We can add that easily by simply creating an embedding with one output for each movie and each user, and adding it to our output.
End of explanation
"""
model.save_weights(model_path+'bias.h5')
model.load_weights(model_path+'bias.h5')
"""
Explanation: This result is quite a bit better than the best benchmarks that we could find with a quick google search - so looks like a great approach!
End of explanation
"""
model.predict([np.array([3]), np.array([6])])
"""
Explanation: We can use the model to generate predictions by passing a pair of ints - a user id and a movie id. For instance, this predicts that user #3 would really enjoy movie #6.
End of explanation
"""
g=ratings.groupby('movieId')['rating'].count()
topMovies=g.sort_values(ascending=False)[:2000]
topMovies = np.array(topMovies.index)
"""
Explanation: Analyze results
To make the analysis of the factors more interesting, we'll restrict it to the top 2000 most popular movies.
End of explanation
"""
get_movie_bias = Model(movie_in, mb)
movie_bias = get_movie_bias.predict(topMovies)
movie_ratings = [(b[0], movie_names()[movies[i]]) for i,b in zip(topMovies,movie_bias)]
"""
Explanation: First, we'll look at the movie bias term. We create a 'model' - which in keras is simply a way of associating one or more inputs with one or more outputs, using the functional API. Here, our input is the movie id (a single id), and the output is the movie bias (a single float).
End of explanation
"""
sorted(movie_ratings, key=itemgetter(0))[:15]
sorted(movie_ratings, key=itemgetter(0), reverse=True)[:15]
"""
Explanation: Now we can look at the top and bottom rated movies. These ratings are corrected for different levels of reviewer sentiment, as well as different types of movies that different reviewers watch.
End of explanation
"""
get_movie_emb = Model(movie_in, m)
movie_emb = np.squeeze(get_movie_emb.predict([topMovies]))
movie_emb.shape
get_movie_emb.predict([topMovies]).shape
"""
Explanation: We can now do the same thing for the embeddings.
End of explanation
"""
from sklearn.decomposition import PCA
pca = PCA(n_components=3)
movie_pca = pca.fit(movie_emb.T).components_
fac0 = movie_pca[0]
movie_comp = [(f, movie_names()[movies[i]]) for f,i in zip(fac0, topMovies)]
"""
Explanation: Because it's hard to interpret 50 embeddings, we use PCA to simplify them down to just 3 vectors.
End of explanation
"""
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
fac1 = movie_pca[1]
movie_comp = [(f, movie_names()[movies[i]]) for f,i in zip(fac1, topMovies)]
"""
Explanation: Here's the 1st component. It seems to be 'critically acclaimed' or 'classic'.
End of explanation
"""
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
fac2 = movie_pca[2]
movie_comp = [(f, movie_names()[movies[i]]) for f,i in zip(fac2, topMovies)]
"""
Explanation: The 2nd is 'hollywood blockbuster'.
End of explanation
"""
sorted(movie_comp, key=itemgetter(0), reverse=True)[:10]
sorted(movie_comp, key=itemgetter(0))[:10]
"""
Explanation: The 3rd is 'violent vs happy'.
End of explanation
"""
# The following would be for Python 2 only
# reload(sys)
# sys.setdefaultencoding('utf8')
start=50; end=100
X = fac0[start:end]
Y = fac2[start:end]
plt.figure(figsize=(15,15))
plt.scatter(X, Y)
for i, x, y in zip(topMovies[start:end], X, Y):
plt.text(x,y,movie_names()[movies[i]], color=np.random.rand(3)*0.7, fontsize=14)
plt.show()
"""
Explanation: We can draw a picture to see how various movies appear on the map of these components. This picture shows the 1st and 3rd components.
End of explanation
"""
user_in, u = embedding_input('user_in', n_users, n_factors, 1e-4)
movie_in, m = embedding_input('movie_in', n_movies, n_factors, 1e-4)
x = concatenate([u, m], axis=2)
x = Flatten()(x)
x = Dropout(0.3)(x)
x = Dense(70, activation='relu')(x)
x = Dropout(0.75)(x)
x = Dense(1)(x)
nn = Model([user_in, movie_in], x)
nn.compile(Adam(0.001), loss='mse')
nn.summary()
nn.fit([trn.userId, trn.movieId], trn.rating, batch_size=64, epochs=8,
validation_data=([val.userId, val.movieId], val.rating))
"""
Explanation: Neural net
Rather than creating a special purpose architecture (like our dot-product with bias earlier), it's often both easier and more accurate to use a standard neural network. Let's try it! Here, we simply concatenate the user and movie embeddings into a single vector, which we feed into the neural net.
End of explanation
"""
|
tpin3694/tpin3694.github.io | machine-learning/.ipynb_checkpoints/adding_and_subtracting_matrices-checkpoint.ipynb | mit | # Load library
import numpy as np
"""
Explanation: Title: Adding And Subtracting Matrices
Slug: adding_and_subtracting_matrices
Summary: How to add and subtract matrices in Python.
Date: 2017-09-03 12:00
Category: Machine Learning
Tags: Vectors Matrices Arrays
Authors: Chris Albon
Preliminaries
End of explanation
"""
# Create matrix
matrix_a = np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 2]])
# Create matrix
matrix_b = np.array([[1, 3, 1],
[1, 3, 1],
[1, 3, 8]])
"""
Explanation: Create Matrices
End of explanation
"""
# Add two matrices
np.add(matrix_a, matrix_b)
"""
Explanation: Add Matrices
End of explanation
"""
# Subtract two matrices
np.subtract(matrix_a, matrix_b)
"""
Explanation: Subtract Matrices
End of explanation
"""
|
kunbud1989/scraping-google-news-indonesia | 2_Scraping_Content_Publisher_News_Indonesia.ipynb | mit | from goose import Goose
from pprint import pprint
import string
import datetime
class scrap_news(object):
    """Scrape the title and body text of one Indonesian news article.

    Usage: ``scrap_news(url).scrap_publisher_news()`` -> ``(title, content)``.
    Relies on the python-goose library and on Python 2 semantics:
    ``filter`` applied to a str returns a str there, which the cleanup
    code below depends on.
    """
    def __init__(self, url):
        # URL of the article to fetch and extract.
        self.url = url
    def scrap_publisher_news(self):
        """Fetch ``self.url`` with Goose and return a (title, content) tuple.

        Non-printable (non-ASCII) characters are stripped from both fields.
        If the extracted body is (nearly) empty, the article's AMP page is
        fetched as a fallback -- some publishers only expose clean text there.
        NOTE(review): ``title`` is not re-extracted on the AMP fallback path,
        and an empty-content result is still returned as-is; confirm both are
        intended.
        """
        g = Goose(
            {
                # 'browser_user_agent': 'Opera/9.80 (Android; Opera Mini/8.0.1807/36.1609; U; en) Presto/2.12.423 Version/12.16',
                'use_meta_language': False,  # use target_language below, don't auto-detect
                'target_language':'id',  # Indonesian
                'enable_image_fetching': False,
                'http_timeout': 2,  # seconds
            }
        )
        article = g.extract(url=self.url)
        content = article.cleaned_text
        # Keep only printable ASCII; under Python 2, filter(...) on a str
        # returns a str, so content/title remain strings.
        printable = set(string.printable)
        content = filter(lambda x: x in printable, content)
        title = article.title
        title = filter(lambda x: x in printable, title)
        if len(content) < 2 :
            # Body extraction failed -- retry with the AMP version of the page.
            article = g.extract(article.amphtml)
            content = article.cleaned_text
            content = filter(lambda x: x in printable, content)
        else:
            article = article  # no-op branch kept from the original flow
        if len(content) > 0 :
            title = title  # no-op; only the newline stripping below matters
            content = content.replace('\n','')  # collapse the body onto one line
        return (title, content)
"""
Explanation: Scraping Content Publisher News Indonesia
Dalam tahapan ini, kita akan melakukan scraping isi dari sebuah berita.
Silakan melakukan step pertama (1_Scraping_Google_News_Indonesia.ipynb) untuk menghasilkan list_links_google_news_indonesia.txt yang selanjutnya akan menjadi acuan kita mengambil content tersebut.
Requirement
Goose
Installation
Goose
sh
$ git clone https://github.com/kunbud1989/python-goose.git
$ cd python-goose
$ pip install -r requirements.txt
$ python setup.py install
Kode Program
End of explanation
"""
url = '''https://news.detik.com/berita/3494173/polisi-jl-jend-sudirman-macet-karena-salju-palsu-dari-busa-air-got'''
sn = scrap_news(url)
result = sn.scrap_publisher_news()
print('URL : %s' % url)
print('Title : %s' % result[0])
print('Content : %s' % result[1])
"""
Explanation: Result
Detik.com
End of explanation
"""
url = '''https://kumparan.com/kita-setara/menyingkirkan-stigma-buruk-hiv-aids'''
sn = scrap_news(url)
result = sn.scrap_publisher_news()
print('URL : %s' % url)
print('Title : %s' % result[0])
print('Content : %s' % result[1])
"""
Explanation: Kumparan
End of explanation
"""
url = '''http://celebrity.okezone.com/read/2017/05/06/33/1684964/el-rumi-rayakan-kelulusan-di-puncak-gunung-penanggungan'''
sn = scrap_news(url)
result = sn.scrap_publisher_news()
print('URL : %s' % url)
print('Title : %s' % result[0])
print('Content : %s' % result[1])
f = open('list_links_google_news_indonesia.txt','r')
list_google_news = f.read().replace('[','').replace(']','').replace("u'","").replace("'","").split(',')
set(list_google_news)
checkType = type(list_google_news)
pprint(checkType)
total_link = len(list_google_news)
pprint(total_link)
for link in list_google_news[:5]:
print(link)
import os
def generate_and_save_to_file(data):
    """Persist one scraped article to ``google_news/<title>.txt``.

    @param data: ``(title, content)`` tuple, as returned by
        ``scrap_publisher_news``
    @return: the path written, or the sentinel string
        ``'CONTENT NOT VALID'`` when the content is empty
    """
    if data[1]:
        fname = os.path.join('google_news', data[0] + '.txt')
        # ``with`` guarantees the handle is closed even if the write fails
        # (the old open/close pair leaked the handle on error).
        with open(fname, 'w') as f:
            f.write(data[1])
    else:
        fname = 'CONTENT NOT VALID'
    return fname
index_link = 1
for link in list_google_news:
try:
url = '''%s''' % link
sn = scrap_news(url)
result = sn.scrap_publisher_news()
fname = generate_and_save_to_file(result)
print('%d / %d : %s' % (index_link,total_link,fname))
except:
print('%d / %d : %s' % (index_link,total_link,'ERROR'))
pass
index_link = index_link + 1
os.listdir('google_news')
"""
Explanation: Metro TV News
End of explanation
"""
|
gully/adrasteia | notebooks/adrasteia_05-02_DR2_variability_catalog_exploratory.ipynb | mit | # %load /Users/obsidian/Desktop/defaults.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
! du -hs ../data/dr2/Gaia/gdr2/vari_classifier_result/csv
df0 = pd.read_csv('../data/dr2/Gaia/gdr2/vari_classifier_result/csv/VariClassifierResult_0.csv.gz')
df0.shape
df0.head()
"""
Explanation: Gaia DR2 variability catalogs
Part I: What's in them?
gully
May 2, 2018
End of explanation
"""
import glob
fns = glob.glob('../data/dr2/Gaia/gdr2/vari_classifier_result/csv/VariClassifierResult_*.csv.gz')
n_files = len(fns)
df_classifier = pd.DataFrame()
"""
Explanation: The catalog is not too long. We can just read in all the files and concatenate them
End of explanation
"""
for i, fn in enumerate(fns):
df_i = pd.read_csv(fn)
df_classifier = df_classifier.append(df_i, ignore_index=True)
df_classifier.shape
"""
Explanation: This step only takes 1 second:
End of explanation
"""
df_classifier.best_class_name.value_counts()
"""
Explanation: Ok, we have 363,969 classifications of variable stars, which matches exactly with the number presented in Table 1 of Brown et al. 2018. What are the categories?
End of explanation
"""
df_classifier.classifier_name.value_counts()
"""
Explanation: Section 7.3.3 of the Gaia DR2 Documentation lists the classification code definition.
The training set included objects of the classes targeted for publication in Gaia DR2 (listed in bold) as well as other types to reduce the contamination of the published classification results. The full list of object classes, with labels (used in the rest of this section) and corresponding descriptions, follows below.
RRAB: Fundamental-mode RR Lyrae stars.
MIRA: Long period variable stars of the o (omicron) Ceti type (Mira).
SR: Long period variable stars of the semiregular type.
RRC: First-overtone RR Lyrae stars.
DSCT: δ Scuti-type stars.
SXPHE: SX Phoenicis-type stars.
CEP: Classical (δ) Cepheids.
T2CEP: Type-II Cepheids.
RRD: Double-mode RR Lyrae stars.
ACEP: Anomalous Cepheids.
ARRD: Anomalous double-mode RR Lyrae stars.
The stars I'm interested in are not automatically classified, but are used in training the classifier that labels these stars. That's too bad, I'd like to see the lightcurves for these classes:
ACV: α2 Canum Venaticorum-type stars.
CONSTANT: Objects whose variations (or absence thereof) are consistent with those of constant sources (Section 7.2.3).
ECL: Eclipsing binary stars.
FLARES: Magnetically active stars displaying flares.
ROT: Rotation modulation in solar-like stars due to magnetic activity (spots).
RS: RS Canum Venaticorum-type stars.
SOLARLIKE: Stars with solar-like variability induced by magnetic activity (flares, spots, and rotational modulation).
Oh well, looks like these desired classifications may ellude us for now. What's the deal with the classifier name column? Section 7.3.1 of the Documentation explains:
The results of this classification can be found in the Gaia DR2 archive in the classification table associated with the nTransits:2+ classifier, although subsequent filtering [...] increased the minimum number of FoV transits to five
End of explanation
"""
df_classifier.drop(columns='classifier_name', inplace=True)
"""
Explanation: They all have the same entry, so let's drop this column.
End of explanation
"""
df_classifier.best_class_score.hist(bins=20)
plt.xlim(1, 0)
plt.xlabel('Best Class Score')
plt.ylabel('$N$')
"""
Explanation: What is the distribution of best class scores?
End of explanation
"""
df_summary = df_classifier.groupby('best_class_name').best_class_score.describe()
df_summary.style.format({key: "{:.0%}" for key in ['mean', 'std', 'min', '25%', '50%', '75%', 'max']})
"""
Explanation: Many sources have best class scores close to 1. How do the classifications break down by Class?
End of explanation
"""
df_summary['parent_fraction'] = df_summary['count']/df_summary['count'].sum()
df_summary.style.format({key: "{:.0%}" for key in ['mean', 'std', 'min', '25%', '50%', '75%', 'max']}
).format({'parent_fraction': "{:.2%}"})
"""
Explanation: Comparing the classification score between types might not be trivial, depending on what assumptions are made in the classifier (the prior probabilities vary drastically). For example, if you made a classifier that guessed MIRA_SR, you would be right 41% of the time. Finding annomalous Cepheids is a needle-in-a-haystack problem.
End of explanation
"""
|
banyh/ShareIPythonNotebook | NLP_With_Python/Ch4.ipynb | gpl-3.0 | a = list('hello') # a指向一個list物件
b = a # b指向a所指向的list物件
b[3] = 'x' # 改變物件第3個元素,因為實際件只有一個,所以a,b看到的物件會同時改變
a, b
a = ['maybe']
b = [a, a, a]
b
a[0] = 'will'
b
"""
Explanation: Ch4 Writing Structured Programs
Assignments
End of explanation
"""
a = ['play']
b = a[:]
a[0] = 'zero'
a, b
a = ['play']
b = [a, a]
a[0] = 'what'
a, b, id(a), id(b[0])
"""
Explanation: 注意: 如果你要複製list,必須用[:]來複製,否則只會複製指標。
End of explanation
"""
a is b[0], a is b[1]
b = a[:]
# 因為用複製的,所以值相同但物件不同
a is b, a == b
"""
Explanation: Equality
用==是比較兩個元素值是否相同。
用is是比較兩個元素是否參考同一個物件。
End of explanation
"""
e = []
if e: print e, " is not empty"
e = []
if not e: print e, " is empty"
"""
Explanation: Conditions
將list放在if中,會直接判斷list是否為空,相當於if len(list) > 0:。
End of explanation
"""
a = [0, 1, 2, 3, 4, 5]
any(a), all(a), 3 in a, 8 in a
"""
Explanation: any()判斷一個list是否存在True的元素,all()判斷一個list是否全為True,in用來判斷值是否存在list中。
End of explanation
"""
a = [3, 3, 2, 4, 1]
[item for item in a] # 原始順序
[item for item in sorted(a)] # 排序
[item for item in set(a)] # 只考慮唯一的元素
[item for item in reversed(a)] # 倒序
[item for item in set(a).difference([3,4])] # 不要某些元素
import random
random.shuffle(a) # shuffle後,會直接影響a內部的值
[item for item in a]
''.join(['hello', 'world']) # join可以將字串連在一起
"""
Explanation: Sequences
sequence最常用的操作是用for訪問每一個元素。
End of explanation
"""
a = [1, 2, 3, 4, 5]
(a[2], a[3], a[4]) = (5, 6, 7)
a
"""
Explanation: 利用tuple可以同時進行多個元素的取代。
End of explanation
"""
a = range(5)
b = range(5, 10)
zip(a, b, a, b)
list(enumerate(b)) # enumerate 會傳回 (index, a[index])
a = [5, 3, 2, 4, 1]
a.sort() # .sort() 會直接修改原始list
a
a = [5, 3, 2, 4, 1]
sorted(a), a # 用sorted()不會影響原始list
"""
Explanation: 用zip可以將多個list結合成tuple。
End of explanation
"""
'hello' * 3
['hello'] * 3
[['a'] * 3] * 2
"""
Explanation: 重覆元素的方法
End of explanation
"""
def func1(a):
    # Demonstrates that lists are passed by object reference: mutating
    # ``a`` here also changes the list the caller sees.
    a[0] = 'modified'
s = ['hello', 'world']
func1(s)
s
"""
Explanation: Function Inputs and Outputs
在設計function時,要注意,如果會修改輸入參數,最好不要有輸出,否則會讓使用者混淆。
def sort1(a): # OK, 會修改輸入但沒有輸出
a.sort()
def sort2(a): # OK, 不會修改輸入, 有輸出
return sorted(a)
def sort3(a): # BAD, 有修改輸入又有輸出, 一定會有人搞錯
a.sort()
return a
所有function的參數都是call-by-value,但要注意,如果參數是一個list,list傳入的value是物件id,傳到function內部後變成可修改的list。
End of explanation
"""
a = 'hello'
assert(isinstance(a, basestring)) # 沒問題
a = 3
assert(isinstance(a, basestring)) # 錯誤
"""
Explanation: Variable Scope
Python遵守LGB Rule,先找local,再找global,再找built-in。
function可以透過global關鍵字創造global變數,但實際上越少用越好,這會影響function的可用性。
Check Variable Type
一般用assert(cond)配合isinstance來完成。assert當參數為False時,會出現AssertionError。
End of explanation
"""
def hello(a):
    """
    Demonstration function used to show how docstrings work.
    Prints a hello-world greeting followed by ``a``.
    @param a: a string to be printed after the greeting
    @type a: C{basestring}
    @return: the constant 3.14 (illustrates documenting a return value)
    @rtype: C{float}
    """
    print 'hello world', a
    return(3.14)
print hello.__doc__
"""
Explanation: Documenting Function
End of explanation
"""
z = lambda w: w**2
z(5)
"""
Explanation: Lambda Expression
lambda是用來產生臨時性function的方法。
End of explanation
"""
def generic(*a, **b):
    # Demonstrates catch-all parameters.
    print a # a: tuple collecting all unnamed (positional) arguments
    print b # b: dict collecting all named (keyword) arguments
generic(1, 3.5, 'money', zzz='maybe', ggg='good')
def func(*a, z):
    # Intentionally broken demo: *a swallows every unnamed argument, so the
    # positional call below cannot bind z (and in Python 2 this parameter
    # list is itself a SyntaxError).
    print a, z # having *a collect all unnamed arguments makes z error out
func('hi', 'this')
"""
Explanation: Named Arguments
End of explanation
"""
nltk.corpus.__file__
help(nltk.bigrams)
"""
Explanation: Structure of a Python Module
End of explanation
"""
def insert(trie, key, value):
    """Walk (and extend) the letter trie along ``key``, storing ``value`` at the end.

    Iterative equivalent of the recursive formulation: descend one character
    at a time, creating empty sub-dicts as needed, then record the payload
    under the reserved 'value' key of the final node.
    """
    node = trie
    for char in key:
        if char not in node:
            node[char] = {}
        node = node[char]
    node['value'] = value
trie = nltk.defaultdict(dict)
insert(trie, 'chat', 100)
insert(trie, 'chair', 2000)
insert(trie, 'chien', 150)
trie
trie['c']['h']['a']['t']['value']
"""
Explanation: Letter Trie
End of explanation
"""
%matplotlib inline
import matplotlib
from matplotlib import pylab
import nltk
nltk.ngrams()
"""
Explanation: Matplotlib
End of explanation
"""
|
Pittsburgh-NEH-Institute/Institute-Materials-2017 | schedule/week_2/Tokenization.ipynb | gpl-3.0 | from collatex import *
"""
Explanation: Tokenization
Default tokenization
Tokenization (the first of the five parts of the Gothenburg model) divides the texts to be collated into tokens, which are most commonly (but not obligatorily) words. By default CollateX considers punctuation to be its own token, which means that the witness readings “Hi!” and “Hi” will both contain a token that reads “Hi” (and the first witness will contain an additional token, which reads “!”). In this situation, that’s the behavior the user probably wants, since both witnesses contain what a human would recognize as the same word.
We are going to be using the CollateX library to demonstrate tokenization, so let's go ahead and import it.
End of explanation
"""
collation = Collation()
collation.add_plain_witness("A", "Peter's cat.")
collation.add_plain_witness("B", "Peter's dog.")
table = collate(collation, segmentation=False)
print(table)
"""
Explanation: Issues with default tokenization
But is a word like “Peter’s” the same word as “Peter” for collation purposes? Because CollateX will regard the apostrophe as a separate token, “Peter’s” will be tokenized as three tokens: the name, the apostrophe, and the possessive. Here’s the default behavior:
End of explanation
"""
input = "Peter's cat."
print(input)
"""
Explanation: For possessives that may be acceptable behavior, but how about contractions like “didn’t” or “A’dam” (short for “Amsterdam”)? If the default tokenization does what you need, so much the better, but if not, you can override it according to your own requirements. Below we describe what CollateX does by default and how to override that behavior and perform your own tokenization.
How CollateX tokenizes: default behavior
The default tokenizer built into CollateX defines a token as a string of either alphanumeric characters (in any writing system) or non-alphanumeric characters, in both cases including any (optional) trailing whitespace. This means that the input reading “Peter’s cat.” will be analyzed as consisting of five tokens: “Peter” plus “’” plus “s ” plus “cat” plus “.”. For alignment purposes CollateX ignores any trailing white space, so that “cat” in “The cat in the hat” would be tokenzied as “cat ” (with a trailing space), but for collation purposes it would match the “cat” in “Peter’s cat.”, which has no trailing space because it’s followed by a period.
If we need to override the default tokenization behavior, we can create our own tokenized input and tell CollateX to use that, instead of letting CollateX perform the tokenization itself prior to collation.
Doing your own tokenization
In a way that is consistent with the modular design of the Gothenburg model, CollateX permits the user to change the tokenization without having to change the other parts of the collation process. Since the tokenizer passes to CollateX the indivisible units that are to be aligned, performing our own collation means specifying those units on our own. We will now look at how we can split a text into tokens the way we prefer.
Automating the tokenization
In the example above we built our token list by hand, but that obviously isn’t scalable to a real project with more than a handful of words. Let’s enhance the code above so that it builds the token lists for us by tokenizing the input strings according to our requirements. This is where projects have to identify and formalize their own specifications, since, unfortunately, there is no direct way to tell Python to read your mind and “keep punctuation with adjacent letters when I want it there, but not when I don’t.” For this example, we’ll write a tokenizer that breaks a string first on white space (which would give us two tokens: “Peter’s” and “cat.”) and then, within those intermediate tokens, on final punctuation (separating the final period from “cat” but not breaking on the internal apostrophe in “Peter’s”). This strategy would also keep English-language contractions together as single tokens, but as we’ve written it, it wouldn’t separate a leading quotation mark from a word token, although that’s a behavior we’d probably want. In Real Life we might fine-tune the routine still further, but for this tutorial we’ll prioritize just handling the sample data.
Splitting on white space and then separating final but not internal punctuation
To develop our tokenization, let’s start with:
End of explanation
"""
import re
# Sample witness text; named input_text to avoid shadowing the built-in input().
input_text = "Peter's cat."
# Split on runs of whitespace to get word-level chunks.
words = re.split(r'\s+', input_text)
print(words)
"""
Explanation: and split it into a list of whitespace-separated words with the Python re library, which we will import here so that we can use it below.
End of explanation
"""
# Sample witness text; named input_text to avoid shadowing the built-in input().
input_text = "Peter's cat."
words = re.split(r'\s+', input_text)
# For each word, match either everything up to the last word character
# (keeping internal punctuation such as the apostrophe) or a trailing run
# of punctuation, yielding one token list per word.
tokens_by_word = [re.findall(r'.*\w|\W+$', word) for word in words]
print(tokens_by_word)
"""
Explanation: Now let’s treat final punctuation as a separate token without splitting on internal punctuation:
End of explanation
"""
# Sample witness text; named input_text to avoid shadowing the built-in input().
input_text = "Peter's cat."
words = re.split(r'\s+', input_text)
tokens_by_word = [re.findall(r'.*\w|\W+$', word) for word in words]
# Flatten the nested per-word token lists into one flat list of tokens.
tokens = [token for word_tokens in tokens_by_word for token in word_tokens]
print(tokens)
"""
Explanation: The regex says that a token is either a string of any characters that ends in a word character (which will match “Peter’s” with the internal apostrophe as one token, since it ends in “s”, which is a word character) or a string of non-word characters. The re.findall method will give us back a list of all the separate (i.e. non-overlapping) times our expression matched. In the case of the string cat., the .*\w alternative matches cat (i.e. anything ending in a word character), and then the \W+ alternative matches . (i.e anything that is made entirely of non-word characters).
We now have three tokens, but they’re in nested lists, which isn’t what we want. Rather, we want a single list with all the tokens on the same level. We can accomplish that with a for loop and the .extend method for lists:
End of explanation
"""
# Sample witness text; named input_text to avoid shadowing the built-in input().
input_text = "Peter's cat."
words = re.split(r'\s+', input_text)
tokens_by_word = [re.findall(r'.*\w|\W+$', word) for word in words]
# Flatten the nested per-word token lists into one flat list of tokens.
tokens = [token for word_tokens in tokens_by_word for token in word_tokens]
# Wrap each token string in the {"t": ...} dictionary form CollateX expects.
token_list = [{"t": token} for token in tokens]
print(token_list)
"""
Explanation: We’ve now split our witness text into tokens, but instead of returning them as a list of strings, we need to format them into the list of Python dictionaries that CollateX requires. So let's talk about what CollateX requires.
Specifying the witnesses to be used in the collation
The format in which CollateX expects to receive our custom lists of tokens for all witnesses to be collated is a Python dictionary, which has the following structure:
{ "witnesses": [ witness_a, witness_b ] }
This is a Python dictionary whose key is the word witnesses, and whose value is a list of the witnesses (that is, the sets of text tokens) that we want to collate. Doing our own tokenization, then, means building a dictionary like the one above and putting our custom tokens in the correct format where the witness_a and witness_b variables stand above.
Specifying the siglum and token list for each witness
The witness data for each witness is a Python dictionary that must contain two properties, which have as keys the strings id and tokens. The value for the id key is a string that will be used as the siglum of the witness in any CollateX output. The value for the tokens key is a Python list of tokens that comprise the text (much like what we have made with our regular expressions, but we have one more step to get through first!).
witness_a = { "id": "A", "tokens": list_of_tokens_for_witness_a }
Specifying the tokens for each witness
Each token for each witness is a Python dictionary with at least one member, which has the key "t" (think “text”). You'll learn in the Normalization unit what else you can put in here. A token for the string “cat” would look like:
{ "t": "cat" }
The key for every token is the string "t"; the value for this token is the string "cat". As noted above, the tokens for a witness are structured as a Python list, so if we chose to split our text only on whitespace we would tokenize our first witness as:
list_of_tokens_for_witness_a = [ { "t": "Peter's" }, { "t": "cat." } ]
Our witness has two tokens, instead of the five that the default tokenizer would have provided, because we’ve done the tokenization ourselves according to our own specifications.
Putting it all together
For ease of exposition we’ve used variables to limit the amount of code we write in any one line. We define our sets of tokens as:
list_of_tokens_for_witness_a = [ { "t": "Peter's" }, { "t": "cat." } ]
list_of_tokens_for_witness_b = [ { "t": "Peter's" }, { "t": "dog." } ]
Once we have those, we can define our witnesses that bear these tokens:
witness_a = { "id": "A", "tokens": list_of_tokens_for_witness_a }
witness_b = { "id": "B", "tokens": list_of_tokens_for_witness_b }
until finally we define our collation set as:
{ "witnesses": [ witness_a, witness_b ] }
with variables that point to the data for the two witnesses.
It is also possible to represent the same information directly, without variables:
{"witnesses": [
{
"id": "A",
"tokens": [
{"t": "Peter's"},
{"t": "cat."}
]
},
{
"id": "B",
"tokens": [
{"t": "Peter's"},
{"t": "dog."}
]
}
]}
So let's put a single witness together in the format CollateX requires, starting with that list of tokens we made.
End of explanation
"""
def tokenize(input):
    """Split a witness string into CollateX token dictionaries.

    Tokens are produced by splitting on whitespace and then detaching
    trailing punctuation, while keeping internal punctuation (e.g. the
    apostrophe in "Peter's") attached to its word.

    NOTE(review): the parameter shadows the built-in input(); the name is
    kept for backward compatibility with existing callers.
    """
    words = re.split(r'\s+', input)  # split on whitespace
    # break off final (but not internal) punctuation, one list per word
    tokens_by_word = [re.findall(r'.*\w|\W+$', word) for word in words]
    # flatten the nested lists in a single comprehension instead of a
    # manual extend loop, then wrap each token in CollateX's dict form
    tokens = [token for word_tokens in tokens_by_word for token in word_tokens]
    return [{"t": token} for token in tokens]
# Tokenize both witnesses and wrap them in the structure CollateX expects:
# {"witnesses": [...]}, with an "id" and a "tokens" list per witness.
input_a = "Peter's cat."
input_b = "Peter's dog."
tokens_a = tokenize(input_a)
tokens_b = tokenize(input_b)
witness_a = { "id": "A", "tokens": tokens_a }
witness_b = { "id": "B", "tokens": tokens_b }
# NOTE(review): this rebinds the name input, shadowing the built-in input().
input = { "witnesses": [ witness_a, witness_b ] }
input
"""
Explanation: Since we want to tokenize all of our witnesses, let’s turn our tokenization routine into a Python function that we can call with different input text:
End of explanation
"""
# Collate the custom-tokenized witnesses; segmentation=False keeps each
# token in its own table cell instead of merging runs of agreement.
# NOTE(review): collate is presumably imported from the collatex package
# in an earlier cell -- confirm it is in scope here.
table = collate(input, segmentation=False)
print(table)
"""
Explanation: Let's see how it worked! Here is how to give the tokens to CollateX.
End of explanation
"""
## Your code goes here
"""
Explanation: Hands-on
The task
Suppose you want to keep the default tokenization (punctuation is always a separate token), except that:
Words should not break on internal hyphenation. For example, “hands-on” should be treated as one word.
English possessive apostrophe + “s” should be its own token. For example, “Peter’s” should be tokenized as “Peter” plus “’s”.
How to think about the task
Create a regular expression that mimics the default behavior, where punctuation is a separate token.
Enhance it to exclude hyphens from the inventory of punctuation that signals a token division.
Enhance it to treat “’s” as a separate token.
You can practice your regular expressions at http://www.regexpal.com/.
Sample sentence
Peter’s cat has completed the hands-on tokenization exercise.
End of explanation
"""
from lxml import etree
# Parse the TEI-encoded poem into an lxml ElementTree.
with open('ozymandias.xml', encoding='utf-8') as f:
    ozzy = etree.parse(f)
print("Got an ElementTree with root tag", ozzy.getroot().tag)
# Serialize back to a string so we can inspect what was parsed.
print(etree.tostring(ozzy).decode('utf-8'))
"""
Explanation: The next step: tokenizing XML
After all that work on marking up your document in XML, you are certainly going to want to tokenize it! This works in basically the same way, only we also have to learn to use an XML parser.
Personally I favor the lxml.etree library, though its methods of handling text nodes takes some getting used to. If you have experience with more standard XML parsing models, take a look at the Integrating XML with Python notebook in this directory. We will see as we go along how etree works.
For this exercise, let's tokenize the Ozymandias file that we were working on yesterday. It's a good idea to work with "our" version of the file until you understand what is going on here, but once you think you have the hang of it, feel free to try it with the file you marked up!
End of explanation
"""
def tei(tag):
    """Qualify *tag* with the TEI namespace in lxml's Clark ("{uri}tag") notation."""
    # Plain concatenation instead of the dated %-formatting.
    return "{http://www.tei-c.org/ns/1.0}" + tag
tei('text')
"""
Explanation: Notice here what ETree does with the namespace! It doesn't naturally like namespace prefixes like tei:, but prefers to just stick the entire URL in curly braces. We can make a little shortcut to do this for us, and then we can use it to find our elements.
End of explanation
"""
# Print the text directly inside each TEI <phr> element.
# NOTE(review): .text only yields the text before the first child element,
# so anything after an embedded <lb/> is missing here (handled below).
for phrase in ozzy.iter(tei('phr')):
    print(phrase.text)
"""
Explanation: In our Ozymandias file, the words of the poem are contained in phrases. So let's start by seeking out all the <phr> elements and getting their text.
End of explanation
"""
# Reassemble each phrase: the element's own .text plus the .tail text
# that follows each child element (e.g. the words after an <lb/>).
for phrase in ozzy.iter(tei('phr')):
    content = phrase.text
    for child in phrase:
        # assumes phrase.text and every child.tail are non-None for this
        # document -- TODO confirm; a None tail would raise TypeError here
        content = content + child.tail
    print(content)
"""
Explanation: This looks plausible at first, but we notice pretty soon that we are missing pieces of line - the third line, for example, should read something like
"Two vast and trunkless legs of stone
<lb/>Stand in the desart....
What's going on?
Here is the slightly mind-bending thing about ETree: each element has not only textual content, but can also have a text tail. In this case, the <phr> element has the following contents:
Text content: Two vast and trunkless legs of stone\n
A child element: <lb/>
The <lb/> has no content, but it does have a tail! The tail is Stand in the desart.... and we have to ask for it separately. So let's try this - instead of getting just the text of each element, let's get its text AND the tail of any child elements. Here's how we do that.
End of explanation
"""
# Collect tokens across the whole poem: rebuild each phrase's full text
# (own text + child tails) and feed it through our tokenize() function.
tokens = []
for phrase in ozzy.iter(tei('phr')):
    content = phrase.text
    for child in phrase:
        content = content + child.tail
    tokens.extend(tokenize(content))
print(tokens)
"""
Explanation: Now that's looking better. We have a bunch of text, and now all we need to do is tokenize it! For this we can come back to the function that we wrote earlier, tokenize. Let's plug each of these bits of content in turn into our tokenizer, and see what we get.
End of explanation
"""
# Same pipeline on the variant encoding: parse, dump for inspection,
# then rebuild and tokenize each phrase.
with open('ozymandias_2.xml', encoding='utf-8') as f:
    ozzy2 = etree.parse(f)
print(etree.tostring(ozzy2).decode('utf-8'))
tokens = []
for phrase in ozzy2.iter(tei('phr')):
    content = phrase.text
    for child in phrase:
        content = content + child.tail
    tokens.extend(tokenize(content))
print(tokens)
"""
Explanation: Adding complexity
As XML tokenization goes, this one was pretty straightforward - all your text was in <phr> elements, and none of the text was in any child element, so we were able to get by with a combination of .text and .tail for the elements we encountered. What if our markup isn't so simple? What do we do?
Here is where you start to really have to grapple with the fact that TEI allows a thousand encoding variations to bloom. In order to tokenize your particular text, you will have to think about what you encoded and how, and what "counts" as text you want to extract.
In the file ozymandias_2.xml I have provided a simple example of this. Here the encoder chose to add the canonical spelling for the word "desert" in a <corr> element, as part of a <choice>. If I tokenize that file in the same way as above, here is what I get.
End of explanation
"""
# Tokenize again, this time descending into <choice> elements so the
# <corr> reading ("desert") is included rather than silently dropped.
tokens = []
for phrase in ozzy2.iter(tei('phr')):
    content = phrase.text
    for child in phrase:
        if child.tag == tei('choice'):
            ## We want the <corr> reading; etree won't assume there is
            ## only one, so we iterate over "all" of them.
            ## NOTE(review): the loop variable is named sic but actually
            ## holds <corr> elements.
            for sic in child.iter(tei('corr')):
                content = content + sic.text
        content = content + child.tail
    tokens.extend(tokenize(content))
print(tokens)
"""
Explanation: Notice that I have neither "desert" nor "desart"! That is because, while I got the tail of the <choice> element, I didn't look inside it, and I didn't visit the <sic> or <corr> elements at all. I have to make my logic a little more complex, and I also have to think about which alternative I want. Let's say that I want to stay relatively true to the original. Here is the sort of thing I would have to do.
End of explanation
"""
|
ktmud/deep-learning | gan_mnist/Intro_to_GANs_Solution.ipynb | mit | %matplotlib inline
import pickle as pkl
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# Download (if needed) and load the MNIST dataset into MNIST_data/.
mnist = input_data.read_data_sets('MNIST_data')
"""
Explanation: Generative Adversarial Network
In this notebook, we'll be building a generative adversarial network (GAN) trained on the MNIST dataset. From this, we'll be able to generate new handwritten digits!
GANs were first reported on in 2014 from Ian Goodfellow and others in Yoshua Bengio's lab. Since then, GANs have exploded in popularity. Here are a few examples to check out:
Pix2Pix
CycleGAN
A whole list
The idea behind GANs is that you have two networks, a generator $G$ and a discriminator $D$, competing against each other. The generator makes fake data to pass to the discriminator. The discriminator also sees real data and predicts if the data it's received is real or fake. The generator is trained to fool the discriminator, it wants to output data that looks as close as possible to real data. And the discriminator is trained to figure out which data is real and which is fake. What ends up happening is that the generator learns to make data that is indistiguishable from real data to the discriminator.
The general structure of a GAN is shown in the diagram above, using MNIST images as data. The latent sample is a random vector the generator uses to contruct it's fake images. As the generator learns through training, it figures out how to map these random vectors to recognizable images that can foold the discriminator.
The output of the discriminator is a sigmoid function, where 0 indicates a fake image and 1 indicates an real image. If you're interested only in generating new images, you can throw out the discriminator after training. Now, let's see how we build this thing in TensorFlow.
End of explanation
"""
def model_inputs(real_dim, z_dim):
    """Build the graph's input placeholders.

    Returns a (real-image placeholder, latent-vector placeholder) pair,
    each of shape (batch, dim) with a flexible batch dimension.
    """
    real_ph = tf.placeholder(tf.float32, (None, real_dim), name='input_real')
    z_ph = tf.placeholder(tf.float32, (None, z_dim), name='input_z')
    return real_ph, z_ph
"""
Explanation: Model Inputs
First we need to create the inputs for our graph. We need two inputs, one for the discriminator and one for the generator. Here we'll call the discriminator input inputs_real and the generator input inputs_z. We'll assign them the appropriate sizes for each of the networks.
End of explanation
"""
def generator(z, out_dim, n_units=128, reuse=False, alpha=0.01):
    """Generator network: latent vector z -> fake image with tanh outputs.

    Args:
        z: latent input tensor, shape (batch, z_dim).
        out_dim: size of the generated output (784 for flattened MNIST).
        n_units: hidden layer width.
        reuse: reuse variables in the 'generator' scope (for sampling).
        alpha: leaky-ReLU slope for negative inputs.
    """
    with tf.variable_scope('generator', reuse=reuse):
        # Hidden layer
        h1 = tf.layers.dense(z, n_units, activation=None)
        # Leaky ReLU: f(x) = max(alpha*x, x)
        h1 = tf.maximum(alpha * h1, h1)
        # Logits and tanh output (range -1..1, matching the rescaled images)
        logits = tf.layers.dense(h1, out_dim, activation=None)
        out = tf.tanh(logits)
        return out
"""
Explanation: Generator network
Here we'll build the generator network. To make this network a universal function approximator, we'll need at least one hidden layer. We should use a leaky ReLU to allow gradients to flow backwards through the layer unimpeded. A leaky ReLU is like a normal ReLU, except that there is a small non-zero output for negative input values.
Variable Scope
Here we need to use tf.variable_scope for two reasons. Firstly, we're going to make sure all the variable names start with generator. Similarly, we'll prepend discriminator to the discriminator variables. This will help out later when we're training the separate networks.
We could just use tf.name_scope to set the names, but we also want to reuse these networks with different inputs. For the generator, we're going to train it, but also sample from it as we're training and after training. The discriminator will need to share variables between the fake and real input images. So, we can use the reuse keyword for tf.variable_scope to tell TensorFlow to reuse the variables instead of creating new ones if we build the graph again.
To use tf.variable_scope, you use a with statement:
python
with tf.variable_scope('scope_name', reuse=False):
# code here
Here's more from the TensorFlow documentation to get another look at using tf.variable_scope.
Leaky ReLU
TensorFlow doesn't provide an operation for leaky ReLUs, so we'll need to make one . For this you can use take the outputs from a linear fully connected layer and pass them to tf.maximum. Typically, a parameter alpha sets the magnitude of the output for negative values. So, the output for negative input (x) values is alpha*x, and the output for positive x is x:
$$
f(x) = max(\alpha * x, x)
$$
Tanh Output
The generator has been found to perform the best with $tanh$ for the generator output. This means that we'll have to rescale the MNIST images to be between -1 and 1, instead of 0 and 1.
End of explanation
"""
def discriminator(x, n_units=128, reuse=False, alpha=0.01):
    """Discriminator network: image -> probability that the image is real.

    Returns (sigmoid output, raw logits); the logits feed the numerically
    stable sigmoid cross-entropy losses.
    """
    with tf.variable_scope('discriminator', reuse=reuse):
        # Hidden layer
        h1 = tf.layers.dense(x, n_units, activation=None)
        # Leaky ReLU: f(x) = max(alpha*x, x)
        h1 = tf.maximum(alpha * h1, h1)
        logits = tf.layers.dense(h1, 1, activation=None)
        out = tf.sigmoid(logits)
        return out, logits
"""
Explanation: Discriminator
The discriminator network is almost exactly the same as the generator network, except that we're using a sigmoid output layer.
End of explanation
"""
# Size of input image to discriminator
input_size = 784  # 28x28 MNIST images, flattened
# Size of latent vector to generator
z_size = 100
# Sizes of hidden layers in generator and discriminator
g_hidden_size = 128
d_hidden_size = 128
# Leak factor for leaky ReLU
alpha = 0.01
# Smoothing: real labels become 1 - smooth = 0.9 (label smoothing)
smooth = 0.1
"""
Explanation: Hyperparameters
End of explanation
"""
tf.reset_default_graph()
# Create our input placeholders
input_real, input_z = model_inputs(input_size, z_size)
# Build the model
g_model = generator(input_z, input_size, n_units=g_hidden_size, alpha=alpha)
# g_model is the generator output
# Two discriminator "towers": reuse=True makes the fake-image tower share
# weights with the real-image tower.
d_model_real, d_logits_real = discriminator(input_real, n_units=d_hidden_size, alpha=alpha)
d_model_fake, d_logits_fake = discriminator(g_model, reuse=True, n_units=d_hidden_size, alpha=alpha)
"""
Explanation: Build network
Now we're building the network from the functions defined above.
First is to get our inputs, input_real, input_z from model_inputs using the sizes of the input and z.
Then, we'll create the generator, generator(input_z, input_size). This builds the generator with the appropriate input and output sizes.
Then the discriminators. We'll build two of them, one for real data and one for fake data. Since we want the weights to be the same for both real and fake data, we need to reuse the variables. For the fake data, we're getting it from the generator as g_model. So the real data discriminator is discriminator(input_real) while the fake discriminator is discriminator(g_model, reuse=True).
End of explanation
"""
# Calculate losses
# Discriminator: real images should score 1 (smoothed to 0.9), fakes 0.
d_loss_real = tf.reduce_mean(
                  tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_real,
                                                          labels=tf.ones_like(d_logits_real) * (1 - smooth)))
# Fix: labels must be shaped like the *fake* logits (was zeros_like of
# d_logits_real, which only worked because both batches are the same size).
d_loss_fake = tf.reduce_mean(
                  tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                          labels=tf.zeros_like(d_logits_fake)))
d_loss = d_loss_real + d_loss_fake

# Generator: wants the discriminator to label its fakes as real (1).
g_loss = tf.reduce_mean(
             tf.nn.sigmoid_cross_entropy_with_logits(logits=d_logits_fake,
                                                     labels=tf.ones_like(d_logits_fake)))
"""
Explanation: Discriminator and Generator Losses
Now we need to calculate the losses, which is a little tricky. For the discriminator, the total loss is the sum of the losses for real and fake images, d_loss = d_loss_real + d_loss_fake. The losses will be sigmoid cross-entropies, which we can get with tf.nn.sigmoid_cross_entropy_with_logits. We'll also wrap that in tf.reduce_mean to get the mean for all the images in the batch. So the losses will look something like
python
tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=labels))
For the real image logits, we'll use d_logits_real which we got from the discriminator in the cell above. For the labels, we want them to be all ones, since these are all real images. To help the discriminator generalize better, the labels are reduced a bit from 1.0 to 0.9, for example, using the parameter smooth. This is known as label smoothing, typically used with classifiers to improve performance. In TensorFlow, it looks something like labels = tf.ones_like(tensor) * (1 - smooth)
The discriminator loss for the fake data is similar. The logits are d_logits_fake, which we got from passing the generator output to the discriminator. These fake logits are used with labels of all zeros. Remember that we want the discriminator to output 1 for real images and 0 for fake images, so we need to set up the losses to reflect that.
Finally, the generator losses are using d_logits_fake, the fake image logits. But, now the labels are all ones. The generator is trying to fool the discriminator, so it wants to discriminator to output ones for fake images.
End of explanation
"""
# Optimizers
learning_rate = 0.002
# Get the trainable_variables, split into G and D parts
# (the variable-scope name prefixes set up earlier make this split possible)
t_vars = tf.trainable_variables()
g_vars = [var for var in t_vars if var.name.startswith('generator')]
d_vars = [var for var in t_vars if var.name.startswith('discriminator')]
# Each optimizer only updates its own network's weights via var_list.
d_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(d_loss, var_list=d_vars)
g_train_opt = tf.train.AdamOptimizer(learning_rate).minimize(g_loss, var_list=g_vars)
"""
Explanation: Optimizers
We want to update the generator and discriminator variables separately. So we need to get the variables for each part and build optimizers for the two parts. To get all the trainable variables, we use tf.trainable_variables(). This creates a list of all the variables we've defined in our graph.
For the generator optimizer, we only want the generator variables. Our past selves were nice and used a variable scope to start all of our generator variable names with generator. So, we just need to iterate through the list from tf.trainable_variables() and keep variables to start with generator. Each variable object has an attribute name which holds the name of the variable as a string (var.name == 'weights_0' for instance).
We can do something similar with the discriminator. All the variables in the discriminator start with discriminator.
Then, in the optimizer we pass the variable lists to var_list in the minimize method. This tells the optimizer to only update the listed variables. Something like tf.train.AdamOptimizer().minimize(loss, var_list=var_list) will only train the variables in var_list.
End of explanation
"""
batch_size = 100
epochs = 100
samples = []
losses = []
# Only save generator variables
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for e in range(epochs):
        for ii in range(mnist.train.num_examples//batch_size):
            batch = mnist.train.next_batch(batch_size)
            # Get images, reshape and rescale to pass to D
            # (rescale [0,1] -> [-1,1] to match the generator's tanh range)
            batch_images = batch[0].reshape((batch_size, 784))
            batch_images = batch_images*2 - 1
            # Sample random noise for G
            batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
            # Run optimizers
            _ = sess.run(d_train_opt, feed_dict={input_real: batch_images, input_z: batch_z})
            _ = sess.run(g_train_opt, feed_dict={input_z: batch_z})
        # At the end of each epoch, get the losses and print them out
        # (computed only on the final batch of the epoch, not the full set)
        train_loss_d = sess.run(d_loss, {input_z: batch_z, input_real: batch_images})
        train_loss_g = g_loss.eval({input_z: batch_z})
        print("Epoch {}/{}...".format(e+1, epochs),
              "Discriminator Loss: {:.4f}...".format(train_loss_d),
              "Generator Loss: {:.4f}".format(train_loss_g))
        # Save losses to view after training
        losses.append((train_loss_d, train_loss_g))
        # Sample from generator as we're training for viewing afterwards
        # NOTE(review): this generator(...) call adds fresh sampling ops to
        # the graph every epoch -- confirm this growth is acceptable here.
        sample_z = np.random.uniform(-1, 1, size=(16, z_size))
        gen_samples = sess.run(
                       generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
                       feed_dict={input_z: sample_z})
        samples.append(gen_samples)
        saver.save(sess, './checkpoints/generator.ckpt')

# Save training generator samples
with open('train_samples.pkl', 'wb') as f:
    pkl.dump(samples, f)
"""
Explanation: Training
End of explanation
"""
fig, ax = plt.subplots()
# losses was appended as (discriminator, generator) pairs, so after
# transposing, row 0 is D's curve and row 1 is G's.
losses = np.array(losses)
plt.plot(losses.T[0], label='Discriminator')
plt.plot(losses.T[1], label='Generator')
plt.title("Training Losses")
plt.legend()
"""
Explanation: Training loss
Here we'll check out the training losses for the generator and discriminator.
End of explanation
"""
def view_samples(epoch, samples):
    """Show the 16 generator samples saved at a given epoch in a 4x4 grid."""
    fig, axes = plt.subplots(figsize=(7,7), nrows=4, ncols=4, sharey=True, sharex=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        # each sample is a flat 784 vector; reshape back to 28x28 grayscale
        im = ax.imshow(img.reshape((28,28)), cmap='Greys_r')
    return fig, axes

# Load samples from generator taken while training
with open('train_samples.pkl', 'rb') as f:
    samples = pkl.load(f)
"""
Explanation: Generator samples from training
Here we can view samples of images from the generator. First we'll look at images taken while training.
End of explanation
"""
# Epoch index -1 shows the samples from the final training epoch.
_ = view_samples(-1, samples)
"""
Explanation: These are samples from the final training epoch. You can see the generator is able to reproduce numbers like 1, 7, 3, 2. Since this is just a sample, it isn't representative of the full range of images this generator can make.
End of explanation
"""
# Show a rows x cols grid of generated digits: one row per sampled epoch
# (every len(samples)/rows epochs), a few images from each such epoch.
rows, cols = 10, 6
fig, axes = plt.subplots(figsize=(7,12), nrows=rows, ncols=cols, sharex=True, sharey=True)
for sample, ax_row in zip(samples[::int(len(samples)/rows)], axes):
    for img, ax in zip(sample[::int(len(sample)/cols)], ax_row):
        ax.imshow(img.reshape((28,28)), cmap='Greys_r')
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
"""
Explanation: Below I'm showing the generated images as the network was training, every 10 epochs. With bonus optical illusion!
End of explanation
"""
# Restore the trained generator weights from the checkpoint and draw
# brand-new samples from fresh latent vectors.
saver = tf.train.Saver(var_list=g_vars)
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    sample_z = np.random.uniform(-1, 1, size=(16, z_size))
    gen_samples = sess.run(
                   generator(input_z, input_size, n_units=g_hidden_size, reuse=True, alpha=alpha),
                   feed_dict={input_z: sample_z})
_ = view_samples(0, [gen_samples])
"""
Explanation: It starts out as all noise. Then it learns to make only the center white and the rest black. You can start to see some number like structures appear out of the noise like 1s and 9s.
Sampling from the generator
We can also get completely new images from the generator by using the checkpoint we saved after training. We just need to pass in a new latent vector $z$ and we'll get new samples!
End of explanation
"""
|
beangoben/HistoriaDatos_Higgs | Dia2/6_Datos_Iris.ipynb | gpl-2.0 | import pandas as pd
import numpy as np # modulo de computo numerico
import matplotlib.pyplot as plt # modulo de graficas
# esta linea hace que las graficas salgan en el notebook
import seaborn as sns
%matplotlib inline
"""
Explanation: Classificando Iris
Ahora vamos a ver un conjunto de datos muy famosos, los datos iris, son 150 mediciones sobre 3 especies de plantas:
Cada planta tiene cuatro propiedades que se midieron:
Vamos a empezar con varias versiones de los datos, incrementando en complejidad:
Datos 2D sobre los sepalos de
Datos 2D sobre los petalos de las tres especies.
Datos 4D sobre los petalos de las tres especies. Los datos completos!
Su misión: <br> Diseñar estrategias empíricas para clasificar las plantas.
Primero las librerias
End of explanation
"""
# Load the example dataset and take a first look at its size and head.
df=pd.read_csv('files/ejemplo.csv')
print(df.shape)
df.head()
"""
Explanation: Un mini-ejemplo: Classificacion
Usaremos el archivo 'files/ejemplo.csv' en conjunto con pandas:
End of explanation
"""
# Pairwise scatter plots plus per-variable histograms, colored by class.
sns.pairplot(df,hue='Tipo')
# NOTE(review): plt.title() only titles the last subplot of the pairplot
# grid; plt.suptitle() would title the whole figure -- confirm intent.
plt.title('Distribuciones de Datos')
plt.show()
"""
Explanation: Visualizando
Utilizaremos la funcion sns.pairplot() que combina las técnicas de visualización que ya vimos, para cada variable crea una versión 1D (Histograma) y luego para cada pareja de variables crea un scatter plot.
Usamos hue='Tipo' para colorear los datos en base a el tipo de dato.
End of explanation
"""
|
jmankoff/data | Assignments/networks-byte6/.ipynb_checkpoints/byte6-SN-checkpoint.ipynb | gpl-3.0 | import copy
# open the file you have downloaded (SNAP edge-list format)
# NOTE(review): the handle is never closed and the name `file` shadows
# the Python 2 built-in -- acceptable in a notebook, but worth knowing.
file = open("amazon.txt")
# this returns an array with one entry for each line in the file
lines = file.readlines()
print len(lines)
# Note: the format of the snap files is to list a node (identified by a unique number)
# and all of the nodes it links to (also identified by numbers), on the same line, separated by tabs.
# construct the graph
# a set is an unordered collection of unique elements
edges = set()
# this will store our nodes
nodes = {}
# for each line in the file that was loaded in
for line in lines:
    # divide the line into the node and all of its edges
    data = line.split()
    a = int(data[0])
    b = int(data[1])
    # add the edge
    edges.add((a, b))
    # update the count for the number of times we've seen each node
    # NOTE(review): starting from -1 means the stored value is
    # (occurrences - 1); only len(nodes) is used later, so it's harmless.
    nodes[a] = nodes.get(a, -1) + 1
    nodes[b] = nodes.get(b, -1) + 1
print "number of unique edges"
print len(edges)
print "number of unique nodes"
print len(nodes)
# get the degrees of each node in a set of edges
def get_degrees(edges):
    """Return a mapping of node -> number of incident edges.

    Each endpoint of every edge contributes one to its node's degree.
    Returns a collections.Counter (a dict subclass), so existing callers
    using .get(), indexing, and sorted() keep working unchanged.
    """
    from collections import Counter
    from itertools import chain
    # Count both endpoints of every edge in one C-speed pass instead of
    # a hand-rolled dict-increment loop.
    return Counter(chain.from_iterable(edges))
# Delete all nodes in delete_nodes from edges
def delete_node(edges, delete_nodes):
# construct a new set of edges
new_edges = []
print "# of nodes to be deleted", len(delete_nodes)
# loop through all the current edges
for i, j in edges:
# if an edges two nodes are not in the
# set of nodes to be deleted
if i not in delete_nodes and j not in delete_nodes:
# append that edge to our new edges
new_edges.append((i,j))
return new_edges
# kcore algorithm
# We run the kcore algorithm to delete all
# the nodes whose cores are less than k
# returns a new set of edges and nodes
# including only those in the k core.
def kcore(edges, k):
# make a complete copy of the edges so we can delete or change
# things without messing up our original
edges = copy.deepcopy(edges)
# now for each pair of nodes, count the number of
degree_counts = get_degrees(edges)
# sort the nodes by degree and return
# only the node numbers (not their degree)
sorted_nodes = sorted(degree_counts, key = degree_counts.get)
print "largest degree: ", degree_counts[sorted_nodes[0]]
# repeatedly delete all nodes with degrees < k to find the k core
# if we run out of nodes, or the largest count is < k we should stop
while ((len(sorted_nodes) > 0) and (degree_counts[sorted_nodes[0]]<k)):
# collect nodes with degrees < k in to_delete
to_delete = set()
for node in sorted_nodes:
if degree_counts[node]<k:
to_delete.add(node)
else:
break
# delete all edges that include those nodes
edges = delete_node(edges, to_delete)
print "# of edges left:",len(edges)
# recount the degrees for this (smaller) graph
degree_counts = get_degrees(edges)
# resort the nodes
sorted_nodes = sorted(degree_counts, key = degree_counts.get)
return edges, sorted_nodes
core_edges, core_nodes=kcore(edges, 3)
"""
Explanation: <h1>Social Network Analysis</h1>
The goal of this byte is to explore some algorithms and visualizations relating to networks
This Byte has three parts:
* Choose a graph data set from http://snap.stanford.edu/data/index.html
* Construct a graph using this dataset and apply the K-Core algorithm. Then visualize it.
* Check its power law properties. Then try other datasets and see whether it follows power law.
K-Core Algorithm Introduction:
K-Core is an approach of simplifying a graph by removing the edges that have small degrees. The goal of the algorithm is to find groups of nodes that are all connected to at least k other people in the same group
For more information, you can read this paper http://arxiv.org/pdf/cs/0504107v2.pdf
Algorithm:
Delete all the nodes and corresppoding edges that have degrees less than k
Calculate the degrees of all the remaining nodes.
If the degrees of all the nodes are larger than k or equal to k, return; Otherwise, repeat from step 1
End of explanation
"""
# We can use this method to create
# an adjacency map representing the graph
def build_neighborhood(edges, nodes):
    """Build an adjacency map: node -> set of neighboring nodes.

    Every node in `nodes` gets an entry (possibly an empty set). Runs in
    O(|nodes| + |edges|) instead of the previous O(|nodes| * |edges|)
    nested scan, with identical results.
    """
    neighborhood = {}
    for node in nodes:
        # create a place to store the neighbors
        neighborhood[node] = set()
    # One pass over the edges, registering each endpoint with the other.
    # The membership guards preserve the original behavior: endpoints
    # outside `nodes` never get their own entry, but can still appear
    # inside a known node's neighbor set.
    for a, b in edges:
        if a in neighborhood:
            neighborhood[a].add(b)
        if b in neighborhood:
            neighborhood[b].add(a)
    return neighborhood
# Discover the connected components of an undirected graph using
# breadth-first search: repeatedly pick an unvisited node and collect
# everything reachable from it into one component.
def get_connected_components(edges, neighborhood, nodes):
    """Return a list of components, each a set of node ids.

    `edges` is accepted for interface compatibility with existing
    callers but is not needed: the `neighborhood` adjacency map already
    encodes the edges.
    """
    from collections import deque  # O(1) popleft vs O(n) list.pop(0)
    result = []
    nodes = set(nodes)
    # keep track of what we've seen (persists across components)
    visited = set()
    # loop until every node has been assigned to a component
    while nodes:
        # seed a new component with any not-yet-assigned node
        start = nodes.pop()
        component = set()
        queue = deque([start])
        visited.add(start)
        while queue:
            node = queue.popleft()
            component.add(node)
            for neighbor in neighborhood[node]:
                # Mark on enqueue so the same node is never queued
                # twice (the original could enqueue duplicates and
                # process them redundantly).
                if neighbor not in visited:
                    visited.add(neighbor)
                    nodes.discard(neighbor)
                    queue.append(neighbor)
        result.append(component)
    return result
# Build the adjacency map for the k-core subgraph computed above and
# count how many connected components it contains.
neighborhood = build_neighborhood(core_edges, core_nodes)
ret = get_connected_components(core_edges, neighborhood, core_nodes)
# Python 2 print statement, consistent with the rest of this notebook.
print "# of connected components",len(ret)
"""
Explanation: Next, let's find whether there exist any clusters (connected components).
End of explanation
"""
import networkx as nx
from networkx.readwrite import json_graph
import json
# create a graph and add all the edges
G=nx.Graph()
for edge in edges:
    G.add_edge(edge[0],edge[1])
# Convert the graph to D3-style node-link JSON.
nld = json_graph.node_link_data(G)
# We store the data in a json file
# So the javascript code can read it
# NOTE(review): the handle returned by open() is never closed; a
# `with` block would be safer.
json.dump(nld, open('force.json','w'))
from IPython.display import IFrame
# IPython Notebook can serve files and display them into
# inline frames. Prepend the path with the 'files' prefix.
viz_file = 'force.html'
IFrame(viz_file, width=700, height=550)
"""
Explanation: Visualization
You may need to install the library "networkx". It is a great tool for analyzing graph data. You can combine it with Gephi to visualize and analyze social networks. Here we use the D3 library to visualize the graph data after running the k-core algorithm. You can use other visualization tools as well.
End of explanation
"""
# code to analyze undirected graphs
%matplotlib inline
from pylab import *
import matplotlib.pyplot as plt
# get the degrees for each node (again)
nodes = get_degrees(edges)
v = nodes.values()
# this ensures that we don't have any values more than once
noRep = list(set(v))
noRep.sort()
x = []
y = []
for count in noRep:
# f is the number of times this value occurs
f = v.count(count)
x.append(count)
y.append(f)
figure()
loglog(x, y, '*')
xlabel('x')
ylabel('y')
title('power law plot')
show()
"""
Explanation: Power Law Property
You can download some other undirected datasets from SNAP and check whether it follows the power law properties.
You can also modify this code to make it able to generate indegree and outdegree loglog plot of directed graphs. Compare their differences and try to explain it.
End of explanation
"""
# code to analyze directed graphs
file = open("twitter.txt")
lines = file.readlines()
edges = set()
nodes_indegree = {}
nodes_outdegree = {}
# construct the indegree info and edges
# very similar to what we did for directed graphs
for line in lines:
data = line.split()
source = int(data[0])
endpoint = int(data[1])
# add the edge
edges.add((source, endpoint))
# update the count for the number of times we've seen each node
nodes_indegree[source] = nodes_indegree.get(source, -1) + 1
nodes_outdegree[endpoint] = nodes_outdegree.get(endpoint, -1) + 1
%matplotlib inline
from pylab import *
import matplotlib.pyplot as plt
# now show this to the viewer
v_indegree = nodes_indegree.values()
v_outdegree = nodes_outdegree.values()
noRep_indegree = list(set(v_indegree))
noRep_outdegree = list(set(v_outdegree))
noRep_indegree.sort()
noRep_outdegree.sort()
x_indegree = []
y_indegree = []
x_outdegree = []
y_outdegree = []
for count in noRep_indegree:
f = v_indegree.count(count)
x_indegree.append(count)
y_indegree.append(f)
for count in noRep_outdegree:
f = v_outdegree.count(count)
x_outdegree.append(count)
y_outdegree.append(f)
figure()
loglog(x_indegree, y_indegree, '*')
xlabel('x')
ylabel('y')
title('indegree distribution')
show()
figure()
loglog(x_outdegree, y_outdegree, '*')
xlabel('x')
ylabel('y')
title('outdegree distribution')
show()
"""
Explanation: Directed Graphs
Directed graphs are handled slightly differently than regular graphs:
We need to keep track of both incoming and outgoing edges...
End of explanation
"""
|
mne-tools/mne-tools.github.io | dev/_downloads/23237b92405a4b223d89222e217ffffd/morph_volume_stc.ipynb | bsd-3-clause | # Author: Tommy Clausner <tommy.clausner@gmail.com>
#
# License: BSD-3-Clause
import os
import nibabel as nib
import mne
from mne.datasets import sample, fetch_fsaverage
from mne.minimum_norm import apply_inverse, read_inverse_operator
from nilearn.plotting import plot_glass_brain
print(__doc__)
"""
Explanation: Morph volumetric source estimate
This example demonstrates how to morph an individual subject's
:class:mne.VolSourceEstimate to a common reference space. We achieve this
using :class:mne.SourceMorph. Data will be morphed based on
an affine transformation and a nonlinear registration method
known as Symmetric Diffeomorphic Registration (SDR) by
:footcite:AvantsEtAl2008.
Transformation is estimated from the subject's anatomical T1 weighted MRI
(brain) to FreeSurfer's 'fsaverage' T1 weighted MRI (brain)_.
Afterwards the transformation will be applied to the volumetric source
estimate. The result will be plotted, showing the fsaverage T1 weighted
anatomical MRI, overlaid with the morphed volumetric source estimate.
End of explanation
"""
# Resolve all input paths relative to the MNE "sample" dataset.
sample_dir_raw = sample.data_path()
sample_dir = os.path.join(sample_dir_raw, 'MEG', 'sample')
subjects_dir = os.path.join(sample_dir_raw, 'subjects')
# Evoked response and precomputed volumetric inverse operator.
fname_evoked = os.path.join(sample_dir, 'sample_audvis-ave.fif')
fname_inv = os.path.join(sample_dir, 'sample_audvis-meg-vol-7-meg-inv.fif')
# fsaverage anatomical T1 (brain-extracted) used as the plot background.
fname_t1_fsaverage = os.path.join(subjects_dir, 'fsaverage', 'mri',
                                  'brain.mgz')
fetch_fsaverage(subjects_dir)  # ensure fsaverage src exists
fname_src_fsaverage = subjects_dir + '/fsaverage/bem/fsaverage-vol-5-src.fif'
"""
Explanation: Setup paths
End of explanation
"""
# Load the baseline-corrected evoked response and the volumetric
# inverse operator prepared above.
evoked = mne.read_evokeds(fname_evoked, condition=0, baseline=(None, 0))
inverse_operator = read_inverse_operator(fname_inv)
# Apply inverse operator; 1.0 / 3.0 ** 2 is the regularization value
# (presumably lambda2 = 1/SNR**2 for an assumed SNR of 3 -- TODO confirm)
stc = apply_inverse(evoked, inverse_operator, 1.0 / 3.0 ** 2, "dSPM")
# To save time: keep only the single time point at 0.09 s.
stc.crop(0.09, 0.09)
"""
Explanation: Compute example data. For reference see ex-inverse-volume.
Load data:
End of explanation
"""
# Read the fsaverage volumetric source space and estimate the morph
# from the subject's source space to it.
src_fs = mne.read_source_spaces(fname_src_fsaverage)
morph = mne.compute_source_morph(
    inverse_operator['src'], subject_from='sample', subjects_dir=subjects_dir,
    niter_affine=[10, 10, 5], niter_sdr=[10, 10, 5],  # just for speed
    src_to=src_fs, verbose=True)
"""
Explanation: Get a SourceMorph object for VolSourceEstimate
subject_from can typically be inferred from
:class:src <mne.SourceSpaces>,
and subject_to is set to 'fsaverage' by default. subjects_dir can be
None when set in the environment. In that case SourceMorph can be initialized
taking src as only argument. See :class:mne.SourceMorph for more
details.
The default parameter setting for zooms will cause the reference volumes
to be resliced before computing the transform. A value of '5' would cause
the function to reslice to an isotropic voxel size of 5 mm. The higher this
value the less accurate but faster the computation will be.
The recommended way to use this is to morph to a specific destination source
space so that different subject_from morphs will go to the same space.
A standard usage for volumetric data reads:
End of explanation
"""
stc_fsaverage = morph.apply(stc)
"""
Explanation: Apply morph to VolSourceEstimate
The morph can be applied to the source estimate data, by giving it as the
first argument to the :meth:morph.apply() <mne.SourceMorph.apply> method.
<div class="alert alert-info"><h4>Note</h4><p>Volumetric morphing is much slower than surface morphing because the
volume for each time point is individually resampled and SDR morphed.
The :meth:`mne.SourceMorph.compute_vol_morph_mat` method can be used
to compute an equivalent sparse matrix representation by computing the
transformation for each source point individually. This generally takes
a few minutes to compute, but can be
:meth:`saved <mne.SourceMorph.save>` to disk and be reused. The
resulting sparse matrix operation is very fast (about 400× faster) to
:meth:`apply <mne.SourceMorph.apply>`. This approach is more efficient
when the number of time points to be morphed exceeds the number of
source space points, which is generally in the thousands. This can
easily occur when morphing many time points and multiple conditions.</p></div>
End of explanation
"""
# Create mri-resolution volume of results; output='nifti1' makes
# morph.apply return a NIfTI-1 image instead of a source estimate.
img_fsaverage = morph.apply(stc, mri_resolution=2, output='nifti1')
"""
Explanation: Convert morphed VolSourceEstimate into NIfTI
We can convert our morphed source estimate into a NIfTI volume using
:meth:morph.apply(..., output='nifti1') <mne.SourceMorph.apply>.
End of explanation
"""
# Load fsaverage anatomical image
t1_fsaverage = nib.load(fname_t1_fsaverage)
# Plot glass brain (change to plot_anat to display an overlaid anatomical T1)
display = plot_glass_brain(t1_fsaverage,
                           title='subject results to fsaverage',
                           draw_cross=False,
                           annotate=True)
# Add the morphed functional data as a semi-transparent overlay.
display.add_overlay(img_fsaverage, alpha=0.75)
"""
Explanation: Plot results
End of explanation
"""
|
JKarathiya/Lean | Research/KitchenSinkQuantBookTemplate.ipynb | apache-2.0 | # Load in our startup script, required to set runtime for PythonNet
%run ../start.py
# Create an instance of our QuantBook
qb = QuantBook()
"""
Explanation: Welcome to The QuantConnect Research Page
Refer to this page for documentation https://www.quantconnect.com/docs/research/overview
Contribute to this template file https://github.com/QuantConnect/Lean/blob/master/Research/KitchenSinkQuantBookTemplate.ipynb
QuantBook Basics
The following example is ready to be used in our Docker container, reference the readme for more details on setting this up.
In order to use this notebook locally you will need to make a few small changes:
Either create the notebook in your build folder (bin/debug) or set working directory of the notebook to it like so in the first cell:
%cd "PathToLean/Lean/Launcher/bin/Debug/
Run the following command in another cell to load in QuantConnect libraries:
%run start.py
Start QuantBook
Add the references and imports
Create a QuantBook instance
End of explanation
"""
# Show that our api object is connected to the Web Api
# (requires a valid User ID and Api token in config.json).
print(api.Connected)
# Get our list of projects from the cloud and print their names
projectResponse = api.ListProjects()
for project in projectResponse.Projects:
    print(project.Name)
"""
Explanation: Using the Web API
Our script start.py automatically loads an instance of the web API for you to use.**
Look at Lean's Api class for more functions to interact with the cloud
**Note: This will only connect if you have your User ID and Api token in config.json
End of explanation
"""
# Subscribe to one equity, one forex pair, one crypto pair, and a
# custom FxcmVolume data feed at hourly resolution.
spy = qb.AddEquity("SPY")
eur = qb.AddForex("EURUSD")
btc = qb.AddCrypto("BTCUSD")
fxv = qb.AddData[FxcmVolume]("EURUSD_Vol", Resolution.Hour)
"""
Explanation: Selecting Asset Data
Checkout the QuantConnect docs to learn how to select asset data.
End of explanation
"""
# Gets historical data from the subscribed assets, the last 360 datapoints with daily resolution
h1 = qb.History(qb.Securities.Keys, 360, Resolution.Daily)
# Plot closing prices from "SPY"
h1.loc["SPY"]["close"].plot()
# Gets historical data from the subscribed assets between 2014-01-01 and
# now, with daily resolution (the original comment wrongly said "last 30 days")
h2 = qb.History(qb.Securities.Keys, datetime(2014,1,1), datetime.now(), Resolution.Daily)
# Plot high prices from "EURUSD"
h2.loc["EURUSD"]["high"].plot()
# Gets historical data for a single asset between two dates with daily resolution
h3 = qb.History([btc.Symbol], datetime(2014,1,1), datetime.now(), Resolution.Daily)
# Plot closing prices from "BTCUSD"
h3.loc["BTCUSD"]["close"].plot()
# Only fetches historical data for a single desired symbol
# NOTE: This will return empty when run locally because this data is not included
h4 = qb.History([spy.Symbol], timedelta(360), Resolution.Daily)
# or qb.History(["SPY"], 360, Resolution.Daily)
# Only fetches historical data for a single desired symbol
# NOTE: This will return empty when run locally because this data is not included
h5 = qb.History([eur.Symbol], timedelta(30), Resolution.Daily)
# or qb.History(["EURUSD"], 30, Resolution.Daily)
"""
Explanation: Historical Data Requests
We can use the QuantConnect API to make Historical Data Requests. The data will be presented as multi-index pandas.DataFrame where the first index is the Symbol.
For more information, please follow the link.
End of explanation
"""
# Subscribe to GOOG options; SetFilter widens the default contract
# filter (strikes -2..+2 around the money, expiries within 180 days
# per QuantConnect SetFilter semantics -- see docs link above).
goog = qb.AddOption("GOOG")
goog.SetFilter(-2, 2, timedelta(0), timedelta(180))
# Snapshot of the option chain as of 2015-12-24.
option_history = qb.GetOptionHistory(goog.Symbol, datetime(2015, 12, 24))
print (option_history.GetStrikes())
print (option_history.GetExpiryDates())
# NOTE(review): h7 is reassigned by the futures cell below; rename one
# of them if both results are needed at once.
h7 = option_history.GetAllData()
"""
Explanation: Historical Options Data Requests
Select the option data
Sets the filter, otherwise the default will be used SetFilter(-1, 1, timedelta(0), timedelta(35))
Get the OptionHistory, an object that has information about the historical options data
End of explanation
"""
# Subscribe to the "ES" future and keep contracts expiring within 180
# days, then snapshot the chain as of 2013-10-07.
es = qb.AddFuture("ES")
es.SetFilter(timedelta(0), timedelta(180))
future_history = qb.GetFutureHistory(es.Symbol, datetime(2013, 10, 7))
print (future_history.GetExpiryDates())
h7 = future_history.GetAllData()
"""
Explanation: Historical Future Data Requests
Select the future data
Sets the filter, otherwise the default will be used SetFilter(timedelta(0), timedelta(35))
Get the FutureHistory, an object that has information about the historical future data
End of explanation
"""
# Fetch the P/E ratio fundamental for five tickers as a pandas.DataFrame.
data = qb.GetFundamental(["AAPL","AIG","BAC","GOOG","IBM"], "ValuationRatios.PERatio")
# Bare expression so the notebook displays the DataFrame.
data
"""
Explanation: Get Fundamental Data
GetFundamental([symbol], selector, start_date = datetime(1998,1,1), end_date = datetime.now())
We will get a pandas.DataFrame with fundamental data.
End of explanation
"""
# Example with BB (Bollinger Bands), a datapoint indicator
# Define the indicator
bb = BollingerBands(30, 2)
# Gets historical data of indicator
bbdf = qb.Indicator(bb, "SPY", 360, Resolution.Daily)
# drop undesired fields
bbdf = bbdf.drop('standarddeviation', 1)
# Plot
bbdf.plot()
# Same indicator applied to EURUSD
bbdf = qb.Indicator(bb, "EURUSD", 360, Resolution.Daily)
bbdf = bbdf.drop('standarddeviation', 1)
bbdf.plot()
# Example with ADX, it is a bar indicator
adx = AverageDirectionalIndex("adx", 14)
adxdf = qb.Indicator(adx, "SPY", 360, Resolution.Daily)
adxdf.plot()
# For EURUSD
adxdf = qb.Indicator(adx, "EURUSD", 360, Resolution.Daily)
adxdf.plot()
# Example with ADO, it is a tradebar indicator (requires volume in its calculation)
ado = AccumulationDistributionOscillator("ado", 5, 30)
adodf = qb.Indicator(ado, "SPY", 360, Resolution.Daily)
adodf.plot()
# For EURUSD.
# Uncomment to check that this SHOULD fail, since the Forex data type is not TradeBar
# adodf = qb.Indicator(ado, "EURUSD", 360, Resolution.Daily)
# adodf.plot()
# SMA cross:
symbol = "EURUSD"
# Get History
hist = qb.History([symbol], 500, Resolution.Daily)
# Get the fast moving average
fast = qb.Indicator(SimpleMovingAverage(50), symbol, 500, Resolution.Daily)
# Get the slow moving average
slow = qb.Indicator(SimpleMovingAverage(200), symbol, 500, Resolution.Daily)
# Remove undesired columns and rename others
fast = fast.drop('rollingsum', 1).rename(columns={'simplemovingaverage': 'fast'})
slow = slow.drop('rollingsum', 1).rename(columns={'simplemovingaverage': 'slow'})
# Concatenate the information and plot
df = pd.concat([hist.loc[symbol]["close"], fast, slow], axis=1).dropna(axis=0)
df.plot()
# Get indicator defining a lookback period in terms of timedelta
ema1 = qb.Indicator(ExponentialMovingAverage(50), "SPY", timedelta(100), Resolution.Daily)
# Get indicator defining a start and end date
ema2 = qb.Indicator(ExponentialMovingAverage(50), "SPY", datetime(2016,1,1), datetime(2016,10,1), Resolution.Daily)
ema = pd.concat([ema1, ema2], axis=1)
ema.plot()
rsi = RelativeStrengthIndex(14)
# Selects which field we want to use in our indicator (default is Field.Close)
rsihi = qb.Indicator(rsi, "SPY", 360, Resolution.Daily, Field.High)
rsilo = qb.Indicator(rsi, "SPY", 360, Resolution.Daily, Field.Low)
rsihi = rsihi.rename(columns={'relativestrengthindex': 'high'})
rsilo = rsilo.rename(columns={'relativestrengthindex': 'low'})
rsi = pd.concat([rsihi['high'], rsilo['low']], axis=1)
rsi.plot()
"""
Explanation: Indicators
We can easily get the indicator of a given symbol with QuantBook.
For all indicators, please checkout QuantConnect Indicators Reference Table
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/mpi-m/cmip6/models/sandbox-3/ocnbgchem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'sandbox-3', 'ocnbgchem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocnbgchem
MIP Era: CMIP6
Institute: MPI-M
Source ID: SANDBOX-3
Topic: Ocnbgchem
Sub-Topics: Tracers.
Properties: 65 (37 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:17
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
4. Key Properties --> Transport Scheme
5. Key Properties --> Boundary Forcing
6. Key Properties --> Gas Exchange
7. Key Properties --> Carbon Chemistry
8. Tracers
9. Tracers --> Ecosystem
10. Tracers --> Ecosystem --> Phytoplankton
11. Tracers --> Ecosystem --> Zooplankton
12. Tracers --> Disolved Organic Matter
13. Tracers --> Particules
14. Tracers --> Dic Alkalinity
1. Key Properties
Ocean Biogeochemistry key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean biogeochemistry model code (PISCES 2.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.model_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Geochemical"
# "NPZD"
# "PFT"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Fixed"
# "Variable"
# "Mix of both"
# TODO - please enter value(s)
"""
Explanation: 1.4. Elemental Stoichiometry
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe elemental stoichiometry (fixed, variable, mix of the two)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.elemental_stoichiometry_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.5. Elemental Stoichiometry Details
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe which elements have fixed/variable stoichiometry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.6. Prognostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all prognostic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.diagnostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.7. Diagnostic Variables
Is Required: TRUE Type: STRING Cardinality: 1.N
List of all diagnostic tracer variables in the ocean biogeochemistry component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.damping')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.8. Damping
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any tracer damping used (such as artificial correction or relaxation to climatology,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Time Stepping Framework --> Passive Tracers Transport
Time stepping method for passive tracers transport in ocean biogeochemistry
2.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for passive tracers
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.passive_tracers_transport.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for passive tracers (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "use ocean model transport time step"
# "use specific time step"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Time Stepping Framework --> Biology Sources Sinks
Time stepping framework for biology sources and sinks in ocean biogeochemistry
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time stepping framework for biology sources and sinks
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.time_stepping_framework.biology_sources_sinks.timestep_if_not_from_ocean')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Timestep If Not From Ocean
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Time step for biology sources and sinks (if different from ocean)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Transport Scheme
Transport scheme in ocean biogeochemistry
4.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transport scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Use that of ocean model"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 4.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Transport scheme used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.transport_scheme.use_different_scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.3. Use Different Scheme
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe the transport scheme if different from that of the ocean model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.atmospheric_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Atmospheric Chemistry model"
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Boundary Forcing
Properties of biogeochemistry boundary forcing
5.1. Atmospheric Deposition
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how atmospheric deposition is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.river_input')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "from file (climatology)"
# "from file (interannual variations)"
# "from Land Surface model"
# TODO - please enter value(s)
"""
Explanation: 5.2. River Input
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how river input is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_boundary_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Sediments From Boundary Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from boundary conditions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.boundary_forcing.sediments_from_explicit_model')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Sediments From Explicit Model
Is Required: FALSE Type: STRING Cardinality: 0.1
List which sediments are specified from an explicit sediment model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Gas Exchange
*Properties of gas exchange in ocean biogeochemistry *
6.1. CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.2. CO2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe CO2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.3. O2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is O2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.O2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 6.4. O2 Exchange Type
Is Required: FALSE Type: ENUM Cardinality: 0.1
Describe O2 gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.5. DMS Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is DMS gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.DMS_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.6. DMS Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify DMS gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.7. N2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.8. N2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.9. N2O Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is N2O gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.N2O_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.10. N2O Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify N2O gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.11. CFC11 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC11 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC11_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.12. CFC11 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC11 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.13. CFC12 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is CFC12 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.CFC12_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.14. CFC12 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify CFC12 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.15. SF6 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is SF6 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.SF6_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.16. SF6 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify SF6 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.17. 13CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 13CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.13CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.18. 13CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 13CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.19. 14CO2 Exchange Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is 14CO2 gas exchange modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.14CO2_exchange_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.20. 14CO2 Exchange Type
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify 14CO2 gas exchange scheme type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.gas_exchange.other_gases')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.21. Other Gases
Is Required: FALSE Type: STRING Cardinality: 0.1
Specify any other gas exchange
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OMIP protocol"
# "Other protocol"
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Carbon Chemistry
Properties of carbon chemistry biogeochemistry
7.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe how carbon chemistry is modeled
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.pH_scale')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sea water"
# "Free"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 7.2. PH Scale
Is Required: FALSE Type: ENUM Cardinality: 0.1
If NOT OMIP protocol, describe pH scale.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.key_properties.carbon_chemistry.constants_if_not_OMIP')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Constants If Not OMIP
Is Required: FALSE Type: STRING Cardinality: 0.1
If NOT OMIP protocol, list carbon chemistry constants.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Tracers
Ocean biogeochemistry tracers
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of tracers in ocean biogeochemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.sulfur_cycle_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Sulfur Cycle Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sulfur cycle modeled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nutrients_present')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrogen (N)"
# "Phosphorous (P)"
# "Silicium (S)"
# "Iron (Fe)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.3. Nutrients Present
Is Required: TRUE Type: ENUM Cardinality: 1.N
List nutrient species present in ocean biogeochemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_species_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Nitrates (NO3)"
# "Amonium (NH4)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.4. Nitrous Species If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous species.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.nitrous_processes_if_N')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Dentrification"
# "N fixation"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.5. Nitrous Processes If N
Is Required: FALSE Type: ENUM Cardinality: 0.N
If nitrogen present, list nitrous processes.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_definition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Tracers --> Ecosystem
Ecosystem properties in ocean biogeochemistry
9.1. Upper Trophic Levels Definition
Is Required: TRUE Type: STRING Cardinality: 1.1
Definition of upper trophic level (e.g. based on size) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.upper_trophic_levels_treatment')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9.2. Upper Trophic Levels Treatment
Is Required: TRUE Type: STRING Cardinality: 1.1
Define how upper trophic level are treated
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "PFT including size based (specify both below)"
# "Size based only (specify below)"
# "PFT only (specify below)"
# TODO - please enter value(s)
"""
Explanation: 10. Tracers --> Ecosystem --> Phytoplankton
Phytoplankton properties in ocean biogeochemistry
10.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of phytoplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.pft')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diatoms"
# "Nfixers"
# "Calcifiers"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Pft
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton functional types (PFT) (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.phytoplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microphytoplankton"
# "Nanophytoplankton"
# "Picophytoplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.3. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Phytoplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Generic"
# "Size based (specify below)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Tracers --> Ecosystem --> Zooplankton
Zooplankton properties in ocean biogeochemistry
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of zooplankton
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.ecosystem.zooplankton.size_classes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Microzooplankton"
# "Mesozooplankton"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Size Classes
Is Required: FALSE Type: ENUM Cardinality: 0.N
Zooplankton size classes (if applicable)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.bacteria_present')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 12. Tracers --> Disolved Organic Matter
Disolved organic matter properties in ocean biogeochemistry
12.1. Bacteria Present
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there bacteria representation ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.disolved_organic_matter.lability')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Labile"
# "Semi-labile"
# "Refractory"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Lability
Is Required: TRUE Type: ENUM Cardinality: 1.1
Describe treatment of lability in dissolved organic matter
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diagnostic"
# "Diagnostic (Martin profile)"
# "Diagnostic (Balast)"
# "Prognostic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Tracers --> Particules
Particulate carbon properties in ocean biogeochemistry
13.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is particulate carbon represented in ocean biogeochemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.types_if_prognostic')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "POC"
# "PIC (calcite)"
# "PIC (aragonite"
# "BSi"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Types If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.N
If prognostic, type(s) of particulate matter taken into account
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "No size spectrum used"
# "Full size spectrum"
# "Discrete size classes (specify which below)"
# TODO - please enter value(s)
"""
Explanation: 13.3. Size If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, describe whether a particle size spectrum is used to represent the distribution of particles in the water volume
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.size_if_discrete')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13.4. Size If Discrete
Is Required: FALSE Type: STRING Cardinality: 0.1
If prognostic and discrete size, describe which size classes are used
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.particules.sinking_speed_if_prognostic')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Function of particule size"
# "Function of particule type (balast)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.5. Sinking Speed If Prognostic
Is Required: FALSE Type: ENUM Cardinality: 0.1
If prognostic, method for calculation of sinking speed of particules
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.carbon_isotopes')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "C13"
# "C14)"
# TODO - please enter value(s)
"""
Explanation: 14. Tracers --> Dic Alkalinity
DIC and alkalinity properties in ocean biogeochemistry
14.1. Carbon Isotopes
Is Required: TRUE Type: ENUM Cardinality: 1.N
Which carbon isotopes are modelled (C13, C14)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.abiotic_carbon')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.2. Abiotic Carbon
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is abiotic carbon modelled ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocnbgchem.tracers.dic_alkalinity.alkalinity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Prognostic"
# "Diagnostic)"
# TODO - please enter value(s)
"""
Explanation: 14.3. Alkalinity
Is Required: TRUE Type: ENUM Cardinality: 1.1
How is alkalinity modelled ?
End of explanation
"""
|
keras-team/keras-io | examples/vision/ipynb/conv_lstm.ipynb | apache-2.0 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import io
import imageio
from IPython.display import Image, display
from ipywidgets import widgets, Layout, HBox
"""
Explanation: Next-Frame Video Prediction with Convolutional LSTMs
Author: Amogh Joshi<br>
Date created: 2021/06/02<br>
Last modified: 2021/06/05<br>
Description: How to build and train a convolutional LSTM model for next-frame video prediction.
Introduction
The
Convolutional LSTM
architectures bring together time series processing and computer vision by
introducing a convolutional recurrent cell in a LSTM layer. In this example, we will explore the
Convolutional LSTM model in an application to next-frame prediction, the process
of predicting what video frames come next given a series of past frames.
Setup
End of explanation
"""
# Download and load the dataset.
fpath = keras.utils.get_file(
"moving_mnist.npy",
"http://www.cs.toronto.edu/~nitish/unsupervised_video/mnist_test_seq.npy",
)
dataset = np.load(fpath)
# Swap the axes representing the number of frames and number of data samples.
dataset = np.swapaxes(dataset, 0, 1)
# We'll pick out 1000 of the 10000 total examples and use those.
dataset = dataset[:1000, ...]
# Add a channel dimension since the images are grayscale.
dataset = np.expand_dims(dataset, axis=-1)
# Split into train and validation sets using indexing to optimize memory.
indexes = np.arange(dataset.shape[0])
np.random.shuffle(indexes)
train_index = indexes[: int(0.9 * dataset.shape[0])]
val_index = indexes[int(0.9 * dataset.shape[0]) :]
train_dataset = dataset[train_index]
val_dataset = dataset[val_index]
# Normalize the data to the 0-1 range.
train_dataset = train_dataset / 255
val_dataset = val_dataset / 255
# We'll define a helper function to shift the frames, where
# `x` is frames 0 to n - 1, and `y` is frames 1 to n.
def create_shifted_frames(data):
    """Split a frame sequence into (input, target) pairs for next-frame prediction.

    `data` has shape (samples, frames, height, width, channels). The
    returned `x` holds frames 0..n-2 and `y` holds frames 1..n-1, so each
    input frame is paired with the frame that follows it.
    """
    num_frames = data.shape[1]
    x = data[:, : num_frames - 1, :, :]
    y = data[:, 1:num_frames, :, :]
    return x, y
# Apply the processing function to the datasets.
x_train, y_train = create_shifted_frames(train_dataset)
x_val, y_val = create_shifted_frames(val_dataset)
# Inspect the dataset.
print("Training Dataset Shapes: " + str(x_train.shape) + ", " + str(y_train.shape))
print("Validation Dataset Shapes: " + str(x_val.shape) + ", " + str(y_val.shape))
"""
Explanation: Dataset Construction
For this example, we will be using the
Moving MNIST
dataset.
We will download the dataset and then construct and
preprocess training and validation sets.
For next-frame prediction, our model will be using a previous frame,
which we'll call f_n, to predict a new frame, called f_(n + 1).
To allow the model to create these predictions, we'll need to process
the data such that we have "shifted" inputs and outputs, where the
input data is frame x_n, being used to predict frame y_(n + 1).
End of explanation
"""
# Construct a figure on which we will visualize the images.
fig, axes = plt.subplots(4, 5, figsize=(10, 8))
# Plot each of the sequential images for one random data example.
data_choice = np.random.choice(range(len(train_dataset)), size=1)[0]
for idx, ax in enumerate(axes.flat):
ax.imshow(np.squeeze(train_dataset[data_choice][idx]), cmap="gray")
ax.set_title(f"Frame {idx + 1}")
ax.axis("off")
# Print information and display the figure.
print(f"Displaying frames for example {data_choice}.")
plt.show()
"""
Explanation: Data Visualization
Our data consists of sequences of frames, each of which
are used to predict the upcoming frame. Let's take a look
at some of these sequential frames.
End of explanation
"""
# Construct the input layer with no definite frame size.
inp = layers.Input(shape=(None, *x_train.shape[2:]))
# We will construct 3 `ConvLSTM2D` layers with batch normalization,
# followed by a `Conv3D` layer for the spatiotemporal outputs.
x = layers.ConvLSTM2D(
filters=64,
kernel_size=(5, 5),
padding="same",
return_sequences=True,
activation="relu",
)(inp)
x = layers.BatchNormalization()(x)
x = layers.ConvLSTM2D(
filters=64,
kernel_size=(3, 3),
padding="same",
return_sequences=True,
activation="relu",
)(x)
x = layers.BatchNormalization()(x)
x = layers.ConvLSTM2D(
filters=64,
kernel_size=(1, 1),
padding="same",
return_sequences=True,
activation="relu",
)(x)
x = layers.Conv3D(
filters=1, kernel_size=(3, 3, 3), activation="sigmoid", padding="same"
)(x)
# Next, we will build the complete model and compile it.
model = keras.models.Model(inp, x)
model.compile(
loss=keras.losses.binary_crossentropy, optimizer=keras.optimizers.Adam(),
)
"""
Explanation: Model Construction
To build a Convolutional LSTM model, we will use the
ConvLSTM2D layer, which will accept inputs of shape
(batch_size, num_frames, width, height, channels), and return
a prediction movie of the same shape.
End of explanation
"""
# Define some callbacks to improve training.
early_stopping = keras.callbacks.EarlyStopping(monitor="val_loss", patience=10)
reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor="val_loss", patience=5)
# Define modifiable training hyperparameters.
epochs = 20
batch_size = 5
# Fit the model to the training data.
model.fit(
x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(x_val, y_val),
callbacks=[early_stopping, reduce_lr],
)
"""
Explanation: Model Training
With our model and data constructed, we can now train the model.
End of explanation
"""
# Select a random example from the validation dataset.
example = val_dataset[np.random.choice(range(len(val_dataset)), size=1)[0]]
# Pick the first/last ten frames from the example.
frames = example[:10, ...]
original_frames = example[10:, ...]
# Predict a new set of 10 frames.
for _ in range(10):
# Extract the model's prediction and post-process it.
new_prediction = model.predict(np.expand_dims(frames, axis=0))
new_prediction = np.squeeze(new_prediction, axis=0)
predicted_frame = np.expand_dims(new_prediction[-1, ...], axis=0)
# Extend the set of prediction frames.
frames = np.concatenate((frames, predicted_frame), axis=0)
# Construct a figure for the original and new frames.
fig, axes = plt.subplots(2, 10, figsize=(20, 4))
# Plot the original frames.
for idx, ax in enumerate(axes[0]):
ax.imshow(np.squeeze(original_frames[idx]), cmap="gray")
ax.set_title(f"Frame {idx + 11}")
ax.axis("off")
# Plot the new frames.
new_frames = frames[10:, ...]
for idx, ax in enumerate(axes[1]):
ax.imshow(np.squeeze(new_frames[idx]), cmap="gray")
ax.set_title(f"Frame {idx + 11}")
ax.axis("off")
# Display the figure.
plt.show()
"""
Explanation: Frame Prediction Visualizations
With our model now constructed and trained, we can generate
some example frame predictions based on a new video.
We'll pick a random example from the validation set and
then choose the first ten frames from them. From there, we can
allow the model to predict 10 new frames, which we can compare
to the ground truth frame predictions.
End of explanation
"""
# Select a few random examples from the dataset.
examples = val_dataset[np.random.choice(range(len(val_dataset)), size=5)]
# Iterate over the examples and predict the frames.
predicted_videos = []
for example in examples:
# Pick the first/last ten frames from the example.
frames = example[:10, ...]
original_frames = example[10:, ...]
new_predictions = np.zeros(shape=(10, *frames[0].shape))
# Predict a new set of 10 frames.
for i in range(10):
# Extract the model's prediction and post-process it.
frames = example[: 10 + i + 1, ...]
new_prediction = model.predict(np.expand_dims(frames, axis=0))
new_prediction = np.squeeze(new_prediction, axis=0)
predicted_frame = np.expand_dims(new_prediction[-1, ...], axis=0)
# Extend the set of prediction frames.
new_predictions[i] = predicted_frame
# Create and save GIFs for each of the ground truth/prediction images.
for frame_set in [original_frames, new_predictions]:
# Construct a GIF from the selected video frames.
current_frames = np.squeeze(frame_set)
current_frames = current_frames[..., np.newaxis] * np.ones(3)
current_frames = (current_frames * 255).astype(np.uint8)
current_frames = list(current_frames)
# Construct a GIF from the frames.
with io.BytesIO() as gif:
imageio.mimsave(gif, current_frames, "GIF", fps=5)
predicted_videos.append(gif.getvalue())
# Display the videos.
print(" Truth\tPrediction")
for i in range(0, len(predicted_videos), 2):
# Construct and display an `HBox` with the ground truth and prediction.
box = HBox(
[
widgets.Image(value=predicted_videos[i]),
widgets.Image(value=predicted_videos[i + 1]),
]
)
display(box)
"""
Explanation: Predicted Videos
Finally, we'll pick a few examples from the validation set
and construct some GIFs with them to see the model's
predicted videos.
You can use the trained model hosted on Hugging Face Hub and try the demo on Hugging Face Spaces.
End of explanation
"""
|
empet/Math | Imags/Animating a family-of-complex-functions.ipynb | bsd-3-clause | import plotly.graph_objects as go
import numpy as np
Plotly version of the HSV colorscale, corresponding to S=1, V=1, where S is saturation and V is the value.
pl_hsv = [[0.0, 'rgb(0, 255, 255)'],
[0.0833, 'rgb(0, 127, 255)'],
[0.1667, 'rgb(0, 0, 255)'],
[0.25, 'rgb(127, 0, 255)'],
[0.3333, 'rgb(255, 0, 255)'],
[0.4167, 'rgb(255, 0, 127)'],
[0.5, 'rgb(255, 0, 0)'],
[0.5833, 'rgb(255, 127, 0)'],
[0.6667, 'rgb(254, 255, 0)'],
[0.75, 'rgb(127, 255, 0)'],
[0.8333, 'rgb(0, 255, 0)'],
[0.9167, 'rgb(0, 255, 127)'],
[1.0, 'rgb(0, 255, 255)']]
def evaluate_function(func, re=(-1, 1), im=(-1, 1), N=100):
    """Sample a rectangular region of the complex plane on a regular grid.

    func: the complex function to be plotted (kept for interface
        compatibility; note it is not called here — the caller evaluates
        it on the returned grid).
    re, im: interval ends on the real and imaginary axes, defining the
        rectangular region in the complex plane.
    N: number of sample points per unit length.

    Returns (X, Y, z): the 1-d axis samples and the 2-d complex grid
    z = x + i*y of shape (len(Y), len(X)).
    """
    width = re[1] - re[0]
    height = im[1] - im[0]
    n_cols = int(N * width)   # horizontal resolution
    n_rows = int(N * height)  # vertical resolution
    X = np.linspace(re[0], re[1], n_cols)
    Y = np.linspace(im[0], im[1], n_rows)
    x, y = np.meshgrid(X, Y)
    return X, Y, x + 1j * y
"""
Explanation: Animation of a family of complex functions
End of explanation
"""
tickvals = [-np.pi, -2*np.pi/3, -np.pi/3, 0, np.pi/3, 2*np.pi/3, np.pi]
ticktext=['-\u03c0', '-2\u03c0/3', '-\u03c0/3', '0', '\u03c0/3', '2\u03c0/3', '\u03c0']
"""
Explanation: For a particular parameter, the corresponding function of the given family is represented as a heatmap of its argument, which illustrates
the positions of its zeros and poles.
Define tickvals and ticktext for the heatmap colorbar
End of explanation
"""
from functools import partial
def func(t, z):
    """One member of the parametric family f_t(z) = z**4 + e^{2it} e^{it} / z**2.

    As t varies the pole at the origin and the four roots rotate, which is
    what the animation visualizes.
    """
    phase = np.exp(2 * t * 1j)
    return z ** 4 + phase / z ** 2 * np.exp(1j * t)
f0 = partial(func, 0)
x, y, z = evaluate_function(f0, re=(-1.5, 1.5), im=(-1.5,1.5), N=50)
w = f0(z)
argument = np.angle(w)
fig = go.Figure(go.Heatmap(x=x, y=y, z=argument, colorscale=pl_hsv,
colorbar=dict(thickness=20, tickvals=tickvals,
ticktext=ticktext,
title='arg(f(z))')))
frames = []
t = np.linspace(0, 3, 45) #6, 85
for s in t:
g = partial(func, s)
w = g(z)
argument = np.angle(w)
frames.append(go.Frame(data=[go.Heatmap(z=argument)]))
fig.update(frames=frames);
fig.update_layout(width=500, height=475,
updatemenus=[dict(type='buttons',
y=1,
x=1.45,
active=0,
buttons=[dict(label='Play',
method='animate',
args=[None,
dict(frame=dict(duration=10,
redraw=True),
transition=dict(duration=0),
fromcurrent=True,
mode='immediate')])])]);
"""
Explanation: Define a family of complex functions, depending on the parameter t. To animate the motion of function zeros and poles, as t varies,
we use functools.partial to get the
function corresponding to a particular parameter:
End of explanation
"""
|
MJuddBooth/pandas | doc/source/user_guide/style.ipynb | bsd-3-clause | import matplotlib.pyplot
# We have this here to trigger matplotlib's font cache stuff.
# This cell is hidden from the output
import pandas as pd
import numpy as np
np.random.seed(24)
df = pd.DataFrame({'A': np.linspace(1, 10, 10)})
df = pd.concat([df, pd.DataFrame(np.random.randn(10, 4), columns=list('BCDE'))],
axis=1)
df.iloc[0, 2] = np.nan
"""
Explanation: Styling
New in version 0.17.1
<span style="color: red">Provisional: This is a new feature and still under development. We'll be adding features and possibly making breaking changes in future releases. We'd love to hear your feedback.</span>
This document is written as a Jupyter Notebook, and can be viewed or downloaded here.
You can apply conditional formatting, the visual styling of a DataFrame
depending on the data within, by using the DataFrame.style property.
This is a property that returns a Styler object, which has
useful methods for formatting and displaying DataFrames.
The styling is accomplished using CSS.
You write "style functions" that take scalars, DataFrames or Series, and return like-indexed DataFrames or Series with CSS "attribute: value" pairs for the values.
These functions can be incrementally passed to the Styler which collects the styles before rendering.
Building Styles
Pass your style functions into one of the following methods:
Styler.applymap: elementwise
Styler.apply: column-/row-/table-wise
Both of those methods take a function (and some other keyword arguments) and applies your function to the DataFrame in a certain way.
Styler.applymap works through the DataFrame elementwise.
Styler.apply passes each column or row into your DataFrame one-at-a-time or the entire table at once, depending on the axis keyword argument.
For columnwise use axis=0, rowwise use axis=1, and for the entire table at once use axis=None.
For Styler.applymap your function should take a scalar and return a single string with the CSS attribute-value pair.
For Styler.apply your function should take a Series or DataFrame (depending on the axis parameter), and return a Series or DataFrame with an identical shape where each value is a string with a CSS attribute-value pair.
Let's see some examples.
End of explanation
"""
df.style
"""
Explanation: Here's a boring example of rendering a DataFrame, without any (visible) styles:
End of explanation
"""
df.style.highlight_null().render().split('\n')[:10]
"""
Explanation: Note: The DataFrame.style attribute is a property that returns a Styler object. Styler has a _repr_html_ method defined on it so they are rendered automatically. If you want the actual HTML back for further processing or for writing to file call the .render() method which returns a string.
The above output looks very similar to the standard DataFrame HTML representation. But we've done some work behind the scenes to attach CSS classes to each cell. We can view these by calling the .render method.
End of explanation
"""
def color_negative_red(val):
    """
    Takes a scalar and returns a string with
    the css property `'color: red'` for negative
    strings, black otherwise.
    """
    if val < 0:
        return 'color: red'
    return 'color: black'
"""
Explanation: The row0_col2 is the identifier for that particular cell. We've also prepended each row/column identifier with a UUID unique to each DataFrame so that the style from one doesn't collide with the styling from another within the same notebook or page (you can set the uuid if you'd like to tie together the styling of two DataFrames).
When writing style functions, you take care of producing the CSS attribute / value pairs you want. Pandas matches those up with the CSS classes that identify each cell.
Let's write a simple style function that will color negative numbers red and positive numbers black.
End of explanation
"""
s = df.style.applymap(color_negative_red)
s
"""
Explanation: In this case, the cell's style depends only on its own value.
That means we should use the Styler.applymap method which works elementwise.
End of explanation
"""
def highlight_max(s):
    '''
    highlight the maximum in a Series yellow.
    '''
    peak = s.max()
    # Every occurrence of the maximum is highlighted (ties included).
    return ['background-color: yellow' if v == peak else '' for v in s]
"""
Explanation: Notice the similarity with the standard df.applymap, which operates on DataFrames elementwise. We want you to be able to reuse your existing knowledge of how to interact with DataFrames.
Notice also that our function returned a string containing the CSS attribute and value, separated by a colon just like in a <style> tag. This will be a common theme.
Finally, the input shapes matched. Styler.applymap calls the function on each scalar input, and the function returns a scalar output.
Now suppose you wanted to highlight the maximum value in each column.
We can't use .applymap anymore since that operated elementwise.
Instead, we'll turn to .apply which operates columnwise (or rowwise using the axis keyword). Later on we'll see that something like highlight_max is already defined on Styler so you wouldn't need to write this yourself.
End of explanation
"""
df.style.\
applymap(color_negative_red).\
apply(highlight_max)
"""
Explanation: In this case the input is a Series, one column at a time.
Notice that the output shape of highlight_max matches the input shape, an array with len(s) items.
We encourage you to use method chains to build up a style piecewise, before finally rendering at the end of the chain.
End of explanation
"""
def highlight_max(data, color='yellow'):
    """Highlight the maximum of a Series or a whole DataFrame with *color*."""
    css = 'background-color: {}'.format(color)
    if data.ndim != 1:
        # DataFrame input (from .apply(axis=None)): compare against the
        # global maximum and return a same-shaped frame of CSS strings.
        mask = data == data.max().max()
        return pd.DataFrame(np.where(mask, css, ''),
                            index=data.index, columns=data.columns)
    # Series input (from .apply(axis=0) or axis=1).
    return [css if flag else '' for flag in data == data.max()]
"""
Explanation: Above we used Styler.apply to pass in each column one at a time.
<span style="background-color: #DEDEBE">Debugging Tip: If you're having trouble writing your style function, try just passing it into <code style="background-color: #DEDEBE">DataFrame.apply</code>. Internally, <code style="background-color: #DEDEBE">Styler.apply</code> uses <code style="background-color: #DEDEBE">DataFrame.apply</code> so the result should be the same.</span>
What if you wanted to highlight just the maximum value in the entire table?
Use .apply(function, axis=None) to indicate that your function wants the entire table, not one column or row at a time. Let's try that next.
We'll rewrite our highlight-max to handle either Series (from .apply(axis=0 or 1)) or DataFrames (from .apply(axis=None)). We'll also allow the color to be adjustable, to demonstrate that .apply, and .applymap pass along keyword arguments.
End of explanation
"""
# axis=None hands the whole DataFrame to the style function at once.
df.style.apply(highlight_max, color='darkorange', axis=None)
"""
Explanation: When using Styler.apply(func, axis=None), the function must return a DataFrame with the same index and column labels.
End of explanation
"""
# subset restricts the styling to the listed columns.
df.style.apply(highlight_max, subset=['B', 'C', 'D'])
"""
Explanation: Building Styles Summary
Style functions should return strings with one or more CSS attribute: value delimited by semicolons. Use
Styler.applymap(func) for elementwise styles
Styler.apply(func, axis=0) for columnwise styles
Styler.apply(func, axis=1) for rowwise styles
Styler.apply(func, axis=None) for tablewise styles
And crucially the input and output shapes of func must match. If x is the input then func(x).shape == x.shape.
Finer Control: Slicing
Both Styler.apply, and Styler.applymap accept a subset keyword.
This allows you to apply styles to specific rows or columns, without having to code that logic into your style function.
The value passed to subset behaves similar to slicing a DataFrame.
A scalar is treated as a column label
A list (or series or numpy array)
A tuple is treated as (row_indexer, column_indexer)
Consider using pd.IndexSlice to construct the tuple for the last one.
End of explanation
"""
df.style.applymap(color_negative_red,
subset=pd.IndexSlice[2:5, ['B', 'D']])
"""
Explanation: For row and column slicing, any valid indexer to .loc will work.
End of explanation
"""
df.style.format("{:.2%}")
"""
Explanation: Only label-based slicing is supported right now, not positional.
If your style function uses a subset or axis keyword argument, consider wrapping your function in a functools.partial, partialing out that keyword.
python
my_func2 = functools.partial(my_func, subset=42)
Finer Control: Display Values
We distinguish the display value from the actual value in Styler.
To control the display value, the text is printed in each cell, use Styler.format. Cells can be formatted according to a format spec string or a callable that takes a single value and returns a string.
End of explanation
"""
df.style.format({'B': "{:0<4.0f}", 'D': '{:+.2f}'})
"""
Explanation: Use a dictionary to format specific columns.
End of explanation
"""
df.style.format({"B": lambda x: "±{:.2f}".format(abs(x))})
"""
Explanation: Or pass in a callable (or dictionary of callables) for more flexible handling.
End of explanation
"""
df.style.highlight_null(null_color='red')
"""
Explanation: Builtin Styles
Finally, we expect certain styling functions to be common enough that we've included a few "built-in" to the Styler, so you don't have to write them yourself.
End of explanation
"""
# Heatmap via background_gradient with a seaborn colormap.
import seaborn as sns
cm = sns.light_palette("green", as_cmap=True)
s = df.style.background_gradient(cmap=cm)
s
"""
Explanation: You can create "heatmaps" with the background_gradient method. These require matplotlib, and we'll use Seaborn to get a nice colormap.
End of explanation
"""
# Uses the full color range
df.loc[:4].style.background_gradient(cmap='viridis')
# Compress the color range
# low/high shrink the mapped data range so the cell text stays readable.
(df.loc[:4]
    .style
    .background_gradient(cmap='viridis', low=.5, high=0)
    .highlight_null('red'))
"""
Explanation: Styler.background_gradient takes the keyword arguments low and high. Roughly speaking these extend the range of your data by low and high percent so that when we convert the colors, the colormap's entire range isn't used. This is useful so that you can actually read the text still.
End of explanation
"""
# Built-in: highlight each column's maximum.
df.style.highlight_max(axis=0)
"""
Explanation: There's also .highlight_min and .highlight_max.
End of explanation
"""
# Fixed CSS on every cell, independent of the data.
df.style.set_properties(**{'background-color': 'black',
                           'color': 'lawngreen',
                           'border-color': 'white'})
"""
Explanation: Use Styler.set_properties when the style doesn't actually depend on the values.
End of explanation
"""
# In-cell bar charts for columns A and B.
df.style.bar(subset=['A', 'B'], color='#d65f5f')
"""
Explanation: Bar charts
You can include "bar charts" in your DataFrame.
End of explanation
"""
# align='mid' centers the bars; two colors for negative/positive values.
df.style.bar(subset=['A', 'B'], align='mid', color=['#d65f5f', '#5fba7d'])
"""
Explanation: New in version 0.20.0 is the ability to customize further the bar chart: You can now have the df.style.bar be centered on zero or midpoint value (in addition to the already existing way of having the min value at the left side of the cell), and you can pass a list of [color_negative, color_positive].
Here's how you can change the above with the new align='mid' option:
End of explanation
"""
import pandas as pd
from IPython.display import HTML
# Test series covering the three sign regimes the align options differ on.
test1 = pd.Series([-100,-60,-30,-20], name='All Negative')
test2 = pd.Series([10,20,50,100], name='All Positive')
test3 = pd.Series([-10,-5,0,90], name='Both Pos and Neg')
# Build a comparison table: one row per align option, one cell per series.
# FIX: the original opened the body with a stray closing tag (</tbody>);
# it must be an opening <tbody> to produce valid HTML.
head = """
<table>
<thead>
<th>Align</th>
<th>All Negative</th>
<th>All Positive</th>
<th>Both Neg and Pos</th>
</thead>
<tbody>
"""
aligns = ['left','zero','mid']
for align in aligns:
    row = "<tr><th>{}</th>".format(align)
    for serie in [test1,test2,test3]:
        s = serie.copy()
        s.name=''
        # Render each mini bar-chart table into its own cell.
        row += "<td>{}</td>".format(s.to_frame().style.bar(align=align,
                                                           color=['#d65f5f', '#5fba7d'],
                                                           width=100).render()) #testn['width']
    row += '</tr>'
    head += row
head+= """
</tbody>
</table>"""
HTML(head)
"""
Explanation: The following example aims to give a highlight of the behavior of the new align options:
End of explanation
"""
# Export the style functions from one Styler and reuse them on another frame.
df2 = -df
style1 = df.style.applymap(color_negative_red)
style1
style2 = df2.style
style2.use(style1.export())
style2
"""
Explanation: Sharing Styles
Say you have a lovely style built up for a DataFrame, and now you want to apply the same style to a second DataFrame. Export the style with df1.style.export, and import it on the second DataFrame with df1.style.set
End of explanation
"""
# Temporarily set display.precision while building the styled object.
with pd.option_context('display.precision', 2):
    html = (df.style
            .applymap(color_negative_red)
            .apply(highlight_max))
html
"""
Explanation: Notice that you're able to share the styles even though they're data aware. The styles are re-evaluated on the new DataFrame they've been used upon.
Other Options
You've seen a few methods for data-driven styling.
Styler also provides a few other options for styles that don't depend on the data.
precision
captions
table-wide styles
hiding the index or columns
Each of these can be specified in two ways:
A keyword argument to Styler.__init__
A call to one of the .set_ or .hide_ methods, e.g. .set_caption or .hide_columns
The best method to use depends on the context. Use the Styler constructor when building many styled DataFrames that should all share the same properties. For interactive use, the .set_ and .hide_ methods are more convenient.
Precision
You can control the precision of floats using pandas' regular display.precision option.
End of explanation
"""
# Equivalent precision control via the Styler API instead of the option.
df.style\
    .applymap(color_negative_red)\
    .apply(highlight_max)\
    .set_precision(2)
"""
Explanation: Or through a set_precision method.
End of explanation
"""
# Add a table caption alongside data-driven styling.
df.style.set_caption('Colormaps, with a caption.')\
    .background_gradient(cmap=cm)
"""
Explanation: Setting the precision only affects the printed number; the full-precision values are always passed to your style functions. You can always use df.round(2).style if you'd prefer to round from the start.
Captions
Regular table captions can be added in a few ways.
End of explanation
"""
from IPython.display import HTML
def hover(hover_color="#ffff99"):
    """Table-style dict that highlights the hovered row with *hover_color*."""
    props = [("background-color", "%s" % hover_color)]
    return {"selector": "tr:hover", "props": props}
# A list of table styles: row hover, header font, and caption placement.
styles = [
    hover(),
    dict(selector="th", props=[("font-size", "150%"),
                               ("text-align", "center")]),
    dict(selector="caption", props=[("caption-side", "bottom")])
]
html = (df.style.set_table_styles(styles)
        .set_caption("Hover to highlight."))
html
"""
Explanation: Table Styles
The next option you have are "table styles".
These are styles that apply to the table as a whole, but don't look at the data.
Certain stylings, including pseudo-selectors like :hover, can only be used this way.
End of explanation
"""
# Suppress the index / specific columns from rendering.
# (Only the last expression displays in a notebook cell.)
df.style.hide_index()
df.style.hide_columns(['C','D'])
"""
Explanation: table_styles should be a list of dictionaries.
Each dictionary should have the selector and props keys.
The value for selector should be a valid CSS selector.
Recall that all the styles are already attached to an id, unique to
each Styler. This selector is in addition to that id.
The value for props should be a list of tuples of ('attribute', 'value').
table_styles are extremely flexible, but not as fun to type out by hand.
We hope to collect some useful ones either in pandas, or preferably in a new package that builds on top of the tools here.
Hiding the Index or Columns
The index can be hidden from rendering by calling Styler.hide_index. Columns can be hidden from rendering by calling Styler.hide_columns and passing in the name of a column, or a slice of columns.
End of explanation
"""
from IPython.html import widgets
@widgets.interact
def f(h_neg=(0, 359, 1), h_pos=(0, 359), s=(0., 99.9), l=(0., 99.9)):
    # Interactively rebuild the diverging palette from hue/sat/lightness sliders.
    return df.style.background_gradient(
        cmap=sns.palettes.diverging_palette(h_neg=h_neg, h_pos=h_pos, s=s, l=l,
                                            as_cmap=True)
    )
def magnify():
    """Table styles for a dense heatmap: tiny cells that grow on hover."""
    return [
        # Shrink headers and remove cell padding so the table fits on screen.
        {"selector": "th", "props": [("font-size", "4pt")]},
        {"selector": "td", "props": [("padding", "0em 0em")]},
        # Enlarge the hovered header / cell for readability.
        {"selector": "th:hover", "props": [("font-size", "12pt")]},
        {"selector": "tr:hover td:hover",
         "props": [("max-width", "200px"), ("font-size", "12pt")]},
    ]
# Reproducible demo data: a 20x25 cumulative random walk.
np.random.seed(25)
# FIX: the original read `cmap = cmap=sns.diverging_palette(...)` -- an
# accidental chained assignment; a single binding is intended.
cmap = sns.diverging_palette(5, 250, as_cmap=True)
bigdf = pd.DataFrame(np.random.randn(20, 25)).cumsum()
# Tiny cells + the magnify() table styles give a hover-to-zoom heatmap.
bigdf.style.background_gradient(cmap, axis=1)\
    .set_properties(**{'max-width': '80px', 'font-size': '1pt'})\
    .set_caption("Hover to magnify")\
    .set_precision(2)\
    .set_table_styles(magnify())
"""
Explanation: CSS Classes
Certain CSS classes are attached to cells.
Index and Column names include index_name and level<k> where k is its level in a MultiIndex
Index label cells include
row_heading
row<n> where n is the numeric position of the row
level<k> where k is the level in a MultiIndex
Column label cells include
col_heading
col<n> where n is the numeric position of the column
level<k> where k is the level in a MultiIndex
Blank cells include blank
Data cells include data
Limitations
DataFrame only (use Series.to_frame().style)
The index and columns must be unique
No large repr, and performance isn't great; this is intended for summary DataFrames
You can only style the values, not the index or columns
You can only apply styles, you can't insert new HTML entities
Some of these will be addressed in the future.
Terms
Style function: a function that's passed into Styler.apply or Styler.applymap and returns values like 'css attribute: value'
Builtin style functions: style functions that are methods on Styler
table style: a dictionary with the two keys selector and props. selector is the CSS selector that props will apply to. props is a list of (attribute, value) tuples. A list of table styles passed into Styler.
Fun stuff
Here are a few interesting examples.
Styler interacts pretty well with widgets. If you're viewing this online instead of running the notebook yourself, you're missing out on interactively adjusting the color palette.
End of explanation
"""
df.style.\
applymap(color_negative_red).\
apply(highlight_max).\
to_excel('styled.xlsx', engine='openpyxl')
"""
Explanation: Export to Excel
New in version 0.20.0
<span style="color: red">Experimental: This is a new feature and still under development. We'll be adding features and possibly making breaking changes in future releases. We'd love to hear your feedback.</span>
Some support is available for exporting styled DataFrames to Excel worksheets using the OpenPyXL or XlsxWriter engines. CSS2.2 properties handled include:
background-color
border-style, border-width, border-color and their {top, right, bottom, left variants}
color
font-family
font-style
font-weight
text-align
text-decoration
vertical-align
white-space: nowrap
Only CSS2 named colors and hex colors of the form #rgb or #rrggbb are currently supported.
The following pseudo CSS properties are also available to set excel specific style properties:
number-format
End of explanation
"""
from jinja2 import Environment, ChoiceLoader, FileSystemLoader
from IPython.display import HTML
from pandas.io.formats.style import Styler
"""
Explanation: A screenshot of the output:
Extensibility
The core of pandas is, and will remain, its "high-performance, easy-to-use data structures".
With that in mind, we hope that DataFrame.style accomplishes two goals
Provide an API that is pleasing to use interactively and is "good enough" for many tasks
Provide the foundations for dedicated libraries to build on
If you build a great library on top of this, let us know and we'll link to it.
Subclassing
If the default template doesn't quite suit your needs, you can subclass Styler and extend or override the template.
We'll show an example of extending the default template to insert a custom header before each table.
End of explanation
"""
# Show the custom Jinja template that extends the default Styler template.
with open("templates/myhtml.tpl") as f:
    print(f.read())
"""
Explanation: We'll use the following template:
End of explanation
"""
class MyStyler(Styler):
    # Jinja environment that searches our local "templates" directory first,
    # then falls back to the default pandas loader (needed because
    # myhtml.tpl extends the default template).
    env = Environment(
        loader=ChoiceLoader([
            FileSystemLoader("templates"), # contains ours
            Styler.loader, # the default
        ])
    )
    template = env.get_template("myhtml.tpl")
"""
Explanation: Now that we've created a template, we need to set up a subclass of Styler that
knows about it.
End of explanation
"""
MyStyler(df)
"""
Explanation: Notice that we include the original loader in our environment's loader.
That's because we extend the original template, so the Jinja environment needs
to be able to find it.
Now we can use that custom styler. Its __init__ takes a DataFrame.
End of explanation
"""
# Template keyword arguments can be supplied at render time.
HTML(MyStyler(df).render(table_title="Extending Example"))
"""
Explanation: Our custom template accepts a table_title keyword. We can provide the value in the .render method.
End of explanation
"""
# One-liner alternative to subclassing Styler by hand.
EasyStyler = Styler.from_custom_template("templates", "myhtml.tpl")
EasyStyler(df)
"""
Explanation: For convenience, we provide the Styler.from_custom_template method that does the same as the custom subclass.
End of explanation
"""
# Display the documented structure of the default template.
with open("templates/template_structure.html") as f:
    structure = f.read()
HTML(structure)
"""
Explanation: Here's the template structure:
End of explanation
"""
# Hack to get the same style in the notebook as the
# main site. This is hidden in the docs.
from IPython.display import HTML
with open("themes/nature_with_gtoc/static/nature.css_t") as f:
    css = f.read()
HTML('<style>{}</style>'.format(css))
"""
Explanation: See the template in the GitHub repo for more details.
End of explanation
"""
|
csaladenes/blog | airports/airportia_jo_dest_parser.ipynb | mit | for i in locations:
print i
if i not in sch:sch[i]={}
#march 11-24 = 2 weeks
for d in range (11,25):
if d not in sch[i]:
try:
url=airportialinks[i]
full=url+'departures/201703'+str(d)
m=requests.get(full).content
sch[i][full]=pd.read_html(m)[0]
#print full
except: pass #print 'no tables',i,d
# Sanity check: daily departure counts for Queen Alia Intl (AMM).
for i in range(11,25):
    testurl=u'https://www.airportia.com/jordan/queen-alia-international-airport/departures/201703'+str(i)
    print 'nr. of flights on March',i,':',len(sch['AMM'][testurl])
# Spot-check one day against the source: Frankfurt departures on March 18.
testurl=u'https://www.airportia.com/jordan/queen-alia-international-airport/departures/20170318'
k=sch['AMM'][testurl]
k[k['To']=='Frankfurt FRA']
"""
Explanation: record schedules for 2 weeks, then augment count with weekly flight numbers.
seasonal and seasonal charter will count as once per week for 3 months, so 12/52 per week. TGM separate, since its history is in the past.
End of explanation
"""
# Melt the per-airport, per-day tables into one flat frame.
# NOTE: `df` is reused here as a loop temporary; pd.concat inside the loop
# is quadratic -- fine at this scale, but collect-then-concat would scale better.
mdf=pd.DataFrame()
for i in sch:
    for d in sch[i]:
        # Keep only the first three columns minus the index-like first one.
        df=sch[i][d].drop(sch[i][d].columns[3:],axis=1).drop(sch[i][d].columns[0],axis=1)
        df['From']=i
        df['Date']=d
        mdf=pd.concat([mdf,df])
# Split "City CODE" strings on the last space into city and airport code.
mdf['City']=[i[:i.rfind(' ')] for i in mdf['To']]
mdf['Airport']=[i[i.rfind(' ')+1:] for i in mdf['To']]
# Same Frankfurt spot-check as above, now on the merged frame.
k=mdf[mdf['Date']==testurl]
k[k['To']=='Frankfurt FRA']
"""
Explanation: sch checks out with source
End of explanation
"""
# Persist the merged schedule and the distinct airlines/cities (Python 2 file()).
file("mdf_jo_dest.json",'w').write(json.dumps(mdf.reset_index().to_json()))
len(mdf)
airlines=set(mdf['Airline'])
cities=set(mdf['City'])
file("cities_jo_dest.json",'w').write(json.dumps(list(cities)))
file("airlines_jo_dest.json",'w').write(json.dumps(list(airlines)))
# Geocode each city once; ambiguous names are disambiguated by hand.
citycoords={}
for i in cities:
    if i not in citycoords:
        if i==u'Birmingham': z='Birmingham, UK'
        elif i==u'Valencia': z='Valencia, Spain'
        elif i==u'Naples': z='Naples, Italy'
        elif i==u'St. Petersburg': z='St. Petersburg, Russia'
        elif i==u'Bristol': z='Bristol, UK'
        elif i==u'Beida': z='Bayda, Libya'
        else: z=i
        citycoords[i]=Geocoder(apik).geocode(z)
        print i
# Reduce the geocoder results to coordinates + country and persist.
citysave={}
for i in citycoords:
    citysave[i]={"coords":citycoords[i][0].coordinates,
                 "country":citycoords[i][0].country}
file("citysave_jo_dest.json",'w').write(json.dumps(citysave))
"""
|
drericstrong/Blog | 20170419_BootstrappingTheCentralLimitTheorem.ipynb | agpl-3.0 | import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
# Read the data and plot a histogram
df = pd.read_csv('20170419_data_bootstrap.csv', header=None)
df.hist()
plt.title('Data from Unknown Distribution')
plt.xlabel('Value');
"""
Explanation: Continuing on the previous blog post, this post will demonstrate the Central Limit Theorem on a dataset that was sampled from a series of random distributions. I won't give away exactly how I generated the dataset, but I will tell you that the random distributions I used to generate the data were definitely not Gaussian. However, as we found last time, this doesn't matter. The Central Limit Theorem even applies to datasets where the sampling distribution is unknown.
First, let's load the dataset and look at a histogram of the values, which should clearly demonstrate that the dataset is not Gaussian.
End of explanation
"""
import numpy as np
# random.choice takes a data array, and you provide it with
# the size of samples that you want, with replacement or
# without replacement
boot = np.random.choice(df[0].values, 100, replace=True)
# Let's look at the histogram of the data again, to compare
# it to the first figure.
# NOTE(review): hist(normed=...) was removed in matplotlib 3;
# use density=True on modern versions.
boot_df = pd.DataFrame(boot)
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
plt.suptitle('Sampling with Replacement, 100 Samples')
ax1.hist(boot_df[0].values, normed=1)
ax1.set_xlabel('Bootstrapped Data')
ax2.hist(df[0].values, normed=1)
ax2.set_xlabel('Original Data');
"""
Explanation: According to the figure above, the distribution is heavily skewed towards the right, with a much longer tail on the left side. It does not appear to be Gaussian.
As a reminder from the previous post, the Central Limit Theorem states:
The arithmetic mean of a sufficiently large number of samples taken from an independent random distribution will be approximately normally distributed, regardless of the underlying distribution.
We can still demonstrate the Central Limit Theorem with this dataset, even though we don't know the initial random distribution that the data was sampled from. This will be accomplished using a statistical technique called bootstrapping, which is often used to obtain confidence intervals when the initial distribution is unknown.
Bootstrapping works by randomly sampling the known data, each time replacing the data point so that it might be selected again ("random sampling with replacement"). Why might this process work for calculating confidence intervals? You can think of a dataset as a "discrete" type of distribution. Even if we don't know the original random distribution that it came from, samples taken from the dataset will still follow some exploitable rules, such as the Central Limit Theorem.
Let's demonstrate this on our dataset above. First, let's sample the dataset 100 times (with replacement). Luckily, numpy has a function that makes this process very easy:
End of explanation
"""
import numpy as np
# random.choice takes a data array, and you provide it with
# the size of samples that you want, with replacement or
# without replacement
# Same experiment as above, but with a sample size comparable to the
# original dataset (~10000 points), so the histograms should match closely.
boot2 = np.random.choice(df[0].values, 10000, replace=True)
# Let's look at the histogram of the data again, to compare
# it to the first figure.
boot_df2 = pd.DataFrame(boot2)
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
plt.suptitle('Sampling with Replacement, 10000 Samples')
ax1.hist(boot_df2[0].values, normed=1)
ax1.set_xlabel('Bootstrapped Data')
ax2.hist(df[0].values, normed=1)
ax2.set_xlabel('Original Data');
"""
Explanation: Notice that the histogram of the bootstrapped data looks similar to the original data, but it's certainly not perfect. The original dataset had slightly over 10000 data points, but we only took 100 samples. Let's try the same process as above, but with 10000 samples:
End of explanation
"""
num_hist = int(1e6)
# Run the Monte Carlo simulation with the above number of histories
# NOTE(review): 1e6 histories x 10,000 draws each = 1e10 samples; this loop
# will be very slow. Consider fewer histories or a vectorized/chunked draw.
mc_boot = []
for _ in range(num_hist):
    # Random sampling with replacement
    boot3 = np.random.choice(df[0].values, 10000, replace=True)
    # Save the mean of the random sampling in the mc_boot array
    mc_boot.append(np.mean(boot3))
# Plot the results: by the CLT the sample means should look Gaussian.
mc_df = pd.DataFrame(mc_boot)
mc_df.hist();
plt.title('Distribution of the Sampling Means')
plt.xlabel('Values');
"""
Explanation: Now the bootstrapped data looks very similar to the original data. In general, it's preferable to choose at least the same order of magnitude of samples as the original dataset, but this is not advisable in all circumstances.
Finally, let's apply Monte Carlo simulation to this demonstration. According to the Central Limit Theorem, if we take the mean of the bootstrapping process above, many times, we should see that the distribution of the means looks Gaussian, despite the fact that our original dataset it clearly not Gaussian:
End of explanation
"""
|
tsarouch/data_science_references_python | regression/regression_tree_and_max_depth.ipynb | gpl-2.0 | import sklearn.datasets as datasets
import pandas as pd
iris=datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df.head(2)
"""
Explanation: Regression based on Iris dataset
We ll use the Iris dataset in the regression setup
- not use the target variable (typicall classification case)
- use petal width (cm) as dependent variable using others as independent
End of explanation
"""
# Regress petal width on the other three measurements.
independent_vars = ['sepal length (cm)','sepal width (cm)', 'petal length (cm)']
dependent_var = 'petal width (cm)'
X = df[independent_vars]
y = df[dependent_var]
from sklearn import tree
model = tree.DecisionTreeRegressor()
model.fit(X,y)
# get feature importances
importances = model.feature_importances_
pd.Series(importances, index=independent_vars)
"""
Explanation: Regression with Tree Classifier
End of explanation
"""
from sklearn import model_selection
# 10-fold CV; sklearn reports MSE negated so that bigger is always better.
results = model_selection.cross_val_score(tree.DecisionTreeRegressor(), X, y, cv=10, scoring='neg_mean_squared_error')
# FIX: the original `print("...") % (...)` only parses as intended on
# Python 2; formatting inside the call works on both Python 2 and 3.
print("MSE: %.3f (%.3f)" % (results.mean(), results.std()))
"""
Explanation: Score of Regression
Some evaluation metrics (like mean squared error) are naturally descending scores (the smallest score is best)
</br>
This is important to note, because some scores will be reported as negative that by definition can never be negative.
</br>
In order to keep this clear:
metrics which measure the distance between the model and the data, like metrics.mean_squared_error,
are available as neg_mean_squared_error which return the negated value of the metric.
End of explanation
"""
import matplotlib.pyplot as plt
from sklearn import model_selection

# Score a decision-tree regressor for each max_depth from 1 to 24.
# FIX: the original scores.append(...) was syntactically invalid (keyword
# arguments passed to append, a duplicated line, and a missing comma);
# collect one aggregate CV score per depth instead.
scores = []
depths = []
for depth in range(1, 25):
    cv_scores = model_selection.cross_val_score(
        tree.DecisionTreeRegressor(max_depth=depth), X, y,
        cv=10, scoring='neg_mean_squared_error')
    scores.append(cv_scores.mean())
    depths.append(depth)
_ = pd.DataFrame(data=scores, index=depths, columns=['score']).plot()
# looks like a best depth around 5 is the best choice for regression
"""
Explanation: Regression Performance over tree depth
End of explanation
"""
|
JKeun/project-02-watcha | 01_crawling/05_additional_feature(raw_df2, lee_df).ipynb | mit | import requests
from bs4 import BeautifulSoup
import json
import pandas as pd
# Load the previously crawled list of Watcha title URLs.
url_df = pd.read_csv('./resource/url_df.csv')
url_df
# Quick sanity check of the title slugs.
for title in url_df['title_url']:
    print(title)
url_df[400:]
"""
Explanation: title_url 돌려 feature 뽑기
End of explanation
"""
# Crawl Watcha detail pages for titles 0-417 (the crawler crashed on row
# 418, so later rows are handled in separate cells below).
df1 = pd.DataFrame(columns=['DESC', '감독', '배우', '평가자수', '보고싶어요수',
                            '코멘트수', '별점분포'])
for title in url_df['title_url'][:418]:
    response = requests.get("https://watcha.net/mv/{title_url}".format(title_url=title))
    dom = BeautifulSoup(response.content, 'html.parser')
    director = dom.select('li.movie-person.director a.kor-name')[1].text # director
    actor_list = dom.select('li.movie-person.actor .kor-name')
    actors = []
    for actor in actor_list[1:6:2]:
        actor = actor.text
        actors.append(actor) # top 3 actors
    rate_count = dom.select('div.cumulative-count .rate')[0].text # number of raters
    wish_count = dom.select('div.cumulative-count .wish')[0].text # "want to see" count
    review_count = dom.select('div.cumulative-count .review')[0].text # comment count
    desc = dom.select('div.desc')[0].text # DESC string including genre
    rating_dist = dom.select_one('div.rating-chart-wrapper').get('data-distribution') # rating distribution
    df1.loc[len(df1)] = [desc, director, actors, rate_count, wish_count, review_count, rating_dist]
df1
"""
Explanation: 추가 feature dataframe
작업시간 약 10분걸림
전체로하니 418개 나옴 (director부분에서 에러)
인덱스 419번째 영화 : 반지의제왕2 에서 문제발생 => 이것만 따로 떼어내서 합침
실수때문에 418번쨰 영화 : 타이타닉도 따로 뗴어내서 합침
1. df1 : 0~417번째까지
End of explanation
"""
# Same crawl as the df1 loop above, for titles 420 onward (418/419 are
# handled individually below).
df2 = pd.DataFrame(columns=['DESC', '감독', '배우', '평가자수', '보고싶어요수',
                            '코멘트수', '별점분포'])
for title in url_df['title_url'][420:]:
    response = requests.get("https://watcha.net/mv/{title_url}".format(title_url=title))
    dom = BeautifulSoup(response.content, 'html.parser')
    director = dom.select('li.movie-person.director a.kor-name')[1].text # director
    actor_list = dom.select('li.movie-person.actor .kor-name')
    actors = []
    for actor in actor_list[1:6:2]:
        actor = actor.text
        actors.append(actor) # top 3 actors
    rate_count = dom.select('div.cumulative-count .rate')[0].text # number of raters
    wish_count = dom.select('div.cumulative-count .wish')[0].text # "want to see" count
    review_count = dom.select('div.cumulative-count .review')[0].text # comment count
    desc = dom.select('div.desc')[0].text # DESC string including genre
    rating_dist = dom.select_one('div.rating-chart-wrapper').get('data-distribution') # rating distribution
    df2.loc[len(df2)] = [desc, director, actors, rate_count, wish_count, review_count, rating_dist]
df2
"""
Explanation: 2. df2 : 420~끝까지
End of explanation
"""
# Row 418 (The Two Towers) crashed the crawler (director selector error),
# so its data is entered by hand.
# NOTE(review): the actor field is a string literal here vs. a Python list
# in the crawled frames -- confirm downstream handling treats both.
df418 = pd.DataFrame(data=[[
    "The Lord Of The Rings: The Two Towers, 2002, 뉴질랜드, 미국, 판타지, 액션, 12세 관람가, 2시간 57분",
    '피터 잭슨',
    '[일라이저 우드, 비고 모텐슨, 올랜도 볼룸]',
    '384699', '4941', '1190',
    '{"1":1095,"2":5860,"3":1134,"4":12084,"5":4210,"6":51302,"7":22580,"8":121204,"9":25916,"10":139314}'
]],
    columns=['DESC', '감독', '배우', '평가자수', '보고싶어요수', '코멘트수', '별점분포'])
df418
"""
Explanation: 3. df418 : 418번 sample(반지의제왕2) - (오류가발생한샘플)직접만듦
End of explanation
"""
# Re-crawl just row 419 (Titanic), which was skipped by mistake earlier.
df419 = pd.DataFrame(columns=['DESC', '감독', '배우', '평가자수', '보고싶어요수',
                              '코멘트수', '별점분포'])
for title in url_df['title_url'][419:420]:
    response = requests.get("https://watcha.net/mv/{title_url}".format(title_url=title))
    dom = BeautifulSoup(response.content, 'html.parser')
    director = dom.select('li.movie-person.director a.kor-name')[1].text # director
    actor_list = dom.select('li.movie-person.actor .kor-name')
    actors = []
    for actor in actor_list[1:6:2]:
        actor = actor.text
        actors.append(actor) # top 3 actors
    rate_count = dom.select('div.cumulative-count .rate')[0].text # number of raters
    wish_count = dom.select('div.cumulative-count .wish')[0].text # "want to see" count
    review_count = dom.select('div.cumulative-count .review')[0].text # comment count
    desc = dom.select('div.desc')[0].text # DESC string including genre
    rating_dist = dom.select_one('div.rating-chart-wrapper').get('data-distribution') # rating distribution
    df419.loc[len(df419)] = [desc, director, actors, rate_count, wish_count, review_count, rating_dist]
df419
"""
Explanation: 4. df419 : 419번 sample(타이타닉)-실수로 인덱싱못한부분
End of explanation
"""
# Stitch the three partial crawls back together in original row order.
df = df1.append([df418, df419, df2], ignore_index=True)
df[400:]
"""
Explanation: 완성된 추가 feature dataframe = df
append
End of explanation
"""
# Fetch the top-liked comment per title from the comment-list JSON endpoint.
# NOTE(review): presumably data[0] is always critic Lee Dong-jin's comment
# because type=like sorts by likes -- confirm this holds for every title.
lee_df = pd.DataFrame(columns=['name', '이동진 별점', '이동진 코멘트'])
for code in url_df['code']:
    json_response = requests.get(
        "https://watcha.net/comment/list?unique_id={code_list}&start_index=0&count=5&type=like".format(
            code_list = code))
    comment_dict = json.loads(json_response.text)
    lee = comment_dict.get('data')[0].get('username')
    lee_rating = comment_dict.get('data')[0].get('rating')
    lee_comment = comment_dict.get('data')[0].get('text')
    lee_df.loc[len(lee_df)] = [lee, lee_rating, lee_comment]
lee_df
"""
Explanation: 이동진 평가 dataframe = lee_df
작업시간 약 5분
End of explanation
"""
# Persist both frames as UTF-8 CSV (absolute Windows path of the author).
path='C:/Users/JKEUN/ipython notebook/project-02-watcha/resource/'
df.to_csv(path+'raw_df2.csv', index=False, encoding='utf8')
path='C:/Users/JKEUN/ipython notebook/project-02-watcha/resource/'
lee_df.to_csv(path+'lee_df.csv', index=False, encoding='utf8')
"""
Explanation: 2개의 dataframe => csv파일로 저장
df, lee_df
End of explanation
"""
|
flohorovicic/pynoddy | docs/notebooks/Likelihood_extraction.ipynb | gpl-2.0 | from IPython.core.display import HTML
css_file = 'pynoddy.css'
HTML(open(css_file, "r").read())
import sys, os
import matplotlib.pyplot as plt
# adjust some settings for matplotlib
from matplotlib import rcParams
# print rcParams
rcParams['font.size'] = 15
# determine path of repository to set paths corretly below
repo_path = os.path.realpath('../..')
import pynoddy.history
import numpy as np
%matplotlib inline
# Combined: model generation and output vis to test:
history = "simple_model.his"
output_name = "simple_out"
#
# A general note: the 'reload' statements are only important
# for development purposes (when modules were chnaged), but not
# in required for normal execution.
#
reload(pynoddy.history)
reload(pynoddy.events)
# create pynoddy object
nm = pynoddy.history.NoddyHistory()
# add stratigraphy
strati_options = {'num_layers' : 8,
'layer_names' : ['layer 1', 'layer 2', 'layer 3',
'layer 4', 'layer 5', 'layer 6',
'layer 7', 'layer 8'],
'layer_thickness' : [1500, 500, 500, 500, 500, 500, 500, 500]}
nm.add_event('stratigraphy', strati_options )
nm.write_history(history)
# Compute the model
reload(pynoddy)
pynoddy.compute_model(history, output_name)
# Plot output
import pynoddy.output
reload(pynoddy.output)
nout = pynoddy.output.NoddyOutput(output_name)
nout.plot_section('y', layer_labels = strati_options['layer_names'][::-1],
colorbar = True, title="",
savefig = False, fig_filename = "ex01_strati.eps")
"""
Explanation: Model set-up to simulate apparent thickness likelihood at drillhole location
End of explanation
"""
# Load the previously written history into an Experiment object and plot
# it; then rebuild the model with an (initially zero-rotation) tilt event
# appended, recompute, and re-load it as an Experiment.
reload(pynoddy.history)
reload(pynoddy.events)
import pynoddy.experiment
reload(pynoddy.experiment)
ex1 = pynoddy.experiment.Experiment(history)
ex1.plot_section()
reload(pynoddy.history)
reload(pynoddy.events)
# rebuild from scratch: same stratigraphy as before
nm = pynoddy.history.NoddyHistory()
# add stratigraphy
strati_options = {'num_layers' : 8,
                  'layer_names' : ['layer 1', 'layer 2', 'layer 3',
                                   'layer 4', 'layer 5', 'layer 6',
                                   'layer 7', 'layer 8'],
                  'layer_thickness' : [1500, 500, 500, 500, 500,
                                       500, 500, 500]}
nm.add_event('stratigraphy', strati_options )
# tilt event centred in the model; rotation 0 so the model is unchanged
# until the rotation is adjusted in a later cell
tilt_options = {'name' : 'Tilt',
                'pos' : (4000, 3500, 5000),
                'rotation' : 0.,
                'plunge_direction' : 0,
                'plunge' : 0.}
nm.add_event('tilt', tilt_options)
nm.write_history(history)
# Compute the model
reload(pynoddy)
pynoddy.compute_model(history, output_name)
# Plot output
import pynoddy.output
reload(pynoddy.output)
nout = pynoddy.output.NoddyOutput(output_name)
nout.plot_section('x', layer_labels = strati_options['layer_names'][::-1],
                  colorbar = True, title="",
                  savefig = False, fig_filename = "ex01_strati.eps")
ex1 = pynoddy.experiment.Experiment(history)
ex1.plot_section()
"""
Explanation: Import in Experiment object
End of explanation
"""
# Set the rotation of the tilt event (event index 2) to 20 degrees and
# re-plot a y-section to show the tilted layers.
ex1.events[2].properties['Rotation'] = 20.
ex1.plot_section('y')
"""
Explanation: Adjust properties of tilt event:
End of explanation
"""
ex1.freeze()
"""
Explanation: Now, we can define a stochastic variable for the tilt rotation:
First step: "freeze" current state as base model:
End of explanation
"""
ex1.base_events[2].properties
"""
Explanation: This method stores the current events and properties in a "base_events" attribute which is not changed in the following experiments:
End of explanation
"""
ex1.set_random_seed(12345)
"""
Explanation: Set random seed to ensure reproducibility:
End of explanation
"""
# Define the stochastic parameter: the tilt rotation (event 2) is drawn
# from a normal distribution with sigma = 10 degrees around its base value.
param_stats = [{'event' : 2,
                'parameter': 'Rotation',
                'stdev': 10.0,
                'type': 'normal'}
               ]

ex1.set_parameter_statistics(param_stats)
# one random realisation; show the drawn event properties and the section
ex1.random_draw()
ex1.events[2].properties
ex1.plot_section(colorbar=True, colorbar_orientation='horizontal')
# extract only layer 4 as a boolean block:
l4 = ex1.get_section('y').block[:,:,:] == 4
# Fix: imshow's `origin` accepts only 'upper' or 'lower'; the original
# 'lower left' raises ValueError on modern matplotlib (older versions
# silently treated any non-'upper' value as 'lower').
plt.imshow(l4[:,0,:].T, origin = 'lower', cmap = 'gray_r')
"""
Explanation: Define parameter distributions for stochastic properties:
End of explanation
"""
# Monte-Carlo estimate of P(unit 4) per cell: draw n_draws random tilt
# rotations, count how often each cell is unit 4, and normalise.
# change resolution to increase simulation speed:
resolution = 100
ex1.change_cube_size(resolution)
# initialise output variable with the block shape of one section
tmp = ex1.get_section('y')
prob_4 = np.zeros_like(tmp.block[:,:,:])
n_draws = 1000
# now: generate random models and accumulate indicator blocks of layer '4'
for _ in range(n_draws):
    ex1.random_draw()
    tmp = ex1.get_section('y', resolution = resolution)
    prob_4 += (tmp.block[:,:,:] == 4)

# Normalise counts to a probability estimate in [0, 1]
prob_4 = prob_4 / float(n_draws)

fig = plt.figure(figsize = (12,8))
ax = fig.add_subplot(111)
# Fix: 'lower' (not 'lower left') is the only valid "flipped" value for
# imshow's `origin`; modern matplotlib rejects 'lower left' outright.
ax.imshow(prob_4.transpose()[:,0,:],
          origin = 'lower',
          interpolation = 'none')
plt.title("Estimated probability of unit 4")
plt.xlabel("x (E-W)")
plt.ylabel("z")
"""
Explanation: Let's set up a simple uncertainty estimation from scratch:
End of explanation
"""
plt.plot(prob_4[20,:,:][0], np.arange(0,50,1))
"""
Explanation: As a first test, we can now extract a 1-D profile:
End of explanation
"""
reload(pynoddy.experiment)
ex1 = pynoddy.experiment.Experiment(history)
"""
Explanation: Next step (homework for Alex): extract thickness and plot as histogram:
Extract information at sampling lines
End of explanation
"""
# Register a virtual drillhole ("sampling line") at (x, y) = (2500, 3500).
ex1.add_sampling_line(2500, 3500)
"""
Explanation: First step: add sampling line at a specified (x,y) location (note: default cube size: 1 m):
End of explanation
"""
# Plot the geology id along the sampling line against depth
# (0..5000 presumably matches the model height at 1 m cube size --
# TODO confirm against the model extent).
plt.plot(ex1.get_model_lines(), np.arange(0,5000,1))
"""
Explanation: Extract information at the position of the sampling line:
End of explanation
"""
# Full model section for visual comparison with the 1-D profile above.
ex1.plot_section()
"""
Explanation: For comparison, see the complete model section:
End of explanation
"""
reload(pynoddy.history)
reload(pynoddy.events)
nm = pynoddy.history.NoddyHistory()
# add stratigraphy
strati_options = {'num_layers' : 8,
'layer_names' : ['layer 1', 'layer 2', 'layer 3',
'layer 4', 'layer 5', 'layer 6',
'layer 7', 'layer 8'],
'layer_thickness' : [1500, 500, 500, 500, 500,
500, 500, 500]}
nm.add_event('stratigraphy', strati_options )
tilt_options = {'name' : 'Tilt',
'pos' : (4000, 3500, 5000),
'rotation' : 0.,
'plunge_direction' : 0,
'plunge' : 0.}
nm.add_event('tilt', tilt_options)
nm.write_history(history)
# Compute the model
reload(pynoddy)
pynoddy.compute_model(history, output_name)
# Plot output
import pynoddy.output
reload(pynoddy.output)
nout = pynoddy.output.NoddyOutput(output_name)
nout.plot_section('x', layer_labels = strati_options['layer_names'][::-1],
colorbar = True, title="",
savefig = False, fig_filename = "ex01_strati.eps")
"""
Explanation: Homework 2 for Alex: extract value of one layer and generate statistics with dipping layers
Change layer thickness
Next step: change layer thickness in pynoddy model
End of explanation
"""
# Re-load the previously written history file and recompute, to check
# that a saved model round-trips through pynoddy.
reload(pynoddy.history)
reload(pynoddy.events)
history = 'simple_model.his'
nm = pynoddy.history.NoddyHistory(history)
# re-save under a new file name before recomputing
history = "test.his"
nm.write_history(history)
# Compute the model
reload(pynoddy)
pynoddy.compute_model(history, output_name)
# NOTE(review): 'strati' is never assigned in this cell, so this line
# raises NameError as written; it presumably should reference
# nm.events[1] (the stratigraphy event) -- verify against the pynoddy API.
strati.layers[-1].property_lines['Remanent Magnetization']
"""
Explanation: Load history back and check possibility to adjust thicknesses:
End of explanation
"""
nout = pynoddy.output.NoddyOutput(output_name)
nout.plot_section('x', layer_labels = strati_options['layer_names'][::-1],
colorbar = True, title="",
savefig = False, fig_filename = "ex01_strati.eps")
reload(pynoddy.history)
reload(pynoddy.events)
history = 'simple_model.his'
nm = pynoddy.history.NoddyHistory(history)
"""
Explanation: Test change of layer thicknesses:
End of explanation
"""
# Change the 'Height' of layer index 5 in the stratigraphy event (which
# effectively changes layer thicknesses), then recompute and re-plot.
nm.events[1].layers[5].properties['Height'] = 4200.
nm.write_history(history)
# Compute the model
reload(pynoddy)
pynoddy.compute_model(history, output_name)
# NOTE(review): strati_options refers to the dict defined in an earlier
# cell; the labels are only meaningful if that cell ran in this session.
nout = pynoddy.output.NoddyOutput(output_name)
nout.plot_section('x', layer_labels = strati_options['layer_names'][::-1],
                  colorbar = True, title="",
                  savefig = False, fig_filename = "ex01_strati.eps")
"""
Explanation: Now: change height of one layer:
End of explanation
"""
|
BONSAMURAIS/bonsai | legacy-examples/Correspondences_table_example_1.ipynb | bsd-3-clause | import numpy as np
import pandas as pd
"""
Explanation: Example on the use of correspondence tables
In this simple example it is shown how a vector classified according to one classification is converted into another classification
The first classification has four categories: A, B, C, D
The second classification has three categories: 1, 2, 3
End of explanation
"""
# Correspondence matrix mapping classification 1 (rows A-D) onto
# classification 2 (columns 1-3); categories C and D both map to '3'.
# Fix: pd.DataFrame.from_items was deprecated in pandas 0.23 and removed
# in 1.0 -- build the frame directly from a row list instead.
CM = pd.DataFrame([[1, 0, 0],
                   [0, 1, 0],
                   [0, 0, 1],
                   [0, 0, 1]],
                  index=list('ABCD'), columns=['1', '2', '3'])
display (CM)
"""
Explanation: Let's create an arbitrary classification matrix (CM)
End of explanation
"""
# Row totals of the correspondence matrix: every row summing to 1 means
# the forward conversion (classification 1 -> 2) is unambiguous.
CM_tot2 = CM.sum(axis=1)
# Fix: the original aliased CM (C2 = CM), so adding the 'total' column
# silently mutated CM and broke the later np.dot(V1_A_transp, CM)
# (CM would gain a 4th column, a shape mismatch with columns ['1','2','3']).
# Work on an explicit copy instead.
C2 = CM.copy()
C2['total'] = CM_tot2
display (C2)
"""
Explanation: Notice that moving from the first classification to the second one is possible since the 'totals' of rows are all equal to 1 (see below the other way around)
End of explanation
"""
# Build a random demonstration vector (4 rows, one 'amount' column)
# classified under the first scheme, categories A-D.
V1 = np.random.randint(0, 10, size=(4, 1))
Class_A = list('ABCD')
V1_A = pd.DataFrame(V1, index=Class_A, columns=['amount'])
display (V1_A)
"""
Explanation: Let's create an arbitrary vector classified according to the first classification
End of explanation
"""
# Convert the vector from classification 1 (A-D) to classification 2
# (1-3) by right-multiplying its transpose (a 1x4 row vector) with the
# 4x3 correspondence matrix CM.
V1_A_transp=pd.DataFrame.transpose(V1_A)
# note: this only works while CM still has exactly the columns '1','2','3'
V1_B= pd.DataFrame((np.dot(V1_A_transp, CM)), index=['amount'], columns = ['1','2','3'])
display (V1_B)
"""
Explanation: This vector is converted into the second classification
End of explanation
"""
# Column totals of the correspondence matrix.  A column total != 1 means
# that category of classification 2 maps back to several categories of
# classification 1, so the reverse conversion (2 -> 1) is ambiguous.
sum_row = {col: CM[col].sum() for col in CM}
sum_rCM = pd.DataFrame(sum_row, index=["Total"])
# Fix: DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
# pd.concat is the supported replacement and is behaviorally equivalent here.
CM_tot = pd.concat([CM, sum_rCM])
display (CM_tot)
"""
Explanation: Moving from second classifcation to the second one may cause problems, since the "totals" of columns is not always 1.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/nerc/cmip6/models/hadgem3-gc31-hh/ocean.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'nerc', 'hadgem3-gc31-hh', 'ocean')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocean
MIP Era: CMIP6
Institute: NERC
Source ID: HADGEM3-GC31-HH
Topic: Ocean
Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing.
Properties: 133 (101 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:26
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Seawater Properties
3. Key Properties --> Bathymetry
4. Key Properties --> Nonoceanic Waters
5. Key Properties --> Software Properties
6. Key Properties --> Resolution
7. Key Properties --> Tuning Applied
8. Key Properties --> Conservation
9. Grid
10. Grid --> Discretisation --> Vertical
11. Grid --> Discretisation --> Horizontal
12. Timestepping Framework
13. Timestepping Framework --> Tracers
14. Timestepping Framework --> Baroclinic Dynamics
15. Timestepping Framework --> Barotropic
16. Timestepping Framework --> Vertical Physics
17. Advection
18. Advection --> Momentum
19. Advection --> Lateral Tracers
20. Advection --> Vertical Tracers
21. Lateral Physics
22. Lateral Physics --> Momentum --> Operator
23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
24. Lateral Physics --> Tracers
25. Lateral Physics --> Tracers --> Operator
26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
27. Lateral Physics --> Tracers --> Eddy Induced Velocity
28. Vertical Physics
29. Vertical Physics --> Boundary Layer Mixing --> Details
30. Vertical Physics --> Boundary Layer Mixing --> Tracers
31. Vertical Physics --> Boundary Layer Mixing --> Momentum
32. Vertical Physics --> Interior Mixing --> Details
33. Vertical Physics --> Interior Mixing --> Tracers
34. Vertical Physics --> Interior Mixing --> Momentum
35. Uplow Boundaries --> Free Surface
36. Uplow Boundaries --> Bottom Boundary Layer
37. Boundary Forcing
38. Boundary Forcing --> Momentum --> Bottom Friction
39. Boundary Forcing --> Momentum --> Lateral Friction
40. Boundary Forcing --> Tracers --> Sunlight Penetration
41. Boundary Forcing --> Tracers --> Fresh Water Forcing
1. Key Properties
Ocean key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean model code (NEMO 3.6, MOM 5.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the ocean.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the ocean component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Seawater Properties
Physical properties of seawater in ocean
2.1. Eos Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
"""
Explanation: 2.2. Eos Functional Temp
Is Required: TRUE Type: ENUM Cardinality: 1.1
Temperature used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
"""
Explanation: 2.3. Eos Functional Salt
Is Required: TRUE Type: ENUM Cardinality: 1.1
Salinity used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
"""
Explanation: 2.4. Eos Functional Depth
Is Required: TRUE Type: ENUM Cardinality: 1.1
Depth or pressure used in EOS for sea water ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2.5. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.6. Ocean Specific Heat
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Specific heat in ocean (cpocean) in J/(kg K)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.7. Ocean Reference Density
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Boussinesq reference density (rhozero) in kg / m3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Bathymetry
Properties of bathymetry in ocean
3.1. Reference Dates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Reference date of bathymetry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Type
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the bathymetry fixed in time in the ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Ocean Smoothing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any smoothing or hand editing of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Source
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe source of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Nonoceanic Waters
Non oceanic waters treatement in ocean
4.1. Isolated Seas
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how isolated seas is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. River Mouth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how river mouth mixing or estuaries specific treatment is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Software Properties
Software properties of ocean code
5.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Resolution
Resolution in the ocean grid
6.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.4. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.5. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.6. Is Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.7. Thickness Level 1
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Thickness of first surface ocean level (in meters)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Tuning Applied
Tuning methodology for ocean component
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process-oriented metrics, and any possible conflicts with parameterization-level tuning. In particular, describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the ocean component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Brief description of conservation methodology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in the ocean by the numerical schemes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Consistency Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Corrected Conserved Prognostic Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Set of variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.5. Was Flux Correction Used
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does conservation involve flux correction ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Grid
Ocean grid
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of grid in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Properties of vertical discretisation in ocean
10.1. Coordinates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical coordinates in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10.2. Partial Steps
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Using partial steps with Z or Z vertical coordinate in ocean ?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Discretisation --> Horizontal
Type of horizontal discretisation scheme in ocean
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Staggering
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal grid staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Timestepping Framework
Ocean Timestepping Framework
12.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Via coupling"
# "Specific treatment"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Diurnal Cycle
Is Required: TRUE Type: ENUM Cardinality: 1.1
Diurnal cycle type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Timestepping Framework --> Tracers
Properties of tracers time stepping in ocean
13.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracers time stepping scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Tracers time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Preconditioned conjugate gradient"
# "Sub cyling"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Timestepping Framework --> Baroclinic Dynamics
Baroclinic dynamics in ocean
14.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Leap-frog + Asselin filter"
# "Leap-frog + Periodic Euler"
# "Predictor-corrector"
# "Runge-Kutta 2"
# "AM3-LF"
# "Forward-backward"
# "Forward operator"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Baroclinic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "split explicit"
# "implicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Timestepping Framework --> Barotropic
Barotropic time stepping in ocean
15.1. Splitting
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time splitting method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.2. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Barotropic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Timestepping Framework --> Vertical Physics
Vertical physics time stepping in ocean
16.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Details of vertical time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Advection
Ocean advection
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of advection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Flux form"
# "Vector form"
# TODO - please enter value(s)
"""
Explanation: 18. Advection --> Momentum
Properties of lateral momentum advection scheme in ocean
18.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of lateral momentum advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Scheme Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean momentum advection scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 18.3. ALE
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Using ALE for vertical advection ? (if vertical coordinates are sigma)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19. Advection --> Lateral Tracers
Properties of lateral tracer advection scheme in ocean
19.1. Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Order of lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 19.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for lateral tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Effective Order
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Effective order of limited lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.4. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Ideal age"
# "CFC 11"
# "CFC 12"
# "SF6"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.5. Passive Tracers
Is Required: FALSE Type: ENUM Cardinality: 0.N
Passive tracers advected
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.6. Passive Tracers Advection
Is Required: FALSE Type: STRING Cardinality: 0.1
Is advection of passive tracers different than active ? if so, describe.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20. Advection --> Vertical Tracers
Properties of vertical tracer advection scheme in ocean
20.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 20.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for vertical tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Lateral Physics
Ocean lateral physics
21.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lateral physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Eddy active"
# "Eddy admitting"
# TODO - please enter value(s)
"""
Explanation: 21.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transient eddy representation in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Lateral Physics --> Momentum --> Operator
Properties of lateral physics operator for momentum in ocean
22.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics momentum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Properties of eddy viscosity coeff in lateral physics momentum scheme in the ocean
23.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics momentum eddy viscosity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 23.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy viscosity coeff in lateral physics momentum scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy viscosity coeff in lateral physics momentum scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Coeff Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe background eddy viscosity coeff in lateral physics momentum scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 23.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy viscosity coeff in lateral physics momentum scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24. Lateral Physics --> Tracers
Properties of lateral physics for tracers in ocean
24.1. Mesoscale Closure
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a mesoscale closure in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 24.2. Submesoscale Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Horizontal"
# "Isopycnal"
# "Isoneutral"
# "Geopotential"
# "Iso-level"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Lateral Physics --> Tracers --> Operator
Properties of lateral physics operator for tracers in ocean
25.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Harmonic"
# "Bi-harmonic"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Second order"
# "Higher order"
# "Flux limiter"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant"
# "Space varying"
# "Time + space varying (Smagorinsky)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean
26.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics tracers eddy diffusity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.4. Coeff Background
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 26.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "GM"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean
27.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EIV in lateral physics tracers in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27.2. Constant Val
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If EIV scheme for tracers is constant, specify coefficient value (M2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Flux Type
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV flux (advective or skew)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Added Diffusivity
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV added diffusivity (constant, flow dependent or none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28. Vertical Physics
Ocean Vertical Physics
28.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vertical physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Properties of vertical physics in ocean
29.1. Langmuir Cells Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there Langmuir cells mixing in upper ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
*Properties of boundary layer (BL) mixing on tracers in the ocean *
30.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of tracers, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of tracers coefficient, (schema and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure - TKE"
# "Turbulent closure - KPP"
# "Turbulent closure - Mellor-Yamada"
# "Turbulent closure - Bulk Mixed Layer"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
*Properties of boundary layer (BL) mixing on momentum in the ocean *
31.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of momentum, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of momentum coefficient, (schema and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Non-penetrative convective adjustment"
# "Enhanced vertical diffusion"
# "Included in turbulence closure"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32. Vertical Physics --> Interior Mixing --> Details
*Properties of interior mixing in the ocean *
32.1. Convection Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical convection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.2. Tide Induced Mixing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how tide induced mixing is modelled (barotropic, baroclinic, none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.3. Double Diffusion
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there double diffusion
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 32.4. Shear Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there interior shear mixing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33. Vertical Physics --> Interior Mixing --> Tracers
*Properties of interior mixing on tracers in the ocean *
33.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 33.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of tracers, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of tracers coefficient, (schema and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Constant value"
# "Turbulent closure / TKE"
# "Turbulent closure - Mellor-Yamada"
# "Richardson number dependent - PP"
# "Richardson number dependent - KT"
# "Imbeded as isopycnic vertical coordinate"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34. Vertical Physics --> Interior Mixing --> Momentum
*Properties of interior mixing on momentum in the ocean *
34.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 34.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of momentum, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of momentum coefficient, (schema and value in m2/s - may be none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Uplow Boundaries --> Free Surface
Properties of free surface in ocean
35.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of free surface in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear implicit"
# "Linear filtered"
# "Linear semi-explicit"
# "Non-linear implicit"
# "Non-linear filtered"
# "Non-linear semi-explicit"
# "Fully explicit"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Free surface scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 35.3. Embeded Seaice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the sea-ice embedded in the ocean model (instead of levitating) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Uplow Boundaries --> Bottom Boundary Layer
Properties of bottom boundary layer in ocean
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Diffusive"
# "Acvective"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.2. Type Of Bbl
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 36.3. Lateral Mixing Coef
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.4. Sill Overflow
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any specific treatment of sill overflows
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37. Boundary Forcing
Ocean boundary forcing
37.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of boundary forcing in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Surface Pressure
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.3. Momentum Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.4. Tracers Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.5. Wave Effects
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how wave effects are modelled at ocean surface.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.6. River Runoff Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how river runoff from land surface is routed to ocean and any global adjustment done.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.7. Geothermal Heating
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how geothermal heating is present at ocean bottom.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Non-linear"
# "Non-linear (drag function of speed of tides)"
# "Constant drag coefficient"
# "None"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 38. Boundary Forcing --> Momentum --> Bottom Friction
Properties of momentum bottom friction in ocean
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum bottom friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 39. Boundary Forcing --> Momentum --> Lateral Friction
Properties of momentum lateral friction in ocean
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum lateral friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Properties of sunlight penetration scheme in ocean
40.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of sunlight penetration scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 40.2. Ocean Colour
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the ocean sunlight penetration scheme ocean colour dependent ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40.3. Extinction Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe and list extinctions depths for sunlight penetration scheme (if applicable).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Properties of surface fresh water forcing in ocean
41.1. From Atmopshere
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from atmos in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. From Sea Ice
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from sea-ice in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 41.3. Forced Mode Restoring
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of surface salinity restoring in forced mode (OMIP)
End of explanation
"""
|
turbomanage/training-data-analyst | CPB100/lab4a/demandforecast2.ipynb | apache-2.0 | !sudo pip install --user pandas-gbq
!pip install --user pandas_gbq
"""
Explanation: <h1>Demand forecasting with BigQuery and TensorFlow</h1>
In this notebook, we will develop a machine learning model to predict the demand for taxi cabs in New York.
To develop the model, we will need to get historical data of taxicab usage. This data exists in BigQuery. Let's start by looking at the schema.
Set up
End of explanation
"""
PROJECT = 'cloud-training-demos' # CHANGE this to your GCP project
BUCKET = PROJECT + '-ml'
REGION = 'us-central1' # CHANGE this to the region you want to use
import os
os.environ['PROJECT'] = PROJECT
os.environ['BUCKET'] = BUCKET
os.environ['REGION'] = REGION
%%bash
gcloud config set project ${PROJECT}
gcloud config set compute/region ${REGION}
def query_to_dataframe(query):
    """Run *query* against BigQuery (Standard SQL) and return the result as a DataFrame.

    Relies on the module-level PROJECT constant for billing/credentials.
    """
    import pandas as pd
    frame = pd.read_gbq(query, project_id=PROJECT, dialect='standard')
    return frame
"""
Explanation: Restart the kernel after installation.
End of explanation
"""
import pandas as pd
import numpy as np
import shutil
query_to_dataframe("""
SELECT * FROM `bigquery-public-data.new_york.tlc_yellow_trips_2015` LIMIT 10
""")
"""
Explanation: Explore table
End of explanation
"""
query_to_dataframe("""
SELECT
EXTRACT (DAYOFYEAR from pickup_datetime) AS daynumber
FROM `bigquery-public-data.new_york.tlc_yellow_trips_2015`
LIMIT 5
""")
"""
Explanation: <h2> Analyzing taxicab demand </h2>
Let's pull the number of trips for each day in the 2015 dataset using Standard SQL.
End of explanation
"""
def taxiquery(year):
    """Return a Standard SQL string counting yellow-cab trips per day-of-year.

    *year* selects the matching `tlc_yellow_trips_<year>` wildcard table
    via _TABLE_SUFFIX.
    """
    template = """
    WITH trips AS (
      SELECT EXTRACT (DAYOFYEAR from pickup_datetime) AS daynumber
      FROM `bigquery-public-data.new_york.tlc_yellow_trips_*`
      where _TABLE_SUFFIX = '{}'
    )
    SELECT daynumber, COUNT(1) AS numtrips FROM trips
    GROUP BY daynumber ORDER BY daynumber
    """
    return template.format(year)
trips = query_to_dataframe(taxiquery(2015))
trips[:5]
"""
Explanation: <h3> Modular queries and Pandas dataframe </h3>
Let's use the total number of trips as our proxy for taxicab demand (other reasonable alternatives are total trip_distance or total fare_amount). It is possible to predict multiple variables using Tensorflow, but for simplicity, we will stick to just predicting the number of trips.
We will wrap our query in a function, 'taxiquery', that takes the year as an argument. We can then invoke 'taxiquery' for any year we like. The query_to_dataframe() helper converts the BigQuery result into a <a href='http://pandas.pydata.org/'>Pandas</a> dataframe.
End of explanation
"""
avg = np.mean(trips['numtrips'])
print('Just using average={0} has RMSE of {1}'.format(avg, np.sqrt(np.mean((trips['numtrips'] - avg)**2))))
"""
Explanation: <h3> Benchmark </h3>
Often, a reasonable estimate of something is its historical average. We can therefore benchmark our machine learning model against the historical average.
End of explanation
"""
query_to_dataframe("""
SELECT * FROM `bigquery-public-data.noaa_gsod.stations`
WHERE state = 'NY' AND wban != '99999' AND name LIKE '%LA GUARDIA%'
""")
"""
Explanation: The mean here is about 400,000 and the root-mean-square-error (RMSE) in this case is about 52,000. In other words, if we were to estimate that there are 400,000 taxi trips on any given day, that estimate will be off on average by about 52,000 in either direction.
Let's see if we can do better than this -- our goal is to make predictions of taxicab demand whose RMSE is lower than 52,000.
What kinds of things affect people's use of taxicabs?
<h2> Weather data </h2>
We suspect that weather influences how often people use a taxi. Perhaps someone who'd normally walk to work would take a taxi if it is very cold or rainy.
One of the advantages of using a global data warehouse like BigQuery is that you get to mash up unrelated datasets quite easily.
End of explanation
"""
def wxquery(year):
    """Return a Standard SQL string pulling daily weather at La Guardia (stn 725030).

    For each day of *year*: day-of-year, day-of-week, min/max temperature (F)
    and rainfall (inches, with the 99.99 missing-data sentinel mapped to 0).
    """
    template = """
    SELECT EXTRACT (DAYOFYEAR FROM CAST(CONCAT('{0}','-',mo,'-',da) AS TIMESTAMP)) AS daynumber,
       MIN(EXTRACT (DAYOFWEEK FROM CAST(CONCAT('{0}','-',mo,'-',da) AS TIMESTAMP))) dayofweek,
       MIN(min) mintemp, MAX(max) maxtemp, MAX(IF(prcp=99.99,0,prcp)) rain
    FROM `bigquery-public-data.noaa_gsod.gsod*`
    WHERE stn='725030' AND _TABLE_SUFFIX = '{0}'
    GROUP BY 1 ORDER BY daynumber DESC
    """
    return template.format(year)
weather = query_to_dataframe(wxquery(2015))
weather[:5]
"""
Explanation: <h3> Variables </h3>
Let's pull out the minimum and maximum daily temperature (in Fahrenheit) as well as the amount of rain (in inches) for La Guardia airport.
End of explanation
"""
data = pd.merge(weather, trips, on='daynumber')
data[:5]
"""
Explanation: <h3> Merge datasets </h3>
Let's use Pandas to merge (combine) the taxi cab and weather datasets day-by-day.
End of explanation
"""
j = data.plot(kind='scatter', x='maxtemp', y='numtrips')
"""
Explanation: <h3> Exploratory analysis </h3>
Is there a relationship between maximum temperature and the number of trips?
End of explanation
"""
j = data.plot(kind='scatter', x='dayofweek', y='numtrips')
"""
Explanation: The scatterplot above doesn't look very promising. There appears to be a weak downward trend, but it's also quite noisy.
Is there a relationship between the day of the week and the number of trips?
End of explanation
"""
j = data[data['dayofweek'] == 7].plot(kind='scatter', x='maxtemp', y='numtrips')
"""
Explanation: Hurrah, we seem to have found a predictor. It appears that people use taxis more later in the week. Perhaps New Yorkers make weekly resolutions to walk more and then lose their determination later in the week, or maybe it reflects tourism dynamics in New York City.
Perhaps if we took out the <em>confounding</em> effect of the day of the week, maximum temperature will start to have an effect. Let's see if that's the case:
End of explanation
"""
data2 = data # 2015 data
for year in [2014, 2016]:
weather = query_to_dataframe(wxquery(year))
trips = query_to_dataframe(taxiquery(year))
data_for_year = pd.merge(weather, trips, on='daynumber')
data2 = pd.concat([data2, data_for_year])
data2.describe()
j = data2[data2['dayofweek'] == 7].plot(kind='scatter', x='maxtemp', y='numtrips')
"""
Explanation: Removing the confounding factor does seem to reflect an underlying trend around temperature. But ... the data are a little sparse, don't you think? This is something that you have to keep in mind -- the more predictors you start to consider (here we are using two: day of week and maximum temperature), the more rows you will need so as to avoid <em> overfitting </em> the model.
<h3> Adding 2014 and 2016 data </h3>
Let's add in 2014 and 2016 data to the Pandas dataframe. Note how useful it was for us to modularize our queries around the YEAR.
End of explanation
"""
import tensorflow as tf
shuffled = data2.sample(frac=1, random_state=13)
# It would be a good idea, if we had more data, to treat the days as categorical variables
# with the small amount of data, we have though, the model tends to overfit
#predictors = shuffled.iloc[:,2:5]
#for day in range(1,8):
# matching = shuffled['dayofweek'] == day
# key = 'day_' + str(day)
# predictors[key] = pd.Series(matching, index=predictors.index, dtype=float)
predictors = shuffled.iloc[:,1:5]
predictors[:5]
shuffled[:5]
targets = shuffled.iloc[:,5]
targets[:5]
"""
Explanation: The data do seem a bit more robust. If we had even more data, it would be better of course. But in this case, we only have 2014-2016 data for taxi trips, so that's what we will go with.
<h2> Machine Learning with Tensorflow </h2>
We'll use 80% of our dataset for training and 20% of the data for testing the model we have trained. Let's shuffle the rows of the Pandas dataframe so that this division is random. The predictor (or input) columns will be every column in the database other than the number-of-trips (which is our target, or what we want to predict).
The machine learning models that we will use -- linear regression and neural networks -- both require that the input variables are numeric in nature.
The day of the week, however, is a categorical variable (i.e. Tuesday is not really greater than Monday). So, we should create separate columns for whether it is a Monday (with values 0 or 1), Tuesday, etc.
Against that, we do have limited data (remember: the more columns you use as input features, the more rows you need to have in your training dataset), and it appears that there is a clear linear trend by day of the week. So, we will opt for simplicity here and use the data as-is. Try uncommenting the code that creates separate columns for the days of the week and re-run the notebook if you are curious about the impact of this simplification.
End of explanation
"""
trainsize = int(len(shuffled['numtrips']) * 0.8)  # first 80% of (shuffled) rows form the training split
avg = np.mean(shuffled['numtrips'][:trainsize])   # historical mean computed on the training split only
rmse = np.sqrt(np.mean((targets[trainsize:] - avg)**2))  # RMSE of that constant prediction on the held-out 20%
print('Just using average={0} has RMSE of {1}'.format(avg, rmse))
"""
Explanation: Let's update our benchmark based on the 80-20 split and the larger dataset.
End of explanation
"""
SCALE_NUM_TRIPS = 600000.0
trainsize = int(len(shuffled['numtrips']) * 0.8)
testsize = len(shuffled['numtrips']) - trainsize
npredictors = len(predictors.columns)
noutputs = 1
tf.logging.set_verbosity(tf.logging.WARN) # change to INFO to get output every 100 steps ...
shutil.rmtree('./trained_model_linear', ignore_errors=True) # so that we don't load weights from previous runs
estimator = tf.contrib.learn.LinearRegressor(model_dir='./trained_model_linear',
feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(predictors.values))
print("starting to train ... this will take a while ... use verbosity=INFO to get more verbose output")
def input_fn(features, targets):
    # Feed dense tensors to the estimator; labels are reshaped to a column and
    # scaled down by SCALE_NUM_TRIPS so the regression target stays roughly in [0, 1].
    return tf.constant(features.values), tf.constant(targets.values.reshape(len(targets), noutputs)/SCALE_NUM_TRIPS)
estimator.fit(input_fn=lambda: input_fn(predictors[:trainsize], targets[:trainsize]), steps=10000)
pred = np.multiply(list(estimator.predict(predictors[trainsize:].values)), SCALE_NUM_TRIPS )
rmse = np.sqrt(np.mean(np.power((targets[trainsize:].values - pred), 2)))
print('LinearRegression has RMSE of {0}'.format(rmse))
"""
Explanation: <h2> Linear regression with tf.contrib.learn </h2>
We scale the number of taxicab rides by 600,000 so that the model can keep its predicted values in the [0-1] range. The optimization goes a lot faster when the weights are small numbers. We save the weights into ./trained_model_linear and display the root mean square error on the test dataset.
End of explanation
"""
SCALE_NUM_TRIPS = 600000.0
trainsize = int(len(shuffled['numtrips']) * 0.8)
testsize = len(shuffled['numtrips']) - trainsize
npredictors = len(predictors.columns)
noutputs = 1
tf.logging.set_verbosity(tf.logging.WARN) # change to INFO to get output every 100 steps ...
shutil.rmtree('./trained_model', ignore_errors=True) # so that we don't load weights from previous runs
estimator = tf.contrib.learn.DNNRegressor(model_dir='./trained_model',
hidden_units=[5, 5],
feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(predictors.values))
print("starting to train ... this will take a while ... use verbosity=INFO to get more verbose output")
def input_fn(features, targets):
    # Feed dense tensors to the estimator; labels are reshaped to a column and
    # scaled down by SCALE_NUM_TRIPS so the regression target stays roughly in [0, 1].
    return tf.constant(features.values), tf.constant(targets.values.reshape(len(targets), noutputs)/SCALE_NUM_TRIPS)
estimator.fit(input_fn=lambda: input_fn(predictors[:trainsize], targets[:trainsize]), steps=10000)
pred = np.multiply(list(estimator.predict(predictors[trainsize:].values)), SCALE_NUM_TRIPS )
rmse = np.sqrt(np.mean((targets[trainsize:].values - pred)**2))
print('Neural Network Regression has RMSE of {0}'.format(rmse))
"""
Explanation: The RMSE here (57K) is lower than the benchmark (62K) indicates that we are doing about 10% better with the machine learning model than we would be if we were to just use the historical average (our benchmark).
<h2> Neural network with tf.contrib.learn </h2>
Let's make a more complex model with a few hidden nodes.
End of explanation
"""
input = pd.DataFrame.from_dict(data =
{'dayofweek' : [4, 5, 6],
'mintemp' : [60, 40, 50],
'maxtemp' : [70, 90, 60],
'rain' : [0, 0.5, 0]})
# read trained model from ./trained_model
estimator = tf.contrib.learn.LinearRegressor(model_dir='./trained_model_linear',
feature_columns=tf.contrib.learn.infer_real_valued_columns_from_input(input.values))
pred = np.multiply(list(estimator.predict(input.values)), SCALE_NUM_TRIPS )
print(pred)
"""
Explanation: Using a neural network results in similar performance to the linear model when I ran it -- it might be because there isn't enough data for the NN to do much better. (NN training is a non-convex optimization, and you will get different results each time you run the above code).
<h2> Running a trained model </h2>
So, we have trained a model, and saved it to a file. Let's use this model to predict taxicab demand given the expected weather for three days.
Here we make a Dataframe out of those inputs, load up the saved model (note that we have to know the model equation -- it's not saved in the model file) and use it to predict the taxicab demand.
End of explanation
"""
|
CELMA-project/CELMA | MES/boundaries/2-uEParSheath/calculations/exactSolutions.ipynb | lgpl-3.0 | %matplotlib notebook
from sympy import init_printing
from sympy import S
from sympy import sin, cos, tanh, exp, pi, sqrt, log
from boutdata.mms import x, y, z, t
from boutdata.mms import DDX
import os, sys
# If we add to sys.path, then it must be an absolute path
common_dir = os.path.abspath('./../../../../common')
# Sys path is a list of system paths
sys.path.append(common_dir)
from CELMAPy.MES import make_plot, BOUT_print
init_printing()
"""
Explanation: Exact solution used in MES runs
We would like to MES the implementation of the sheath boundary condition in $u_{e,\|}$. We have that
$$
u_{e,\|} = c_s \exp(\Lambda - [\phi_0 + \phi])
$$
Which normalized yields
$$
u_{e,\|} = \exp(\Lambda - [\phi_0 + \phi])
$$
End of explanation
"""
folder = '../gaussianWSinAndParabola/'
"""
Explanation: Initialize
End of explanation
"""
# Initialization
the_vars = {}
# We need Lx
from boututils.options import BOUTOptions
myOpts = BOUTOptions(folder)
Lx = eval(myOpts.geom['Lx'])
Ly = eval(myOpts.geom['Ly'])
mu = eval(myOpts.cst['mu']) # Needed for Lambda
Lambda = eval(myOpts.cst['Lambda'])
phiRef = eval(myOpts.cst['phiRef'])
# No y variation in the profile
# The potential
# The skew sinus
# In cartesian coordinates we would like a sinus with with a wave-vector in the direction
# 45 degrees with respect to the first quadrant. This can be achieved with a wave vector
# k = [1/sqrt(2), 1/sqrt(2)]
# sin((1/sqrt(2))*(x + y))
# We would like 2 nodes, so we may write
# sin((1/sqrt(2))*(x + y)*(2*pi/(2*Lx)))
the_vars['phi'] = sin((1/sqrt(2))*(x + y)*(2*pi/(2*Lx)))
# The profile
the_vars['profile'] = sin(2*pi*x/Lx)**2
# The parallel velocity, given by the sheath boundary condition
the_vars['uEPar'] = exp(Lambda-(phiRef+the_vars['phi']))*the_vars['profile']
"""
Explanation: Define the variables
End of explanation
"""
make_plot(folder=folder, the_vars=the_vars, plot2d=True, include_aux=False, direction='y')
"""
Explanation: Plot
End of explanation
"""
BOUT_print(the_vars, rational=False)
"""
Explanation: Print the variables in BOUT++ format
End of explanation
"""
|
metpy/MetPy | v0.8/_downloads/Hodograph_Inset.ipynb | bsd-3-clause | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import numpy as np
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, Hodograph, SkewT
from metpy.units import units
"""
Explanation: Hodograph Inset
Layout a Skew-T plot with a hodograph inset into the plot.
End of explanation
"""
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
df['u_wind'], df['v_wind'] = mpcalc.get_wind_components(df['speed'],
np.deg2rad(df['direction']))
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed',
'u_wind', 'v_wind'), how='all').reset_index(drop=True)
"""
Explanation: Upper air data can be obtained using the siphon package, but for this example we will use
some of MetPy's sample data.
End of explanation
"""
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.get_wind_components(wind_speed, wind_dir)
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
# Grid for plots
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Good bounds for aspect ratio
skew.ax.set_xlim(-50, 60)
# Create a hodograph
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, np.hypot(u, v))
# Show the plot
plt.show()
"""
Explanation: We will pull the data out of the example dataset into individual variables and
assign units.
End of explanation
"""
|
stevetjoa/stanford-mir | energy.ipynb | mit | x, sr = librosa.load('audio/simple_loop.wav')
sr
x.shape
librosa.get_duration(x, sr)
"""
Explanation: ← Back to Index
Energy and RMSE
The energy (Wikipedia of a signal corresponds to the total magntiude of the signal. For audio signals, that roughly corresponds to how loud the signal is. The energy in a signal is defined as
$$ \sum_n \left| x(n) \right|^2 $$
The root-mean-square energy (RMSE) in a signal is defined as
$$ \sqrt{ \frac{1}{N} \sum_n \left| x(n) \right|^2 } $$
Let's load a signal:
End of explanation
"""
ipd.Audio(x, rate=sr)
"""
Explanation: Listen to the signal:
End of explanation
"""
librosa.display.waveplot(x, sr=sr)
"""
Explanation: Plot the signal:
End of explanation
"""
hop_length = 256    # samples between the starts of successive frames
frame_length = 512  # samples per analysis frame (frames overlap by half)
# Short-time energy: sum of squared magnitudes over each frame.
energy = numpy.array([
    sum(abs(x[i:i+frame_length]**2))
    for i in range(0, len(x), hop_length)
])
energy.shape  # one energy value per frame
"""
Explanation: Compute the short-time energy using a list comprehension:
End of explanation
"""
rmse = librosa.feature.rmse(x, frame_length=frame_length, hop_length=hop_length, center=True)
rmse.shape
rmse = rmse[0]
"""
Explanation: Compute the RMSE using librosa.feature.rmse:
End of explanation
"""
frames = range(len(energy))
t = librosa.frames_to_time(frames, sr=sr, hop_length=hop_length)
librosa.display.waveplot(x, sr=sr, alpha=0.4)
plt.plot(t, energy/energy.max(), 'r--') # normalized for visualization
plt.plot(t[:len(rmse)], rmse/rmse.max(), color='g') # normalized for visualization
plt.legend(('Energy', 'RMSE'))
"""
Explanation: Plot both the energy and RMSE along with the waveform:
End of explanation
"""
def strip(x, frame_length, hop_length):
    """Remove leading silence from the signal *x*.

    Computes the frame-wise RMSE of *x* and discards every sample before the
    first frame whose RMSE reaches a fixed threshold (0.01).

    Parameters:
        x: audio signal as a 1-D array.
        frame_length, hop_length: analysis window size and hop, in samples.

    Returns:
        The trimmed signal (a slice of *x*). If the entire signal stays below
        the threshold, an empty slice is returned.
    """
    # Compute RMSE; librosa returns shape (1, n_frames), so take row 0 once.
    rmse = librosa.feature.rmse(x, frame_length=frame_length, hop_length=hop_length, center=True)[0]
    # Identify the first frame index where RMSE exceeds a threshold.
    thresh = 0.01
    frame_index = 0
    # Bug fix: bound the scan so an all-quiet signal no longer walks past the
    # last frame and raises IndexError.
    while frame_index < len(rmse) and rmse[frame_index] < thresh:
        frame_index += 1
    # Convert units of frames to samples.
    start_sample_index = librosa.frames_to_samples(frame_index, hop_length=hop_length)
    # Return the trimmed signal.
    return x[start_sample_index:]
"""
Explanation: Questions
Write a function, strip, that removes leading silence from a signal. Make sure it works for a variety of signals recorded in different environments and with different signal-to-noise ratios (SNR).
End of explanation
"""
y = strip(x, frame_length, hop_length)
ipd.Audio(y, rate=sr)
librosa.display.waveplot(y, sr=sr)
"""
Explanation: Let's see if it works.
End of explanation
"""
|
bjshaw/phys202-project | galaxy_project/Ia) Base Question Implementation.ipynb | mit | %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint
from initial_velocities import velocities_m, velocities_S
from DE_solver import derivs, equationsolver
"""
Explanation: Base Question Implementation
End of explanation
"""
ic_base = np.zeros(484)
"""
Explanation: Finally, I am ready to answer the base question that I have laid out, which is how a system of many stars would react when the disrupting galaxy came into a close orbit to the main galaxy. Toomre and Toomre's paper lays out the initial orbit of 120 stars around the main galaxy, 12 particles with a circular orbit of radius 20 percent $R_{min}(25\hspace{1 mm} kpc)$, 18 particles at 30 percent, 24 at 40 percent, 30 at 50 percent, and 36 and 60 percent.
Defining emtpy initial condition array:
End of explanation
"""
max_time_base = 1.5
time_step_base = 120
M_base = 1e11
S_base = 1e11
S_y_base = 70
S_x_base = -.01*S_y_base**2+25
vxS_base = velocities_S(M_base,S_base,S_x_base,S_y_base)[0]
vyS_base = velocities_S(M_base,S_base,S_x_base,S_y_base)[1]
"""
Explanation: Setting values for S, M, and t:
End of explanation
"""
ic_base[0] = S_x_base
ic_base[1] = S_y_base
ic_base[2] = vxS_base
ic_base[3] = vyS_base
"""
Explanation: Setting initial condition array values pertaining to S:
End of explanation
"""
particles = np.array([12,18,24,30,36])    # stars per ring, following Toomre & Toomre's layout
percent = np.array([.20,.30,.40,.50,.60]) # ring radii as fractions of R_min = 25 kpc
a=np.array([particles,percent])
x = []
y = []
for i in range(0,5):
    # Evenly spaced angles around ring i (one per star on that ring).
    theta = np.arange(0,2*np.pi,(2*np.pi)/a[0,i])
    r = 25*a[1,i]  # ring radius in kpc
    for t in theta:
        x.append(r*np.cos(t))
        y.append(r*np.sin(t))
x_y = np.array([x,y])  # shape (2, 120): row 0 = x positions, row 1 = y positions
"""
Explanation: Creating positions for the stars of M:
End of explanation
"""
plt.figure(figsize=(5,5))
for n in range(0,120):
plt.scatter(x_y[0,n],x_y[1,n])
"""
Explanation: The following plot shows all the initial positions of the stars:
End of explanation
"""
np.savez('star_positions.npz',x_y)
"""
Explanation: I also wrote these positions to disk so I could use them for other cases:
End of explanation
"""
for i in range(0,120):
ic_base[(i+1)*4] = x_y[0][i]
ic_base[((i+1)*4)+1] = x_y[1][i]
for n in range(1,int(len(ic_base)/4)):
ic_base[n*4+2] = velocities_m(M_base,ic_base[n*4],ic_base[n*4+1])[0]
ic_base[n*4+3] = velocities_m(M_base,ic_base[n*4],ic_base[n*4+1])[1]
"""
Explanation: Putting these values into my initial condition array, as well calling the initial velocity function on each position:
End of explanation
"""
sol_base = equationsolver(ic_base,max_time_base,time_step_base,M_base,S_base)
np.savez('base_question_data.npz',sol_base,ic_base)
"""
Explanation: Calling my differential equation solver, and saving the data to disk:
Times vary, either just under or just over a minute
End of explanation
"""
|
shengshuyang/StanfordCNNClass | assignment1/knn.ipynb | gpl-3.0 | # Run some setup code for this notebook.
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
# Load the raw CIFAR-10 data.
cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)
# As a sanity check, we print out the size of the training and test data.
print 'Training data shape: ', X_train.shape
print 'Training labels shape: ', y_train.shape
print 'Test data shape: ', X_test.shape
print 'Test labels shape: ', y_test.shape
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
num_classes = len(classes)
samples_per_class = 7
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx].astype('uint8'))
plt.axis('off')
if i == 0:
plt.title(cls)
plt.show()
# Subsample the data for more efficient code execution in this exercise
num_training = 5000
mask = range(num_training)
X_train = X_train[mask]
y_train = y_train[mask]
num_test = 500
mask = range(num_test)
X_test = X_test[mask]
y_test = y_test[mask]
# Reshape the image data into rows
X_train = np.reshape(X_train, (X_train.shape[0], -1))
X_test = np.reshape(X_test, (X_test.shape[0], -1))
print X_train.shape, X_test.shape
from cs231n.classifiers import KNearestNeighbor
# Create a kNN classifier instance.
# Remember that training a kNN classifier is a noop:
# the Classifier simply remembers the data and does no further processing
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
"""
Explanation: k-Nearest Neighbor (kNN) exercise
Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the assignments page on the course website.
The kNN classifier consists of two stages:
During training, the classifier takes the training data and simply remembers it
During testing, kNN classifies every test image by comparing to all training images and transferring the labels of the k most similar training examples
The value of k is cross-validated
In this exercise you will implement these steps and understand the basic Image Classification pipeline, cross-validation, and gain proficiency in writing efficient, vectorized code.
End of explanation
"""
# Open cs231n/classifiers/k_nearest_neighbor.py and implement
# compute_distances_two_loops.
# Test your implementation:
dists = classifier.compute_distances_one_loop(X_test)
print dists.shape
# We can visualize the distance matrix: each row is a single test example and
# its distances to training examples
plt.imshow(dists, interpolation='none')
plt.show()
"""
Explanation: We would now like to classify the test data with the kNN classifier. Recall that we can break down this process into two steps:
First we must compute the distances between all test examples and all train examples.
Given these distances, for each test example we find the k nearest examples and have them vote for the label
Let's begin with computing the distance matrix between all training and test examples. For example, if there are Ntr training examples and Nte test examples, this stage should result in a Nte x Ntr matrix where each element (i,j) is the distance between the i-th test and j-th train example.
First, open cs231n/classifiers/k_nearest_neighbor.py and implement the function compute_distances_two_loops that uses a (very inefficient) double loop over all pairs of (test, train) examples and computes the distance matrix one element at a time.
End of explanation
"""
# Now implement the function predict_labels and run the code below:
# We use k = 1 (which is Nearest Neighbor).
y_test_pred = classifier.predict_labels(dists, k=1)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_test_pred == y_test)
# float() avoids Python 2 integer division.
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
"""
Explanation: Inline Question #1: Notice the structured patterns in the distance matrix, where some rows or columns are visibly brighter. (Note that with the default color scheme black indicates low distances while white indicates high distances.)
What in the data is the cause behind the distinctly bright rows?
What causes the columns?
Your Answer: fill this in.
End of explanation
"""
# k = 5: each test point is labelled by a majority vote over its 5 nearest neighbors.
y_test_pred = classifier.predict_labels(dists, k=5)
num_correct = np.sum(y_test_pred == y_test)
# float() avoids Python 2 integer division.
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
"""
Explanation: You should expect to see approximately 27% accuracy. Now let's try out a larger k, say k = 5:
End of explanation
"""
# Now let's speed up distance matrix computation by using partial vectorization
# with one loop. Implement the function compute_distances_one_loop and run the
# code below:
dists_one = classifier.compute_distances_one_loop(X_test)
# To ensure that our vectorized implementation is correct, we make sure that it
# agrees with the naive implementation. There are many ways to decide whether
# two matrices are similar; one of the simplest is the Frobenius norm. In case
# you haven't seen it before, the Frobenius norm of two matrices is the square
# root of the squared sum of differences of all elements; in other words, reshape
# the matrices into vectors and compute the Euclidean distance between them.
difference = np.linalg.norm(dists - dists_one, ord='fro')
print 'Difference was: %f' % (difference, )
# A tiny tolerance absorbs floating-point rounding between the implementations.
if difference < 0.001:
    print 'Good! The distance matrices are the same'
else:
    print 'Uh-oh! The distance matrices are different'
# Now implement the fully vectorized version inside compute_distances_no_loops
# and run the code
dists_two = classifier.compute_distances_no_loops(X_test)
# check that the distance matrix agrees with the one we computed before:
difference = np.linalg.norm(dists - dists_two, ord='fro')
print 'Difference was: %f' % (difference, )
if difference < 0.001:
    print 'Good! The distance matrices are the same'
else:
    print 'Uh-oh! The distance matrices are different'
# Let's compare how fast the implementations are
def time_function(f, *args):
    """Execute f(*args) once and return the elapsed wall-clock time in seconds."""
    import time
    started = time.time()
    f(*args)
    finished = time.time()
    return finished - started
# Time all three implementations on the same input; they compute the same
# distance matrix, only the amount of explicit Python looping differs.
two_loop_time = time_function(classifier.compute_distances_two_loops, X_test)
print 'Two loop version took %f seconds' % two_loop_time
one_loop_time = time_function(classifier.compute_distances_one_loop, X_test)
print 'One loop version took %f seconds' % one_loop_time
no_loop_time = time_function(classifier.compute_distances_no_loops, X_test)
print 'No loop version took %f seconds' % no_loop_time
# you should see significantly faster performance with the fully vectorized implementation
"""
Explanation: You should expect to see a slightly better performance than with k = 1.
End of explanation
"""
num_folds = 5
k_choices = [1, 3, 5, 8, 10, 12, 15, 20, 50, 100]
X_train_folds = []
y_train_folds = []
################################################################################
# TODO: #
# Split up the training data into folds. After splitting, X_train_folds and #
# y_train_folds should each be lists of length num_folds, where #
# y_train_folds[i] is the label vector for the points in X_train_folds[i]. #
# Hint: Look up the numpy array_split function. #
################################################################################
# array_split (unlike split) tolerates sizes not evenly divisible by num_folds.
X_train_folds = np.array_split(X_train, num_folds)
y_train_folds = np.array_split(y_train, num_folds)
#print len(X_train_folds)
#print X_train_folds[0].shape
################################################################################
# END OF YOUR CODE #
################################################################################
# A dictionary holding the accuracies for different values of k that we find
# when running cross-validation. After running cross-validation,
# k_to_accuracies[k] should be a list of length num_folds giving the different
# accuracy values that we found when using that value of k.
k_to_accuracies = {}
################################################################################
# TODO: #
# Perform k-fold cross validation to find the best value of k. For each #
# possible value of k, run the k-nearest-neighbor algorithm num_folds times, #
# where in each case you use all but one of the folds as training data and the #
# last fold as a validation set. Store the accuracies for all fold and all #
# values of k in the k_to_accuracies dictionary. #
################################################################################
k_to_accuracies = {}
for kk in k_choices:
    print "k = %d" %(kk)
    # One accuracy per fold, stored as a (num_folds, 1) column vector.
    accus = np.zeros((num_folds,1))
    for itr in range(num_folds):
        # Train on every fold except fold `itr`; validate on fold `itr`.
        xtr = np.vstack( X_train_folds[:itr]+X_train_folds[itr+1:])
        xtest= X_train_folds[itr]
        ytr = np.hstack( y_train_folds[:itr]+y_train_folds[itr+1:])
        ytest = y_train_folds[itr]
        # Fresh classifier per fold; kNN "training" just memorizes the data.
        classifier = KNearestNeighbor()
        classifier.train(xtr, ytr)
        dists = classifier.compute_distances_no_loops(xtest)
        y_test_pred = classifier.predict_labels(dists, k=kk)
        # Mean of a boolean array == fraction of correct predictions.
        accus[itr,0] = np.mean(y_test_pred == ytest)
        print "accuracy this itr: %f" %(accus[itr,0])
    k_to_accuracies[kk] = accus
    print "mean avg for k = %d is %f" %(kk,np.mean(accus))
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out the computed accuracies
for k in sorted(k_to_accuracies):
    # NOTE(review): each `accuracy` here is a 1-element array (accus has shape
    # (num_folds, 1)); '%f' relies on NumPy coercing it to a scalar.
    for accuracy in k_to_accuracies[k]:
        print 'k = %d, accuracy = %f' % (k, accuracy)
# plot the raw observations
for k in k_choices:
    accuracies = k_to_accuracies[k]
    plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 28% accuracy on the test data.
best_k = 10
classifier = KNearestNeighbor()
classifier.train(X_train, y_train)
y_test_pred = classifier.predict(X_test, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print 'Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy)
"""
Explanation: Cross-validation
We have implemented the k-Nearest Neighbor classifier but we set the value k = 5 arbitrarily. We will now determine the best value of this hyperparameter with cross-validation.
End of explanation
"""
|
ML4DS/ML4all | NLP2.Spacy_Tutorial (Data Preprocessing)/spaCy_tutorial_students.ipynb | mit | # Common imports
import numpy as np
import pandas as pd
import zipfile as zp
from termcolor import colored
import seaborn as sns
import matplotlib.pyplot as plt
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
#To wrap long text lines
from IPython.display import HTML, display
def set_css():
    """Inject CSS so long preformatted output wraps instead of overflowing."""
    display(HTML('''
    <style>
    pre {
        white-space: pre-wrap;
    }
    </style>
    '''))
# Re-apply the CSS before every cell execution, since cell output is re-rendered.
get_ipython().events.register('pre_run_cell', set_css)
#For fancy table Display
%load_ext google.colab.data_table
"""
Explanation: Using spaCy for Text Preprocessing
Date: Mar 16, 2021
Author: Jerónimo Arenas-García (jeronimo.arenas@uc3m.es)
Version 1.0
This notebook is based on the spaCy 101 course and documentation available at the spaCy website.
Our goal here is to present a basic overview of spacy that covers the elements necessary for implementing the preprocessing pipelines that we will need for obtaining the Bag of Word Representation of the document.
A more Advanced Tutorial by Ines Montani, one of the main developers of the library, is proposed for further study of interested students. In that tutorial, you can learn how to use spaCy matching functionalities, or how to retrain neural network models using your own training data.
End of explanation
"""
!pip install --upgrade spacy
import spacy
"""
Explanation: 1. What is spaCy
spaCy is a free, open-source library for NLP in Python
Providing optimized pipelines for taking models to production, i.e., facilitating integration with other components, and scalability
Current version (spaCy v3, released in Feb 2021) comes with pre-trained deep learning models, including state-of-the-art transformers, trained over huge data sets of documents
Available models can be fine-tuned to better fit specific document collections characteristics
SpaCy is intended to be used as a component of a more complex system, not as final application itself, i.e., it cannot be directly used to implement a chatbot system, a sentiment analyzer, etc ... but it provides a lot of tools that are easy to integrate for taking such systems into production.
1.1. SpaCy Features
spaCy provides a lot of features similar to those we have already discussed for the NLTK library.
spaCy makes it very easy to concatenate several of these operations:
Pipelines allow to concatenate a number of components to carry out the desired preprocessing tasks
Specific components can be enabled or disabled if necessary
It is possible to add ad-hoc components
Other developers are providing specific components ready to use in spaCy, e.g., spaCy langdetect is a wrapper for the langdetect library for language classification.
1.2. Language and Models
spaCy v3 comes with 55 pre-trained models for 17 languages. Details and installation instructions can be found here.
For most of these languages, three models are available, e.g.:
- en_core_web_sm
- en_core_web_md
- en_core_web_lg
[Convention for the model name is language_core_source_size]
These models are optimized for CPU usage, but they still incorporate neural networks for certain components.
Medium and Large models come with word-embeddings available, while small model does not
The larger the model, the higher the accuracy, but also the longer it takes to analyze a text fragment. I.e., accuracy comes at the cost of larger networks and, therefore, more computation
Accuracy of pipeline components are provided for specific annotated datasets
For English, Spanish, French, German, and Chinese, a fourth model (e.g. en_core_web_trf) based on transformers is also provided. These models are optimized to run over a GPU
1.3. Performance
--- WPS: Words per second
2. Using spaCy in Google Colab
2.1. Installing spaCy and loading language models
You can check that Google Colab already comes with spaCy v2 preinstalled. However, since in this notebook we will be using the new v3 release, you will need to upgrade to the latest available version
End of explanation
"""
!python -m spacy download en_core_web_md
"""
Explanation: In order to use a specific model you need to download it first. If working locally, you will need to download the model just once; however, in the cloud your environment resets and you will need to download the model on each session.
For this tutorial we will use an English model of medium size, the smallest model that incorporates word embeddings. For a complete list of available models, please refer to the spaCy website.
End of explanation
"""
# Show metadata for the downloaded model: pipeline components, entity labels,
# POS tags, and word-vector dimensions (only works for downloaded models).
spacy.info('en_core_web_md')
"""
Explanation: 2.2. Obtaining Model Info
You can retrieve the most relevant information about available language models using the following command
spacy.info('model_name')
Note that you can only apply this command on models that have already been downloaded. Otherwise, an exception is thrown.
Exercise 1: Run the following command and find the information related to
- Components included in the pipeline
- Are all components enabled?
- How many types of entities can be recognized by the corresponding component?
- What Part-of-Speech elements can you recognize?
- What is the dimension of the word-embeddings incorporated in the model?
Detailed information about some specific components of the pipeline, as well as how they can be used, will be studied in the next sections.
End of explanation
"""
text = 'Modern condensed matter physics research has produced novel materials with fundamental properties that underpin a remarkable number of cutting-edge technologies. It is now generally accepted that novel materials are necessary for critical advances in technologies and whoever discovers novel materials generally controls the science and technology of the future. Transition metal oxides have attracted enormous interest within both the basic and applied science communities. However, for many decades, the overwhelming balance of effort was focused on the 3d-elements (such as iron, copper, etc.) and their compounds; the heavier 4d- and 5d-elements (such as ruthenium, iridium, etc., which constitute two thirds of the d-elements listed in the Periodic Table) and their compounds have been largely ignored until recently. The principal investigator seeks to discover novel materials containing 4d- and/or 5d-elements and understand how they offer wide-ranging opportunities for the discovery of new physics and, ultimately, new device paradigms. This project also provides rigorous training to all students involved, focusing on synthesis and characterization techniques covering a broad spectrum of materials and experimental probes available in the principal investigator\'s laboratory. Technical Abstract: Physics driven by spin-orbit interactions is among the most important topics in contemporary condensed matter physics. Since the spin-orbit interaction is comparable to the on-site Coulomb and other relevant interactions, it creates a unique balance between competing interactions that drive complex behaviors and exotic states not observed in other materials. The project encompasses a systematic effort to elucidate physics of novel phenomena in spin-orbit-coupled and correlated materials and a rigorous search for new materials having exotic ground states. 
This project focuses on the following areas: (1) Novel phenomena at high pressures and high magnetic fields, (2) Unusual correlations between the insulating gap and magnetic transition in iridates and ruthenates, (3) Exotic metallic and superconducting states in iridates, (4) Mott insulators with "intermediate-strength" spin-orbit interaction and other competing energies, and (5) Single-crystal synthesis and search for novel materials. The principal investigator is one of a few key pioneers who have initiated seminal studies on iridates and, before that, ruthenates, and has comprehensive facilities and proven expertise for single-crystal synthesis and wide-ranging studies of structural, transport, magnetic, thermal and dielectric properties as functions of temperature, magnetic field, pressure and doping.'
print(text)
# Load the medium English pipeline (the smallest one shipping word vectors).
nlp = spacy.load('en_core_web_md')
# Calling the pipeline on a string returns a Doc with per-token annotations.
doc = nlp(text)
print(colored('============= Original Text =============', 'blue'))
print(doc)
print(colored('\n============= Lemmatized Text =============', 'red'))
# token.lemma_ is the human-readable lemma string for each token.
print(' '.join([tk.lemma_ for tk in doc]))
print(colored('\n============= Entities Found =============', 'green'))
# doc.ents holds the named-entity spans predicted by the NER component.
print('\n'.join([ent.text for ent in doc.ents]))
"""
Explanation: 3. Spacy Data Structures and Processing Pipelines
3.1. Introduction and basic usage
Processing texts with spaCy is really easy. You just need to load the model, and pass any text you wish to process. SpaCy will execute a series of transformations (a pipeline) and return a Doc object. The returned object has all information extracted from the original text, and provides a number of features to facilitate accessing the desired information.
<figure>
<center>
<img src='https://spacy.io/pipeline-fde48da9b43661abcdf62ab70a546d71.svg' width="800"></img>
<figcaption>Source: https://spacy.io/pipeline-fde48da9b43661abcdf62ab70a546d71.svg</figcaption></center>
</figure>
End of explanation
"""
#<SOL>
#</SOL>
"""
Explanation: Note how in the example we could easily access all lemmas and entities found by iterating over the document (variable doc) itself or over its entities (doc.ents)
3.2. Architecture
Central data structures:
Language: is instantiated when loading the model, and contains the pipeline. Transforms text into spaCy documents.
Doc: Sequence of tokens with annotations. We can iterate over tokens, access individual tokens (doc[3]) or a span of tokens (doc[5:15]).
Vocab: Unique vocabulary associated to the language. Vocabulary is composed of Lexemes that are hashed and stored in the vocabulary with word vectors and attributes. This is memory efficient and assures a unique ground truth.
<figure>
<center>
<img src='https://spacy.io/architecture-415624fc7d149ec03f2736c4aa8b8f3c.svg' width="600"></img>
<figcaption>Source: https://spacy.io/architecture-415624fc7d149ec03f2736c4aa8b8f3c.svg</figcaption></center>
</figure>
The Tokenizer component of the pipeline is special, since this is where the Doc object is generated from the text. Subsequent pipeline components perform operations in place, obtanining new attributes that are stored as annotations in the tokens.
Pipeline components can be fine-tuned using annotated data
New components can be easily implemented and added to the Pipeline
Exercise 2:
- Find the Spans associated to the following text fragments contained in the original text:
* structural, transport, magnetic, thermal and dielectric properties
* temperature, magnetic field, pressure and doping
* This project also provides rigorous training to all students involved
- Use command dir to examine what are the different methods and attributes of the Span object
- Recover the vector representation associated to each of the previous strings
- Compute the Euclidean distances between the selected Spans
--Hint: To compute Euclidean distances at this point, it can be convenient to use numpy function np.linalg.norm. Later in the notebook you will find that spaCy provides functions to carry out these calculations.
End of explanation
"""
#<SOL>
#</SOL>
#<SOL>
#</SOL>
#<SOL>
#</SOL>
"""
Explanation: Exercise 3: You can access all vocab elements as nlp.vocab. Each element of the vocabulary is known as a Lexeme
- Use command dir to examine what are the different methods and attributes of Lexeme objects.
- For each element in the vocabulary, print the text representation, the hash representation, and whether the term should be considered as a stopword or not.
- Find all stopwords in the Vocabulary
- Which is the current size of your vocabulary? Create an additional doc object from a text with words that have not been previously used, and check the new size of the vocabulary after processing the new document.
--Hint: For displaying the vocabulary in a convenient format, you can store the requested information in a Pandas DataFrame, and print the DataFrame instead
End of explanation
"""
shortext = 'Natural Language Processing is a key component of many relevant Artificial Intelligence Applications.' \
' Libraries such as spaCy v3 make it simple to benefit from statistical NLP models based on neural networks.' \
' It is estimated that NLP market in the U.S. will grow to around 30000 MUSD during the next five years.' \
' I don\'t know how accurate this is, but a solid growth is guaranteed'
shortdoc = nlp(shortext)
print(colored('============= The original text information is still kept in the Doc object =============', 'blue'))
print(shortdoc)
print(colored('\n============= Identified Tokens =============', 'red'))
# token.text preserves the exact original substring for each token.
for token in shortdoc:
    print(token.text, end='\t\t')
#print('\t\t'.join([token.text for token in shortdoc]))
"""
Explanation: 3.3. Usual Pipelines Components and Annotations
4. Linguistic Features
In this Section we will review a set of liguistic features provided by most spaCy pretrained pipelines. We will focus mainly on pipeline components that are relevant to build Bag of Words (BoW) representations of text.
4.1. Tokenizer
4.1.1. Word Tokenization
The Tokenizer is always the first component of the spaCy pretrained pipelines.
It has the important role of producing a Doc object out of a text string
It first splits the string using blank spaces
Then tokens are processed sequentially from left to right performing two operations
First, language-specific rules and exceptions are applied (e.g., in English "don't" is splitted into two separate tokens, but U.K. is kept as one token)
Second, prefixes or suffixes are identified. This is relevant to separate punctuation marks from the main tokens
It is important to note that tokenization rules, as well as exceptions are language specific. This means you need to make sure that the languages of the text and the selected Tokenizer match, otherwise you could get unexpected results.
Once the Doc object has been created, you can easily iterate over the identified tokens. Note also that the original text is preserved. You can access the string representation of Doc, Span, Token and even Lexeme objects by using the text attribute.
End of explanation
"""
# Add special case rule
from spacy.symbols import ORTH
# Tokenizer exceptions must preserve the original text, so "MUSD" is split into
# the sub-tokens "M" + "USD" rather than rewritten to different characters.
special_case = [{ORTH: "M"}, {ORTH: "USD"}]
nlp.tokenizer.add_special_case("MUSD", special_case)
# Re-tokenize to observe the effect of the newly added exception.
shortdoc = nlp(shortext)
print(colored('============= The original text information is still kept in the Doc object =============', 'blue'))
print(shortdoc)
print(colored('\n============= Identified Tokens =============', 'red'))
for token in shortdoc:
    print(token.text, end='\t\t')
#print('\t\t'.join([token.text for token in shortdoc]))
"""
Explanation: Unlike other spaCy components, the Tokenizer is not a statistical model. A finite set of rules and exceptions are encoded. If you wish to modify its behavior, you cannot retrain the component using labeled data. Instead, you would need to extend the list of rules and exceptions.
The following example adds an exception to expand word MUSD into tokens MUSD. Newly added exceptions are always applied after previous rules. Note also that exceptions must preserve the original text. Otherwise, an exception will be raised.
End of explanation
"""
# Sentence boundaries are exposed via Doc.sents (set by the dependency parser,
# as explained below, not by the Tokenizer itself).
for sentence in shortdoc.sents:
    print(sentence.text)
"""
Explanation: 4.1.2. Sentence Tokenization
Note that with the Doc object you can also iterate over sentences
End of explanation
"""
!python -m spacy download xx_sent_ud_sm
!pip install --upgrade spacy_langdetect
multilingualtext = 'Natural Language Processing is a key component of many relevant Artificial Intelligence Applications.' \
' El Procesamiento de Lenguaje Natural es un componente de gran importancia en multitud de aplicaciones de la Inteligencia Artificial.' \
' Libraries such as spaCy v3 make it simple to benefit from statistical NLP models based on neural networks.' \
' SpaCy v3 y otras librerías similares hacen posible emplear métodos de NLP basados en redes neuronales de manera sencilla.' \
' It is estimated that NLP market in the U.S. will grow to around 30000 MUSD during the next five years.' \
' Se estima que el mercado del NLP en USA será de alrededor de 30.000 millones de dolares en cinco años.'
#<SOL>
#</SOL>
# NOTE: english_text and spanish_text must be defined in the solution block above.
print(colored('\n============= English sentences =============', 'green'))
print(english_text)
print(colored('\n============= Spanish sentences =============', 'green'))
print(spanish_text)
"""
Explanation: However, be aware that sentences are not identified by the Tokenizer element we have just described, but sentence tokenization is carried out instead as a subproduct of the dependency extraction component, that we will shortly review.
This can be a problem for multilingual documents, since all components of the previously used pipeline assumes an input in English language. In this case, what we normally do is:
- Split the document in sentences using a multilingual sentence tokenizer
- Detect the language of each sentence
- Use the appropriate pipeline for each sentence depending on its language
Exercise 4: Split the following paragraph into two variables english_text and spanish_text using multilingual sentence tokenizers and language detection libraries.
Sentence tokenization: If you opt to use spaCy, you can use the multilingual pipeline xx_sent_ud_sm which provides just a basic (rule-based) sentence tokenizer. You may also use NLTK library (from nltk.tokenize import sent_tokenize)
Language detection: You can use python library langdetect, or the pipeline component spacy-langdetect for spaCy, which is just a wrapper for the previous library
End of explanation
"""
from spacy.language import Language
from spacy_langdetect import LanguageDetector
# Add LanguageDetector and assign it a string name
# Factories registered via Language.factory can be added to any pipeline by name.
@Language.factory("language_detector")
def create_language_detector(nlp, name):
    # language_detection_function=None -> use the wrapped library's default detector.
    return LanguageDetector(language_detection_function=None)
# Multilingual sentence tokenizer, with the language detector appended at the end.
mult_nlp = spacy.load('xx_sent_ud_sm')
mult_nlp.add_pipe('language_detector', last=True)
mult_doc = mult_nlp(multilingualtext)
# document level language detection. Think of it like average language of the document!
print(colored('============= Document level language detection =============', 'blue'))
print(mult_doc._.language)
# sentence level language detection
print(colored('\n============= Sentence level language detection =============', 'red'))
for sent in mult_doc.sents:
    print(sent, sent._.language)
# English and Spanish Texts
# Keep only the sentences whose detected language matches each target code.
print(colored('\n============= English sentences =============', 'green'))
english_text = ' '.join([sent.text for sent in mult_doc.sents if sent._.language['language']=='en'])
print(english_text)
print(colored('\n============= Spanish sentences =============', 'green'))
spanish_text = ' '.join([sent.text for sent in mult_doc.sents if sent._.language['language']=='es'])
print(spanish_text)
"""
Explanation: Example: The following code fragment adapts the example provided in the documentation for spacy-langdetect to construct a new pipeline that concatenates xx_sent_ud_sm and spacy-langdetect.
The new pipeline is then used to calculate variables english_text and spanish_text
End of explanation
"""
text = 'Modern condensed matter physics research has produced novel materials with fundamental properties that underpin a remarkable number of cutting-edge technologies. It is now generally accepted that novel materials are necessary for critical advances in technologies and whoever discovers novel materials generally controls the science and technology of the future. Transition metal oxides have attracted enormous interest within both the basic and applied science communities. However, for many decades, the overwhelming balance of effort was focused on the 3d-elements (such as iron, copper, etc.) and their compounds; the heavier 4d- and 5d-elements (such as ruthenium, iridium, etc., which constitute two thirds of the d-elements listed in the Periodic Table) and their compounds have been largely ignored until recently. The principal investigator seeks to discover novel materials containing 4d- and/or 5d-elements and understand how they offer wide-ranging opportunities for the discovery of new physics and, ultimately, new device paradigms. This project also provides rigorous training to all students involved, focusing on synthesis and characterization techniques covering a broad spectrum of materials and experimental probes available in the principal investigator\'s laboratory. Technical Abstract: Physics driven by spin-orbit interactions is among the most important topics in contemporary condensed matter physics. Since the spin-orbit interaction is comparable to the on-site Coulomb and other relevant interactions, it creates a unique balance between competing interactions that drive complex behaviors and exotic states not observed in other materials. The project encompasses a systematic effort to elucidate physics of novel phenomena in spin-orbit-coupled and correlated materials and a rigorous search for new materials having exotic ground states. 
This project focuses on the following areas: (1) Novel phenomena at high pressures and high magnetic fields, (2) Unusual correlations between the insulating gap and magnetic transition in iridates and ruthenates, (3) Exotic metallic and superconducting states in iridates, (4) Mott insulators with "intermediate-strength" spin-orbit interaction and other competing energies, and (5) Single-crystal synthesis and search for novel materials. The principal investigator is one of a few key pioneers who have initiated seminal studies on iridates and, before that, ruthenates, and has comprehensive facilities and proven expertise for single-crystal synthesis and wide-ranging studies of structural, transport, magnetic, thermal and dielectric properties as functions of temperature, magnetic field, pressure and doping.'
nlp = spacy.load('en_core_web_md')
doc = nlp(text)
# token.pos_ is the coarse universal POS tag; token.tag_ is the fine-grained tag.
df = pd.DataFrame([[token.text, token.pos_, token.tag_] for token in doc],
                  columns = ['Token', 'POS', 'TAG'])
df
"""
Explanation: 4.2. Part of Speech Tagging
The next component available in most spaCy pipelines is a POS tagger. The role of this component is to predict the part of speech of every detected token. This is where the statistical models come in. These components have been trained using machine learning methods over a large set of annotated data. If necessary, the models can be fine-tuned providing additional training data. This can be sometimes helpful when working in specialized domains.
The attributes calculated by all pipeline components after the Tokenizer are added as additional attributes to each of the patterns.
POS tags can be accessed as Token.pos_ (POS strings are hashed, and the underscore facilitates accessing the readable format of the variable).
Finer tags can be accessed as Token.tag_
The following code fragment allows us to represent the calculated POS for each token in the provided text.
End of explanation
"""
# Descriptions for POS values
#<SOL>
#</SOL>
# Descriptions for TAGS values
#<SOL>
#</SOL>
"""
Explanation: Exercise 5: Use spaCy command spacy.explain() to obtain the descriptions of all POS and TAG values that you got for the previous text fragment. Avoid repetitions.
End of explanation
"""
# Disable the dependency parser: it is not needed for Bag-of-Words preprocessing,
# and skipping it speeds up document processing.
nlp.disable_pipe("parser")
#If you wish to completely remove the component from the pipeline, you can use the following command
#nlp.remove_pipe("parser")
"""
Explanation: 4.3. Dependency Parser
The dependency parser aims at syntatic analysis of the text. It is the component that identifies the relation among the tokens in the given text, e.g., noun-chunks, verb objects, dependent clauses, etc.
Since our goal here is to use the pipeline to obtain BoW representation of the documents, we will not go deeper in the description of this component. If you are interested in learning more about dependy parsing in spaCy, you can check the official documentation
Since we will not be using this information, we can disable the component in the pipeline. This will also speed up document preprocessing.
End of explanation
"""
# NOTE(review): `english_text` is defined in an earlier cell — presumably
# the English portion of the corpus; confirm against preceding cells.
doc = nlp(english_text)
# doc.ents holds the detected entity spans; spacy.explain() turns each
# entity label into a human-readable description.
df_ents = pd.DataFrame([[ent.text, ent.label_, spacy.explain(ent.label_)] for ent in doc.ents], columns=['Entity', 'Type', 'Description'])
df_ents
"""
Explanation: 4.4. Named Entity Recognition
According to spaCy documentation:
spaCy features an extremely fast statistical entity recognition system, that assigns labels to contiguous spans of tokens. The default trained pipelines can identify a variety of named and numeric entities, including companies, locations, organizations and products. You can add arbitrary classes to the entity recognition system, and update the model with new examples.
A named entity is a “real-world object” that’s assigned a name – for example, a person, a country, a product or a book title. spaCy can recognize various types of named entities in a document, by asking the model for a prediction.
Because models are statistical and strongly depend on the examples they were trained on, this doesn’t always work perfectly and might need some tuning later, depending on your use case
You can iterate over the entities found in the text using doc.ents, as illustrated in the following example that displays also the types of the entities found.
End of explanation
"""
from spacy import displacy

# Short Wikipedia excerpt: well-edited encyclopedic text, used to contrast
# NER quality against the previous, more specialized abstract.
wiki_text = 'Albert Einstein (14 March 1879 – 18 April 1955) was a German-born theoretical physicist, widely acknowledged to be one of the greatest physicists of all time.' \
    ' Einstein is known widely for developing the theory of relativity, but he also made important contributions to the development of the theory of quantum mechanics.' \
    ' He received the 1921 Nobel Prize in Physics "for his services to theoretical physics, and especially for his discovery of the law of the photoelectric effect".' \
    ' Einstein was born in the German Empire, but moved to Switzerland in 1895, forsaking his German citizenship the following year.' \
    ' Einstein was awarded a PhD by the University of Zürich.' \
    ' On the eve of World War II, he endorsed a letter to President Franklin D. Roosevelt.'
wiki_doc = nlp(wiki_text)
# displacy highlights the detected entity spans inline in the notebook.
displacy.render(wiki_doc, style="ent", jupyter=True, options={'distance': 90})

# Unique entity types found, with their human-readable descriptions.
entypes = set([ent.label_ for ent in wiki_doc.ents])
df_ent = pd.DataFrame([[enttyp, spacy.explain(enttyp)] for enttyp in entypes], columns=['Entity type', 'Description'])
df_ent
"""
Explanation: Discussion: You can check that the NER algorithm is not always very accurate, both with respect to detection of entities and entity type classification. Note, however, that for some other texts performance can be much better. The following example contains an excerpt from the Albert Einstein wikipedia web page.
How does NER accuracy in this example compares to the previous case?
What do you believe is the reason for this?
End of explanation
"""
doc = nlp(text)

# NOTE(review): `colored` (termcolor) is imported in an earlier cell —
# presumably; confirm.
print(colored('============= Original text =============', 'blue'))
print(doc.text)
print(colored('\n============= Lemmas =============', 'red'))
# Token.lemma_ is the readable (string) form of the lemma hash.
print(' '.join([token.lemma_ for token in doc]))
"""
Explanation: 4.5. Lemmatization
English lemmatizer in spaCy consists of the following elements:
Lookup tables
Rule-based lemmatizer, that exploits POS information
List-based exceptions acquired from WordNet
The annotation attribute can be easily accessed as Token.lemma_
End of explanation
"""
# Multilingual sentence-segmentation pipeline plus a language detector,
# used to keep only English sentences before the English pipeline runs.
mult_nlp = spacy.load('xx_sent_ud_sm')
mult_nlp.add_pipe('language_detector', last=True)

# English pipeline without parser/NER (not needed for BoW preprocessing).
nlp = spacy.load('en_core_web_md')
nlp.disable_pipe('parser')
nlp.disable_pipe('ner')

# POS classes to keep, and domain-specific stopwords to drop.
valid_POS = set(['VERB', 'NOUN', 'ADJ', 'PROPN'])
specific_stw = set(['relevant', 'simple', 'base'])

def text_preprocessing(rawtext):
    """Exercise 7 solution stub: return the lemmatized version of *rawtext*,
    ready for BoW vectorization (see the steps listed in the exercise)."""
    #<SOL>
    #</SOL>

# NOTE(review): `multilingualtext` is defined in an earlier cell — confirm.
print(colored('============= Original text =============', 'blue'))
print(multilingualtext)
print(colored('\n============= Lemmatized text =============', 'red'))
print(text_preprocessing(multilingualtext))
"""
Explanation: 4.6. Other Annotations
Exercise 6: Have a look at the available attributes and functions of spaCy tokens using Python dir command.
Find out the significance of the following attributes: is_stop, is_alpha, is_digit, like_url, like_email, like_num, vector, and test your findings using text examples of your own.
5. Final Implementation of a pre-processing pipeline
Exercise 7: Implement a function that takes a string object and outputs the lemmatized text, ready for calculating BoW representation. The function should carry out the following steps:
Sentence tokenization and filtering of non-English sentences
Tokenization
POS
Lemmatization
Keep only alphanumeric tokens
Keep nouns, verbs, and adjectives
Generic Stopword removal
Specific Stopword removal
End of explanation
"""
|
rainyear/pytips | Tips/2016-03-28-Heap-and-Queue.ipynb | mit | import heapq
print(heapq.__all__)
"""
Explanation: Python 的堆与优先队列
Python 中内置的 heapq 库和 queue 分别提供了堆和优先队列结构,其中优先队列 queue.PriorityQueue 本身也是基于 heapq 实现的,因此我们这次重点看一下 heapq。
堆(Heap)是一种特殊形式的完全二叉树,其中父节点的值总是大于子节点,根据其性质,Python 中可以用一个满足 heap[k] <= heap[2*k+1] and heap[k] <= heap[2*k+2] 的列表来实现(heapq 也确实是这么做的)。堆可以用于实现调度器(例见:Python 3.5 之协程),更常用的是优先队列(例如:ImageColorTheme)。
heapq 提供了下面这些方法:
End of explanation
"""
from heapq import *

# Push 3, 2, 1 in turn; heappush restores the heap invariant
# heap[k] <= heap[2*k+1] and heap[k] <= heap[2*k+2] after every insert.
heap = []
for value in (3, 2, 1):
    heappush(heap, value)
print(heap)
"""
Explanation: 由于 Heap 是通过列表实现的,我们可以直接用列表创建:
End of explanation
"""
# Start from a plain descending list [4, 3, 2, 1, 0] and let heapify()
# rearrange it in place into a valid min-heap (O(n)).
heap = [4 - i for i in range(5)]
print("List: ", heap)
heapify(heap)
print("Heap: ", heap)
"""
Explanation: pop 或 sort 前要确保 heapify
或者通过 heapify 将普通列表转化为 Heap:
End of explanation
"""
heap = [5, 4, 3, 2, 1]
heapify(heap)
# Each heappop() removes and returns the current minimum, so the three
# values print in ascending order (the basis of heapsort).
for _ in range(3):
    print(heappop(heap))
"""
Explanation: 每次从 Heap 中 pop 出来的元素都是最小的(因而可以据此实现堆排序):
End of explanation
"""
from queue import PriorityQueue as PQueue

pq = PQueue()
# PriorityQueue is a thin wrapper over heapq, i.e. a min-heap: negate the
# priority so the highest-priority entry is served first.
for name, priority in (('Python', 5), ('C', 4), ('Js', 3)):
    pq.put((priority * -1, name))
print("Inside PriorityQueue: ", pq.queue)  # internal heap storage
while not pq.empty():
    print(pq.get()[1])
"""
Explanation: 优先队列
queue.PriorityQueue 实际上只是对 heapq 的简单封装,直接使用其 heappush/heappop 方法:
End of explanation
"""
import random

# Five random values in [1, 100); list.sort() leaves the list sorted, and
# a sorted list always satisfies the min-heap invariant, so heappop/heappush
# can be applied directly. Output varies run to run (no fixed seed).
lst = [random.randrange(1, 100) for _ in range(5)]
lst.sort()
print("List: ", lst)
print("Poped: ", heappop(lst))
heappush(lst, 4)
print("Heap: ", lst)
"""
Explanation: 由于 heapq 是最小堆,而通常 PriorityQueue 用在较大有限制的排前面,所以需要给 priority * -1。
sorted 一定是 Heap,反之未必
需要注意的是,虽然 Heap 通过 List 实习,但未经过 heapify() 处理的仍然是一个普通的 List,而 heappush 和 heappop 操作每次都会对 Heap 进行重新整理。此外,一个 Heap 列表不一定是正确排序的,但是经过 list.sort() 的列表一定是 Heap:
End of explanation
"""
# 1000 random values; nlargest/nsmallest return the top/bottom n items
# as new lists without consuming the heap.
heap = [random.randrange(1, 1000) for _ in range(1000)]
heapify(heap)
print("N largest: ", nlargest(10, heap))
print("N smallest: ", nsmallest(10, heap))
print(len(heap)) # not modified in place: the heap still holds all 1000 items
"""
Explanation: 最大/最小的 N 个数
Heap 还提供了 nsmallest 和 nlargest 方法用于取出前 n 个最大/最小数:
End of explanation
"""
heapA = sorted([random.randrange(1, 100) for _ in range(3)])
heapB = sorted([random.randrange(1, 100) for _ in range(3)])
# merge() lazily interleaves several sorted inputs into a single sorted
# stream; materialize it into a list for display.
merged = list(merge(heapA, heapB))
print(merged)
"""
Explanation: 合并(排序)
merge 方法用于将两个 Heap 进行合并:
End of explanation
"""
lstA = [1, 2, 3, 4, 5]
lstB = [1, 2, 3, 4, 5]

# heapreplace: pop the current minimum FIRST, then push the new item.
poped = heapreplace(lstA, 0)
print("lstA: ", lstA, "poped: ", poped)
# is equal to...
poped = heappop(lstB)
heappush(lstB, 0)
# BUG FIX: this line previously printed lstA under the "lstB: " label;
# the two lists differ here ([0,2,3,4,5] vs [0,2,3,5,4]), so the old
# output was misleading.
print("lstB: ", lstB, "poped: ", poped)
print("*"*30)
# heappushpop: push the new item FIRST, then pop the minimum (so the new
# item itself can be returned if it is the smallest).
poped = heappushpop(lstA, 9)
print("lstA: ", lstA, "poped: ", poped)
# is equal to...
heappush(lstB, 9)
poped = heappop(lstB)
print("lstB: ", lstB, "poped: ", poped)
"""
Explanation: 最后两个方法 heapreplace 和 heappushpop 分别相当于:
End of explanation
"""
item = 0
lstA = [1, 2, 3, 4, 5]
# When the new item is smaller than the root it belongs at position 0, so a
# direct swap is cheaper than a full heapreplace (no sift needed).
if item < lstA[0]:
    poped, lstA[0] = lstA[0], item
print("lstA: ", lstA, "poped: ", poped)
"""
Explanation: 这两个方法的执行效率要比分开写的方法高,但要注意 heapreplace 要取代的值是否比 heap[0] 大,如果不是,可以用更有效的方法:
End of explanation
"""
|
kyclark/metagenomics-book | python/consensus/consensus.ipynb | gpl-3.0 | import pandas as pd
from collections import Counter
seqs = ['TCGGGGGTTTTT',
'CCGGTGACTTAC',
'ACGGGGATTTTC',
'TTGGGGACTTTT',
'AAGGGGACTTCC',
'TTGGGGACTTCC',
'TCGGGGATTCAT',
'TCGGGGATTCCT',
'TAGGGGACCTAC',
'TCGGGTATAACC']
data = {}
for i, seq in enumerate(seqs):
data[i+1] = list(seq)
df = pd.DataFrame(data).T
df
"""
Explanation: Finding a Consensus Sequence
This code is implementation of Chapter 2 from Bioinformatics Algorithms (Pevzner, Compeau).
End of explanation
"""
# Build, one alignment position (column) at a time:
#   freq_df          - bases, with non-consensus bases lower-cased
#   counts_df        - raw base counts per position
#   profile_df       - base frequencies per position (count / num sequences)
#   simple_consensus - single most frequent base per position
freq_df = pd.DataFrame()
nucs = list('ACGT')
num_seqs = len(seqs)
# NOTE(review): columns are initialized with num_seqs (10) labels, but the
# loop below visits 12 positions; `.at` setting-with-enlargement adds the
# missing columns, so the result is still complete. Confirm this was intended.
counts_df = pd.DataFrame(0, index=nucs, columns=list(range(num_seqs)))
profile_df = pd.DataFrame(0., index=nucs, columns=list(range(num_seqs)))

simple_consensus = []
for col in df.columns:
    bases = df[col].values.tolist()
    base_count = Counter(bases)
    # NOTE(review): on ties max() over a set picks an arbitrary base (set
    # order depends on string hashing), so the simple consensus is not
    # deterministic across runs at tied positions.
    most_common = max(set(bases), key=bases.count)
    simple_consensus.append(most_common)
    # Lower-case every base that differs from the column's most common one.
    freq_df[col] = list(map(lambda b: b if b == most_common else b.lower(), bases))
    for nuc in nucs:
        counts_df.at[nuc, col] = base_count[nuc]
        profile_df.at[nuc, col] = base_count[nuc] / len(seqs)

print('Consensus')
print(''.join(simple_consensus))
print('Frequency')
freq_df
print('Counts')
counts_df
print('Profile')
profile_df

# Score = number of non-consensus (lower-case) bases per column; the sum is
# the total motif score (lower means better conservation).
scores = []
for col in freq_df:
    bases = freq_df[col].values.tolist()
    lower = list(filter(lambda b: b.islower(), bases))
    scores.append(len(lower))
print(scores)
print(sum(scores))
"""
Explanation: Now we have a DataFrame where we can get all the bases in each position to find the base(s) occurring most frequently. We'll create a second DataFrame where the less frequent bases are put into lowercase. We'll also create a profile that gives us the frequency as a percentage. Note that we can create DataFrame of zeros easily, but we need the percentage profile to be initialized with "0." to indicate that we will store floats not integers.
Lastly, we'll create a "simple consensus" that simply uses the singly most frequently occurring base. When there is a tie, a random base is chosen.
End of explanation
"""
# "Real" consensus: at each position keep every base whose frequency is at
# least 40%, joined with '/' when several qualify (uses profile_df from the
# previous cell).
consensus = []
for col in profile_df.columns:
    profile = profile_df[col]
    max_bases = profile[profile >= .4].index.tolist()
    consensus.append('/'.join(max_bases))
print('Consensus')
print(' '.join(consensus))
"""
Explanation: For the "real" consensus, we'll take into account all bases occurring at a frequency of 40% or greater.
End of explanation
"""
|
napsternxg/GET17_SNA | notebooks/NetworkX.ipynb | gpl-3.0 | %matplotlib inline
from operator import itemgetter
import networkx as nx
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
from io import StringIO
import pydotplus
from IPython.display import SVG, display
sns.set_context("poster")
sns.set_style("ticks")
DATA_DIR="../data"
INPUT_NETWORK=os.path.join(DATA_DIR, "lesmis","lesmis.gml")
INPUT_NETWORK
"""
Explanation: Les Misrebles Network Analysis using NetworkX
More details at: http://networkx.readthedocs.io/en/networkx-1.10/index.html
Source: http://www-personal.umich.edu/~mejn/netdata/ and http://networkdata.ics.uci.edu/data/lesmis/
Code adopted from: https://github.com/networkx/notebooks
Citation: Les Miserables: coappearance network of characters in the novel Les Miserables. Please cite D. E. Knuth, The Stanford GraphBase: A Platform for Combinatorial Computing, Addison-Wesley, Reading, MA (1993).
End of explanation
"""
# Load the co-appearance graph from GML (requires the data file on disk).
G = nx.read_gml(INPUT_NETWORK)
#nx.write_gml(G, "../data/lesmis/lesmis.paj.gml")

# Degree table, top-10 most connected characters first.
df_node_degree = pd.DataFrame(list(dict(G.degree()).items()), columns=["node_name", "degree"])
df_node_degree.sort_values("degree", ascending=False).head(10)

# Whole-graph metrics; radius/diameter/eccentricity assume G is connected.
print("radius: {:d}\n".format(nx.radius(G)))
print("diameter: {:d}\n".format(nx.diameter(G)))
print("eccentricity: {}\n".format(nx.eccentricity(G)))
print("center: {}\n".format(nx.center(G)))
print("periphery: {}\n".format(nx.periphery(G)))
print("density: {:f}".format(nx.density(G)))
"""
Explanation: Reading the GML format
Please read the following about the GML format for storing networks
http://networkx.readthedocs.io/en/networkx-1.10/reference/readwrite.gml.html#format
End of explanation
"""
# Connected components as subgraphs, largest first.
# NOTE(review): connected_component_subgraphs() was deprecated in networkx
# 2.1 and removed in 2.4; on newer versions use
# [G.subgraph(c).copy() for c in nx.connected_components(G)] — confirm the
# networkx version this notebook targets.
connected_components = sorted(nx.connected_component_subgraphs(G), key = len, reverse=True)
print("{} connected components found.".format(len(connected_components)))
"""
Explanation: Connected components
End of explanation
"""
# Quick default rendering, then a larger labeled spring layout where node
# size is proportional to degree.
nx.draw(G)

fig, ax = plt.subplots(1, 1, figsize=(16, 16))
nx.draw_networkx(
    G, with_labels=True,
    # BUG FIX: Graph.degree_iter() was removed in networkx 2.0; use the
    # dict(G.degree()) form already used elsewhere in this notebook. The
    # dict preserves node order, so sizes still align with the node list.
    node_size=[deg * 10 for deg in dict(G.degree()).values()],
    pos=nx.spring_layout(G),
    node_color="g",
    font_size=8,
    ax=ax)
ax.axis("off")
def show_graph(G, file_path):
    """Render *G* as an SVG via Graphviz dot and display it inline.

    The graph is serialized to dot format in memory, converted with
    pydotplus, written to *file_path*, and shown in the notebook.
    """
    dot_buffer = StringIO()
    nx.drawing.nx_pydot.write_dot(G, dot_buffer)
    svg_graph = pydotplus.graph_from_dot_data(dot_buffer.getvalue())
    svg_graph.write_svg(file_path)
    display(SVG(file_path))
show_graph(G, "../output/lesmis.svg")
"""
Explanation: Drawing the graph
End of explanation
"""
|
icrtiou/coursera-ML | ex4-NN back propagation/2- the cost function.ipynb | mit | %reload_ext autoreload
%autoreload 2
import sys
sys.path.append('..')
from helper import nn
from helper import logistic_regression as lr
import numpy as np
"""
Explanation: note
Didn't mean to generalize NN here. Just plow through this 400>25>10 setup to get the feeling of NN
End of explanation
"""
# Load the digit images via the course helper module (requires the .mat file).
X_raw, y_raw = nn.load_data('ex4data1.mat', transpose=False)
# Prepend a bias column of ones; X.shape is printed below — presumably
# (5000, 401) given the 400->25->10 architecture, confirm from the output.
X = np.insert(X_raw, 0, np.ones(X_raw.shape[0]), axis=1)
X.shape
"""
Explanation: prepare data
End of explanation
"""
y_raw
# Expand integer labels into one-hot vectors (per the helper name;
# confirm in helper.nn).
y = nn.expand_y(y_raw)
y
"""
Explanation:
End of explanation
"""
# Pre-trained weight matrices for the two layers (shapes printed below).
t1, t2 = nn.load_weight('ex4weights.mat')
t1.shape, t2.shape

# Unroll both weight matrices into a single flat vector for the optimizer.
theta = nn.serialize(t1, t2) # flatten params
theta.shape
"""
Explanation: load weight
End of explanation
"""
# Forward pass; the last returned element h is the output-layer activation
# matrix (one row per example, one column per class).
_, _, _, _, h = nn.feed_forward(theta, X)
h # 5000*10
"""
Explanation: feed forward
(400 + 1) -> (25 + 1) -> (10)
<img style="float: left;" src="../img/nn_model.png">
End of explanation
"""
# Unregularized cross-entropy cost over the whole training set.
nn.cost(theta, X, y)
"""
Explanation: cost function
<img style="float: left;" src="../img/nn_cost.png">
think about this, now we have $y$ and $h_{\theta} \in R^{5000 \times 10}$
If you just ignore the m and k dimensions, this computation is trivial when done pairwise.
The equation is $-y\log(h_{\theta}) - (1-y)\log(1-h_{\theta})$
All you need to do after the pairwise computation is sum this 2D array up and divide by m.
End of explanation
"""
# Cost with L2 regularization added (per the helper name; the intercept
# columns of t1/t2 should be excluded — confirm in helper.nn).
nn.regularized_cost(theta, X, y)
"""
Explanation: regularized cost function
<img style="float: left;" src="../img/nn_regcost.png">
the first column of t1 and t2 is intercept $\theta$, just forget them when you do regularization
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/mpi-m/cmip6/models/sandbox-2/atmoschem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'mpi-m', 'sandbox-2', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: MPI-M
Source ID: SANDBOX-2
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:17
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is couple with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. &Document the relative weight given to climate performance metrics versus process oriented metrics, &and on the possible conflicts with parameterization level tuning. In particular describe any struggle &with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmopsheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
* Does the atmospheric chemistry grid match the atmosphere grid?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme or not?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
kimkipyo/dss_git_kkp | 통계, 머신러닝 복습/160524화_7일차_기초 확률론 3 - 확률 모형 Probability Models(단변수 분포)/2.이항 확률 분포.ipynb | mit | N = 10
theta = 0.6
rv = sp.stats.binom(N, theta)
rv
"""
Explanation: 이항 확률 분포
베르누이 시도(Bernoulli trial)란 성공 혹은 실패로 결과가 나오는 것을 말한다.
성공확률이 $\theta$ 인 베르누이 시도를 $N$번 하는 경우를 생각해 보자. 가장 운이 좋을 때에는 $N$번 모두 성공할 것이고 가장 운이 나쁜 경우에는 한 번도 성공하지 못할 것이다. $N$번 중 성공한 횟수를 확률 변수 $X$ 라고 한다면 $X$의 값은 0 부터 $N$ 까지의 정수 중 하나가 될 것이다.
이러한 확률 변수를 이항 분포(binomial distribution)를 따르는 확률 변수라고 한다.
$$ X \sim \text{Bin}(x;N,\theta) $$
이항 확률 분포를 수식으로 묘사해 보자.
0 또는 1이 나오는 베르누이 확률 분포를 따르는 확률 변수 $Y$를 가정한다.
$$ Y \sim \text{Bern}(y;\theta) $$
이 확률 변수의 $N$개의 샘플을 $y_1, y_2, \cdots, y_N$라고 하자. 이 값은 모두 0(실패) 아니면 1(성공) 이라는 값을 가지기 때문에 $N$번 중 성공한 횟수는 $N$개의 샘플 값의 총합이다.
$$ X = \sum_{i=1}^N y_i $$
이항 확률 분포를 수식으로 쓰면 다음과 같다.
$$ \text{Bin}(x;N,\theta) = \binom N x \theta^x(1-\theta)^{N-x} $$
이 식에서
$$ \binom N x =\dfrac{N!}{x!(N-x)!} $$
$$ N! = N\cdot (N-1) \cdots 2 \cdot 1 $$
SciPy를 사용한 베르누이 분포의 시뮬레이션
Scipy의 stats 서브 패키지에 있는 binom 클래스는 이항 분포 클래스이다. n 인수와 p 인수를 사용하여 모수를 설정한다
End of explanation
"""
# Support of the distribution: the integers 0..N inclusive (N + 1 outcomes).
xx = np.arange(N + 1)
# Bar chart of the probability mass function P(X = x).
plt.bar(xx, rv.pmf(xx), align="center")
plt.ylabel("P(x)")
plt.title("pmf of binomial distribution")
plt.show()
"""
Explanation: pmf 메서드를 사용하면 확률 질량 함수(pmf: probability mass function)를 계산할 수 있다.
End of explanation
"""
# Fix the RNG seed so the simulated sample is reproducible.
np.random.seed(0)
# Draw 100 samples from Bin(N, theta).
x = rv.rvs(100)
# Bare expression: displays the sample array in the notebook.
x
# Bar chart of how often each outcome occurred in the sample.
sns.countplot(x)
plt.show()
"""
Explanation: 시뮬레이션을 하려면 rvs 메서드를 사용한다.
End of explanation
"""
# Empirical frequency of each outcome 0..N.  minlength must be N + 1
# (not N): rv.pmf(xx) has N + 1 entries because xx = np.arange(N + 1),
# and bincount only pads the result up to `minlength`.  With minlength=N
# the array has just N entries whenever the value N never occurs in the
# sample, and the DataFrame constructor below fails on unequal lengths.
y = np.bincount(x, minlength=N + 1) / len(x)
# Long-format table with one row per (outcome, series) pair so seaborn
# can draw the theoretical pmf next to the simulated frequencies.
df = pd.DataFrame({"theoretic": rv.pmf(xx), "simulation": y}).stack()
df = df.reset_index()
df.columns = ["value", "type", "ratio"]
df
sns.barplot(x="value", y="ratio", hue="type", data=df)
plt.show()
"""
Explanation: 이론적인 확률 분포와 샘플의 확률 분포를 동시에 나타내려면 다음과 같은 코드를 사용한다.
End of explanation
"""
|
Agent007/deepchem | examples/notebooks/protein_ligand_complex_notebook.ipynb | mit | %load_ext autoreload
%autoreload 2
%pdb off
# set DISPLAY = True when running tutorial
DISPLAY = False
# set PARALLELIZE to true if you want to use ipyparallel
PARALLELIZE = False
import warnings
warnings.filterwarnings('ignore')
import deepchem as dc
from deepchem.utils import download_url
import os
download_url("https://s3-us-west-1.amazonaws.com/deepchem.io/datasets/pdbbind_core_df.csv.gz")
data_dir = os.path.join(dc.utils.get_data_dir())
dataset_file= os.path.join(dc.utils.get_data_dir(), "pdbbind_core_df.csv.gz")
raw_dataset = dc.utils.save.load_from_disk(dataset_file)
"""
Explanation: Basic Protein-Ligand Affinity Models
Tutorial: Use machine learning to model protein-ligand affinity.
Written by Evan Feinberg and Bharath Ramsundar
Copyright 2016, Stanford University
This DeepChem tutorial demonstrates how to use machine learning for modeling protein-ligand binding affinity.
Overview:
In this tutorial, you will trace an arc from loading a raw dataset to fitting a cutting edge ML technique for predicting binding affinities. This will be accomplished by writing simple commands to access the deepchem Python API, encompassing the following broad steps:
Loading a chemical dataset, consisting of a series of protein-ligand complexes.
Featurizing each protein-ligand complex with various featurization schemes.
Fitting a series of models with these featurized protein-ligand complexes.
Visualizing the results.
First, let's point to a "dataset" file. This can come in the format of a CSV file or Pandas DataFrame. Regardless
of file format, it must be columnar data, where each row is a molecular system, and each column represents
a different piece of information about that system. For instance, in this example, every row reflects a
protein-ligand complex, and the following columns are present: a unique complex identifier; the SMILES string
of the ligand; the binding affinity (Ki) of the ligand to the protein in the complex; a Python list of all lines
in a PDB file for the protein alone; and a Python list of all lines in a ligand file for the ligand alone.
This should become clearer with the example. (Make sure to set DISPLAY = True)
End of explanation
"""
# Quick sanity check on the loaded DataFrame: type, first rows, and shape.
print("Type of dataset is: %s" % str(type(raw_dataset)))
print(raw_dataset[:5])
print("Shape of dataset is: %s" % str(raw_dataset.shape))
"""
Explanation: Let's see what dataset looks like:
End of explanation
"""
import nglview
import tempfile
import os
import mdtraj as md
import numpy as np
import deepchem.utils.visualization
#from deepchem.utils.visualization import combine_mdtraj, visualize_complex, convert_lines_to_mdtraj
def combine_mdtraj(protein, ligand):
    """Merge the ligand's atoms into *protein* as a new "LIG" residue.

    Mutates *protein* in place (both topology and coordinates) and
    returns it.  The "LIG" residue name is what visualize_complex()
    later uses to pick the ligand atoms out of the merged trajectory.
    """
    lig_chain = protein.topology.add_chain()
    lig_residue = protein.topology.add_residue("LIG", lig_chain, resSeq=1)
    # Copy every ligand atom into the freshly created residue.
    for lig_atom in ligand.topology.atoms:
        protein.topology.add_atom(lig_atom.name, lig_atom.element, lig_residue)
    # Append the ligand coordinates along the atom axis, then let mdtraj
    # infer standard bonds for the combined topology.
    protein.xyz = np.hstack([protein.xyz, ligand.xyz])
    protein.topology.create_standard_bonds()
    return protein
def visualize_complex(complex_mdtraj):
    """Render a protein-ligand complex in an embedded NGLView widget.

    Shows the protein as a cartoon, the binding-pocket residues (heavy
    atoms within 0.5 nm of the ligand) as licorice, and the ligand
    itself as ball-and-stick.
    """
    # Ligand atoms are identified by the "LIG" residue name assigned in
    # combine_mdtraj().
    ligand_atoms = [a.index for a in complex_mdtraj.topology.atoms if "LIG" in str(a.residue)]
    # 0.5 nm cutoff around the ligand defines the binding pocket.
    binding_pocket_atoms = md.compute_neighbors(complex_mdtraj, 0.5, ligand_atoms)[0]
    binding_pocket_residues = list(set([complex_mdtraj.topology.atom(a).residue.resSeq for a in binding_pocket_atoms]))
    # Build an NGL selection string like "12 or 45 or 87" from the
    # pocket residue sequence numbers.
    binding_pocket_residues = [str(r) for r in binding_pocket_residues]
    binding_pocket_residues = " or ".join(binding_pocket_residues)
    # Wrap the in-memory mdtraj trajectory so NGLView can display it.
    traj = nglview.MDTrajTrajectory( complex_mdtraj )
    ngltraj = nglview.NGLWidget( traj )
    ngltraj.representations = [
        { "type": "cartoon", "params": {
        "sele": "protein", "color": "residueindex"
        } },
        { "type": "licorice", "params": {
        "sele": "(not hydrogen) and (%s)" % binding_pocket_residues
        } },
        { "type": "ball+stick", "params": {
        "sele": "LIG"
        } }
    ]
    return ngltraj
def visualize_ligand(ligand_mdtraj):
    """Render a lone ligand as ball-and-stick in an NGLView widget."""
    # Wrap the in-memory mdtraj trajectory so NGLView can display it.
    widget = nglview.NGLWidget(nglview.MDTrajTrajectory(ligand_mdtraj))
    widget.representations = [
        {"type": "ball+stick", "params": {"sele": "all"}}
    ]
    return widget
def convert_lines_to_mdtraj(molecule_lines):
    """Parse a stringified list of PDB lines into an mdtraj Trajectory.

    *molecule_lines* is the string repr of a Python list of PDB-file
    lines (as stored in the pdbbind_core_df CSV), e.g.
    "['ATOM ...', 'ATOM ...']".  The lines are written to a temporary
    PDB file which mdtraj then loads into memory.
    """
    # Undo the list repr: strip the brackets and quotes, drop literal
    # "\n" escapes, and split on the ", " element separator.
    molecule_lines = molecule_lines.strip('[').strip(']').replace("'", "").replace("\\n", "").split(", ")
    # Use a context manager so the scratch directory is removed once the
    # trajectory is loaded (the original mkdtemp() leaked one directory
    # per call).
    with tempfile.TemporaryDirectory() as tempdir:
        molecule_file = os.path.join(tempdir, "molecule.pdb")
        with open(molecule_file, "w") as f:
            for line in molecule_lines:
                f.write("%s\n" % line)
        molecule_mdtraj = md.load(molecule_file)
    return molecule_mdtraj
# Pull the first complex from the dataset and rebuild mdtraj objects
# from the PDB text stored in the DataFrame.
first_protein, first_ligand = raw_dataset.iloc[0]["protein_pdb"], raw_dataset.iloc[0]["ligand_pdb"]
protein_mdtraj = convert_lines_to_mdtraj(first_protein)
ligand_mdtraj = convert_lines_to_mdtraj(first_ligand)
# Merge the ligand into the protein and render the complex interactively.
complex_mdtraj = combine_mdtraj(protein_mdtraj, ligand_mdtraj)
ngltraj = visualize_complex(complex_mdtraj)
ngltraj
"""
Explanation: One of the missions of deepchem is to form a synapse between the chemical and the algorithmic worlds: to be able to leverage the powerful and diverse array of tools available in Python to analyze molecules. This ethos applies to visual as much as quantitative examination:
End of explanation
"""
# Complex featurizer: voxelizes the protein-ligand complex and computes
# interaction fingerprints (ECFP, SPLIF, hydrogen bonds, pi stacking,
# cation-pi, salt bridges) per voxel, flattened into one feature vector.
grid_featurizer = dc.feat.RdkitGridFeaturizer(
    voxel_width=16.0, feature_types="voxel_combined",
    voxel_feature_types=["ecfp", "splif", "hbond", "pi_stack", "cation_pi", "salt_bridge"],
    ecfp_power=5, splif_power=5, parallel=True, flatten=True)
# Compound featurizer: circular (ECFP-style) fingerprint of the ligand
# alone, folded to 128 bits.
compound_featurizer = dc.feat.CircularFingerprint(size=128)
"""
Explanation: Now that we're oriented, let's use ML to do some chemistry.
So, step (2) will entail featurizing the dataset.
The available featurizations that come standard with deepchem are ECFP4 fingerprints, RDKit descriptors, NNScore-style descriptors, and hybrid binding pocket descriptors. Details can be found on deepchem.io.
End of explanation
"""
# MoleculeNet featurizes and splits the PDBBind grid dataset for us:
# task names, (train/valid/test) datasets, and the y-transformers that
# were applied (needed later to undo normalization when scoring).
PDBBIND_tasks, (train_dataset, valid_dataset, test_dataset), transformers = dc.molnet.load_pdbbind_grid()
"""
Explanation: Note how we separate our featurizers into those that featurize individual chemical compounds, compound_featurizers, and those that featurize molecular complexes, complex_featurizers.
Now, let's perform the actual featurization. Calling loader.featurize() will return an instance of class Dataset. Internally, loader.featurize() (a) computes the specified features on the data, (b) transforms the inputs into X and y NumPy arrays suitable for ML algorithms, and (c) constructs a Dataset() instance that has useful methods, such as an iterator, over the featurized data. This is a little complicated, so we will use MoleculeNet to featurize the PDBBind core set for us.
End of explanation
"""
from sklearn.ensemble import RandomForestRegressor
# Baseline model: a 100-tree random forest regressor, wrapped in
# SklearnModel so it exposes the DeepChem model interface.
sklearn_model = RandomForestRegressor(n_estimators=100)
model = dc.models.SklearnModel(sklearn_model)
model.fit(train_dataset)
from deepchem.utils.evaluate import Evaluator
import pandas as pd
# Score with R^2; the transformers undo the dataset's y-normalization
# before the metric is computed.
metric = dc.metrics.Metric(dc.metrics.r2_score)
evaluator = Evaluator(model, train_dataset, transformers)
train_r2score = evaluator.compute_model_performance([metric])
print("RF Train set R^2 %f" % (train_r2score["r2_score"]))
# Validation-set score is the honest estimate of generalization.
evaluator = Evaluator(model, valid_dataset, transformers)
valid_r2score = evaluator.compute_model_performance([metric])
print("RF Valid set R^2 %f" % (valid_r2score["r2_score"]))
"""
Explanation: Now, we conduct a train-test split. If you'd like, you can choose splittype="scaffold" instead to perform a train-test split based on Bemis-Murcko scaffolds.
We generate separate instances of the Dataset() object to hermetically seal the train dataset from the test dataset. This style lends itself easily to validation-set type hyperparameter searches, which we will illustrate in a separate section of this tutorial.
The performance of many ML algorithms hinges greatly on careful data preprocessing. Deepchem comes standard with a few options for such preprocessing.
Now, we're ready to do some learning!
To fit a deepchem model, first we instantiate one of the provided (or user-written) model classes. In this case, we have a created a convenience class to wrap around any ML model available in Sci-Kit Learn that can in turn be used to interoperate with deepchem. To instantiate an SklearnModel, you will need (a) task_types, (b) model_params, another dict as illustrated below, and (c) a model_instance defining the type of model you would like to fit, in this case a RandomForestRegressor.
End of explanation
"""
# Predict affinities for the held-out test set with the fitted model.
predictions = model.predict(test_dataset)
print(predictions)
# TODO(rbharath): This cell visualizes the ligand with highest predicted activity. Commenting it out for now. Fix this later
#from deepchem.utils.visualization import visualize_ligand
#top_ligand = predictions.iloc[0]['ids']
#ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==top_ligand]['ligand_pdb'].values[0])
#if DISPLAY:
# ngltraj = visualize_ligand(ligand1)
# ngltraj
# TODO(rbharath): This cell visualizes the ligand with lowest predicted activity. Commenting it out for now. Fix this later
#worst_ligand = predictions.iloc[predictions.shape[0]-2]['ids']
#ligand1 = convert_lines_to_mdtraj(dataset.loc[dataset['complex_id']==worst_ligand]['ligand_pdb'].values[0])
#if DISPLAY:
# ngltraj = visualize_ligand(ligand1)
# ngltraj
"""
Explanation: In this simple example, in few yet intuitive lines of code, we traced the machine learning arc from featurizing a raw dataset to fitting and evaluating a model.
Here, we featurized only the ligand. The signal we observed in R^2 reflects the ability of circular fingerprints and random forests to learn general features that make ligands "drug-like."
End of explanation
"""
def rf_model_builder(model_params, model_dir):
  """Build a deepchem-wrapped random forest for hyperparameter search.

  :param model_params: keyword arguments forwarded to RandomForestRegressor
  :param model_dir: directory where the wrapped model stores its state
  :return: a dc.models.SklearnModel wrapping the forest
  """
  forest = RandomForestRegressor(**model_params)
  return dc.models.SklearnModel(forest, model_dir)
params_dict = {
"n_estimators": [10, 50, 100],
"max_features": ["auto", "sqrt", "log2", None],
}
metric = dc.metrics.Metric(dc.metrics.r2_score)
optimizer = dc.hyper.HyperparamOpt(rf_model_builder)
best_rf, best_rf_hyperparams, all_rf_results = optimizer.hyperparam_search(
params_dict, train_dataset, valid_dataset, transformers,
metric=metric)
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
rf_predicted_test = best_rf.predict(test_dataset)
rf_true_test = test_dataset.y
plt.scatter(rf_predicted_test, rf_true_test)
plt.xlabel('Predicted pIC50s')
plt.ylabel('True IC50')
plt.title(r'RF predicted IC50 vs. True pIC50')
plt.xlim([2, 11])
plt.ylim([2, 11])
plt.plot([2, 11], [2, 11], color='k')
plt.show()
"""
Explanation: The protein-ligand complex view.
The preceding simple example, in few yet intuitive lines of code, traces the machine learning arc from featurizing a raw dataset to fitting and evaluating a model.
In this next section, we illustrate deepchem's modularity, and thereby the ease with which one can explore different featurization schemes, different models, and combinations thereof, to achieve the best performance on a given dataset. We will demonstrate this by examining protein-ligand interactions.
In the previous section, we featurized only the ligand. The signal we observed in R^2 reflects the ability of grid fingerprints and random forests to learn general features that make ligands "drug-like." In this section, we demonstrate how to use hyperparameter searching to find a higher-scoring model.
End of explanation
"""
|
szitenberg/ReproPhyloVagrant | notebooks/Tutorials/Basic/3.7 Alignment trimming.ipynb | mit | from reprophylo import *
pj = unpickle_pj('./outputs/my_project.pkpj',
git=False)
"""
Explanation: This section starts with a Project that already contains alignments:
End of explanation
"""
pj.alignments.keys()
"""
Explanation: If we call the keys of the pj.alignments dictionary, we can see the names of the alignments it contains:
End of explanation
"""
gappyout = TrimalConf(pj, # The Project
method_name='gappyout', # Any unique string ('gappyout' is default)
program_name='trimal', # No alternatives in this ReproPhylo version
cmd='default', # the default is trimal. Change it here
# or in pj.defaults['trimal']
alns=['MT-CO1@mafftLinsi'], # 'all' by default
trimal_commands={'gappyout': True} # By default, the gappyout algorithm is used.
)
"""
Explanation: 3.7.1 Configuring an alignment trimming process
Like the sequence alignment phase, alignment trimming has its own configuration class, the TrimalConf class. An object of this class will generate a command-line and the required input files for the program TrimAl, but will not execute the process (this is shown below). Once the process has been successfully executed, this TrimalConf object is also stored in pj.used_methods and it can be invoked as a report.
3.7.1.1 Example 1, the default gappyout algorithm
With TrimalConf, instead of specifying loci names, we provide alignment names, as they appear in the keys of pj.alignments
End of explanation
"""
rRNA_locus_names = [locus.name for locus in pj.loci if locus.feature_type == 'rRNA']
print rRNA_locus_names
"""
Explanation: 3.7.1.2 List comprehension to subset alignments
In this example, it is easy enough to copy and paste alignment names into a list and pass it to TrimalConf. But this is more difficult if we want to fish out a subset of alignments from a very large list of alignments. In such cases, Python's list comprehension is very useful. Below I show two uses of list comprehension, but the more you feel comfortable with this approach, the better.
Getting locus names of rRNA loci
If you read the code line that follows very carefully, you will see it quite literally says "take the name of each Locus found in pj.loci if its feature type is rRNA, and put it in a list":
End of explanation
"""
rRNA_alignment_names = [key for key in pj.alignments.keys() if key.split('@')[0] in rRNA_locus_names]
print rRNA_alignment_names
"""
Explanation: what we get is a list of names of our rRNA loci.
Getting alignment names that have locus names of rRNA loci
The following line says: "take the key of each alignment from the pj.alignments dictionary if the first word before the '@' symbol is in the list of rRNA locus names, and put this key in a list":
End of explanation
"""
gt50 = TrimalConf(pj,
method_name='gt50',
alns = rRNA_alignment_names,
trimal_commands={'gt': 0.5} # This will keep positions with up to
# 50% gaps.
)
"""
Explanation: We get a list of keys, of the rRNA loci alignments we produced on the previous section, and which are stored in the pj.alignments dictionary. We can now pass this list to a new TrimalConf instance that will only process rRNA locus alignments:
End of explanation
"""
pj.trim([gappyout, gt50])
"""
Explanation: 3.7.2 Executing the alignment trimming process
As for the alignment phase, this is done with a Project method, which accepts a list of TrimalConf objects.
End of explanation
"""
print pj.used_methods['gappyout']
"""
Explanation: Once used, these objects are also placed in the pj.used_methods dictionary, and they can be printed out for observation:
End of explanation
"""
pj.trimmed_alignments
"""
Explanation: 3.7.3 Accessing trimmed sequence alignments
3.7.3.1 The pj.trimmed_alignments dictionary
The trimmed alignments themselves are stored in the pj.trimmed_alignments dictionary, using keys that follow this pattern: locus_name@alignment_method_name@trimming_method_name where alignment_method_name is the name you have provided to your AlnConf object and trimming_method_name is the one you provided to your TrimalConf object.
End of explanation
"""
print pj.fta('18s@muscleDefault@gt50')[:4,410:420].format('phylip-relaxed')
"""
Explanation: 3.7.3.2 Accessing a MultipleSeqAlignment object
A trimmed alignment can be easily accessed and manipulated with any of Biopython's AlignIO tricks using the fta Project method:
End of explanation
"""
# record_id and source_organism are feature qualifiers in the SeqRecord object
# See section 3.4
files = pj.write_trimmed_alns(id=['record_id','source_organism'],
format='fasta')
files
"""
Explanation: 3.7.3.3 Writing trimmed sequence alignment files
Trimmed alignment text files can be dumped in any AlignIO format for usage in an external command line or GUI program. When writing to files, you can control the header of the sequence by, for example, adding the organism name of the gene name, or by replacing the feature ID with the record ID:
End of explanation
"""
# make a new directory for your trimmed alignment files:
if not os.path.exists('trimmed_alignment_files'):
os.mkdir('trimmed_alignment_files')
# move the files there
for f in files:
os.rename(f, "./trimmed_alignment_files/%s"%f)
"""
Explanation: The files will always be written to the current working directory (where this notebook file is), and can immediately be moved programmatically to avoid clutter:
End of explanation
"""
pj.show_aln('MT-CO1@mafftLinsi@gappyout',id=['source_organism'])
pickle_pj(pj, 'outputs/my_project.pkpj')
"""
Explanation: 3.7.3.4 Viewing trimmed alignments
Trimmed alignments can be viewed in the same way as alignments, but using this command:
End of explanation
"""
# Make a TrimalConf object
trimconf = TrimalConf(pj, **kwargs)
# Execute alignment process
pj.trim([trimconf])
# Show AlnConf description
print pj.used_methods['method_name']
# Fetch a MultipleSeqAlignment object
trim_aln_obj = pj.fta('locus_name@aln_method_name@trim_method_name')
# Write alignment text files
pj.write_trimmed_alns(id=['some_feature_qualifier'], format='fasta')
# the default feature qualifier is 'feature_id'
# 'fasta' is the default format
# View alignment in browser
pj.show_aln('locus_name@aln_method_name@trim_method_name',id=['some_feature_qualifier'])
"""
Explanation: 3.7.4 Quick reference
End of explanation
"""
|
openfisca/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/examples/notebooks/depenses_ticpe_carburants_par_decile.ipynb | agpl-3.0 | from __future__ import division
import pandas
import seaborn
from pandas import concat
"""
Explanation: Cet exemple a pour objectif de décrire pour chaque décile de revenu la consommation annuelle moyenne de carburants, ainsi que les dépenses moyennes pour la TICPE
Import de modules généraux
End of explanation
"""
from openfisca_france_indirect_taxation.examples.utils_example import graph_builder_line
from openfisca_france_indirect_taxation.surveys import SurveyScenario
"""
Explanation: Import de modules spécifiques à OpenFisca
End of explanation
"""
seaborn.set_palette(seaborn.color_palette("Set2", 12))
%matplotlib inline
"""
Explanation: Import d'une nouvelle palette de couleurs
End of explanation
"""
simulated_variables = [
'ticpe_totale',
'diesel_ticpe',
'essence_ticpe',
'depenses_carburants',
'depenses_diesel',
'depenses_essence'
]
"""
Explanation: Sélection des variables que l'on veut simuler
End of explanation
"""
to_graph = ['ticpe totale ', 'ticpe diesel ', 'ticpe essence ', 'depenses carburants ', 'depenses diesel ',
'depenses essence ']
for element in to_graph:
depenses = None
for year in [2000, 2005, 2011]:
survey_scenario = SurveyScenario.create(year = year)
pivot_table = pandas.DataFrame()
for values in simulated_variables:
pivot_table = pandas.concat([
pivot_table,
survey_scenario.compute_pivot_table(values = [values], columns = ['niveau_vie_decile'])
])
df = pivot_table.T
df.rename(columns = {'ticpe_totale': 'ticpe totale {}'.format(year),
'diesel_ticpe': 'ticpe diesel {}'.format(year),
'essence_ticpe': 'ticpe essence {}'.format(year),
'depenses_carburants': 'depenses carburants {}'.format(year),
'depenses_diesel': 'depenses diesel {}'.format(year),
'depenses_essence': 'depenses essence {}'.format(year)},
inplace = True)
if depenses is not None:
depenses = concat(
[depenses, df[element + '{}'.format(year)]], axis = 1)
else:
depenses = df[element + '{}'.format(year)]
print '{} par decile de revenu'.format(element)
graph_builder_line(depenses)
"""
Explanation: Construction des simulations par décile de revenu
End of explanation
"""
|
rscohn2/IntelPythonExamples | notebooks/Cython Example.ipynb | mit | %load_ext cython
import array
a = array.array('l',range(100))
s = 0
"""
Explanation: This notebook uses cython, which requires a C compiler. Linux comes with a compiler. Install xcode for OSX and Visual Studio for windows.
End of explanation
"""
def python_sum(a):
    """Deliberately slow pure-Python reduction used as a timing baseline.

    Adds each element 10000 times in a nested loop (so the result is
    10000 * sum(a)); the redundant inner loop exists purely to give the
    benchmark something to measure.  Also stores the total in the module
    global ``s``.
    """
    global s
    s = 0
    for value in a:
        for _ in range(10000):
            s += value
    return s
%timeit python_sum(a)
"""
Explanation: Sum up an array of numbers using python
End of explanation
"""
%%cython --annotate
def cython_sum1(a):
    """Unmodified Python source compiled by Cython.

    Same deliberately slow nested-loop summation as python_sum; any
    speedup comes purely from Cython's compilation, since no static
    types are declared here.
    """
    global s
    s = 0
    for i in range(len(a)):
        for j in range(10000):
            s = s + a[i]
    return s
print('python sum: ',python_sum(a))
print('cython sum1: ',cython_sum1(a))
print('python sum')
%timeit python_sum(a)
print('cython sum1')
%timeit cython_sum1(a)
"""
Explanation: Use cython, without changing the code
End of explanation
"""
%%cython --annotate
def cython_sum2(a):
    """Cython-compiled summation with a local accumulator.

    Identical to cython_sum1 except that ``s`` is a local variable
    rather than a module global, avoiding the global lookup/store on
    every iteration of the inner loop.
    """
    s = 0
    for i in range(len(a)):
        for j in range(10000):
            s = s + a[i]
    return s
print('python sum: ',python_sum(a))
print('cython sum1: ',cython_sum1(a))
print('cython sum2: ',cython_sum2(a))
print('python sum')
%timeit python_sum(a)
print('cython sum1')
%timeit cython_sum1(a)
print('cython sum2')
%timeit cython_sum2(a)
%%cython --annotate
from cpython cimport array
def cython_sum3(a):
    """Cython summation with static C types for the hot loop.

    Declares the accumulator as a C long and reads the array's buffer
    through a raw ``long*`` pointer (``ta.data.as_longs``), so the inner
    loop runs without per-element Python object overhead.  Assumes ``a``
    is an ``array.array`` of C longs (typecode 'l'), as created above.
    """
    cdef long s = 0
    cdef array.array ta = a
    cdef long * ap = ta.data.as_longs
    for i in range(len(ta)):
        for j in range(10000):
            s = s + ap[i]
    return s
print('python sum: ',python_sum(a))
print('cython sum1: ',cython_sum1(a))
print('cython sum2: ',cython_sum2(a))
print('cython sum3: ',cython_sum3(a))
print('python sum')
%timeit python_sum(a)
print('cython sum1')
%timeit cython_sum1(a)
print('cython sum2')
%timeit cython_sum2(a)
print('cython sum3')
%timeit cython_sum3(a)
from numba import jit
@jit
def numba_sum(a):
    """Numba-compiled version of the slow summation benchmark.

    Same nested-loop computation as python_sum (returns 10000 * sum(a));
    the @jit decorator lets numba compile the loops to machine code.
    """
    total = 0
    for index in range(len(a)):
        for _ in range(10000):
            total = total + a[index]
    return total
print('python sum: ',python_sum(a))
print('cython sum1: ',cython_sum1(a))
print('cython sum2: ',cython_sum2(a))
print('cython sum3: ',cython_sum3(a))
print('numba sum: ', numba_sum(a))
print('python sum')
%timeit python_sum(a)
print('cython sum1')
%timeit cython_sum1(a)
print('cython sum2')
%timeit cython_sum2(a)
print('cython sum3')
%timeit cython_sum3(a)
print('numba sum')
%timeit numba_sum(a)
"""
Explanation: Does making s a local variable help?
End of explanation
"""
|
tensorflow/docs-l10n | site/ja/hub/tutorials/cord_19_embeddings_keras.ipynb | apache-2.0 | # Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Explanation: Copyright 2019 The TensorFlow Hub Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
import functools
import itertools
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_hub as hub
from tqdm import trange
"""
Explanation: TF-Hub CORD-19 Swivel 埋め込みを探索する
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/hub/tutorials/cord_19_embeddings_keras"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で実行</a></td>
<td> <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/hub/tutorials/cord_19_embeddings_keras.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png"> Google Colabで実行</a> </td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/hub/tutorials/cord_19_embeddings_keras.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub で表示</a></td>
<td> <a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/hub/tutorials/cord_19_embeddings_keras.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a> </td>
<td> <a href="https://tfhub.dev/tensorflow/cord-19/swivel-128d/3"><img src="https://www.tensorflow.org/images/hub_logo_32px.png">TF Hub モデルを参照</a> </td>
</table>
TF-Hub (https://tfhub.dev/tensorflow/cord-19/swivel-128d/3) の CORD-19 Swivel テキスト埋め込みモジュールは、COVID-19 に関連する自然言語テキストを分析する研究者をサポートするために構築されました。これらの埋め込みは、CORD-19 データセットの論文のタイトル、著者、抄録、本文、および参照タイトルをトレーニングしています。
この Colab では、以下について取り上げます。
埋め込み空間内の意味的に類似した単語の分析
CORD-19 埋め込みを使用した SciCite データセットによる分類器のトレーニング
セットアップ
End of explanation
"""
# Use the inner product between two embedding vectors as the similarity measure
def plot_correlation(labels, features):
  """Heatmap of pairwise inner products between embedding vectors.

  The inner-product matrix is scaled by its maximum entry so values are
  comparable, then rendered with each axis labelled by ``labels``.
  """
  similarity = np.inner(features, features)
  similarity /= np.max(similarity)
  sns.heatmap(similarity, xticklabels=labels, yticklabels=labels)
# Generate embeddings for some terms
queries = [
# Related viruses
'coronavirus', 'SARS', 'MERS',
# Regions
'Italy', 'Spain', 'Europe',
# Symptoms
'cough', 'fever', 'throat'
]
module = hub.load('https://tfhub.dev/tensorflow/cord-19/swivel-128d/3')
embeddings = module(queries)
plot_correlation(queries, embeddings)
"""
Explanation: 埋め込みを分析する
まず、異なる単語間の相関行列を計算してプロットし、埋め込みを分析してみましょう。異なる単語の意味をうまく捉えられるように埋め込みが学習できていれば、意味的に似た単語の埋め込みベクトルは近くにあるはずです。COVID-19 関連の用語をいくつか見てみましょう。
End of explanation
"""
builder = tfds.builder(name='scicite')
builder.download_and_prepare()
train_data, validation_data, test_data = builder.as_dataset(
split=('train', 'validation', 'test'),
as_supervised=True)
#@title Let's take a look at a few labeled examples from the training set
NUM_EXAMPLES = 10#@param {type:"integer"}
TEXT_FEATURE_NAME = builder.info.supervised_keys[0]
LABEL_NAME = builder.info.supervised_keys[1]
def label2str(numeric_label):
  """Map an integer class id to its human-readable SciCite label name."""
  label_names = builder.info.features[LABEL_NAME].names
  return label_names[numeric_label]
data = next(iter(train_data.batch(NUM_EXAMPLES)))
pd.DataFrame({
TEXT_FEATURE_NAME: [ex.numpy().decode('utf8') for ex in data[0]],
LABEL_NAME: [label2str(x) for x in data[1]]
})
"""
Explanation: 埋め込みが異なる用語の意味をうまく捉えていることが分かります。それぞれの単語は所属するクラスタの他の単語に類似していますが(「コロナウイルス」は「SARS」や「MERS」と高い関連性がある)、ほかのクラスタの単語とは異なります(「SARS」と「スペイン」の類似度はゼロに近い)。
では、これらの埋め込みを使用して特定のタスクを解決する方法を見てみましょう。
SciCite: 引用の意図の分類
このセクションでは、テキスト分類など下流のタスクに埋め込みを使う方法を示します。学術論文の引用の意図の分類には、TensorFlow Dataset の SciCite データセットを使用します。学術論文からの引用がある文章がある場合に、その引用の主な意図が背景情報、方法の使用、または結果の比較のうち、どれであるかを分類します。
End of explanation
"""
#@title Hyperparameters { run: "auto" }
EMBEDDING = 'https://tfhub.dev/tensorflow/cord-19/swivel-128d/3' #@param {type: "string"}
TRAINABLE_MODULE = False #@param {type: "boolean"}
hub_layer = hub.KerasLayer(EMBEDDING, input_shape=[],
dtype=tf.string, trainable=TRAINABLE_MODULE)
model = tf.keras.Sequential()
model.add(hub_layer)
model.add(tf.keras.layers.Dense(3))
model.summary()
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
"""
Explanation: 引用の意図分類器をトレーニングする
分類器のトレーニングには、SciCite データセットに対して Keras を使用します。上に分類レイヤーを持ち、CORD-19 埋め込みを使用するモデルを構築してみましょう。
End of explanation
"""
EPOCHS = 35#@param {type: "integer"}
BATCH_SIZE = 32#@param {type: "integer"}
history = model.fit(train_data.shuffle(10000).batch(BATCH_SIZE),
epochs=EPOCHS,
validation_data=validation_data.batch(BATCH_SIZE),
verbose=1)
from matplotlib import pyplot as plt
def display_training_curves(training, validation, title, subplot):
  """Plot training and validation curves for one metric on a shared figure.

  ``subplot`` is a matplotlib three-digit position code (e.g. 211, 212);
  the figure itself is created only on the first panel of the grid.
  """
  # A position code ending in 1 means this is the first panel: set up
  # the figure once, then reuse it for the remaining panels.
  if subplot % 10 == 1:
    plt.subplots(figsize=(10, 10), facecolor='#F0F0F0')
    plt.tight_layout()
  axis = plt.subplot(subplot)
  axis.set_facecolor('#F8F8F8')
  axis.plot(training)
  axis.plot(validation)
  axis.set_title('model ' + title)
  axis.set_xlabel('epoch')
  axis.set_ylabel(title)
  axis.legend(['train', 'valid.'])
display_training_curves(history.history['accuracy'], history.history['val_accuracy'], 'accuracy', 211)
display_training_curves(history.history['loss'], history.history['val_loss'], 'loss', 212)
"""
Explanation: モデルをトレーニングして評価する
モデルをトレーニングして評価を行い、SciCite タスクでのパフォーマンスを見てみましょう。
End of explanation
"""
results = model.evaluate(test_data.batch(512), verbose=2)
for name, value in zip(model.metrics_names, results):
print('%s: %.3f' % (name, value))
"""
Explanation: モデルを評価する
モデルがどのように実行するか見てみましょう。2 つの値が返されます。損失(誤差、値が低いほど良)と精度です。
End of explanation
"""
prediction_dataset = next(iter(test_data.batch(20)))
prediction_texts = [ex.numpy().decode('utf8') for ex in prediction_dataset[0]]
prediction_labels = [label2str(x) for x in prediction_dataset[1]]
predictions = [
label2str(x) for x in np.argmax(model.predict(prediction_texts), axis=-1)]
pd.DataFrame({
TEXT_FEATURE_NAME: prediction_texts,
LABEL_NAME: prediction_labels,
'prediction': predictions
})
"""
Explanation: 損失はすぐに減少しますが、特に精度は急速に上がることが分かります。予測と真のラベルがどのように関係しているかを確認するために、いくつかの例をプロットしてみましょう。
End of explanation
"""
|
necromuralist/student_intervention | student_intervention/student_intervention_stratified.ipynb | mit | # Import libraries
import numpy as np
import pandas as pd
# additional imports
import matplotlib.pyplot as plot
import seaborn
from sklearn.cross_validation import train_test_split
%matplotlib inline
RANDOM_STATE = 100
REPETITIONS = 1
RUN_PLOTS = True
# Read student data
student_data = pd.read_csv("student-data.csv")
print "Student data read successfully!"
# Note: The last column 'passed' is the target/label, all other are feature columns
"""
Explanation: Project 2: Supervised Learning
Building a Student Intervention System
1. Classification vs Regression
Your goal is to identify students who might need early intervention - which type of supervised machine learning problem is this, classification or regression? Why?
Identifying students who might need early intervention is a classification problem as you are sorting students into classes (needs intervention, doesn't need intervention) rather than trying to predict a quantitative value.
2. Exploring the Data
Let's go ahead and read in the student dataset first.
To execute a code cell, click inside it and press Shift+Enter.
End of explanation
"""
n_students = student_data.shape[0]
assert n_students == student_data.passed.count()
n_features = student_data.shape[1] - 1
assert n_features == len(student_data.columns[student_data.columns != 'passed'])
n_passed = sum(student_data.passed.map({'no': 0, 'yes': 1}))
assert n_passed == len(student_data[student_data.passed == 'yes'].passed)
n_failed = n_students - n_passed
grad_rate = n_passed/float(n_students)
print "Total number of students: {}".format(n_students)
print "Number of students who passed: {}".format(n_passed)
print "Number of students who failed: {}".format(n_failed)
print "Number of features: {}".format(n_features)
print "Graduation rate of the class: {:.2f}%".format(100 * grad_rate)
passing_rates = student_data.passed.value_counts()/student_data.passed.count()
print(passing_rates)
seaborn.set_style('whitegrid')
axe = seaborn.barplot(x=passing_rates.index, y=passing_rates.values)
title = axe.set_title("Proportion of Passing Students")
"""
Explanation: Now, can you find out the following facts about the dataset?
- Total number of students
- Number of students who passed
- Number of students who failed
- Graduation rate of the class (%)
- Number of features
Use the code block below to compute these values. Instructions/steps are marked using TODOs.
End of explanation
"""
# Extract feature (X) and target (y) columns
feature_cols = list(student_data.columns[:-1]) # all columns but last are features
target_col = student_data.columns[-1] # last column is the target/label
print "Feature column(s):-\n{}".format(feature_cols)
print "Target column: {}".format(target_col)
X_all = student_data[feature_cols] # feature values for all students
y_all = student_data[target_col] # corresponding targets/labels
print "\nFeature values:-"
print X_all.head() # print the first 5 rows
print(len(X_all.columns))
"""
Explanation: 3. Preparing the Data
In this section, we will prepare the data for modeling, training and testing.
Identify feature and target columns
It is often the case that the data you obtain contains non-numeric features. This can be a problem, as most machine learning algorithms expect numeric data to perform computations with.
Let's first separate our data into feature and target columns, and see if any features are non-numeric.<br/>
Note: For this dataset, the last column ('passed') is the target or label we are trying to predict.
End of explanation
"""
# Preprocess feature columns
def preprocess_features(X):
    """Convert all feature columns of ``X`` to numeric columns.

    Binary yes/no string columns are mapped to 1/0; any remaining
    object-dtype (categorical) columns are expanded into one dummy
    indicator column per category value (e.g. 'school' becomes
    'school_GP', 'school_MS').

    :param X: DataFrame of raw feature columns
    :return: a new DataFrame with the same index and only numeric columns
    """
    outX = pd.DataFrame(index=X.index)  # output dataframe, initially empty

    # Iterate by column name: works on every pandas version, unlike
    # ``iteritems()`` (removed in pandas 2.0) or ``items()`` (added 0.25).
    for col in X.columns:
        col_data = X[col]

        # If data type is non-numeric, try to replace all yes/no values with 1/0
        if col_data.dtype == object:
            col_data = col_data.replace(['yes', 'no'], [1, 0])
            # ``replace`` may leave the column as object dtype on newer
            # pandas; downcast so pure yes/no columns become int instead
            # of being expanded into dummies below.
            col_data = col_data.infer_objects()

        # If still non-numeric, convert to one or more dummy variables
        if col_data.dtype == object:
            col_data = pd.get_dummies(col_data, prefix=col)  # e.g. 'school' => 'school_GP', 'school_MS'

        outX = outX.join(col_data)  # collect column(s) in output dataframe

    return outX
X_all = preprocess_features(X_all)
y_all = y_all.replace(['yes', 'no'], [1, 0])
print "Processed feature columns ({}):-\n{}".format(len(X_all.columns), list(X_all.columns))
len(X_all.columns)
"""
Explanation: Preprocess feature columns
As you can see, there are several non-numeric columns that need to be converted! Many of them are simply yes/no, e.g. internet. These can be reasonably converted into 1/0 (binary) values.
Other columns, like Mjob and Fjob, have more than two values, and are known as categorical variables. The recommended way to handle such a column is to create as many columns as possible values (e.g. Fjob_teacher, Fjob_other, Fjob_services, etc.), and assign a 1 to one of them and 0 to all others.
These generated columns are sometimes called dummy variables, and we will use the pandas.get_dummies() function to perform this transformation.
End of explanation
"""
# First, decide how many training vs test samples you want
num_all = student_data.shape[0] # same as len(student_data)
num_train = 300 # about 75% of the data
num_test = num_all - num_train
# TODO: Then, select features (X) and corresponding labels (y) for the training and test sets
# Note: Shuffle the data or randomly select samples to avoid any bias due to ordering in the dataset
X_train, X_test, y_train, y_test = train_test_split(X_all, y_all,
test_size=num_test,
train_size=num_train,
random_state=RANDOM_STATE,
stratify=y_all)
assert len(y_train) == 300
assert len(y_test) == 95
print "Training set: {} samples".format(X_train.shape[0])
print "Test set: {} samples".format(X_test.shape[0])
# Note: If you need a validation set, extract it from within training data
"""
Explanation: Split data into training and test sets
So far, we have converted all categorical features into numeric values. In this next step, we split the data (both features and corresponding labels) into training and test sets.
End of explanation
"""
import time
def train_classifier(clf, X_train, y_train, verbose=True):
    """Fit `clf` on the training data, reporting the fastest fit time.

    Fits REPETITIONS times and takes the minimum wall-clock time to
    reduce timing noise.  Mutates `clf` in place (it ends up fitted).

    :param:
     - `clf`: an unfitted sklearn-style classifier
     - `X_train`: training features
     - `y_train`: training labels
     - `verbose`: if True, print the classifier name and best fit time
    """
    if verbose:
        print "Training {}...".format(clf.__class__.__name__)
    times = []
    for repetition in range(REPETITIONS):
        start = time.time()
        clf.fit(X_train, y_train)
        times.append(time.time() - start)
    if verbose:
        print "Done!\nTraining time (secs): {:.3f}".format(min(times))
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
classifiers = [LogisticRegression(),
RandomForestClassifier(),
KNeighborsClassifier()]
for clf in classifiers:
# Fit model to training data
train_classifier(clf, X_train, y_train) # note: using entire training set here
# Predict on training set and compute F1 score
from sklearn.metrics import f1_score
def predict_labels(clf, features, target, verbose=True):
    """Predict with `clf` and score the predictions against `target`.

    Runs prediction REPETITIONS times, reporting the fastest prediction
    time, and returns the median F1 score (pos_label=1, i.e. 'passed').

    :param:
     - `clf`: a fitted classifier
     - `features`: feature matrix to predict on
     - `target`: true labels (pandas Series; `.values` is scored)
     - `verbose`: if True, print the classifier name and best time
    :return: median F1 score over the repetitions
    """
    if verbose:
        print "Predicting labels using {}...".format(clf.__class__.__name__)
    times = []
    scores = []
    for repetition in range(REPETITIONS):
        start = time.time()
        y_pred = clf.predict(features)
        times.append(time.time() - start)
        scores.append(f1_score(target.values, y_pred, pos_label=1))
    if verbose:
        print "Done!\nPrediction time (secs): {:.3f}".format(min(times))
    return np.median(scores)
# Predict on test data
for classifier in classifiers:
print "F1 score for test set: {}".format(predict_labels(classifier,
X_test, y_test))
class ClassifierData(object):
    """Simple record bundling a fitted classifier with its F1 scores."""
    def __init__(self, classifier, f1_test_score, f1_train_score):
        """
        :param:
         - `classifier`: classifier object (e.g. LogisticRegression())
         - `f1_test_score`: score for the classifier on the test set
         - `f1_train_score`: score for the classifier on the training set
        """
        self.f1_train_score = f1_train_score
        self.f1_test_score = f1_test_score
        self.classifier = classifier
from collections import defaultdict
# Train and predict using different training set sizes
def train_predict(clf, X_train, y_train, X_test, y_test, verbose=True):
    """Train `clf` on the given training set and F1-score it on both sets.

    :param:
     - `clf`: an unfitted classifier
     - `X_train`, `y_train`: training features and labels
     - `X_test`, `y_test`: held-out test features and labels
     - `verbose`: if True, print timings and scores
    :return: ClassifierData holding the fitted classifier and both scores
    """
    if verbose:
        print "------------------------------------------"
        print "Training set size: {}".format(len(X_train))
    train_classifier(clf, X_train, y_train, verbose)
    # Score on the data the model was fit on, then on the held-out set.
    f1_train_score = predict_labels(clf, X_train, y_train, verbose)
    f1_test_score = predict_labels(clf, X_test, y_test, verbose)
    if verbose:
        print "F1 score for training set: {}".format(f1_train_score)
        print "F1 score for test set: {}".format(f1_test_score)
    return ClassifierData(clf, f1_test_score, f1_train_score)
# TODO: Run the helper function above for desired subsets of training data
# Note: Keep the test set constant
def train_by_size(sizes = [100, 200, 300], verbose=True):
    """Train every classifier in `classifiers` at each training-set size.

    Uses the module-level X_train/y_train/X_test/y_test splits; the test
    set stays fixed while the training set is truncated to each `size`.

    NOTE(review): the mutable default argument is shared across calls;
    safe here only because the list is never mutated.

    :param:
     - `sizes`: training-set sizes to evaluate
     - `verbose`: if True, print per-model progress
    :return: dict of <classifier name> : {<size> : ClassifierData}
    """
    classifier_containers = {}
    for classifier in classifiers:
        name = classifier.__class__.__name__
        if verbose:
            print(name)
            print("=" * len(name))
        classifier_containers[name] = defaultdict(lambda: {})
        for size in sizes:
            x_train_sub, y_train_sub = X_train[:size], y_train[:size]
            assert len(x_train_sub) == size
            assert len(y_train_sub) == size
            # Re-fits the same classifier instance at each size.
            classifier_data = train_predict(classifier, x_train_sub, y_train_sub, X_test, y_test, verbose)
            classifier_containers[name][size] = classifier_data
        if verbose:
            print('')
    return classifier_containers
_ = train_by_size()
if RUN_PLOTS:
# this takes a long time, don't run if not needed
sizes = range(10, 310, 10)
classifier_containers = train_by_size(sizes=sizes,
verbose=False)
color_map = {'LogisticRegression': 'b',
'KNeighborsClassifier': 'r',
'RandomForestClassifier': 'm'}
def plot_scores(containers, which_f1='test', color_map=color_map):
    """
    Plot each model's F1 score as a function of training-set size.

    :param:
     - `containers`: dict of <name><size> : classifier data
     - `which_f1`: 'test' or 'train'
     - `color_map`: dict of <model name> : <color string>
    """
    sizes = sorted(containers['LogisticRegression'].keys())
    score_attribute = 'f1_{0}_score'.format(which_f1)
    axe = plot.figure().gca()
    for name in containers:
        scores = []
        for size in sizes:
            scores.append(getattr(containers[name][size], score_attribute))
        axe.plot(sizes, scores, label=name, color=color_map[name])
    axe.set_xlabel('Training Set Size')
    axe.set_ylabel('F1 Score')
    axe.set_ylim([0, 1.0])
    axe.set_title("{0} Set F1 Scores by Training-Set Size".format(which_f1.capitalize()))
    axe.legend(loc='lower right')
if RUN_PLOTS:
for f1 in 'train test'.split():
plot_scores(classifier_containers, f1)
def plot_test_train(containers, model_name, color_map=color_map):
    """
    Plot the test and train F1 scores for a single model
    :param:
     - `containers`: dict of <model name><size>: classifier data
     - `model_name`: class name of the model to plot
     - `color_map`: dict of <model name> : color string
    """
    # every model was trained on the same sizes, so read them off any entry
    sizes = sorted(containers['LogisticRegression'].keys())
    figure = plot.figure()
    axe = figure.gca()
    # BUG FIX: the body previously read the global `model` instead of the
    # `model_name` parameter, so the argument was silently ignored (it only
    # worked because the call-site loop variable happened to be `model`).
    test_scores = [containers[model_name][size].f1_test_score for size in sizes]
    train_scores = [containers[model_name][size].f1_train_score for size in sizes]
    axe.plot(sizes, test_scores, label="Test", color=color_map[model_name])
    axe.plot(sizes, train_scores, '--', label="Train", color=color_map[model_name])
    axe.legend(loc='lower right')
    axe.set_title("{0} F1 Scores by Training-Set Size".format(model_name))
    axe.set_xlabel('Training Set Size')
    axe.set_ylabel('F1 Score')
    axe.set_ylim([0, 1.0])
if RUN_PLOTS:
for model in color_map.keys():
plot_test_train(classifier_containers, model)
"""
Explanation: 4. Training and Evaluating Models
Choose 3 supervised learning models that are available in scikit-learn, and appropriate for this problem. For each model:
What are the general applications of this model? What are its strengths and weaknesses?
Given what you know about the data so far, why did you choose this model to apply?
Fit this model to the training data, try to predict labels (for both training and test sets), and measure the F<sub>1</sub> score. Repeat this process with different training set sizes (100, 200, 300), keeping test set constant.
Produce a table showing training time, prediction time, F<sub>1</sub> score on training set and F<sub>1</sub> score on test set, for each training set size.
Note: You need to produce 3 such tables - one for each model.
Train a model
End of explanation
"""
%%latex
P(passed=yes|x) = \frac{1}{1+e^{-weights^T \times attributes}}\\
"""
Explanation: 5. Choosing the Best Model
Based on the experiments you performed earlier, in 1-2 paragraphs explain to the board of supervisors what single model you chose as the best model. Which model is generally the most appropriate based on the available data, limited resources, cost, and performance?
In 1-2 paragraphs explain to the board of supervisors in layman's terms how the final model chosen is supposed to work (for example if you chose a Decision Tree or Support Vector Machine, how does it make a prediction).
Fine-tune the model. Use Gridsearch with at least one important parameter tuned and with at least 3 settings. Use the entire training set for this.
What is the model's final F<sub>1</sub> score?
Based on the previous experiments I chose Logistic Regression as the classifier to use. Given the data available, all three models have comparable F1 scores (on the test data) but the Logistic Regression classifier is the fastest for both training and prediction when compared to K-Nearest Neighbor and Random Forests. In addition, the Logistic Regression classifier offers readily interpretable coefficients and L1 regression to sparsify the data, allowing us to see the most important of the variables when deciding who will pass their final exam.
Logistic Regression works by estimating the probability that a student's attributes - such as their age, how often they go out, etc. - predicts that they will pass. It does this using the logistic function which creates an S-shaped curve which goes to 0 at negative infinity and 1 at positive infinity:
End of explanation
"""
x = np.linspace(-6, 7, 100)
y = 1/(1 + np.exp(-x))
figure = plot.figure()
axe = figure.gca()
axe.plot(x, y)
title = axe.set_title("Sigmoid Function")
axe.set_ylabel(r"P(passed=yes|x)")
label = axe.set_xlabel("x")
"""
Explanation: Here attributes is a vector of student attributes and weights is the vector of weights that the Logistic Regression algorithm finds. To see what this function looks like I'll plot its output when there is a weight of one and a single attribute whose values are centered around 0, since this is a fictional attribute that I'm creating for plotting I'll call it x.
End of explanation
"""
%%latex
\textit{probability student passed given age and school} = \frac{1}{1+e^{-(intercept + w_1 \times age + w_2 * school)}}\\
"""
Explanation: To clarify the previous equation, if we only had two attributes, age and school to predict if a student passed, then it could be written as:
End of explanation
"""
from sklearn.metrics import f1_score, make_scorer
scorer = make_scorer(f1_score)
passing_ratio = (sum(y_test) +
sum(y_train))/float(len(y_test) +
len(y_train))
assert abs(passing_ratio - .67) < .01
model = LogisticRegression()
# python standard library
import warnings
# third party
import numpy
from sklearn.grid_search import GridSearchCV
parameters = {'penalty': ['l1', 'l2'],
'C': np.arange(.01, 1., .01),
'class_weight': [None, 'balanced', {1: passing_ratio, 0: 1 - passing_ratio}]}
"""
Explanation: The goal of the Logistic Regression algorithm is to find the weights that most accurately predict whether a given student passes or not. In other words, it seeks to find the values for the weights so that the logistic function most accurately produces a probability greater than :math:\frac{1}{2} if the student passed and a probablity less than :math:\frac{1}{2} if the student did not pass.
Set up the parameters
End of explanation
"""
grid = GridSearchCV(model, param_grid=parameters, scoring=scorer, cv=10, n_jobs=-1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
grid.fit(X_train, y_train)
"""
Explanation: Grid search
End of explanation
"""
grid.best_params_
"""
Explanation: The best parameters
End of explanation
"""
def print_coefficients(grid):
    """Print the non-zero model coefficients (largest first) with their odds
    ratios, and return the matching feature names.

    :param:
     - `grid`: fitted GridSearchCV whose best estimator is a linear model
    :return: list of feature names (columns of the global X_train) whose
             coefficient is non-zero, ordered by descending coefficient
    """
    column_names = X_train.columns
    coefficients = grid.best_estimator_.coef_[0]
    # exp(coefficient) is the multiplicative change in the odds for a
    # one-unit increase in the feature
    odds = np.exp(coefficients)
    # BUG FIX: the previous np.where(coefficients == c) lookup returned the
    # *first* matching index, so two features with equal coefficients were
    # both reported as the same feature; argsort keeps indices distinct.
    descending_indices = np.argsort(coefficients)[::-1]
    non_zero_indices = [index for index in descending_indices
                        if coefficients[index] != 0]
    non_zero_coefficients = [coefficients[index] for index in non_zero_indices]
    non_zero_variables = [column_names[index] for index in non_zero_indices]
    non_zero_odds = [odds[index] for index in non_zero_indices]
    for column, coefficient, odds_ in zip(non_zero_variables, non_zero_coefficients, non_zero_odds):
        print('{0: <10}{1: >5.2f}\t{2: >8.2f}'.format(column, coefficient, odds_))
    return non_zero_variables
non_zero_variables = print_coefficients(grid)
feature_map = {"sex_M": "male student",
"age": "student's age",
"Medu": "mother's education",
"traveltime": "home to school travel time",
"studytime": "weekly study time",
"failures": "number of past class failures",
"schoolsup": "extra educational support",
"famsup": "family educational support",
"paid": "extra paid classes within the course subject (Math or Portuguese)",
"activities": "extra-curricular activities",
"nursery": "attended nursery school",
"higher": "wants to take higher education",
"internet": "Internet access at home",
"romantic": "within a romantic relationship",
"famrel": "quality of family relationships",
"freetime": "free time after school",
"goout": "going out with friends",
"Dalc": "workday alcohol consumption",
"Walc": "weekend alcohol consumption",
"health": "current health status",
"absences": "number of school absences",
"passed": "did the student pass the final exam"}
"""
Explanation: The Coefficients
End of explanation
"""
data_all = X_all.copy()
data_all['passed'] = y_all.values
def plot_counts(x_name, hue='passed'):
    """
    Plot counts for a given variable from the module-level `data_all` frame
    :param:
     - `x_name`: variable name in student data (must be a key of `feature_map`)
     - `hue`: correlating variable used to split the bars
    """
    # feature_map translates the short column name into a readable title
    title = "{0} vs Passing".format(feature_map[x_name].title())
    figure = plot.figure()
    axe = figure.gca()
    axe.set_title(title)
    lines = seaborn.countplot(x=x_name, hue=hue, data=data_all)
count_plot_variables = [name for name in non_zero_variables
if name not in ('age', 'absences')]
for variable in count_plot_variables:
plot_counts(variable)
plot_counts('passed', 'age')
axe = seaborn.kdeplot(student_data[student_data.passed=='yes'].absences, label='passed')
axe.set_title('Distribution of Absences')
axe.set_xlim([0, 80])
axe = seaborn.kdeplot(student_data[student_data.passed=='no'].absences, ax=axe, label="didn't pass")
"""
Explanation: The plots were originally created separately for the write-up but I'm putting the code here too to show how they were made.
End of explanation
"""
with warnings.catch_warnings():
warnings.simplefilter('ignore')
print("{0:.2f}".format(grid.score(X_test, y_test)))
"""
Explanation: Final F1 Score
End of explanation
"""
X_all_trimmed = X_all[non_zero_variables]
grid_2 = GridSearchCV(model, param_grid=parameters, scoring=scorer, cv=10, n_jobs=-1)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
grid_2.fit(X_train, y_train)
grid_2.best_params_
print_coefficients(grid_2)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
print("{0:.2f}".format(grid_2.score(X_test, y_test)))
"""
Explanation: Re-do with only significant columns.
End of explanation
"""
|
tbphu/fachkurs_bachelor | packages/plotting/plotting.ipynb | mit | %matplotlib inline
import numpy as np # we will need numpy
import matplotlib.pyplot as plt # and this is for plotting
"""
Explanation: Plotting in Python
Python does not have built in plotting capabilities, but there is a plethora of useful packages specialized to all kinds of plots. Here is a very incomplete list of my favorites:
Matplotlib
Matplotlib is the standard when it comes to plotting in Python. It is very useful for visualization for publications and can also be included in applications.
- Bokeh
Bokeh is useful to create beautiful interactive plots.
- Plotly
Similar to Bokeh, but more flexible. Plotly has api to other languages and a nice webinterface to style plots.
- Mayavi
Not really a plotting tool, but a specialized package for 3D data visualization.
What we will do
We will work primarily in matplotlib to cover the basics of what you will need for the projects and scientific plotting in general.
Matplotlib, as the name suggests, has strong similarities to Matlab and learning it makes it easy to plot in both languages. The pyplot module makes python work like matlab in many aspects.
To use matplotlib we need to do the following setup:
End of explanation
"""
xv=[1,2,3,4]
yv=[5,1,4,0]
plt.plot(xv,yv)
"""
Explanation: Line 1 lets matplotlib output images directly in the notebook. If you just use %matplotlib the output is opened in a new window.
And now we can plot:
End of explanation
"""
x = np.linspace(-np.pi, np.pi, 256)
y = np.sin(x)
myplot = plt.plot(x,y,'k--')
plt.setp(myplot,linewidth=3.0)
"""
Explanation: It really is that easy!
Understanding our plot
It is important to note that in the code above we imported matplotlib.pyplot. Pyplot is the part of matplotlib (MPL) we will use mostly. It is a collection of functions that can be used for the easy creation of plots. Luckily the inner workings of MPL are mostly hidden from us; these commands create plots that are assembled from a few standard components.
To really work with MPL (or any other plotting library) though, it is important to understand how plots are build up.
Plots in MPL have the following components:
- figures:
A canvas to draw on
- axis:
Coordinate systems to put data in
- ticks:
labels and dimensions of the axis
MPL also uses the concept of current plot. Whenever you issue a plot command, it is drawn on your current figure if there is one, otherwise it opens a new plot.
Plots are created by plot commands but not displayed directly, usually you need to use the plt.show() command to show the figure on screen.
{python}
plt.plot(x,y)
plt.show()
In this notebook we do not need to do this, because Jupyter takes care of that if we use %matplotlib inline command.
Styling our plot
We can modify our plot after we created it using the setp function:
End of explanation
"""
plt.setp(myplot)
"""
Explanation: Calling plt.setp(myplot) shows us all the available arguments:
End of explanation
"""
print(plt.style.available)
plt.style.use('ggplot')
plt.plot(x,y)
"""
Explanation: Styles
The defaults of MPL are not the most beautiful out there, so luckily we can set better defaults using styles:
End of explanation
"""
fig = plt.figure()
ax = plt.axes()
random = np.random.random(x.shape)-.5
ax.plot(x,y)
ax.plot(x, y, color="blue", linestyle="-", label="sine")
ax.plot(x, random, color="red", linestyle="-", label="random")
ax.legend(loc='upper left')
"""
Explanation: Legends
Legends can be added to get an overview of different plot components.
Let's create a new figure to draw on:
End of explanation
"""
ax.set_xlabel("a.u.")
ax.set_ylabel("a.u.")
ax.set_title("Sine and random noise")
fig
"""
Explanation: Labels and Titles
End of explanation
"""
ax.set_xticks([-np.pi, -np.pi/2, 0, np.pi/2, np.pi])
ax.set_xticklabels([r'$-\pi$', r'$-\pi/2$', r'$0$', r'$+\pi/2$', r'$+\pi$'])
ax.set_yticks([-1, 0, +1])
ax.set_yticklabels([r'$-1$', r'$0$', r'$+1$'])
fig
"""
Explanation: Ticks
End of explanation
"""
plt.plot(x,y)
plt.savefig('foo.png', dpi=600, format='png',orientation='landscape')
"""
Explanation: Saving our work
You can save each plot in different formats:
End of explanation
"""
fig.savefig("sine.png")
"""
Explanation: If you want to save a figure that is not your current figure:
End of explanation
"""
plt.style.use("dark_background")
t = np.linspace(0,100,1000)
s = np.sin(t)/(t+1)
c = np.cos(t)/np.sqrt((t+1))
ax1 = plt.axes([.1,.1,2,2])
ax2 = plt.axes([1.3,.2,.3,.3])
ax3 = plt.axes([1.7,.2,.3,.3])
ax2.plot(t,s)
ax3.plot(t,c)
ax1.plot(s,c, 'red')
ax1.plot(s+0.005,c+0.01, 'yellow')
"""
Explanation: Axes
Axes are the areas on you figure where your actual data lives. You can put number of axes on a figure and fill them with different data.
Let's plot the eye of sauron:
End of explanation
"""
plt.style.use("ggplot")
plt.figure(figsize=(9,6))
plt.subplot(2,2,1)
plt.plot(t, c, color="blue", linewidth=1.0, linestyle="-")
plt.subplot(2,2,2)
# Plot sine using green color with a continuous line of width 1 (pixels)
plt.plot(t, s, color="green", linewidth=1.0, linestyle="--")
plt.subplot(2,2,3)
plt.plot(x,y)
plt.subplot(2,2,4)
plt.plot(x,random)
"""
Explanation: Ok, pretty close.
Subplots
The subplot command creates new axis in a regular grid that can be easily accessed. Using the subplot command we can plot different data on each of the created axis.
Calling the subplot command with a different 3rd argument can be seen as moving the cursor to a different location. Each plot directive after the subplot call will be done on the according subplot/axes.
End of explanation
"""
plt.figure(figsize=(9,3))
plt.subplot(1,3,1)
plt.plot(t,s)
plt.subplot(1,3,2)
plt.plot(t,c)
plt.subplot(1,3,3)
plt.plot(s,c)
"""
Explanation: Another example:
End of explanation
"""
f, (ax1, ax2, ax3) = plt.subplots(1, 3,
sharey=True,
sharex=False,
figsize=(9,3))
ax1.plot(t,s)
ax2.plot(t,c)
ax3.plot(s,c)
"""
Explanation: Shared axis
In the above example it would make sense to make at least the y-axis shared to keep scaling and save space. For this we need to assign axis manually using the subplots command:
End of explanation
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig, (ax1, ax2, ax3) = plt.subplots(1, 3,
sharey=True,
sharex=False,
figsize=(9,3))
p1, = ax1.plot([],[])
p2, = ax2.plot([],[])
p3, = ax3.plot([],[])
t = np.linspace(0,100,2000)
x = np.sin(t)/(t+1)
y = np.cos(t)/np.sqrt(t+1)
ax1.set_ylim((-0.5,1))
ax1.set_xlim((0,100))
ax2.set_xlim((0,100))
ax3.set_xlim((-.5,.5))
def animate(i):
p1.set_data(t[0:i*10],x[0:i*10])
p2.set_data(t[0:i*10],y[0:i*10])
p3.set_data(x[0:i*10],y[0:i*10])
return p1,p2,p3
# Init only required for blitting to give a clean slate.
def init():
p1.set_data(np.ma.array(t, mask=True),np.ma.array(x, mask=True))
p2.set_data(np.ma.array(t, mask=True),np.ma.array(y, mask=True))
p3.set_data(np.ma.array(x, mask=True),np.ma.array(y, mask=True))
return p1,p2,p3
ani = animation.FuncAnimation(fig, animate, np.arange(1, 200), init_func=init,
interval=10, blit=True)
plt.style.use("classic")
ani.save('ani.gif', writer="imagemagick", fps=30, dpi=50)
"""
Explanation: Animation
End of explanation
"""
from numpy import genfromtxt
import csv
data = genfromtxt('genes.csv', delimiter=',', skip_header=1)[:,1:]
with open('genes.csv') as f:
times = csv.reader(f).__next__()[1:]
times = [int(t) for t in times]
genes = [x[0] for x in csv.reader(f)]
"""
Explanation: Matplotlib resources:
To get inspiration and further help look at the MPL website. Here are a few things you might want to have a look at before doing the exercise:
In this notebook, whenever you are in a code block and press Shift+Tab you will get a popup that tries to help you, repeatedly press and it becomes even more helpful
Gallery of beautiful plots
Examples for (almost) everything you can to with MPL
Pyplot documentation that tells you how to use the commands available
Exercises
Exercise 1
a)
First you will plot a real time series dataset. In the same folder as this notebook there is a .csv file containing the expression of cell cycle dependent yeast genes. Your task is to visualize these.
We prepared a snippet that handles the reading of the csv file and gives you three variables:
- data: contains the expression changes over time
- times: the measurement times
- genes: names of the included genes
End of explanation
"""
|
jedbrown/numerical-computation | Rootfinding.ipynb | mit | %matplotlib notebook
from matplotlib import pyplot
import numpy
# Test functions for the rootfinding experiments.  Each returns the pair
# (f(x), f'(x)).  The @tests.append decorator collects the functions into
# `tests`; note list.append returns None, so the names f0..f6 themselves end
# up bound to None -- the functions are reachable only through `tests`.
tests = []
@tests.append
def f0(x):
    # simple quadratic with roots at +/- sqrt(2)
    return x*x - 2, 2*x
@tests.append
def f1(x):
    # the fixed-point style equation cos(x) = x
    return numpy.cos(x) - x, -numpy.sin(x) - 1
@tests.append
def f2(x):
    return numpy.exp(-numpy.abs(x)) + numpy.sin(x), numpy.exp(-numpy.abs(x))*(-numpy.sign(x)) + numpy.cos(x)
@tests.append
def f3(x):
    # perfect square (x - 1/2)**2: a double root, so no sign change
    return x*x - x + 0.25, 2*x - 1
@tests.append
def f4(x):
    # tangent: many roots and many poles
    # (BUG FIX: removed an unreachable second return statement that was
    # left over from an earlier Gaussian test function)
    return numpy.tan(x+2), numpy.cos(x+2)**(-2)
@tests.append
def f5(x):
    # step function: discontinuous, derivative zero wherever it exists
    return (x <= 1)*1.0, 0*x
@tests.append
def f6(x):
    # oscillates infinitely often as x -> 0
    return x*numpy.sin(5/x), numpy.sin(5/x) - numpy.cos(5/x)*5/x
x = numpy.linspace(-2,2,100)
pyplot.plot(x, 0*x, color='k')
for f in tests:
pyplot.plot(x, f(x)[0], label=f.__name__)
pyplot.legend(loc='upper right')
pyplot.style.use('ggplot')
pyplot.show()
"""
Explanation: Jupyter notebooks
This is a Jupyter notebook using Python. You can install Jupyter locally to edit and interact with this notebook.
Rootfinding
Rootfinding is the process of solving $$f(x) = 0$$ for $x$. The standard assumption is that $f : R \to R$ is continuous. We are interested in developing general-purpose algorithms---those that can use $f(x)$ as a black box without needing to look inside. When we implement our rootfinding algorithm in software, the user will pass a function or program to compute $f(x)$. Rootfinding methods for differentiable functions may also use the derivative $f'(x)$.
Some questions immediately arise:
* Existence. When does this equation have at least one solution?
* Uniqueness. When is the solution unique?
Let's consider some test functions, defined here along with their derivatives which we'll use later.
End of explanation
"""
def hasroot(f, a, b):
    """True when f changes sign on [a, b]; f returns the pair (value, deriv)."""
    value_at_a = f(a)[0]
    value_at_b = f(b)[0]
    return value_at_a * value_at_b < 0

def bisect(f, a, b, verbose=False):
    """Recursively halve [a, b] until it is shorter than 1e-5, then return
    the midpoint as the approximate root."""
    midpoint = (a + b)/2.
    if b - a < 1e-5:
        return midpoint
    if verbose:
        print('bisect', midpoint)
    # recurse into whichever half still brackets a sign change
    return (bisect(f, a, midpoint, verbose) if hasroot(f, a, midpoint)
            else bisect(f, midpoint, b, verbose))
"""
Explanation: Which of these functions have at least one root?
Which have more than one root?
Can we determine these properties merely by evaluating $f(x)$ for some values of $x$?
Bisection
Bisection is a rootfinding technique that starts with an interval $[a,b]$ containing a root and does not require derivatives.
End of explanation
"""
bisect(tests[0], 0, 2)
numpy.sqrt(2) - bisect(tests[0], 0, 2)
"""
Explanation: Notice that we need to define hasroot above.
Let's try running it:
End of explanation
"""
bisect(tests[0], 0, 2, verbose=True)
"""
Explanation: We get about 5 digits of accuracy. Why? How fast did we get there?
End of explanation
"""
for f in tests:
print(f.__name__, bisect(f, -2, 2.1))
"""
Explanation: Can you find any problems with this implementation? List them below:
No error checking
Can "converge" to nonsense
Recursion is bad in Python
Doesn't handle ill-defined points (function value not available)
Evaluates $f(x)$ more than necessary (multiple times at the same point)
Let's try running it on the rest of the test problem set:
End of explanation
"""
def newton(f, x, verbose=False):
    """Find a root of f near x by Newton-Raphson iteration.

    :param:
     - `f`: function returning the tuple (f(x), f'(x))
     - `x`: initial guess
     - `verbose`: if True, print each iterate
    :return: tuple (approximate root, last residual f(x), iteration count);
             the residual is NaN when a zero derivative stopped the iteration
    """
    for i in range(100):
        fx, dfx = f(x)
        if verbose:
            print(f.__name__, i, x, fx)
        if numpy.abs(fx) < 1e-12:
            return x, fx, i
        try:
            x -= fx / dfx
        except ZeroDivisionError:
            # only plain Python floats raise here; numpy scalars divide to
            # inf/nan with a warning instead.
            # BUG FIX: numpy.NaN (alias removed in NumPy 2.0) -> numpy.nan
            return x, numpy.nan, i
    # BUG FIX: previously fell off the loop and returned None, which crashed
    # callers that unpack the result tuple; report the non-converged state.
    return x, fx, i
for f in tests:
print(f.__name__, newton(f, 1))
"""
Explanation: What's going wrong here? How can we improve the implementation and what are fundamental limitations of the algorithm?
Convergence rate
Let's quantitatively revisit the convergence rate. A convergent rootfinding algorithm produces a sequence of approximations $x_i$ such that $$\lim_{i \to \infty} x_i \to x_$$ where $f(x_) = 0$. For analysis, it is convenient to define the errors $e_i = x_i - x_$. We say that an algorithm is linearly convergent* if $$\lim_{i \to \infty} |e_{i+1}| / |e_i| = \rho < 1.$$ A smaller convergence factor $\rho$ represents faster convergence.
What is $\rho$ for bisection?
Remarks on bisection
Specifying an interval is often inconvenient
An interval in which the function changes sign guarantees convergence (robustness)
No derivative information is required
Roots of even degree are problematic
The solution error is directly available
The convergence rate is modest -- one iteration per bit of accuracy
Newton-Raphson Method
Much of numerical analysis reduces to Taylor series, the approximation
$$ f(x) = f(x_0) + f'(x_0) (x-x_0) + f''(x_0) (x - x_0)^2 / 2 + \dotsb $$
centered on some reference point $x_0$.
In numerical computation, it is exceedingly rare to look beyond the first-order approximation
$$ \tilde f_{x_0}(x) = f(x_0) + f'(x_0)(x - x_0) . $$
Since $\tilde f_{x_0}(x)$ is a linear function, we can explicitly compute the unique solution of $\tilde f_{x_0}(x) = 0$ as
$$ x = x_0 - f(x_0) / f'(x_0) . $$
This is Newton's Method (aka Newton-Raphson or Newton-Raphson-Simpson) for finding the roots of differentiable functions.
End of explanation
"""
for f in tests:
print(f.__name__, '{0:15.12f} {1:8.2e} {2:2d}'.format(*newton(f, -0.1)))
"""
Explanation: Oops, how can we fix this?
This output is kinda hard to read, so let's make it cleaner.
End of explanation
"""
def fquartic(x):
return (x - 0.9)**4, 4*(x - 0.9)**3
newton(fquartic, 0)
"""
Explanation: Did we solve all of these equations?
How can the iteration break down?
Does choosing a different initial guess lead to different solutions?
How is this convergence test different from the one we used for bisection?
Is the convergence rate similar for all test equations?
Convergence of Newton-type algorithms
We would like to know sharp conditions on when Newton-type algorithms converge, and if so, how fast. This theory will build on that for a general Fixed Point Iteration $x_{i+1} = g(x_i)$ where $g$ is a continuously differentiable function. Suppose that there exists a fixed point $r = g(r)$. By the mean value theorem, we have that
$$ x_{i+1} - r = g(x_i) - g(r) = g'(c_i) (x_i - r) $$
for some $c$ with $|c - r| < |x_i - r|$.
In other words, $|e_{i+1}| = |g'(c_i)| |e_i|$, which converges to zero if $|g'(c_i)| < 1$.
If $|g'(r)| < 1$ then for any $\epsilon > 0$ there is a neighborhood of $r$ such that $|g'(c)| < |g'(r)| + \epsilon$ for all $c$ in that neighborhood.
Consequently, we have:
Theorem (Sauer 1.6): Linear Convergence of Fixed Point Iteration
If $g$ is continuously differentiable, $r = g(r)$, and $|g'(r)| < 1$ then the fixed point iteration $x_{i+1} = g(x_i)$ is locally linearly convergent with rate $|g'(r)|$.
Observations
A rootfinding problem $f(x) = 0$ can be converted to a fixed point problem $x = x - f(x) =: g(x)$ but there is no guarantee that $g'(r) = 1 - f'(r)$ will have magnitude less than 1.
Problem-specific algebraic manipulation can be used to make $|g'(r)|$ small.
$x = x - h(x)f(x)$ is also a valid formulation for any $h(x)$ bounded away from $0$.
Can we choose $h(x)$ such that $ 1 - h'(x)f(x) - h(x)f'(x) = 0$ when $f(x) = 0$?
In other words,
$$ x_{i+1} = x_i - ??? . $$
It turns out that Newton's method has locally quadratic convergence to simple roots, $\lim_{i \to \infty} |e_{i+1}|/|e_i|^2 < \infty$.
"The number of correct digits doubles each iteration."
Now that we know how to make a good guess accurate, the effort lies in getting a good guess.
Culture: fast inverse square root
The following code appeared literally (including comments) in the Quake III Arena source code (late 1990s).
```C
float Q_rsqrt( float number )
{
long i;
float x2, y;
const float threehalfs = 1.5F;
x2 = number * 0.5F;
y = number;
i = * ( long * ) &y; // evil floating point bit level hacking
i = 0x5f3759df - ( i >> 1 ); // what the fuck?
y = * ( float * ) &i;
y = y * ( threehalfs - ( x2 * y * y ) ); // 1st iteration
// y = y * ( threehalfs - ( x2 * y * y ) ); // 2nd iteration, this can be removed
return y;
}
```
We now have vector instructions for approximate inverse square root.
More at https://en.wikipedia.org/wiki/Fast_inverse_square_root
Conditioning
End of explanation
"""
format((.2 - 1/3) + 2/15, '.20f')
# format((.2 - 1/3) + (1/3 - 0.2), '.20f')
# format((1 + 1e-12) - 1, '.20f')
eps = 1
while 1 + eps > 1:
eps /= 2
eps_machine = eps # We call this "machine epsilon"
numpy.log(1 + 1e-12) - numpy.log1p(1e-12)
(numpy.log(1 + 1e-12) - numpy.log1p(1e-12)) / numpy.log1p(1e-12)
x = numpy.array([1,1e5,1e10,1e15])
numpy.sin(numpy.pi*x)
numpy.sin(x)**2 + numpy.cos(x)**2 - 1
[numpy.tan((3.14159+eps)/2) for eps in [1e-6,1e-8]], 1/numpy.cos(3.14159)**2
"""
Explanation: We only get three digits correct despite a very small residual (and it takes many iterations to get there).
Difficulty computing zeros of polynomials can also arise when all the roots are simple. For example, the Wilkinson polynomial
$$ \prod_{i=1}^{20} (x - i) = \sum_{i=0}^{20} a_i x^i $$
has roots at each of the positive integers up to 20, but the roots are extremely sensitive to perturbations of the coefficients $a_i$, as shown in this figure from Trefethen and Bau (1999).
Numerical difficulties in which "correct" algorithms produce unreliable solutions almost always stem from lack of stability and/or ill conditioning.
Absolute condition number
Consider a function $f: X \to Y$ and define the absolute condition number
$$ \hat\kappa = \lim_{\delta \to 0} \max_{|\delta x| < \delta} \frac{|f(x + \delta x) - f(x)|}{|\delta x|} = \max_{\delta x} \frac{|\delta f|}{|\delta x|}. $$
If $f$ is differentiable, then $\hat\kappa = |f'(x)|$.
Floating point arithmetic
Floating point arithmetic $x \circledast y := \text{float}(x * y)$ is exact within a relative accuracy $\epsilon_{\text{machine}}$. Formally,
$$ x \circledast y = (x * y) (1 + \epsilon) $$
for some $|\epsilon| \le \epsilon_{\text{machine}}$.
End of explanation
"""
def diff(f, x, epsilon=1e-5):
    """Forward-difference estimate of f'(x) using step size epsilon."""
    rise = f(x + epsilon) - f(x)
    return rise / epsilon
diff(numpy.sin, 0.7, 1e-8) - numpy.cos(0.7)
x = .5
diff(numpy.tan, x) - 1/numpy.cos(x)**2
x = 3.14/2
[(eps, diff(numpy.tan, x, eps) - 1/numpy.cos(x)**2) for eps in [1e-14, 1e-12, 1e-10, 1e-8, 1e-6, 1e-4]]
x = 1e4
[(eps, diff(numpy.log, x, eps) - 1/x) for eps in [1e-14, 1e-12, 1e-10, 1e-8, 1e-6, 1e-4, 1e-2]]
"""
Explanation: Relative condition number
Given the relative nature of floating point arithmetic, it is more useful to discuss relative condition number,
$$ \kappa = \max_{\delta x} \frac{|\delta f|/|f|}{|\delta x|/|x|}
= \max_{\delta x} \Big[ \frac{|\delta f|/|\delta x|}{|f| / |x|} \Big] $$
or, if $f$ is differentiable,
$$ \kappa = \max_{\delta x} |f'(x)| \frac{|x|}{|f|} . $$
How does a condition number get big?
Take-home message
The relative accuracy of the best-case algorithm will not be reliably better than $\epsilon_{\text{machine}}$ times the condition number.
$$ \max_{\delta x} \frac{|\delta f|}{|f|} \ge \kappa \cdot \epsilon_{\text{machine}} $$
Numerical differentiation
Suppose we want to apply Newton's method to a function that we know how to evaluate, but don't have code to differentiate. This is often because it's difficult/error-prone to write or because the interface by which we call it does not support derivatives. (Commercial packages often fall in this category.)
End of explanation
"""
def diff_wp(f, x, eps=1e-8):
    """Numerical derivative with the Walker and Pernice (1998) step choice.

    The step eps * (1 + |x|) scales with the magnitude of x, keeping the
    relative perturbation sensible for both tiny and large arguments.
    """
    step = eps * (1 + abs(x))
    rise = f(x + step) - f(x)
    return rise / step
x = 1
[(eps, diff_wp(numpy.log, x, eps) - 1/x) for eps in [1e-14, 1e-12, 1e-10, 1e-8, 1e-6, 1e-4, 1e-2]]
x = 1e-4
[(eps, diff_wp(numpy.log, x, eps) - 1/x) for eps in [1e-14, 1e-12, 1e-10, 1e-8, 1e-6, 1e-4, 1e-2]]
"""
Explanation: Automatically choosing a suitable $\epsilon$
End of explanation
"""
numpy.log(-1)
x = numpy.sqrt(-1)
1/numpy.inf
x = numpy.linspace(0,3,100)
pyplot.plot(x, numpy.sqrt(x + 1e-1))
pyplot.show()
numpy.tan(1e100)
numpy.tan(1e100*(1 + 2*eps_machine))
1e100 - 1e100*(1 + 2*eps_machine)
"""
Explanation: This algorithm is imperfect, leaving some scaling responsibility to the user.
It is the default in PETSc's "matrix-free" Newton-type solvers.
Tinkering in class (2016-09-06)
End of explanation
"""
x = numpy.linspace(-5,5)
pyplot.figure()
f = numpy.sin(x) + 1.1*x
pyplot.plot(x, f)
"""
Explanation: In class 2016-09-14
We are looking for a function for which the linear fit (newton solver) is more reliable than a quadratic fit (cubic solver).
End of explanation
"""
|
projectappia/eegnet | src/ipynb/create_TFRecords.ipynb | mit | import shutil
# Read files list. Header: file, class (0: interictal, 1: preictal), safe (or not to use)
files_list = np.genfromtxt('./train_and_test_data_labels_safe.csv',
dtype=("|S15", np.int32, np.int32), delimiter=',', skip_header=1)
# Get only files which are safe to use
files_list = [fl for fl in files_list if fl[2] == 1]
# Construct new file names based on class field
new_files_list = []
for fl in files_list:
name = fl[0].split('.')[0].split('_')
if len(name) == 3:
name = name[0] + '_' + name[1] + '_' + str(fl[1]) + '.mat'
else:
name = name[0] + '_' + name[1] + 'test_' + str(fl[1]) + '.mat'
new_files_list.append(name)
# Get only files names
files_list = [fl[0] for fl in files_list]
# Move files to new folder
print('Train data size:', len(files_list))
for idx in xrange(len(files_list)):
print('Copying', files_list[idx], '----->', new_files_list[idx], 'index:', idx)
shutil.copy('../data/train/'+files_list[idx], '../data/train_new/'+new_files_list[idx])
"""
Explanation: Create new train folders with only the files from train_and_test_data_labels_safe.csv
End of explanation
"""
_SOURCE_FILES = "../data/train/*.mat"
_DEST_FOLDER = "../dataset/train/"
_NUM_FILES = None # None is the total number of files
def mat2tfr(p_file, rem_dropout = False):
    """Convert one EEG .mat file into a TFRecord file under _DEST_FOLDER.

    :param:
     - `p_file`: path to the source .mat file, named <patient>_<segment>_<label>.mat
     - `rem_dropout`: if True, skip files that look like pure signal dropout
    Skips conversion when the destination .tfr file already exists.
    """
    # getting the filename and retrieving the patient, segment and label data
    pat, seg, label = p_file.split('/')[-1].split('.')[0].split("_")
    filename = pat + "_" + seg + "_" + label + ".tfr"
    fullpathname = _DEST_FOLDER + filename
    if os.path.exists(fullpathname):
        print("Dataset file", fullpathname, "already exists, skipping...")
    else:
        t = time.time()
        print("Converting " + p_file + " ----> " + fullpathname)
        # converting mat file as numpy
        mat = loadmat(p_file)
        data = mat['dataStruct']['data'][0][0]
        # Check if file is mainly zero's (100% dropout): nearly all zeros or
        # a channel with near-zero variance indicates a dead recording
        if rem_dropout:
            if (np.count_nonzero(data) < 10) or (np.any(np.std(data, axis=0) < 0.5)):
                print("WARNING: File %s is all dropout." %p_file)
                return
        # TensorFlow Records writer (TF1 python_io API)
        with tf.python_io.TFRecordWriter(fullpathname) as tfrwriter:
            # Fill protobuff: flattened float samples, integer label, and the name
            protobuf = tf.train.Example(features=tf.train.Features(feature={
                'data' : tf.train.Feature(float_list=tf.train.FloatList(value=data.flatten().tolist())),
                'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
                'filename': tf.train.Feature(bytes_list=tf.train.BytesList(value=[filename])),
            }))
            write = tfrwriter.write(protobuf.SerializeToString())
        elapsed = time.time() - t
        print("elapsed: %.3fs"%elapsed)
def dataset(folder, num_files=None):
    """Convert every .mat file matching ``folder`` (a glob pattern)
    to a TFRecord, optionally truncating to the first ``num_files``."""
    matched = tf.gfile.Glob(folder)
    # None means "convert everything"; otherwise keep only a prefix.
    selected = matched if num_files is None else matched[:num_files]
    print("Converting #%d files."%len(selected))
    for path in selected:
        mat2tfr(path)
dataset(_SOURCE_FILES, _NUM_FILES)
print('finished')
def plot_eeg(data):
    """Plot all 16 EEG channels of ``data`` (samples x channels)
    as an 8x2 grid of subplots."""
    plt.figure(figsize=(10,20))
    for channel in range(16):
        plt.subplot(8, 2, channel + 1)
        plt.plot(data[:, channel])
        # plt.savefig('foo.pdf', bbox_inches='tight')
"""
Explanation: Create TFRecords
End of explanation
"""
|
ian-andrich/linear-algebra-crash-course | Notebooks/02_Inverse_Matrix_Theorem.ipynb | gpl-3.0 | import numpy as np
import numpy.linalg as la
A = np.array(range(1,5)).reshape(2,2)
determinant_A = la.det(A)
print(A)
print("Determinant is: {}".format(determinant_A)) # Notice the rounding error.
"""
Explanation: The inverse Matrix Theorem
The inverse matrix theorem is a statement about a number of equivalent conditions that we can impose on a matrix.
Given a matrix $A$ with shape $(n, n)$
1. $Det(A) \neq 0$ implies $A^{-1}$ exists.
2. We can find a Matrix B such that $A * B = B * A = np.eye(n) = I_n$
3. $Det(A) = \lambda _1 * \lambda _2 * ... * \lambda _n$ where $\lambda _i$ is an eigenvalue of the matrix.
These require some explanation.
Determinants.
Linear Transformations may stretch the underlying space by some magnitude (eigenvalues) along certain vectors (eigenvectors). These are the vectors and values which "characterize a matrix." -- Hence the word eigen.
End of explanation
"""
# Let's check it's eigenvalues.
print("The Matrix A has eigenvalues: {}".format([x for x in la.eigvals(A)]))
print("And their product is: {}".format(np.product(la.eigvals(A))))
determinant_A - np.product(la.eigvals(A)) < 10**-5
# Let's define an epsilon and check if two floats are approximately equal.
eps = 10 ** -5

def check_floats_equal(float_1, float_2, eps=eps):
    """Return True when float_1 and float_2 differ by less than eps.

    Uses the absolute difference so the comparison is symmetric.
    The original ``float_1 - float_2 < eps`` was a bug: it accepted
    *any* pair where float_1 is smaller than float_2.
    """
    return abs(float_1 - float_2) < eps
check_floats_equal(np.product(la.eigvals(A)), la.det(A))
"""
Explanation: Hmm.... is this related to anything??
End of explanation
"""
vals, vecs = la.eig(A)
print(vecs)
# Lets define a distance function for vectors in a matrix.
def l2_distance_cols(matrix):
    """Return the Euclidean (L2) norm of each column of ``matrix``.

    The previous implementation returned the *squared* column norms
    (it never took the square root), which only coincided with the
    true L2 length for unit vectors. Vectorized with numpy instead
    of nested Python loops.
    """
    return np.sqrt((matrix ** 2).sum(axis=0))
l2_distance_cols(vecs)
"""
Explanation: Hmm... Interesting.
The determinant of a matrix is just the product of its eigenvalues.
End of explanation
"""
A = np.array([0, -1, 1, 0]).reshape(2, 2)
A
np.linalg.det(A)
vals, vecs = np.linalg.eig(A)
print("Values:\n{}".format(vals))
print("Vectors:\n{}".format(vecs))
"""
Explanation: It looks like the eigenvectors are normalized to length 1.
Let's look at another matrix and look at its eigenvalues and eigenvectors. This one should be a bit simpler.
End of explanation
"""
# First a quick vector making helper function.
def list_to_vector(items):
    """Convert a flat sequence into an (n, 1) numpy column vector."""
    vec = np.array(items)
    return vec.reshape(len(items), 1)
np.matmul(A, list_to_vector([1,0]))
np.matmul(A, list_to_vector([0,1]))
"""
Explanation: Hmm.... what is happening there, why does it have imaginary eigenvectors??
End of explanation
"""
A_inv = np.linalg.inv(A)
print("Here is A^-1\n{}".format(A_inv))
np.matmul(A_inv, list_to_vector([1,0]))
np.matmul(A_inv, list_to_vector([0,1]))
"""
Explanation: This looks like a twist to me. It's sending
$$ [1, 0]^T \mapsto [0,1]^T$$
$$ [0, 1]^T \mapsto [-1,0]^T$$
If it's a twist... how can it have a direction it stretches something in a straight line in... we'd have to imagine really hard...
Oh.
That's why it has imaginary eigenvectors -- they don't really exist. But they do symbolically and mathematically.
Well... according to the inverse matrix theorem, the fact that $Det(A) = 1 \neq 0$ implies that it has an inverse matrix. Lets check that out.
End of explanation
"""
I_2 = np.matmul(A, A_inv)
print("{} \n* \n{} \n=\n{}".format(A, A_inv, I_2))
"""
Explanation: Hmm...
$$ [1,0]^T \mapsto [0,-1]^T \ [0,1]^T \mapsto [1,0]^T $$
Oh!
It's a rotation in the opposite direction!! Neato gang!
What happens if we multiply the two together...?
End of explanation
"""
A = np.eye(2) * 2
print(A)
vals, vecs = np.linalg.eig(A)
print("Vals:\n{}".format(vals))
print("Vecs:\n{}".format(vecs))
"""
Explanation: We just get the identity matrix back -- just like we wanted!!
Lets take another example.
Fix a new matrix $A = 2 * I_2$
End of explanation
"""
A_inv = np.linalg.inv(A)
print("The inverse is:\n{}".format(A_inv))
"""
Explanation: What should the inverse of $A$ be?
End of explanation
"""
|
dmytroKarataiev/MachineLearning | boston_housing/boston_housing.ipynb | mit | # Import libraries necessary for this project
import numpy as np
import pandas as pd
import visuals as vs # Supplementary code
from sklearn.cross_validation import ShuffleSplit
# Pretty display for notebooks
%matplotlib inline
# Load the Boston housing dataset
data = pd.read_csv('housing.csv')
prices = data['MDEV']
features = data.drop('MDEV', axis = 1)
# Success
print "Boston housing dataset has {} data points with {} variables each.".format(*data.shape)
"""
Explanation: Machine Learning Engineer Nanodegree
Model Evaluation & Validation
Project 1: Predicting Boston Housing Prices
Welcome to the first project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this project. You will not need to modify the included code beyond what is requested. Sections that begin with 'Implementation' in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a 'TODO' statement. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a 'Question X' header. Carefully read each question and provide thorough answers in the following text boxes that begin with 'Answer:'. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
Note: Code and Markdown cells can be executed using the Shift + Enter keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
Getting Started
In this project, you will evaluate the performance and predictive power of a model that has been trained and tested on data collected from homes in suburbs of Boston, Massachusetts. A model trained on this data that is seen as a good fit could then be used to make certain predictions about a home — in particular, its monetary value. This model would prove to be invaluable for someone like a real estate agent who could make use of such information on a daily basis.
The dataset for this project originates from the UCI Machine Learning Repository. The Boston housing data was collected in 1978 and each of the 506 entries represent aggregated data about 14 features for homes from various suburbs in Boston, Massachusetts. For the purposes of this project, the following preprocessing steps have been made to the dataset:
- 16 data points have an 'MDEV' value of 50.0. These data points likely contain missing or censored values and have been removed.
- 1 data point has an 'RM' value of 8.78. This data point can be considered an outlier and has been removed.
- The features 'RM', 'LSTAT', 'PTRATIO', and 'MDEV' are essential. The remaining non-relevant features have been excluded.
- The feature 'MDEV' has been multiplicatively scaled to account for 35 years of market inflation.
Run the code cell below to load the Boston housing dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
End of explanation
"""
# TODO: Minimum price of the data
minimum_price = np.amin(prices)
# TODO: Maximum price of the data
maximum_price = np.amax(prices)
# TODO: Mean price of the data
mean_price = np.mean(prices)
# TODO: Median price of the data
median_price = np.median(prices)
# TODO: Standard deviation of prices of the data
std_price = np.std(prices)
# Show the calculated statistics
print "Statistics for Boston housing dataset:\n"
print "Minimum price: ${:,.2f}".format(minimum_price)
print "Maximum price: ${:,.2f}".format(maximum_price)
print "Mean price: ${:,.2f}".format(mean_price)
print "Median price ${:,.2f}".format(median_price)
print "Standard deviation of prices: ${:,.2f}".format(std_price)
"""
Explanation: Data Exploration
In this first section of this project, you will make a cursory investigation about the Boston housing data and provide your observations. Familiarizing yourself with the data through an explorative process is a fundamental practice to help you better understand and justify your results.
Since the main goal of this project is to construct a working model which has the capability of predicting the value of houses, we will need to separate the dataset into features and the target variable. The features, 'RM', 'LSTAT', and 'PTRATIO', give us quantitative information about each data point. The target variable, 'MDEV', will be the variable we seek to predict. These are stored in features and prices, respectively.
Implementation: Calculate Statistics
For your very first coding implementation, you will calculate descriptive statistics about the Boston housing prices. Since numpy has already been imported for you, use this library to perform the necessary calculations. These statistics will be extremely important later on to analyze various prediction results from the constructed model.
In the code cell below, you will need to implement the following:
- Calculate the minimum, maximum, mean, median, and standard deviation of 'MDEV', which is stored in prices.
- Store each calculation in their respective variable.
End of explanation
"""
from sklearn.metrics import r2_score
def performance_metric(y_true, y_predict):
    """Score the model with the coefficient of determination (R^2)
    between the true target values and the predicted values."""
    return r2_score(y_true, y_predict)
"""
Explanation: Question 1 - Feature Observation
As a reminder, we are using three features from the Boston housing dataset: 'RM', 'LSTAT', and 'PTRATIO'. For each data point (neighborhood):
- 'RM' is the average number of rooms among homes in the neighborhood.
- 'LSTAT' is the percentage of all Boston homeowners who have a greater net worth than homeowners in the neighborhood.
- 'PTRATIO' is the ratio of students to teachers in primary and secondary schools in the neighborhood.
Using your intuition, for each of the three features above, do you think that an increase in the value of that feature would lead to an increase in the value of 'MDEV' or a decrease in the value of 'MDEV'? Justify your answer for each.
Hint: Would you expect a home that has an 'RM' value of 6 be worth more or less than a home that has an 'RM' value of 7?
Answer:
1. If RM increases - MDEV will also increase (houses with more rooms are bigger, that is why their price is higher).
2. If LSTAT increases, that means that prices in our particular neighborhood are lower compared to others. So the MDEV will decrease.
3. Low PTRATIO means that schools have more money and can afford more teachers, that's why increase of this feature will decrease the MDEV value.
Developing a Model
In this second section of the project, you will develop the tools and techniques necessary for a model to make a prediction. Being able to make accurate evaluations of each model's performance through the use of these tools and techniques helps to greatly reinforce the confidence in your predictions.
Implementation: Define a Performance Metric
It is difficult to measure the quality of a given model without quantifying its performance over training and testing. This is typically done using some type of performance metric, whether it is through calculating some type of error, the goodness of fit, or some other useful measurement. For this project, you will be calculating the coefficient of determination, R<sup>2</sup>, to quantify your model's performance. The coefficient of determination for a model is a useful statistic in regression analysis, as it often describes how "good" that model is at making predictions.
The values for R<sup>2</sup> range from 0 to 1, which captures the percentage of squared correlation between the predicted and actual values of the target variable. A model with an R<sup>2</sup> of 0 always fails to predict the target variable, whereas a model with an R<sup>2</sup> of 1 perfectly predicts the target variable. Any value between 0 and 1 indicates what percentage of the target variable, using this model, can be explained by the features. A model can be given a negative R<sup>2</sup> as well, which indicates that the model is no better than one that naively predicts the mean of the target variable.
For the performance_metric function in the code cell below, you will need to implement the following:
- Use r2_score from sklearn.metrics to perform a performance calculation between y_true and y_predict.
- Assign the performance score to the score variable.
End of explanation
"""
# Calculate the performance of this model
score = performance_metric([3, -0.5, 2, 7, 4.2], [2.5, 0.0, 2.1, 7.8, 5.3])
print "Model has a coefficient of determination, R^2, of {:.3f}.".format(score)
"""
Explanation: Question 2 - Goodness of Fit
Assume that a dataset contains five data points and a model made the following predictions for the target variable:
| True Value | Prediction |
| :-------------: | :--------: |
| 3.0 | 2.5 |
| -0.5 | 0.0 |
| 2.0 | 2.1 |
| 7.0 | 7.8 |
| 4.2 | 5.3 |
Would you consider this model to have successfully captured the variation of the target variable? Why or why not?
Run the code cell below to use the performance_metric function and calculate this model's coefficient of determination.
End of explanation
"""
# Import 'train_test_split'
from sklearn import cross_validation
# Shuffle and split the data into training and testing subsets
X_train, X_test, y_train, y_test = cross_validation.train_test_split(
features, prices, test_size = 0.2, random_state = 0)
# Success
print "Training and testing split was successful."
"""
Explanation: Answer: I consider this model to be successful with a coefficient of determination of 0.923 (>90% of data is predictable).
Implementation: Shuffle and Split Data
Your next implementation requires that you take the Boston housing dataset and split the data into training and testing subsets. Typically, the data is also shuffled into a random order when creating the training and testing subsets to remove any bias in the ordering of the dataset.
For the code cell below, you will need to implement the following:
- Use train_test_split from sklearn.cross_validation to shuffle and split the features and prices data into training and testing sets.
- Split the data into 80% training and 20% testing.
- Set the random_state for train_test_split to a value of your choice. This ensures results are consistent.
- Assign the train and testing splits to X_train, X_test, y_train, and y_test.
End of explanation
"""
# Produce learning curves for varying training set sizes and maximum depths
vs.ModelLearning(features, prices)
"""
Explanation: Question 3 - Training and Testing
What is the benefit to splitting a dataset into some ratio of training and testing subsets for a learning algorithm?
Hint: What could go wrong with not having a way to test your model?
Answer: First of all it gives us an estimate on performance on an independent dataset, secondly it serves as a check on overfiting. Basically, without testing the data we can't be sure in our results at all.
Analyzing Model Performance
In this third section of the project, you'll take a look at several models' learning and testing performances on various subsets of training data. Additionally, you'll investigate one particular algorithm with an increasing 'max_depth' parameter on the full training set to observe how model complexity affects performance. Graphing your model's performance based on varying criteria can be beneficial in the analysis process, such as visualizing behavior that may not have been apparent from the results alone.
Learning Curves
The following code cell produces four graphs for a decision tree model with different maximum depths. Each graph visualizes the learning curves of the model for both training and testing as the size of the training set is increased. Note that the shaded region of a learning curve denotes the uncertainty of that curve (measured as the standard deviation). The model is scored on both the training and testing sets using R<sup>2</sup>, the coefficient of determination.
Run the code cell below and use these graphs to answer the following question.
End of explanation
"""
vs.ModelComplexity(X_train, y_train)
"""
Explanation: Question 4 - Learning the Data
Choose one of the graphs above and state the maximum depth for the model. What happens to the score of the training curve as more training points are added? What about the testing curve? Would having more training points benefit the model?
Hint: Are the learning curves converging to particular scores?
Answer:
I chose second graph with a depth of 3.
When we add more data - curves converge to a score of ~0.8. More training data, I think, will not improve the model dramatically, so it's not needed.
Each of the graphs testing scores grow rapidly and stay stable after adding only ~50 data points. Training scores on the other hand decrease with adding more data, but also become stable after adding only ~50 data points, except for graphs with max_depth over 6 (where the model is overfitted).
First graph with a depth of 1 has a high bias and both training and testing curves converge to a score with very low accuracy. With the addition of more data points the model isn't improving as it is very simple.
Second graph actually scores considerably well among others. Both curves converge at a score of around ~0.8 and do not improve with more data points after ~150. This graph I've chosen as the best among the results.
Third graph has an overfitting problem. We are starting to see that variance is considerably high and overall prediction score plateaus at a score of ~0.7.
Last graph has a high training score and a score of around ~0.6 after adding ~50 data points, the model is also overfitted and can't be used correctly.
Complexity Curves
The following code cell produces a graph for a decision tree model that has been trained and validated on the training data using different maximum depths. The graph produces two complexity curves — one for training and one for validation. Similar to the learning curves, the shaded regions of both the complexity curves denote the uncertainty in those curves, and the model is scored on both the training and validation sets using the performance_metric function.
Run the code cell below and use this graph to answer the following two questions.
End of explanation
"""
# Import 'make_scorer', 'DecisionTreeRegressor', and 'GridSearchCV'
from sklearn.metrics import make_scorer
from sklearn.tree import DecisionTreeRegressor
from sklearn import grid_search
def fit_model(X, y):
    """Grid-search the 'max_depth' hyperparameter of a decision tree
    regressor on the data [X, y] and return the best estimator found."""
    # 10 shuffled cross-validation splits, 80% train / 20% validation each.
    cv_sets = ShuffleSplit(X.shape[0], n_iter = 10, test_size = 0.20, random_state = 0)
    regressor = DecisionTreeRegressor()
    # Candidate depths 1..10 (same values as the explicit literal list).
    params = {'max_depth': list(range(1, 11))}
    # Wrap the R^2 metric so GridSearchCV can use it as a scorer.
    scoring_fnc = make_scorer(performance_metric)
    grid = grid_search.GridSearchCV(regressor, params, cv=cv_sets, scoring=scoring_fnc)
    # fit() returns the grid object itself, so we can chain the lookup.
    return grid.fit(X, y).best_estimator_
"""
Explanation: Question 5 - Bias-Variance Tradeoff
When the model is trained with a maximum depth of 1, does the model suffer from high bias or from high variance? How about when the model is trained with a maximum depth of 10? What visual cues in the graph justify your conclusions?
Hint: How do you know when a model is suffering from high bias or high variance?
Answer:
With a max depth of 1 the model suffers from high bias, because the model is not complex enough, which leads to low accuracy.
With a max depth of 10 the model suffers from high variance, because the model was trained with too many details from the training set and it overfits their characteristics on the whole dataset.
When we a have a bias error - training and validation curves are drawn together, but with a very low accuracy score. High variance on the other hand looks like the curves with a max depth of 10: high accuracy score on a training data, low - on a whole dataset due to the overfitting.
Question 6 - Best-Guess Optimal Model
Which maximum depth do you think results in a model that best generalizes to unseen data? What intuition lead you to this answer?
Answer:
A model with a max_depth of 3 is the best among others, because it is very consistent in scoring on testing and validation data and also having the highest score (R squared) of around 0.8. Other models, especially the first and the last one are useless, as they can't predict data correctly at all (first has a huge bias, last has a huge variance).
Evaluating Model Performance
In this final section of the project, you will construct a model and make a prediction on the client's feature set using an optimized model from fit_model.
Question 7 - Grid Search
What is the grid search technique and how it can be applied to optimize a learning algorithm?
Answer:
GridSearch is a way of systematically working through multiple combinations of parameter tunes, cross-validating as it goes to determine which tune gives the best performance.
Question 8 - Cross-Validation
What is the k-fold cross-validation training technique? What benefit does this technique provide for grid search when optimizing a model?
Hint: Much like the reasoning behind having a testing set, what could go wrong with using grid search without a cross-validated set?
Answer:
K-fold training technique divides data in equal sized bins to estimate performance of a model.
K-fold helps GridSearch to evaluate performance of a family of models and it kind of helps us to use all our data for both training and testing.
The biggest advantage of a K-fold cross-validation is that we can use all our data for testing and training, by doing that we maximize both training (best learning result) and testing (best validation) data.
If we didn't use CV on data while using GridSearch we can get wrong results, as we wouldn't know the accuracy of the model (which was computed as an average of each iteration in K-Fold CV). So the k-fold CV is computationally expensive, but is great when the number of samples is low.
Implementation: Fitting a Model
Your final implementation requires that you bring everything together and train a model using the decision tree algorithm. To ensure that you are producing an optimized model, you will train the model using the grid search technique to optimize the 'max_depth' parameter for the decision tree. The 'max_depth' parameter can be thought of as how many questions the decision tree algorithm is allowed to ask about the data before making a prediction. Decision trees are part of a class of algorithms called supervised learning algorithms.
For the fit_model function in the code cell below, you will need to implement the following:
- Use DecisionTreeRegressor from sklearn.tree to create a decision tree regressor object.
- Assign this object to the 'regressor' variable.
- Create a dictionary for 'max_depth' with the values from 1 to 10, and assign this to the 'params' variable.
- Use make_scorer from sklearn.metrics to create a scoring function object.
- Pass the performance_metric function as a parameter to the object.
- Assign this scoring function to the 'scoring_fnc' variable.
- Use GridSearchCV from sklearn.grid_search to create a grid search object.
- Pass the variables 'regressor', 'params', 'scoring_fnc', and 'cv_sets' as parameters to the object.
- Assign the GridSearchCV object to the 'grid' variable.
End of explanation
"""
# Fit the training data to the model using grid search
reg = fit_model(X_train, y_train)
# Produce the value for 'max_depth'
print "Parameter 'max_depth' is {} for the optimal model.".format(reg.get_params()['max_depth'])
"""
Explanation: Making Predictions
Once a model has been trained on a given set of data, it can now be used to make predictions on new sets of input data. In the case of a decision tree regressor, the model has learned what the best questions to ask about the input data are, and can respond with a prediction for the target variable. You can use these predictions to gain information about data where the value of the target variable is unknown — such as data the model was not trained on.
Question 9 - Optimal Model
What maximum depth does the optimal model have? How does this result compare to your guess in Question 6?
Run the code block below to fit the decision tree regressor to the training data and produce an optimal model.
End of explanation
"""
# Produce a matrix for client data
client_data = [[5, 34, 15], # Client 1
[4, 55, 22], # Client 2
[8, 7, 12]] # Client 3
# Show predictions
for i, price in enumerate(reg.predict(client_data)):
print "Predicted selling price for Client {}'s home: ${:,.2f}".format(i+1, price)
"""
Explanation: Answer: Maximum depth for the optimal model is 4. Using the pictures above I've chosen the closest which was 3.
Question 10 - Predicting Selling Prices
Imagine that you were a real estate agent in the Boston area looking to use this model to help price homes owned by your clients that they wish to sell. You have collected the following information from three of your clients:
| Feature | Client 1 | Client 2 | Client 3 |
| :---: | :---: | :---: | :---: |
| Total number of rooms in home | 5 rooms | 4 rooms | 8 rooms |
| Household net worth (income) | Top 34th percent | Bottom 45th percent | Top 7th percent |
| Student-teacher ratio of nearby schools | 15-to-1 | 22-to-1 | 12-to-1 |
What price would you recommend each client sell his/her home at? Do these prices seem reasonable given the values for the respective features?
Hint: Use the statistics you calculated in the Data Exploration section to help justify your response.
Run the code block below to have your optimized model make predictions for each client's home.
End of explanation
"""
vs.PredictTrials(features, prices, fit_model, client_data)
"""
Explanation: Answer: I think these prices are reasonable.
1. Small house in a good neighbourhood, maybe this price is a little bit lower that should've been predicted, but not by a big margin.
2. This price seems completely reasonable to me: small house, not the best schools in the area.
3. Very close to a top price in the dataset. I think, this price is absolutely correct.
Sensitivity
An optimal model is not necessarily a robust model. Sometimes, a model is either too complex or too simple to sufficiently generalize to new data. Sometimes, a model could use a learning algorithm that is not appropriate for the structure of the data given. Other times, the data itself could be too noisy or contain too few samples to allow a model to adequately capture the target variable — i.e., the model is underfitted. Run the code cell below to run the fit_model function ten times with different training and testing sets to see how the prediction for a specific client changes with the data it's trained on.
End of explanation
"""
|
paulbrodersen/netgraph | docs/source/sphinx_gallery_output/plot_14_bipartite_layout.ipynb | gpl-3.0 | import matplotlib.pyplot as plt
from netgraph import Graph
edges = [
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(5, 6)
]
Graph(edges, node_layout='bipartite', node_labels=True)
plt.show()
"""
Explanation: Bipartite node layout
By default, nodes are partitioned into two subsets using a two-coloring of the graph.
The median heuristic proposed in Eades & Wormald (1994) is used to reduce edge crossings.
End of explanation
"""
import matplotlib.pyplot as plt
from netgraph import Graph
edges = [
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(5, 6)
]
Graph(edges, node_layout='bipartite', node_layout_kwargs=dict(subsets=[(0, 2, 4, 6), (1, 3, 5)]), node_labels=True)
plt.show()
"""
Explanation: The partitions can also be made explicit using the :code:subsets argument.
In multi-component bipartite graphs, multiple two-colorings are possible,
such that explicit specification of the subsets may be necessary to achieve the desired partitioning of nodes.
End of explanation
"""
import matplotlib.pyplot as plt
from netgraph import Graph, get_bipartite_layout
edges = [
(0, 1),
(1, 2),
(2, 3),
(3, 4),
(5, 6)
]
node_positions = get_bipartite_layout(edges, subsets=[(0, 2, 4, 6), (1, 3, 5)])
node_positions = {node : (x, y) for node, (y, x) in node_positions.items()}
Graph(edges, node_layout=node_positions, node_labels=True)
plt.show()
"""
Explanation: To change the layout from the left-right orientation to a bottom-up orientation,
call the layout function directly and swap x and y coordinates of the node positions.
End of explanation
"""
|
liulixiang1988/documents | Python数据科学101.ipynb | mit | %matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 3*np.pi, 500)
plt.plot(x, np.sin(x**2))
plt.title('Sine wave')
"""
Explanation: Python数据科学101
1. 配置系统
Python
JDK
创建C:\Hadoop\bin
在这里下载windows版的hadoop https://github.com/steveloughran/winutils 拷贝winutils到C:\Hadoop\bin下面
创建HADOOP_HOME环境变量,指向C:\Hadoop
创建C:\temp\hive文件夹
运行c:\hadoop\bin\winutils chmod 777 \temp\hive
下载Spark: https://spark.apache.org/downloads.html
解压下载的Spark的文件到C:\SPARK目录下,其它操作系统的放到home目录
创建SPARK_HOME,指向C:\SPARK
运行c:\spark\bin\spark-shell看看是否安装成功
2. 使用Python
安装Anaconda
检查conda: conda --version
检查安装的包: conda list
升级: conda update conda
3. 实验环境
输入jupyter notebook
End of explanation
"""
import numpy as np
import pandas as pd
"""
Explanation: 4. Pandas简介
最重要的是DataFrame和Series
End of explanation
"""
s = pd.Series([1, 3, 5, np.nan, 6, 8])
s[4] # 6.0
"""
Explanation: 4.1 Series
创建一个series,包含空值NaN
End of explanation
"""
df = pd.DataFrame({'data': ['2016-01-01', '2016-01-02', '2016-01-03'], 'qty': [20, 30, 40]})
df
"""
Explanation: 4.2 Dataframes
End of explanation
"""
rain = pd.read_csv('data/rainfall/rainfall.csv')
rain
# 加载一列
rain['City']
# 加载一行(第二行)
rain.loc[[1]]
# 第一行和第二行
rain.loc[0:1]
"""
Explanation: 更大的数据应当从文件里获取
End of explanation
"""
# 查找所有降雨量小于10的数据
rain[rain['Rainfall'] < 10]
"""
Explanation: 4.3 过滤
End of explanation
"""
rain[rain['Month'] == 'Apr']
"""
Explanation: 查找4月份的降雨
End of explanation
"""
rain[rain['City'] == 'Los Angeles']
"""
Explanation: 查找Los Angeles的数据
End of explanation
"""
rain = rain.set_index(rain['City'] + rain['Month'])
"""
Explanation: 4.4 给行起名(Naming Rows)
End of explanation
"""
rain.loc['San FranciscoApr']
"""
Explanation: 注意,当我们修改dataframe时,其实是在创建一个副本,因此要把这个值再赋值给原有的dataframe
End of explanation
"""
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
df = pd.read_csv('data/nycflights13/flights.csv.gz')
df
"""
Explanation: 5. Pandas 例子
End of explanation
"""
mean_delay_by_month = df.groupby(['month'])['arr_delay'].mean()
mean_delay_by_month
mean_month_plt = mean_delay_by_month.plot(kind='bar', title='Mean Delay By Month')
mean_month_plt
"""
Explanation: 这里我们主要关注统计数据和可视化。我们来看一下按月统计的晚点时间的均值。
End of explanation
"""
# Mean arrival delay by month for flights into Chicago O'Hare (ORD).
mean_delay_by_month_ord = df[(df.dest == 'ORD')].groupby(['month'])['arr_delay'].mean()
print("Flights to Chicago (ORD)")
print(mean_delay_by_month_ord)
mean_month_plt_ord = mean_delay_by_month_ord.plot(kind='bar', title="Mean Delay By Month (Chicago)")
mean_month_plt_ord
# Compare with flights into Los Angeles (LAX).
mean_delay_by_month_lax = df[(df.dest == 'LAX')].groupby(['month'])['arr_delay'].mean()
print("Flights to Los Angeles (LAX)")  # fixed: label previously said "Chicago (LAX)"
print(mean_delay_by_month_lax)
mean_month_plt_lax = mean_delay_by_month_lax.plot(kind='bar', title="Mean Delay By Month (Los Angeles)")
mean_month_plt_lax
"""
Explanation: 注意,这里9、10月均值会有负值。
End of explanation
"""
# 看看是否不同的航空公司对晚点会有不同的影响
df[['carrier', 'arr_delay']].groupby('carrier').mean().plot(kind='bar', figsize=(12, 8))
plt.xticks(rotation=0)
plt.xlabel('Carrier')
plt.ylabel('Average Delay in Min')
plt.title('Average Arrival Delay by Carrier in 2008, All airports')
df[['carrier', 'dep_delay']].groupby('carrier').mean().plot(kind='bar', figsize=(12, 8))
plt.xticks(rotation=0)
plt.xlabel('Carrier')
plt.ylabel('Average Delay in Min')
plt.title('Average Departure Delay by Carrier in 2008, All airports')
"""
Explanation: 从上面的图表中我们可以直观的看到一些特征。现在我们再来看看每个航空公司晚点的情况,并进行一些可视化。
End of explanation
"""
weather = pd.read_csv('data/nycflights13/weather.csv.gz')
weather
df_withweather = pd.merge(df, weather, how='left', on=['year', 'month', 'day', 'hour'])
df_withweather
airports = pd.read_csv('data/nycflights13/airports.csv.gz')
airports
df_withairport = pd.merge(df_withweather, airports, how='left', left_on='dest', right_on='faa')
df_withairport
"""
Explanation: 从上面的图表里我们可以看到F9(Frontier Airlines)几乎是最经常晚点的,而夏威夷(HA)在这方面表现最好。
5.3 Joins
我们有多个数据集,天气、机场的。现在我们来看一下如何把两个表连接在一起
End of explanation
"""
import numpy as np
a = np.array([1, 2, 3])
a
"""
Explanation: 6 Numpy和SciPy
Numpy和SciPy是Python数据科学的CP。早期Python的list比较慢,并且对于处理矩阵和向量运算不太好,因此有了Numpy来解决这个问题。它引入了array-type的数据类型。
创建数组:
End of explanation
"""
np.arange(10)
# 给序列乘以一个系数
np.arange(10) * np.pi
"""
Explanation: 注意这里我们传的是列表,而不是np.array(1, 2, 3)。
现在我们创建一个arange
End of explanation
"""
a = np.array([1, 2, 3, 4, 5, 6])
a.shape = (2, 3)
a
"""
Explanation: 我们也可以使用shape方法从一维数组创建多维数组
End of explanation
"""
np.matrix('1 2; 3 4')
#矩阵乘
a1 = np.matrix('1 2; 3 4')
a2 = np.matrix('3 4; 5 7')
a1 * a2
#array转换为矩阵
mat_a = np.mat(a1)
mat_a
"""
Explanation: 6.1 矩阵Matrix
End of explanation
"""
import numpy, scipy.sparse
n = 100000
x = (numpy.random.rand(n) * 2).astype(int).astype(float) #50%稀疏矩阵
x_csr = scipy.sparse.csr_matrix(x)
x_dok = scipy.sparse.dok_matrix(x.reshape(x_csr.shape))
x_dok
"""
Explanation: 6.2 稀疏矩阵(Sparse Matrices)
End of explanation
"""
import csv
with open('data/array/array.csv', 'r') as csvfile:
csvreader = csv.reader(csvfile)
data = []
for row in csvreader:
row = [float(x) for x in row]
data.append(row)
data
"""
Explanation: 6.3 从CSV文件中加载数据
End of explanation
"""
import numpy as np
import scipy as sp
a = np.array([[3, 2, 0], [1, -1, 0], [0, 5, 1]])
b = np.array([2, 4, -1])
x = np.linalg.solve(a, b)
x
#检查结果是否正确
np.dot(a, x) == b
"""
Explanation: 6.4 求解矩阵方程(Solving a matrix)
End of explanation
"""
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler, OneHotEncoder
flights = pd.read_csv('data/nycflights13/flights.csv.gz')
weather = pd.read_csv('data/nycflights13/weather.csv.gz')
airports = pd.read_csv('data/nycflights13/airports.csv.gz')
df_withweather = pd.merge(flights, weather, how='left', on=['year', 'month', 'day', 'hour'])
df = pd.merge(df_withweather, airports, how='left', left_on='dest', right_on='faa')
df = df.dropna()
df
"""
Explanation: 7 Scikit-learn 简介
前面我们介绍了pandas和numpy、scipy。现在我们来介绍python机器库Scikit。首先需要先知道机器学习的两种:
监督学习(Supervised Learning): 从训练集建立模型进行预测
非监督学习(Unsupervised Learning): 从数据中推测模型,比如从文本中找出主题
Scikit-learn有以下特性:
- 预处理(Preprocessing):为机器学习reshape数据
- 降维处理(Dimensionality reduction):减少变量的重复
- 分类(Classification): 预测分类
- 回归(regression):预测连续变量
- 聚类(Clustering):从数据中发现自然的模式
- 模型选取(Model Selection):为数据找到最优模型
这里我们还是看nycflights13的数据集。
End of explanation
"""
pred = 'dep_delay'
features = ['month', 'day', 'dep_time', 'arr_time', 'carrier', 'dest', 'air_time',
'distance', 'lat', 'lon', 'alt', 'dewp', 'humid', 'wind_speed', 'wind_gust',
'precip', 'pressure', 'visib']
features_v = df[features]
pred_v = df[pred]
pd.options.mode.chained_assignment = None #default='warn'
# 因为航空公司不是一个数字,我们把它转化为数字哑变量
features_v['carrier'] = pd.factorize(features_v['carrier'])[0]
# dest也不是一个数字,我们也把它转为数字
features_v['dest'] = pd.factorize(features_v['dest'])[0]
features_v
"""
Explanation: 7.1 特征向量
End of explanation
"""
# 因为各个特征的维度各不相同,我们需要做标准化
scaler = StandardScaler()
scaled_features = scaler.fit_transform(features_v)
scaled_features
"""
Explanation: 7.2 对特征向量进行标准化(Scaling the feature vector)
End of explanation
"""
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
X_r = pca.fit(scaled_features).transform(scaled_features)
X_r
"""
Explanation: 7.3 特征降维(Reducing Dimensions)
我们使用PCA(Principle Component Analysis主成分析)把特征降维为2个
End of explanation
"""
import matplotlib.pyplot as plt
print('explained variance ratio (first two components): %s'
% str(pca.explained_variance_ratio_))
plt.figure()
lw = 2
plt.scatter(X_r[:,0], X_r[:,1], alpha=.8, lw=lw)
plt.title('PCA of flights dataset')
"""
Explanation: 7.4 画图(Plotting)
End of explanation
"""
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn
from sklearn import linear_model, cross_validation, metrics, svm, ensemble
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support, accuracy_score
from sklearn.cross_validation import train_test_split, cross_val_score, ShuffleSplit
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, OneHotEncoder
flights = pd.read_csv('data/nycflights13/flights.csv.gz')
weather = pd.read_csv('data/nycflights13/weather.csv.gz')
airports = pd.read_csv('data/nycflights13/airports.csv.gz')
df_withweather = pd.merge(flights, weather, how='left', on=['year', 'month', 'day', 'hour'])
df = pd.merge(df_withweather, airports, how='left', left_on='dest', right_on='faa')
df = df.dropna()
df
pred = 'dep_delay'
features = ['month', 'day', 'dep_time', 'arr_time', 'carrier', 'dest', 'air_time',
'distance', 'lat', 'lon', 'alt', 'dewp', 'humid', 'wind_speed', 'wind_gust',
'precip', 'pressure', 'visib']
features_v = df[features]
pred_v = df[pred]
how_late_is_late = 15.0
pd.options.mode.chained_assignment = None #default='warn'
# 因为航空公司不是一个数字,我们把它转化为数字哑变量
features_v['carrier'] = pd.factorize(features_v['carrier'])[0]
# dest也不是一个数字,我们也把它转为数字
features_v['dest'] = pd.factorize(features_v['dest'])[0]
scaler = StandardScaler()
scaled_features_v = scaler.fit_transform(features_v)
features_train, features_test, pred_train, pred_test = train_test_split(
scaled_features_v, pred_v, test_size=0.30, random_state=0)
# 使用logistic回归来执行分类
clf_lr = sklearn.linear_model.LogisticRegression(penalty='l2',
class_weight='balanced')
logistic_fit = clf_lr.fit(features_train, np.where(pred_train >= how_late_is_late, 1, 0))
predictions = clf_lr.predict(features_test)
# summary Report
# Confusion Matrix
cm_lr = confusion_matrix(np.where(pred_test >= how_late_is_late, 1, 0),
predictions)
print("Confusion Matrix")
print(pd.DataFrame(cm_lr))
# 获取精确值
report_lr = precision_recall_fscore_support(
list(np.where(pred_test >= how_late_is_late, 1, 0)),
list(predictions), average='binary')
#打印精度值
print("\nprecision = %0.2f, recall = %0.2f, F1 = %0.2f, accuracy = %0.2f"
% (report_lr[0], report_lr[1], report_lr[2],
accuracy_score(list(np.where(pred_test >= how_late_is_late, 1, 0)),
list(predictions))))
"""
Explanation: 8 构建分类器(Build a classifier)
我们来预测一个航班是否会晚点
End of explanation
"""
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import sklearn
from sklearn.cluster import KMeans
from sklearn import linear_model, cross_validation, cluster
from sklearn.metrics import classification_report, confusion_matrix, precision_recall_fscore_support, accuracy_score
from sklearn.cross_validation import train_test_split, cross_val_score, ShuffleSplit
from sklearn.preprocessing import StandardScaler, OneHotEncoder
flights = pd.read_csv('data/nycflights13/flights.csv.gz')
weather = pd.read_csv('data/nycflights13/weather.csv.gz')
airports = pd.read_csv('data/nycflights13/airports.csv.gz')
df_withweather = pd.merge(flights, weather, how='left', on=['year', 'month', 'day', 'hour'])
df = pd.merge(df_withweather, airports, how='left', left_on='dest', right_on='faa')
df = df.dropna()
pred = 'dep_delay'
features = ['month', 'day', 'dep_time', 'arr_time', 'carrier', 'dest', 'air_time',
'distance', 'lat', 'lon', 'alt', 'dewp', 'humid', 'wind_speed', 'wind_gust',
'precip', 'pressure', 'visib']
features_v = df[features]
pred_v = df[pred]
how_late_is_late = 15.0
pd.options.mode.chained_assignment = None #default='warn'
# 因为航空公司不是一个数字,我们把它转化为数字哑变量
features_v['carrier'] = pd.factorize(features_v['carrier'])[0]
# dest也不是一个数字,我们也把它转为数字
features_v['dest'] = pd.factorize(features_v['dest'])[0]
scaler = StandardScaler()
scaled_features_v = scaler.fit_transform(features_v)
features_train, features_test, pred_train, pred_test = train_test_split(
scaled_features_v, pred_v, test_size=0.30, random_state=0)
cluster = sklearn.cluster.KMeans(n_clusters=8, init='k-means++', n_init=10, max_iter=300, tol=0.0001, precompute_distances='auto', random_state=None, verbose=0)
cluster.fit(features_train)
# 预测测试数据
result = cluster.predict(features_test)
result
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# Project the training features onto their first two principal components so
# the cluster assignments can be visualised in 2-D.
reduced_data = PCA(n_components=2).fit_transform(features_train)
kmeans = KMeans(init='k-means++', n_clusters=8, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh used to paint the decision regions.
h = .02
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Label every mesh point with its nearest cluster, then paint the regions.
z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(z, interpolation='nearest',
           extent=(xx.min(), xx.max(), yy.min(), yy.max()),  # fixed: kwarg was misspelled 'extend'
           cmap=plt.cm.Paired,
           aspect='auto',
           origin='lower')  # origin='lower' keeps the image aligned with the data axes
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
            marker='x', s=169, linewidths=3,
            color='w', zorder=10)
plt.title('K-Means clustering on the dataset (PCA-reduced data)\n'
          'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
"""
Explanation: 9 聚合数据(Cluster data)
最简单的聚类方法是K-Means
End of explanation
"""
# Read the README into an RDD of lines. Note: the SparkContext method is
# textFile (as stated in the notes below), not 'text'.
lines = sc.textFile('README.md')
lines.take(5)
"""
Explanation: 10 PySpark简介
扩展我们的算法:有时我们需要处理大量数据,并且采样已经无效,这个时候可以通过把数据分到多个机器来处理。
Spark是一个用来并行进行大数据处理的API。它将数据切割到集群来处理。在开发阶段,我们可以只在本地运行。
我们使用PySpark Shell来连接到集群。
运行下面路径的pyspark,会启动PySpark Shell
~/spark/bin/pyspark (Max/Linux)
C:\spark\bin\pyspark (Windows)
此时,可以在Shell中运行文件加载:
lines = sc.textFile("README.md")
lines.first() # 加载第一行
可以在http://localhost:4040查看PySpark运行的Job
大多数情况下,我们希望能够在Jupyter Notebook中运行PySpark,为此,我们需要设置环境变量:
PYSPARK_PYTHON=python3
PYSPARK_DRIVER_PYTHON="jupyter"
PYSPARK_DRIVER_PYTHON_OPTS="notebook"
然后运行~/spark/bin/pyspark,最后一个命令会启动一个jupyter server,样子跟我们用的一样。
End of explanation
"""
linesWithSpark = lines.filter(lambda line: 'spark' in line)
linesWithSpark.count()
"""
Explanation: 我们看看http://localhost:4040 可以查看运行的Job
End of explanation
"""
|
gatmeh/Udacity-deep-learning | intro-to-rnns/Anna_KaRNNa_Exercises.ipynb | mit | import time
from collections import namedtuple
import numpy as np
import tensorflow as tf
"""
Explanation: Anna KaRNNa
In this notebook, we'll build a character-wise RNN trained on Anna Karenina, one of my all-time favorite books. It'll be able to generate new text based on the text from the book.
This network is based off of Andrej Karpathy's post on RNNs and implementation in Torch. Also, some information here at r2rt and from Sherjil Ozair on GitHub. Below is the general architecture of the character-wise RNN.
<img src="assets/charseq.jpeg" width="500">
End of explanation
"""
with open('anna.txt', 'r') as f:
text=f.read()
vocab = sorted(set(text))
vocab_to_int = {c: i for i, c in enumerate(vocab)}
int_to_vocab = dict(enumerate(vocab))
encoded = np.array([vocab_to_int[c] for c in text], dtype=np.int32)
"""
Explanation: First we'll load the text file and convert it into integers for our network to use. Here I'm creating a couple dictionaries to convert the characters to and from integers. Encoding the characters as integers makes it easier to use as input in the network.
End of explanation
"""
text[:100]
"""
Explanation: Let's check out the first 100 characters, make sure everything is peachy. According to the American Book Review, this is the 6th best first line of a book ever.
End of explanation
"""
encoded[:100]
"""
Explanation: And we can see the characters encoded as integers.
End of explanation
"""
len(vocab)
"""
Explanation: Since the network is working with individual characters, it's similar to a classification problem in which we are trying to predict the next character from the previous text. Here's how many 'classes' our network has to pick from.
End of explanation
"""
def get_batches(arr, n_seqs, n_steps):
    '''Create a generator that returns batches of size
       n_seqs x n_steps from arr.

       Each yielded pair (x, y) holds the inputs and the targets, where the
       targets are the inputs shifted left by one character (the first input
       character of each window wraps around to become the last target).

       Arguments
       ---------
       arr: Array you want to make batches from
       n_seqs: Batch size, the number of sequences per batch
       n_steps: Number of sequence steps per batch
    '''
    # Get the number of characters per batch and number of batches we can make
    characters_per_batch = n_seqs * n_steps
    n_batches = len(arr) // characters_per_batch

    # Keep only enough characters to make full batches
    arr = arr[:n_batches * characters_per_batch]

    # Reshape into n_seqs rows; -1 lets numpy infer the number of columns
    arr = arr.reshape((n_seqs, -1))

    for n in range(0, arr.shape[1], n_steps):
        # The features: an n_seqs x n_steps window of the array
        x = arr[:, n:n + n_steps]
        # The targets, shifted by one; the first input char becomes the last target
        y = np.zeros_like(x)
        y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
        yield x, y
"""
Explanation: Making training mini-batches
Here is where we'll make our mini-batches for training. Remember that we want our batches to be multiple sequences of some desired number of sequence steps. Considering a simple example, our batches would look like this:
<img src="assets/sequence_batching@1x.png" width=500px>
<br>
We have our text encoded as integers as one long array in encoded. Let's create a function that will give us an iterator for our batches. I like using generator functions to do this. Then we can pass encoded into this function and get our batch generator.
The first thing we need to do is discard some of the text so we only have completely full batches. Each batch contains $N \times M$ characters, where $N$ is the batch size (the number of sequences) and $M$ is the number of steps. Then, to get the number of batches we can make from some array arr, you divide the length of arr by the batch size. Once you know the number of batches and the batch size, you can get the total number of characters to keep.
After that, we need to split arr into $N$ sequences. You can do this using arr.reshape(size) where size is a tuple containing the dimensions sizes of the reshaped array. We know we want $N$ sequences (n_seqs below), let's make that the size of the first dimension. For the second dimension, you can use -1 as a placeholder in the size, it'll fill up the array with the appropriate data for you. After this, you should have an array that is $N \times (M * K)$ where $K$ is the number of batches.
Now that we have this array, we can iterate through it to get our batches. The idea is each batch is a $N \times M$ window on the array. For each subsequent batch, the window moves over by n_steps. We also want to create both the input and target arrays. Remember that the targets are the inputs shifted over one character. You'll usually see the first input character used as the last target character, so something like this:
python
y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]
where x is the input batch and y is the target batch.
The way I like to do this window is use range to take steps of size n_steps from $0$ to arr.shape[1], the total number of steps in each sequence. That way, the integers you get from range always point to the start of a batch, and each window is n_steps wide.
Exercise: Write the code for creating batches in the function below. The exercises in this notebook will not be easy. I've provided a notebook with solutions alongside this notebook. If you get stuck, checkout the solutions. The most important thing is that you don't copy and paste the code into here, type out the solution code yourself.
End of explanation
"""
batches = get_batches(encoded, 10, 50)
x, y = next(batches)
print('x\n', x[:10, :10])
print('\ny\n', y[:10, :10])
"""
Explanation: Now I'll make my data sets and we can check out what's going on here. Here I'm going to use a batch size of 10 and 50 sequence steps.
End of explanation
"""
def build_inputs(batch_size, num_steps):
    ''' Define placeholders for inputs, targets, and dropout

        Arguments
        ---------
        batch_size: Batch size, number of sequences per batch
        num_steps: Number of sequence steps in a batch

        Returns the (inputs, targets, keep_prob) placeholder tensors.
    '''
    # Declare placeholders we'll feed into the graph; inputs and targets are
    # integer-encoded characters of shape (batch_size, num_steps)
    inputs = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
    targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')

    # Keep probability placeholder for drop out layers (a 0-D scalar tensor)
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    return inputs, targets, keep_prob
"""
Explanation: If you implemented get_batches correctly, the above output should look something like
```
x
[[55 63 69 22 6 76 45 5 16 35]
[ 5 69 1 5 12 52 6 5 56 52]
[48 29 12 61 35 35 8 64 76 78]
[12 5 24 39 45 29 12 56 5 63]
[ 5 29 6 5 29 78 28 5 78 29]
[ 5 13 6 5 36 69 78 35 52 12]
[63 76 12 5 18 52 1 76 5 58]
[34 5 73 39 6 5 12 52 36 5]
[ 6 5 29 78 12 79 6 61 5 59]
[ 5 78 69 29 24 5 6 52 5 63]]
y
[[63 69 22 6 76 45 5 16 35 35]
[69 1 5 12 52 6 5 56 52 29]
[29 12 61 35 35 8 64 76 78 28]
[ 5 24 39 45 29 12 56 5 63 29]
[29 6 5 29 78 28 5 78 29 45]
[13 6 5 36 69 78 35 52 12 43]
[76 12 5 18 52 1 76 5 58 52]
[ 5 73 39 6 5 12 52 36 5 78]
[ 5 29 78 12 79 6 61 5 59 63]
[78 69 29 24 5 6 52 5 63 76]]
```
although the exact numbers will be different. Check to make sure the data is shifted over one step for `y`.
Building the model
Below is where you'll build the network. We'll break it up into parts so it's easier to reason about each bit. Then we can connect them up into the whole network.
<img src="assets/charRNN.png" width=500px>
Inputs
First off we'll create our input placeholders. As usual we need placeholders for the training data and the targets. We'll also create a placeholder for dropout layers called keep_prob. This will be a scalar, that is a 0-D tensor. To make a scalar, you create a placeholder without giving it a size.
Exercise: Create the input placeholders in the function below.
End of explanation
"""
def build_lstm(lstm_size, num_layers, batch_size, keep_prob):
    ''' Build LSTM cell.

        Arguments
        ---------
        keep_prob: Scalar tensor (tf.placeholder) for the dropout keep probability
        lstm_size: Size of the hidden layers in the LSTM cells
        num_layers: Number of LSTM layers
        batch_size: Batch size

        Returns the stacked cell and its all-zeros initial state.
    '''
    def build_cell(lstm_size, keep_prob):
        # Use a basic LSTM cell wrapped with dropout on its outputs
        lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size)
        drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
        return drop

    # Stack up multiple LSTM layers, for deep learning. Each layer must be a
    # distinct cell object (required from TensorFlow 1.1 onwards).
    cell = tf.contrib.rnn.MultiRNNCell(
        [build_cell(lstm_size, keep_prob) for _ in range(num_layers)])
    initial_state = cell.zero_state(batch_size, tf.float32)

    return cell, initial_state
"""
Explanation: LSTM Cell
Here we will create the LSTM cell we'll use in the hidden layer. We'll use this cell as a building block for the RNN. So we aren't actually defining the RNN here, just the type of cell we'll use in the hidden layer.
We first create a basic LSTM cell with
python
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
where num_units is the number of units in the hidden layers in the cell. Then we can add dropout by wrapping it with
python
tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
You pass in a cell and it will automatically add dropout to the inputs or outputs. Finally, we can stack up the LSTM cells into layers with tf.contrib.rnn.MultiRNNCell. With this, you pass in a list of cells and it will send the output of one cell into the next cell. Previously with TensorFlow 1.0, you could do this
python
tf.contrib.rnn.MultiRNNCell([cell]*num_layers)
This might look a little weird if you know Python well because this will create a list of the same cell object. However, TensorFlow 1.0 will create different weight matrices for all cell objects. But, starting with TensorFlow 1.1 you actually need to create new cell objects in the list. To get it to work in TensorFlow 1.1, it should look like
```python
def build_cell(num_units, keep_prob):
lstm = tf.contrib.rnn.BasicLSTMCell(num_units)
drop = tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)
return drop
tf.contrib.rnn.MultiRNNCell([build_cell(num_units, keep_prob) for _ in range(num_layers)])
```
Even though this is actually multiple LSTM cells stacked on each other, you can treat the multiple layers as one cell.
We also need to create an initial cell state of all zeros. This can be done like so
python
initial_state = cell.zero_state(batch_size, tf.float32)
Below, we implement the build_lstm function to create these LSTM cells and the initial state.
End of explanation
"""
def build_output(lstm_output, in_size, out_size):
    ''' Build a softmax layer, return the softmax output and logits.

        Arguments
        ---------
        lstm_output: List of output tensors from the LSTM layer
        in_size: Size of the input tensor, for example, size of the LSTM cells
        out_size: Size of this softmax layer
    '''
    # Reshape output so it's a bunch of rows, one row for each step for each sequence.
    # Concatenate lstm_output over axis 1 (the columns)
    seq_output = tf.concat(lstm_output, axis=1)
    # Reshape seq_output to a 2D tensor with lstm_size columns
    x = tf.reshape(seq_output, [-1, in_size])

    # Connect the RNN outputs to a softmax layer. The variable scope keeps
    # these weight names from colliding with the ones the LSTM cells create.
    with tf.variable_scope('softmax'):
        softmax_w = tf.Variable(tf.truncated_normal((in_size, out_size), stddev=0.1))
        softmax_b = tf.Variable(tf.zeros(out_size))

    # Since output is a bunch of rows of RNN cell outputs, logits will be a bunch
    # of rows of logit outputs, one for each step and sequence
    logits = tf.matmul(x, softmax_w) + softmax_b

    # Use softmax to get the probabilities for predicted characters
    out = tf.nn.softmax(logits, name='predictions')

    return out, logits
"""
Explanation: RNN Output
Here we'll create the output layer. We need to connect the output of the RNN cells to a full connected layer with a softmax output. The softmax output gives us a probability distribution we can use to predict the next character, so we want this layer to have size $C$, the number of classes/characters we have in our text.
If our input has batch size $N$, number of steps $M$, and the hidden layer has $L$ hidden units, then the output is a 3D tensor with size $N \times M \times L$. The output of each LSTM cell has size $L$, we have $M$ of them, one for each sequence step, and we have $N$ sequences. So the total size is $N \times M \times L$.
We are using the same fully connected layer, the same weights, for each of the outputs. Then, to make things easier, we should reshape the outputs into a 2D tensor with shape $(M * N) \times L$. That is, one row for each sequence and step, where the values of each row are the output from the LSTM cells. We get the LSTM output as a list, lstm_output. First we need to concatenate this whole list into one array with tf.concat. Then, reshape it (with tf.reshape) to size $(M * N) \times L$.
One we have the outputs reshaped, we can do the matrix multiplication with the weights. We need to wrap the weight and bias variables in a variable scope with tf.variable_scope(scope_name) because there are weights being created in the LSTM cells. TensorFlow will throw an error if the weights created here have the same names as the weights created in the LSTM cells, which they will be default. To avoid this, we wrap the variables in a variable scope so we can give them unique names.
Exercise: Implement the output layer in the function below.
End of explanation
"""
def build_loss(logits, targets, lstm_size, num_classes):
    ''' Calculate the loss from the logits and the targets.

        Arguments
        ---------
        logits: Logits from final fully connected layer
        targets: Targets for supervised learning
        lstm_size: Number of LSTM hidden units
        num_classes: Number of classes in targets
    '''
    # One-hot encode targets and reshape to match logits, one row per sequence per step
    y_one_hot = tf.one_hot(targets, num_classes)
    y_reshaped = tf.reshape(y_one_hot, logits.get_shape())

    # Softmax cross entropy loss, averaged over every step of every sequence
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_reshaped)
    loss = tf.reduce_mean(loss)

    return loss
"""
Explanation: Training loss
Next up is the training loss. We get the logits and targets and calculate the softmax cross-entropy loss. First we need to one-hot encode the targets, we're getting them as encoded characters. Then, reshape the one-hot targets so it's a 2D tensor with size $(MN) \times C$ where $C$ is the number of classes/characters we have. Remember that we reshaped the LSTM outputs and ran them through a fully connected layer with $C$ units. So our logits will also have size $(MN) \times C$.
Then we run the logits and targets through tf.nn.softmax_cross_entropy_with_logits and find the mean to get the loss.
Exercise: Implement the loss calculation in the function below.
End of explanation
"""
def build_optimizer(loss, learning_rate, grad_clip):
    ''' Build optmizer for training, using gradient clipping.

        Arguments:

        loss: Network loss
        learning_rate: Learning rate for optimizer
    '''

    # Clip gradients by their global norm so exploding gradients cannot blow
    # up training, then apply them with Adam.
    trainable = tf.trainable_variables()
    clipped_grads, _ = tf.clip_by_global_norm(tf.gradients(loss, trainable), grad_clip)
    adam = tf.train.AdamOptimizer(learning_rate)
    optimizer = adam.apply_gradients(zip(clipped_grads, trainable))

    return optimizer
"""
Explanation: Optimizer
Here we build the optimizer. Normal RNNs have have issues gradients exploding and disappearing. LSTMs fix the disappearance problem, but the gradients can still grow without bound. To fix this, we can clip the gradients above some threshold. That is, if a gradient is larger than that threshold, we set it to the threshold. This will ensure the gradients never grow overly large. Then we use an AdamOptimizer for the learning step.
End of explanation
"""
class CharRNN:
    """Character-level RNN: input placeholders, stacked LSTM layers,
    a softmax output layer, and a gradient-clipped Adam training op.

    Set sampling=True to build a batch-size-1, one-step graph for
    generating text one character at a time.
    """

    def __init__(self, num_classes, batch_size=64, num_steps=50,
                       lstm_size=128, num_layers=2, learning_rate=0.001,
                       grad_clip=5, sampling=False):

        # When we're using this network for sampling later, we'll be passing in
        # one character at a time, so providing an option for that
        if sampling == True:
            batch_size, num_steps = 1, 1
        else:
            batch_size, num_steps = batch_size, num_steps

        tf.reset_default_graph()

        # Build the input placeholder tensors
        self.inputs, self.targets, self.keep_prob = build_inputs(batch_size, num_steps)

        # Build the LSTM cell
        cell, self.initial_state = build_lstm(lstm_size, num_layers, batch_size, self.keep_prob)

        ### Run the data through the RNN layers
        # First, one-hot encode the input tokens
        x_one_hot = tf.one_hot(self.inputs, num_classes)

        # Run each sequence step through the RNN with tf.nn.dynamic_rnn
        outputs, state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=self.initial_state)
        # Keep the final LSTM state so the next mini-batch can continue from it
        self.final_state = state

        # Get softmax predictions and logits
        self.prediction, self.logits = build_output(outputs, lstm_size, num_classes)

        # Loss and optimizer (with gradient clipping)
        self.loss = build_loss(self.logits, self.targets, lstm_size, num_classes)
        self.optimizer = build_optimizer(self.loss, learning_rate, grad_clip)
"""
Explanation: Build the network
Now we can put all the pieces together and build a class for the network. To actually run data through the LSTM cells, we will use tf.nn.dynamic_rnn. This function will pass the hidden and cell states across LSTM cells appropriately for us. It returns the outputs for each LSTM cell at each step for each sequence in the mini-batch. It also gives us the final LSTM state. We want to save this state as final_state so we can pass it to the first LSTM cell in the the next mini-batch run. For tf.nn.dynamic_rnn, we pass in the cell and initial state we get from build_lstm, as well as our input sequences. Also, we need to one-hot encode the inputs before going into the RNN.
Exercise: Use the functions you've implemented previously and tf.nn.dynamic_rnn to build the network.
End of explanation
"""
batch_size = 10 # Sequences per batch
num_steps = 50 # Number of sequence steps per batch
lstm_size = 128 # Size of hidden layers in LSTMs
num_layers = 2 # Number of LSTM layers
learning_rate = 0.01 # Learning rate
keep_prob = 0.5 # Dropout keep probability
"""
Explanation: Hyperparameters
Here are the hyperparameters for the network.
batch_size - Number of sequences running through the network in one pass.
num_steps - Number of characters in the sequence the network is trained on. Larger is better typically, the network will learn more long range dependencies. But it takes longer to train. 100 is typically a good number here.
lstm_size - The number of units in the hidden layers.
num_layers - Number of hidden LSTM layers to use
learning_rate - Learning rate for training
keep_prob - The dropout keep probability when training. If your network is overfitting, try decreasing this.
Here's some good advice from Andrej Karpathy on training the network. I'm going to copy it in here for your benefit, but also link to where it originally came from.
Tips and Tricks
Monitoring Validation Loss vs. Training Loss
If you're somewhat new to Machine Learning or Neural Networks it can take a bit of expertise to get good models. The most important quantity to keep track of is the difference between your training loss (printed during training) and the validation loss (printed once in a while when the RNN is run on the validation data (by default every 1000 iterations)). In particular:
If your training loss is much lower than validation loss then this means the network might be overfitting. Solutions to this are to decrease your network size, or to increase dropout. For example you could try dropout of 0.5 and so on.
If your training/validation loss are about equal then your model is underfitting. Increase the size of your model (either number of layers or the raw number of neurons per layer)
Approximate number of parameters
The two most important parameters that control the model are lstm_size and num_layers. I would advise that you always use num_layers of either 2/3. The lstm_size can be adjusted based on how much data you have. The two important quantities to keep track of here are:
The number of parameters in your model. This is printed when you start training.
The size of your dataset. 1MB file is approximately 1 million characters.
These two should be about the same order of magnitude. It's a little tricky to tell. Here are some examples:
I have a 100MB dataset and I'm using the default parameter settings (which currently print 150K parameters). My data size is significantly larger (100 mil >> 0.15 mil), so I expect to heavily underfit. I am thinking I can comfortably afford to make lstm_size larger.
I have a 10MB dataset and running a 10 million parameter model. I'm slightly nervous and I'm carefully monitoring my validation loss. If it's larger than my training loss then I may want to try to increase dropout a bit and see if that helps the validation loss.
Best models strategy
The winning strategy to obtaining very good models (if you have the compute time) is to always err on making the network larger (as large as you're willing to wait for it to compute) and then try different dropout values (between 0,1). Whatever model has the best validation performance (the loss, written in the checkpoint filename, low is good) is the one you should use in the end.
It is very common in deep learning to run many different models with many different hyperparameter settings, and in the end take whatever checkpoint gave the best validation performance.
By the way, the size of your training and validation splits are also parameters. Make sure you have a decent amount of data in your validation set or otherwise the validation performance will be noisy and not very informative.
End of explanation
"""
epochs = 20
# Save every N iterations
save_every_n = 200

model = CharRNN(len(vocab), batch_size=batch_size, num_steps=num_steps,
                lstm_size=lstm_size, num_layers=num_layers,
                learning_rate=learning_rate)

saver = tf.train.Saver(max_to_keep=100)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Use the line below to load a checkpoint and resume training
    #saver.restore(sess, 'checkpoints/______.ckpt')
    counter = 0
    for e in range(epochs):
        # Reset the LSTM state at the start of each epoch, then thread each
        # batch's final state into the next batch so context carries across
        # the whole epoch.
        new_state = sess.run(model.initial_state)
        for x, y in get_batches(encoded, batch_size, num_steps):
            counter += 1
            start = time.time()
            feed = {model.inputs: x,
                    model.targets: y,
                    model.keep_prob: keep_prob,
                    model.initial_state: new_state}
            batch_loss, new_state, _ = sess.run([model.loss,
                                                 model.final_state,
                                                 model.optimizer],
                                                 feed_dict=feed)
            end = time.time()
            print('Epoch: {}/{}... '.format(e+1, epochs),
                  'Training Step: {}... '.format(counter),
                  'Training loss: {:.4f}... '.format(batch_loss),
                  '{:.4f} sec/batch'.format((end-start)))
            if (counter % save_every_n == 0):
                saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
    # Always write a final checkpoint after the last epoch, even if the step
    # count is not a multiple of save_every_n.
    saver.save(sess, "checkpoints/i{}_l{}.ckpt".format(counter, lstm_size))
"""
Explanation: Time for training
This is typical training code, passing inputs and targets into the network, then running the optimizer. Here we also get back the final LSTM state for the mini-batch. Then, we pass that state back into the network so the next batch can continue the state from the previous batch. And every so often (set by save_every_n) I save a checkpoint.
Here I'm saving checkpoints with the format
i{iteration number}_l{# hidden layer units}.ckpt
Exercise: Set the hyperparameters above to train the network. Watch the training loss, it should be consistently dropping. Also, I highly advise running this on a GPU.
End of explanation
"""
tf.train.get_checkpoint_state('checkpoints')
"""
Explanation: Saved checkpoints
Read up on saving and loading checkpoints here: https://www.tensorflow.org/programmers_guide/variables
End of explanation
"""
def pick_top_n(preds, vocab_size, top_n=5):
    """Sample a character id from the top_n most likely predictions.

    Args:
        preds: array of next-character probabilities; leading singleton
            dimensions (e.g. a batch of 1) are squeezed away.
        vocab_size: number of characters in the vocabulary.
        top_n: how many of the most probable characters to sample among.

    Returns:
        An integer character id drawn from the renormalized top_n probabilities.
    """
    # Copy before zeroing: np.squeeze can return a *view* of preds, and
    # assigning into it would silently corrupt the caller's array.
    p = np.squeeze(preds).copy()
    p[np.argsort(p)[:-top_n]] = 0  # zero everything but the top_n entries
    p = p / np.sum(p)              # renormalize over the survivors
    c = np.random.choice(vocab_size, 1, p=p)[0]
    return c
def sample(checkpoint, n_samples, lstm_size, vocab_size, prime="The "):
    """Generate text from a trained checkpoint, primed with `prime`.

    Args:
        checkpoint: path of the checkpoint to restore.
        n_samples: number of characters to generate after the prime.
        lstm_size: hidden-layer size the checkpoint was trained with.
        vocab_size: size of the vocabulary.
            NOTE(review): this parameter is never used; the function uses the
            module-level len(vocab) instead -- confirm they always agree.
        prime: text used to build up the LSTM state before sampling.

    Returns:
        The prime text followed by n_samples generated characters.
    """
    samples = [c for c in prime]
    model = CharRNN(len(vocab), lstm_size=lstm_size, sampling=True)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, checkpoint)
        new_state = sess.run(model.initial_state)

        def _run_step(char_id, state):
            # Feed a single character id through the network and return
            # (prediction distribution, new LSTM state).
            x = np.zeros((1, 1))
            x[0, 0] = char_id
            feed = {model.inputs: x,
                    model.keep_prob: 1.,
                    model.initial_state: state}
            return sess.run([model.prediction, model.final_state],
                            feed_dict=feed)

        # Warm up the state on the prime text; only the last step's
        # predictions are sampled from.
        for ch in prime:
            preds, new_state = _run_step(vocab_to_int[ch], new_state)

        c = pick_top_n(preds, len(vocab))
        samples.append(int_to_vocab[c])

        # Feed each sampled character back in to generate the next one.
        for _ in range(n_samples):
            preds, new_state = _run_step(c, new_state)
            c = pick_top_n(preds, len(vocab))
            samples.append(int_to_vocab[c])

    return ''.join(samples)
"""
Explanation: Sampling
Now that the network is trained, we can use it to generate new text. The idea is that we pass in a character, then the network will predict the next character. We can use the new one to predict the next one, and we keep doing this to generate all new text. I also included some functionality to prime the network with some text by passing in a string and building up a state from that.
The network gives us predictions for each character. To reduce noise and make things a little less random, I'm going to only choose a new character from the top N most likely characters.
End of explanation
"""
tf.train.latest_checkpoint('checkpoints')
checkpoint = tf.train.latest_checkpoint('checkpoints')
samp = sample(checkpoint, 2000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i600_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
checkpoint = 'checkpoints/i1200_l512.ckpt'
samp = sample(checkpoint, 1000, lstm_size, len(vocab), prime="Far")
print(samp)
"""
Explanation: Here, pass in the path to a checkpoint and sample from the network.
End of explanation
"""
|
csaladenes/blog | airports/airportia_hu_arrv_parser.ipynb | mit | for i in locations:
print i
if i not in sch:sch[i]={}
#march 11-24 = 2 weeks
for d in range (11,25):
if d not in sch[i]:
try:
url=airportialinks[i]
full=url+'arrivals/201703'+str(d)
m=requests.get(full).content
sch[i][full]=pd.read_html(m)[0]
#print full
except: pass #print 'no tables',i,d
# Sanity check: print the number of scraped arrival rows per day for
# Budapest (BUD) across the two-week window.
for i in range(11,25):
    testurl=u'https://www.airportia.com/hungary/budapest-liszt-ferenc-international-airport/arrivals/201703'+str(i)
    print 'nr. of flights on March',i,':',len(sch['BUD'][testurl])
testurl=u'https://www.airportia.com/hungary/budapest-liszt-ferenc-international-airport/arrivals/20170318'
k=sch['BUD'][testurl]
# Spot-check one route against the live site: Frankfurt arrivals on March 18.
k[k['From']=='Frankfurt FRA']
"""
Explanation: record schedules for 2 weeks, then augment count with weekly flight numbers.
seasonal and seasonal charter will count as once per week for 3 months, so 12/52 per week. TGM separate, since its history is in the past.
End of explanation
"""
# Flatten the per-airport, per-day schedule tables into one DataFrame.
# Collect the individual frames and concatenate once at the end: calling
# pd.concat inside the loop copies the accumulated frame every iteration
# (quadratic in the number of tables).
frames = []
for airport in sch:
    for date_url in sch[airport]:
        df = sch[airport][date_url].drop(sch[airport][date_url].columns[3:], axis=1)\
                                   .drop(sch[airport][date_url].columns[0], axis=1)
        df['To'] = airport      # arrivals: flights land at this airport
        df['Date'] = date_url   # key is the full scrape URL for that day
        frames.append(df)
mdf = pd.concat(frames) if frames else pd.DataFrame()
# 'Hahn' is Frankfurt-Hahn; normalize both spellings to Frankfurt.
mdf = mdf.replace('Hahn', 'Frankfurt')
mdf = mdf.replace('Hahn HHN', 'Frankfurt HHN')
# Split the 'From' column ("City CODE") into city name and airport code.
mdf['City'] = [s[:s.rfind(' ')] for s in mdf['From']]
mdf['Airport'] = [s[s.rfind(' ') + 1:] for s in mdf['From']]
k = mdf[mdf['Date'] == testurl]
k[k['From'] == 'Frankfurt FRA']
"""
Explanation: sch checks out with source
End of explanation
"""
# Persist the flattened schedule plus derived lookup tables as JSON.
file("mdf_hu_arrv.json",'w').write(json.dumps(mdf.reset_index().to_json()))
len(mdf)
airlines=set(mdf['Airline'])
cities=set(mdf['City'])
file("cities_hu_arrv.json",'w').write(json.dumps(list(cities)))
file("airlines_hu_arrv.json",'w').write(json.dumps(list(airlines)))
# Geocode each origin city once. Ambiguous city names are disambiguated by
# hand before querying so the geocoder picks the intended country.
citycoords={}
for i in cities:
    if i not in citycoords:
        if i==u'Birmingham': z='Birmingham, UK'
        elif i==u'Valencia': z='Valencia, Spain'
        elif i==u'Naples': z='Naples, Italy'
        elif i==u'St. Petersburg': z='St. Petersburg, Russia'
        elif i==u'Bristol': z='Bristol, UK'
        else: z=i
        citycoords[i]=Geocoder(apik).geocode(z)
        print i
# Keep only the fields we need (lat/lon pair and country) and save.
citysave={}
for i in citycoords:
    citysave[i]={"coords":citycoords[i][0].coordinates,
                 "country":citycoords[i][0].country}
file("citysave_hu_arrv.json",'w').write(json.dumps(citysave))
"""
Explanation: mdf checks out with source
End of explanation
"""
|
marcotcr/lime | doc/notebooks/Tutorial - Image Classification Keras.ipynb | bsd-2-clause | import os
import keras
from keras.applications import inception_v3 as inc_net
from keras.preprocessing import image
from keras.applications.imagenet_utils import decode_predictions
from skimage.io import imread
import matplotlib.pyplot as plt
%matplotlib inline
import numpy as np
print('Notebook run using keras:', keras.__version__)
"""
Explanation: Here is a simpler example of the use of LIME for image classification by using Keras (v2 or greater)
End of explanation
"""
inet_model = inc_net.InceptionV3()
def transform_img_fn(path_list):
    """Load each image path, resize to 299x299, and stack into one
    Inception-preprocessed batch array."""
    batch = []
    for path in path_list:
        pil_img = image.load_img(path, target_size=(299, 299))
        arr = image.img_to_array(pil_img)
        arr = arr[np.newaxis, ...]  # add the batch dimension
        batch.append(inc_net.preprocess_input(arr))
    return np.vstack(batch)
"""
Explanation: Using Inception
Here we create a standard InceptionV3 pretrained model and use it on images by first preprocessing them with the preprocessing tools
End of explanation
"""
images = transform_img_fn([os.path.join('data','cat_mouse.jpg')])
# I'm dividing by 2 and adding 0.5 because of how this Inception represents images
plt.imshow(images[0] / 2 + 0.5)
preds = inet_model.predict(images)
# decode_predictions returns the top-5 (class_id, class_name, score) tuples.
for x in decode_predictions(preds)[0]:
    print(x)
"""
Explanation: Let's see the top 5 prediction for some image
End of explanation
"""
%load_ext autoreload
%autoreload 2
import os,sys
try:
import lime
except:
sys.path.append(os.path.join('..', '..')) # add the current directory
import lime
from lime import lime_image
explainer = lime_image.LimeImageExplainer()
"""
Explanation: Explanation
Now let's get an explanation
End of explanation
"""
%%time
# Hide color is the color for a superpixel turned OFF. Alternatively, if it is NONE, the superpixel will be replaced by the average of its pixels
explanation = explainer.explain_instance(images[0].astype('double'), inet_model.predict, top_labels=5, hide_color=0, num_samples=1000)
"""
Explanation: hide_color is the color for a superpixel turned OFF. Alternatively, if it is NONE, the superpixel will be replaced by the average of its pixels. Here, we set it to 0 (in the representation used by inception model, 0 means gray)
End of explanation
"""
from skimage.segmentation import mark_boundaries
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=True, num_features=5, hide_rest=True)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
"""
Explanation: Image classifiers are a bit slow. Notice that an explanation on my Surface Book dGPU took 1min 12s
Now let's see the explanation for the top class ( Black Bear)
We can see the top 5 superpixels that are most positive towards the class with the rest of the image hidden
End of explanation
"""
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=True, num_features=5, hide_rest=False)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
"""
Explanation: Or with the rest of the image present:
End of explanation
"""
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=10, hide_rest=False)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
"""
Explanation: We can also see the 'pros and cons' (pros in green, cons in red)
End of explanation
"""
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, num_features=1000, hide_rest=False, min_weight=0.1)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
"""
Explanation: Or the pros and cons that have weight at least 0.1
End of explanation
"""
#Select the same class explained on the figures above.
ind = explanation.top_labels[0]
#Map each explanation weight to the corresponding superpixel
dict_heatmap = dict(explanation.local_exp[ind])
heatmap = np.vectorize(dict_heatmap.get)(explanation.segments)
#Plot. The visualization makes more sense if a symmetrical colorbar is used.
plt.imshow(heatmap, cmap = 'RdBu', vmin = -heatmap.max(), vmax = heatmap.max())
plt.colorbar()
"""
Explanation: Alternatively, we can also plot explanation weights onto a heatmap visualization. The colorbar shows the values of the weights.
End of explanation
"""
temp, mask = explanation.get_image_and_mask(106, positive_only=True, num_features=5, hide_rest=True)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
"""
Explanation: Let's see the explanation for the second highest prediction
Most positive towards wombat:
End of explanation
"""
temp, mask = explanation.get_image_and_mask(106, positive_only=False, num_features=10, hide_rest=False)
plt.imshow(mark_boundaries(temp / 2 + 0.5, mask))
"""
Explanation: Pros and cons:
End of explanation
"""
|
zzsza/Datascience_School | 06. 기초 선형대수/05. 행렬의 연산과 성질.ipynb | mit | A = (np.arange(9) - 4).reshape((3, 3))
A
np.linalg.norm(A)
"""
Explanation: 행렬의 연산과 성질
행렬에는 곱셈, 전치 이외에도 지수 함수 등의 다양한 연산을 정의할 수 있다. 각각의 정의와 성질을 알아보자.
행렬의 부호
행렬은 복수의 실수 값을 가지고 있으므로 행렬 전체의 부호는 정의할 수 없다. 하지만 행렬에서도 실수의 부호 정의와 유사한 기능을 가지는 정의가 존재한다. 바로 행렬의 양-한정(positive definite) 특성이다. (정방행렬에 한정됨)
쿼드라틱 Form의 결과는 실수값
모든 실수 공간 $\mathbb{R}^n$ 의 0벡터가 아닌 벡터 $x \in \mathbb{R}^n$ 에 대해 다음 부등식이 성립하면 행렬 $A$ 가 양-한정(positive definite)이라고 한다.
$$ x^T A x > 0 $$
만약 이 식이 등호를 포함한다면 양-반한정(positive semi-definite)이라고 한다.
$$ x^T A x \geq 0 $$
예를 들어 단위 행렬은 양-한정이다.
$$ x^TI x = x^T
\begin{bmatrix}
1&0&\cdots&0\
0&1&\cdots&0\
\vdots&\vdots&\ddots&\vdots\
0&0&\cdots&1\
\end{bmatrix}
x
= x_1^2 + x_2^2 + \cdots + x_n^2 > 0
$$
다음과 같은 행렬도 양-한정이다.
$$ M = \begin{bmatrix} 2&-1&0\-1&2&-1\0&-1&2 \end{bmatrix} $$
$$
\begin{align}
x^{\mathrm{T}}M x
&= \begin{bmatrix} (2x_1-b)&(-x_1+2x_2-x_3)&(-x_2+2c) \end{bmatrix} \begin{bmatrix} x_1\x_2\x_3 \end{bmatrix} \
&= 2{x_1}^2 - 2x_1x_2 + 2{x_2}^2 - 2x_2x_3 + 2{x_3}^2 \
&= {x_1}^2+(x_1 - x_2)^{2} + (x_2 - x_3)^{2}+{x_3}^2
\end{align}
$$
행렬의 크기
행렬에는 크기 개념과 유사하게 하나의 행렬에 대해 하나의 실수를 대응시키는 norm, 대각 성분(trace), 행렬식(determinant)에 대한 정의가 존재한다.
행렬 Norm
행렬의 norm 정의는 다양하지만 그 중 많이 쓰이는 induced p-norm 정의는 다음과 같다. 시그마2개 : 가로 / 세로로 행렬화 해서 다 더해라. 1/p로 차원을 축소
$$ \Vert A \Vert_p = \left( \sum_{i=1}^m \sum_{j=1}^n |a_{ij}|^p \right)^{1/p} $$
이 중 $p=2$는 특별히 Frobenius norm 이라고 불리며 다음과 같이 표시한다.
$$ \Vert A \Vert_F = \sqrt{\sum_{i=1}^m \sum_{j=1}^n a_{ij}^2} $$
NumPy에서는 linalg 서브패키지의 norm 명령으로 Frobenious norm을 계산할 수 있다.
End of explanation
"""
np.trace(np.eye(3))
"""
Explanation: 대각 성분
대각 성분(trace) 행렬의 특성을 결정하는 숫자 중 하나로 정방 행렬(square matrix)에 대해서만 정의되며 다음과 같이 대각 성분(diaginal)의 합으로 계산된다.
$$ \operatorname{tr}(A) = a_{11} + a_{22} + \dots + a_{nn}=\sum_{i=1}^{n} a_{ii} $$
대각 성분은 다음과 같은 성질을 지닌다.
$$ \text{tr} (cA) = c\text{tr} (A) $$
$$ \text{tr} (A^T) = \text{tr} (A) $$
$$ \text{tr} (A + B) = \text{tr} (A) + \text{tr} (B)$$
$$ \text{tr} (AB) = \text{tr} (BA) $$
$$ \text{tr} (ABC) = \text{tr} (BCA) = \text{tr} (CAB) $$
실수는 그 자체가 trace임
특히 마지막 성질은 trace trick이라고 하여 이차 형식(quadratic form)의 값을 구하는데 유용하게 사용된다.
$$ x^TAx = \text{tr}(x^TAx) = \text{tr}(Axx^T) = \text{tr}(xx^TA) $$
NumPy에서는 trace 명령으로 trace를 계산할 수 있다.
End of explanation
"""
A = np.array([[1, 2], [3, 4]])
A
np.linalg.det(A)
"""
Explanation: 행렬식
정방 행렬 $A$의 행렬식(determinant) $\det (A)$ 는 Laplace formula라고 불리는 재귀적인 방법으로 정의된다.
이 식에서 $a_{i,j}$는 $A$의 i행, j열 원소이고 $M_{i,j}$은 정방 행렬 $A$ 에서 i행과 j열을 지워서 얻어진 행렬의 행렬식이다.
$$ \det(A) = \sum_{j=1}^n (-1)^{i+j} a_{i,j} M_{i,j} $$
행렬식은 다음과 같은 성질을 만족한다.
$$ \det(I) = 1 $$
$$ \det(A^{\rm T}) = \det(A) $$
$$ \det(A^{-1}) = \frac{1}{\det(A)}=\det(A)^{-1} $$
$$ \det(AB) = \det(A)\det(B) $$
$$ A \in \mathbf{R}^n \;\;\; \rightarrow \;\;\; \det(cA) = c^n\det(A) $$
또한 역행렬은 행렬식과 다음과 같은 관계를 가진다.
$$ A^{-1} = \dfrac{1}{\det A} M = \dfrac{1}{\det A}
\begin{bmatrix}
M_{1,1}&\cdots&M_{1,n}\
\vdots&\ddots&\vdots\
M_{n,1}&\cdots&M_{n,n}\
\end{bmatrix}
$$
NumPy에서는 linalg 서브패키지의 det 명령으로 det를 계산할 수 있다.
End of explanation
"""
A = np.array([[1.0, 3.0], [1.0, 4.0]])
A
B = sp.linalg.logm(A)
B
sp.linalg.expm(B)
"""
Explanation: 전치 행렬과 대칭 행렬
전치 연산을 통해서 얻어진 행렬을 전치 행렬(transpose matrix)이라고 한다.
$$ [\mathbf{A}^\mathrm{T}]{ij} = [\mathbf{A}]{ji} $$
만약 전치 행렬과 원래의 행렬이 같으면 대칭 행렬(symmetric matrix)이라고 한다.
$$ A^\mathrm{T} = A $$
전치 연산은 다음과 같은 성질을 만족한다.
$$ ( A^\mathrm{T} ) ^\mathrm{T} = A $$
$$ (A+B) ^\mathrm{T} = A^\mathrm{T} + B^\mathrm{T} $$
$$ \left( A B \right) ^\mathrm{T} = B^\mathrm{T} A^\mathrm{T} $$
$$ \det(A^\mathrm{T}) = \det(A) $$
$$ (A^\mathrm{T})^{-1} = (A^{-1})^\mathrm{T} $$
지수 행렬
행렬 $A$에 대해 다음과 같은 급수로 만들어지는 행렬 $e^A=\exp A$ 를 지수 행렬(exponential matrix)이라고 한다.
A가 행렬인 경우임
테일러 시리즈.
$$ e^X = \sum_{k=0}^\infty \dfrac{X^k}{k!} = I + X + \dfrac{1}{2}X^2 + \dfrac{1}{3!}X^3 + \cdots $$
지수 행렬은 다음과 같은 성질을 만족한다.
$$ e^0 = I $$
$$ e^{aX} e^{bX} = e^{(a+b)X} $$
$$ e^X e^{-X} = I $$
$$ XY = YX \;\; \rightarrow \;\; e^Xe^Y = e^Ye^X = e^{X+Y} $$
로그 행렬
행렬 $A$에 대해 다음과 같은 급수로 만들어지는 행렬 $B=e^A$ 가 존재할 때, $A$를 $B$에 대한 로그 행렬이라고 하고 다음과 같이 표기한다.
$$ A = \log B $$
로그 행렬은 다음과 같은 성질은 만족한다.
만약 행렬 $A$, $B$가 모두 양-한정(positive definite)이고 $AB=BA$이면
$$ AB = e^{\ln(A)+\ln(B)} $$
만약 행렬 $A$의 역행렬이 존재하면
$$ A^{-1} = e^{-\ln(A)} $$
지수 행렬이나 로그 행렬은 NumPy에서 계산할 수 없다. SciPy의 linalg 서브패키지의 expm, logm 명령을 사용한다.
End of explanation
"""
|
Naereen/notebooks | Test_for_Binder__access_local_packages.ipynb | mit | import sys
print("Path (sys.path):")
for f in sys.path:
print(f)
import os
print("Current directory:")
print(os.getcwd())
"""
Explanation: Table of Contents
<p><div class="lev1 toc-item"><a href="#Test-for-Binder-v2" data-toc-modified-id="Test-for-Binder-v2-1"><span class="toc-item-num">1 </span>Test for Binder v2</a></div><div class="lev2 toc-item"><a href="#Sys-&-OS-modules" data-toc-modified-id="Sys-&-OS-modules-11"><span class="toc-item-num">1.1 </span>Sys & OS modules</a></div><div class="lev2 toc-item"><a href="#Importing-a-file" data-toc-modified-id="Importing-a-file-12"><span class="toc-item-num">1.2 </span>Importing a file</a></div><div class="lev2 toc-item"><a href="#Conclusion" data-toc-modified-id="Conclusion-13"><span class="toc-item-num">1.3 </span>Conclusion</a></div>
# Test for Binder v2
## Sys & OS modules
End of explanation
"""
import agreg.memoisation
"""
Explanation: Importing a file
I will import this file from the agreg/ sub-folder.
End of explanation
"""
|
deepchem/deepchem | examples/tutorials/Learning_Unsupervised_Embeddings_for_Molecules.ipynb | mit | !pip install --pre deepchem
import deepchem
deepchem.__version__
"""
Explanation: Learning Unsupervised Embeddings for Molecules
In this tutorial, we will use a SeqToSeq model to generate fingerprints for classifying molecules. This is based on the following paper, although some of the implementation details are different: Xu et al., "Seq2seq Fingerprint: An Unsupervised Deep Molecular Embedding for Drug Discovery" (https://doi.org/10.1145/3107411.3107424).
Colab
This tutorial and the rest in this sequence can be done in Google colab. If you'd like to open this notebook in colab, you can use the following link.
End of explanation
"""
import deepchem as dc
# MUV: ~75k training molecules; the 'stratified' split balances sparse labels.
tasks, datasets, transformers = dc.molnet.load_muv(split='stratified')
train_dataset, valid_dataset, test_dataset = datasets
train_smiles = train_dataset.ids  # SMILES strings are the model's sequences
valid_smiles = valid_dataset.ids
"""
Explanation: Learning Embeddings with SeqToSeq
Many types of models require their inputs to have a fixed shape. Since molecules can vary widely in the numbers of atoms and bonds they contain, this makes it hard to apply those models to them. We need a way of generating a fixed length "fingerprint" for each molecule. Various ways of doing this have been designed, such as the Extended-Connectivity Fingerprints (ECFPs) we used in earlier tutorials. But in this example, instead of designing a fingerprint by hand, we will let a SeqToSeq model learn its own method of creating fingerprints.
A SeqToSeq model performs sequence to sequence translation. For example, they are often used to translate text from one language to another. It consists of two parts called the "encoder" and "decoder". The encoder is a stack of recurrent layers. The input sequence is fed into it, one token at a time, and it generates a fixed length vector called the "embedding vector". The decoder is another stack of recurrent layers that performs the inverse operation: it takes the embedding vector as input, and generates the output sequence. By training it on appropriately chosen input/output pairs, you can create a model that performs many sorts of transformations.
In this case, we will use SMILES strings describing molecules as the input sequences. We will train the model as an autoencoder, so it tries to make the output sequences identical to the input sequences. For that to work, the encoder must create embedding vectors that contain all information from the original sequence. That's exactly what we want in a fingerprint, so perhaps those embedding vectors will then be useful as a way to represent molecules in other models!
Let's start by loading the data. We will use the MUV dataset. It includes 74,501 molecules in the training set, and 9313 molecules in the validation set, so it gives us plenty of SMILES strings to work with.
End of explanation
"""
# Build the model's alphabet: every character that appears in any training
# SMILES string, sorted for a deterministic token ordering.
tokens = set()
for s in train_smiles:
    # update() adds the string's characters directly, instead of allocating a
    # throwaway set and a new union set per string as before.
    tokens.update(s)
tokens = sorted(tokens)
"""
Explanation: We need to define the "alphabet" for our SeqToSeq model, the list of all tokens that can appear in sequences. (It's also possible for input and output sequences to have different alphabets, but since we're training it as an autoencoder, they're identical in this case.) Make a list of every character that appears in any training sequence.
End of explanation
"""
from deepchem.models.optimizers import Adam, ExponentialDecay
max_length = max(len(s) for s in train_smiles)
batch_size = 100
batches_per_epoch = len(train_smiles)/batch_size
# Input and output alphabets are identical: the model is trained as an
# autoencoder. The learning rate is multiplied by 0.9 after each epoch.
model = dc.models.SeqToSeq(tokens,
                           tokens,
                           max_length,
                           encoder_layers=2,
                           decoder_layers=2,
                           embedding_dimension=256,
                           model_dir='fingerprint',
                           batch_size=batch_size,
                           learning_rate=ExponentialDecay(0.001, 0.9, batches_per_epoch))
"""
Explanation: Create the model and define the optimization method to use. In this case, learning works much better if we gradually decrease the learning rate. We use an ExponentialDecay to multiply the learning rate by 0.9 after each epoch.
End of explanation
"""
def generate_sequences(epochs):
    """Yield (input, output) training pairs: each SMILES string maps to
    itself, since the model is trained as an autoencoder."""
    for i in range(epochs):
        for s in train_smiles:
            yield (s, s)

model.fit_sequences(generate_sequences(40))
"""
Explanation: Let's train it! The input to fit_sequences() is a generator that produces input/output pairs. On a good GPU, this should take a few hours or less.
End of explanation
"""
# Decode the first 500 validation molecules and count exact reconstructions.
predicted = model.predict_from_sequences(valid_smiles[:500])
count = sum(1 for s, p in zip(valid_smiles[:500], predicted) if ''.join(p) == s)
print('reproduced', count, 'of 500 validation SMILES strings')
"""
Explanation: Let's see how well it works as an autoencoder. We'll run the first 500 molecules from the validation set through it, and see how many of them are exactly reproduced.
End of explanation
"""
import numpy as np
# Replace each molecule's features with its learned embedding vector, keeping
# the original labels, weights, and ids. Everything fits in memory.
train_embeddings = model.predict_embeddings(train_smiles)
train_embeddings_dataset = dc.data.NumpyDataset(train_embeddings,
                                                train_dataset.y,
                                                train_dataset.w.astype(np.float32),
                                                train_dataset.ids)
valid_embeddings = model.predict_embeddings(valid_smiles)
valid_embeddings_dataset = dc.data.NumpyDataset(valid_embeddings,
                                                valid_dataset.y,
                                                valid_dataset.w.astype(np.float32),
                                                valid_dataset.ids)
"""
Explanation: Now we'll trying using the encoder as a way to generate molecular fingerprints. We compute the embedding vectors for all molecules in the training and validation datasets, and create new datasets that have those as their feature vectors. The amount of data is small enough that we can just store everything in memory.
End of explanation
"""
classifier = dc.models.MultitaskClassifier(n_tasks=len(tasks),
n_features=256,
layer_sizes=[512])
classifier.fit(train_embeddings_dataset, nb_epoch=10)
"""
Explanation: For classification, we'll use a simple fully connected network with one hidden layer.
End of explanation
"""
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean, mode="classification")
train_score = classifier.evaluate(train_embeddings_dataset, [metric], transformers)
valid_score = classifier.evaluate(valid_embeddings_dataset, [metric], transformers)
print('Training set ROC AUC:', train_score)
print('Validation set ROC AUC:', valid_score)
"""
Explanation: Find out how well it worked. Compute the ROC AUC for the training and validation datasets.
End of explanation
"""
|
tensorflow/workshops | tfx_airflow/notebooks/step4.ipynb | apache-2.0 | from __future__ import print_function
import os
import tempfile
import pandas as pd
import tensorflow as tf
import tensorflow_transform as tft
from tensorflow_transform import beam as tft_beam
import tfx_utils
from tfx.utils import io_utils
from tensorflow_metadata.proto.v0 import schema_pb2
# For DatasetMetadata boilerplate
from tensorflow_transform.tf_metadata import dataset_metadata
from tensorflow_transform.tf_metadata import dataset_schema
from tensorflow_transform.tf_metadata import schema_utils
tf.get_logger().propagate = False
def _make_default_sqlite_uri(pipeline_name):
return os.path.join(os.environ['HOME'], 'airflow/tfx/metadata', pipeline_name, 'metadata.db')
def get_metadata_store(pipeline_name):
    """Open a read-only view of the pipeline's ML Metadata SQLite store."""
    return tfx_utils.TFXReadonlyMetadataStore.from_sqlite_db(_make_default_sqlite_uri(pipeline_name))
pipeline_name = 'taxi'
pipeline_db_path = _make_default_sqlite_uri(pipeline_name)
print('Pipeline DB:\n{}'.format(pipeline_db_path))
store = get_metadata_store(pipeline_name)
"""
Explanation: Step 4: Feature Engineering
Use the code below to run TensorFlow Transform on some example data using the schema from your pipeline. Start by importing and opening the metadata store.
End of explanation
"""
# Get the schema URI from the metadata store
schemas = store.get_artifacts_of_type_df(tfx_utils.TFXArtifactTypes.SCHEMA)
assert len(schemas.URI) == 1
schema_uri = schemas.URI.iloc[0] + 'schema.pbtxt'
print ('Schema URI:\n{}'.format(schema_uri))
"""
Explanation: Get the schema URI from the metadata store
End of explanation
"""
schema_proto = io_utils.parse_pbtxt_file(file_name=schema_uri, message=schema_pb2.Schema())
feature_spec, domains = schema_utils.schema_as_feature_spec(schema_proto)
legacy_metadata = dataset_metadata.DatasetMetadata(dataset_schema.from_feature_spec(feature_spec, domains))
"""
Explanation: Get the schema that was inferred by TensorFlow Data Validation
End of explanation
"""
# Categorical features are assumed to each have a maximum value in the dataset.
MAX_CATEGORICAL_FEATURE_VALUES = [24, 31, 12]
CATEGORICAL_FEATURE_KEYS = [
'trip_start_hour', 'trip_start_day', 'trip_start_month',
'pickup_census_tract', 'dropoff_census_tract', 'pickup_community_area',
'dropoff_community_area'
]
DENSE_FLOAT_FEATURE_KEYS = ['trip_miles', 'fare', 'trip_seconds']
# Number of buckets used by tf.transform for encoding each feature.
FEATURE_BUCKET_COUNT = 10
BUCKET_FEATURE_KEYS = [
'pickup_latitude', 'pickup_longitude', 'dropoff_latitude',
'dropoff_longitude'
]
# Number of vocabulary terms used for encoding VOCAB_FEATURES by tf.transform
VOCAB_SIZE = 1000
# Count of out-of-vocab buckets in which unrecognized VOCAB_FEATURES are hashed.
OOV_SIZE = 10
VOCAB_FEATURE_KEYS = [
'payment_type',
'company',
]
# Keys
LABEL_KEY = 'tips'
FARE_KEY = 'fare'
def transformed_name(key):
return key + '_xf'
def _transformed_names(keys):
return [transformed_name(key) for key in keys]
# Tf.Transform considers these features as "raw"
def _get_raw_feature_spec(schema):
    """Derive the raw (pre-transform) feature spec from a schema proto."""
    return schema_utils.schema_as_feature_spec(schema).feature_spec
def _gzip_reader_fn(filenames):
    """Small utility returning a record reader that can read gzip'ed files."""
    # TFX materializes examples as gzip-compressed TFRecord files.
    return tf.data.TFRecordDataset(
        filenames,
        compression_type='GZIP')
def _fill_in_missing(x):
    """Replace missing values in a SparseTensor.

    Fills in missing values of `x` with '' or 0, and converts to a dense tensor.

    Args:
        x: A `SparseTensor` of rank 2. Its dense shape should have size at most 1
            in the second dimension.

    Returns:
        A rank 1 tensor where missing values of `x` have been filled in.
    """
    # '' for string features, 0 for numeric ones.
    default_value = '' if x.dtype == tf.string else 0
    # Densify the (batch, 1) sparse column, then squeeze away the
    # singleton second dimension to get a rank-1 tensor.
    return tf.squeeze(
        tf.sparse.to_dense(
            tf.SparseTensor(x.indices, x.values, [x.dense_shape[0], 1]),
            default_value),
        axis=1)
def preprocessing_fn(inputs):
    """tf.transform's callback function for preprocessing inputs.

    Args:
        inputs: map from feature keys to raw not-yet-transformed features.

    Returns:
        Map from string feature key to transformed feature operations.
    """
    outputs = {}
    for key in DENSE_FLOAT_FEATURE_KEYS:
        # Preserve this feature as a dense float, setting nan's to the mean.
        outputs[transformed_name(key)] = tft.scale_to_z_score(
            _fill_in_missing(inputs[key]))
    for key in VOCAB_FEATURE_KEYS:
        # Build a vocabulary for this feature.
        outputs[transformed_name(key)] = tft.compute_and_apply_vocabulary(
            _fill_in_missing(inputs[key]),
            top_k=VOCAB_SIZE,
            num_oov_buckets=OOV_SIZE)
    # Quantile-bucketize continuous lat/long coordinates.
    for key in BUCKET_FEATURE_KEYS:
        outputs[transformed_name(key)] = tft.bucketize(
            _fill_in_missing(inputs[key]), FEATURE_BUCKET_COUNT,
            always_return_num_quantiles=False)
    # Small-cardinality integer features pass through unchanged.
    for key in CATEGORICAL_FEATURE_KEYS:
        outputs[transformed_name(key)] = _fill_in_missing(inputs[key])
    # Was this passenger a big tipper? Derive the binary label:
    # 1 if tip > 20% of fare, 0 otherwise (and 0 when fare is NaN).
    taxi_fare = _fill_in_missing(inputs[FARE_KEY])
    tips = _fill_in_missing(inputs[LABEL_KEY])
    outputs[transformed_name(LABEL_KEY)] = tf.where(
        tf.math.is_nan(taxi_fare),
        tf.cast(tf.zeros_like(taxi_fare), tf.int64),
        # Test if the tip was > 20% of the fare.
        tf.cast(
            tf.greater(tips, tf.multiply(taxi_fare, tf.constant(0.2))), tf.int64))
    return outputs
"""
Explanation: Define features and create functions for TensorFlow Transform
End of explanation
"""
from IPython.display import display
with tft_beam.Context(temp_dir=tempfile.mkdtemp()):
raw_examples = [
{
"fare": [100.0],
"trip_start_hour": [12],
"pickup_census_tract": ['abcd'],
"dropoff_census_tract": [12345.0], # No idea why this is a float
"company": ['taxi inc.'],
"trip_start_timestamp": [123456],
"pickup_longitude": [12.0],
"trip_start_month": [5],
"trip_miles": [8.0],
"dropoff_longitude": [12.05],
"dropoff_community_area": [123],
"pickup_community_area": [123],
"payment_type": ['visa'],
"trip_seconds": [600.0],
"trip_start_day": [12],
"tips": [10.0],
"pickup_latitude": [80.0],
"dropoff_latitude": [80.01],
}
]
(transformed_examples, transformed_metadata), transform_fn = (
(raw_examples, legacy_metadata)
| 'AnalyzeAndTransform' >> tft_beam.AnalyzeAndTransformDataset(
preprocessing_fn))
display(pd.DataFrame(transformed_examples))
"""
Explanation: Display the results of transforming some example data
End of explanation
"""
|
probml/pyprobml | notebooks/book1/19/finetune_cnn_torch.ipynb | mit | import numpy as np
import matplotlib.pyplot as plt
np.random.seed(seed=1)
import math
import os
try:
import torch
except ModuleNotFoundError:
%pip install -qq torch
import torch
from torch import nn
from torch.nn import functional as F
try:
import torchvision
except ModuleNotFoundError:
%pip install -qq torchvision
import torchvision
!mkdir figures # for saving plots
!wget https://raw.githubusercontent.com/d2l-ai/d2l-en/master/d2l/torch.py -q -O d2l.py
import d2l
"""
Explanation: Please find jax implementation of this notebook here: https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/book1/19/finetune_cnn_jax.ipynb
<a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/finetune_cnn_torch.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
Fine-tuning a resnet image classifier to classify hotdog vs not-hotdog
We illustrate how to fine-tune a resnet classifier which has been pre-trained on ImageNet.
Based on sec 13.2 of
http://d2l.ai/chapter_computer-vision/fine-tuning.html.
The target dataset consists of 2 classes (hotdog vs no hotdog), and has 1400 images of each. (This example is inspired by Season 4, Episode 4 of the TV show Silicon Valley.
End of explanation
"""
d2l.DATA_HUB["hotdog"] = (d2l.DATA_URL + "hotdog.zip", "fba480ffa8aa7e0febbb511d181409f899b9baa5")
data_dir = d2l.download_extract("hotdog")
train_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, "train"))
test_imgs = torchvision.datasets.ImageFolder(os.path.join(data_dir, "test"))
"""
Explanation: Dataset
End of explanation
"""
hotdogs = [train_imgs[i][0] for i in range(8)]
not_hotdogs = [train_imgs[-i - 1][0] for i in range(8)]
d2l.show_images(hotdogs + not_hotdogs, 2, 8, scale=1.4);
"""
Explanation: We show the first 8 positive and last 8 negative images. We see the aspect ratio is quite different.
End of explanation
"""
# We specify the mean and variance of the three RGB channels to normalize the
# image channel
normalize = torchvision.transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
train_augs = torchvision.transforms.Compose(
[
torchvision.transforms.RandomResizedCrop(224),
torchvision.transforms.RandomHorizontalFlip(),
torchvision.transforms.ToTensor(),
normalize,
]
)
test_augs = torchvision.transforms.Compose(
[
torchvision.transforms.Resize(256),
torchvision.transforms.CenterCrop(224),
torchvision.transforms.ToTensor(),
normalize,
]
)
"""
Explanation: We use data augmentation at train and test time, as shown below.
End of explanation
"""
pretrained_net = torchvision.models.resnet18(pretrained=True)
"""
Explanation: Model
End of explanation
"""
# Start from the ImageNet-pretrained backbone, replace the final fully
# connected layer (1000 ImageNet classes) with a fresh 2-class head, and
# initialize only that new layer.
finetune_net = torchvision.models.resnet18(pretrained=True)
finetune_net.fc = nn.Linear(finetune_net.fc.in_features, 2)
nn.init.xavier_uniform_(finetune_net.fc.weight)
"""
Explanation: The final layer is called fc, for fully connected.
End of explanation
"""
def train_batch(net, X, y, loss, trainer, devices):
    """Run one optimization step on a single minibatch.

    Parameters
    ----------
    net : torch.nn.Module
        Model to train; inputs are moved to ``devices[0]``.
    X, y : torch.Tensor
        Input batch and integer class labels.
    loss : callable
        Per-example loss (e.g. ``nn.CrossEntropyLoss(reduction="none")``),
        so ``loss(pred, y)`` returns one value per example.
    trainer : torch.optim.Optimizer
        Optimizer covering ``net``'s parameters.
    devices : sequence
        Available devices; only ``devices[0]`` is used here.

    Returns
    -------
    tuple
        ``(train_loss_sum, train_acc_sum)``: summed loss over the batch and
        the number of correct predictions (via ``d2l.accuracy``).
    """
    X = X.to(devices[0])
    y = y.to(devices[0])
    net.train()  # enable dropout / batch-norm training behavior
    trainer.zero_grad()
    pred = net(X)
    l = loss(pred, y)
    # Sum once and reuse; the original recomputed l.sum() after backward().
    train_loss_sum = l.sum()
    train_loss_sum.backward()
    trainer.step()
    train_acc_sum = d2l.accuracy(pred, y)
    return train_loss_sum, train_acc_sum
def train(net, train_iter, test_iter, loss, trainer, num_epochs, devices=d2l.try_all_gpus()):
    """Train ``net`` for ``num_epochs``, animating train loss/acc and test acc.

    ``loss`` must return per-example losses (reduction="none"); ``trainer``
    is a torch optimizer. Only devices[0] is used (DataParallel commented out).
    NOTE(review): the ``devices`` default is evaluated once at import time.
    """
    timer, num_batches = d2l.Timer(), len(train_iter)
    animator = d2l.Animator(
        xlabel="epoch", xlim=[1, num_epochs], ylim=[0, 1], legend=["train loss", "train acc", "test acc"]
    )
    # net = nn.DataParallel(net, device_ids=devices).to(devices[0])
    net = net.to(devices[0])
    for epoch in range(num_epochs):
        # Store training_loss, training_accuracy, num_examples, num_features
        metric = d2l.Accumulator(4)
        for i, (features, labels) in enumerate(train_iter):
            timer.start()
            l, acc = train_batch(net, features, labels, loss, trainer, devices)
            metric.add(l, acc, labels.shape[0], labels.numel())
            timer.stop()
            # Refresh the plot 5 times per epoch and on the final batch.
            # NOTE(review): num_batches // 5 raises ZeroDivisionError when
            # there are fewer than 5 batches — confirm loaders are large enough.
            if (i + 1) % (num_batches // 5) == 0 or i == num_batches - 1:
                animator.add(epoch + (i + 1) / num_batches, (metric[0] / metric[2], metric[1] / metric[3], None))
        test_acc = d2l.evaluate_accuracy_gpu(net, test_iter)
        animator.add(epoch + 1, (None, None, test_acc))
    print(f"loss {metric[0] / metric[2]:.3f}, train acc " f"{metric[1] / metric[3]:.3f}, test acc {test_acc:.3f}")
    print(f"{metric[2] * num_epochs / timer.sum():.1f} examples/sec on " f"{str(devices)}")
"""
Explanation: Fine tuning
In D2L, they call their training routine train_ch13, since it is in their chapter 13. We modify their code so it uses a single GPU, by commenting out the DataParallel part.
End of explanation
"""
def train_fine_tuning(net, learning_rate, batch_size=128, num_epochs=5, param_group=True):
    """Fine-tune ``net`` on the hotdog dataset.

    When ``param_group`` is True, all parameters except the freshly
    initialized ``fc`` layer are updated at ``learning_rate``, while ``fc``
    uses a 10x larger rate; otherwise a single rate is applied everywhere.
    """
    loader_train = torch.utils.data.DataLoader(
        torchvision.datasets.ImageFolder(os.path.join(data_dir, "train"), transform=train_augs),
        batch_size=batch_size,
        shuffle=True,
    )
    loader_test = torch.utils.data.DataLoader(
        torchvision.datasets.ImageFolder(os.path.join(data_dir, "test"), transform=test_augs), batch_size=batch_size
    )
    gpus = d2l.try_all_gpus()
    # Per-example losses; train_batch sums them before backprop.
    criterion = nn.CrossEntropyLoss(reduction="none")
    if not param_group:
        optimizer = torch.optim.SGD(net.parameters(), lr=learning_rate, weight_decay=0.001)
    else:
        fc_names = {"fc.weight", "fc.bias"}
        body_params = [param for name, param in net.named_parameters() if name not in fc_names]
        optimizer = torch.optim.SGD(
            [{"params": body_params}, {"params": net.fc.parameters(), "lr": learning_rate * 10}],
            lr=learning_rate,
            weight_decay=0.001,
        )
    train(net, loader_train, loader_test, criterion, optimizer, num_epochs, gpus)
train_fine_tuning(finetune_net, 5e-5)
"""
Explanation: We update all the parameters, but use a 10x larger learning rate for the fc layer.
End of explanation
"""
net = finetune_net.to("cpu")
net.eval(); # set to eval mode (not training)
fname = os.path.join(data_dir, "test", "hotdog", "1000.png")
from PIL import Image
img = Image.open(fname)
display(img)
img_t = test_augs(img) # convert to tensor
batch_t = torch.unsqueeze(img_t, 0)
out = net(batch_t)
probs = F.softmax(out, dim=1)
print(probs)
fname = os.path.join(data_dir, "test", "not-hotdog", "1000.png")
from PIL import Image
img = Image.open(fname)
display(img)
img_t = test_augs(img) # convert to tensor
batch_t = torch.unsqueeze(img_t, 0)
out = net(batch_t)
probs = F.softmax(out, dim=1)
print(probs)
"""
Explanation: Test the model
End of explanation
"""
|
intellimath/pyaxon | examples/axon_object_serialization.ipynb | mit | from __future__ import print_function, unicode_literals
from axon.api import loads, dumps
from IPython.display import HTML, display
"""
Explanation: This post continue series about AXON and pyaxon. Now we consider some examples of object serialization/deserialization.
<!-- TEASER_END -->
End of explanation
"""
text = """
graph {
nodes: [
&1 node {x:1 y:1}
&2 node {x:1 y:2}
&3 node {x:2 y:2}
]
edges: [
edge {*1 *2}
edge {*1 *3}
edge {*2 *3}
]
}
"""
"""
Explanation: Simple graph example
Below is AXON text that represents a graph by the way of definition of sequences of nodes and edges. Each node has a reference label and each edge specifies left and right nodes by its reference labels. Using native support of references in AXON makes such representation straightforward.
End of explanation
"""
class Base(object):
    """Common base rendering instances as '<ClassName>: <attribute dict>'."""

    def __str__(self):
        # Same text the original produced via the '%s: %r' format.
        return '{0}: {1!r}'.format(type(self).__name__, self.__dict__)

    # repr mirrors str so interactive display matches print().
    __repr__ = __str__
class Graph(Base):
    """A graph holding lists of Node and Edge objects."""

    def __init__(self, nodes=None, edges=None):
        # Copy the inputs defensively; any falsy value yields an empty list.
        self.nodes = [] if not nodes else list(nodes)
        self.edges = [] if not edges else list(edges)
class Node(Base):
    """A 2-D node with coordinates x and y."""

    def __init__(self, x, y):
        self.x, self.y = x, y
class Edge(Base):
    """An edge connecting two nodes, stored as left/right endpoints."""

    def __init__(self, p1, p2):
        # Preserve argument order: p1 -> left, p2 -> right.
        self.left = p1
        self.right = p2
"""
Explanation: Bellow we define Graph, Node, Edge classes.
End of explanation
"""
from axon.utils import factory, reduce
import axon
# Factories: map AXON tag names back to Python objects during loading.
# Each receives the node's attribute mapping and positional sequence.
@factory('graph')
def create_graph(attrs, args):
    # 'graph' nodes carry their nodes/edges as attributes.
    return Graph(**attrs)
@factory('node')
def create_node(attrs, args):
    # 'node' nodes carry x/y as attributes.
    return Node(**attrs)
@factory('edge')
def create_edge(attrs, args):
    # 'edge' nodes carry their two endpoints positionally.
    return Edge(*args)
# Reducers: convert Python objects into AXON nodes during dumping.
@reduce(Graph)
def reduce_graph(graph):
    return axon.node('graph', {'nodes': graph.nodes, 'edges': graph.edges})
@reduce(Node)
def reduce_node(node):
    return axon.node('node', {'x': node.x, 'y': node.y})
@reduce(Edge)
def reduce_edge(edge):
    # Endpoints are serialized positionally (no attribute mapping).
    return axon.node('edge', None, [edge.left, edge.right])
"""
Explanation: Then we define and register reduce/factory for dumping/loading.
End of explanation
"""
g = loads(text, mode='strict')
display(HTML(u'<b>Graph object:</b>'))
print(g[0])
display(HTML(u'<b>Compact dump:</b>'))
print(dumps(g, crossref=1))
display(HTML(u'<b>Formatted dump without braces:</b>'))
print(dumps(g, pretty=1, crossref=1, hsize=4))
display(HTML(u'<b>Formatted dump with braces:</b>'))
print(dumps(g, pretty=1, braces=1, crossref=1, hsize=4))
"""
Explanation: Now we can load AXON message with graph definition into Graph object and dump it.
End of explanation
"""
from axon import dump_as_str, as_unicode, factory, reduce
import numpy as np
# Factory registered under the AXON tag 'ndarray': rebuilds a numpy array
# from the serialized attributes (shape, dtype) and nested-list data.
@factory('ndarray')
def create_array(mapping, sequence):
    """Reconstruct a numpy array from its AXON node representation."""
    shape = mapping.get('shape', None)  # absent for structured (record) arrays
    dtype = mapping['dtype']
    if type(dtype) is list:
        # Structured dtype: coerce field names/types back to native str.
        dtype = [(str(n), str(t)) for n, t in dtype]
    a = np.array(sequence, dtype=dtype)
    if shape is not None:
        a.shape = shape
    return a
# Reducer: converts a numpy array into an AXON 'ndarray' node for dumping.
@reduce(np.ndarray)
def reduce_array(a):
    """Serialize a numpy array (plain or structured) into an AXON node."""
    # Byte-order prefixes in dtype strings ('<i8', '>f4', ...) are stripped
    # so the dumped dtype is platform-neutral.
    signes = {'<', '=', '>', '!'}
    if len(a.dtype.descr) > 1:
        # Structured array: keep the per-field (name, type) pairs.
        dtype = [
            (as_unicode(n), (as_unicode(t[1:]) \
                if t[0] in signes \
                else as_unicode(t)))
            for n, t in a.dtype.descr]
        return axon.node('ndarray', {'dtype':dtype}, a.tolist())
    else:
        # Plain array: strip any byte-order prefix and record the shape so
        # the factory can restore it on load.
        dtype_str = a.dtype.str
        dtype_str = as_unicode(dtype_str[1:]) \
            if dtype_str[0] in signes \
            else as_unicode(dtype_str)
        return axon.node('ndarray', {'shape': a.shape, 'dtype':as_unicode(dtype_str)}, a.tolist())
dump_as_str(np.int8)
dump_as_str(np.int16)
dump_as_str(np.int32)
dump_as_str(np.int64)
dump_as_str(np.float16)
dump_as_str(np.float32)
dump_as_str(np.float64)
dump_as_str(np.float128)
dump_as_str(np.int_)
dump_as_str(np.float_)
dump_as_str(np.double)
a = np.array([[1, 2, 3], [3, 4, 5], [5, 6, 7]])
display(HTML('<b>Compact form:</b>'))
text = dumps([a])
print(text)
b = loads(text, mode="strict")[0]
display(HTML('<b>Formatted form with braces:</b>'))
text = dumps([a], pretty=1, braces=1, hsize=4)
print(text)
display(HTML('<b>Formatted form with braces:</b>'))
text = dumps([a], pretty=1, hsize=4)
print(text)
b = loads(text, mode="strict")[0]
a = np.array(
[(1, 2, 3.0), (3, 4, 5.0), (4, 5, 6.0)],
dtype=[('x', int), ('y', int), ('z', float)])
text = dumps([a])
print('val=', text)
b = loads(text, mode="strict")[0]
print('val=', repr(b))
display(HTML('<b>Formatted form</b>:'))
text = dumps([a])
print('val=', text)
display(HTML('<b>Formatted form</b>:'))
text = dumps([a], pretty=1, braces=1, hsize=3)
print('val=', text)
display(HTML('<b>Indented form:</b>'))
text = dumps([a], pretty=1, hsize=3)
print('val=', text)
"""
Explanation: Numpy arrays
Below we consider example of transformation of numpy array objects to/from AXON text. Let's define and register reduce/factory functions.
End of explanation
"""
|
neuro-data-science/neuro_data_science | python/modeling/linear_models_and_bootstrapping.ipynb | gpl-3.0 | import sys
sys.path.append('../src/')
import opencourse as oc
import numpy as np
import scipy.stats as stt
import matplotlib.pyplot as plt
import pandas as pd
from scipy import polyfit
from scipy.ndimage.filters import gaussian_filter1d
%matplotlib inline
# Below we'll plot the PDF of a normal distribution.
mean, std = 0, 1
inputs = np.arange(-4, 4, .01)
prob = stt.norm.pdf(inputs, mean, std)
fig, ax = plt.subplots()
ax.plot(inputs, prob)
"""
Explanation: Statistical Approaches to Neuroscience
Theoretical quantities describe a probability distribution. Estimates of theoretical quantities from data are noisy and we must quantify the accuracy of our estimate.
To warm up, let's consider a particular probability distribution — the standard normal, with mean $\mu = 0$ and standard deviation $\sigma = 1$. The corresponding probability density function (pdf) is
$$p(x|\mu=0, \sigma=1)=\frac{1}{\sqrt{2\pi}}exp(\frac{-x^2}{2})$$
which is the familiar "bell curve," pictured below
End of explanation
"""
def simulate_data(n_datapoints, beta_1, beta_0, noise_func=np.random.randn):
    """Draw (x, y) samples from the linear model y = beta_1*x + beta_0 + noise.

    x is uniform on [0, 1); noise comes from ``noise_func`` (standard normal
    by default). Returns a pair of length-``n_datapoints`` arrays.
    """
    # Draw predictors first, then noise, so seeded runs stay reproducible.
    x = np.random.rand(n_datapoints)
    y = beta_1 * x + beta_0 + noise_func(n_datapoints)
    return x, y
def fit_model_to_data(x, y, model_degree=1):
    """Least-squares polynomial fit; returns coefficients, highest degree first."""
    return polyfit(x, y, model_degree)
n_datapoints = 25
beta_1 = 5
beta_0 = 3
x, y = simulate_data(n_datapoints, beta_1, beta_0)
beta_1_hat, beta_0_hat = fit_model_to_data(x, y)
# Create "test" predicted points for our two models
x_pred = np.linspace(x.min(), x.max(), 1000)
# The "true" model
y_pred_true = x_pred * beta_1 + beta_0
y_pred_model = x_pred * beta_1_hat + beta_0_hat
# Now plot the sample datapoints and our model
fig, ax = plt.subplots()
ax.plot(x, y, 'b.')
ax.plot(x_pred, y_pred_true, 'k')
ax.plot(x_pred, y_pred_model, 'r')
"""
Explanation: Take and $\beta_0=3$ and $\beta_1=5$, then we can generate a dataset from our statistical model, such as the one pictured below. In black we plot the line $y=3+5x$, and in red we plot the line of best fit, in the least squares sense.
End of explanation
"""
n_datapoints = 25
n_simulations = 1000
beta_1 = 5
beta_0 = 3
betas = np.zeros([n_simulations, 2])
simulations = np.zeros([n_simulations, x_pred.shape[-1]])
for ii in range(n_simulations):
x = np.random.rand(n_datapoints)
noise = np.random.randn(n_datapoints)
y = beta_1 * x + beta_0 + noise
beta_1_hat, beta_0_hat = polyfit(x, y, 1)
y_pred_model = x_pred * beta_1_hat + beta_0_hat
betas[ii] = [beta_0_hat, beta_1_hat]
simulations[ii] = y_pred_model
fig, axs = plt.subplots(1, 2, sharey=True)
for ii, (ax, ibeta) in enumerate(zip(axs, betas.T)):
ax.hist(ibeta)
ax.set_title("Estimated Beta {}\nMean: {:.3f}\nSTD: {:.3f}".format(
ii, ibeta.mean(), ibeta.std()))
"""
Explanation: Of course, the red and the black lines are not identical, because our datapoints are a random sample from our statistical model. If we were to resample our data, we would get an entirely different set of datapoints, and consequently a new set of estimates.
In fact, let's see this in action, and quantify the variability of our least squares estimate of the slope of the line. Let's sample multiple datasets from our model, and for each dataset estimate the slope of the least squares line. We can then plot a histogram of our estimated slopes. Again, the procedure is:
Generate multiple datasets from our known statistical model
Calculate the statistic of interest for each dataset (here, the slope), and collect them into a vector (say
slopeStar)
Estimate the SE by calculating the standard deviation of the entries of slopeStar.
End of explanation
"""
def simulate_multiple_data_sets(beta_1, beta_0, sample_sizes,
                                noise_func=np.random.randn, n_simulations=1000,
                                n_col=2):
    """Histogram the estimated slope across simulated datasets, one subplot
    per sample size, and return the matplotlib figure."""
    n_row = int(np.ceil(len(sample_sizes) / float(n_col)))
    fig, axs = plt.subplots(n_row, n_col, figsize=(3 * n_col, 3 * n_row), sharex=True)
    for size, panel in zip(sample_sizes, axs.ravel()):
        slope_estimates = np.zeros([n_simulations, 2])
        for trial in range(n_simulations):
            x, y = simulate_data(size, beta_1, beta_0, noise_func=noise_func)
            slope_estimates[trial] = fit_model_to_data(x, y)
        # polyfit returns [slope, intercept]; column 0 holds the slope.
        panel.hist(slope_estimates[:, 0])
        panel.set_title('Sample size: {}'.format(size))
    _ = fig.suptitle(r'Distribution of $\beta_1$', fontsize=20)
    return fig
### QUESTION ANSWER
sample_sizes = [10, 20, 40, 80]
n_simulations = 1000
fig = simulate_multiple_data_sets(beta_1, beta_0, sample_sizes)
_ = plt.setp(fig.axes, xlim=[0, 8])
### QUESTION ANSWER
def my_noise_func(n):
    """Skewed noise sample of length n (scaled Beta(1, 3)), centered to mean 0."""
    draws = 4 * np.random.beta(1, 3, n)
    centered = draws - draws.mean()
    return centered
fig, ax = plt.subplots()
ax.hist(my_noise_func(100), bins=20)
### QUESTION ANSWER
# Effect of different noise distributions on the empirical mean
# Define noise function here
empirical_means = np.zeros(n_simulations)
# Run simulations
for ii in range(n_simulations):
x, y = simulate_data(n_datapoints, beta_1, beta_0, noise_func=my_noise_func)
empirical_means[ii] = np.mean(y)
# Plot the results
fig, ax = plt.subplots()
_ = ax.hist(empirical_means, bins=20)
### QUESTION ANSWER
# Fit multiple datasets and show how error dist changes betas
fig = simulate_multiple_data_sets(beta_1, beta_0, sample_sizes,
noise_func=my_noise_func)
_ = plt.setp(fig.axes, xlim=[0, 8])
"""
Explanation: Here we see that the estimates for the slope of the least squares line have a histogram that looks like
it could have plausibly been generated from a normal distribution, centered around the true slope. The histogram also yields an estimate of the standard error of the slope estimates. While in this simple case we could have easily derived the standard error theoretically, it is very easy to do so through simulation. Further, there are some instances where the standard error is diffucult to come by theoretically, and so simulation is a critical tool.
QUESTION: What happens as sample size increases or decreases (e.g. sizeDataSet=15, 100, 250, 500, 1000)? . What happens if the errors are not normally distributed?
ADVANCED QUESTION: What happens to the variability of the empirical mean as the number of simulated datasets (numberDataSets) increases?
End of explanation
"""
from scipy import io as si
data = si.loadmat('../../data/StevensonV2.mat')
# This defines the neuron and target locations we care about
neuron_n = 192
target_location = [0.0706, -0.0709]
# Extract useful information from our dataset
all_spikes = data['spikes']
spikes = all_spikes[neuron_n]
time = data['time']
# This is the onset of each trial
onsets = data['startBins'][0]
# This determines where the target was on each trial
locations = data['targets']
locations = locations.T[:, :2]
unique_locations = np.unique(locations)
n_trials = onsets.shape[0]
# Define time and the sampling frequency of data
time_step = data['timeBase']
sfreq = (1. / time_step).squeeze()
# Define trials with the target location
diff = (locations - target_location) < 1e-4
mask_use = diff.all(axis=1)
# Low-pass the spikes to smooth
spikes_low = gaussian_filter1d(spikes.astype(float),5)
"""
Explanation: So far we have used simulation to show that estimates of statistics of interest are inherently variable across datasets. In practice, however, we only collect one dataset, but we still want to quantify the variability of our estimate. It turns out that the simulation procedure from above is still useful to us.
The bootstrap helps us to quantify the SE for a statistic of interest.
In the section above we empirically showed the validity of some theoretical claims, though we relied on both knowlege of the probability distribution- and knowlege of its parameters- in order to generate multiple datasets. In practice, we may be willing to assume that a certain probability distribution could have reasonably generated our data, though we certainly don't know the true parameters of that distribution. In order to understand the variability of an estimate of a statistic, however, we can still use the framework developed above. Specifically our new procedure, the parametric bootstrap, is
Estimate the parameters of the assumed probability distribution using our dataset and the maximum likelihood estimation procedure.
Generate multiple datasets (pseudo-data) from the assumed distribution, plugging in the parameter estimates in place of their theoretical quantities.
Calculate the statistic of interest for each dataset, and collect them in a vector (say tStar)
Estimate the SE by calculating the standard deviation of the entries of tStar.
The nonparametric bootstrap helps us to quantify the SE for a statistic of interest when our distribution is unknown or highly complex.
The general procedure for the nonparametric bootstrap is essentially the same as before. The steps are
Generate multiple datasets (pseudo-data)
Calculate the statistic of interest for each dataset and collect the statistics 3. Estimate the SE by calculating the standard deviation of the statistics.
The only question that remains is how to generate our pseudo-data. We do this by bootstrap sampling (or sampling with replacement) from our actual dataset.
Let's practice the nonparametric bootstrap with a PSTH. Here our dataset consists of stimulus-locked trials, where each trial is represented by a vector with entries representing the number of spikes in the
respective 50ms bin. Let be the vector of spike counts for the trial, then we can calculate the PSTH as
$$PSTH=\frac{\sum{x_i}}{N*.05}$$
where N is the total number of trials.
TASK: From the StevensonV2 dataset, plot the smoothed PSTH for neuron 193 and target location [0.0706 -0.0709]. Provide 95% confidence bands for the PSTH obtained through the nonparametric bootstrap. Finally, from the smoothed PSTH, estimate the location of the maximal firing rate in the PSTH. From your bootstrapped samples, obtain the bootstrapped distribution of the maximal firing rate.
The bootstrap procedure is:
Generate multiple (say, 1000) datasets (pseudo-data) by obtaining a bootstrap sample from the original dataset
Calculate the statistic of interest for each dataset (here it will be the smoothed PSTH)
In the simulation above we stored the statistic of interest (the mean) for each dataset in a vector called
meanStar. In this case, each psuedo-dataset will generate a PSTH, which we will store as a row vector
in a matrix called psthStar.
Smooth each bootstrapped PSTH. For now, we can simply smooth each PSTH with a Gaussian filter.
Set the window width to 5.
To obtain the 95% confidence interval we can sort each column independently and record the 25th and
975th entries for each column (assuming we are generating 1000 bootsrapped datasets). Plot these
confidence bands.
Calculate the location of the maximal firing rate for each bootstrapped dataset.
Plot the distribution of maximal firing rate locations.
For your convenience, a dataset for neuron 193 and target location [0.0706 -0.0709] is generated in the code below. To play around with a different neuron or location, simply change the variables "neuron" and "targetInterest". The variable you will use from this section is "trialMat" which collects the trials along the rows.
End of explanation
"""
# Convert data into epochs
wmin, wmax = -5., 15.
epochs = []
for i_onset in onsets[mask_use]:
this_spikes = spikes_low[i_onset + int(wmin): i_onset + int(wmax)]
epochs.append(this_spikes)
epochs = np.array(epochs)
n_ep = len(epochs)
# Define time for our epochs
tmin = wmin / sfreq
tmax = wmax / sfreq
times = np.linspace(tmin, tmax, num=epochs.shape[-1])
"""
Explanation: Now that we have information about when each trial begins, we can slice our data so that we collect a window around each trial. Here we'll define the window, and create a new array of shape (trials, neurons, times). We'll use the phrase epochs interchangeably with trials.
End of explanation
"""
# Bootstrap lo / hi at each time point
# For every time bin, resample trials with replacement n_boots times and
# record the mean firing rate of each resample; the 2.5/97.5 percentiles of
# those means give a 95% confidence band for the PSTH.
n_boots = 1000
boot_means = np.zeros([n_boots, len(times)])
for ii, i_time in enumerate(times):  # i_time is unused; ii indexes the bin
    for jj in range(n_boots):
        # epochs has shape (trials, times); resample along the trial axis.
        sample = epochs[:, ii][np.random.randint(0, n_ep, n_ep)]
        boot_means[jj, ii] = sample.mean()
# Time-bin index of the peak firing rate within each bootstrap replicate.
max_times = boot_means.argmax(axis=1)
clo, chi = np.percentile(boot_means, [2.5, 97.5], axis=0)
# Plot the mean firing rate across trials
fig, ax = plt.subplots()
ax.plot(times, epochs.mean(0), 'k')
ax.fill_between(times, clo, chi, alpha=.3, color='k')
ax.set_title('Mean +/- 95% CI PSTH')
plt.autoscale(tight=True)
"""
Explanation: We'll now bootstrap lower / upper bounds for the activity at each timepoint in a trial. We'll do this by considering the data across trials.
End of explanation
"""
fig, ax = plt.subplots()
_ = ax.hist(times[max_times], bins=20)
ax.set_title('Maximum time in each bootstrap')
"""
Explanation: Finally, we can plot the timepoint that had the most activity in each bootstrap iteration. This gives us an idea for the variability across trials, and where in time the activity tends to be clustered.
End of explanation
"""
### QUESTION ANSWER
# Compare the plug-in standard error of the mean (std/sqrt(n)) against a
# nonparametric-bootstrap estimate, across a range of sample sizes.
sample_sizes = [15, 50, 100, 150, 300, 500, 1000, 10000]
n_simulations = 1000
stat = np.mean  # statistic whose SE we estimate
random_func = np.random.randn  # data-generating distribution (standard normal)
#
standard_errors = pd.DataFrame(index=sample_sizes,
                               columns=['se', 'se_bootstrap'])
for n_sample in sample_sizes:
    sample = random_func(n_sample)
    # Plug-in SE of the mean from this one dataset.
    se = np.std(sample) / np.sqrt(n_sample)
    simulation_means = np.zeros(n_simulations)
    for ii in range(n_simulations):
        # Resample the dataset with replacement and recompute the statistic.
        boot_sample = sample[np.random.randint(0, n_sample, n_sample)]
        simulation_means[ii] = stat(boot_sample)
    se_boot = np.std(simulation_means)
    standard_errors.loc[n_sample] = [se, se_boot]
standard_errors
"""
Explanation: ADVANCED QUESTION:
Question: What happens to a bootstrapped estimate of the standard error (as compared to the theoretically derived standard error) as the sample size (not the number of bootstrapped datasets) increases? You can test your answer as follows:
1. Sample a dataset of some sample size from a known distribution (say, a normal distribution)
2. For several different sample sizes ( say, sizeDataSet=15, 50, 100, 150, 300, 500, 1000, 10000,
100000), and for some statistic (for instance the mean), do the following:
* Calculate the theoretical SE for the given sample size (you can also calculate this by simulation as we did above, though keep this simulation distinct from your original dataset)
* Perform a nonparametric bootstrap on your original dataset
* Compare the bootstrap SE and the theoretical SE.
End of explanation
"""
|
graphistry/pygraphistry | demos/more_examples/simple/tutorial_csv_mini_app_icij_implants.ipynb | bsd-3-clause | #!pip install graphistry -q
import pandas as pd
import graphistry
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
"""
Explanation: Visualize CSV Mini-App
Jupyter: File -> Make a copy
Colab: File -> Save a copy in Drive
Run notebook cells by pressing shift-enter
Either edit annd run top cells one-by-one, or edit and run the self-contained version at the bottom
End of explanation
"""
file_path = './events-1551346702.csv'
df = pd.read_csv(file_path)
print('# rows', len(df))
df.sample(min(len(df), 3))
df.columns
"""
Explanation: 1. Upload csv
Use a file by uploading it or via URL.
Run help(pd.read_csv) for more options.
File Upload: Jupyter Notebooks
If circle on top right not green, click kernel -> reconnect
Go to file directory (/tree) by clicking the Jupyter logo
Navigate to the directory page containing your notebook
Press the upload button on the top right
File Upload: Google Colab
Open the left sidebar by pressing the right arrow on the left
Go to the Files tab
Press UPLOAD
Make sure goes into /content
File Upload: URL
Uncomment below line and put in the actual data url
Run help(pd.read_csv) for more options
End of explanation
"""
hits = pd.DataFrame([[c, len(df[c].unique())] for c in df.columns], columns=['col', 'num_uniq']).sort_values('num_uniq')
hits.query('num_uniq > 10 & num_uniq < 9288')
skip_nodes = ['icij_notes', 'determined_cause', 'action_classification', 'icij_notes', 'country', 'status', 'source']
nodes = [x for x in list(hits.query('num_uniq > 10 & num_uniq < 9288')['col']) if not x in skip_nodes]
nodes
df = df_orig.query('country == "USA"')
"""
Explanation: 2. Optional: Clean up CSV
End of explanation
"""
#Pick 'A', 'B', or 'C'
mode = 'B'
max_rows = 50000
### 'A' == mode
my_src_col = 'attackerIP'
my_dest_col = 'victimIP'
### 'B' == mode
node_cols = nodes
categories = { #optional
#'date': [ 'create_date', 'date_initiated_by_firm', 'date_posted', 'date_terminated', 'updated_at' ]
#'ip': ['attacker_IP', 'victimIP']
#, 'user': ['owner', 'seller'],
}
### 'C' == mode
edges = {
'attackerIP': [ 'victimIP', 'victimPort', 'vulnName'],
'victimIP': [ 'victimPort'],
'vulnName': [ 'victimIP' ]
}
categories = { #optional
'ip': ['attackerIP', 'victimIP']
#, user': ['owner', 'seller'], ...
}
"""
Explanation: 3. Configure: Visualize with 3 kinds of graphs
Set mode and the corresponding values:
Mode "A". See graph from table of (src,dst) edges
Mode "B". See hypergraph: Draw row as node and connect it to entities in same row
Pick which cols to make nodes
If multiple cols share same type (e.g., "src_ip", "dest_ip" are both "ip"), unify them
Mode "C". See by creating multiple nodes, edges per row
Pick how different column values point to other column values
If multiple cols share same type (e.g., "src_ip", "dest_ip" are both "ip"), unify them
End of explanation
"""
g = None
hg = None
num_rows = min(max_rows, len(df))
if mode == 'A':
g = graphistry.edges(df.sample(num_rows)).bind(source=my_src_col, destination=my_dest_col)
elif mode == 'B':
hg = graphistry.hypergraph(df.sample(num_rows), node_cols, opts={'CATEGORIES': categories})
g = hg['graph']
elif mode == 'C':
nodes = list(edges.keys())
for dests in edges.values():
for dest in dests:
nodes.append(dest)
node_cols = list(set(nodes))
hg = graphistry.hypergraph(df.sample(num_rows), node_cols, direct=True, opts={'CATEGORIES': categories, 'EDGES': edges})
g = hg['graph']
#hg
print(len(g._edges))
g.plot()
"""
Explanation: 4. Plot: Upload & render!
See UI guide: https://labs.graphistry.com/graphistry/ui.html
End of explanation
"""
#!pip install graphistry -q
import pandas as pd
import graphistry
#graphistry.register(key='MY_KEY', server='labs.graphistry.com')
##########
#1. Load
file_path = './events-1551346702.csv'
df = pd.read_csv(file_path)
print(df.columns)
print('rows:', len(df))
print(df.sample(min(len(df),3)))
##########
#2. Clean
#df = df.rename(columns={'attackerIP': 'src_ip', 'victimIP: 'dest_ip', 'victimPort': 'protocol'})
hits = pd.DataFrame([[c, len(df[c].unique())] for c in df.columns], columns=['col', 'num_uniq']).sort_values('num_uniq')
skip_nodes = ['icij_notes', 'determined_cause', 'action_classification', 'icij_notes', 'country', 'status', 'source']
nodes = [x for x in list(hits.query('num_uniq > 10 & num_uniq < 9288')['col']) if not x in skip_nodes]
df = df.query('country == "USA"')
##########
#3. Config - Pick 'A', 'B', or 'C'
mode = 'B'
max_rows = 50000
### 'A' == mode
my_src_col = 'attackerIP'
my_dest_col = 'victimIP'
### 'B' == mode
node_cols = nodes
categories = { #optional
#'ip': ['src_ip', 'dest_ip']
#, 'user': ['owner', 'seller'],
}
### 'C' == mode
edges = {
'attackerIP': [ 'victimIP', 'victimPort', 'vulnName'],
'victimIP': [ 'victimPort' ],
'vulnName': ['victimIP' ]
}
categories = { #optional
#'ip': ['attackerIP', 'victimIP']
#, 'user': ['owner', 'seller'], ...
}
##########
#4. Plot
g = None
hg = None
num_rows = min(max_rows, len(df))
if mode == 'A':
g = graphistry.edges(df.sample(num_rows)).bind(source=my_src_col, destination=my_dest_col)
elif mode == 'B':
hg = graphistry.hypergraph(df.sample(num_rows), node_cols, opts={'CATEGORIES': categories})
g = hg['graph']
elif mode == 'C':
nodes = list(edges.keys())
for dests in edges.values():
for dest in dests:
nodes.append(dest)
node_cols = list(set(nodes))
hg = graphistry.hypergraph(df.sample(num_rows), node_cols, direct=True, opts={'CATEGORIES': categories, 'EDGES': edges})
g = hg['graph']
g.plot()
"""
Explanation: Alternative: Combined
Split into data loading and cleaning/configuring/plotting.
End of explanation
"""
|
manoharan-lab/structural-color | structure_factor_data.ipynb | gpl-3.0 | import numpy as np
import matplotlib.pyplot as plt
import structcol as sc
import structcol.refractive_index as ri
from structcol import montecarlo as mc
from structcol import detector as det
from structcol import model
from structcol import structure
%matplotlib inline
"""
Explanation: Tutorial for using structure factor data as the structure factor used in the structural-color package
This tutorial describes how to add your own structor factor data to Monte Carlo calculations
Copyright 2016, Vinothan N. Manoharan, Victoria Hwang, Annie Stephenson
This file is part of the structural-color python package.
This package is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
This package is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this package. If not, see http://www.gnu.org/licenses/.
End of explanation
"""
wavelengths = sc.Quantity(np.arange(400, 800, 20), 'nm') # wavelengths
radius = sc.Quantity('0.5 um') # particle radius
volume_fraction = sc.Quantity(0.5, '') # volume fraction of particles
n_particle = ri.n('fused silica', wavelengths)
n_matrix = ri.n('vacuum', wavelengths) # called from the refractive_index module. n_matrix is the
n_medium = ri.n('vacuum', wavelengths) # space within sample. n_medium is outside the sample.
# n_particle and n_matrix can have complex indices if absorption is desired
thickness = sc.Quantity('50 um') # thickness of the sample film
"""
Explanation: For the single scattering model
set parameters
End of explanation
"""
qd_data = np.arange(0,75, 0.1)
s_data = structure.factor_py(qd_data, volume_fraction.magnitude)
"""
Explanation: Construct the structure factor data
Here, we use discrete points from the percus-yevick approximation for structure factor, as an example. In practice, you will most likely use actual structure factor data imported from your own file
End of explanation
"""
qd = np.arange(0,70, 0.1)# works up to qd = 72
s = structure.factor_data(qd, s_data, qd_data)
plt.figure()
plt.plot(qd, s, label = 'interpolated')
plt.plot(qd_data, s_data,'.', label = 'data')
plt.legend()
plt.xlabel('qd')
plt.ylabel('structure factor')
"""
Explanation: plot the structure factor data and interpolated function
End of explanation
"""
reflectance=np.zeros(len(wavelengths))
for i in range(len(wavelengths)):
reflectance[i],_,_,_,_ = sc.model.reflection(n_particle[i], n_matrix[i], n_medium[i], wavelengths[i],
radius, volume_fraction,
thickness=thickness,
structure_type='data',
structure_s_data=s_data,
structure_qd_data=qd_data)
"""
Explanation: Calculate reflectance
End of explanation
"""
plt.figure()
plt.plot(wavelengths, reflectance)
plt.ylim([0,0.1])
plt.ylabel('Reflectance')
plt.xlabel('wavelength (nm)')
"""
Explanation: plot
End of explanation
"""
ntrajectories = 500 # number of trajectories
nevents = 500 # number of scattering events in each trajectory
wavelengths = sc.Quantity(np.arange(400, 800, 20), 'nm') # wavelengths
radius = sc.Quantity('0.5 um') # particle radius
volume_fraction = sc.Quantity(0.5, '') # volume fraction of particles
n_particle = ri.n('fused silica', wavelengths)
n_matrix = ri.n('vacuum', wavelengths) # called from the refractive_index module. n_matrix is the
n_medium = ri.n('vacuum', wavelengths) # space within sample. n_medium is outside the sample.
# n_particle and n_matrix can have complex indices if absorption is desired
boundary = 'film' # geometry of sample, can be 'film' or 'sphere', see below for tutorial
# on sphere case
thickness = sc.Quantity('50 um') # thickness of the sample film
"""
Explanation: For the Monte Carlo model
set parameters
End of explanation
"""
qd_data = np.arange(0,75, 0.1)
s_data = structure.factor_py(qd_data, volume_fraction.magnitude)
"""
Explanation: Construct the structure factor data
Here, we use discrete points from the percus-yevick approximation for structure factor, as an example. In practice, you will most likely use actual structure factor data imported from your own file
End of explanation
"""
qd = np.arange(0,70, 0.1)# works up to qd = 72
s = structure.factor_data(qd, s_data, qd_data)
plt.figure()
plt.plot(qd, s, label = 'interpolated')
plt.plot(qd_data, s_data,'.', label = 'data')
plt.legend()
plt.xlabel('qd')
plt.ylabel('structure factor')
"""
Explanation: plot the structure factor data and interpolated function
End of explanation
"""
# Monte Carlo reflectance: for each wavelength, simulate photon trajectories
# through the film and record the fraction reflected. Uses the user-supplied
# structure-factor data (s_data/qd_data) in the scattering calculation.
reflectance = np.zeros(wavelengths.size)
for i in range(wavelengths.size):
    # calculate n_sample (effective index of the particle/matrix composite)
    n_sample = ri.n_eff(n_particle[i], n_matrix[i], volume_fraction)
    # Calculate the phase function and scattering and absorption coefficients from the single scattering model
    p, mu_scat, mu_abs = mc.calc_scat(radius, n_particle[i], n_sample, volume_fraction, wavelengths[i],
                                      structure_type = 'data',
                                      structure_s_data = s_data,
                                      structure_qd_data = qd_data)
    # Initialize the trajectories (positions, directions, weights)
    r0, k0, W0 = mc.initialize(nevents, ntrajectories, n_medium[i], n_sample, boundary)
    r0 = sc.Quantity(r0, 'um')
    k0 = sc.Quantity(k0, '')
    W0 = sc.Quantity(W0, '')
    # Generate a matrix of all the randomly sampled angles first
    sintheta, costheta, sinphi, cosphi, _, _ = mc.sample_angles(nevents, ntrajectories, p)
    # Create step size distribution
    step = mc.sample_step(nevents, ntrajectories, mu_scat)
    # Create trajectories object
    trajectories = mc.Trajectory(r0, k0, W0)
    # Run photons: attenuate by absorption, rotate by sampled angles, advance
    trajectories.absorb(mu_abs, step)
    trajectories.scatter(sintheta, costheta, sinphi, cosphi)
    trajectories.move(step)
    reflectance[i], transmittance = det.calc_refl_trans(trajectories, thickness, n_medium[i], n_sample, boundary)
"""
Explanation: Calculate reflectance
End of explanation
"""
plt.figure()
plt.plot(wavelengths, reflectance)
plt.ylim([0,1])
plt.ylabel('Reflectance')
plt.xlabel('wavelength (nm)')
"""
Explanation: plot
End of explanation
"""
|
mitdbg/modeldb | client/workflows/demos/composite-model.ipynb | mit | try:
import verta
except ImportError:
!pip install verta
HOST = "app.verta.ai"
# import os
# os.environ['VERTA_EMAIL'] =
# os.environ['VERTA_DEV_KEY'] =
"""
Explanation: Logistic Regression with Preprocessing
This example demonstrates how to call one deployed endpoint from another.
In this scenario, two projects could be iterated on and deployed independently—one for pre-processing and one for classification—
and composed modularly across their endpoints.
End of explanation
"""
from __future__ import print_function
import itertools
import os
import time
import pandas as pd
import sklearn
from sklearn import preprocessing
from sklearn import linear_model
try:
import wget
except ImportError:
!pip install wget # you may need pip3
import wget
"""
Explanation: Imports
End of explanation
"""
train_data_url = "http://s3.amazonaws.com/verta-starter/census-train.csv"
train_data_filename = wget.detect_filename(train_data_url)
if not os.path.isfile(train_data_filename):
wget.download(train_data_url)
test_data_url = "http://s3.amazonaws.com/verta-starter/census-test.csv"
test_data_filename = wget.detect_filename(test_data_url)
if not os.path.isfile(test_data_filename):
wget.download(test_data_url)
df_train = pd.read_csv(train_data_filename)
X_train = df_train.iloc[:,:-1]
y_train = df_train.iloc[:, -1]
df_test = pd.read_csv(test_data_filename)
X_test = df_test.iloc[:,:-1]
y_test = df_test.iloc[:, -1]
df_train.head()
"""
Explanation: Prepare data
End of explanation
"""
from verta import Client
client = Client(HOST)
"""
Explanation: Instantiate client
End of explanation
"""
import pickle
class Preprocessor:
    """Verta-deployable model wrapper around a pickled sklearn preprocessor."""

    def __init__(self, artifacts):
        # Restore the fitted preprocessor from the logged artifact file.
        artifact_path = artifacts['preprocessor']
        with open(artifact_path, 'rb') as artifact_file:
            self.preprocessor = pickle.load(artifact_file)

    def predict(self, x):
        # "Prediction" for this model is simply the preprocessing transform.
        transformed = self.preprocessor.transform(x)
        return transformed

    def example(self):
        # One raw census record, in the column order the model expects.
        return [
            [44, 0, 0, 40, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
             0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]
client.get_or_create_project("Preprocessor")
client.get_or_create_experiment("Normalization")
run = client.get_or_create_experiment_run()
sklearn_preprocessor = preprocessing.Normalizer()
run.log_artifact('preprocessor', sklearn_preprocessor)
run.log_model(Preprocessor, artifacts=['preprocessor'])
run.log_requirements(['sklearn'])
endpoint = client.get_or_create_endpoint("ml-preprocessor")
endpoint.update(run, wait=True)
endpoint
"""
Explanation: Log preprocessor
First, we will log and deploy a data preprocessor. This will have its own endpoint that can be called (and updated) in isolation.
End of explanation
"""
import pickle
assert client # the model will reuse the client from this notebook
class Classifier:
    """Verta-deployable model that chains the deployed preprocessor
    endpoint with a pickled classifier."""

    def __init__(self, artifacts):
        # Restore the fitted classifier from the logged artifact file.
        with open(artifacts['classifier'], 'rb') as artifact_file:
            self.classifier = pickle.load(artifact_file)
        # Reuse the already-deployed preprocessor endpoint for input scaling.
        preprocessor_endpoint = client.get_endpoint("ml-preprocessor")
        self.preprocessor = preprocessor_endpoint.get_deployed_model()

    def predict(self, x):
        # Normalize first, then classify the transformed features.
        return self.classifier.predict(self.preprocessor.predict(x))

    def example(self):
        # One already-normalized census record.
        return [
            [0.7396263843801948, 0.0, 0.0, 0.6723876221638134, 0.0, 0.016809690554095334, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.016809690554095334, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
             0.0, 0.0, 0.0, 0.016809690554095334, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
        ]
client.get_or_create_project("Classifier")
client.get_or_create_experiment("Logistic Regression")
run = client.get_or_create_experiment_run()
sklearn_classifier = linear_model.LogisticRegression(max_iter=10**5)
sklearn_classifier.fit(sklearn_preprocessor.transform(X_train), y_train)
run.log_artifact('classifier', sklearn_classifier)
run.log_model(Classifier, artifacts=['classifier'])
run.log_requirements(['sklearn', 'urllib3'])
endpoint = client.get_or_create_endpoint("ml-classifier")
endpoint.update(run, wait=True)
endpoint
"""
Explanation: Log classifier
With the preprocessor running, we can call its endpoint from within our model. Inputs sent to this model endpoint will therefore also be passed to the preprocessor endpoint during the course of a prediction.
End of explanation
"""
# Grab a client-side handle to the live classifier endpoint.
deployed_model = endpoint.get_deployed_model()
# Stream predictions indefinitely (itertools.cycle never terminates),
# one request every half second; interrupt the kernel to stop.
for row in itertools.cycle(X_test.values):
    print(deployed_model.predict([row]))
    time.sleep(.5)
"""
Explanation: Run predictions
End of explanation
"""
|
metpy/MetPy | v0.6/_downloads/Point_Interpolation.ipynb | bsd-3-clause | import cartopy
import cartopy.crs as ccrs
from matplotlib.colors import BoundaryNorm
import matplotlib.pyplot as plt
import numpy as np
from metpy.cbook import get_test_data
from metpy.gridding.gridding_functions import (interpolate, remove_nan_observations,
remove_repeat_coordinates)
from metpy.plots import add_metpy_logo
def basic_map(proj):
    """Create the default CONUS base map shared by all interpolation plots.

    Parameters
    ----------
    proj : cartopy projection for the map axes.

    Returns
    -------
    The GeoAxes covering the continental U.S. with states, ocean,
    coastline and borders drawn.
    """
    fig = plt.figure(figsize=(15, 10))
    add_metpy_logo(fig, 0, 80, size='large')
    # Full-figure axes in the requested projection.
    view = fig.add_axes([0, 0, 1, 1], projection=proj)
    # Extent [lon_min, lon_max, lat_min, lat_max] roughly covering CONUS.
    view.set_extent([-120, -70, 20, 50])
    view.add_feature(cartopy.feature.NaturalEarthFeature(category='cultural',
                                                         name='admin_1_states_provinces_lakes',
                                                         scale='50m', facecolor='none'))
    view.add_feature(cartopy.feature.OCEAN)
    view.add_feature(cartopy.feature.COASTLINE)
    view.add_feature(cartopy.feature.BORDERS, linestyle=':')
    return view
def station_test_data(variable_names, proj_from=None, proj_to=None):
    """Load the bundled surface-station test data.

    Parameters
    ----------
    variable_names : str or list of str
        Field name(s) to pull out of the structured station array.
    proj_from, proj_to : cartopy projections, optional
        If both are given, station lon/lat are transformed from
        ``proj_from`` into ``proj_to`` coordinates.

    Returns
    -------
    (x, y, value) tuple, or ``None`` if the coordinate transform fails.
    """
    with get_test_data('station_data.txt') as f:
        # Structured dtype: station id, position, and surface observations.
        # NOTE(review): '3S'/'16S' look like reversed numpy byte-string
        # codes ('S3'/'S16') -- confirm numpy accepts this ordering.
        all_data = np.loadtxt(f, skiprows=1, delimiter=',',
                              usecols=(1, 2, 3, 4, 5, 6, 7, 17, 18, 19),
                              dtype=np.dtype([('stid', '3S'), ('lat', 'f'), ('lon', 'f'),
                                              ('slp', 'f'), ('air_temperature', 'f'),
                                              ('cloud_fraction', 'f'), ('dewpoint', 'f'),
                                              ('weather', '16S'),
                                              ('wind_dir', 'f'), ('wind_speed', 'f')]))
    all_stids = [s.decode('ascii') for s in all_data['stid']]
    # Rebuild the array record-by-record (keeps the original row order).
    data = np.concatenate([all_data[all_stids.index(site)].reshape(1, ) for site in all_stids])
    value = data[variable_names]
    lon = data['lon']
    lat = data['lat']
    if proj_from is not None and proj_to is not None:
        try:
            proj_points = proj_to.transform_points(proj_from, lon, lat)
            return proj_points[:, 0], proj_points[:, 1], value
        except Exception as e:
            # Best-effort: report the failure and signal it with None.
            print(e)
            return None
    return lon, lat, value
from_proj = ccrs.Geodetic()
to_proj = ccrs.AlbersEqualArea(central_longitude=-97.0000, central_latitude=38.0000)
levels = list(range(-20, 20, 1))
cmap = plt.get_cmap('magma')
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
x, y, temp = station_test_data('air_temperature', from_proj, to_proj)
x, y, temp = remove_nan_observations(x, y, temp)
x, y, temp = remove_repeat_coordinates(x, y, temp)
"""
Explanation: Point Interpolation
Compares different point interpolation approaches.
End of explanation
"""
gx, gy, img = interpolate(x, y, temp, interp_type='linear', hres=75000)
img = np.ma.masked_where(np.isnan(img), img)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
"""
Explanation: Scipy.interpolate linear
End of explanation
"""
gx, gy, img = interpolate(x, y, temp, interp_type='natural_neighbor', hres=75000)
img = np.ma.masked_where(np.isnan(img), img)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
"""
Explanation: Natural neighbor interpolation (MetPy implementation)
Reference <https://github.com/Unidata/MetPy/files/138653/cwp-657.pdf>_
End of explanation
"""
gx, gy, img = interpolate(x, y, temp, interp_type='cressman', minimum_neighbors=1, hres=75000,
search_radius=100000)
img = np.ma.masked_where(np.isnan(img), img)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
"""
Explanation: Cressman interpolation
search_radius = 100 km
grid resolution = 25 km
min_neighbors = 1
End of explanation
"""
gx, gy, img1 = interpolate(x, y, temp, interp_type='barnes', hres=75000, search_radius=100000)
img1 = np.ma.masked_where(np.isnan(img1), img1)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img1, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
"""
Explanation: Barnes Interpolation
search_radius = 100km
min_neighbors = 3
End of explanation
"""
gx, gy, img = interpolate(x, y, temp, interp_type='rbf', hres=75000, rbf_func='linear',
rbf_smooth=0)
img = np.ma.masked_where(np.isnan(img), img)
view = basic_map(to_proj)
mmb = view.pcolormesh(gx, gy, img, cmap=cmap, norm=norm)
plt.colorbar(mmb, shrink=.4, pad=0, boundaries=levels)
plt.show()
"""
Explanation: Radial basis function interpolation
linear
End of explanation
"""
|
mssalvador/Fifa2018 | Teknisk Tirsdag Tutorial (Supervised Learning).ipynb | apache-2.0 | # Run the datacleaning notebook to get all the variables
%run 'Teknisk Tirsdag - Data Cleaning.ipynb'
"""
Explanation: Teknisk Tirsdag: Supervised Learning
I denne opgave skal vi bruge Logistisk Regression til at forudsige hvilke danske fodboldspillere der egentlig kunne spille for en storklub.
End of explanation
"""
corr = overall_set.corr()
fig = plt.figure(figsize=(20, 16))
ax = sb.heatmap(corr, xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
linewidths=0.25, vmax=1.0, square=True,
linecolor='black', annot=False
)
plt.show()
"""
Explanation: Efter at have hentet vores rensede data, hvor vi minder os selv om at vi har: <br>
* dansker_set
* topklub_set
* ikke_topklub_set
* overall_set
Det første, vi gerne vil kigge lidt på, er, om vi var grundige nok i vores foranalyse. Derfor laver vi et heatmap, der skal fortælle os hvor stor sammenhængen er (korrelation) mellem kolonnerne i forhold til hinanden.
End of explanation
"""
overall_set['label'] = overall_set['Club'].isin(topklub_set.Club).astype(int)
y = overall_set['label']
X = overall_set.iloc[:,0:-1].select_dtypes(include=['float64', 'int64'])
"""
Explanation: Hvad vi ser her, er en korrelationsmatrix. Jo mørkere farver, des højere korrelation, rød for positiv- og blå for negativ-korrelation. <br>
Vi ser altså at der er høj korrelation, i vores nedre højre hjørne; Dette er spilpositionerne. Vi ser også et stort blåt kryds, som er målmandsdata. Disse har meget negativ korrelation med resten af vores datasæt. (Dobbeltklik evt. på plottet, hvis det er meget svært at læse teksten)<br>
Derudover kan vi se, at ID kolonnen slet ikke korrelere. Man kan derfor vælge at tage den ud.
Vi tilføjer nu vores "kendte" labels til vores data. (Hvis man spiller for en af vores topklubber, får man et 1-tal, og ellers får man et 0) <br>
Vi deler også vores træningssæt op i en X matrix med alle vores numeriske features, og en y vektor med alle vores labels.
End of explanation
"""
overall_set.groupby('label').mean()
"""
Explanation: Vi kan kigge lidt overordnet på tallene mellem de 2 klasser.
End of explanation
"""
# hent nødvendige pakker fra Scikit Learn biblioteket (generelt super hvis man vil lave data science)
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
"""
Explanation: Observationer
Alderen siger ikke rigtig noget om, hvorvidt du spiller for en topklub eller ej
Topklubsspillere er i gennemsnittet en faktor 10 mere værd end ikke-topklub spillere
Topklubsspillere er i gennemsnittet generelt ca. 10+ på alt i forhold til ikke-topklub spillere
Vi er nu klar til at gå i gang med vores første Machine Learning algoritme.
På forhånd ved vi, at der i vores træningssæt er {{y.where(y==1).count()}} som spiller i topklubber, og {{y.where(y==0).count()}} der ikke gør. <br>
Der er en 50/50 chance for at ramme rigtigt, hvis man bare gætter tilfældigt. Vi håber derfor, at algoritmen kan slå den 50% svarrate.
Logistisk regression
End of explanation
"""
model = LogisticRegression()
model = model.fit(X,y)
model.score(X,y)
"""
Explanation: Vi fitter nu en logistic regression classifier til vores data, og fitter en model, så den kan genkende om man spiller for en topklub eller ej, og evaluere resultatet:
End of explanation
"""
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2)
print('Træningsæt størrelse: {} - Testsæt størrelse: {}'.format(len(X_train), len(X_test)))
"""
Explanation: Altså har vores model ret i
{{'{:.0f}'.format(100*model.score(X, y))}}% af tiden i træningssættet. <br>
Pretty good!! Den har altså fundet nogle mønstre der kan mappe data til labels, og gætter ikke bare.
Men vi kan ikke vide, om den har overfittet, og derved har tilpasset sig for godt til sit kendte data, så nyt data vil blive fejlmappet. <br>
Hvad vi kan prøve, er at splitte vores træningssæt op i et trænings- og testsæt. På den måde kan vi først fitte og derefter evaluere på "nyt" kendt data, om den stadig performer som forventet.
End of explanation
"""
model2 = LogisticRegression()
model2 = model2.fit(X_train, y_train)
model2.score(X_train, y_train)
"""
Explanation: Og vi er nu klar til at prøve igen!
Logistisk regression 2.0
Igen fitter vi en logistisk regression til vores træningsdata, og danner en model, men denne gang uden at bruge testdatasættet.
End of explanation
"""
y_pred = model2.predict(X_test)
y_probs = model2.predict_proba(X_test)
# Evalueringsmålinger
from sklearn import metrics
print('Nøjagtigheden af vores logistiske regressions models prediction på testsættet er {:.0f}'.format(100*metrics.accuracy_score(y_test, y_pred))+'%', '\n')
print('Arealet under vores ROC AUC kurve er {:.0f}'.format(100*metrics.roc_auc_score(y_test, y_probs[:, 1]))+'%')
"""
Explanation: Modellen matcher nu
{{'{:.0f}'.format(100*model2.score(X, y))}}% af tiden i træningssættet. <br>
Men har den overfittet?
Evaluering af modellen
Vi genererer derfor vores y forudsigelse og også sandsynlighederne for vores testsæt, da disse bruges til at evaluere modellen.
End of explanation
"""
confusion_matrix = metrics.confusion_matrix(y_test, y_pred)
print(confusion_matrix)
"""
Explanation: Det ser jo ret fornuftigt ud.<br>
For at sige noget om vores nye model, kan vi også lave en "confusion_matrix"
<img src='http://revolution-computing.typepad.com/.a/6a010534b1db25970b01bb08c97955970d-pi',
align="center"
width="40%"
alt="confusion matrix">
T og F står for henholdsvist True og False<br>
P og N står for henholdsvist Positive og Negative
End of explanation
"""
print(metrics.classification_report(y_test, y_pred))
"""
Explanation: Resultatet fortæller os, at vi har {{confusion_matrix[0,0]}}+{{confusion_matrix[1,1]}} = {{confusion_matrix[0,0]+confusion_matrix[1,1]}} korrekte forudsigelser og {{confusion_matrix[0,1]}}+{{confusion_matrix[1,0]}} = {{confusion_matrix[0,1]+confusion_matrix[1,0]}} ukorrekte
Man kan også bede classifieren om en rapport:
End of explanation
"""
# 10-folds cross-validation
from sklearn.model_selection import cross_val_score
scores = cross_val_score(LogisticRegression(), X, y, scoring='accuracy', cv=10)
print(scores,'\n')
print(scores.mean())
"""
Explanation: Logistisk regression med krydsvalidering
Vi er egentlig meget tilfredse med vores model, men ofte kan det være en god idé at teste på flere små testsæt, og holde dem op mod hinanden. <br>
Her laver vi en 10-folds krydsvalidering og får altså 10 scorer ud:
End of explanation
"""
dansker_pred = None ### Fjern NONE og UDFYLD MIG ###
dansker_probs = None ### Fjern NONE og UDFYLD MIG ###
"""
Explanation: Her preformer modellen altså i gennemsnit
{{'{:.0f}'.format(100*scores.mean())}}%.
Det lyder meget lovende, men vi holder os til vores model2 og kan nu prøve modellen af på det rigtige datasæt
Danskersættet
Vi skal nu prøve vores model på vores danske spillere<br>
Opgave:
Vi skal lave prediction og probability på vores danske spillere, ligesom vi gjorde tidligere for testsættet. (Lige under Evaluering af modellen)<br>
Husk din dataframe kun må indeholder numeriske værdier, når vi bruger modellen.<br>
Fx. "df.select_dtypes(include=['float64', 'int64'])"
End of explanation
"""
dansker_set_df = dansker_set.copy()
dansker_set_df[['prob1','prob2']] = pd.DataFrame(dansker_probs, index=dansker_set.index)
dansker_set_df['Probabilities [0,1]'] = dansker_set_df[['prob1','prob2']].values.tolist()
dansker_set_df['Prediction'] = pd.Series(dansker_pred, index=dansker_set.index)
del dansker_set_df['prob1'], dansker_set_df['prob2']
# dansker_set_df.head()
"""
Explanation: Modellen har fundet {{np.bincount(dansker_pred)[0]}} nuller og {{np.bincount(dansker_pred)[1]}} ét-taller
Hvis du satte top_klub_ratio til 75 i Opgave 1 i Data Cleaning, skulle der være omkring 27-28 ét-taller. <br>
top_klub_ratio blev sat til: {{top_klub_ratio}}
Vi tilføjer disse kolonner til vores dataframe.
End of explanation
"""
dansker_set_df.loc[:,'pred=1'] = dansker_set_df['Probabilities [0,1]'].map(lambda x: x[1]).sort_values(ascending=False)
dansker_sorted = dansker_set_df.sort_values('pred=1', ascending=False)
dansker_sorted = dansker_sorted[['Name', 'Club', 'Overall', 'Potential', 'Probabilities [0,1]', 'Prediction']]
dansker_sorted.loc[:,'in'] = np.arange(1, len(dansker_set_df)+1)
dansker_sorted.set_index('in')
"""
Explanation: Og sorterer listen, så de bedste danske spillere står øverst, og tilføjer et indeks, så vi kan få et bedre overblik
End of explanation
"""
dansker_sorted[dansker_sorted['Club'].isin(top_clubs)].set_index('in')
"""
Explanation: Efter flot hattrick mod Irland kan man vist ikke være i tvivl om, at Kong Christian tager pladsen på tronen
<img src='kongen.png',
align="center"
width="40%"
alt="kongen">
Men hvilke danske spillere spiller egentlig for topklubber, og hvordan er de rangeret i forhold til vores model?
End of explanation
"""
dansker_sorted.loc[dansker_sorted.Name == 'N. Bendtner'].set_index('in')
"""
Explanation: Man kan undre sig over hvad Jacob Larsen laver hos stopklubben Borussia Dortmund, men en hurtig googling viser, at han simpelthen blev headhuntet til klubben som 16-årig.
Og så er der jo nok nogen, der vil spørge - Hvad med Bendtner?
Så han skal da også lige have en plads i vores analyse:
End of explanation
"""
df.loc[df.Name == 'N. Bendtner']
"""
Explanation: Opgave:
Vi kan også kigge på ham i det store billede. Prøv evt. at lege lidt rundt med forskellige spillere eller andre features.<br>
Er der noget specielt, der kunne være sjovt at kigge på?
End of explanation
"""
top_df = df[df.Club.isin(top_clubs)]
top_df[top_df.Overall < 70].sort_values('Overall', ascending=True)
"""
Explanation: Ekstra lege/analyse opgaver
Danske Rezan Corlu som ellers ligger ret lavt selv på potentiale har alligevel sikret sig en plads hos A.S. Roma i en alder af 20 år.
Men hvordan var det egentlig med de topklub spillere? Hvor langt ned kan man gå i potentiale, og stadig spille for en topklub?
End of explanation
"""
bund_df = df[~df.Club.isin(top_clubs)]
bund_df[bund_df.Overall > 70]
"""
Explanation: Vi kan altså se, at der bliver satset på ungdommen, hvor deres kommende potentiale nok taler for deres plads i en storklub.<br>
Men hvad så med ikke-topklubsspillere og deres performance?
End of explanation
"""
top_clubs
"""
Explanation: Måske er de 22 klubber, vi har udvalgt ikke helt nok til at beskrive topklubber
End of explanation
"""
|
Krekelmans/Train_prediction_kaggle | backup.ipynb | mit | %matplotlib inline
%pylab inline
import pandas as pd
import numpy as np
from collections import Counter, OrderedDict
import json
import matplotlib
import matplotlib.pyplot as plt
import re
from scipy.misc import imread
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report
"""
Explanation: Predicting the occupancies of Belgian trains
In this lab, we will go over some of the typical steps in a data science pipeline:
Data processing & cleaning
Exploratory Data Analysis
Feature extraction/engineering
Model selection & hyper-parameter tuning
Data linking
...
We will make use of the following technologies and libraries:
Python3.5
Python libraries: pandas, numpy, sklearn, matplotlib, ...
Kaggle
NO SPARK!!! (next lab will deal with machine learning with Spark MLlib)
End of explanation
"""
from pandas.io.json import json_normalize
import pickle
training_json = pd.DataFrame()
with open('training_data.nldjson') as data_file:
for line in data_file:
training_json = training_json.append(json_normalize(json.loads(line)))
with open('test.nldjson') as data_file:
for line in data_file:
out_test_json = json_normalize(json.loads(line))
pickle.dump(training_json,open("Train_prediction_kaggle/temp_data/training.pkl","wb"))
pickle.dump(out_test_json,open("Train_prediction_kaggle/temp_data/out_test.pkl","wb"))
out_test = out_test_json
training = training_json
out_test[0:1]
stations_df.info()
trends_df = pd.DataFrame()
from pytrends.request import TrendReq
import pandas as pd
# enter your own credentials
google_username = "davidjohansmolders@gmail.com"
google_password = "******"
#path = ""
# Login to Google. Only need to run this once, the rest of requests will use the same session.
pytrend = TrendReq(google_username, google_password, custom_useragent='My Pytrends Script')
for i in range(0,645):
if i % 5 != 0:
continue
try:
pytrend.build_payload(kw_list=[stations_df[stations_df.destination == i].zoekterm.values[0], stations_df[stations_df.destination == i+1].zoekterm.values[0], stations_df[stations_df.destination == i+2].zoekterm.values[0], stations_df[stations_df.destination == i+3].zoekterm.values[0], stations_df[stations_df.destination == i+4].zoekterm.values[0]],geo="BE",timeframe='2016-07-27 2017-04-05')
trends_df = pd.concat([trends_df,pytrend.interest_over_time()], axis=1)
except:
continue
no_dup_trends = trends_df.T.groupby(level=0).first().T
#training_holidays_druktes = pd.merge(training_holidays_druktes,stations_df[["destination","zoekterm"]], left_on = 'destination', right_on = 'destination')
out_test_holidays_druktes = pd.merge(out_test_holidays_druktes,stations_df[["destination","zoekterm"]], left_on = 'destination', right_on = 'destination')
training_holidays_druktes_copy = training_holidays_druktes
out_test_holidays_druktes_copy = out_test_holidays_druktes
#training_holidays_druktes = training_holidays_druktes_copy
out_test_holidays_druktes = out_test_holidays_druktes_copy
int(no_dup_trends.loc["2016-07-28"]["Brussel trein"])
def get_trends(row):
    """Attach the Google-Trends interest value for this row's search term
    and date as a new 'trend' column, and return the (mutated) row.

    Falls back to 0 when the term/date is missing from the trends table
    (or the table itself is unavailable).
    """
    search_term = str(row.zoekterm_x)
    query_date = str(row["date"])
    try:
        row["trend"] = int(no_dup_trends.loc[query_date][search_term])
    except Exception:
        # Best-effort: unknown term/date counts as no measurable interest.
        # (Narrowed from a bare `except:` so Ctrl-C is not swallowed.)
        row["trend"] = 0
    return row
#training_holidays_druktes = training_holidays_druktes.apply(get_trends, axis=1)
out_test_holidays_druktes = out_test_holidays_druktes.apply(get_trends, axis=1)
"""
Explanation: 0. Create a kaggle account! https://www.kaggle.com/
The competition can be found here: https://inclass.kaggle.com/c/train-occupancy-prediction-v2/leaderboard
Create an account and form a team (shuffle II), use your names and BDS_ as a prefix in your team name
Note: you can only make 5 submissions per day
There are also student groups from Kortrijk (Master of Science in Industrial Engineering) participating. They get no help at all (you get this notebook) but this is their final lab + they have no project. THEREFORE: Let's push them down the leaderboard!!! ;)
Your deadline: the end of the kaggle competition.
Evaluation: Your work will be evaluated for 50%, your result will also matter for another 50%. The top 5 student groups get bonus points for this part of the course!
1. Loading and processing the data
Trains can get really crowded sometimes, so wouldn't it be great to know in advance how busy your train will be, so you can take an earlier or later one? iRail, created just that. their application, SpitsGids, shows you the occupancy of every train in Belgium. Furthermore, you can indicate the occupancy yourself. Using the collected data, machine learning models can be trained to predict what the occupancy level of a train will be.
The dataset which we will use during this labo is composed of two files:
train.nldjson: contains labeled training data (JSON records, separated by newlines)
test.nldjson: unlabeled data for which we will create a submission for a Kaggle competition at the end of this lab (again: JSON records, separated by newlines). Each of the records is uniquely identifiable through an id
A json record has the following structure:
{
"querytype": "occupancy",
"querytime": "2016-09-29T16:24:43+02:00",
"post": {
"connection": "http://irail.be/connections/008811601/20160929/S85666",
"from": "http://irail.be/stations/NMBS/008811601",
"to": "http://irail.be/stations/NMBS/008811676",
"date": "20160929",
"vehicle": "http://irail.be/vehicle/S85666",
"occupancy": "http://api.irail.be/terms/medium"
},
"user_agent": "Railer/1610 CFNetwork/808.0.2 Darwin/16.0.0"
}
This is how the five first rows of a processed DataFrame COULD look like
1.1: Load in both files and store the data in a pandas DataFrame, different methodologies can be applied in order to parse the JSON records (pd.io.json.json_normalize, json library, ...)
End of explanation
"""
# Parse the ISO timestamps into pandas datetimes so time-based features
# (weekday, seconds since midnight, month, ...) can be derived later.
training['querytime'] = pd.to_datetime(training['querytime'])
out_test['querytime'] = pd.to_datetime(out_test['querytime'])
# Drop unfixable records (rows with missing fields) from the training set only;
# the test set must keep all rows so every id gets a submission prediction.
training = training.dropna()
# Strip the URI prefixes, keeping only the occupancy label / vehicle identifier.
training['post.occupancy'] = training['post.occupancy'].apply(lambda x: x.split("http://api.irail.be/terms/",1)[1])
training['post.vehicle'] = training['post.vehicle'].apply(lambda x: x.split("http://irail.be/vehicle/",1)[1])
out_test['post.vehicle'] = out_test['post.vehicle'].apply(lambda x: x.split("http://irail.be/vehicle/",1)[1])
# Create the vehicle-class column by keeping only the letters, e.g. IC058 -> IC.
training['post.class'] = training['post.vehicle'].apply(lambda x: " ".join(re.findall("[a-zA-Z]+", x)))
out_test['post.class'] = out_test['post.vehicle'].apply(lambda x: " ".join(re.findall("[a-zA-Z]+", x)))
# Reset the index: appending DataFrames in a loop produced duplicate indexes.
training = training.reset_index()
stations_df = pd.read_csv('stations.csv')
stations_df['from'] = stations_df.index
stations_df['destination'] = stations_df['from']
stations_df[0:4]
#post.from en post.to are in the some format of URI
stations_df["zoekterm"]=stations_df["name"]+" trein"
stations_df.loc[stations_df['zoekterm'].str.startswith("Zaventem"), "zoekterm"] = "Zaventem trein"
stations_df.loc[stations_df['zoekterm'].str.startswith("Brussel"), "zoekterm"] = "Brussel trein"
stations_df.loc[stations_df['zoekterm'].str.startswith("Gent"), "zoekterm"] = "Gent trein"
stations_df.loc[stations_df['zoekterm'].str.startswith("Antwerpen"), "zoekterm"] = "Antwerpen trein"
training_holidays_druktes[0:1]
out_test_holidays_druktes[0:1].destination
stations_df[0:1]
druktes_df = pd.read_csv('station_druktes.csv')
druktes_df[0:4]
training_holidays[0:1]
training = pd.merge(training,stations_df[["URI","from"]], left_on = 'post.from', right_on = 'URI')
training = pd.merge(training,stations_df[["URI","destination"]], left_on = 'post.to', right_on = 'URI')
training = training.drop(['URI_y','URI_x'],1)
out_test = pd.merge(out_test,stations_df[["URI","from"]], left_on = 'post.from', right_on = 'URI')
out_test = pd.merge(out_test,stations_df[["URI","destination"]], left_on = 'post.to', right_on = 'URI')
out_test = out_test.drop(['URI_y','URI_x'],1)
"""
Explanation: 1.2: Clean the data! Make sure the station- and vehicle-identifiers are in the right format. A station identifier consists of 9 characters (prefix = '00') and a vehicle identifier consists of the concatentation of the vehicle type (IC/L/S/P/...) and the line identifier. Try to fix as much of the records as possible, drop only the unfixable ones. How many records did you drop?
End of explanation
"""
fig, ax = plt.subplots(1,1, figsize=(5,5))
training['post.class'].value_counts().plot(kind='pie', ax=ax, autopct='%1.1f%%')
#we have a lot of null/undefined, especially in our test set, we can't simply throw them away
"""
Explanation: 2. Exploratory Data Analysis (EDA)
Let's create some visualisations of our data in order to gain some insights. Which features are useful, which ones aren't?
We will create 3 visualisations:
* Pie chart of the class distribution
* Stacked Bar Chart depicting the distribution for one aggregated variable (such as the weekday or the vehicle type)
* Scattter plot depicting the 'crowdiness' of the stations in Belgium
For each of the visualisations, code to generate the plot has already been handed to you. You only need to prepare the data (i.e. create a new dataframe or select certain columns) such that it complies with the input specifications. If you want to create your own plotting code or extend the given code, you are free to do so!
2.1: *Create a pie_chart with the distribution of the different classes. Have a look at our webscraping lab for plotting pie charts. TIP: the value_counts() does most of the work for you!
End of explanation
"""
#training['weekday'] = training['querytime'].apply(lambda l: l.weekday())
#out_test['weekday'] = out_test['querytime'].apply(lambda l: l.weekday())
#print("timerange from training data:",training['querytime'].min(),training['querytime'].max())
#fig, ax = plt.subplots(1,1, figsize=(5,5))
#training['weekday'].value_counts().plot(kind='pie', ax=ax, autopct='%1.1f%%')
print(training_holidays_druktes['querytime'].describe())
print(out_test_holidays['querytime'].describe())
date_training = training_holidays_druktes.set_index('querytime')
date_test = out_test_holidays.set_index('querytime')
grouper = pd.TimeGrouper("1d")
date_training = date_training.groupby(grouper).size()
date_test = date_test.groupby(grouper).size()
# plot
fig, ax = plt.subplots(1,1, figsize=(10,7))
ax.plot(date_training)
ax.plot(date_test)
fig, ax = plt.subplots(1,1, figsize=(6,6))
training_holidays_druktes['weekday'].value_counts().plot(kind='pie', ax=ax, autopct='%1.1f%%')
training['post.occupancy'].value_counts()
"""
Explanation: 2.2: *Analyze the timestamps in the training and testset. First convert the timestamps to a pandas datetime object using pd.datetime. http://pandas.pydata.org/pandas-docs/stable/timeseries.html
Have the column in this data format simplifies a lot of work, since it allows you to convert and extract time features more easily. For example:
- df['weekday] = df['time'].apply(lambda l: l.weekday())
would map every date to a day of the week in [0,6].
A. What are the ranges of training and testset, is your challenges one of interpolating or extrapolating in the future?
TIP: The describe() function can already be helpful!
B. Plot the number of records in both training and testset per day. Have a look here on how to work with the timegrouper functionality: http://stackoverflow.com/questions/15297053/how-can-i-divide-single-values-of-a-dataframe-by-monthly-averages
C. OPTIONAL: Have insight into the time dependence can get you a long way: Make additional visualizations to make you understand how time affects train occupancy.
End of explanation
"""
stops = stations_df[['URI','longitude','latitude']]
dest_count = training_holidays_druktes['post.to'].value_counts()
dest_count_df = pd.DataFrame({'id':dest_count.index, 'count':dest_count.values})
dest_loc = pd.merge(dest_count_df, stops, left_on = 'id', right_on = 'URI')
dest_loc = dest_loc[['id', 'count', 'latitude','longitude']]
fig, ax = plt.subplots(figsize=(12,10))
ax.scatter(dest_loc.longitude, dest_loc.latitude, s=dest_loc['count'] )
"""
Explanation: 2.3: *Create a stacked_bar_chart with the distribution of the three classes over an aggregated variable (group the data by weekday, vehicle_type, ...). More info on creating stacked bar charts can be found here: http://pandas.pydata.org/pandas-docs/stable/visualization.html#bar-plots
The dataframe you need will require your grouping variables as the index, and 1 column occupancy category, for example:
| Index | Occupancy_Low | Occupancy_Medium | Occupancy_High | Sum_Occupancy |
|-------|----------------|------------------|----------------|---------------|
| IC | 15 | 30 | 10 | 55 |
| S | 20 | 10 | 30 | 60 |
| L | 12 | 9 | 14 | 35 |
If you want the values to be relative (%), add a sum column and use it to divide the occupancy columns
2.4: * To have an idea about the hotspots in the railway network make a scatter plot that depicts the number of visitors per station. Aggregate on the destination station and use the GTFS dataset at iRail to find the geolocation of the stations (stops.txt): https://gtfs.irail.be/nmbs
End of explanation
"""
def get_seconds_since_midnight(x):
    """Return the number of whole seconds elapsed since midnight of *x*'s day."""
    day_start = x.replace(hour=0, minute=0, second=0, microsecond=0)
    elapsed = x - day_start
    # timedelta.seconds is the in-day seconds component (microseconds dropped).
    return elapsed.seconds
def get_line_number(x):
    """Extract the numeric part of a vehicle identifier such as 'IC538'.

    Returns the trailing digits as an int when *x* is uppercase letters
    followed by digits; otherwise returns *x* unchanged.
    """
    # Match once instead of compiling and matching the pattern twice per
    # call as the original did (the re module caches compiled patterns).
    match = re.match(r"^[A-Z]+([0-9]+)$", x)
    if match:
        return int(match.group(1))
    return x
# Derive simple numeric features on both the training frame and the
# held-out test frame, then visualize their pairwise relations.
training['seconds_since_midnight'] = training['querytime'].apply(get_seconds_since_midnight)
training['month'] = training['querytime'].apply(lambda x: x.month)
# Encode the target ordinally (low < medium < high).
training['occupancy'] = training['post.occupancy'].map({'low': 0, 'medium': 1, 'high': 2})
out_test['seconds_since_midnight'] = out_test['querytime'].apply(get_seconds_since_midnight)
out_test['month'] = out_test['querytime'].apply(lambda x: x.month)
# Correlation heat map (absolute values) of the features vs the target.
fig, ax = plt.subplots(figsize=(5, 5))
corr_frame = training[['seconds_since_midnight', 'month', 'occupancy']].corr()
cax = ax.matshow(abs(corr_frame))
fig.colorbar(cax)
tickpos = np.array(range(0,len(corr_frame.columns)))
plt.xticks(tickpos,corr_frame.columns, rotation='vertical')
plt.yticks(tickpos,corr_frame.columns, rotation='horizontal')
plt.grid(None)
# Scatter matrix with KDE diagonals for the same three columns.
# NOTE(review): pd.tools.plotting was removed in modern pandas
# (use pandas.plotting.scatter_matrix) -- confirm the pinned version.
pd.tools.plotting.scatter_matrix(training[['seconds_since_midnight', 'month', 'occupancy']],
                                 alpha=0.2, diagonal='kde', figsize=(10,10))
plt.grid(None)
"""
Explanation: 3. Predictive modeling: creating a baseline
Now that we have processed, cleaned and explored our data it is time to create a predictive model that predicts the occupancies of future Belgian trains. We will start with applying Logistic Regression on features extracted from our initial dataset. Some code has already been given to get you started.
Feature extraction
Some possible features include (bold ones are already implemented for you):
The day of the week
The number of seconds since midnight of the querytime
The train vehicle type (IC/P/L/...)
The line number
The line category
Information about the from- and to-station (their identifier, their coordinates, the number of visitors, ...)
The month
A binary variable indicating whether a morning (6-10AM) or evening jam (3-7PM) is ongoing
...
In order to reveal relations between these features, you can try to plot them with:
<a href="https://datascience.stackexchange.com/questions/10459/calculation-and-visualization-of-correlation-matrix-with-pandas"> Correlation plot </a>
<a href="http://pandas.pydata.org/pandas-docs/stable/visualization.html#visualization-scatter-matrix"> Scatter matrix </a>
These relations can be important since some models do not perform very well when features are highly correlated.
Feature normalization
Most models require the features to have a similar range, preferably [0,1]. A minmax scaler is usually sufficient: x -> (x - xmin) / (xmax - xmin)
Scikit will be used quite extensively from now on, have a look here for preprocessing functionality: http://scikit-learn.org/stable/modules/classes.html#module-sklearn.preprocessing
Dealing with categorical variables
All machine learning techniques, except for tree-based methods, assume that variables are ordinal (you can define an order). For some variables, such as the day of the week or the train vehicle type, this is not true. Therefore, a pre-processing step is required that transforms these categorical variables. A few examples of such transformations are:
One-hot-encoding (supported by pandas: get_dummies )
Binary encoding: map each variable to a number, binary encode these numbers and use each bit as a feature (advantage of this technique is that it introduces a lot less new variables in contrast to one-hot-encoding)
Hash encoding
...
3.1: Extract more features than the two given ones. Make sure you extract at least one categorical variable, and transform it! What gains (in terms of current accuracy (0.417339475755)) do you achieve with new features in comparison to the given code?
End of explanation
"""
# Baseline: 5-fold stratified CV of a plain LogisticRegression on the
# two engineered features, compared against the majority-class rate.
skf = StratifiedKFold(n_splits=5, random_state=1337)
X = training[['seconds_since_midnight', 'month']]
y = training['occupancy']
cms = []
accs = []
for train_index, test_index in skf.split(X, y):
    X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]
    y_train, y_test = y[train_index], y[test_index]
    log_reg = LogisticRegression()
    log_reg.fit(X_train, y_train)
    predictions = log_reg.predict(X_test)
    cm = confusion_matrix(y_test, predictions)
    cms.append(cm)
    accs.append(accuracy_score(y_test, predictions))
    print(classification_report(y_test, predictions))
    #accs.append(sum([float(cm[i][i]) for i in range(len(cm))])/np.sum(cm))
# Element-wise mean of the per-fold confusion matrices.
print('Confusion matrix:\n', np.mean(cms, axis=0))
print('Avg accuracy', np.mean(accs), '+-', np.std(accs))
# Majority-class ('low' == 0) baseline accuracy for reference.
print('Predict all lows', float(len(y[y == 0]))/float(len(y)))
"""
Explanation: We train our model on a 'training set' and evaluate it on the testset. Functionality for making this split automatically can be found <a href="http://scikit-learn.org/stable/modules/classes.html#module-sklearn.model_selection"> here </a>
Our first model is a linear logistic regression model, more information on the API <a href="http://scikit-learn.org/stable/modules/classes.html#module-sklearn.linear_model"> here </a>
The confusion matrix is part of the <a href="http://scikit-learn.org/stable/modules/classes.html#module-sklearn.metrics"> metrics functionality </a>
End of explanation
"""
# Impute the missing 'class' labels by training on rows where the class
# is known; codes 0 and 14 are the encodings of the null/unknown classes.
training_class = training_holidays[training_holidays.class_enc != 0]
training_class = training_class[training_class.class_enc != 14]
test_class = training_holidays[(training_holidays.class_enc == 0)|(training_holidays.class_enc == 14)]
training_class["class_pred"]=training_class["class_enc"]
training_holidays_enc = pd.concat([training_class,test_class])
X_train = training_class[['seconds_since_midnight','weekday', 'month','id','id_2']]
X_test = test_class[['seconds_since_midnight','weekday', 'month','id','id_2']]
y_train = training_class['class_enc']
# Class balance of the occupancy target in each split.
# NOTE(review): `train` and `test` are used here before they are
# assigned two lines below -- a notebook cell-order artifact; this only
# works when the cells were previously run out of order.
train.occupancy.value_counts()/train.shape[0]
test.occupancy.value_counts()/test.shape[0]
out_test_holidays_druktes.occupancy.value_counts()/out_test_holidays_druktes.shape[0]
# NOTE(review): sklearn.cross_validation is the deprecated module; the
# model_selection import below is the modern equivalent -- unify them.
from sklearn.cross_validation import train_test_split
train, test = train_test_split(training_holidays_druktes, test_size=0.2, random_state=42)
from sklearn.linear_model import SGDClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import f1_score
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import VotingClassifier
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
train[0:1]
# NOTE(review): X_train/y_train are taken from the FULL frame while
# X_test/y_test come from the 20% split, so every test row is also a
# training row (leakage) -- the test scores below are optimistic.
X_train = training_holidays_druktes[['seconds_since_midnight','drukte_from','drukte_to','school','name_enc','class_enc','day__0','day__1','day__2','day__3','day__4','day__5','day__6','from_lat','from_lng','des_lat','des_lng','trend']]
X_test = test[['seconds_since_midnight','drukte_from','drukte_to','school','name_enc','class_enc','day__0','day__1','day__2','day__3','day__4','day__5','day__6','from_lat','from_lng','des_lat','des_lng','trend']]
y_train = training_holidays_druktes['occupancy']
y_test = test['occupancy']
# Drop 'month' from the feature set when predicting unseen months.
# AdaBoost with a small exhaustive grid over ensemble size and learning rate.
ac = AdaBoostClassifier()
ada_param_grid = {'n_estimators': [10, 30, 100, 300, 1000],
                  'learning_rate': [0.1, 0.3, 1.0, 3.0]}
ac_grid = GridSearchCV(ac,ada_param_grid,cv=3,
                       scoring='accuracy')
ac_grid.fit(X_train, y_train)
ac = ac_grid.best_estimator_
ac.fit(X_train, y_train)
print(ac_grid.score(X_train,y_train))
print(ac_grid.score(X_test, y_test))
from scipy.stats import randint as sp_randint
# Random-forest parameter grid (despite the name, searched exhaustively
# via GridSearchCV rather than RandomizedSearchCV).
param_dist = {"max_depth": [7,6, None],
              "max_features": range(1, 5),
              "min_samples_split": range(2, 7),
              "min_samples_leaf": range(1, 7),
              "bootstrap": [True, False],
              "criterion": ["gini", "entropy"]}
# NOTE(review): `rf` is not defined anywhere above in this chunk -- it
# must come from an earlier (not shown) cell; confirm before re-running.
rand = GridSearchCV(rf,param_dist,cv=3,
                    scoring='accuracy')
rand.fit(X_train, y_train)
rf = rand.best_estimator_
rf.fit(X_train, y_train)
print(rf.score(X_train,y_train))
print(rf.score(X_test, y_test))
from sklearn.tree import DecisionTreeClassifier
dtc = DecisionTreeClassifier(random_state=0)
dtc.fit(X_train, y_train)
print(dtc.score(X_train,y_train))
print(dtc.score(X_test, y_test))
# NOTE(review): rf2/rf3/rf4 all reference the same best_estimator_
# object as rf; VotingClassifier refits clones, so this effectively
# quadruples the random forest's vote weight in the hard vote below.
rf2 = rand.best_estimator_
rf3 = rand.best_estimator_
rf4 = rand.best_estimator_
voting_clf = VotingClassifier(
    estimators=[('ac', ac), ('rf', rf), ('dtc', dtc),('rf2', rf2), ('rf3', rf3), ('rf4', rf4)],
    voting='hard'
)
from sklearn.metrics import accuracy_score
for clf in (ac, rf, dtc, voting_clf):
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    print(clf.__class__.__name__, accuracy_score(y_test, y_pred))
# Inspect which features the tuned forest relies on.
pd.DataFrame([X_train.columns, rf.feature_importances_])
# Predict the unlabeled competition set and write the submission file.
y_predict_test = voting_clf.predict(out_test_holidays_druktes[['seconds_since_midnight','drukte_from','drukte_to','school','name_enc','class_enc','day__0','day__1','day__2','day__3','day__4','day__5','day__6','from_lat','from_lng','des_lat','des_lng','trend']])
out_test_holidays_druktes["occupancy"] = y_predict_test
out_test_holidays_druktes.occupancy.value_counts()/out_test_holidays_druktes.shape[0]
out_test_holidays_druktes[['seconds_since_midnight','drukte_from','drukte_to','name_enc','class_enc','day__0','day__1','day__2','day__3','day__4','day__5','day__6','trend','occupancy']][0:100]
out_test_holidays_druktes[["id","occupancy"]].to_csv('predictions.csv',index=False)
"""
Explanation: Since we have a lot of 'Null' (+-1/3rd) values for our 'class' feature, and we don't want to throw that away, we can try to predict these labels based on the other features; we get +75% accuracy, so that seems sufficient. But we can't forget to do the same thing for the test set!
End of explanation
"""
# Nested cross-validation: a 3-fold GridSearchCV over LogisticRegression
# hyper-parameters is run inside each of the 5 outer stratified folds.
skf = StratifiedKFold(n_splits=5, random_state=1337)
X = training[['seconds_since_midnight', 'month']]
y = training['occupancy']
cms = []
accs = []
parameters = {#'penalty': ['l1', 'l2'], # No penalty tuning, cause 'l1' is only supported by liblinear
              # It can be interesting to manually take a look at 'l1' with 'liblinear', since LASSO
              # provides sparse solutions (boils down to the fact that LASSO does some feature selection for you)
              'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag'],
              'tol': [1e-4, 1e-6, 1e-8],
              'C': [1e-2, 1e-1, 1.0, 1e1],
              'max_iter': [1e2, 1e3]
             }
for train_index, test_index in skf.split(X, y):
    X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]
    y_train, y_test = y[train_index], y[test_index]
    tuned_log_reg = GridSearchCV(LogisticRegression(penalty='l2'), parameters, cv=3,
                                 scoring='accuracy')
    tuned_log_reg.fit(X_train, y_train)
    print(tuned_log_reg.best_params_)
    # GridSearchCV delegates predict() to the refit best estimator.
    predictions = tuned_log_reg.predict(X_test)
    cm = confusion_matrix(y_test, predictions)
    cms.append(cm)
    accs.append(accuracy_score(y_test, predictions))
    print(classification_report(y_test, predictions))
print('Confusion matrix:\n', np.mean(cms, axis=0))
print('Avg accuracy', np.mean(accs), '+-', np.std(accs))
# Majority-class baseline for reference.
print('Predict all lows', float(len(y[y == 0]))/float(len(y)))
"""
Explanation: 4. 'Advanced' predictive modeling: model selection & hyper-parameter tuning
Model evaluation and hyper-parameter tuning
In order to evaluate your model, K-fold cross-validation (https://en.wikipedia.org/wiki/Cross-validation_(statistics) ) is often applied. Here, the data is divided in K chunks, K-1 chunks are used for training while 1 chunk is used for testing. Different metrics exist, such as accuracy, AUC, F1 score, and more. For this lab, we will use accuracy.
Some machine learning techniques, supported by sklearn:
SVMs
Decision Trees
Decision Tree Ensemble: AdaBoost, Random Forest, Gradient Boosting
Multi-Level Perceptrons/Neural Networks
Naive Bayes
K-Nearest Neighbor
...
To tune the different hyper-parameters of a machine learning model, again different techniques exist:
* Grid search: exhaustively try all possible parameter combinations (Code to tune the different parameters of our LogReg model has been given)
* Random search: try a number of random combinations, it has been shown that this is quite equivalent to grid search
4.1: *Choose one or more machine learning techniques, different from Logistic Regression and apply them to our data, with tuned hyper-parameters! You will see that switching techniques in sklearn is really simple! Which model performs best on this data? *
End of explanation
"""
# Load the holiday calendar and left-join it onto both frames by date;
# non-holiday rows get school=0 and name="geen" (Dutch for "none").
holiday_pops = pd.read_json('holidays.json')
holidays = pd.read_json( (holiday_pops['holidays']).to_json(), orient='index')
holidays['date'] = pd.to_datetime(holidays['date'])
holidays.head(1)
# Truncate the query timestamps to whole days so they can join on date.
training["date"] = training["querytime"].values.astype('datetime64[D]')
out_test["date"] = out_test["querytime"].values.astype('datetime64[D]')
training_holidays = pd.merge(training,holidays, how="left", on='date')
training_holidays.school = training_holidays.school.fillna(0)
training_holidays.name = training_holidays.name.fillna("geen")
training_holidays[0:1]
out_test_holidays = pd.merge(out_test,holidays, how="left", on='date')
out_test_holidays.school = out_test_holidays.school.fillna(0)
out_test_holidays.name = out_test_holidays.name.fillna("geen")
out_test_holidays[0:1]
# NOTE(review): .sort() returns a sorted copy; the result is discarded
# here, so this line has no lasting effect (and .sort is deprecated).
out_test_holidays.sort('date')
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
# NOTE(review): fit_transform is called separately on the train and
# test frames, so the same label can map to DIFFERENT integer codes in
# each frame; these codes also feed the hard-coded class_enc == 0/14
# filters later. Fitting one encoder per column on the union of both
# frames would make the encodings consistent.
#encode the names from the holidays (Summer,Christmas...)
training_holidays["name_enc"] = encoder.fit_transform(training_holidays["name"])
out_test_holidays["name_enc"] = encoder.fit_transform(out_test_holidays["name"])
#encode the classes (IC,TGV,L...)
training_holidays["class_enc"] = encoder.fit_transform(training_holidays["post.class"])
out_test_holidays["class_enc"] = encoder.fit_transform(out_test_holidays["post.class"])
training_holidays=training_holidays.rename(columns = {'too':'destination'})
out_test_holidays=out_test_holidays.rename(columns = {'too':'destination'})
stations_df[0:1]
"""
Explanation: 5. Data augmentation with external data sources
There is a unlimited amount of factors that influence the occupancy of a train! Definitely more than the limited amount of data given in the feedback logs. Therefore, we will try to create new features for our dataset using external data sources. Examples of data sources include:
Weather APIs
A holiday calendar
Event calendars
Connection and delay information of the SpitsGidsAPI
Data from the NMBS/SNCB
Twitter and other social media
many, many more
In order to save time, a few 'prepared' files have already been given to you. Of course, you are free to scrape/generate your own data as well:
Hourly weather data for all stations in Belgium, from August till April weather_data.zip
A file which contains the vehicle identifiers and the stations where this vehicle stops line_info.csv
Based on this line_info, you can construct a graph of the rail net in Belgium and apply some fancy graph features (pagerank, edge betweenness, ...) iGraph experiments.ipynb
A file containing the coordinates of a station, and the number of visitors during week/weekend for 2015 station_druktes.csv
A file with some of the holidays (this can definitely be extended) holidays.json
For event data, there is the Eventful API
5.1: Pick one (or more) external data source(s) and link your current data frame to that data source (requires some creativity in most cases). Extract features from your new, linked data source and re-train your model. How much gain did you achieve?
Als we kijken naar "training.id.value_counts()" dan zien we vooral dat het om studenten bestemmingen gaat, misschien komt dat omdat het vooral hen zijn die deze app gebruiken? We moeten dus nadenken wanneer zij de trein nemen, en wat dat kan beinvloeden. Misschien het aantal studenten per station incorporeren?
End of explanation
"""
def transform_druktes(row):
    """Attach station coordinates and visitor counts ('druktes') for the
    origin and destination stations of one feedback row.

    Looks up both stations in the module-level ``stations_df`` and picks
    the Saturday / Sunday / weekday visitor count depending on weekday
    (pandas convention: Monday == 0 ... Sunday == 6).
    """
    start = row['from']
    destination = row['destination']
    day = row['weekday']
    # Resolve each station once instead of re-filtering the frame per column.
    from_station = stations_df[stations_df["from"] == start]
    to_station = stations_df[stations_df["destination"] == destination]
    row['from_lat'] = from_station["latitude"].values[0]
    # BUG FIX: the original took from_lng from the *destination* station
    # and des_lat from the *origin* station (copy-paste slip), so both
    # coordinate pairs mixed the two stations.
    row['from_lng'] = from_station["longitude"].values[0]
    row['des_lat'] = to_station["latitude"].values[0]
    row['des_lng'] = to_station["longitude"].values[0]
    row['zoekterm'] = to_station["zoekterm"].values[0]
    if day == 5:    # Saturday
        row['drukte_from'] = from_station["zaterdag"].values[0]
        row['drukte_to'] = to_station["zaterdag"].values[0]
    elif day == 6:  # Sunday
        row['drukte_from'] = from_station["zondag"].values[0]
        row['drukte_to'] = to_station["zondag"].values[0]
    else:           # Monday-Friday
        row['drukte_from'] = from_station["week"].values[0]
        row['drukte_to'] = to_station["week"].values[0]
    return row
# The training-frame apply is commented out (slow); it assumes
# training_holidays_druktes already exists from a previous run.
#training_holidays_druktes = training_holidays.apply(transform_druktes, axis=1)
out_test_holidays_druktes = out_test_holidays.apply(transform_druktes, axis=1)
# One-hot encode the weekday (columns day__0 .. day__6) on both frames.
training_holidays_druktes = pd.concat([training_holidays_druktes,
                                       pd.get_dummies(training_holidays_druktes['weekday'], prefix="day_"),
                                      ],1)
out_test_holidays_druktes = pd.concat([out_test_holidays_druktes,
                                       pd.get_dummies(out_test_holidays_druktes['weekday'], prefix="day_"),
                                      ],1)
"""
Explanation: Transform all null classes to one null class, maybe try to predict the class? Based on to and from and time
End of explanation
"""
|
phoebe-project/phoebe2-docs | 2.0/tutorials/meshes.ipynb | gpl-3.0 | !pip install -I "phoebe>=2.0,<2.1"
"""
Explanation: Accessing and Plotting Meshes
Setup
Let's first make sure we have the latest version of PHOEBE 2.0 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
End of explanation
"""
%matplotlib inline
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
# Console logger so PHOEBE reports progress/warnings while computing.
logger = phoebe.logger()
# Start from the stock detached-binary system bundle.
b = phoebe.default_binary()
"""
Explanation: As always, let's do imports and initialize a logger and a new Bundle. See Building a System for more details.
End of explanation
"""
# Request the protomesh: each star's mesh in its own frame at periastron.
b.run_compute(protomesh=True)
"""
Explanation: The 'protomesh'
The 'protomesh' is the mesh of each star in its own reference frame at periastron. The coordinates are defined such that the x-axis points towards the other component in the parent orbit.
To build the protomesh, set 'protomesh' to be True, either in the compute options or directly as a keyword argument when calling run_compute.
End of explanation
"""
print b['model'].kinds
print b['model'].datasets
"""
Explanation: You'll see that the resulting model has a single dataset kind ('mesh') and with a dataset tag of 'protomesh'.
End of explanation
"""
b.filter(dataset='protomesh', context='model')
b.filter(dataset='protomesh', context='model', component='primary')
b.get_value(dataset='protomesh', context='model', component='primary', qualifier='teffs')
axs, artists = b.filter(dataset='protomesh', context='model', component='secondary').plot(facecolor='teffs', edgecolor=None)
"""
Explanation: Now let's look at the parameters in the protomesh
End of explanation
"""
b.add_dataset('lc', times=[0,1,2], dataset='lc01')
b.run_compute(pbmesh=True)
"""
Explanation: The 'pbmesh'
'pbmesh' is an automatically-created dataset in the returned model which stores the mesh at every time-point at which it was required to be built by other existing datasets.
Again, these will only be stored in the returned model if pbmesh=True is passed during run_compute or is True in the passed compute options.
End of explanation
"""
print b['model'].kinds
print b['model'].datasets
"""
Explanation: Our model now has dataset kinds for both the 'mesh' and 'lc' and has dataset tags for our newly-created 'lc01' dataset as well as the 'pbmesh' datasets in the model created only because pbmesh=True.
End of explanation
"""
b.filter(dataset='pbmesh', context='model')
b.filter(dataset='pbmesh', context='model', component='primary')
"""
Explanation: This time let's look at the parameters in the 'pbmesh' dataset of the model.
End of explanation
"""
b.filter(kind='mesh', context='model', component='primary')
b.filter(dataset='lc01', kind='mesh', context='model', component='primary')
"""
Explanation: As you can see, the intensities are not available here - their dataset tags match the dataset of the light curve. Instead let's access the mesh by dataset-kind:
End of explanation
"""
axs, artists = b.filter(kind='mesh', context='model', time=1.0).plot(facecolor='intensities@lc01', edgecolor='teffs')
"""
Explanation: To plot the intensities as color on the mesh, we can just plot the mesh and then reference the correct column by using twig access:
End of explanation
"""
b.get_value('times@lc01@dataset')
b.add_dataset('mesh', times=[0.5, 1.5], dataset='mesh01')
"""
Explanation: The 'Mesh' Dataset Kind
If you want to force the plot itself to build at specific times but not have any observables (necessarily) computed or filled at those times, you can create a mesh dataset.
Let's create a mesh dataset that fills in the missing times from our lc dataset.
End of explanation
"""
b.run_compute(protomesh=False, pbmesh=False)
"""
Explanation: Now let's run_compute with protomesh and pbmesh set to False (these will default to the values in the compute options - which themselves are defaulted to False).
End of explanation
"""
print b['model'].kinds
print b['model'].datasets
"""
Explanation: As expected, the resulting model has dataset kinds for both mesh and lc, as well as datasets for 'mesh01' and 'lc01' - but the 'pbmesh' and 'protomesh' entries are no longer created (since protomesh and pbmesh are both False).
End of explanation
"""
b.filter(kind='mesh', context='model').times
"""
Explanation: The meshes are only stored at the times of the mesh dataset - not at the times of the lc dataset.
End of explanation
"""
b.get_value(kind='mesh', context='model', dataset='lc01', time=0.5, qualifier='intensities', component='primary')
"""
Explanation: Since there was no lc requested at these times, the 'intensities' columns will be empty.
End of explanation
"""
b.filter(dataset='mesh01', kind='mesh', context='model', component='primary', time=0.5)
axs, artists = b.filter(dataset='mesh01', kind='mesh', context='model', time=0.5).plot(facecolor='teffs', edgecolor=None)
"""
Explanation: But we can still plot any of the dataset-independent quantities
End of explanation
"""
b.run_compute(pbmesh=True)
b.filter(kind='mesh', context='model').times
"""
Explanation: If you want the meshes stored at both the times in the 'mesh' dataset and all other datasets, simply set pbmesh to True.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/ec-earth-consortium/cmip6/models/sandbox-2/atmoschem.ipynb | gpl-3.0 | # DO NOT EDIT !
from pyesdoc.ipython.model_topic import NotebookOutput
# DO NOT EDIT !
DOC = NotebookOutput('cmip6', 'ec-earth-consortium', 'sandbox-2', 'atmoschem')
"""
Explanation: ES-DOC CMIP6 Model Properties - Atmoschem
MIP Era: CMIP6
Institute: EC-EARTH-CONSORTIUM
Source ID: SANDBOX-2
Topic: Atmoschem
Sub-Topics: Transport, Emissions Concentrations, Gas Phase Chemistry, Stratospheric Heterogeneous Chemistry, Tropospheric Heterogeneous Chemistry, Photo Chemistry.
Properties: 84 (39 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:53:59
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Software Properties
3. Key Properties --> Timestep Framework
4. Key Properties --> Timestep Framework --> Split Operator Order
5. Key Properties --> Tuning Applied
6. Grid
7. Grid --> Resolution
8. Transport
9. Emissions Concentrations
10. Emissions Concentrations --> Surface Emissions
11. Emissions Concentrations --> Atmospheric Emissions
12. Emissions Concentrations --> Concentrations
13. Gas Phase Chemistry
14. Stratospheric Heterogeneous Chemistry
15. Tropospheric Heterogeneous Chemistry
16. Photo Chemistry
17. Photo Chemistry --> Photolysis
1. Key Properties
Key properties of the atmospheric chemistry
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of atmospheric chemistry model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of atmospheric chemistry model code.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.chemistry_scheme_scope')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "troposhere"
# "stratosphere"
# "mesosphere"
# "mesosphere"
# "whole atmosphere"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Chemistry Scheme Scope
Is Required: TRUE Type: ENUM Cardinality: 1.N
Atmospheric domains covered by the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.basic_approximations')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: STRING Cardinality: 1.1
Basic approximations made in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.prognostic_variables_form')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "3D mass/mixing ratio for gas"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables Form
Is Required: TRUE Type: ENUM Cardinality: 1.N
Form of prognostic variables in the atmospheric chemistry component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.number_of_tracers')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 1.6. Number Of Tracers
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of advected tracers in the atmospheric chemistry model
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.family_approach')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.7. Family Approach
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry calculations (not advection) generalized into families of species?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.coupling_with_chemical_reactivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 1.8. Coupling With Chemical Reactivity
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Atmospheric chemistry transport scheme turbulence is coupled with chemical reactivity?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Software Properties
Software properties of aerosol code
2.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 2.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Operator splitting"
# "Integrated"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Timestep Framework
Timestepping in the atmospheric chemistry model
3.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Mathematical method deployed to solve the evolution of a given variable
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_advection_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.2. Split Operator Advection Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemical species advection (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_physical_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.3. Split Operator Physical Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for physics (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_chemistry_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.4. Split Operator Chemistry Timestep
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Timestep for chemistry (in seconds).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_alternate_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.5. Split Operator Alternate Order
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_timestep')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 3.6. Integrated Timestep
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Timestep for the atmospheric chemistry model (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.integrated_scheme_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Explicit"
# "Implicit"
# "Semi-implicit"
# "Semi-analytic"
# "Impact solver"
# "Back Euler"
# "Newton Raphson"
# "Rosenbrock"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3.7. Integrated Scheme Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Specify the type of timestep scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.turbulence')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Timestep Framework --> Split Operator Order
**
4.1. Turbulence
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for turbulence scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.convection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.2. Convection
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for convection scheme This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.precipitation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.3. Precipitation
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for precipitation scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.emissions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.4. Emissions
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for emissions scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.5. Deposition
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for deposition scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.gas_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.6. Gas Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for gas phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.tropospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.7. Tropospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for tropospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.stratospheric_heterogeneous_phase_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.8. Stratospheric Heterogeneous Phase Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for stratospheric heterogeneous phase chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.photo_chemistry')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.9. Photo Chemistry
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for photo chemistry scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.timestep_framework.split_operator_order.aerosols')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 4.10. Aerosols
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Call order for aerosols scheme. This should be an integer greater than zero, and may be the same value as for another process if they are calculated at the same time.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Tuning Applied
Tuning methodology for atmospheric chemistry component
5.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process-oriented metrics, and the possible conflicts with parameterization-level tuning. In particular, describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Grid
Atmospheric chemistry grid
6.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe the general structure of the atmospheric chemistry grid
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.matches_atmosphere_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.2. Matches Atmosphere Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
*Does the atmospheric chemistry grid match the atmosphere grid?*
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Grid --> Resolution
Resolution in the atmospheric chemistry grid
7.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Canonical Horizontal Resolution
Is Required: FALSE Type: STRING Cardinality: 0.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.3. Number Of Horizontal Gridpoints
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 7.4. Number Of Vertical Levels
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.grid.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 7.5. Is Adaptive Grid
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Transport
Atmospheric chemistry transport
8.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview of transport implementation
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.use_atmospheric_transport')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.2. Use Atmospheric Transport
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is transport handled by the atmosphere, rather than within atmospheric chemistry?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.transport.transport_details')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Transport Details
Is Required: FALSE Type: STRING Cardinality: 0.1
If transport is handled within the atmospheric chemistry scheme, describe it.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Emissions Concentrations
Atmospheric chemistry emissions
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric chemistry emissions
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Vegetation"
# "Soil"
# "Sea surface"
# "Anthropogenic"
# "Biomass burning"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Emissions Concentrations --> Surface Emissions
**
10.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of the chemical species emitted at the surface that are taken into account in the emissions scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define chemical species emitted directly into model layers above the surface (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed via a climatology, and the nature of the climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.surface_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 10.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted at the surface and specified via any other method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.sources')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Aircraft"
# "Biomass burning"
# "Lightning"
# "Volcanos"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Emissions Concentrations --> Atmospheric Emissions
TO DO
11.1. Sources
Is Required: FALSE Type: ENUM Cardinality: 0.N
Sources of chemical species emitted in the atmosphere that are taken into account in the emissions scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.method')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Climatology"
# "Spatially uniform mixing ratio"
# "Spatially uniform concentration"
# "Interactive"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Method
Is Required: FALSE Type: ENUM Cardinality: 0.N
Methods used to define the chemical species emitted in the atmosphere (several methods allowed because the different species may not use the same method).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_climatology_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.3. Prescribed Climatology Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed via a climatology (E.g. CO (monthly), C2H6 (constant))
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.prescribed_spatially_uniform_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.4. Prescribed Spatially Uniform Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and prescribed as spatially uniform
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.interactive_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.5. Interactive Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an interactive method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.atmospheric_emissions.other_emitted_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 11.6. Other Emitted Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of chemical species emitted in the atmosphere and specified via an "other method"
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_lower_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Emissions Concentrations --> Concentrations
TO DO
12.1. Prescribed Lower Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the lower boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.emissions_concentrations.concentrations.prescribed_upper_boundary')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12.2. Prescribed Upper Boundary
Is Required: FALSE Type: STRING Cardinality: 0.1
List of species prescribed at the upper boundary.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 13. Gas Phase Chemistry
Atmospheric chemistry transport
13.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview gas phase atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "HOx"
# "NOy"
# "Ox"
# "Cly"
# "HSOx"
# "Bry"
# "VOCs"
# "isoprene"
# "H2O"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13.2. Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Species included in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_bimolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.3. Number Of Bimolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of bi-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_termolecular_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.4. Number Of Termolecular Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of ter-molecular reactions in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_tropospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.5. Number Of Tropospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_stratospheric_heterogenous_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.6. Number Of Stratospheric Heterogenous Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_advected_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.7. Number Of Advected Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of advected species in the gas phase chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.8. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of gas phase species for which the concentration is updated in the chemical solver assuming photochemical steady state
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.9. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.10. Wet Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet deposition included? Wet deposition describes the moist processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.gas_phase_chemistry.wet_oxidation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 13.11. Wet Oxidation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is wet oxidation included? Oxidation describes the loss of electrons or an increase in oxidation state by a molecule
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 14. Stratospheric Heterogeneous Chemistry
Atmospheric chemistry stratospheric heterogeneous chemistry
14.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview stratospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Cly"
# "Bry"
# "NOy"
# TODO - please enter value(s)
"""
Explanation: 14.2. Gas Phase Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Gas phase species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Polar stratospheric ice"
# "NAT (Nitric acid trihydrate)"
# "NAD (Nitric acid dihydrate)"
# "STS (supercooled ternary solution aerosol particule))"
# TODO - please enter value(s)
"""
Explanation: 14.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the stratospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.sedimentation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.5. Sedimentation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is sedimentation included in the stratospheric heterogeneous chemistry scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.stratospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 14.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the stratospheric heterogeneous chemistry scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15. Tropospheric Heterogeneous Chemistry
Atmospheric chemistry tropospheric heterogeneous chemistry
15.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview tropospheric heterogenous atmospheric chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.gas_phase_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 15.2. Gas Phase Species
Is Required: FALSE Type: STRING Cardinality: 0.1
List of gas phase species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.aerosol_species')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Sulphate"
# "Nitrate"
# "Sea salt"
# "Dust"
# "Ice"
# "Organic"
# "Black carbon/soot"
# "Polar stratospheric ice"
# "Secondary organic aerosols"
# "Particulate organic matter"
# TODO - please enter value(s)
"""
Explanation: 15.3. Aerosol Species
Is Required: FALSE Type: ENUM Cardinality: 0.N
Aerosol species included in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.number_of_steady_state_species')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.4. Number Of Steady State Species
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of steady state species in the tropospheric heterogeneous chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.interactive_dry_deposition')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.5. Interactive Dry Deposition
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is dry deposition interactive (as opposed to prescribed)? Dry deposition describes the dry processes by which gaseous species deposit themselves on solid surfaces thus decreasing their concentration in the air.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.tropospheric_heterogeneous_chemistry.coagulation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 15.6. Coagulation
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is coagulation included in the tropospheric heterogeneous chemistry scheme?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Photo Chemistry
Atmospheric chemistry photo chemistry
16.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview atmospheric photo chemistry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.number_of_reactions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 16.2. Number Of Reactions
Is Required: TRUE Type: INTEGER Cardinality: 1.1
The number of reactions in the photo-chemistry scheme.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Offline (clear sky)"
# "Offline (with clouds)"
# "Online"
# TODO - please enter value(s)
"""
Explanation: 17. Photo Chemistry --> Photolysis
Photolysis scheme
17.1. Method
Is Required: TRUE Type: ENUM Cardinality: 1.1
Photolysis scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.atmoschem.photo_chemistry.photolysis.environmental_conditions')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17.2. Environmental Conditions
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any environmental conditions taken into account by the photolysis scheme (e.g. whether pressure- and temperature-sensitive cross-sections and quantum yields in the photolysis calculations are modified to reflect the modelled conditions.)
End of explanation
"""
|
espressomd/espresso | doc/tutorials/active_matter/active_matter.ipynb | gpl-3.0 | %matplotlib inline
# --- Setup cell for the active-matter tutorial (enhanced-diffusion part) ---
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 18})  # larger default font for the tutorial figures
import tqdm
import numpy as np
import espressomd.observables
import espressomd.accumulators
# Fail fast unless ESPResSo was compiled with the features this tutorial needs:
# self-propulsion (ENGINE), rotational dynamics, per-particle mass and
# rotational inertia, and CUDA.
espressomd.assert_features(
["ENGINE", "ROTATION", "MASS", "ROTATIONAL_INERTIA", "CUDA"])
# Parameters for the enhanced-diffusion (ED) simulation.
ED_PARAMS = {'time_step': 0.01,
'box_l': 3*[10.],  # cubic box, edge length 10 in each dimension
'skin': 0.4,  # cell-system skin (Verlet-list buffer)
'active_velocity': 5,  # self-propulsion speed of the active particle
'kT': 1,  # thermostat temperature
'gamma': 1,  # translational friction coefficient
'gamma_rotation': 1,  # rotational friction coefficient
'mass': 0.1,
'rinertia': 3*[1.],  # isotropic rotational inertia tensor (diagonal)
'corr_tmax': 100}  # max lag time for correlators — confirm against the accumulator setup below
# Number of integration steps for sampling the ED observables.
ED_N_SAMPLING_STEPS = 5000000
# Create the simulation box and apply the integration parameters.
system = espressomd.System(box_l=ED_PARAMS['box_l'])
system.cell_system.skin = ED_PARAMS['skin']
system.time_step = ED_PARAMS['time_step']
"""
Explanation: Active Matter
Table of Contents
Introduction
Active particles
Enhanced Diffusion
Rectification
Hydrodynamics of self-propelled particles
Further reading
Introduction
In this tutorial we explore the ways to simulate self-propulsion in the
simulation software package ESPResSo. We consider three examples that illustrate
the properties of these systems. First, we study the concept of enhanced
diffusion of a self-propelled particle. Second, we investigate rectification in
an asymmetric geometry. Finally, we determine the flow field around a
self-propelled particle using lattice-Boltzmann simulations (LB). These three
subsections should give insight into the basics of simulating active matter
with ESPResSo. This tutorial assumes basic knowledge of Python and ESPResSo,
as well as the use of lattice-Boltzmann within ESPResSo. It is therefore
recommended to go through the relevant tutorials first, before attempting this one.
Active particles
Active matter is a term that describes a class of systems, in which energy is
constantly consumed to perform work. These systems are therefore highly
out-of-equilibrium (thermodynamically) and (can) thus defy description using
the standard framework of statistical mechanics. Active systems are, however,
ubiquitous. On our length scale, we encounter flocks of
birds, schools of fish, and, of course, humans;
on the mesoscopic level examples are found in bacteria, sperm, and algae;
and on the nanoscopic level, transport along the cytoskeleton is achieved by
myosin motors. This exemplifies the range of length scales
which the field of active matter encompasses, as well as its diversity. Recent
years have seen a huge increase in studies into systems consisting of
self-propelled particles, in particular artificial ones in the colloidal
regime.
These self-propelled colloids show promise as physical model systems for
complex biological behavior (bacteria moving collectively) and could be used to
answer fundamental questions concerning out-of-equilibrium statistical
physics.
Simulations can also play an important role in this regard, as the
parameters are more easily tunable and the results ‘cleaner’ than in
experiments. The above should give you some idea of the importance of
the field of active matter and why you should be interested in
performing simulations in it.
Active Particles in ESPResSo
The <tt>ENGINE</tt> feature offers intuitive syntax for adding self-propulsion to
a particle. The propulsion will occur along the vector that defines the
orientation of the particle (henceforth referred to as ‘director’). In ESPResSo
the orientation of the particle is defined by a quaternion; this in turn
defines a rotation matrix that acts on the particle's initial orientation
(along the z-axis), which then defines the particles current orientation
through the matrix-oriented vector.
Within the <tt>ENGINE</tt> feature there are two ways of setting up a self-propelled
particle, with and without hydrodynamic interactions. The particle without
hydrodynamic interactions will be discussed first, as it is the simplest case.
Self-Propulsion without Hydrodynamics
For this type of self-propulsion the Langevin thermostat can be used. The
Langevin thermostat imposes a velocity-dependent friction on a particle.
When a constant force is applied along the director,
the friction causes the particle to attain a terminal velocity, due to the balance
of driving and friction force, see <a href='#fig:balance'>Fig. 1</a>. The exponent with
which the particle's velocity relaxes towards this value depends on the
strength of the friction and the mass of the particle. The <tt>ENGINE</tt>
feature implies that rotation of the particles (the <tt>ROTATION</tt> feature) is
compiled into ESPResSo. The particle can thus reorient due to external torques or
due to thermal fluctuations, whenever the rotational degrees of freedom are
thermalized. Note that the rotation of the particles has to be enabled
explicitly via their <tt>ROTATION</tt> property. This ‘engine’ building block can
be connected to other particles, e.g., via the virtual sites (rigid
body) to construct complex self-propelled objects.
<a id='fig:balance'></a>
<figure><img src="figures/friction.svg" style="float: center; width: 40%"/>
<center>
<figcaption>Fig. 1: A balance of the driving force in the
direction defined by the ‘director’ unit vector and the friction due to
the Langevin thermostat results in a constant terminal
velocity.</figcaption>
</center>
</figure>
Enhanced Diffusion
First we import the necessary modules, define the parameters and set up the system.
End of explanation
"""
# Mean-square displacement: correlate squared displacements of the active
# and passive particle (part_act / part_pass are created in the exercise
# cell above), componentwise; components are summed later.
pos_obs = espressomd.observables.ParticlePositions(
    ids=[part_act.id, part_pass.id])
msd = espressomd.accumulators.Correlator(obs1=pos_obs,
                                         corr_operation="square_distance_componentwise",
                                         delta_N=1,
                                         tau_max=ED_PARAMS['corr_tmax'],
                                         tau_lin=16)
system.auto_update_accumulators.add(msd)
# Velocity autocorrelation function (VACF).
vel_obs = espressomd.observables.ParticleVelocities(
    ids=[part_act.id, part_pass.id])
vacf = espressomd.accumulators.Correlator(obs1=vel_obs,
                                          corr_operation="componentwise_product",
                                          delta_N=1,
                                          tau_max=ED_PARAMS['corr_tmax'],
                                          tau_lin=16)
system.auto_update_accumulators.add(vacf)
# Angular-velocity autocorrelation function (AVACF).
ang_obs = espressomd.observables.ParticleAngularVelocities(
    ids=[part_act.id, part_pass.id])
avacf = espressomd.accumulators.Correlator(obs1=ang_obs,
                                           corr_operation="componentwise_product",
                                           delta_N=1,
                                           tau_max=ED_PARAMS['corr_tmax'],
                                           tau_lin=16)
system.auto_update_accumulators.add(avacf)
"""
Explanation: Exercise
Set up a Langevin thermostat for translation and rotation of the particles.
python
system.thermostat.set_langevin(kT=ED_PARAMS['kT'],
gamma=ED_PARAMS['gamma'],
gamma_rotation=ED_PARAMS['gamma_rotation'],
seed=42)
The configuration for the Langevin-based swimming is exposed as an attribute of
the <tt>ParticleHandle</tt> class of ESPResSo, which represents a particle in the
simulation. You can either set up the self-propulsion during the creation of a
particle or at a later stage.
Exercise
Set up one active and one passive particle, call them part_act and part_pass (Hint: see the docs)
Use ED_PARAMS for the necessary parameters
python
part_act = system.part.add(pos=[5.0, 5.0, 5.0], swimming={'v_swim': ED_PARAMS['active_velocity']},
mass=ED_PARAMS['mass'], rotation=3 * [True], rinertia=ED_PARAMS['rinertia'])
part_pass = system.part.add(pos=[5.0, 5.0, 5.0],
mass=ED_PARAMS['mass'], rotation=3 * [True], rinertia=ED_PARAMS['rinertia'])
Next we set up three ESPResSo correlators for the Mean Square Displacement (MSD), Velocity Autocorrelation Function (VACF) and the Angular Velocity Autocorrelation Function (AVACF).
End of explanation
"""
# Integrate in 100 chunks so tqdm can show progress.
for i in tqdm.tqdm(range(100)):
    system.integrator.run(int(ED_N_SAMPLING_STEPS/100))
# Detach and finalize each correlator before reading its results.
system.auto_update_accumulators.remove(msd)
msd.finalize()
system.auto_update_accumulators.remove(vacf)
vacf.finalize()
system.auto_update_accumulators.remove(avacf)
avacf.finalize()
# Sum over the three Cartesian components to obtain scalar correlations.
taus_msd = msd.lag_times()
msd_result = msd.result()
msd_result = np.sum(msd_result, axis=2)
taus_vacf = vacf.lag_times()
vacf_result = np.sum(vacf.result(), axis=2)
taus_avacf = avacf.lag_times()
avacf_result = np.sum(avacf.result(), axis=2)
# Log-log MSD plot; start at the second lag time so log(tau=0) is avoided.
fig_msd = plt.figure(figsize=(10, 6))
plt.plot(taus_msd, msd_result[:, 0], label='active')
plt.plot(taus_msd, msd_result[:, 1], label='passive')
plt.xlim((taus_msd[1], None))
plt.loglog()
plt.xlabel('t')
plt.ylabel('MSD(t)')
plt.legend()
plt.show()
"""
Explanation: No more setup needed! We can run the simulation and plot our observables.
End of explanation
"""
def acf_stable_regime(x, y):
    """
    Remove the noisy tail in autocorrelation functions of finite time series.

    Parameters
    ----------
    x : array_like
        Lag times.
    y : array_like
        Autocorrelation values corresponding to ``x``.

    Returns
    -------
    tuple of numpy.ndarray
        ``(x, y)`` restricted to lags before the first zero crossing of
        ``y`` (minus a two-point safety margin), with the tau=0 entry
        dropped so the data can be shown on a log-log plot.

    Raises
    ------
    ValueError
        If the correlation decays to zero so early that no stable regime
        remains.
    """
    x = np.asarray(x)
    y = np.asarray(y)
    crossings = np.flatnonzero(y <= 0.)
    if crossings.size == 0:
        # No zero crossing within the sampled lags: the whole series
        # (except tau=0) is usable.  The previous np.argmax-based version
        # mapped this valid case to cut == -2 and failed its assert.
        cut = y.size
    else:
        # Stop two points before the first crossing to stay clear of the
        # noisy region around zero.
        cut = int(crossings[0]) - 2
    if cut < 1:
        # Explicit raise instead of `assert`: asserts are stripped under -O.
        raise ValueError("autocorrelation decays too quickly; no stable "
                         "regime before the first zero crossing")
    return (x[1:cut], y[1:cut])
# Plot only the statistically stable part of each autocorrelation function,
# on log-log axes (skipping tau=0).
fig_vacf = plt.figure(figsize=(10, 6))
plt.plot(*acf_stable_regime(taus_vacf, vacf_result[:, 0]), label='active')
plt.plot(*acf_stable_regime(taus_vacf, vacf_result[:, 1]), label='passive')
plt.xlim((taus_vacf[1], None))
plt.loglog()
plt.xlabel('t')
plt.ylabel('VACF(t)')
plt.legend()
plt.show()
# Same for the angular-velocity autocorrelation.
fig_avacf = plt.figure(figsize=(10, 6))
plt.plot(*acf_stable_regime(taus_avacf, avacf_result[:, 0]), label='active')
plt.plot(*acf_stable_regime(taus_avacf, avacf_result[:, 1]), label='passive')
plt.xlim((taus_avacf[1], None))
plt.loglog()
plt.xlabel('t')
plt.ylabel('AVACF(t)')
plt.legend()
plt.show()
"""
Explanation: The Mean Square Displacement of an active particle is characterized by a longer ballistic regime and an increased diffusion coefficient for longer lag times. In the overdamped limit it is given by
$$
\langle r^{2}(t) \rangle = 6 D t + \frac{v^{2} \tau_{R}^{2}}{2} \left[ \frac{2 t}{\tau_{R}} + \exp\left( \frac{-2t}{\tau_{R}} \right) - 1 \right],
$$
where $\tau_{R} = \frac{8\pi\eta R^{3}}{k_{B} T}$ is the characteristic time scale for rotational diffusion and $D = \frac{k_B T}{\gamma}$ is the translational diffusion coefficient.
For small times ($t \ll \tau_{R}$) the motion is ballistic
$$\langle r^{2}(t) \rangle = 6 D t + v^{2} t^{2},$$
while for long times ($t \gg \tau_{R}$) the motion is diffusive
$$\langle r^{2}(t) \rangle = (6 D + v^{2}\tau_{R}) t.$$
Note that no matter the strength of the activity, provided it is some finite value, the crossover between ballistic motion and enhanced diffusion is controlled by the rotational diffusion time.
The passive particle also displays a crossover from a ballistic to a diffusive motion. However, the crossover time $\tau_{C}=\frac{m}{\gamma}$ is not determined by the rotational motion but instead by the mass of the particles.
From the longterm MSD of the active particles we can define an effective diffusion coefficient $D_{\mathrm{eff}} = D + v^{2}\tau_{R}/6$. One can, of course, also connect this increased diffusion with an effective temperature. However, this apparent equivalence can lead to problems when one then attempts to apply statistical mechanics to such systems at the effective temperature. That is, there is typically more to being out-of-equilibrium than can be captured by a simple remapping of equilibrium parameters, as we will see in the second part of the tutorial.
From the autocorrelation functions of the velocity and the angular velocity we can see that the activity does not influence the rotational diffusion. Yet the directed motion for $t<\tau_{R}$ leads to an enhanced correlation of the velocity.
End of explanation
"""
def clear_system(system):
    """Reset the ESPResSo system between tutorial parts.

    Removes all particles, constraints and accumulators, switches the
    thermostat off, and rewinds the simulation clock so the next section
    starts from a clean state.  Box geometry and cell-system settings are
    deliberately kept.
    """
    system.part.clear()
    system.thermostat.turn_off()
    system.constraints.clear()
    system.auto_update_accumulators.clear()
    system.time = 0.


clear_system(system)
"""
Explanation: Before we go to the second part, it is important to clear the state of the system.
End of explanation
"""
import espressomd.shapes
import espressomd.math

# Parameters for the rectification part: a capped cylindrical channel with
# a conical funnel dividing it in the middle.
RECT_PARAMS = {'length': 100,
               'radius': 20,
               'funnel_inner_radius': 3,
               'funnel_angle': np.pi / 4.0,
               'funnel_thickness': 0.1,
               'n_particles': 500,
               'active_velocity': 7,
               'time_step': 0.01,
               'wca_sigma': 0.5,
               'wca_epsilon': 0.1,
               'skin': 0.4,
               'kT': 0.1,
               'gamma': 1.,
               'gamma_rotation': 1}
RECT_STEPS_PER_SAMPLE = 100
RECT_N_SAMPLES = 500
# Particle types used to define wall-particle interactions.
TYPES = {'particles': 0,
         'boundaries': 1}
# The box exactly encloses the cylinder.
box_l = np.array(
    [RECT_PARAMS['length'], 2*RECT_PARAMS['radius'], 2*RECT_PARAMS['radius']])
system.box_l = box_l
system.cell_system.skin = RECT_PARAMS['skin']
system.time_step = RECT_PARAMS['time_step']
system.thermostat.set_langevin(
    kT=RECT_PARAMS['kT'], gamma=RECT_PARAMS['gamma'], gamma_rotation=RECT_PARAMS['gamma_rotation'], seed=42)
# Confining cylinder along x; direction=-1 keeps particles inside it.
cylinder = espressomd.shapes.Cylinder(
    center=0.5 * box_l,
    axis=[1, 0, 0], radius=RECT_PARAMS['radius'], length=RECT_PARAMS['length'], direction=-1)
system.constraints.add(shape=cylinder, particle_type=TYPES['boundaries'])
# Setup walls capping the channel at x=0 and x=length.
wall = espressomd.shapes.Wall(dist=0, normal=[1, 0, 0])
system.constraints.add(shape=wall, particle_type=TYPES['boundaries'])
wall = espressomd.shapes.Wall(dist=-RECT_PARAMS['length'], normal=[-1, 0, 0])
system.constraints.add(shape=wall, particle_type=TYPES['boundaries'])
# Axial extent of the funnel cone, from its radii and opening angle.
funnel_length = (RECT_PARAMS['radius']-RECT_PARAMS['funnel_inner_radius']
                 )/np.tan(RECT_PARAMS['funnel_angle'])
"""
Explanation: Rectification
In the second part of this tutorial you will consider the ‘rectifying’ properties of certain
asymmetric geometries on active systems. Rectification can be best understood by
considering a system of passive particles first. In an equilibrium system,
for which the particles are confined to an asymmetric box with hard walls, we know that the
particle density is homogeneous throughout. However, in an out-of-equilibrium setting one can have a
heterogeneous distribution of particles, which limits the applicability of an
‘effective’ temperature description.
The geometry we will use is a cylindrical system with a funnel dividing
two halves of the box as shown in <a href='#fig:geometry'>Fig. 2</a>.
<a id='fig:geometry'></a>
<figure><img src="figures/geometry.svg" style="float: center; width: 75%"/>
<center>
<figcaption>Fig. 2: Sketch of the rectifying geometry which we
simulate for this tutorial.</figcaption>
</center>
</figure>
End of explanation
"""
# Time series of the center-of-mass deviation, filled during sampling below.
com_deviations = list()
times = list()
"""
Explanation: Exercise
Using funnel_length and the geometric parameters in RECT_PARAMS, set up the funnel cone (Hint: Conical Frustum)
```python
ctp = espressomd.math.CylindricalTransformationParameters(
axis=[1, 0, 0], center=box_l/2.)
hollow_cone = espressomd.shapes.HollowConicalFrustum(
cyl_transform_params=ctp,
r1=RECT_PARAMS['funnel_inner_radius'], r2=RECT_PARAMS['radius'],
thickness=RECT_PARAMS['funnel_thickness'],
length=funnel_length,
direction=1)
system.constraints.add(shape=hollow_cone, particle_type=TYPES['boundaries'])
```
Exercise
Set up a WCA potential between the walls and the particles using the parameters in RECT_PARAMS
python
system.non_bonded_inter[TYPES['particles'], TYPES['boundaries']].wca.set_params(
epsilon=RECT_PARAMS['wca_epsilon'], sigma=RECT_PARAMS['wca_sigma'])
ESPResSo uses quaternions to describe the rotational state of particles. Here we provide a convenience method to calculate quaternions from spherical coordinates.
Exercise
Place an equal number of swimming particles (the total number should be RECT_PARAMS['n_particles']) in the left and the right part of the box such that the center of mass is exactly in the middle. (Hint: Particles do not interact so you can put multiple in the same position)
Particles must be created with a random orientation
```python
for i in range(RECT_PARAMS['n_particles']):
pos = box_l / 2
pos[0] += (-1)**i * 0.25 * RECT_PARAMS['length']
# https://mathworld.wolfram.com/SpherePointPicking.html
theta = np.arccos(2. * np.random.random() - 1)
phi = 2. * np.pi * np.random.random()
director = [np.sin(theta) * np.cos(phi),
np.sin(theta) * np.cos(phi),
np.cos(theta)]
system.part.add(pos=pos, swimming={'v_swim': RECT_PARAMS['active_velocity']},
director=director, rotation=3*[True])
```
End of explanation
"""
def moving_average(data, window_size):
    """Return the centered running mean of *data* over *window_size* samples.

    Uses a 'same'-mode convolution, so the output has the same length as
    the input; entries within window_size of either edge are distorted.
    """
    window = np.ones(window_size)
    smoothed = np.convolve(data, window, 'same')
    return smoothed / window_size
# Smooth the center-of-mass trajectory; edge samples are distorted by the
# 'same'-mode convolution, so one window's worth is trimmed on each side.
smoothing_window = 10
com_smoothed = moving_average(com_deviations, smoothing_window)
fig_rect = plt.figure(figsize=(10, 6))
plt.plot(times[smoothing_window:-smoothing_window],
         com_smoothed[smoothing_window:-smoothing_window])
plt.xlabel('t')
plt.ylabel('center of mass deviation')
plt.show()
"""
Explanation: Exercise
Run the simulation using RECT_N_SAMPLES and RECT_STEPS_PER_SAMPLE and calculate the deviation of the center of mass from the center of the box in each sample step. (Hint: Center of mass)
Save the result and the corresponding time of the system in the lists given above.
python
for _ in tqdm.tqdm(range(RECT_N_SAMPLES)):
system.integrator.run(RECT_STEPS_PER_SAMPLE)
com_deviations.append(system.galilei.system_CMS()[0] - 0.5 * box_l[0])
times.append(system.time)
End of explanation
"""
# Wipe particles, thermostat, constraints and accumulators before the
# hydrodynamics part.
clear_system(system)
"""
Explanation: Even though the potential energy inside the geometry is 0 in every part of the accessible region, the active particles are clearly not Boltzmann distributed (homogeneous density). Instead, they get funneled into the right half, showing the inapplicability of equilibrium statistical mechanics.
End of explanation
"""
import espressomd.lb

# Parameters for the hydrodynamics part (LB fluid + force-dipole swimmer).
HYDRO_PARAMS = {'box_l': 3*[25],
                'time_step': 0.01,
                'skin': 1,
                'agrid': 1,
                'dens': 1,
                'visc': 1,
                'gamma': 1,
                'mass': 5,
                'dipole_length': 2,
                'active_force': 0.1,
                'mode': 'pusher'}
HYDRO_N_STEPS = 2000
system.box_l = HYDRO_PARAMS['box_l']
system.cell_system.skin = HYDRO_PARAMS['skin']
system.time_step = HYDRO_PARAMS['time_step']
# The swimmer's counter-force acts dipole_length away from the particle,
# so the minimum global cutoff must cover that distance.
system.min_global_cut = HYDRO_PARAMS['dipole_length']
"""
Explanation: Hydrodynamics of self-propelled particles
In situations where hydrodynamic interactions between swimmers or swimmers and
objects are of importance, we use the lattice-Boltzmann (LB) to propagate the
fluid's momentum diffusion. We recommend the GPU-based variant of LB in ESPResSo,
since it is much faster. Moreover, the current implementation of the CPU
self-propulsion is limited to one CPU. This is because the ghost-node structure
of the ESPResSo cell-list code does not allow for straightforward MPI parallellization
of the swimmer objects across several CPUs.
Of particular importance for self-propulsion at low Reynolds number is the fact
that active systems (bacteria, sperm, algae, but also artificial chemically
powered swimmers) are force free. That is, the flow field around one of these
objects does not contain a monopolar (Stokeslet) contribution. In the case of a
sperm cell, see <a href='#fig:pusher-puller'>Fig. 3</a>(a), the reasoning is as follows.
The whip-like tail pushes against the fluid and the fluid pushes against the
tail, at the same time the head experiences drag, pushing against the fluid and
being pushed back against by the fluid. This ensures that both the swimmer and
the fluid experience no net force. However, due to the asymmetry of the
distribution of forces around the swimmer, the fluid flow still causes net
motion. When there is no net force on the fluid, the lowest-order multipole
that can be present is a hydrodynamic dipole. Since a dipole has an
orientation, there are two types of swimmer: pushers and pullers. The
distinction is made by whether the particle pulls fluid in from the front and
back, and pushes it out towards its side (puller), or vice versa (pusher), see
<a href='#fig:pusher-puller'>Fig. 3</a>(c,d).
<a id='fig:pusher-puller'></a>
<figure><img src="figures/pusher-puller.svg" style="float: center; width: 75%"/>
<center>
<figcaption>Fig. 3: (a) Illustration of a sperm cell modeled
using our two-point swimmer code. The head is represented by a solid particle,
on which a force is acting (black arrow). In the fluid a counter force is
applied (white arrow). This generates a pusher-type particle. (b) Illustration
of the puller-type Chlamydomonas algae, also represented by our two-point
swimmer. (c,d) Sketch of the flow-lines around the swimmers: (c) pusher and (d)
puller.</figcaption>
</center>
</figure>
For the setup of the swimming particles with hydrodynamics we cannot use the v_swim argument anymore because it is not trivial to determine the friction acting on the particle. Instead, we have to provide the keys f_swim and dipole_length. Together they determine what the dipole strength and the terminal velocity of the swimmer is.
One should be careful, however, the dipole_length should be at least one
grid spacing, since use is made of the LB interpolation scheme. If the length
is less than one grid spacing, you can easily run into discretization artifacts
or cause the particle not to move. This dipole length together with the
director and the keyword <tt>pusher/puller</tt> determines where the counter
force on the fluid is applied to make the system force free, see
<a href='#fig:pusher-puller'>Fig. 3</a>(a) for an illustration of the setup. That is to
say, a force of magnitude f_swim is applied to the particle (leading
to a Stokeslet in the fluid, due to friction) and a counter force is applied to
compensate for this in the fluid (resulting in an extended dipole flow field,
due to the second monopole). For a puller the counter force is applied in front
of the particle and for a pusher it is in the back
(<a href='#fig:pusher-puller'>Fig. 3</a>(b)).
Finally, there are a few caveats to the swimming setup with hydrodynamic
interactions. First, the stability of this algorithm is governed by the
stability limitations of the LB method. Second, since the particle is
essentially a point particle, there is no rotation caused by the fluid
flow, e.g., a swimmer in a Poiseuille flow. If the thermostat is
switched on, the rotational degrees of freedom will also be thermalized, but
there is still no contribution of rotation due to ‘external’ flow fields.
It is recommended to use an alternative means of obtaining rotations in your LB
swimming simulations. For example, by constructing a raspberry
particle.
End of explanation
"""
box_l = np.array(HYDRO_PARAMS['box_l'])
# Start the swimmer on the box axis; the negative z is folded back into
# the periodic box by ESPResSo.
pos = box_l/2.
pos[2] = -10.
"""
Explanation: Exercise
Using HYDRO_PARAMS, set up a lattice-Boltzmann fluid and activate it as a thermostat (Hint: lattice-Boltzmann)
python
lbf = espressomd.lb.LBFluidGPU(agrid=HYDRO_PARAMS['agrid'], dens=HYDRO_PARAMS['dens'],
visc=HYDRO_PARAMS['visc'], tau=HYDRO_PARAMS['time_step'])
system.actors.add(lbf)
system.thermostat.set_lb(LB_fluid=lbf, gamma=HYDRO_PARAMS['gamma'], seed=42)
End of explanation
"""
# Let the flow field develop around the swimmer.
system.integrator.run(HYDRO_N_STEPS)
# Extract the LB velocity field in the y-midplane (an x-z slice).
vels = np.squeeze(lbf[:, int(system.box_l[1]/2), :].velocity)
vel_abs = np.linalg.norm(vels, axis=2)
lb_shape = lbf.shape
# LB node centers sit at half-integer grid positions.
xs, zs = np.meshgrid(np.linspace(0.5, box_l[0] - 0.5, num=lb_shape[0]),
                     np.linspace(0.5, box_l[2] - 0.5, num=lb_shape[2]))
fig_vels, ax_vels = plt.subplots(figsize=(10, 6))
im = plt.pcolormesh(vel_abs.T, cmap='YlOrRd')
plt.quiver(xs, zs, vels[:, :, 0].T, vels[:, :, 2].T, angles='xy', scale=0.005)
# Mark the swimmer's (folded) position within the plotted plane.
circ = plt.Circle(particle.pos_folded[[0, 2]], 0.5, color='blue')
ax_vels.add_patch(circ)
ax_vels.set_aspect('equal')
plt.xlabel('x')
plt.ylabel('z')
cb = plt.colorbar(im, label=r'$|v_{\mathrm{fluid}}|$')
plt.show()
"""
Explanation: Exercise
Using HYDRO_PARAMS, place particle at pos that swims in z-direction. The particle handle should be called particle.
python
particle = system.part.add(
pos=pos, mass=HYDRO_PARAMS['mass'], rotation=3*[False],
swimming={'f_swim': HYDRO_PARAMS['active_force'],
'mode': HYDRO_PARAMS['mode'],
'dipole_length': HYDRO_PARAMS['dipole_length']})
End of explanation
"""
# Export fluid and particle state for visualization, e.g. with ParaView.
lbf.write_vtk_velocity('./fluid.vtk')
system.part.writevtk('./particle.vtk')
"""
Explanation: We can also export the particle and fluid data to .vtk format to display the results with a visualization software like ParaView.
End of explanation
"""
|
turbomanage/training-data-analyst | courses/machine_learning/deepdive2/image_classification/solutions/1_mnist_linear.ipynb | apache-2.0 | import os
import shutil
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras import Sequential
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard
from tensorflow.keras.layers import Dense, Flatten, Softmax
print(tf.__version__)
!python3 -m pip freeze | grep 'tensorflow==2\|tensorflow-gpu==2' || \
python3 -m pip install tensorflow==2
"""
Explanation: MNIST Image Classification with TensorFlow
This notebook demonstrates how to implement a simple linear image model on MNIST using the tf.keras API. It builds the foundation for this <a href="https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/courses/machine_learning/deepdive2/image_classification/labs/2_mnist_models.ipynb">companion notebook</a>, which explores tackling the same problem with other types of models such as DNN and CNN.
Learning Objectives
Know how to read and display image data
Know how to find incorrect predictions to analyze the model
Visually see how computers see images
This notebook uses TF2.0
Please check your tensorflow version using the cell below. If it is not 2.0, please run the pip line below and restart the kernel.
End of explanation
"""
mnist = tf.keras.datasets.mnist.load_data()
(x_train, y_train), (x_test, y_test) = mnist
# All images share the same dimensions; read them off the first sample.
HEIGHT, WIDTH = x_train[0].shape
# Number of distinct labels (10 digits), computed as a scalar tf tensor.
NCLASSES = tf.size(tf.unique(y_train).y)
print("Image height x width is", HEIGHT, "x", WIDTH)
tf.print("There are", NCLASSES, "classes")
"""
Explanation: Exploring the data
The MNIST dataset is already included in tensorflow through the keras datasets module. Let's load it and get a sense of the data.
End of explanation
"""
# Index of the test image to display.
IMGNO = 12
# Uncomment to see raw numerical values.
# print(x_test[IMGNO])
plt.imshow(x_test[IMGNO].reshape(HEIGHT, WIDTH));
print("The label for image number", IMGNO, "is", y_test[IMGNO])
"""
Explanation: Each image is 28 x 28 pixels and represents a digit from 0 to 9. These images are black and white, so each pixel is a value from 0 (white) to 255 (black). Raw numbers can be hard to interpret sometimes, so we can plot the values to see the handwritten digit as an image.
End of explanation
"""
def linear_model():
    """Build and compile a softmax linear classifier for MNIST.

    The model flattens each image, applies a single dense layer (one
    neuron per class) and a softmax, and is compiled with Adam and
    categorical cross-entropy against one-hot labels.
    """
    layers = [Flatten(), Dense(NCLASSES), Softmax()]
    classifier = Sequential(layers)
    classifier.compile(optimizer='adam',
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])
    return classifier
"""
Explanation: Define the model
Let's start with a very simple linear classifier. This was the first method to be tried on MNIST in 1998, and scored an 88% accuracy. Quite ground breaking at the time!
We can build our linear classifer using the tf.keras API, so we don't have to define or initialize our weights and biases. This happens automatically for us in the background. We can also add a softmax layer to transform the logits into probabilities. Finally, we can compile the model using categorical cross entropy in order to strongly penalize high probability predictions that were incorrect.
When building more complex models such as DNNs and CNNs our code will be more readable by using the tf.keras API. Let's get one working so we can test it and use it as a benchmark.
End of explanation
"""
# Shuffle buffer and mini-batch size for the tf.data input pipeline.
BUFFER_SIZE = 5000
BATCH_SIZE = 100
def scale(image, label):
    """Cast *image* to float32 and normalize pixel values from [0, 255] to [0, 1]."""
    normalized = tf.cast(image, tf.float32) / 255
    return normalized, label
def load_dataset(training=True):
    """Load MNIST into a ``tf.data.Dataset``.

    Args:
        training: If True, return the shuffled, infinitely repeated
            training split; otherwise the evaluation split.

    Returns:
        A batched ``tf.data.Dataset`` of ``(image, one_hot_label)`` pairs
        with pixel values scaled to [0, 1].
    """
    (x_train, y_train), (x_test, y_test) = mnist
    x = x_train if training else x_test
    y = y_train if training else y_test
    # One-hot encode the classes
    y = tf.keras.utils.to_categorical(y, NCLASSES)
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    dataset = dataset.map(scale)
    if training:
        # Shuffle *before* batching so individual examples are shuffled.
        # The previous version shuffled after .batch(), which only
        # permutes whole fixed batches between epochs.
        dataset = dataset.shuffle(BUFFER_SIZE).repeat()
    return dataset.batch(BATCH_SIZE)
def create_shape_test(training):
    """Assert that one batch from load_dataset has the expected shapes."""
    dataset = load_dataset(training=training)
    data_iter = dataset.__iter__()
    (images, labels) = data_iter.get_next()
    expected_image_shape = (BATCH_SIZE, HEIGHT, WIDTH)
    expected_label_ndim = 2  # (batch, one-hot classes)
    assert(images.shape == expected_image_shape)
    assert(labels.numpy().ndim == expected_label_ndim)
    test_name = 'training' if training else 'eval'
    print("Test for", test_name, "passed!")


create_shape_test(True)
create_shape_test(False)
"""
Explanation: Write Input Functions
As usual, we need to specify input functions for training and evaluating. We'll scale each pixel value so it's a decimal value between 0 and 1 as a way of normalizing the data.
TODO 1: Define the scale function below and build the dataset
End of explanation
"""
# Train for a fixed budget of epochs and steps per epoch.
NUM_EPOCHS = 10
STEPS_PER_EPOCH = 100
model = linear_model()
train_data = load_dataset()
validation_data = load_dataset(training=False)
# Checkpoints and TensorBoard logs are written under this directory.
OUTDIR = "mnist_linear/"
checkpoint_callback = ModelCheckpoint(
    OUTDIR, save_weights_only=True, verbose=1)
tensorboard_callback = TensorBoard(log_dir=OUTDIR)
history = model.fit(
    train_data,
    validation_data=validation_data,
    epochs=NUM_EPOCHS,
    steps_per_epoch=STEPS_PER_EPOCH,
    verbose=2,
    callbacks=[checkpoint_callback, tensorboard_callback]
)
# Sanity checks: beat the 1998 linear-classifier benchmark (12% error) and
# confirm that accuracy improves and loss decreases over training.
BENCHMARK_ERROR = .12
BENCHMARK_ACCURACY = 1 - BENCHMARK_ERROR
accuracy = history.history['accuracy']
val_accuracy = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
assert(accuracy[-1] > BENCHMARK_ACCURACY)
assert(val_accuracy[-1] > BENCHMARK_ACCURACY)
print("Test to beat benchmark accuracy passed!")
assert(accuracy[0] < accuracy[1])
assert(accuracy[1] < accuracy[-1])
assert(val_accuracy[0] < val_accuracy[1])
assert(val_accuracy[1] < val_accuracy[-1])
print("Test model accuracy is improving passed!")
assert(loss[0] > loss[1])
assert(loss[1] > loss[-1])
assert(val_loss[0] > val_loss[1])
assert(val_loss[1] > val_loss[-1])
print("Test loss is decreasing passed!")
"""
Explanation: Time to train the model! The original MNIST linear classifier had an error rate of 12%. Let's use that to sanity check that our model is learning.
End of explanation
"""
# Indices of test images to predict on; widen this range to hunt for
# misclassified examples.
image_numbers = range(0, 10, 1)  # Change me, please.


def load_prediction_dataset():
    """Build a single-batch dataset of the selected test images."""
    dataset = (x_test[image_numbers], y_test[image_numbers])
    dataset = tf.data.Dataset.from_tensor_slices(dataset)
    dataset = dataset.map(scale).batch(len(image_numbers))
    return dataset


predicted_results = model.predict(load_prediction_dataset())
# Report only the images the model got wrong.
for index, prediction in enumerate(predicted_results):
    predicted_value = np.argmax(prediction)
    actual_value = y_test[image_numbers[index]]
    if actual_value != predicted_value:
        print("image number: " + str(image_numbers[index]))
        print("the prediction was " + str(predicted_value))
        print("the actual label is " + str(actual_value))
        print("")

# Display one of the troublesome digits.
bad_image_number = 8
plt.imshow(x_test[bad_image_number].reshape(HEIGHT, WIDTH));
"""
Explanation: Evaluating Predictions
Were you able to get an accuracy of over 90%? Not bad for a linear estimator! Let's make some predictions and see if we can find where the model has trouble. Change the range of values below to find incorrect predictions, and plot the corresponding images. What would you have guessed for these images?
TODO 2: Change the range below to find an incorrect prediction
End of explanation
"""
DIGIT = 0  # Change me to be an integer from 0 to 9.
LAYER = 1  # Layer 0 flattens image, so no weights
WEIGHT_TYPE = 0  # 0 for variable weights, 1 for biases
# get_weights() returns [kernel, bias]; take the column of kernel weights
# feeding the chosen digit's neuron and view it as a 28x28 image.
dense_layer_weights = model.layers[LAYER].get_weights()
digit_weights = dense_layer_weights[WEIGHT_TYPE][:, DIGIT]
plt.imshow(digit_weights.reshape((HEIGHT, WIDTH)))
"""
Explanation: It's understandable why the poor computer would have some trouble. Some of these images are difficult for even humans to read. In fact, we can see what the computer thinks each digit looks like.
Each of the 10 neurons in the dense layer of our model has 785 weights feeding into it. That's 1 weight for every pixel in the image + 1 for a bias term. These weights are flattened feeding into the model, but we can reshape them back into the original image dimensions to see what the computer sees.
TODO 3: Reshape the layer weights to be the shape of an input image and plot.
End of explanation
"""
|
pyreaclib/pyreaclib | pynucastro/library/tabular/generate_tabulated_file.ipynb | bsd-3-clause | import numpy as np
import pandas as pd
import re
import matplotlib.pyplot as plt
f = open("/Users/sailor/Desktop/A23_Ne_F.dat","r")
data = f.readlines() # data is a list. each element is a line of "A23_Ne_F.dat"
f.close()
"""
Explanation: How to generate data files for tabulated rates
These tabulated reaction rates are included along with library files
describing how the tables should be read.
Tabulated reactions of the form A -> B are supported, where the
rates are tabulated in a two-dimensional space of:
the product of density and electron fraction
temperature
This directory includes tabulated rates obtained from Suzuki et al.,
2016, ApJ 817:163, downloaded from
http://w3p.phys.chs.nihon-u.ac.jp/~suzuki/data2/link.html
The Suzuki data tables are in the following form:
```
!23Ne -> 23F, e-capture with screening effects
!USDB Q=-8.4635 MeV
!Transitions from 5/2+, 1/2+, 7/2+, 3/2+ states of 23Ne are included.
!Experimental data are used.
!
!Log(rhoY) Log(T) mu dQ Vs e-cap-rate nu-energy-loss gamma-energy
! (MeV) (MeV) (MeV) (1/s) (MeV/s) (MeV/s)
7.00 7.00 1.2282 0.0279 0.0077 -0.50000E+03 -0.50000E+03 -0.50000E+03
7.00 7.20 1.2270 0.0278 0.0077 -0.50000E+03 -0.50000E+03 -0.50000E+03
7.00 7.40 1.2253 0.0275 0.0077 -0.50000E+03 -0.50000E+03 -0.50000E+03
```
First, we read it.
End of explanation
"""
# Collect the indices of header lines, i.e. lines whose first character
# is '!'.
inde = []
for i in range(len(data)):
    if data[i][0] == '!': # if the line start as "!" , this is a header line
        inde.append(i) # store the detected index into this list
# NOTE(review): this slice selects the header correctly only because the
# '!' lines form a contiguous run at the top of the file, so inde[0] == 0
# and len(inde) equals the number of header lines -- confirm for new
# input files.
header = data[inde[0]:len(inde)]
header
"""
Explanation: Next, we find the header line of the data file
End of explanation
"""
# Replace the last two header lines with the column names and CGS units
# that pynucastro expects.  These strings are written verbatim into the
# output table, so keep their exact content and trailing newlines.
header[-2] = '!rhoY T mu dQ Vs e-cap-rate nu-energy-loss gamma-energy\n'
header[-1] = '!g/cm^3 K erg erg erg 1/s erg/s erg/s\n'
header
"""
Explanation: We modify the last 2 lines of the header to meet the requirements of pynucastro.
End of explanation
"""
# Drop the header lines (a contiguous run starting at data[inde[0]]).
del data[inde[0]:len(inde)]

# Split each remaining line on single spaces and strip its newline,
# e.g. "1.23 3.45 5.67\n" -> ["1.23", "", "3.45", ...] (empty tokens
# appear wherever the file has runs of spaces).
data1 = [re.split(r"[ ]", line.strip('\n')) for line in data]

# Remove the empty tokens produced by consecutive spaces.
for row in data1:
    while '' in row:
        row.remove('')

# Rows that were blank lines are now empty lists; discard them.
while [] in data1:
    data1.remove([])

# Parse every remaining token into a float.
data2 = [[float(token) for token in row] for row in data1]
data2[0:3]
"""
Explanation: Then we convert the data table from a 1D string array to a 2D float array.
End of explanation
"""
# Per-column unit conversions (column order follows the Suzuki table):
# cols 0-1: log10(rhoY), log10(T) -> linear values,
# cols 2-4: mu, dQ, Vs in MeV -> erg,
# col 5:    log10(rate) -> rate in 1/s,
# cols 6-7: log10(MeV/s) -> erg/s.   1 MeV = 1.60218e-6 erg.
_column_converters = [
    lambda v: np.power(10, v),
    lambda v: np.power(10, v),
    lambda v: v * 1.60218e-6,
    lambda v: v * 1.60218e-6,
    lambda v: v * 1.60218e-6,
    lambda v: np.power(10, v),
    lambda v: np.power(10, v) * 1.60218e-6,
    lambda v: np.power(10, v) * 1.60218e-6,
]

# Convert in place, storing each value as a "%e"-formatted string.
for row in data2:
    for column, convert in enumerate(_column_converters):
        row[column] = "%e" % convert(row[column])
data2[0]
"""
Explanation: We need to convert the unit to meet the requirements of pynucastro
- convert log(rhoY/(g/cm^3)) to rhoY/(g/cm^3)
- convert log(T/K) to T/K
- convert Mev to erg in mu
- convert Mev to erg in dQ
- convert Mev to erg in Vs
- convert log(rate/(s^-1)) to rate/(s^-1) in e-cap-rate
- convert log(rate/(Mev/s)) to rate/(erg/s) in nu-energy-loss
- convert log(rate/(Mev/s)) to rate/(erg/s) in gamma-energy
End of explanation
"""
# Convert every entry to a string.  (After the previous cell the entries
# are already "%e"-formatted strings, so str() is effectively a no-op,
# but this keeps the cell safe to re-run on raw floats.)
data3 = [[str(value) for value in row] for row in data2]

# Write the header lines followed by the space-separated data rows.
# `with` guarantees the file is closed even if a write fails (the
# original open/close pair leaked the handle on exceptions).
with open("/Users/sailor/Desktop/23Ne-23F_electroncapture.dat", "w") as f:
    f.writelines(header)
    for row in data3:
        f.write(' '.join(row))
        f.write('\n')
"""
Explanation: Then we write the new data into a data file that pynucastro can read.
End of explanation
"""
# Write the pynucastro rate-description file: 't' marks a tabular rate,
# then the reactant and product names, the table filename, and two
# integers.  NOTE(review): 152 and 39 are presumably the table row count
# and header-line count -- confirm against the pynucastro tabular-rate
# documentation.  `with` ensures the handle is closed on error.
with open("/Users/sailor/Desktop/ne23--f23-toki", "w") as rate_file:
    rate_file.write("t\n "+"ne23"+" "+"f23"+"\n"+"23Ne-23F_electroncapture.dat"+"\n152\n39")
"""
Explanation: Next we need to generate a rate file to describe how to read the table file. (You can go to http://pynucastro.github.io/pynucastro/networks.html#tabular-rates to read details)
End of explanation
"""
|
tensorflow/docs-l10n | site/ja/probability/examples/Linear_Mixed_Effects_Model_Variational_Inference.ipynb | apache-2.0 | #@title Licensed under the Apache License, Version 2.0 (the "License"); { display-mode: "form" }
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Explanation: Copyright 2018 The TensorFlow Authors.
Licensed under the Apache License, Version 2.0 (the "License");
End of explanation
"""
#@title Install { display-mode: "form" }
# Choose which TensorFlow build to install; 'System' keeps whatever the
# runtime already provides.  (The `!pip` lines are IPython shell magics
# and only work inside a notebook.)
TF_Installation = 'System' #@param ['TF Nightly', 'TF Stable', 'System']

if TF_Installation == 'TF Nightly':
  !pip install -q --upgrade tf-nightly
  print('Installation of `tf-nightly` complete.')
elif TF_Installation == 'TF Stable':
  !pip install -q --upgrade tensorflow
  print('Installation of `tensorflow` complete.')
elif TF_Installation == 'System':
  pass
else:
  raise ValueError('Selection Error: Please select a valid '
                   'installation option.')
#@title Install { display-mode: "form" }
# Same pattern as above, for TensorFlow Probability.
TFP_Installation = "System" #@param ["Nightly", "Stable", "System"]

if TFP_Installation == "Nightly":
  !pip install -q tfp-nightly
  print("Installation of `tfp-nightly` complete.")
elif TFP_Installation == "Stable":
  !pip install -q --upgrade tensorflow-probability
  print("Installation of `tensorflow-probability` complete.")
elif TFP_Installation == "System":
  pass
else:
  raise ValueError("Selection Error: Please select a valid "
                   "installation option.")
"""
Explanation: 変分推論を使用した一般化線形混合効果モデルの適合
<table class="tfo-notebook-buttons" align="left">
<td><a target="_blank" href="https://www.tensorflow.org/probability/examples/Linear_Mixed_Effects_Model_Variational_Inference"><img src="https://www.tensorflow.org/images/tf_logo_32px.png">TensorFlow.org で表示</a></td>
<td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/Linear_Mixed_Effects_Model_Variational_Inference.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Google Colab で実行</a></td>
<td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/probability/examples/Linear_Mixed_Effects_Model_Variational_Inference.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a></td>
<td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/probability/examples/Linear_Mixed_Effects_Model_Variational_Inference.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
</table>
End of explanation
"""
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import os
from six.moves import urllib
import matplotlib.pyplot as plt; plt.style.use('ggplot')
import numpy as np
import pandas as pd
import seaborn as sns; sns.set_context('notebook')
import tensorflow_datasets as tfds
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
import tensorflow_probability as tfp
tfd = tfp.distributions
tfb = tfp.bijectors
"""
Explanation: 概要
このコラボでは、TensorFlow Probability の変分推論を使用して、一般化線形混合効果モデルを適合させる方法を示します。
モデルの族
一般化線形混合効果モデル (GLMM) は、一般化線形モデル (GLM) と似ていますが、予測される線形応答にサンプル固有のノイズが組み込まれている点が異なります。これは、まれな特徴がより一般的に見られる特徴と情報を共有できるため、有用な場合もあります。
生成プロセスとして、一般化線形混合効果モデル (GLMM) には次の特徴があります。
$$ \begin{align} \text{for } & r = 1\ldots R: \hspace{2.45cm}\text{# for each random-effect group}\ &\begin{aligned} \text{for } &c = 1\ldots |C_r|: \hspace{1.3cm}\text{# for each category ("level") of group $r$}\ &\begin{aligned} \beta_{rc} &\sim \text{MultivariateNormal}(\text{loc}=0_{D_r}, \text{scale}=\Sigma_r^{1/2}) \end{aligned} \end{aligned}\ \text{for } & i = 1 \ldots N: \hspace{2.45cm}\text{# for each sample}\ &\begin{aligned} &\eta_i = \underbrace{\vphantom{\sum_{r=1}^R}x_i^\top\omega}\text{fixed-effects} + \underbrace{\sum{r=1}^R z_{r,i}^\top \beta_{r,C_r(i) }}\text{random-effects} \ &Y_i|x_i,\omega,{z{r,i} , \beta_r}_{r=1}^R \sim \text{Distribution}(\text{mean}= g^{-1}(\eta_i)) \end{aligned} \end{align} $$
ここでは、
$$ \begin{align} R &= \text{number of random-effect groups}\ |C_r| &= \text{number of categories for group $r$}\ N &= \text{number of training samples}\ x_i,\omega &\in \mathbb{R}^{D_0}\ D_0 &= \text{number of fixed-effects}\ C_r(i) &= \text{category (under group $r$) of the $i$th sample}\ z_{r,i} &\in \mathbb{R}^{D_r}\ D_r &= \text{number of random-effects associated with group $r$}\ \Sigma_{r} &\in {S\in\mathbb{R}^{D_r \times D_r} : S \succ 0 }\ \eta_i\mapsto g^{-1}(\eta_i) &= \mu_i, \text{inverse link function}\ \text{Distribution} &=\text{some distribution parameterizable solely by its mean} \end{align} $$
つまり、各グループのすべてのカテゴリが、多変量正規分布からのサンプル $\beta_{rc}$ に関連付けられていることを意味します。$\beta_{rc}$ の抽出は常に独立していますが、グループ $r$ に対してのみ同じように分散されます。$r\in{1,\ldots,R}$ ごとに 1 つの $\Sigma_r$ があることに注意してください。
サンプルのグループの特徴である $z_{r,i}$ と密接に組み合わせると、結果は $i$ 番目の予測線形応答 (それ以外の場合は $x_i^\top\omega$) のサンプル固有のノイズになります。
${\Sigma_r:r\in{1,\ldots,R}}$ を推定する場合、基本的に、変量効果グループがもつノイズの量を推定します。そうしないと、 $x_i^\top\omega$ に存在する信号が失われます。
$\text{Distribution}$ および逆リンク関数 $g^{-1}$ にはさまざまなオプションがあります。一般的なオプションは次のとおりです。
$Y_i\sim\text{Normal}(\text{mean}=\eta_i, \text{scale}=\sigma)$,
$Y_i\sim\text{Binomial}(\text{mean}=n_i \cdot \text{sigmoid}(\eta_i), \text{total_count}=n_i)$, and,
$Y_i\sim\text{Poisson}(\text{mean}=\exp(\eta_i))$.
その他のオプションについては、tfp.glm モジュールを参照してください。
変分推論
残念ながら、パラメータ $\beta,{\Sigma_r}_r^R$ の最尤推定値を見つけるには、非分析積分が必要です。この問題を回避するためには、
付録で $q_{\lambda}$ と示されている、パラメータ化された分布のファミリ (「代理密度」) を定義します。
$q_{\lambda}$ が実際の目標密度に近くなるように、パラメータ $\lambda$ を見つけます。
分布族は、適切な次元の独立したガウス分布になり、「目標密度に近い」とは、「カルバック・ライブラー情報量を最小化する」ことを意味します。導出と動機については、「変分推論:統計家のためのレビュー」のセクション 2.2 を参照してください。特に、K-L 情報量を最小化することは、負の変分証拠の下限 (ELBO) を最小限に抑えることと同じであることが示されています。
トイプロブレム
Gelman et al. (2007) の「ラドンデータセット」は、回帰のアプローチを示すために使用されるデータセットです。(密接に関連する PyMC3 ブログ記事を参照してください。) ラドンデータセットには、米国全体で取得されたラドンの屋内測定値が含まれています。ラドンは、高濃度で有毒な自然発生の放射性ガスです。
このデモでは、地下室がある家屋ではラドンレベルが高いという仮説を検証することに関心があると仮定します。また、ラドン濃度は土壌の種類、つまり地理的な問題に関連していると考えられます。
これを機械学習の問題としてフレーム化するために、測定が行われた階の線形関数に基づいて対数ラドンレベルを予測します。また、郡を変量効果として使用し、地理的条件による差異を考慮します。つまり、一般化線形混合効果モデルを使用します。
End of explanation
"""
# Report whether the default GPU device is available for this run.
device_name = tf.test.gpu_device_name()
if device_name == '/device:GPU:0':
    print('Huzzah! Found GPU: {}'.format(device_name))
else:
    print("We'll just use the CPU for this run.")
"""
Explanation: また、GPU の可用性を確認します。
End of explanation
"""
def load_and_preprocess_radon_dataset(state='MN'):
    """Load the Radon dataset from TensorFlow Datasets and preprocess it.

    Keeps only measurements taken in `state` and derives:
      - `county`: tidied county name, stored as a pandas categorical.
      - `county_code`: integer code (0..n-1) for the county category.
      - `floor`: floor of the house on which the measurement was taken
        (0 for basement, 1 for first floor).
      - `log_radon`: log of the radon measurement (the target variable).
    """
    raw = tfds.as_dataframe(tfds.load('radon', split='train'))
    # Strip the 'features/' prefix TFDS puts on feature columns.
    raw.rename(lambda s: s[9:] if s.startswith('feat') else s, axis=1, inplace=True)
    state_df = raw[raw.state == state.encode()].copy()
    # Zero activity would make the log below -inf; floor it at 0.1.
    state_df['radon'] = state_df.activity.apply(lambda x: x if x > 0. else 0.1)
    # Decode and tidy county names, then encode them as categorical codes.
    state_df['county'] = (state_df.county.apply(lambda s: s.decode())
                          .str.strip().str.title())
    state_df['county'] = state_df.county.astype(pd.api.types.CategoricalDtype())
    state_df['county_code'] = state_df.county.cat.codes
    state_df['log_radon'] = state_df['radon'].apply(np.log)
    # Keep only the modelling columns and tidy the index.
    columns_to_keep = ['log_radon', 'floor', 'county', 'county_code']
    return state_df[columns_to_keep].reset_index(drop=True)

df = load_and_preprocess_radon_dataset()
df.head()
"""
Explanation: データセットの取得:
TensorFlow データセットからデータセットを読み込み、簡単な前処理を行います。
End of explanation
"""
# Side-by-side view: density of log(radon) split by floor, and the count
# of measurements taken on each floor.
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(12, 4))
df.groupby('floor')['log_radon'].plot(kind='density', ax=ax1);
ax1.set_xlabel('Measured log(radon)')
ax1.legend(title='Floor')
df['floor'].value_counts().plot(kind='bar', ax=ax2)
ax2.set_xlabel('Floor where radon was measured')
ax2.set_ylabel('Count')
fig.suptitle("Distribution of log radon and floors in the dataset");
"""
Explanation: GLMM 族の特化
このセクションでは、GLMM 族をラドンレベルの予測タスクに特化します。これを行うには、まず GLMM の固定効果の特殊なケースを検討します: $$ \mathbb{E}[\log(\text{radon}_j)] = c + \text{floor_effect}_j $$
このモデルは、観測値 $j$ の対数ラドンが (予想では) $j$ 番目の測定が行われる階と一定の切片によって支配されることを前提としています。擬似コードでは、次のようになります。
def estimate_log_radon(floor):
return intercept + floor_effect[floor]
すべての階で学習された重みと、普遍的な intercept の条件があります。0 階と 1 階からのラドン測定値を見ると、これは良いスタートのように見えます。
End of explanation
"""
# Bar chart of readings per county -- shows the long tail of counties
# with only a handful of observations.
fig, ax = plt.subplots(figsize=(22, 5));
county_freq = df['county'].value_counts()
county_freq.plot(kind='bar', ax=ax)
ax.set_xlabel('County')
ax.set_ylabel('Number of readings');
"""
Explanation: モデルをもう少し洗練されたものにするために、地理に関することを含めるとさらに良いでしょう。ラドンは土壌に含まれるウランの放射壊変により生ずるため、地理が重要であると考えられます。
$$ \mathbb{E}[\log(\text{radon}_j)] = c + \text{floor_effect}_j + \text{county_effect}_j $$
擬似コードは次のとおりです。
def estimate_log_radon(floor, county):
return intercept + floor_effect[floor] + county_effect[county]
郡固有の重みを除いて、以前と同じです。
十分に大きなトレーニングセットなので、これは妥当なモデルです。ただし、ミネソタ州からのデータを見てみると、測定数が少ない郡が多数あります。たとえば、85 の郡のうち 39 の郡の観測値は 5 つ未満です。
そのため、郡ごとの観測数が増えるにつれて上記のモデルに収束するように、すべての観測間で統計的強度を共有するようにします。
End of explanation
"""
# Model inputs: integer county code and floor indicator.
# Target: flat float32 array of log radon measurements.
features = df[['county_code', 'floor']].astype(int)
labels = df[['log_radon']].astype(np.float32).values.flatten()
"""
Explanation: このモデルを適合させると、county_effect ベクトルは、トレーニングサンプルが少ない郡の結果を記憶することになり、おそらく過適合になり、一般化が不十分になります。
GLMM は、上記の 2 つの GLM の中間に位置します。以下の適合を検討します。
$$ \log(\text{radon}_j) \sim c + \text{floor_effect}_j + \mathcal{N}(\text{county_effect}_j, \text{county_scale}) $$
このモデルは最初のモデルと同じですが、正規分布になる可能性を修正し、(単一の) 変数 county_scale を介してすべての郡で分散を共有します。擬似コードは、以下のとおりです。
def estimate_log_radon(floor, county):
county_mean = county_effect[county]
random_effect = np.random.normal() * county_scale + county_mean
return intercept + floor_effect[floor] + random_effect
観測データを使用して、county_scale、county_mean および random_effect の同時分布を推測します。グローバルな county_scale を使用すると、郡間で統計的強度を共有できます。観測値が多い郡は、観測値が少ない郡の分散を強化します。さらに、より多くのデータを収集すると、このモデルは、プールされたスケール変数のないモデルに収束します。このデータセットを使用しても、どちらのモデルでも最も観察された郡について同様の結論に達します。
実験
次に、TensorFlow の変分推論を使用して、上記の GLMM を適合させます。まず、データを特徴とラベルに分割します。
End of explanation
"""
def make_joint_distribution_coroutine(floor, county, n_counties, n_floors):
    """Build the GLMM joint distribution for the radon model.

    Args:
        floor: per-sample floor indicator array.
        county: per-sample integer county code, used to index the
            county-level random effects.
        n_counties: number of distinct counties (length of the
            random-effect vector).
        n_floors: number of distinct floors.  NOTE(review): currently
            unused in the model body -- confirm whether it can be removed.

    Returns:
        A `tfd.JointDistributionCoroutineAutoBatched` over
        (scale_prior, intercept, floor_weight, county_prior, likelihood).
        The yield order defines the variable order used everywhere else.
    """
    def model():
        # Half-normal prior on the shared county-effect scale.
        county_scale = yield tfd.HalfNormal(scale=1., name='scale_prior')
        intercept = yield tfd.Normal(loc=0., scale=1., name='intercept')
        floor_weight = yield tfd.Normal(loc=0., scale=1., name='floor_weight')
        # One random effect per county, all sharing `county_scale`.
        county_prior = yield tfd.Normal(loc=tf.zeros(n_counties),
                                        scale=county_scale,
                                        name='county_prior')
        # Gather each sample's county effect and add the fixed effects.
        random_effect = tf.gather(county_prior, county, axis=-1)

        fixed_effect = intercept + floor_weight * floor
        linear_response = fixed_effect + random_effect
        yield tfd.Normal(loc=linear_response, scale=1., name='likelihood')
    return tfd.JointDistributionCoroutineAutoBatched(model)

joint = make_joint_distribution_coroutine(
    features.floor.values, features.county_code.values, df.county.nunique(),
    df.floor.nunique())

# Define a closure over the joint distribution
# to condition on the observed labels.
def target_log_prob_fn(*args):
    return joint.log_prob(*args, likelihood=labels)
"""
Explanation: モデルの指定
End of explanation
"""
# Initialize locations and scales randomly with `tf.Variable`s and
# `tfp.util.TransformedVariable`s.
# The Softplus bijector keeps each scale strictly positive while the
# underlying trainable variable stays unconstrained for the optimizer.
_init_loc = lambda shape=(): tf.Variable(
    tf.random.uniform(shape, minval=-2., maxval=2.))
_init_scale = lambda shape=(): tfp.util.TransformedVariable(
    initial_value=tf.random.uniform(shape, minval=0.01, maxval=1.),
    bijector=tfb.Softplus())
n_counties = df.county.nunique()

# Mean-field surrogate posterior: one independent (transformed) normal
# per latent variable, listed in the same order as the joint model.
surrogate_posterior = tfd.JointDistributionSequentialAutoBatched([
  tfb.Softplus()(tfd.Normal(_init_loc(), _init_scale())),         # scale_prior
  tfd.Normal(_init_loc(), _init_scale()),                         # intercept
  tfd.Normal(_init_loc(), _init_scale()),                         # floor_weight
  tfd.Normal(_init_loc([n_counties]), _init_scale([n_counties]))])  # county_prior
"""
Explanation: 事後分布を指定する
ここで、サロゲート族 $q_{\lambda}$ を作成しました。パラメータ $\lambda$ はトレーニング可能です。この場合、分布族は、パラメータごとに 1 つずつ、独立した多変量正規分布であり、$\lambda = {(\mu_j, \sigma_j)}$ です。$j$ は 4 つのパラメータにインデックスを付けます。
サロゲート分布族を適合させるために使用するメソッドは、tf.Variables を使用します。また、tfp.util.TransformedVariable を Softplus とともに使用して、(トレーニング可能な) スケールパラメータを正に制約します。また、tfp.util.TransformedVariableを Softplus とともに使用して、(トレーニング可能な) スケールパラメータを正に制約します。
最適化を支援するために、これらのトレーニング可能な変数を初期化します。
End of explanation
"""
optimizer = tf.optimizers.Adam(learning_rate=1e-2)

# Fit the surrogate by minimizing the negative ELBO for 3000 steps;
# `losses` records the loss at every step.
losses = tfp.vi.fit_surrogate_posterior(
    target_log_prob_fn,
    surrogate_posterior,
    optimizer=optimizer,
    num_steps=3000,
    seed=42,
    sample_size=2)

# Unpack the fitted component distributions (same order as construction).
(scale_prior_,
 intercept_,
 floor_weight_,
 county_weights_), _ = surrogate_posterior.sample_distributions()

print(' intercept (mean): ', intercept_.mean())
print(' floor_weight (mean): ', floor_weight_.mean())
# scale_prior is a transformed distribution; approximate its mean by
# averaging samples.
print(' scale_prior (approx. mean): ', tf.reduce_mean(scale_prior_.sample(10000)))

fig, ax = plt.subplots(figsize=(10, 3))
ax.plot(losses, 'k-')
ax.set(xlabel="Iteration",
       ylabel="Loss (ELBO)",
       title="Loss during training",
       ylim=0);
"""
Explanation: このセルは、次のように tfp.experimental.vi.build_factored_surrogate_posterior に置き換えることができることに注意してください。
python
surrogate_posterior = tfp.experimental.vi.build_factored_surrogate_posterior(
event_shape=joint.event_shape_tensor()[:-1],
constraining_bijectors=[tfb.Softplus(), None, None, None])
結果
ここでの目標は、扱いやすいパラメータ化された分布族を定義し、パラメータを選択して、ターゲット分布に近い扱いやすい分布を作成することでした。
上記のようにサロゲート分布を作成し、 tfp.vi.fit_surrogate_posterior を使用できます。これは、オプティマイザと指定された数のステップを受け入れて、負の ELBO を最小化するサロゲートモデルのパラメータを見つけます (これは、サロゲート分布とターゲット分布の間のカルバック・ライブラー情報を最小化することに対応します)。
戻り値は各ステップで負の ELBO であり、surrogate_posterior の分布はオプティマイザによって検出されたパラメータで更新されます。
End of explanation
"""
# Readings per county, most-observed first; this ordering drives the
# x-axis of the plot below.
county_counts = (df.groupby(by=['county', 'county_code'], observed=True)
                   .agg('size')
                   .sort_values(ascending=False)
                   .reset_index(name='count'))

means = county_weights_.mean()
stds = county_weights_.stddev()

fig, ax = plt.subplots(figsize=(20, 5))

for idx, row in county_counts.iterrows():
    mid = means[row.county_code]
    std = stds[row.county_code]
    # Vertical bar spans mean +/- 1 posterior std; the dot marks the mean.
    ax.vlines(idx, mid - std, mid + std, linewidth=3)
    ax.plot(idx, means[row.county_code], 'ko', mfc='w', mew=2, ms=7)

ax.set(
    xticks=np.arange(len(county_counts)),
    xlim=(-1, len(county_counts)),
    ylabel="County effect",
    title=r"Estimates of county effects on log radon levels. (mean $\pm$ 1 std. dev.)",
)
ax.set_xticklabels(county_counts.county, rotation=90);
"""
Explanation: 推定された平均郡効果をその平均の不確実性とともにプロットし、観測数で並べ替えました。左側が最大です。観測値が多い郡では不確実性は小さく、観測値が 1 つか 2 つしかない郡では不確実性が大きいことに注意してください。
End of explanation
"""
# Posterior uncertainty vs. (log) number of observations per county:
# more data per county should shrink the posterior std. deviation.
fig, ax = plt.subplots(figsize=(10, 7))
ax.plot(np.log1p(county_counts['count']), stds.numpy()[county_counts.county_code], 'o')
ax.set(
    ylabel='Posterior std. deviation',
    xlabel='County log-count',
    title='Having more observations generally\nlowers estimation uncertainty'
);
"""
Explanation: 実際、推定された標準偏差に対して観測値の対数をプロットすることで、このことを直接に確認でき、関係がほぼ線形であることがわかります。
End of explanation
"""
%%shell
exit # Trick to make this block not execute.
# NOTE: everything below is R code kept for reference only -- the `exit`
# above stops the cell, so none of it runs in this notebook.  The
# commented block at the bottom is lme4's fitted output for comparison.

radon = read.csv('srrs2.dat', header = TRUE)
radon = radon[radon$state=='MN',]
radon$radon = ifelse(radon$activity==0., 0.1, radon$activity)
radon$log_radon = log(radon$radon)

# install.packages('lme4')
library(lme4)
fit <- lmer(log_radon ~ 1 + floor + (1 | county), data=radon)

fit

# Linear mixed model fit by REML ['lmerMod']
# Formula: log_radon ~ 1 + floor + (1 | county)
# Data: radon
# REML criterion at convergence: 2171.305
# Random effects:
# Groups Name Std.Dev.
# county (Intercept) 0.3282
# Residual 0.7556
# Number of obs: 919, groups: county, 85
# Fixed Effects:
# (Intercept) floor
# 1.462 -0.693
"""
Explanation: R の lme4 との比較
End of explanation
"""
# Side-by-side comparison: lme4's REML point estimates (hard-coded from
# the R output above) vs. our variational-inference fit.
print(pd.DataFrame(data=dict(intercept=[1.462, tf.reduce_mean(intercept_.mean()).numpy()],
                             floor=[-0.693, tf.reduce_mean(floor_weight_.mean()).numpy()],
                             scale=[0.3282, tf.reduce_mean(scale_prior_.sample(10000)).numpy()]),
                   index=['lme4', 'vi']))
"""
Explanation: 次の表は、結果をまとめたものです。
End of explanation
"""
|
kubeflow/pipelines | components/gcp/dataproc/delete_cluster/sample.ipynb | apache-2.0 | %%capture --no-stderr
!pip3 install kfp --upgrade
"""
Explanation: Name
Data preparation by deleting a cluster in Cloud Dataproc
Label
Cloud Dataproc, cluster, GCP, Cloud Storage, Kubeflow, Pipeline
Summary
A Kubeflow Pipeline component to delete a cluster in Cloud Dataproc.
Intended use
Use this component at the start of a Kubeflow Pipeline to delete a temporary Cloud Dataproc
cluster to run Cloud Dataproc jobs as steps in the pipeline. This component is usually
used with an exit handler to run at the end of a pipeline.
Runtime arguments
| Argument | Description | Optional | Data type | Accepted values | Default |
|----------|-------------|----------|-----------|-----------------|---------|
| project_id | The Google Cloud Platform (GCP) project ID that the cluster belongs to. | No | GCPProjectID | | |
| region | The Cloud Dataproc region in which to handle the request. | No | GCPRegion | | |
| name | The name of the cluster to delete. | No | String | | |
| wait_interval | The number of seconds to pause between polling the operation. | Yes | Integer | | 30 |
Cautions & requirements
To use the component, you must:
* Set up a GCP project by following this guide.
* The component can authenticate to GCP. Refer to Authenticating Pipelines to GCP for details.
* Grant the Kubeflow user service account the role roles/dataproc.editor on the project.
Detailed description
This component deletes a Dataproc cluster by using Dataproc delete cluster REST API.
Follow these steps to use the component in a pipeline:
1. Install the Kubeflow Pipeline SDK:
End of explanation
"""
import kfp.components as comp

# Load the Dataproc delete-cluster component definition, pinned to the
# 1.7.0-rc.3 release, and show its generated signature/help text.
dataproc_delete_cluster_op = comp.load_component_from_url(
    'https://raw.githubusercontent.com/kubeflow/pipelines/1.7.0-rc.3/components/gcp/dataproc/delete_cluster/component.yaml')
help(dataproc_delete_cluster_op)
"""
Explanation: Load the component using KFP SDK
End of explanation
"""
# Sample parameters -- replace the angle-bracket placeholders with your
# own GCP project ID and an existing Dataproc cluster name before running.
PROJECT_ID = '<Please put your project ID here>'
CLUSTER_NAME = '<Please put your existing cluster name here>'
REGION = 'us-central1'
EXPERIMENT_NAME = 'Dataproc - Delete Cluster'
"""
Explanation: Sample
Note: The following sample code works in an IPython notebook or directly in Python code. See the sample code below to learn how to execute the template.
Prerequisites
Create a Dataproc cluster before running the sample code.
Set sample parameters
End of explanation
"""
import kfp.dsl as dsl
import json
@dsl.pipeline(
    name='Dataproc delete cluster pipeline',
    description='Dataproc delete cluster pipeline'
)
def dataproc_delete_cluster_pipeline(
    project_id = PROJECT_ID,
    region = REGION,
    name = CLUSTER_NAME
):
    """Single-step pipeline that deletes the named Dataproc cluster."""
    dataproc_delete_cluster_op(
        project_id=project_id,
        region=region,
        name=name)
"""
Explanation: Example pipeline that uses the component
End of explanation
"""
# Compile the pipeline function into a package named after the function.
pipeline_func = dataproc_delete_cluster_pipeline
pipeline_filename = pipeline_func.__name__ + '.zip'
import kfp.compiler as compiler
compiler.Compiler().compile(pipeline_func, pipeline_filename)
"""
Explanation: Compile the pipeline
End of explanation
"""
# Pipeline argument values; empty because the parameter defaults above
# already carry the project/region/cluster settings.
arguments = {}

# Get or create an experiment on the configured Kubeflow Pipelines
# endpoint and submit a pipeline run.
import kfp
client = kfp.Client()
experiment = client.create_experiment(EXPERIMENT_NAME)

# Submit the compiled pipeline as a named run within that experiment.
run_name = pipeline_func.__name__ + ' run'
run_result = client.run_pipeline(experiment.id, run_name, pipeline_filename, arguments)
"""
Explanation: Submit the pipeline for execution
End of explanation
"""
|
sz2472/foundations-homework | Homework_4_database_shengyingzhao_graded.ipynb | mit | numbers_str = '496,258,332,550,506,699,7,985,171,581,436,804,736,528,65,855,68,279,721,120'
"""
Explanation: Grade: 11 / 11 -- look for TA-COMMENT
Homework #4
These problem sets focus on list comprehensions, string operations and regular expressions.
Problem set #1: List slices and list comprehensions
Let's start with some data. The following cell contains a string with comma-separated integers, assigned to a variable called numbers_str:
End of explanation
"""
# Split the comma-separated string, then convert every token to an int.
raw_numbers = numbers_str.split(",")
numbers_list = list(map(int, raw_numbers))
# Largest value -- the integer 985 (not the string '985').
max(numbers_list)
"""
Explanation: In the following cell, complete the code with an expression that evaluates to a list of integers derived from the raw numbers in numbers_str, assigning the value of this expression to a variable numbers. If you do everything correctly, executing the cell should produce the output 985 (not '985').
End of explanation
"""
# Sort ascending, then slice off the last ten entries = the ten largest.
sorted(numbers_list)[-10:]
"""
Explanation: Great! We'll be using the numbers list you created above in the next few problems.
In the cell below, fill in the square brackets so that the expression evaluates to a list of the ten largest values in numbers. Expected output:
[506, 528, 550, 581, 699, 721, 736, 804, 855, 985]
(Hint: use a slice.)
End of explanation
"""
# Multiples of three in ascending order (sorted() accepts a generator).
sorted(x for x in numbers_list if x%3==0)
"""
Explanation: In the cell below, write an expression that evaluates to a list of the integers from numbers that are evenly divisible by three, sorted in numerical order. Expected output:
[120, 171, 258, 279, 528, 699, 804, 855]
End of explanation
"""
from math import sqrt
# Square roots of every value below 100.
[sqrt(x) for x in numbers_list if x < 100]
"""
Explanation: Okay. You're doing great. Now, in the cell below, write an expression that evaluates to a list of the square roots of all the integers in numbers that are less than 100. In order to do this, you'll need to use the sqrt function from the math module, which I've already imported for you. Expected output:
[2.6457513110645907, 8.06225774829855, 8.246211251235321]
(These outputs might vary slightly depending on your platform.)
End of explanation
"""
# Solar-system reference data: one dict per planet.  `diameter`, `mass`
# and `orbital_period` are relative to Earth (Earth = 1.00), `moons` is
# a count, `rings` is 'yes'/'no', and `type` is a coarse classification.
planets = [
    {'diameter': 0.382,
     'mass': 0.06,
     'moons': 0,
     'name': 'Mercury',
     'orbital_period': 0.24,
     'rings': 'no',
     'type': 'terrestrial'},
    {'diameter': 0.949,
     'mass': 0.82,
     'moons': 0,
     'name': 'Venus',
     'orbital_period': 0.62,
     'rings': 'no',
     'type': 'terrestrial'},
    {'diameter': 1.00,
     'mass': 1.00,
     'moons': 1,
     'name': 'Earth',
     'orbital_period': 1.00,
     'rings': 'no',
     'type': 'terrestrial'},
    {'diameter': 0.532,
     'mass': 0.11,
     'moons': 2,
     'name': 'Mars',
     'orbital_period': 1.88,
     'rings': 'no',
     'type': 'terrestrial'},
    {'diameter': 11.209,
     'mass': 317.8,
     'moons': 67,
     'name': 'Jupiter',
     'orbital_period': 11.86,
     'rings': 'yes',
     'type': 'gas giant'},
    {'diameter': 9.449,
     'mass': 95.2,
     'moons': 62,
     'name': 'Saturn',
     'orbital_period': 29.46,
     'rings': 'yes',
     'type': 'gas giant'},
    {'diameter': 4.007,
     'mass': 14.6,
     'moons': 27,
     'name': 'Uranus',
     'orbital_period': 84.01,
     'rings': 'yes',
     'type': 'ice giant'},
    {'diameter': 3.883,
     'mass': 17.2,
     'moons': 14,
     'name': 'Neptune',
     'orbital_period': 164.8,
     'rings': 'yes',
     'type': 'ice giant'}]
"""
Explanation: Problem set #2: Still more list comprehensions
Still looking good. Let's do a few more with some different data. In the cell below, I've defined a data structure and assigned it to a variable planets. It's a list of dictionaries, with each dictionary describing the characteristics of a planet in the solar system. Make sure to run the cell before you proceed.
End of explanation
"""
# Planets wider than four Earth diameters (Earth's diameter is 1.00 here).
[x['name'] for x in planets if x['diameter']>4]
"""
Explanation: Now, in the cell below, write a list comprehension that evaluates to a list of names of the planets that have a radius greater than four earth radii. Expected output:
['Jupiter', 'Saturn', 'Uranus']
End of explanation
"""
# Total mass of all planets, in Earth masses.
sum(x['mass'] for x in planets)
"""
Explanation: In the cell below, write a single expression that evaluates to the sum of the mass of all planets in the solar system. Expected output: 446.79
End of explanation
"""
# Planets whose type contains the word "giant" (gas giants + ice giants).
[x['name'] for x in planets if 'giant' in x['type']]
"""
Explanation: Good work. Last one with the planets. Write an expression that evaluates to the names of the planets that have the word giant anywhere in the value for their type key. Expected output:
['Jupiter', 'Saturn', 'Uranus', 'Neptune']
End of explanation
"""
# Sort the planet dicts by moon count; `key=` extracts the value to
# compare, since dicts themselves are not orderable.
[x['name'] for x in sorted(planets, key=lambda x:x['moons'])] #can't sort a dictionary, sort the dictionary by the number of moons

# Equivalent, using a named key function instead of a lambda.
def get_moon_count(d):
    return d['moons']
[x['name'] for x in sorted(planets, key=get_moon_count)]

#sort the dictionary by reverse order of the diameter:
[x['name'] for x in sorted(planets, key=lambda d:d['diameter'],reverse=True)]

# Same descending-diameter ordering, filtered to planets wider than four
# Earth diameters.
[x['name'] for x in \
    sorted(planets, key=lambda d:d['diameter'], reverse=True) \
    if x['diameter'] >4]
"""
Explanation: EXTREME BONUS ROUND: Write an expression below that evaluates to a list of the names of the planets in ascending order by their number of moons. (The easiest way to do this involves using the key parameter of the sorted function, which we haven't yet discussed in class! That's why this is an EXTREME BONUS question.) Expected output:
['Mercury', 'Venus', 'Earth', 'Mars', 'Neptune', 'Uranus', 'Saturn', 'Jupiter']
End of explanation
"""
import re
poem_lines = ['Two roads diverged in a yellow wood,',
'And sorry I could not travel both',
'And be one traveler, long I stood',
'And looked down one as far as I could',
'To where it bent in the undergrowth;',
'',
'Then took the other, as just as fair,',
'And having perhaps the better claim,',
'Because it was grassy and wanted wear;',
'Though as for that the passing there',
'Had worn them really about the same,',
'',
'And both that morning equally lay',
'In leaves no step had trodden black.',
'Oh, I kept the first for another day!',
'Yet knowing how way leads on to way,',
'I doubted if I should ever come back.',
'',
'I shall be telling this with a sigh',
'Somewhere ages and ages hence:',
'Two roads diverged in a wood, and I---',
'I took the one less travelled by,',
'And that has made all the difference.']
"""
Explanation: Problem set #3: Regular expressions
In the following section, we're going to do a bit of digital humanities. (I guess this could also be journalism if you were... writing an investigative piece about... early 20th century American poetry?) We'll be working with the following text, Robert Frost's The Road Not Taken. Make sure to run the following cell before you proceed.
End of explanation
"""
# Lines containing two four-letter words in a row (separated by whitespace);
# \b anchors keep the match to exactly-four-character words.
[line for line in poem_lines if re.search(r"\b\w{4}\s\w{4}\b",line)]
"""
Explanation: In the cell above, I defined a variable poem_lines which has a list of lines in the poem, and imported the re library.
In the cell below, write a list comprehension (using re.search()) that evaluates to a list of lines that contain two words next to each other (separated by a space) that have exactly four characters. (Hint: use the \b anchor. Don't overthink the "two words in a row" requirement.)
Expected result:
['Then took the other, as just as fair,',
'Had worn them really about the same,',
'And both that morning equally lay',
'I doubted if I should ever come back.',
'I shall be telling this with a sigh']
End of explanation
"""
# Lines ending in a five-letter word, optionally followed by one more
# character (the .? allows a single trailing punctuation mark before $).
[line for line in poem_lines if re.search(r"\b\w{5}\b.?$",line)]
"""
Explanation: Good! Now, in the following cell, write a list comprehension that evaluates to a list of lines in the poem that end with a five-letter word, regardless of whether or not there is punctuation following the word at the end of the line. (Hint: Try using the ? quantifier. Is there an existing character class, or a way to write a character class, that matches non-alphanumeric characters?) Expected output:
['And be one traveler, long I stood',
'And looked down one as far as I could',
'And having perhaps the better claim,',
'Though as for that the passing there',
'In leaves no step had trodden black.',
'Somewhere ages and ages hence:']
End of explanation
"""
all_lines = " ".join(poem_lines)
all_lines
"""
Explanation: Okay, now a slightly trickier one. In the cell below, I've created a string all_lines which evaluates to the entire text of the poem in one string. Execute this cell.
End of explanation
"""
# Every word that immediately follows "I "; the group captures only the
# following word, so the "I" itself is not in the results.
re.findall(r"I (\w+)", all_lines)
"""
Explanation: Now, write an expression that evaluates to all of the words in the poem that follow the word 'I'. (The strings in the resulting list should not include the I.) Hint: Use re.findall() and grouping! Expected output:
['could', 'stood', 'could', 'kept', 'doubted', 'should', 'shall', 'took']
End of explanation
"""
entrees = [
    "Yam, Rosemary and Chicken Bowl with Hot Sauce $10.95",
    "Lavender and Pepperoni Sandwich $8.49",
    "Water Chestnuts and Peas Power Lunch (with mayonnaise) $12.95 - v",
    "Artichoke, Mustard Green and Arugula with Sesame Oil over noodles $9.95 - v",
    "Flank Steak with Lentils And Tabasco Pepper With Sweet Chilli Sauce $19.95",
    "Rutabaga And Cucumber Wrap $8.49 - v"
]
"""
Explanation: Finally, something super tricky. Here's a list of strings that contains a restaurant menu. Your job is to wrangle this plain text, slightly-structured data into a list of dictionaries.
End of explanation
"""
# Parse each "<name>$<price>[ - v]" line into a dict; a trailing " - v"
# marks the dish as vegetarian. The name keeps its trailing space, and
# the price is converted to a float, matching the expected output above.
menu = []
for item in entrees:
    match = re.search(r"^(.*)\$([0-9]+\.[0-9]+)( - v)?$", item)
    menu.append({
        'name': match.group(1),
        'price': float(match.group(2)),
        'vegetarian': match.group(3) is not None,
    })
menu
"""
Explanation: You'll need to pull out the name of the dish and the price of the dish. The v after the hyphen indicates that the dish is vegetarian---you'll need to include that information in your dictionary as well. I've included the basic framework; you just need to fill in the contents of the for loop.
Expected output:
[{'name': 'Yam, Rosemary and Chicken Bowl with Hot Sauce ',
'price': 10.95,
'vegetarian': False},
{'name': 'Lavender and Pepperoni Sandwich ',
'price': 8.49,
'vegetarian': False},
{'name': 'Water Chestnuts and Peas Power Lunch (with mayonnaise) ',
'price': 12.95,
'vegetarian': True},
{'name': 'Artichoke, Mustard Green and Arugula with Sesame Oil over noodles ',
'price': 9.95,
'vegetarian': True},
{'name': 'Flank Steak with Lentils And Tabasco Pepper With Sweet Chilli Sauce ',
'price': 19.95,
'vegetarian': False},
{'name': 'Rutabaga And Cucumber Wrap ', 'price': 8.49, 'vegetarian': True}]
End of explanation
"""
|
mne-tools/mne-tools.github.io | 0.17/_downloads/10d15867c4c4d54609e083ad834f1606/plot_dipole_fit.ipynb | bsd-3-clause | from os import path as op
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.forward import make_forward_dipole
from mne.evoked import combine_evoked
from mne.simulation import simulate_evoked
from nilearn.plotting import plot_anat
from nilearn.datasets import load_mni152_template
data_path = mne.datasets.sample.data_path()
subjects_dir = op.join(data_path, 'subjects')
fname_ave = op.join(data_path, 'MEG', 'sample', 'sample_audvis-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis-cov.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem', 'sample-5120-bem-sol.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_raw-trans.fif')
fname_surf_lh = op.join(subjects_dir, 'sample', 'surf', 'lh.white')
"""
Explanation: ============================================================
Source localization with equivalent current dipole (ECD) fit
============================================================
This shows how to fit a dipole using mne-python.
For a comparison of fits between MNE-C and mne-python, see:
https://gist.github.com/Eric89GXL/ca55f791200fe1dc3dd2
End of explanation
"""
# Load the evoked response for the Right Auditory condition, keep MEG
# channels only, and crop to the 70-80 ms window (around the N100m peak).
evoked = mne.read_evokeds(fname_ave, condition='Right Auditory',
                          baseline=(None, 0))
evoked.pick_types(meg=True, eeg=False)
evoked_full = evoked.copy()  # keep the uncropped copy for the later fixed-dipole fit
evoked.crop(0.07, 0.08)

# Fit a dipole
dip = mne.fit_dipole(evoked, fname_cov, fname_bem, fname_trans)[0]

# Plot the result in 3D brain with the MRI image.
dip.plot_locations(fname_trans, 'sample', subjects_dir, mode='orthoview')

# Plot the result in 3D brain with the MRI image using Nilearn
# In MRI coordinates and in MNI coordinates (template brain)
trans = mne.read_trans(fname_trans)
subject = 'sample'
# Convert the dipole positions from head coordinates into MNI and MRI space
mni_pos = mne.head_to_mni(dip.pos, mri_head_t=trans,
                          subject=subject, subjects_dir=subjects_dir)
mri_pos = mne.head_to_mri(dip.pos, mri_head_t=trans,
                          subject=subject, subjects_dir=subjects_dir)
t1_fname = op.join(subjects_dir, subject, 'mri', 'T1.mgz')
fig = plot_anat(t1_fname, cut_coords=mri_pos[0], title='Dipole loc.')
template = load_mni152_template()
fig = plot_anat(template, cut_coords=mni_pos[0],
                title='Dipole loc. (MNI Space)')
"""
Explanation: Let's localize the N100m (using MEG only)
End of explanation
"""
fwd, stc = make_forward_dipole(dip, fname_bem, evoked.info, fname_trans)
pred_evoked = simulate_evoked(fwd, stc, evoked.info, cov=None, nave=np.inf)
# find time point with highest GOF to plot
best_idx = np.argmax(dip.gof)
best_time = dip.times[best_idx]
print('Highest GOF %0.1f%% at t=%0.1f ms with confidence volume %0.1f cm^3'
% (dip.gof[best_idx], best_time * 1000,
dip.conf['vol'][best_idx] * 100 ** 3))
# remember to create a subplot for the colorbar
fig, axes = plt.subplots(nrows=1, ncols=4, figsize=[10., 3.4])
vmin, vmax = -400, 400 # make sure each plot has same colour range
# first plot the topography at the time of the best fitting (single) dipole
plot_params = dict(times=best_time, ch_type='mag', outlines='skirt',
colorbar=False, time_unit='s')
evoked.plot_topomap(time_format='Measured field', axes=axes[0], **plot_params)
# compare this to the predicted field
pred_evoked.plot_topomap(time_format='Predicted field', axes=axes[1],
**plot_params)
# Subtract predicted from measured data (apply equal weights)
diff = combine_evoked([evoked, -pred_evoked], weights='equal')
plot_params['colorbar'] = True
diff.plot_topomap(time_format='Difference', axes=axes[2], **plot_params)
plt.suptitle('Comparison of measured and predicted fields '
'at {:.0f} ms'.format(best_time * 1000.), fontsize=16)
"""
Explanation: Calculate and visualise magnetic field predicted by dipole with maximum GOF
and compare to the measured data, highlighting the ipsilateral (right) source
End of explanation
"""
dip_fixed = mne.fit_dipole(evoked_full, fname_cov, fname_bem, fname_trans,
pos=dip.pos[best_idx], ori=dip.ori[best_idx])[0]
dip_fixed.plot(time_unit='s')
"""
Explanation: Estimate the time course of a single dipole with fixed position and
orientation (the one that maximized GOF) over the entire interval
End of explanation
"""
|
dereneaton/ipyrad | newdocs/API-analysis/cookbook-treeslider-reference.ipynb | gpl-3.0 | # conda install ipyrad -c bioconda
# conda install raxml -c bioconda
# conda install toytree -c eaton-lab
import ipyrad.analysis as ipa
import toytree
"""
Explanation: <span style="color:gray">ipyrad-analysis toolkit:</span> treeslider
<h5><span style="color:red">(Reference only method)</span></h5>
With reference mapped RAD loci you can select windows of loci located close together on scaffolds and automate extracting and filtering and concatenating the RAD data to write to phylip format (see also the window_extracter tool.) The treeslider tool here automates this process across many windows, distributes the tree inference jobs in parallel, and organizes the results.
Key features:
Filter and concatenate ref-mapped RAD loci into alignments.
Group individuals into clades represented by consensus (reduces missing data).
Distribute phylogenetic inference jobs (e.g., raxml) in parallel.
Easily restart from checkpoints if interrupted.
Results written as a tree_table (dataframe).
Can be paired with other tools for further analysis (e.g., see clade_weights).
Required software
End of explanation
"""
# the path to your HDF5 formatted seqs file
data = "/home/deren/Downloads/ref_pop2.seqs.hdf5"
# check scaffold idx (row) against scaffold names
ipa.treeslider(data).scaffold_table.head()
"""
Explanation: Load the data
The treeslider() tool takes the .seqs.hdf5 database file from ipyrad as its input file. Select scaffolds by their index (integer) which can be found in the .scaffold_table.
End of explanation
"""
# select a scaffold idx, start, and end positions
ts = ipa.treeslider(
name="test2",
data="/home/deren/Downloads/ref_pop2.seqs.hdf5",
workdir="analysis-treeslider",
scaffold_idxs=2,
window_size=250000,
slide_size=250000,
inference_method="raxml",
inference_args={"N": 100, "T": 4},
minsnps=10,
consensus_reduce=True,
mincov=5,
imap={
"reference": ["reference"],
"virg": ["TXWV2", "LALC2", "SCCU3", "FLSF33", "FLBA140"],
"mini": ["FLSF47", "FLMO62", "FLSA185", "FLCK216"],
"gemi": ["FLCK18", "FLSF54", "FLWO6", "FLAB109"],
"bran": ["BJSL25", "BJSB3", "BJVL19"],
"fusi-N": ["TXGR3", "TXMD3"],
"fusi-S": ["MXED8", "MXGT4"],
"sagr": ["CUVN10", "CUCA4", "CUSV6"],
"oleo": ["CRL0030", "HNDA09", "BZBB1", "MXSA3017"],
},
)
ts.show_inference_command()
ts.run(auto=True, force=True)
"""
Explanation: Quick full example
Here I select the scaffold Qrob_Chr03 (scaffold_idx=2), and run 2Mb windows (window_size) non-overlapping (2Mb slide_size) across the entire scaffold. I use the default inference method "raxml", and modify its default arguments to run 100 bootstrap replicates. More details on modifying raxml params later. I set for it to skip windows with <10 SNPs (minsnps), and to filter sites within windows (mincov) to only include those that have coverage across all 9 clades, with samples grouped into clades using an imap dictionary.
End of explanation
"""
ts.tree_table.head()
"""
Explanation: The results table (tree table)
The main result of a tree slider analysis is the tree_table. This is a pandas dataframe that includes information about the size and informativeness of each window in addition to the inferred tree for that window. This table is also saved as a CSV file. You can later re-load this CSV to perform further analysis on the tree results. For example, see the clade_weights tool for how to analyze the support for clades throughout the genome, or see the example tutorial for running ASTRAL species tree or SNAQ species network analyses using the list of trees inferred here.
End of explanation
"""
# example: remove any rows where the tree is NaN
df = ts.tree_table.loc[ts.tree_table.tree.notna()]
mtre = toytree.mtree(df.tree)
mtre.treelist = [i.root("reference") for i in mtre.treelist]
mtre.draw_tree_grid(
nrows=3, ncols=4, start=20,
tip_labels_align=True,
tip_labels_style={"font-size": "9px"},
);
# select a scaffold idx, start, and end positions
ts = ipa.treeslider(
name="test",
data="/home/deren/Downloads/ref_pop2.seqs.hdf5",
workdir="analysis-treeslider",
scaffold_idxs=2,
window_size=1000000,
slide_size=1000000,
inference_method="mb",
inference_args={"N": 0, "T": 4},
minsnps=10,
mincov=9,
consensus_reduce=True,
imap={
"reference": ["reference"],
"virg": ["TXWV2", "LALC2", "SCCU3", "FLSF33", "FLBA140"],
"mini": ["FLSF47", "FLMO62", "FLSA185", "FLCK216"],
"gemi": ["FLCK18", "FLSF54", "FLWO6", "FLAB109"],
"bran": ["BJSL25", "BJSB3", "BJVL19"],
"fusi-N": ["TXGR3", "TXMD3"],
"fusi-S": ["MXED8", "MXGT4"],
"sagr": ["CUVN10", "CUCA4", "CUSV6"],
"oleo": ["CRL0030", "HNDA09", "BZBB1", "MXSA3017"],
},
)
# select a scaffold idx, start, and end positions
ts = ipa.treeslider(
name="test",
data="/home/deren/Downloads/ref_pop2.seqs.hdf5",
workdir="analysis-treeslider",
scaffold_idxs=2,
window_size=2000000,
slide_size=2000000,
inference_method="raxml",
inference_args={"N": 100, "T": 4},
minsnps=10,
mincov=9,
imap={
"reference": ["reference"],
"virg": ["TXWV2", "LALC2", "SCCU3", "FLSF33", "FLBA140"],
"mini": ["FLSF47", "FLMO62", "FLSA185", "FLCK216"],
"gemi": ["FLCK18", "FLSF54", "FLWO6", "FLAB109"],
"bran": ["BJSL25", "BJSB3", "BJVL19"],
"fusi-N": ["TXGR3", "TXMD3"],
"fusi-S": ["MXED8", "MXGT4"],
"sagr": ["CUVN10", "CUCA4", "CUSV6"],
"oleo": ["CRL0030", "HNDA09", "BZBB1", "MXSA3017"],
},
)
"""
Explanation: Filter and examine the tree table
Some windows in your analysis may not include a tree if for example there was too much missing data or insufficient information in that region. You can use pandas masking like below to filter based on various criteria.
End of explanation
"""
# this is the tree inference command that will be used
ts.show_inference_command()
"""
Explanation: The tree inference command
You can examine the command that will be called on each genomic window. By modifying the inference_args above we can modify this string. See examples later in this tutorial.
End of explanation
"""
ts.run(auto=True, force=True)
"""
Explanation: Run tree inference jobs in parallel
To run the command on every window across all available cores call the .run() command. This will automatically save checkpoints to a file of the tree_table as it runs, and can be restarted later if it interrupted.
End of explanation
"""
# the tree table is automatically saved to disk as a CSV during .run()
ts.tree_table.head()
"""
Explanation: The tree table
Our goal is to fill the .tree_table, a pandas DataFrame where rows are genomic windows and the information content of each window is recorded, and a newick string tree is inferred and filled in for each. The tree table is also saved as a CSV formatted file in the workdir. You can re-load it later using Pandas. Below I demonstrate how to plot results from the tree_able. To examine how phylogenetic relationships vary across the genome see also the clade_weights() tool, which takes the tree_table as input.
End of explanation
"""
# filter to only windows with >50 SNPS
trees = ts.tree_table[ts.tree_table.snps > 50].tree.tolist()
# load all trees into a multitree object
mtre = toytree.mtree(trees)
# root trees and collapse nodes with <50 bootstrap support
mtre.treelist = [
i.root("reference").collapse_nodes(min_support=50)
for i in mtre.treelist
]
# draw the first 12 trees in a grid
mtre.draw_tree_grid(
nrows=3, ncols=4, start=0,
tip_labels_align=True,
tip_labels_style={"font-size": "9px"},
);
"""
Explanation: <h3><span style="color:red">Advanced</span>: Plots tree results </h3>
Examine multiple trees
You can select trees from the .tree column of the tree_table and plot them one by one using toytree, or any other tree drawing tool. Below I use toytree to draw a grid of the first 12 trees.
End of explanation
"""
# filter to only windows with >50 SNPS (this could have been done in run)
trees = ts.tree_table[ts.tree_table.snps > 50].tree.tolist()
# load all trees into a multitree object
mtre = toytree.mtree(trees)
# root trees
mtre.treelist = [i.root("reference") for i in mtre.treelist]
# infer a consensus tree to get best tip order
ctre = mtre.get_consensus_tree()
# draw the first 12 trees in a grid
mtre.draw_cloud_tree(
width=400,
height=400,
fixed_order=ctre.get_tip_labels(),
use_edge_lengths=False,
);
"""
Explanation: Draw cloud tree
Using toytree you can easily draw a cloud tree of overlapping gene trees to visualize discordance. These typically look much better if you root the trees, order tips by their consensus tree order, and do not use edge lengths. See below for an example, and see the toytree documentation.
End of explanation
"""
# select a scaffold idx, start, and end positions
ts = ipa.treeslider(
name="chr1_w500K_s100K",
data=data,
workdir="analysis-treeslider",
scaffold_idxs=[0, 1, 2],
window_size=500000,
slide_size=100000,
minsnps=10,
inference_method="raxml",
inference_args={"m": "GTRCAT", "N": 10, "f": "d", 'x': None},
)
# this is the tree inference command that will be used
ts.show_inference_command()
"""
Explanation: <h3><span style="color:red">Advanced</span>: Modify the raxml command</h3>
In this analysis I entered multiple scaffolds to create windows across each scaffold. I also entered a smaller slide size than window size so that windows are partially overlapping. The raxml command string was modified to perform 10 full searches with no bootstraps.
End of explanation
"""
|
jarvis-fga/Projetos | Problema 2/Daniel - Julliana/.ipynb_checkpoints/Amazon2-checkpoint.ipynb | mit | import codecs
with codecs.open("imdb_labelled.txt", "r", "utf-8") as arquivo:
vetor = []
for linha in arquivo:
vetor.append(linha)
with codecs.open("amazon_cells_labelled.txt", "r", "utf-8") as arquivo:
for linha in arquivo:
vetor.append(linha)
with codecs.open("yelp_labelled.txt", "r", "utf-8") as arquivo:
for linha in arquivo:
vetor.append(linha)
"""
Explanation: Primeiramente, é necessária a leitura dos 3 arquivos, inserindo as informações em um vetor:
End of explanation
"""
# Drop the trailing newline from each raw line, then strip punctuation
# and separator characters from every comment string.
vetor = [ x[:-1] for x in vetor ]
vetor = ([s.replace('&', '').replace(' - ', '').replace('.', '').replace(',', '').replace('!', '').
          replace('+', '')for s in vetor])
"""
Explanation: Depois, devemos retirar cada quebra de linha no final de cada linha, ou seja, os '\n'.
End of explanation
"""
# Strip the last 4 characters of each line (presumably the label suffix
# left after cleanup — TODO confirm), lowercase, and tokenize each comment.
TextosQuebrados = [ x[:-4] for x in vetor ]
TextosQuebrados = map(lambda X:X.lower(),TextosQuebrados)
#TextosQuebrados = [x.split(' ') for x in TextosQuebrados]
TextosQuebrados = [nltk.tokenize.word_tokenize(frase) for frase in TextosQuebrados]
import nltk  # NOTE(review): nltk is used above this import — relies on an earlier cell having imported it; verify
# Build the vocabulary: stem every non-stopword, non-empty token and
# collect the distinct stems.
stopwords = nltk.corpus.stopwords.words('english')
stemmer = nltk.stem.RSLPStemmer()  # NOTE(review): RSLP is a Portuguese stemmer applied here to English text — confirm intent
dicionario = set()
for comentarios in TextosQuebrados:
    validas = [stemmer.stem(palavra) for palavra in comentarios if palavra not in stopwords and len(palavra) > 0]
    dicionario.update(validas)
# Map each stem to a fixed position in the feature vector.
totalDePalavras = len(dicionario)
tuplas = zip(dicionario, xrange(totalDePalavras))
tradutor = {palavra:indice for palavra,indice in tuplas}
def vetorizar_texto(texto, tradutor, stemmer):
    """Convert a tokenized text into a bag-of-words count vector.

    texto: iterable of word tokens.
    tradutor: dict mapping word stems to positions in the output vector.
    stemmer: object exposing a .stem(word) method (e.g. an nltk stemmer).

    Returns a list of ints of length len(tradutor) with the count of each
    known stem; empty tokens and stems absent from tradutor are ignored.
    """
    vetor = [0] * len(tradutor)
    for palavra in texto:
        if palavra:  # skip empty tokens
            raiz = stemmer.stem(palavra)
            # single lookup instead of `in` test followed by indexing
            posicao = tradutor.get(raiz)
            if posicao is not None:
                vetor[posicao] += 1
    return vetor
# Vectorize every tokenized comment into bag-of-words counts.
vetoresDeTexto = [vetorizar_texto(texto, tradutor,stemmer) for texto in TextosQuebrados]
X = vetoresDeTexto
# Label: the last character of each cleaned line (presumably the '0'/'1'
# sentiment tag — TODO confirm against the raw file format).
Y = [ x[-1:] for x in vetor ]
# 80/20 split into training and validation sets.
porcentagem_de_treino = 0.8
tamanho_do_treino = porcentagem_de_treino * len(Y)
tamanho_de_validacao = len(Y) - tamanho_do_treino
treino_dados = X[0:int(tamanho_do_treino)]
treino_marcacoes = Y[0:int(tamanho_do_treino)]
validacao_dados = X[int(tamanho_do_treino):]
validacao_marcacoes = Y[int(tamanho_do_treino):]
# NOTE(review): fim_de_teste == len(Y), so the "test" slice below is the
# same data as the validation slice — confirm this overlap is intended.
fim_de_teste = tamanho_do_treino + tamanho_de_validacao
teste_dados = X[int(tamanho_do_treino):int(fim_de_teste)]
teste_marcacoes = Y[int(tamanho_do_treino):int(fim_de_teste)]
"""
Explanation: A seguir, retiramos os dois últimos caracteres sobrando apenas o nosso comentário. Depois, passamos ele para lowercase.
End of explanation
"""
""" from sklearn import svm
from sklearn.model_selection import cross_val_score
k = 10
# Implement poly SVC
poly_svc = svm.SVC(kernel='linear')
accuracy_poly_svc = cross_val_score(poly_svc, treino_dados, treino_marcacoes, cv=k, scoring='accuracy')
print('poly_svc: ', accuracy_poly_svc.mean()) """
"""
Explanation: Foi decidida a abordagem por poly SCV
End of explanation
"""
def fit_and_predict(modelo, treino_dados, treino_marcacoes, teste_dados, teste_marcacoes):
    """Fit the given model on the training set, score it on the test set,
    print the accuracy and return it as a float in [0, 1]."""
    modelo.fit(treino_dados, treino_marcacoes)
    previsoes = modelo.predict(teste_dados)
    # element-wise comparison; summing the boolean matches counts the hits
    corretas = previsoes == teste_marcacoes
    taxa_de_acerto = float(sum(corretas)) / float(len(teste_dados))
    print(taxa_de_acerto)
    return taxa_de_acerto
resultados = {}
from sklearn.naive_bayes import MultinomialNB
modeloMultinomial = MultinomialNB()
resultadoMultinomial = fit_and_predict(modeloMultinomial, treino_dados, treino_marcacoes, teste_dados, teste_marcacoes)
resultados[resultadoMultinomial] = modeloMultinomial
"""
Explanation: Resultado - Poly:
Os 3: Após 10 minutos rodando, foi decidido parar o teste
IMdB: 0.51750234411626805
Amazon: 0.51125019534302241
Yelp: 0.56500429754649173
Resultado - Linear:
Os 3: 0.7745982496802607 (5 minutos)
IMdB: 0.72168288013752147
Amazon: 0.78869745272698855
Yelp: 0.77492342553523996
End of explanation
"""
from sklearn.ensemble import GradientBoostingClassifier
classificador = GradientBoostingClassifier(n_estimators=100, learning_rate=1.0, max_depth=1, random_state=0).fit(treino_dados, treino_marcacoes)
resultado = fit_and_predict(classificador, treino_dados, treino_marcacoes, teste_dados, teste_marcacoes)
"""
Explanation: Com maior refinamento de dados:
MultinomialNB:
Todos: 0.808652246256
Adaboost:
Todos:0.527454242928
End of explanation
"""
from sklearn.naive_bayes import GaussianNB
classificador = GaussianNB()
resultado = fit_and_predict(classificador, treino_dados, treino_marcacoes, teste_dados, teste_marcacoes)
"""
Explanation: GradientBoostingClassifier:
Todos: 0.77870216306156403
End of explanation
"""
from sklearn.naive_bayes import BernoulliNB
classificador = BernoulliNB()
resultado = fit_and_predict(classificador, treino_dados, treino_marcacoes, teste_dados, teste_marcacoes)
"""
Explanation: Gaussiano:
0.665557404326
End of explanation
"""
|
poppy-project/community-notebooks | tutorials-education/poppy-torso__vrep_Prototype d'ininitiation à l'informatique pour les lycéens/Jeux/jeté de balle.ipynb | lgpl-3.0 | import time
from poppy.creatures import PoppyTorso
poppy = PoppyTorso(simulator='vrep')
"""
Explanation: Jeté de balle – Niveau 1 - Python
TP1
Pour commencer votre programme python devra contenir les lignes de code ci-dessous et le logiciel V-REP devra être lancé.
Dans V-REP (en haut à gauche) utilise les deux icones flèche pour déplacer la vue et regarder poppy sous tous les angles.<br>
Dans notebook, utilise le racourci 'Ctrl+Enter' pour éxécuter les commandes.
End of explanation
"""
# Grab the V-REP I/O controller from the robot and spawn a cube in the
# simulated scene in front of the robot.
io = poppy._controllers[0].io
name = 'cube'
position = [0.2, 0, 1] # X, Y, Z
sizes = [0.15, 0.15, 0.15] # in meters
mass = 0.1 # in kg
io.add_cube(name, position, sizes, mass)
"""
Explanation: Ajouter un objet
End of explanation
"""
# open the arms (rotate them apart around the z axis)
poppy.l_arm_z.goal_position = 20
poppy.r_arm_z.goal_position = -20
# close the arms (grip) and bring the shoulders forward
poppy.l_arm_z.goal_position = -20
poppy.r_arm_z.goal_position = 20
poppy.l_shoulder_y.goal_position = -40
poppy.r_shoulder_y.goal_position = -40
# raise the arms overhead (second argument presumably the move
# duration/speed in seconds — TODO confirm against pypot docs)
poppy.l_shoulder_y.goto_position(-180,0.1)
poppy.r_shoulder_y.goto_position(-180,0.1)
# throw: swing the shoulders back down and open the arms to release
poppy.l_shoulder_y.goal_position = -40
poppy.r_shoulder_y.goal_position = -40
poppy.l_arm_z.goal_position = 20
poppy.r_arm_z.goal_position = -20
"""
Explanation: Quelques exemples de mouvement "utile":
End of explanation
"""
poppy.reset_simulation()
"""
Explanation: Solution possible:
reprise de volet
catapulte
attrape puis jéte
Aide ajusté l'objet, forme, taille, poid, position...;
Tu as raté? c'est pas grâve, recommmence, essaie ces lignes pour redémarrer :
End of explanation
"""
import pypot
poppy.stop_simulation()
pypot.vrep.close_all_connections()
from poppy.creatures import PoppyTorso
poppy=PoppyTorso(simulator='vrep')
"""
Explanation: Encore buger ? essaie celles-ci :
End of explanation
"""
import pypot
poppy.stop_simulation()
pypot.vrep.close_all_connections()
"""
Explanation: Tu as fini? coupe la simulation ici:
End of explanation
"""
|
strandbygaard/deep-learning | reinforcement/Q-learning-cart.ipynb | mit | import gym
import tensorflow as tf
import numpy as np
"""
Explanation: Deep Q-learning
In this notebook, we'll build a neural network that can learn to play games through reinforcement learning. More specifically, we'll use Q-learning to train an agent to play a game called Cart-Pole. In this game, a freely swinging pole is attached to a cart. The cart can move to the left and right, and the goal is to keep the pole upright as long as possible.
We can simulate this game using OpenAI Gym. First, let's check out how OpenAI Gym works. Then, we'll get into training an agent to play the Cart-Pole game.
End of explanation
"""
# Create the Cart-Pole game environment
env = gym.make('CartPole-v0')
"""
Explanation: Note: Make sure you have OpenAI Gym cloned into the same directory with this notebook. I've included gym as a submodule, so you can run git submodule --init --recursive to pull the contents into the gym repo.
End of explanation
"""
# Run 100 random steps in the environment, rendering each frame and
# collecting per-step rewards; when the episode ends, clear the reward
# list and restart, so `rewards` holds only the current episode's rewards.
env.reset()
rewards = []
for _ in range(100):
    env.render()
    state, reward, done, info = env.step(env.action_space.sample()) # take a random action
    rewards.append(reward)
    if done:
        rewards = []
        env.reset()
"""
Explanation: We interact with the simulation through env. To show the simulation running, you can use env.render() to render one frame. Passing in an action as an integer to env.step will generate the next step in the simulation. You can see how many actions are possible from env.action_space and to get a random action you can use env.action_space.sample(). This is general to all Gym games. In the Cart-Pole game, there are two possible actions, moving the cart left or right. So there are two actions we can take, encoded as 0 and 1.
Run the code below to watch the simulation run.
End of explanation
"""
print(rewards[-20:])
"""
Explanation: To shut the window showing the simulation, use env.close().
If you ran the simulation above, we can look at the rewards:
End of explanation
"""
class QNetwork:
    """Feed-forward Q-value approximator (TF1 static graph).

    Maps a state vector to one Q-value per action through two ReLU
    hidden layers and a linear output, and exposes the tensors needed
    for training: inputs_, actions_, targetQs_, output, loss, opt.
    """
    def __init__(self, learning_rate=0.01, state_size=4,
                 action_size=2, hidden_size=10,
                 name='QNetwork'):
        # state inputs to the Q-network
        with tf.variable_scope(name):
            self.inputs_ = tf.placeholder(tf.float32, [None, state_size], name='inputs')

            # One hot encode the actions to later choose the Q-value for the action
            self.actions_ = tf.placeholder(tf.int32, [None], name='actions')
            one_hot_actions = tf.one_hot(self.actions_, action_size)

            # Target Q values for training
            self.targetQs_ = tf.placeholder(tf.float32, [None], name='target')

            # ReLU hidden layers
            self.fc1 = tf.contrib.layers.fully_connected(self.inputs_, hidden_size)
            self.fc2 = tf.contrib.layers.fully_connected(self.fc1, hidden_size)

            # Linear output layer
            self.output = tf.contrib.layers.fully_connected(self.fc2, action_size,
                                                            activation_fn=None)

            ### Train with loss (targetQ - Q)^2
            # output has length 2, for two actions. This next line chooses
            # one value from output (per row) according to the one-hot encoded actions.
            self.Q = tf.reduce_sum(tf.multiply(self.output, one_hot_actions), axis=1)
            self.loss = tf.reduce_mean(tf.square(self.targetQs_ - self.Q))
            self.opt = tf.train.AdamOptimizer(learning_rate).minimize(self.loss)
"""
Explanation: The game resets after the pole has fallen past a certain angle. For each frame while the simulation is running, it returns a reward of 1.0. The longer the game runs, the more reward we get. Then, our network's goal is to maximize the reward by keeping the pole vertical. It will do this by moving the cart to the left and the right.
Q-Network
We train our Q-learning agent using the Bellman Equation:
$$
Q(s, a) = r + \gamma \max{Q(s', a')}
$$
where $s$ is a state, $a$ is an action, and $s'$ is the next state from state $s$ and action $a$.
Before we used this equation to learn values for a Q-table. However, for this game there are a huge number of states available. The state has four values: the position and velocity of the cart, and the position and velocity of the pole. These are all real-valued numbers, so ignoring floating point precisions, you practically have infinite states. Instead of using a table then, we'll replace it with a neural network that will approximate the Q-table lookup function.
<img src="assets/deep-q-learning.png" width=450px>
Now, our Q value, $Q(s, a)$ is calculated by passing in a state to the network. The output will be Q-values for each available action, with fully connected hidden layers.
<img src="assets/q-network.png" width=550px>
As I showed before, we can define our targets for training as $\hat{Q}(s,a) = r + \gamma \max{Q(s', a')}$. Then we update the weights by minimizing $(\hat{Q}(s,a) - Q(s,a))^2$.
For this Cart-Pole game, we have four inputs, one for each value in the state, and two outputs, one for each action. To get $\hat{Q}$, we'll first choose an action, then simulate the game using that action. This will get us the next state, $s'$, and the reward. With that, we can calculate $\hat{Q}$ then pass it back into the $Q$ network to run the optimizer and update the weights.
Below is my implementation of the Q-network. I used two fully connected layers with ReLU activations. Two seems to be good enough, three might be better. Feel free to try it out.
End of explanation
"""
from collections import deque
class Memory():
    """Fixed-capacity experience-replay buffer.

    Stores transitions (e.g. (state, action, reward, next_state) tuples);
    once full, adding a new experience silently evicts the oldest one.
    """
    def __init__(self, max_size = 1000):
        # deque(maxlen=...) drops the oldest item automatically when full
        self.buffer = deque(maxlen=max_size)

    def add(self, experience):
        """Append one experience, evicting the oldest if at capacity."""
        self.buffer.append(experience)

    def sample(self, batch_size):
        """Return batch_size distinct experiences drawn uniformly at random."""
        # passing the int directly is documented-equivalent to
        # np.arange(len(...)) but avoids materializing the index array
        idx = np.random.choice(len(self.buffer),
                               size=batch_size,
                               replace=False)
        return [self.buffer[ii] for ii in idx]
"""
Explanation: Experience replay
Reinforcement learning algorithms can have stability issues due to correlations between states. To reduce correlations when training, we can store the agent's experiences and later draw a random mini-batch of those experiences to train on.
Here, we'll create a Memory object that will store our experiences, our transitions $<s, a, r, s'>$. This memory will have a maximum capacity, so we can keep newer experiences in memory while getting rid of older experiences. Then, we'll sample a random mini-batch of transitions $<s, a, r, s'>$ and train on those.
Below, I've implemented a Memory object. If you're unfamiliar with deque, this is a double-ended queue. You can think of it like a tube open on both sides. You can put objects in either side of the tube. But if it's full, adding anything more will push an object out the other side. This is a great data structure to use for the memory buffer.
End of explanation
"""
# --- Training hyperparameters ---
train_episodes = 1000          # max number of episodes to learn from
max_steps = 200                # max steps in an episode
gamma = 0.99                   # future reward discount

# Exploration parameters (epsilon-greedy schedule, decayed per step)
explore_start = 1.0            # exploration probability at start
explore_stop = 0.01            # minimum exploration probability
decay_rate = 0.0001            # exponential decay rate for exploration prob

# Network parameters
hidden_size = 64               # number of units in each Q-network hidden layer
learning_rate = 0.0001         # Q-network learning rate

# Memory parameters
memory_size = 10000            # memory capacity (max transitions kept)
batch_size = 20                # experience mini-batch size
pretrain_length = batch_size   # number experiences to pretrain the memory

# Clear any previously built graph (TF 1.x API) before constructing the network.
tf.reset_default_graph()
# NOTE(review): QNetwork is defined in an earlier cell of this notebook.
mainQN = QNetwork(name='main', hidden_size=hidden_size, learning_rate=learning_rate)
"""
Explanation: Exploration - Exploitation
To learn about the environment and rules of the game, the agent needs to explore by taking random actions. We'll do this by choosing a random action with some probability $\epsilon$ (epsilon). That is, with some probability $\epsilon$ the agent will make a random action and with probability $1 - \epsilon$, the agent will choose an action from $Q(s,a)$. This is called an $\epsilon$-greedy policy.
At first, the agent needs to do a lot of exploring. Later when it has learned more, the agent can favor choosing actions based on what it has learned. This is called exploitation. We'll set it up so the agent is more likely to explore early in training, then more likely to exploit later in training.
Q-Learning training algorithm
Putting all this together, we can list out the algorithm we'll use to train the network. We'll train the network in episodes. One episode is one simulation of the game. For this game, the goal is to keep the pole upright for 195 frames. So we can start a new episode once meeting that goal. The game ends if the pole tilts over too far, or if the cart moves too far the left or right. When a game ends, we'll start a new episode. Now, to train the agent:
Initialize the memory $D$
Initialize the action-value network $Q$ with random weights
For episode = 1, $M$ do
For $t$, $T$ do
With probability $\epsilon$ select a random action $a_t$, otherwise select $a_t = \mathrm{argmax}_a Q(s,a)$
Execute action $a_t$ in simulator and observe reward $r_{t+1}$ and new state $s_{t+1}$
Store transition $<s_t, a_t, r_{t+1}, s_{t+1}>$ in memory $D$
Sample random mini-batch from $D$: $<s_j, a_j, r_j, s'_j>$
Set $\hat{Q}_j = r_j$ if the episode ends at $j+1$, otherwise set $\hat{Q}_j = r_j + \gamma \max_{a'}{Q(s'_j, a')}$
Make a gradient descent step with loss $(\hat{Q}_j - Q(s_j, a_j))^2$
endfor
endfor
Hyperparameters
One of the more difficult aspects of reinforcement learning is the large number of hyperparameters. Not only are we tuning the network, but we're tuning the simulation.
End of explanation
"""
# Initialize the simulation
env.reset()
# Take one random step to get the pole and cart moving
state, reward, done, _ = env.step(env.action_space.sample())

memory = Memory(max_size=memory_size)

# Make a bunch of random actions and store the experiences
for ii in range(pretrain_length):
    # Uncomment the line below to watch the simulation
    # env.render()

    # Make a random action
    action = env.action_space.sample()
    next_state, reward, done, _ = env.step(action)

    if done:
        # The simulation fails so no next state; an all-zero state marks
        # terminal transitions so training can zero their target Q-values later.
        next_state = np.zeros(state.shape)
        # Add experience to memory
        memory.add((state, action, reward, next_state))

        # Start new episode
        env.reset()
        # Take one random step to get the pole and cart moving
        state, reward, done, _ = env.step(env.action_space.sample())
    else:
        # Add experience to memory
        memory.add((state, action, reward, next_state))
        state = next_state
"""
Explanation: Populate the experience memory
Here I'm re-initializing the simulation and pre-populating the memory. The agent is taking random actions and storing the transitions in memory. This will help the agent with exploring the game.
End of explanation
"""
# Now train with experiences
saver = tf.train.Saver()
rewards_list = []
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())

    step = 0
    # FIX: `loss` was previously first assigned only after a gradient step, so an
    # episode that terminated on its very first step would hit the print below
    # with `loss` undefined (NameError). Initialize it up front.
    loss = 0.0
    for ep in range(1, train_episodes):
        total_reward = 0
        t = 0
        while t < max_steps:
            step += 1
            # Uncomment this next line to watch the training
            # env.render()

            # Explore or Exploit: epsilon decays exponentially with total steps taken
            explore_p = explore_stop + (explore_start - explore_stop)*np.exp(-decay_rate*step)
            if explore_p > np.random.rand():
                # Make a random action
                action = env.action_space.sample()
            else:
                # Get action from Q-network (greedy w.r.t. current Q estimates)
                feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
                Qs = sess.run(mainQN.output, feed_dict=feed)
                action = np.argmax(Qs)

            # Take action, get new state and reward
            next_state, reward, done, _ = env.step(action)
            total_reward += reward

            if done:
                # The episode ends so no next state; zeros mark a terminal state
                next_state = np.zeros(state.shape)
                t = max_steps

                print('Episode: {}'.format(ep),
                      'Total reward: {}'.format(total_reward),
                      'Training loss: {:.4f}'.format(loss),
                      'Explore P: {:.4f}'.format(explore_p))
                rewards_list.append((ep, total_reward))

                # Add experience to memory
                memory.add((state, action, reward, next_state))

                # Start new episode
                env.reset()
                # Take one random step to get the pole and cart moving
                state, reward, done, _ = env.step(env.action_space.sample())
            else:
                # Add experience to memory
                memory.add((state, action, reward, next_state))
                state = next_state
                t += 1

            # Sample a mini-batch from memory and train once per environment step
            batch = memory.sample(batch_size)
            states = np.array([each[0] for each in batch])
            actions = np.array([each[1] for each in batch])
            rewards = np.array([each[2] for each in batch])
            next_states = np.array([each[3] for each in batch])

            # Bootstrap targets from the current network's next-state Q-values
            target_Qs = sess.run(mainQN.output, feed_dict={mainQN.inputs_: next_states})

            # Set target_Qs to 0 for states where episode ends (all-zero next state)
            episode_ends = (next_states == np.zeros(states[0].shape)).all(axis=1)
            target_Qs[episode_ends] = (0, 0)

            # Bellman targets: r + gamma * max_a' Q(s', a')
            targets = rewards + gamma * np.max(target_Qs, axis=1)

            loss, _ = sess.run([mainQN.loss, mainQN.opt],
                               feed_dict={mainQN.inputs_: states,
                                          mainQN.targetQs_: targets,
                                          mainQN.actions_: actions})

    saver.save(sess, "checkpoints/cartpole.ckpt")
"""
Explanation: Training
Below we'll train our agent. If you want to watch it train, uncomment the env.render() line. This is slow because it's rendering the frames slower than the network can train. But, it's cool to watch the agent get better at the game.
End of explanation
"""
%matplotlib inline
import matplotlib.pyplot as plt
def running_mean(x, N):
    """Simple moving average of `x` with window length `N`.

    Returns an array of length len(x) - N + 1, where entry i is the
    mean of x[i:i+N], computed in O(len(x)) via prefix sums.
    """
    totals = np.cumsum(np.insert(x, 0, 0))
    window_sums = totals[N:] - totals[:-N]
    return window_sums / N
# Unzip the (episode, total_reward) pairs into two parallel arrays
eps, rews = np.array(rewards_list).T
# 10-episode rolling average; shorter than rews, so align it to the tail
smoothed_rews = running_mean(rews, 10)
plt.plot(eps[-len(smoothed_rews):], smoothed_rews)
# Raw per-episode rewards in faint grey behind the smoothed curve
plt.plot(eps, rews, color='grey', alpha=0.3)
plt.xlabel('Episode')
plt.ylabel('Total Reward')
"""
Explanation: Visualizing training
Below I'll plot the total rewards for each episode. I'm plotting the rolling average too, in blue.
End of explanation
"""
test_episodes = 10      # number of evaluation episodes to render
test_max_steps = 400    # step cap per evaluation episode
env.reset()
with tf.Session() as sess:
    # Restore the trained weights saved during training
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))

    for ep in range(1, test_episodes):
        t = 0
        while t < test_max_steps:
            env.render()

            # Get action from Q-network (pure exploitation: no epsilon here)
            feed = {mainQN.inputs_: state.reshape((1, *state.shape))}
            Qs = sess.run(mainQN.output, feed_dict=feed)
            action = np.argmax(Qs)

            # Take action, get new state and reward
            next_state, reward, done, _ = env.step(action)

            if done:
                t = test_max_steps
                env.reset()
                # Take one random step to get the pole and cart moving
                state, reward, done, _ = env.step(env.action_space.sample())
            else:
                state = next_state
                t += 1

env.close()
"""
Explanation: Testing
Let's checkout how our trained agent plays the game.
End of explanation
"""
|
ES-DOC/esdoc-jupyterhub | notebooks/test-institute-1/cmip6/models/sandbox-1/ocean.ipynb | gpl-3.0 | # DO NOT EDIT !
# ES-DOC notebook-output helper (project-local dependency).
from pyesdoc.ipython.model_topic import NotebookOutput

# DO NOT EDIT !
# Target document: project 'cmip6', institute 'test-institute-1',
# source id 'sandbox-1', topic 'ocean'.
DOC = NotebookOutput('cmip6', 'test-institute-1', 'sandbox-1', 'ocean')
"""
Explanation: ES-DOC CMIP6 Model Properties - Ocean
MIP Era: CMIP6
Institute: TEST-INSTITUTE-1
Source ID: SANDBOX-1
Topic: Ocean
Sub-Topics: Timestepping Framework, Advection, Lateral Physics, Vertical Physics, Uplow Boundaries, Boundary Forcing.
Properties: 133 (101 required)
Model descriptions: Model description details
Initialized From: --
Notebook Help: Goto notebook help page
Notebook Initialised: 2018-02-15 16:54:43
Document Setup
IMPORTANT: to be executed each time you run the notebook
End of explanation
"""
# Set as follows: DOC.set_author("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Authors
Set document authors
End of explanation
"""
# Set as follows: DOC.set_contributor("name", "email")
# TODO - please enter value(s)
"""
Explanation: Document Contributors
Specify document contributors
End of explanation
"""
# Set publication status:
# 0=do not publish, 1=publish.
DOC.set_publication_status(0)
"""
Explanation: Document Publication
Specify document publication status
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: Document Table of Contents
1. Key Properties
2. Key Properties --> Seawater Properties
3. Key Properties --> Bathymetry
4. Key Properties --> Nonoceanic Waters
5. Key Properties --> Software Properties
6. Key Properties --> Resolution
7. Key Properties --> Tuning Applied
8. Key Properties --> Conservation
9. Grid
10. Grid --> Discretisation --> Vertical
11. Grid --> Discretisation --> Horizontal
12. Timestepping Framework
13. Timestepping Framework --> Tracers
14. Timestepping Framework --> Baroclinic Dynamics
15. Timestepping Framework --> Barotropic
16. Timestepping Framework --> Vertical Physics
17. Advection
18. Advection --> Momentum
19. Advection --> Lateral Tracers
20. Advection --> Vertical Tracers
21. Lateral Physics
22. Lateral Physics --> Momentum --> Operator
23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
24. Lateral Physics --> Tracers
25. Lateral Physics --> Tracers --> Operator
26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
27. Lateral Physics --> Tracers --> Eddy Induced Velocity
28. Vertical Physics
29. Vertical Physics --> Boundary Layer Mixing --> Details
30. Vertical Physics --> Boundary Layer Mixing --> Tracers
31. Vertical Physics --> Boundary Layer Mixing --> Momentum
32. Vertical Physics --> Interior Mixing --> Details
33. Vertical Physics --> Interior Mixing --> Tracers
34. Vertical Physics --> Interior Mixing --> Momentum
35. Uplow Boundaries --> Free Surface
36. Uplow Boundaries --> Bottom Boundary Layer
37. Boundary Forcing
38. Boundary Forcing --> Momentum --> Bottom Friction
39. Boundary Forcing --> Momentum --> Lateral Friction
40. Boundary Forcing --> Tracers --> Sunlight Penetration
41. Boundary Forcing --> Tracers --> Fresh Water Forcing
1. Key Properties
Ocean key properties
1.1. Model Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 1.2. Model Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean model code (NEMO 3.6, MOM 5.0,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.model_family')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "OGCM"
# "slab ocean"
# "mixed layer ocean"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.3. Model Family
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of ocean model.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.basic_approximations')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Primitive equations"
# "Non-hydrostatic"
# "Boussinesq"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.4. Basic Approximations
Is Required: TRUE Type: ENUM Cardinality: 1.N
Basic approximations made in the ocean.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.prognostic_variables')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# "Salinity"
# "U-velocity"
# "V-velocity"
# "W-velocity"
# "SSH"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 1.5. Prognostic Variables
Is Required: TRUE Type: ENUM Cardinality: 1.N
List of prognostic variables in the ocean component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Linear"
# "Wright, 1997"
# "Mc Dougall et al."
# "Jackett et al. 2006"
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2. Key Properties --> Seawater Properties
Physical properties of seawater in ocean
2.1. Eos Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_temp')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Potential temperature"
# "Conservative temperature"
# TODO - please enter value(s)
"""
Explanation: 2.2. Eos Functional Temp
Is Required: TRUE Type: ENUM Cardinality: 1.1
Temperature used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_salt')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Practical salinity Sp"
# "Absolute salinity Sa"
# TODO - please enter value(s)
"""
Explanation: 2.3. Eos Functional Salt
Is Required: TRUE Type: ENUM Cardinality: 1.1
Salinity used in EOS for sea water
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.eos_functional_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Pressure (dbars)"
# "Depth (meters)"
# TODO - please enter value(s)
"""
Explanation: 2.4. Eos Functional Depth
Is Required: TRUE Type: ENUM Cardinality: 1.1
Depth or pressure used in EOS for sea water ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_freezing_point')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "TEOS 2010"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 2.5. Ocean Freezing Point
Is Required: TRUE Type: ENUM Cardinality: 1.1
Equation used to compute the freezing point (in deg C) of seawater, as a function of salinity and pressure
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_specific_heat')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.6. Ocean Specific Heat
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Specific heat in ocean (cpocean) in J/(kg K)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.seawater_properties.ocean_reference_density')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 2.7. Ocean Reference Density
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Boussinesq reference density (rhozero) in kg / m3
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.reference_dates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Present day"
# "21000 years BP"
# "6000 years BP"
# "LGM"
# "Pliocene"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 3. Key Properties --> Bathymetry
Properties of bathymetry in ocean
3.1. Reference Dates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Reference date of bathymetry
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 3.2. Type
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the bathymetry fixed in time in the ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.ocean_smoothing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.3. Ocean Smoothing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any smoothing or hand editing of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.bathymetry.source')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 3.4. Source
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe source of bathymetry in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.isolated_seas')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4. Key Properties --> Nonoceanic Waters
Non oceanic waters treatement in ocean
4.1. Isolated Seas
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how isolated seas is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.nonoceanic_waters.river_mouth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 4.2. River Mouth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe if/how river mouth mixing or estuaries specific treatment is performed
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.repository')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5. Key Properties --> Software Properties
Software properties of ocean code
5.1. Repository
Is Required: FALSE Type: STRING Cardinality: 0.1
Location of code for this component.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_version')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.2. Code Version
Is Required: FALSE Type: STRING Cardinality: 0.1
Code version identifier.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.software_properties.code_languages')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 5.3. Code Languages
Is Required: FALSE Type: STRING Cardinality: 0.N
Code language(s).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6. Key Properties --> Resolution
Resolution in the ocean grid
6.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
This is a string usually used by the modelling group to describe the resolution of this grid, e.g. ORCA025, N512L180, T512L70 etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.canonical_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.2. Canonical Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Expression quoted for gross comparisons of resolution, eg. 50km or 0.1 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.range_horizontal_resolution')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 6.3. Range Horizontal Resolution
Is Required: TRUE Type: STRING Cardinality: 1.1
Range of horizontal resolution with spatial details, eg. 50(Equator)-100km or 0.1-0.5 degrees etc.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_horizontal_gridpoints')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.4. Number Of Horizontal Gridpoints
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Total number of horizontal (XY) points (or degrees of freedom) on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.number_of_vertical_levels')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.5. Number Of Vertical Levels
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Number of vertical levels resolved on computational grid.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.is_adaptive_grid')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 6.6. Is Adaptive Grid
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Default is False. Set true if grid resolution changes during execution.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.resolution.thickness_level_1')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 6.7. Thickness Level 1
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Thickness of first surface ocean level (in meters)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7. Key Properties --> Tuning Applied
Tuning methodology for ocean component
7.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
General overview description of tuning: explain and motivate the main targets and metrics retained. Document the relative weight given to climate performance metrics versus process oriented metrics, and on the possible conflicts with parameterization level tuning. In particular describe any struggle with a parameter value that required pushing it to its limits to solve a particular model deficiency.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.global_mean_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.2. Global Mean Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List set of metrics of the global mean state used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.regional_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.3. Regional Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List of regional metrics of mean state (e.g THC, AABW, regional means etc) used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.tuning_applied.trend_metrics_used')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 7.4. Trend Metrics Used
Is Required: FALSE Type: STRING Cardinality: 0.N
List observed trend metrics used in tuning model/component
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.description')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8. Key Properties --> Conservation
Conservation in the ocean component
8.1. Description
Is Required: TRUE Type: STRING Cardinality: 1.1
Brief description of conservation methodology
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.scheme')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Energy"
# "Enstrophy"
# "Salt"
# "Volume of ocean"
# "Momentum"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 8.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.N
Properties conserved in the ocean by the numerical schemes
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.consistency_properties')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.3. Consistency Properties
Is Required: FALSE Type: STRING Cardinality: 0.1
Any additional consistency properties (energy conversion, pressure gradient discretisation, ...)?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.corrected_conserved_prognostic_variables')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 8.4. Corrected Conserved Prognostic Variables
Is Required: FALSE Type: STRING Cardinality: 0.1
Set of variables which are conserved by more than the numerical scheme alone.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.key_properties.conservation.was_flux_correction_used')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 8.5. Was Flux Correction Used
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Does conservation involve flux correction ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 9. Grid
Ocean grid
9.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of grid in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.coordinates')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Z-coordinate"
# "Z*-coordinate"
# "S-coordinate"
# "Isopycnic - sigma 0"
# "Isopycnic - sigma 2"
# "Isopycnic - sigma 4"
# "Isopycnic - other"
# "Hybrid / Z+S"
# "Hybrid / Z+isopycnic"
# "Hybrid / other"
# "Pressure referenced (P)"
# "P*"
# "Z**"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 10. Grid --> Discretisation --> Vertical
Properties of vertical discretisation in ocean
10.1. Coordinates
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical coordinates in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.vertical.partial_steps')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 10.2. Partial Steps
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Using partial steps with Z or Z* vertical coordinate in ocean?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Lat-lon"
# "Rotated north pole"
# "Two north poles (ORCA-style)"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11. Grid --> Discretisation --> Horizontal
Type of horizontal discretisation scheme in ocean
11.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal grid type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.staggering')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Arakawa B-grid"
# "Arakawa C-grid"
# "Arakawa E-grid"
# "N/a"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.2. Staggering
Is Required: FALSE Type: ENUM Cardinality: 0.1
Horizontal grid staggering type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.grid.discretisation.horizontal.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Finite difference"
# "Finite volumes"
# "Finite elements"
# "Unstructured grid"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 11.3. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Horizontal discretisation scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
# ---------------------------------------------------------------------------
# NOTE(review): Auto-generated ES-DOC / CMIP6 model-documentation notebook
# scaffolding (ocean realm).  Every "cell" below follows the same pattern:
#   1. DOC.set_id('<cmip6 property id>')  -- selects the property (DO NOT EDIT)
#   2. DOC.set_value(...)                 -- to be filled in by the modeller
#   3. a bare triple-quoted string: the original markdown explanation of the
#      property (a no-op expression statement; it records the property's
#      Is Required / Type / Cardinality contract and its meaning).
# `DOC` is presumably the pyesdoc notebook output object created earlier in
# this file -- TODO confirm.  Spellings inside the "Valid Choices" comments
# (e.g. "Acvective", "Sub cyling", "Imbeded", "momemtum", "diffusity")
# intentionally mirror the CMIP6 controlled vocabulary and must NOT be
# corrected here, or DOC.set_value() lookups against the CV will fail.
# ---------------------------------------------------------------------------
DOC.set_id('cmip6.ocean.timestepping_framework.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 12. Timestepping Framework
Ocean Timestepping Framework
12.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.diurnal_cycle')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "None"
#      "Via coupling"
#      "Specific treatment"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 12.2. Diurnal Cycle
Is Required: TRUE Type: ENUM Cardinality: 1.1
Diurnal cycle type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Leap-frog + Asselin filter"
#      "Leap-frog + Periodic Euler"
#      "Predictor-corrector"
#      "Runge-Kutta 2"
#      "AM3-LF"
#      "Forward-backward"
#      "Forward operator"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 13. Timestepping Framework --> Tracers
Properties of tracers time stepping in ocean
13.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Tracers time stepping scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.tracers.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 13.2. Time Step
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Tracers time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Preconditioned conjugate gradient"
#      "Sub cyling"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14. Timestepping Framework --> Baroclinic Dynamics
Baroclinic dynamics in ocean
14.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics type
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Leap-frog + Asselin filter"
#      "Leap-frog + Periodic Euler"
#      "Predictor-corrector"
#      "Runge-Kutta 2"
#      "AM3-LF"
#      "Forward-backward"
#      "Forward operator"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 14.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Baroclinic dynamics scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.baroclinic_dynamics.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 14.3. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Baroclinic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.splitting')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "None"
#      "split explicit"
#      "implicit"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 15. Timestepping Framework --> Barotropic
Barotropic time stepping in ocean
15.1. Splitting
Is Required: TRUE Type: ENUM Cardinality: 1.1
Time splitting method
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.barotropic.time_step')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 15.2. Time Step
Is Required: FALSE Type: INTEGER Cardinality: 0.1
Barotropic time step (in seconds)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.timestepping_framework.vertical_physics.method')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 16. Timestepping Framework --> Vertical Physics
Vertical physics time stepping in ocean
16.1. Method
Is Required: TRUE Type: STRING Cardinality: 1.1
Details of vertical time stepping in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 17. Advection
Ocean advection
17.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of advection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Flux form"
#      "Vector form"
# TODO - please enter value(s)
"""
Explanation: 18. Advection --> Momentum
Properties of lateral momemtum advection scheme in ocean
18.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of lateral momemtum advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.scheme_name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 18.2. Scheme Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Name of ocean momemtum advection scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.momentum.ALE')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 18.3. ALE
Is Required: FALSE Type: BOOLEAN Cardinality: 0.1
Using ALE for vertical advection ? (if vertical coordinates are sigma)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19. Advection --> Lateral Tracers
Properties of lateral tracer advection scheme in ocean
19.1. Order
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Order of lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 19.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for lateral tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.effective_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 19.3. Effective Order
Is Required: TRUE Type: FLOAT Cardinality: 1.1
Effective order of limited lateral tracer advection scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.4. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for lateral tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers')
# PROPERTY VALUE(S):
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Ideal age"
#      "CFC 11"
#      "CFC 12"
#      "SF6"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 19.5. Passive Tracers
Is Required: FALSE Type: ENUM Cardinality: 0.N
Passive tracers advected
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.lateral_tracers.passive_tracers_advection')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 19.6. Passive Tracers Advection
Is Required: FALSE Type: STRING Cardinality: 0.1
Is advection of passive tracers different than active ? if so, describe.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.name')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 20. Advection --> Vertical Tracers
Properties of vertical tracer advection scheme in ocean
20.1. Name
Is Required: TRUE Type: STRING Cardinality: 1.1
Descriptive text for vertical tracer advection scheme in ocean (e.g. MUSCL, PPM-H5, PRATHER,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.advection.vertical_tracers.flux_limiter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 20.2. Flux Limiter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Monotonic flux limiter for vertical tracer advection scheme in ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 21. Lateral Physics
Ocean lateral physics
21.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of lateral physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "None"
#      "Eddy active"
#      "Eddy admitting"
# TODO - please enter value(s)
"""
Explanation: 21.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of transient eddy representation in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Horizontal"
#      "Isopycnal"
#      "Isoneutral"
#      "Geopotential"
#      "Iso-level"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22. Lateral Physics --> Momentum --> Operator
Properties of lateral physics operator for momentum in ocean
22.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics momemtum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Harmonic"
#      "Bi-harmonic"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics momemtum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Second order"
#      "Higher order"
#      "Flux limiter"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 22.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics momemtum scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Constant"
#      "Space varying"
#      "Time + space varying (Smagorinsky)"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 23. Lateral Physics --> Momentum --> Eddy Viscosity Coeff
Properties of eddy viscosity coeff in lateral physics momemtum scheme in the ocean
23.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics momemtum eddy viscosity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 23.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy viscosity coeff in lateral physics momemtum scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy viscosity coeff in lateral physics momemtum scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 23.4. Coeff Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe background eddy viscosity coeff in lateral physics momemtum scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.momentum.eddy_viscosity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 23.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy viscosity coeff in lateral physics momemtum scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.mesoscale_closure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 24. Lateral Physics --> Tracers
Properties of lateral physics for tracers in ocean
24.1. Mesoscale Closure
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a mesoscale closure in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.submesoscale_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 24.2. Submesoscale Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there a submesoscale mixing parameterisation (i.e Fox-Kemper) in the lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.direction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Horizontal"
#      "Isopycnal"
#      "Isoneutral"
#      "Geopotential"
#      "Iso-level"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25. Lateral Physics --> Tracers --> Operator
Properties of lateral physics operator for tracers in ocean
25.1. Direction
Is Required: TRUE Type: ENUM Cardinality: 1.1
Direction of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Harmonic"
#      "Bi-harmonic"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.2. Order
Is Required: TRUE Type: ENUM Cardinality: 1.1
Order of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.operator.discretisation')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Second order"
#      "Higher order"
#      "Flux limiter"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 25.3. Discretisation
Is Required: TRUE Type: ENUM Cardinality: 1.1
Discretisation of lateral physics tracers scheme in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Constant"
#      "Space varying"
#      "Time + space varying (Smagorinsky)"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 26. Lateral Physics --> Tracers --> Eddy Diffusity Coeff
Properties of eddy diffusity coeff in lateral physics tracers scheme in the ocean
26.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Lateral physics tracers eddy diffusity coeff type in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.constant_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.2. Constant Coefficient
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant, value of eddy diffusity coeff in lateral physics tracers scheme (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.variable_coefficient')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 26.3. Variable Coefficient
Is Required: FALSE Type: STRING Cardinality: 0.1
If space-varying, describe variations of eddy diffusity coeff in lateral physics tracers scheme
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 26.4. Coeff Background
Is Required: TRUE Type: INTEGER Cardinality: 1.1
Describe background eddy diffusity coeff in lateral physics tracers scheme (give values in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_diffusity_coeff.coeff_backscatter')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 26.5. Coeff Backscatter
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there backscatter in eddy diffusity coeff in lateral physics tracers scheme ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "GM"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 27. Lateral Physics --> Tracers --> Eddy Induced Velocity
Properties of eddy induced velocity (EIV) in lateral physics tracers scheme in the ocean
27.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of EIV in lateral physics tracers in the ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.constant_val')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 27.2. Constant Val
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If EIV scheme for tracers is constant, specify coefficient value (M2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.flux_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.3. Flux Type
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV flux (advective or skew)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.lateral_physics.tracers.eddy_induced_velocity.added_diffusivity')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 27.4. Added Diffusivity
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of EIV added diffusivity (constant, flow dependent or none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 28. Vertical Physics
Ocean Vertical Physics
28.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of vertical physics in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.details.langmuir_cells_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 29. Vertical Physics --> Boundary Layer Mixing --> Details
Properties of vertical physics in ocean
29.1. Langmuir Cells Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there Langmuir cells mixing in upper ocean ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Constant value"
#      "Turbulent closure - TKE"
#      "Turbulent closure - KPP"
#      "Turbulent closure - Mellor-Yamada"
#      "Turbulent closure - Bulk Mixed Layer"
#      "Richardson number dependent - PP"
#      "Richardson number dependent - KT"
#      "Imbeded as isopycnic vertical coordinate"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 30. Vertical Physics --> Boundary Layer Mixing --> Tracers
*Properties of boundary layer (BL) mixing on tracers in the ocean *
30.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of tracers, specific order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 30.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of tracers, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 30.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of tracers coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Constant value"
#      "Turbulent closure - TKE"
#      "Turbulent closure - KPP"
#      "Turbulent closure - Mellor-Yamada"
#      "Turbulent closure - Bulk Mixed Layer"
#      "Richardson number dependent - PP"
#      "Richardson number dependent - KT"
#      "Imbeded as isopycnic vertical coordinate"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 31. Vertical Physics --> Boundary Layer Mixing --> Momentum
*Properties of boundary layer (BL) mixing on momentum in the ocean *
31.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of boundary layer mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.closure_order')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.2. Closure Order
Is Required: FALSE Type: FLOAT Cardinality: 0.1
If turbulent BL mixing of momentum, specific order of closure (0, 1, 2.5, 3)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 31.3. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant BL mixing of momentum, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.boundary_layer_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 31.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background BL mixing of momentum coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.convection_type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Non-penetrative convective adjustment"
#      "Enhanced vertical diffusion"
#      "Included in turbulence closure"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 32. Vertical Physics --> Interior Mixing --> Details
*Properties of interior mixing in the ocean *
32.1. Convection Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of vertical convection in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.tide_induced_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 32.2. Tide Induced Mixing
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how tide induced mixing is modelled (barotropic, baroclinic, none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.double_diffusion')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 32.3. Double Diffusion
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there double diffusion
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.details.shear_mixing')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 32.4. Shear Mixing
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is there interior shear mixing
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Constant value"
#      "Turbulent closure / TKE"
#      "Turbulent closure - Mellor-Yamada"
#      "Richardson number dependent - PP"
#      "Richardson number dependent - KT"
#      "Imbeded as isopycnic vertical coordinate"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 33. Vertical Physics --> Interior Mixing --> Tracers
*Properties of interior mixing on tracers in the ocean *
33.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for tracers in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 33.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of tracers, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for tracers (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.tracers.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 33.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of tracers coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Constant value"
#      "Turbulent closure / TKE"
#      "Turbulent closure - Mellor-Yamada"
#      "Richardson number dependent - PP"
#      "Richardson number dependent - KT"
#      "Imbeded as isopycnic vertical coordinate"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 34. Vertical Physics --> Interior Mixing --> Momentum
*Properties of interior mixing on momentum in the ocean *
34.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of interior mixing for momentum in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.constant')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 34.2. Constant
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If constant interior mixing of momentum, specific coefficient (m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.profile')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.3. Profile
Is Required: TRUE Type: STRING Cardinality: 1.1
Is the background interior mixing using a vertical profile for momentum (i.e is NOT constant) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.vertical_physics.interior_mixing.momentum.background')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 34.4. Background
Is Required: TRUE Type: STRING Cardinality: 1.1
Background interior mixing of momentum coefficient, (schema and value in m2/s - may by none)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 35. Uplow Boundaries --> Free Surface
Properties of free surface in ocean
35.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of free surface in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Linear implicit"
#      "Linear filtered"
#      "Linear semi-explicit"
#      "Non-linear implicit"
#      "Non-linear filtered"
#      "Non-linear semi-explicit"
#      "Fully explicit"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 35.2. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Free surface scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.free_surface.embeded_seaice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
#      True
#      False
# TODO - please enter value(s)
"""
Explanation: 35.3. Embeded Seaice
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the sea-ice embeded in the ocean model (instead of levitating) ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36. Uplow Boundaries --> Bottom Boundary Layer
Properties of bottom boundary layer in ocean
36.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.type_of_bbl')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Diffusive"
#      "Acvective"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 36.2. Type Of Bbl
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of bottom boundary layer in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.lateral_mixing_coef')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# TODO - please enter value(s)
"""
Explanation: 36.3. Lateral Mixing Coef
Is Required: FALSE Type: INTEGER Cardinality: 0.1
If bottom BL is diffusive, specify value of lateral mixing coefficient (in m2/s)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.uplow_boundaries.bottom_boundary_layer.sill_overflow')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 36.4. Sill Overflow
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe any specific treatment of sill overflows
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.overview')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37. Boundary Forcing
Ocean boundary forcing
37.1. Overview
Is Required: TRUE Type: STRING Cardinality: 1.1
Overview of boundary forcing in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.surface_pressure')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.2. Surface Pressure
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how surface pressure is transmitted to ocean (via sea-ice, nothing specific,...)
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.3. Momentum Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface momentum flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers_flux_correction')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.4. Tracers Flux Correction
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe any type of ocean surface tracers flux correction and, if applicable, how it is applied and where.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.wave_effects')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.5. Wave Effects
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how wave effects are modelled at ocean surface.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.river_runoff_budget')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.6. River Runoff Budget
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe how river runoff from land surface is routed to ocean and any global adjustment done.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.geothermal_heating')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 37.7. Geothermal Heating
Is Required: TRUE Type: STRING Cardinality: 1.1
Describe if/how geothermal heating is present at ocean bottom.
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.bottom_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
#      "Linear"
#      "Non-linear"
#      "Non-linear (drag function of speed of tides)"
#      "Constant drag coefficient"
#      "None"
#      "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 38. Boundary Forcing --> Momentum --> Bottom Friction
Properties of momentum bottom friction in ocean
38.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum bottom friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.momentum.lateral_friction.type')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "None"
# "Free-slip"
# "No-slip"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 39. Boundary Forcing --> Momentum --> Lateral Friction
Properties of momentum lateral friction in ocean
39.1. Type
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of momentum lateral friction in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.scheme')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "1 extinction depth"
# "2 extinction depth"
# "3 extinction depth"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 40. Boundary Forcing --> Tracers --> Sunlight Penetration
Properties of sunlight penetration scheme in ocean
40.1. Scheme
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of sunlight penetration scheme in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.ocean_colour')
# PROPERTY VALUE:
# Set as follows: DOC.set_value(value)
# Valid Choices:
# True
# False
# TODO - please enter value(s)
"""
Explanation: 40.2. Ocean Colour
Is Required: TRUE Type: BOOLEAN Cardinality: 1.1
Is the ocean sunlight penetration scheme ocean colour dependent ?
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.sunlight_penetration.extinction_depth')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 40.3. Extinction Depth
Is Required: FALSE Type: STRING Cardinality: 0.1
Describe and list extinctions depths for sunlight penetration scheme (if applicable).
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_atmopshere')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41. Boundary Forcing --> Tracers --> Fresh Water Forcing
Properties of surface fresh water forcing in ocean
41.1. From Atmopshere
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from atmos in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.from_sea_ice')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# Valid Choices:
# "Freshwater flux"
# "Virtual salt flux"
# "Real salt flux"
# "Other: [Please specify]"
# TODO - please enter value(s)
"""
Explanation: 41.2. From Sea Ice
Is Required: TRUE Type: ENUM Cardinality: 1.1
Type of surface fresh water forcing from sea-ice in ocean
End of explanation
"""
# PROPERTY ID - DO NOT EDIT !
DOC.set_id('cmip6.ocean.boundary_forcing.tracers.fresh_water_forcing.forced_mode_restoring')
# PROPERTY VALUE:
# Set as follows: DOC.set_value("value")
# TODO - please enter value(s)
"""
Explanation: 41.3. Forced Mode Restoring
Is Required: TRUE Type: STRING Cardinality: 1.1
Type of surface salinity restoring in forced mode (OMIP)
End of explanation
"""
|
facebook/prophet | notebooks/seasonality,_holiday_effects,_and_regressors.ipynb | mit | %%R
library(dplyr)
playoffs <- data_frame(
holiday = 'playoff',
ds = as.Date(c('2008-01-13', '2009-01-03', '2010-01-16',
'2010-01-24', '2010-02-07', '2011-01-08',
'2013-01-12', '2014-01-12', '2014-01-19',
'2014-02-02', '2015-01-11', '2016-01-17',
'2016-01-24', '2016-02-07')),
lower_window = 0,
upper_window = 1
)
superbowls <- data_frame(
holiday = 'superbowl',
ds = as.Date(c('2010-02-07', '2014-02-02', '2016-02-07')),
lower_window = 0,
upper_window = 1
)
holidays <- bind_rows(playoffs, superbowls)
playoffs = pd.DataFrame({
'holiday': 'playoff',
'ds': pd.to_datetime(['2008-01-13', '2009-01-03', '2010-01-16',
'2010-01-24', '2010-02-07', '2011-01-08',
'2013-01-12', '2014-01-12', '2014-01-19',
'2014-02-02', '2015-01-11', '2016-01-17',
'2016-01-24', '2016-02-07']),
'lower_window': 0,
'upper_window': 1,
})
superbowls = pd.DataFrame({
'holiday': 'superbowl',
'ds': pd.to_datetime(['2010-02-07', '2014-02-02', '2016-02-07']),
'lower_window': 0,
'upper_window': 1,
})
holidays = pd.concat((playoffs, superbowls))
"""
Explanation: Modeling Holidays and Special Events
If you have holidays or other recurring events that you'd like to model, you must create a dataframe for them. It has two columns (holiday and ds) and a row for each occurrence of the holiday. It must include all occurrences of the holiday, both in the past (back as far as the historical data go) and in the future (out as far as the forecast is being made). If they won't repeat in the future, Prophet will model them and then not include them in the forecast.
You can also include columns lower_window and upper_window which extend the holiday out to [lower_window, upper_window] days around the date. For instance, if you wanted to include Christmas Eve in addition to Christmas you'd include lower_window=-1,upper_window=0. If you wanted to use Black Friday in addition to Thanksgiving, you'd include lower_window=0,upper_window=1. You can also include a column prior_scale to set the prior scale separately for each holiday, as described below.
Here we create a dataframe that includes the dates of all of Peyton Manning's playoff appearances:
End of explanation
"""
%%R
m <- prophet(df, holidays = holidays)
forecast <- predict(m, future)
m = Prophet(holidays=holidays)
forecast = m.fit(df).predict(future)
"""
Explanation: Above we have included the superbowl days as both playoff games and superbowl games. This means that the superbowl effect will be an additional additive bonus on top of the playoff effect.
Once the table is created, holiday effects are included in the forecast by passing them in with the holidays argument. Here we do it with the Peyton Manning data from the Quickstart:
End of explanation
"""
%%R
forecast %>%
select(ds, playoff, superbowl) %>%
filter(abs(playoff + superbowl) > 0) %>%
tail(10)
forecast[(forecast['playoff'] + forecast['superbowl']).abs() > 0][
['ds', 'playoff', 'superbowl']][-10:]
"""
Explanation: The holiday effect can be seen in the forecast dataframe:
End of explanation
"""
%%R -w 9 -h 12 -u in
prophet_plot_components(m, forecast)
fig = m.plot_components(forecast)
"""
Explanation: The holiday effects will also show up in the components plot, where we see that there is a spike on the days around playoff appearances, with an especially large spike for the superbowl:
End of explanation
"""
%%R
m <- prophet(holidays = holidays)
m <- add_country_holidays(m, country_name = 'US')
m <- fit.prophet(m, df)
m = Prophet(holidays=holidays)
m.add_country_holidays(country_name='US')
m.fit(df)
"""
Explanation: Individual holidays can be plotted using the plot_forecast_component function (imported from prophet.plot in Python) like plot_forecast_component(m, forecast, 'superbowl') to plot just the superbowl holiday component.
Built-in Country Holidays
You can use a built-in collection of country-specific holidays using the add_country_holidays method (Python) or function (R). The name of the country is specified, and then major holidays for that country will be included in addition to any holidays that are specified via the holidays argument described above:
End of explanation
"""
%%R
m$train.holiday.names
m.train_holiday_names
"""
Explanation: You can see which holidays were included by looking at the train_holiday_names (Python) or train.holiday.names (R) attribute of the model:
End of explanation
"""
%%R -w 9 -h 12 -u in
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
forecast = m.predict(future)
fig = m.plot_components(forecast)
"""
Explanation: The holidays for each country are provided by the holidays package in Python. A list of available countries, and the country name to use, is available on their page: https://github.com/dr-prodigy/python-holidays. In addition to those countries, Prophet includes holidays for these countries: Brazil (BR), Indonesia (ID), India (IN), Malaysia (MY), Vietnam (VN), Thailand (TH), Philippines (PH), Pakistan (PK), Bangladesh (BD), Egypt (EG), China (CN), and Russian (RU), Korea (KR), Belarus (BY), and United Arab Emirates (AE).
In Python, most holidays are computed deterministically and so are available for any date range; a warning will be raised if dates fall outside the range supported by that country. In R, holiday dates are computed for 1995 through 2044 and stored in the package as data-raw/generated_holidays.csv. If a wider date range is needed, this script can be used to replace that file with a different date range: https://github.com/facebook/prophet/blob/main/python/scripts/generate_holidays_file.py.
As above, the country-level holidays will then show up in the components plot:
End of explanation
"""
%%R -w 9 -h 3 -u in
m <- prophet(df)
prophet:::plot_yearly(m)
from prophet.plot import plot_yearly
m = Prophet().fit(df)
a = plot_yearly(m)
"""
Explanation: Fourier Order for Seasonalities
Seasonalities are estimated using a partial Fourier sum. See the paper for complete details, and this figure on Wikipedia for an illustration of how a partial Fourier sum can approximate an arbitrary periodic signal. The number of terms in the partial sum (the order) is a parameter that determines how quickly the seasonality can change. To illustrate this, consider the Peyton Manning data from the Quickstart. The default Fourier order for yearly seasonality is 10, which produces this fit:
End of explanation
"""
%%R -w 9 -h 3 -u in
m <- prophet(df, yearly.seasonality = 20)
prophet:::plot_yearly(m)
from prophet.plot import plot_yearly
m = Prophet(yearly_seasonality=20).fit(df)
a = plot_yearly(m)
"""
Explanation: The default values are often appropriate, but they can be increased when the seasonality needs to fit higher-frequency changes, and generally be less smooth. The Fourier order can be specified for each built-in seasonality when instantiating the model, here it is increased to 20:
End of explanation
"""
%%R -w 9 -h 9 -u in
m <- prophet(weekly.seasonality=FALSE)
m <- add_seasonality(m, name='monthly', period=30.5, fourier.order=5)
m <- fit.prophet(m, df)
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
m = Prophet(weekly_seasonality=False)
m.add_seasonality(name='monthly', period=30.5, fourier_order=5)
forecast = m.fit(df).predict(future)
fig = m.plot_components(forecast)
"""
Explanation: Increasing the number of Fourier terms allows the seasonality to fit faster changing cycles, but can also lead to overfitting: N Fourier terms corresponds to 2N variables used for modeling the cycle
Specifying Custom Seasonalities
Prophet will by default fit weekly and yearly seasonalities, if the time series is more than two cycles long. It will also fit daily seasonality for a sub-daily time series. You can add other seasonalities (monthly, quarterly, hourly) using the add_seasonality method (Python) or function (R).
The inputs to this function are a name, the period of the seasonality in days, and the Fourier order for the seasonality. For reference, by default Prophet uses a Fourier order of 3 for weekly seasonality and 10 for yearly seasonality. An optional input to add_seasonality is the prior scale for that seasonal component - this is discussed below.
As an example, here we fit the Peyton Manning data from the Quickstart, but replace the weekly seasonality with monthly seasonality. The monthly seasonality then will appear in the components plot:
End of explanation
"""
%%R
is_nfl_season <- function(ds) {
dates <- as.Date(ds)
month <- as.numeric(format(dates, '%m'))
return(month > 8 | month < 2)
}
df$on_season <- is_nfl_season(df$ds)
df$off_season <- !is_nfl_season(df$ds)
def is_nfl_season(ds):
    """Return True when *ds* falls in the NFL on-season (September through January)."""
    month = pd.to_datetime(ds).month
    # On-season is every month outside February..August.
    return not (2 <= month <= 8)
df['on_season'] = df['ds'].apply(is_nfl_season)
df['off_season'] = ~df['ds'].apply(is_nfl_season)
"""
Explanation: Seasonalities that depend on other factors
In some instances the seasonality may depend on other factors, such as a weekly seasonal pattern that is different during the summer than it is during the rest of the year, or a daily seasonal pattern that is different on weekends vs. on weekdays. These types of seasonalities can be modeled using conditional seasonalities.
Consider the Peyton Manning example from the Quickstart. The default weekly seasonality assumes that the pattern of weekly seasonality is the same throughout the year, but we'd expect the pattern of weekly seasonality to be different during the on-season (when there are games every Sunday) and the off-season. We can use conditional seasonalities to construct separate on-season and off-season weekly seasonalities.
First we add a boolean column to the dataframe that indicates whether each date is during the on-season or the off-season:
End of explanation
"""
%%R -w 9 -h 12 -u in
m <- prophet(weekly.seasonality=FALSE)
m <- add_seasonality(m, name='weekly_on_season', period=7, fourier.order=3, condition.name='on_season')
m <- add_seasonality(m, name='weekly_off_season', period=7, fourier.order=3, condition.name='off_season')
m <- fit.prophet(m, df)
future$on_season <- is_nfl_season(future$ds)
future$off_season <- !is_nfl_season(future$ds)
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
m = Prophet(weekly_seasonality=False)
m.add_seasonality(name='weekly_on_season', period=7, fourier_order=3, condition_name='on_season')
m.add_seasonality(name='weekly_off_season', period=7, fourier_order=3, condition_name='off_season')
future['on_season'] = future['ds'].apply(is_nfl_season)
future['off_season'] = ~future['ds'].apply(is_nfl_season)
forecast = m.fit(df).predict(future)
fig = m.plot_components(forecast)
"""
Explanation: Then we disable the built-in weekly seasonality, and replace it with two weekly seasonalities that have these columns specified as a condition. This means that the seasonality will only be applied to dates where the condition_name column is True. We must also add the column to the future dataframe for which we are making predictions.
End of explanation
"""
%%R
m <- prophet(df, holidays = holidays, holidays.prior.scale = 0.05)
forecast <- predict(m, future)
forecast %>%
select(ds, playoff, superbowl) %>%
filter(abs(playoff + superbowl) > 0) %>%
tail(10)
m = Prophet(holidays=holidays, holidays_prior_scale=0.05).fit(df)
forecast = m.predict(future)
forecast[(forecast['playoff'] + forecast['superbowl']).abs() > 0][
['ds', 'playoff', 'superbowl']][-10:]
"""
Explanation: Both of the seasonalities now show up in the components plots above. We can see that during the on-season when games are played every Sunday, there are large increases on Sunday and Monday that are completely absent during the off-season.
Prior scale for holidays and seasonality
If you find that the holidays are overfitting, you can adjust their prior scale to smooth them using the parameter holidays_prior_scale. By default this parameter is 10, which provides very little regularization. Reducing this parameter dampens holiday effects:
End of explanation
"""
%%R
m <- prophet()
m <- add_seasonality(
m, name='weekly', period=7, fourier.order=3, prior.scale=0.1)
m = Prophet()
m.add_seasonality(
name='weekly', period=7, fourier_order=3, prior_scale=0.1)
"""
Explanation: The magnitude of the holiday effect has been reduced compared to before, especially for superbowls, which had the fewest observations. There is a parameter seasonality_prior_scale which similarly adjusts the extent to which the seasonality model will fit the data.
Prior scales can be set separately for individual holidays by including a column prior_scale in the holidays dataframe. Prior scales for individual seasonalities can be passed as an argument to add_seasonality. For instance, the prior scale for just weekly seasonality can be set using:
End of explanation
"""
%%R -w 9 -h 12 -u in
nfl_sunday <- function(ds) {
dates <- as.Date(ds)
month <- as.numeric(format(dates, '%m'))
as.numeric((weekdays(dates) == "Sunday") & (month > 8 | month < 2))
}
df$nfl_sunday <- nfl_sunday(df$ds)
m <- prophet()
m <- add_regressor(m, 'nfl_sunday')
m <- fit.prophet(m, df)
future$nfl_sunday <- nfl_sunday(future$ds)
forecast <- predict(m, future)
prophet_plot_components(m, forecast)
def nfl_sunday(ds):
    """Return 1 when *ds* is a Sunday during the NFL season (Sep-Jan), else 0."""
    d = pd.to_datetime(ds)
    on_season = d.month > 8 or d.month < 2
    # weekday() == 6 is Sunday in pandas/datetime convention.
    return 1 if d.weekday() == 6 and on_season else 0
df['nfl_sunday'] = df['ds'].apply(nfl_sunday)
m = Prophet()
m.add_regressor('nfl_sunday')
m.fit(df)
future['nfl_sunday'] = future['ds'].apply(nfl_sunday)
forecast = m.predict(future)
fig = m.plot_components(forecast)
"""
Explanation: Additional regressors
Additional regressors can be added to the linear part of the model using the add_regressor method or function. A column with the regressor value will need to be present in both the fitting and prediction dataframes. For example, we can add an additional effect on Sundays during the NFL season. On the components plot, this effect will show up in the 'extra_regressors' plot:
End of explanation
"""
|
ethanrowe/flowz | userguide/05. Incremental Assembly.ipynb | mit | random.seed(1)
chan = IterChannel((i, random.randint(100, 200)) for i in range(10))
print_chans(chan.tee())
"""
Explanation: Incremental Assembly
Suppose you have a function that calculates some value for a given index, which we will think of as "days from the beginning of the year".
End of explanation
"""
from flowz.util import incremental_assembly, NO_VALUE
# NO_VALUE is a special value defined for incremental_assembly() that indicates the start of assembly
def prepend_assembler(new, old):
    """
    A simple assembler that prepends new data at the beginning of the tuple of old data.
    """
    # NO_VALUE marks the very first assembly step, where there is no prior tuple.
    return (new,) if old is NO_VALUE else (new,) + old
dest = IterChannel([])
out = incremental_assembly(chan.tee(), dest.tee(), prepend_assembler)
print_chans(out)
"""
Explanation: On any given day, you may want to know not just the value on that day, but all of the historical values as well. And it would be lovely to be able to get that in one data structure, especially if stored in cloud storage, rather than having to iterate over a channel each time.
flowz provides an incremental assembly facility that makes this relatively easy to do.
End of explanation
"""
|
regardscitoyens/consultation_an | exploitation/analyse_quanti_theme3.ipynb | agpl-3.0 | def loadContributions(file, withsexe=False):
contributions = pd.read_json(path_or_buf=file, orient="columns")
rows = [];
rindex = [];
for i in range(0, contributions.shape[0]):
row = {};
row['id'] = contributions['id'][i]
rindex.append(contributions['id'][i])
if (withsexe):
if (contributions['sexe'][i] == 'Homme'):
row['sexe'] = 0
else:
row['sexe'] = 1
for question in contributions['questions'][i]:
if (question.get('Reponse')) and (question['texte'][0:5] != 'Savez') :
row[question['titreQuestion']+' : '+question['texte']] = 1
for criteres in question.get('Reponse'):
# print(criteres['critere'].keys())
row[question['titreQuestion']+'. (Réponse) '+question['texte']+' -> '+str(criteres['critere'].get('texte'))] = 1
rows.append(row)
df = pd.DataFrame(data=rows)
df.fillna(0, inplace=True)
return df
df = loadContributions('../data/EGALITE3.brut.json', True)
df.fillna(0, inplace=True)
df.index = df['id']
#df.to_csv('consultation_an.csv', format='%d')
#df.columns = ['Q_' + str(col+1) for col in range(len(df.columns) - 2)] + ['id' , 'sexe']
df.head()
"""
Explanation: Reading the data
End of explanation
"""
from sklearn.cluster import KMeans
from sklearn import metrics
import numpy as np
X = df.drop('id', axis=1).values
def train_kmeans(nb_clusters, X):
    """Fit and return a KMeans model with *nb_clusters* clusters on matrix *X*."""
    # random_state pinned for reproducible cluster assignments.
    return KMeans(n_clusters=nb_clusters, random_state=0).fit(X)
#print(kmeans.predict(X))
#kmeans.cluster_centers_
def select_nb_clusters():
    """Scan candidate cluster counts (2..9) and score each with the silhouette metric.

    Relies on the module-level feature matrix ``X`` and the sibling
    ``train_kmeans`` helper.  Returns a dict mapping cluster count to
    silhouette score (higher is better); the caller picks the argmax.
    """
    perfs = {};
    for nbclust in range(2,10):
        kmeans_model = train_kmeans(nbclust, X);
        labels = kmeans_model.labels_
        # from http://scikit-learn.org/stable/modules/clustering.html#calinski-harabaz-index
        # we are in an unsupervised model. cannot get better!
        # perfs[nbclust] = metrics.calinski_harabaz_score(X, labels);
        # Silhouette score: unsupervised cluster-quality measure in [-1, 1].
        perfs[nbclust] = metrics.silhouette_score(X, labels);
    print(perfs);
    return perfs;
df['clusterindex'] = train_kmeans(4, X).predict(X)
#df
perfs = select_nb_clusters();
# result :
# {2: 341.07570462155348, 3: 227.39963334619881, 4: 186.90438345452918, 5: 151.03979976346525, 6: 129.11214073405731, 7: 112.37235520885432, 8: 102.35994869157568, 9: 93.848315820675438}
optimal_nb_clusters = max(perfs, key=perfs.get);
print("optimal_nb_clusters" , optimal_nb_clusters);
"""
Explanation: Build clustering model
Here we build a kmeans model , and select the "optimal" of clusters.
Here we see that the optimal number of clusters is 2.
End of explanation
"""
km_model = train_kmeans(optimal_nb_clusters, X);
df['clusterindex'] = km_model.predict(X)
lGroupBy = df.groupby(['clusterindex']).mean();
cluster_profile_counts = df.groupby(['clusterindex']).count();
cluster_profile_means = df.groupby(['clusterindex']).mean();
global_counts = df.count()
global_means = df.mean()
cluster_profile_counts.head(10)
df_profiles = pd.DataFrame();
nbclusters = cluster_profile_means.shape[0]
df_profiles['clusterindex'] = range(nbclusters)
for col in cluster_profile_means.columns:
if(col != "clusterindex"):
df_profiles[col] = np.zeros(nbclusters)
for cluster in range(nbclusters):
df_profiles[col][cluster] = cluster_profile_means[col][cluster]
# row.append(df[col].mean());
df_profiles.head()
#print(df_profiles.columns)
intereseting_columns = {};
for col in df_profiles.columns:
if(col != "clusterindex"):
global_mean = df[col].mean()
diff_means_global = abs(df_profiles[col] - global_mean). max();
# print(col , diff_means_global)
if(diff_means_global > 0.05):
intereseting_columns[col] = True
#print(intereseting_columns)
%matplotlib inline
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
"""
Explanation: Build the optimal model and apply it
End of explanation
"""
interesting = list(intereseting_columns.keys())
df_profiles_sorted = df_profiles[interesting].sort_index(axis=1)
df_profiles_sorted.plot.bar(figsize =(1, 1))
df_profiles_sorted.plot.bar(figsize =(16, 8), legend=False)
df_profiles_sorted.T
#df_profiles.sort_index(axis=1).T
"""
Explanation: Cluster Profiles
Here, the optimal model has two clusters: cluster 0 with 399 cases and cluster 1 with 537 cases.
Because this model is based on binary inputs, the best description of the clusters is the distribution of zeros and ones for each input (question).
The figure below gives the cluster profiles of this model — cluster 0 on the left, cluster 1 on the right. The questions involved are different (highest bars).
End of explanation
"""
|
pysal/spaghetti | notebooks/network-spatial-dependence.ipynb | bsd-3-clause | %config InlineBackend.figure_format = "retina"
%load_ext watermark
%watermark
import geopandas
import libpysal
import matplotlib
import matplotlib_scalebar
from matplotlib_scalebar.scalebar import ScaleBar
import numpy
import spaghetti
%matplotlib inline
%watermark -w
%watermark -iv
"""
Explanation: If any part of this notebook is used in your research, please cite with the reference found in README.md.
Network-constrained spatial dependence
Demonstrating cluster detection along networks with the Global Auto K function
Author: James D. Gaboardi jgaboardi@gmail.com
This notebook is an advanced walk-through for:
Understanding the global auto K function with an elementary geometric object
Basic examples with synthetic data
Empirical examples
End of explanation
"""
def plot_k(k, _arcs, df1, df2, obs, scale=True, wr=[1, 1.2], size=(14, 7)):
    """Plot a Global Auto K-function next to its spatial context.

    Parameters: ``k`` is a GlobalAutoK result exposing ``xaxis``, ``observed``,
    ``upperenvelope``/``lowerenvelope``, ``nsteps``, ``permutations`` and
    ``distribution``; ``_arcs`` is a GeoDataFrame of network arcs drawn as the
    background; ``df1``/``df2`` are GeoDataFrames of the raw and snapped point
    patterns; ``obs`` labels the pattern in the title; ``scale`` toggles a
    scale bar on the map; ``wr`` gives the width ratios of the two panels
    (NOTE(review): mutable default — harmless here because it is never
    mutated); ``size`` is the figure size in inches.
    """
    def function_plot(f, ax):
        """Plot a Global Auto K-function."""
        # Observed K alongside the simulation envelope (upper/lower bounds).
        ax.plot(k.xaxis, k.observed, "b-", linewidth=1.5, label="Observed")
        ax.plot(k.xaxis, k.upperenvelope, "r--", label="Upper")
        ax.plot(k.xaxis, k.lowerenvelope, "k--", label="Lower")
        ax.legend(loc="best", fontsize="x-large")
        title_text = "Global Auto $K$ Function: %s\n" % obs
        title_text += "%s steps, %s permutations," % (k.nsteps, k.permutations)
        title_text += " %s distribution" % k.distribution
        f.suptitle(title_text, fontsize=25, y=1.1)
        ax.set_xlabel("Distance $(r)$", fontsize="x-large")
        ax.set_ylabel("$K(r)$", fontsize="x-large")
    def spatial_plot(ax):
        """Plot spatial context."""
        # Arcs as the faint backdrop, raw points as dots, snapped points as x's.
        base = _arcs.plot(ax=ax, color="k", alpha=0.25)
        df1.plot(ax=base, color="g", markersize=30, alpha=0.25)
        df2.plot(ax=base, color="g", marker="x", markersize=100, alpha=0.5)
        carto_elements(base, scale)
    # Two side-by-side panels: K-function curve (left), network map (right).
    sub_args = {"gridspec_kw":{"width_ratios": wr}, "figsize":size}
    fig, arr = matplotlib.pyplot.subplots(1, 2, **sub_args)
    function_plot(fig, arr[0])
    spatial_plot(arr[1])
    fig.tight_layout()
def carto_elements(b, scale):
    """Strip axis ticks/labels from axes *b* and optionally draw a 1000-ft scale bar."""
    if scale:
        b.add_artist(
            ScaleBar(1, units="ft", dimension="imperial-length", fixed_value=1000)
        )
    # Maps read better without tick marks or coordinate labels.
    b.set(xticklabels=[], xticks=[], yticklabels=[], yticks=[])
"""
Explanation: The K function considers all pairwise distances of nearest neighbors to determine the existence of clustering, or lack thereof, over a delineated range of distances. For further description see O’Sullivan and Unwin (2010) and Okabe and Sugihara (2012).
D. O’Sullivan and D. J. Unwin. Point Pattern Analysis, chapter 5, pages 121–156. John Wiley & Sons, Ltd, 2010. doi:10.1002/9780470549094.ch5.
Atsuyki Okabe and Kokichi Sugihara. Network K Function Methods, chapter 6, pages 119–136. John Wiley & Sons, Ltd, 2012. doi:10.1002/9781119967101.ch6.
1. A demonstration of clustering
Results plotting helper function
End of explanation
"""
def equilateral_triangle(x1, y1, x2, mids=True):
    """Build an equilateral triangle sitting on the segment (x1, y1)-(x2, y1).

    Parameters: ``x1, y1`` locate the left vertex of the horizontal base;
    ``x2`` is the x-coordinate of the right base vertex (same y as ``y1``);
    when ``mids`` is True (default) the midpoints of the three sides are
    returned as well.

    Returns the closed triangle as a ``libpysal.cg.Chain``; with ``mids``
    also a list of the three side-midpoint ``libpysal.cg.Point`` objects.
    """
    # Apex: centered over the base; its height follows from Pythagoras
    # (side**2 - half_base**2) so all three sides are equal in length.
    x3 = (x1 + x2) / 2.
    y3 = numpy.sqrt((x1 - x2) ** 2 - (x3 - x1) ** 2) + y1
    p1, p2, p3 = (x1, y1), (x2, y1), (x3, y3)
    eqitri = libpysal.cg.Chain([p1, p2, p3, p1])
    if not mids:
        # Fix: previously the mids=False path produced no usable result
        # (it fell through without a meaningful return value).
        return eqitri
    # Midpoint of every unordered vertex pair == midpoint of every side.
    eqvs = eqitri.vertices[:-1]
    eqimps = []
    vcount = len(eqvs)
    for i in range(vcount):
        for j in range(i + 1, vcount):
            (_x1, _y1), (_x2, _y2) = eqvs[i], eqvs[j]
            mp = libpysal.cg.Point(((_x1 + _x2) / 2., (_y1 + _y2) / 2.))
            eqimps.append(mp)
    return eqitri, eqimps
eqtri_sides, eqtri_midps = equilateral_triangle(0., 0., 6., 1)
ntw = spaghetti.Network(eqtri_sides)
ntw.snapobservations(eqtri_midps, "eqtri_midps")
vertices_df, arcs_df = spaghetti.element_as_gdf(
ntw, vertices=ntw.vertex_coords, arcs=ntw.arcs
)
eqv = spaghetti.element_as_gdf(ntw, pp_name="eqtri_midps")
eqv_snapped = spaghetti.element_as_gdf(ntw, pp_name="eqtri_midps", snapped=True)
eqv_snapped
numpy.random.seed(0)
kres = ntw.GlobalAutoK(
ntw.pointpatterns["eqtri_midps"],
nsteps=100,
permutations=100)
plot_k(kres, arcs_df, eqv, eqv_snapped, "eqtri_mps", wr=[1, 1.8], scale=False)
"""
Explanation: Equilateral triangle
End of explanation
"""
bounds = (0,0,3,3)
h, v = 2, 2
lattice = spaghetti.regular_lattice(bounds, h, nv=v, exterior=True)
ntw = spaghetti.Network(in_data=lattice)
"""
Explanation: Interpretation:
This example demonstrates a complete lack of clustering with a strong indication of dispersion when approaching 5 units of distance.
2. Synthetic examples
Regular lattice — distinguishing visual clustering from statistical clustering
End of explanation
"""
midpoints = []
for chain in lattice:
(v1x, v1y), (v2x, v2y) = chain.vertices
mp = libpysal.cg.Point(((v1x+v2x)/2., (v1y+v2y)/2.))
midpoints.append(mp)
ntw.snapobservations(midpoints, "midpoints")
"""
Explanation: Network arc midpoints: statistical clustering
End of explanation
"""
npts = len(midpoints) * 2
xs = [0.0] * npts + [2.0] * npts
ys = list(numpy.linspace(0.4,0.6, npts)) + list(numpy.linspace(2.1,2.9, npts))
pclusters = [libpysal.cg.Point(xy) for xy in zip(xs,ys)]
ntw.snapobservations(pclusters, "pclusters")
vertices_df, arcs_df = spaghetti.element_as_gdf(ntw, vertices=True, arcs=True)
midpoints = spaghetti.element_as_gdf(ntw, pp_name="midpoints", snapped=True)
pclusters = spaghetti.element_as_gdf(ntw, pp_name="pclusters", snapped=True)
"""
Explanation: All observations on two network arcs: visual clustering
End of explanation
"""
numpy.random.seed(0)
kres = ntw.GlobalAutoK(ntw.pointpatterns["pclusters"], nsteps=100, permutations=100)
plot_k(kres, arcs_df, pclusters, pclusters, "pclusters", wr=[1, 1.8], scale=False)
"""
Explanation: Visual clustering
End of explanation
"""
numpy.random.seed(0)
kres = ntw.GlobalAutoK(ntw.pointpatterns["midpoints"], nsteps=100, permutations=100)
plot_k(kres, arcs_df, midpoints, midpoints, "midpoints", wr=[1, 1.8], scale=False)
"""
Explanation: Interpretation:
This example exhibits a high degree of clustering within 1 unit of distance followed by a complete lack of clustering, then a strong indication of clustering around 3.5 units of distance and above. Both colloquially and statistically, this pattern is clustered.
Statistical clustering
End of explanation
"""
ntw = spaghetti.Network(in_data=libpysal.examples.get_path("streets.shp"))
vertices_df, arcs_df = spaghetti.element_as_gdf(
ntw, vertices=ntw.vertex_coords, arcs=ntw.arcs
)
"""
Explanation: Interpretation:
This example exhibits no clustering within 1 unit of distance followed by large increases in clustering at each 1-unit increment. After 3 units of distance, this pattern is highly clustered. Statistically speaking, this pattern is clustered, but not colloquially.
3. Empircal examples
Instantiate the network from a .shp file
End of explanation
"""
for pp_name in ["crimes", "schools"]:
pp_shp = libpysal.examples.get_path("%s.shp" % pp_name)
ntw.snapobservations(pp_shp, pp_name, attribute=True)
ntw.pointpatterns
"""
Explanation: Associate the network with point patterns
End of explanation
"""
schools = spaghetti.element_as_gdf(ntw, pp_name="schools")
schools_snapped = spaghetti.element_as_gdf(ntw, pp_name="schools", snapped=True)
numpy.random.seed(0)
kres = ntw.GlobalAutoK(
ntw.pointpatterns["schools"],
nsteps=100,
permutations=100)
plot_k(kres, arcs_df, schools, schools_snapped, "schools")
"""
Explanation: Empircal — schools
End of explanation
"""
crimes = spaghetti.element_as_gdf(ntw, pp_name="crimes")
crimes_snapped = spaghetti.element_as_gdf(ntw, pp_name="crimes", snapped=True)
numpy.random.seed(0)
kres = ntw.GlobalAutoK(
ntw.pointpatterns["crimes"],
nsteps=100,
permutations=100)
plot_k(kres, arcs_df, crimes, crimes_snapped, "crimes")
"""
Explanation: Interpretation:
Schools exhibit no clustering until roughly 1,000 feet then display more clustering up to approximately 3,000 feet, followed by high clustering up to 6,000 feet.
Empircal — crimes
End of explanation
"""
|
NYUDataBootcamp/Projects | UG_F16/Mario Zapata_AirBnb Multiple Listings in Barcelona.ipynb | mit | import pandas as pd
import sys # system module
import pandas as pd # data package
import matplotlib as mpl # graphics package
import matplotlib.pyplot as plt # pyplot module
import datetime as dt # date and time module
import numpy as np
import seaborn as sns #seaborn module
%matplotlib inline
# Download the Inside Airbnb listings scrape for Barcelona (2016-12-08)
# directly from the gzipped CSV.
url1 = "http://data.insideairbnb.com/spain/catalonia"
url2 = "/barcelona/2016-12-08/data/listings.csv.gz"
full_df = pd.read_csv(url1+url2, compression="gzip")
full_df.columns
# Keep only the columns used in the analysis and give them readable names.
df = full_df[["host_id","minimum_nights","maximum_nights","license","reviews_per_month","latitude",'host_total_listings_count', "longitude","neighbourhood_cleansed","availability_365","price",
              "monthly_price","room_type","calculated_host_listings_count",'review_scores_rating']]
df = df.rename(columns={'minimum_nights': 'Minimum Nights', 'maximum_nights':'Maximum Nights',
                        'license':'License','availability_365':'Yearly Availability', 'price':'Price',
                        'review_scores_rating':'Ratings',"room_type":"Listing Type",
                        'calculated_host_listings_count':"Listings Count",'reviews_per_month':"Reviews per month",'neighbourhood_cleansed':"Neighbourhood"})
# Price arrives as a string like "$1,234.00": strip the currency symbol and
# thousands separator, then convert to float for plotting.
df.replace({'Price': {'\$': ''}}, regex=True, inplace=True)
df.replace({'Price': {'\,': ''}}, regex=True, inplace=True)
df['Price'] = df['Price'].astype('float64', copy=False)
# Five stacked scatter plots sharing the x axis (Listings Count) to compare
# listing characteristics of single- vs multi-listing hosts.
plt.style.use('seaborn-white')
fig, ax = plt.subplots(5, sharex=True, sharey=False,)
plt.title('', fontsize=15, loc='center')
df.plot.scatter(ax=ax[0],
                x="Listings Count",
                y="Minimum Nights",
                alpha=0.3,
                ylim = (1,30),
                figsize=(11,12),
                color = "b",
                title='Listing characteristics by type of host',
                xlim = (0,80) )
df.plot.scatter(ax=ax[1],
                x="Listings Count",
                y="Price",
                alpha=0.3,
                figsize=(11, 12),
                title='',
                color = "b",
                ylim = (0,150),
                xlim = (0,80))
df.plot.scatter(ax=ax[2],
                x="Listings Count",
                y="Yearly Availability",
                alpha=0.3,
                title='',
                figsize=(11, 12),
                color = "b",
                ylim = (-10,375),
                xlim = (-1,80))
df.plot.scatter(ax=ax[3],
                x="Listings Count",
                y="Ratings",
                alpha=0.2,
                title='',
                figsize=(11, 12),
                color = "b",
                ylim = (15,105),
                xlim = (-5,85))
df.plot.scatter(ax=ax[4],
                x="Listings Count",
                y="Reviews per month",
                alpha=0.2,
                title='',
                figsize=(11, 12),
                color = "b",
                ylim = (0,10),
                xlim = (-5,85))
"""
Explanation: AirBnb abuse in Barcelona and current regulations against the touristic bubble
Mario Zapata
*mario.zapata@nyu.edu *
The role of multi-listings in Barcelona
Tourism in the city of Barcelona has been a hot topic since the 2015 municipal elections, when the (now) mayor of Barcelona and housing activist Ada Colau pointed out that the city was starting to show signs of a tourism bubble — meaning that asset prices in the city were rising due to optimistic tourism growth expectations. One of her rally issues was the use of traditional urban housing for touristic rentals, which allegedly makes the overall housing prices rise and kicks residents out of urban spaces as these become more expensive. That is why one of the first measures that her administration took was to fine AirBnb for allowing individuals without a license to post listings on its website. Barcelona became one of the first cities to fine AirBnb, and it did it on two occasions. The last fine imposed on the company was 600,000 EUR. These fines created controversy — for obvious reasons — but it is unclear whether they solved the problem they were trying to address: housing prices increase due to the rise in AirBnb listings.
A particular characteristic of the touristic housing landscape of Barcelona is that the city has one of the highest percentages of multiple listings per total listings in the developed world, meaning that a majority of the listings (57%) belongs to hosts who have more than one listing (up to 80, in some cases). This is extraordinarily higher than in the cities of Madrid, Berlin, London, Vienna, Amsterdam, Paris, NYC, Dublin, Copenhagen, Athens, and many others, where this rate is normally between 15 and 30%.
This points to the possibility that the tourism bubble in Barcelona may be caused by these multi-listings. Considering that as per InsideAirbnb, many other potential indicators of AirBnb host abuse are not as dissonant as this one, this possibility does not seem unreasonable. This leads us to the questions: Are multi-hosts responsible for creating a housing bubble, and if so, will the current sanctions imposed — forcing hosts to get municipal licences for their listings — actually fix the current bubble?
What characterizes multi-listings in Barcelona?
The answer to the question of whether multi-listing is leading to abuse goes through understanding what the characteristics of these listings are. A reasonable argument is that these multi-listings are actually regular (non-touristic) rentals (with or without licences) that use AirBnb to advertise themselves, which would explain why there are so many of them.
We will look into Inside AirBnb data to test this hypothesis, particularly minimum nights requirements, aggregated review score, number of reviews per month, availability per year and price per night. I will plot these listing characteristics against the calculated number of listings posted by the same host.
End of explanation
"""
# Horizontal boxplot of Listings Count per neighbourhood (all listings).
fig, ax = plt.subplots(figsize=(8, 16))
sns.boxplot(x="Listings Count", y='Neighbourhood',data=df, orient="h")
"""
Explanation: These plots offer us an interesting insight. While there aren't clear cutting lines between one type of homes and the other, we can see some interesting trends as the number of listings posted by the same host increases. Overall, the findings suggest something different than our initial hypothesis: they suggest that these listings are not residential but higher end, slightly longer-term touristic rentals.
Lower likelyhood of having high minimum night requirements
As the graph shows, no listings past the 10/15 listing count has high minimum-night requirements. High minimum night requirements (over 10-30 days) normally indicate abuse (as per Inside AirBnb), as it signals that the apartment is being rented for long periods of time. This indicates that hosts with many listings do not necessarily set lower boundaries in terms of how long their guests' stay must be.
Higher quality
As the "Price" and "Ratings" score shows, homes in a high listing count tend to fare better in terms of how much guests value their services. Customers are normally ready to pay more and rate higher homes that belong to a large multi-listing.
Lower guest turns, but all year round
Assuming that the number of reviews per month is indicative of the number of guests the home has had (which should be a relatively accurate representation, since in AirBnb it is in the interest of both hosts and guests to review each other), the graph shows that homes in greater listing counts are less likely to have many guests per month. This may be because these listings are of higher quality, and so they are picked for slightly longer stays (possibly even for longer term sublets). Furthermore, whereas homes with a lower listings count are more diverse in terms of how available they are yearly (there are equally distributed dots for homes in listing counts of up to 4 listings), homes in higher listing counts are available all year.
End of explanation
"""
# Subset of columns for the licensing analysis; dropping NaNs keeps only
# listings with a posted license (License is the only column with missing
# values here), so licensedf is the licensed subset of df.
licensedf = df[["License",'Listings Count','Listing Type',
                "host_id","Minimum Nights",
                "latitude",
                "longitude","Neighbourhood","Yearly Availability","Price",
                'Ratings']]
licensedf = licensedf.dropna() # we drop all entries without a license (no other field has missing data),
# (our license rate is similar to that calculated by Inside AirBnb)
licensedf.describe()
"""
Explanation: The graph above confirms that homes in high listing counts are more likely to be in the higher end range by showing us the presence of these businesses in higher income neighbourhoods. This can be observed in that these neighbourhoods show higher listing count medians than others (even though many of these are in touristic areas and are thus likely to have high individual listings too). "La Vila de Gracia" is a good example of this: Despite being in one of the most densely populated and most touristic areas of the city, it shows a higher median listing count than others (rounding to 3 listing count per home), with a wide range of upper-level outliers.
Summary
Overall, these graphs show that listings in high listing counts are likely to belong to individuals or companies that dedicate their real estate exclusively for touristic purposes, targeting customers in the high end market, and normally with lower guest turns. It also shows that often homes in up to 5 listing counts are likely to be of very diverse nature. I attribute this to these homes belonging to an array of very diverse hosts, i.e. hosts who list 4 private rooms in their own residential house and hosts who list 4 entire homes they own, mixed in the same group.
Multi-listings and current regulations
The greatest concern of the mayor of Barcelona Ada Colau seems to be that all homes listed in AirBnb get a touristic license. However, is that likely to solve the current problem of a touristic housing bubble? In this section we observe the distribution of licenses in the variable "license". This variable includes either NaN (no values) if the home does not have a posted license or the date when the license was approved (in a string).
End of explanation
"""
# Compare Listings Count by listing type: all listings vs licensed only.
plt.style.use('seaborn-notebook')
sns.boxplot(x="Listings Count", y='Listing Type',data=df, order = ["Entire home/apt",'Private room','Shared room'])
sns.boxplot(x="Listings Count", y='Listing Type',data=licensedf)
"""
Explanation: Overall, we observe that a very small number of homes have licences. Let us observe the distribution of licensed listings versus the overall count per type of home.
End of explanation
"""
# Flag hosts at several "multiple listings" thresholds (>= 2, 3, 4, 5) and
# summarise the share of listings in each group, first for all listings
# and then for the licensed subset only.
df['Multiple Listings'] = df['Listings Count'] >= 2
df['Multiple Listings'].describe()
df['Multiple Listings'] = df['Listings Count'] >= 3
df['Multiple Listings'].describe()
df['Multiple Listings'] = df['Listings Count'] >= 4
df['Multiple Listings'].describe()
df['Multiple Listings'] = df['Listings Count'] >= 5
df['Multiple Listings'].describe()
licensedf['Multiple Listings'] = licensedf['Listings Count'] >= 2
licensedf['Multiple Listings'].describe()
licensedf['Multiple Listings'] = licensedf['Listings Count'] >= 3
licensedf['Multiple Listings'].describe()
licensedf['Multiple Listings'] = licensedf['Listings Count'] >= 4
licensedf['Multiple Listings'].describe()
licensedf['Multiple Listings'] = licensedf['Listings Count'] >= 5
licensedf['Multiple Listings'].describe()
"""
Explanation: Here we see two interesting patterns. 1) Even though private rooms are the most common listing option as per Inside AirBnb, among licenced listings "Entire home/apt" is the most popular option. 2) When we observe all homes, the median observations for each type of listing is normally much closer to lower listings count than when we observe only licensed listings. This suggests that even though a majority of the listings does not have a license, the number of licenses is proportionately higher for homes in higher listing counts than it is for others. This means that hosts with many listings are more likely to license their homes than hosts with fewer listings. We can see this trend better by looking at the following variable descriptions.
End of explanation
"""
# Same neighbourhood boxplot as before, but for licensed listings only.
fig, ax = plt.subplots(figsize=(8, 14))
sns.boxplot(x="Listings Count", y='Neighbourhood',data=licensedf, orient="h")
"""
Explanation: As we can see above, more than half the listings that have licenses belong to hosts with more than 5 homes listed. This seems disproportionate, considering that less than a third of total listings belongs in this category. In other words, this confirms our hypothesis that homes in higher listing counts are more likely to have a license, and thus conform best to the standards set by the Town Hall of Barcelona.
This also suggests the following: since a majority of listings in Barcelona are multi-listings, the mayor's policy of enforcing licencing is likely to have limited effects in the seemingly extensive practice for hosts with many listings of using AirBnb to list higher end homes. While it is true that only a minority of these have licenses, in order to avoid a housing bubble, new policies should be put in place to control licensed homes owned by hosts with multiple listings, such as a corporate or luxury tax, since otherwise these homes will continue to contribute to the increase in overall housing prices in the city.
End of explanation
"""
|
CalPolyPat/phys202-2015-work | assignments/assignment06/ProjectEuler17.ipynb | mit | import numpy as np
# Digit-to-word lookup tables used to spell out numbers for Project Euler 17.
# Words are concatenated British-style, e.g. "twohundredand".
_UNITS = {'0': 'zero', '1': 'one', '2': 'two', '3': 'three', '4': 'four',
          '5': 'five', '6': 'six', '7': 'seven', '8': 'eight', '9': 'nine'}
_TEENS = {'0': 'ten', '1': 'eleven', '2': 'twelve', '3': 'thirteen',
          '4': 'fourteen', '5': 'fifteen', '6': 'sixteen', '7': 'seventeen',
          '8': 'eighteen', '9': 'nineteen'}
_TENS = {'2': 'twenty', '3': 'thirty', '4': 'forty', '5': 'fifty',
         '6': 'sixty', '7': 'seventy', '8': 'eighty', '9': 'ninety'}


def number_to_words(n, numlist):
    """Given a number n between 1-1000 inclusive, append the words spelling
    out n to numlist and return numlist.

    n may be an int or a string of digits (the recursive calls pass digit
    substrings).  Words are appended in reading order, e.g. 221 yields
    ['twohundredand', 'twenty', 'one'].
    """
    digits = str(n)
    if len(digits) == 1:
        # Single digit: direct lookup (note '0' maps to 'zero').
        numlist.append(_UNITS[digits])
    elif len(digits) == 2:
        tens, units = digits[0], digits[1]
        if tens == '1':
            # 10-19 are irregular and handled as single words.
            numlist.append(_TEENS[units])
        else:
            if tens != '0':
                numlist.append(_TENS[tens])
            if units != '0':
                # Recurse for the trailing unit, e.g. 'twenty' + 'one'.
                number_to_words(units, numlist)
    elif len(digits) == 3:
        hundreds, remainder = digits[0], digits[1:]
        if hundreds != '0':
            if remainder != '00':
                # British usage inserts 'and' between the hundreds word
                # and the rest of the number.
                numlist.append(_UNITS[hundreds] + 'hundredand')
                number_to_words(remainder, numlist)
            else:
                numlist.append(_UNITS[hundreds] + 'hundred')
    if digits == '1000':
        numlist.append('onethousand')
    return numlist
"""
Explanation: Project Euler: Problem 17
https://projecteuler.net/problem=17
If the numbers 1 to 5 are written out in words: one, two, three, four, five, then there are 3 + 3 + 5 + 4 + 4 = 19 letters used in total.
If all the numbers from 1 to 1000 (one thousand) inclusive were written out in words, how many letters would be used?
NOTE: Do not count spaces or hyphens. For example, 342 (three hundred and forty-two) contains 23 letters and 115 (one hundred and fifteen) contains 20 letters. The use of "and" when writing out numbers is in compliance with British usage.
First write a number_to_words(n) function that takes an integer n between 1 and 1000 inclusive and returns a list of words for the number as described above
End of explanation
"""
# Sanity check: 221 -> two hundred and twenty-one (words concatenated,
# no spaces or hyphens).
numlist = []
assert number_to_words(221, numlist)==['twohundredand', 'twenty', 'one']
assert True # use this for grading the number_to_words tests.
"""
Explanation: Now write a set of assert tests for your number_to_words function that verifies that it is working as expected.
End of explanation
"""
def count_letters(n):
    """Count the number of letters used to write out the words for 1-n inclusive.

    Relies on number_to_words() appending every word for every number into a
    single shared list; since the generated words never contain spaces or
    hyphens, the answer is simply the total length of all those words.
    """
    words = []
    for i in range(1, n + 1):
        number_to_words(i, words)
    # Summing word lengths avoids the original quadratic cost of building
    # one large string by repeated concatenation (and the needless numpy
    # array round-trip).
    return sum(len(word) for word in words)
"""
Explanation: Now define a count_letters(n) that returns the number of letters used to write out the words for all of the the numbers 1 to n inclusive.
End of explanation
"""
# Known totals from the problem statement: 1-5 uses 19 letters, 1-10 uses 39.
assert count_letters(5)==19
assert count_letters(10)==39
assert True # use this for grading the count_letters tests.
"""
Explanation: Now write a set of assert tests for your count_letters function that verifies that it is working as expected.
End of explanation
"""
# Final answer to Project Euler problem 17.
print(count_letters(1000))
assert True # use this for grading the answer to the original question.
"""
Explanation: Finally used your count_letters function to solve the original question.
End of explanation
"""
|
RogueAstro/solar-twins-project | find_vsini.ipynb | gpl-2.0 | import numpy as np
from pwoogs import moog,estimate,utils
import matplotlib.pyplot as plt
import q2
import shutil as sh
%matplotlib inline
# Load the star identifiers (first CSV column) for the solar-twin sample;
# the header row is skipped and names are kept as strings.
star_names = np.loadtxt('s_twins.csv',
                        skiprows=1,
                        usecols=(0,),
                        dtype=str,
                        delimiter=',')
# Helper object used throughout to slice/centre/normalise data arrays.
u = utils.arr_manage()
"""
Explanation: Find v sin(i) with Python and MOOG synth
This notebook is used to estimate the projected rotation of solar twin stars using Python, MOOG synth, in addition to RogueAstro's pwoogs and astroChasqui's q2 codes.
Required files:
* s_twins.csv: contains the information of the stars
* filenames.lis: contains the list of names of the spectrum .fits files
* lines.dat: contains information about the lines to be analyzed
* continuum.dat: contains the wavelengths to be used to calibrate the continuum
End of explanation
"""
def set_star(choice, **kwargs):
    """Prepare the input files for one star.

    Writes 'star.mod' (a Kurucz-style model atmosphere interpolated with q2
    from the parameters in s_twins.csv) and copies the star's spectrum to
    'spectrum_full.dat'.

    choice : index of the star in s_twins.csv.
    inverted_filelist : optional keyword; set True when filenames.lis is
        ordered opposite to the star-name list.
    """
    if ('inverted_filelist' in kwargs):
        invert = kwargs['inverted_filelist']
    else:
        invert = False
    choice = int(choice)
    print 'Creating the stellar atmosphere file.'
    # Columns 1-4 of s_twins.csv hold Teff, logg, [Fe/H] and microturbulence
    # (in that order) -- TODO confirm against the CSV header.
    data = np.loadtxt('s_twins.csv',
                      usecols=(1,2,3,4),
                      skiprows=1,
                      delimiter=',')
    # Interpolate a model atmosphere on the Kurucz 'odfnew' grid.
    model = q2.modatm.interpolate(data[choice,0],
                                  data[choice,1],
                                  data[choice,2],
                                  grid='odfnew')
    N = len(model['RHOX'])
    # Write the atmosphere in the KURTYPE format expected by MOOG; the
    # exact spacing of these literals is part of the file format.
    with open('star.mod','w') as f:
        f.truncate()
        f.write(
"""KURTYPE
%.1f/ %.2f/ %.2f mic = %.2f
%i
5000.0\n""" % (data[choice,0],
               data[choice,1],
               data[choice,2],
               data[choice,3],
               N)
        )
        # One line per atmospheric layer: RHOX, T, P, electron density.
        for i in range(N):
            f.write(" %.8E %.1f %.3E %.3E\n" % (
                model['RHOX'][i],
                model['T'][i],
                model['P'][i],
                model['XNE'][i])
                )
        f.write(' %.2f\n' % data[choice,3])
        f.write(
"""NATOMS 0 %.2f
NMOL 28
101.0 106.0 107.0 108.0 112.0 126.0
606.0 607.0 608.0
707.0 708.0
808.0 812.0 822.0 823.0 840.0
10108.0 10820.0 60808.0
6.1 7.1 8.1 12.1 20.1 22.1 23.1 26.1 40.1
""" % data[choice,2]
        )
    # filenames.lis contains the names of all the fits files of the spectra
    # In my case, the list is in an opposite order as the list of star names,
    # so that's choice is re-set if invert == True
    filename = np.loadtxt('filenames.lis',str)
    if invert == True:
        choice = len(star_names)-1-choice
    print "Creating the spectrum_full.dat file for %s" % filename[choice]
    sh.copyfile(filename[choice],'spectrum_full.dat')
"""
Explanation: The following function is used to set the input files to be used by MOOG for a specific star from the list star_names.
End of explanation
"""
def v_m(T):
    """Return the macroturbulent velocity for a solar twin of temperature T.

    The relation is linear in the offset from the solar effective
    temperature (5777 K), anchored at 3.6 km/s for the Sun.
    """
    delta = T - 5777.
    return 3.6 + delta / 486
"""
Explanation: v_m returns the macroturbulent velocity for a solar twin with a temperature T.
End of explanation
"""
def manage(choice, interval, lines, chunk):
    """Cut the working spectrum down to the analysis window.

    Builds a window of width `interval` centred on the line wavelength
    (lines[choice,0]), then writes the window padded by `chunk` on each
    side from 'spectrum_full.dat' into 'spectrum.dat'.
    Returns the [min, max] wavelength window as a numpy array.
    """
    print 'Managing the data file.'
    spec_window = np.array([lines[choice,0]-interval/2,lines[choice,0]+interval/2])
    # Keep some margin around the window so convolution edges are clean.
    u.cut(spec_window[0]-chunk,spec_window[1]+chunk,'spectrum_full.dat','spectrum.dat')
    print 'Done.\n'
    return spec_window
"""
Explanation: Managing data file because it is possibly huge
End of explanation
"""
def correct(choice, data, lines, cont_type, r_1, r_2, r_3):
    """Return (wl_shift, corr): the wavelength shift of the line centre and
    the multiplicative correction factor for the continuum level.

    cont_type : 'single' uses the local continuum around the line;
        'multi' uses the per-star wavelengths listed in continuum.dat.
    r_1, r_2, r_3 : radii (points / angstroms / points) for the centre
        search, continuum region and continuum estimate respectively.
    """
    # The following lines are used to find calibration corrections for the spectral line
    print 'Finding the shift on the wavelength.'
    wl_shift = 10.
    ind = u.find_index(lines[choice,0],data[:,0])
    # NOTE(review): the loop body does not update `ind`, so if the computed
    # shift stays above 2 angstroms this loop never terminates -- confirm
    # u.find_center behaviour before relying on the 2.0 threshold.
    while abs(wl_shift) > 2.0:
        center = u.find_center(data[ind-r_1+1:ind+r_1+2])
        wl_shift = lines[choice,0]-center
    print 'Wavelength shift = %.4f\n' % wl_shift
    print "Finding the correction factor for the continuum."
    ind_min = u.find_index(lines[choice,0]-r_2,data[:,0])
    ind_max = u.find_index(lines[choice,0]+r_2,data[:,0])
    if cont_type == 'single':
        corr = 1.0/np.mean(u.find_corr(
            data[ind_min:ind_max,:],
            r_3
            ))
    elif cont_type == 'multi':
        # continuum.dat lists, per star, wavelengths known to sit on the
        # continuum; presumably one row per star -- verify file layout.
        target_wls = np.loadtxt('continuum.dat')
        corr = 1.0/np.mean(u.find_corr_from_ensemble(
            data[ind_min:ind_max,:],
            target_wls[choice,:],
            r_3
            ))
    print "Correction factor = %.4f" % corr
    return wl_shift, corr
"""
Explanation: Function that returns the corrections factors for line center and continuum
End of explanation
"""
def full_auto(choice, interval, res_power, SN, **kwargs):
# Spectrum chunk size. Default = 10. angstroms
if ('chunk' in kwargs):
chunk = kwargs['chunk']
assert chunk > interval, 'Invalid chunk size'
else:
chunk = 10.0
# Continuum correction: choose between 'single' or 'multi' wavelengths
if ('continuum_correction' in kwargs):
cont_type = kwargs['continuum_correction']
assert cont_type == 'multi', 'Continuum correction type invalid'
else:
cont_type = 'single'
# Radius of points to be used in finding the correction for the line center
# Default = 3
if ('r_1' in kwargs):
radius_1 = kwargs['r_1']
assert radius_1 > 0, 'Invalid radius for line center correction'
else:
radius_1 = 3
# Radius in angstroms for the region around the target wavelength to be
# analyzed for the continuum . Default = 3.0
if ('r_2' in kwargs):
radius_2 = kwargs['r_2']
assert radius_2 > 0, 'Invalid radius of wavelength region'
else:
radius_2 = 3.0
# Radius in points to be used in finding the correction for the continuum.
# Default = 2
if ('r_3' in kwargs):
radius_3 = kwargs['r_3']
assert radius_3 > 0, 'Invalid radius for continuum correction'
else:
radius_3 = 2
# Radius in points to be used in evaluating the performance function
# Default = 7
if ('r_4' in kwargs):
radius_4 = kwargs['r_4']
assert radius_4 > 0, 'Invalid radius for performance evaluation'
else:
radius_4 = 7
# Blue wing weight to be used on estimation. Default = 10.0
if ('bw' in kwargs):
bw = kwargs['bw']
assert bw >= 0.0, 'Invalid weight for blue wing'
else:
bw = 10.0
# Red wing weight to be used on estimation. Default = 5.0
if ('rw' in kwargs):
rw = kwargs['rw']
assert rw >= 0.0, 'Invalid weight for red wing'
else:
rw = 5.0
# Line center weight to be used on estimation. Default = 25.0
if ('cw' in kwargs):
cw = kwargs['cw']
assert cw >= 0.0, 'Invalid weight for line center'
else:
cw = 25.0
# Bad fit tolerance in number of points above the S/N ratio. Default = 2
if ('tol' in kwargs):
tol = kwargs['tol']
assert tol >= 0, 'Invalid tolerance'
else:
tol = 2
# 'plot' on window or 'save' as png? Default = plot on window
if ('output' in kwargs):
output = kwargs['output']
assert output == 'save', 'Invalid radius for continuum correction'
else:
output = 'plot'
choice = int(choice)
# Synthesis parameters
line_file = 'lines.dat'
lines = np.loadtxt(line_file,skiprows=1,usecols=(0,1))
# Star parameters
star_info = np.genfromtxt('star.mod',skip_header=1,skip_footer=83,
usecols=(0,1),delimiter='/ ')
T_star = star_info[0]
v_macro = v_m(T_star)
data = np.loadtxt('spectrum.dat')
# Managing the data file
spec_window = manage(choice, interval, lines, chunk)
# The instrumental broadening
gauss = np.mean(spec_window)/res_power
# Finding the correction factors
wl_shift, corr = correct(choice, data, lines, cont_type, radius_1, radius_2,
radius_3)
print "Now starting estimation of vsini..."
# Instatiating the function to write parameters for MOOG
r = estimate.vsini(
spec_window,
gauss,
v_macro,
line_file,
choice,
x_wl=wl_shift,
y_mult=corr,
bwing_w = bw,
rwing_w = rw,
center_w = cw,
perf_radius=radius_4,
SN=SN,
badfit_tol = tol,
star_name=star_names[m]
)
if output == 'plot':
save = 'window'
else:
save = '%s_line%i.png'%(star_names[m],choice)
# Finding vsini and abundance
vsini,abund,bfs = r.find(N=15,
max_i=20,
min_i=10,
limits=[0.05,0.001],
save=save)
return vsini,abund,bfs
# Run the pipeline for star index 88, line index 5: prepare the input
# files, then estimate vsini from a 1-angstrom window.
m = 88
set_star(m, inverted_filelist=True)
v, a, b = full_auto(choice=5, interval=1.0, r_1=3, r_2=3.0, r_4=7, res_power=65000., SN=400)
"""
Explanation: Next, there is the code that finds the v sin(i) in fully automatic mode, for a specific star.
End of explanation
"""
|
IanHawke/maths-with-python | 11-more-classes.ipynb | mit | class Polynomial(object):
"""Representing a polynomial."""
explanation = "I am a polynomial"
def __init__(self, roots, leading_term):
self.roots = roots
self.leading_term = leading_term
self.order = len(roots)
def display(self):
string = str(self.leading_term)
for root in self.roots:
if root == 0:
string = string + "x"
elif root > 0:
string = string + "(x - {})".format(root)
else:
string = string + "(x + {})".format(-root)
return string
def multiply(self, other):
roots = self.roots + other.roots
leading_term = self.leading_term * other.leading_term
return Polynomial(roots, leading_term)
def explain_to(self, caller):
print("Hello, {}. {}.".format(caller,self.explanation))
print("My roots are {}.".format(self.roots))
"""
Explanation: Classes and Object Oriented Programming
In an earlier section we discussed classes as a way of representing an abstract object, such as a polynomial. The resulting code
End of explanation
"""
# Create two polynomials from their roots and leading coefficients and
# have them introduce themselves.
p_roots = (1, 2, -3)
p_leading_term = 2
p = Polynomial(p_roots, p_leading_term)
p.explain_to("Alice")
q = Polynomial((1,1,0,-2), -1)
q.explain_to("Bob")
"""
Explanation: allowed polynomials to be created, displayed, and multiplied together. However, the language is a little cumbersome. We can take advantage of a number of useful features of Python, many of which carry over to other programming languages, to make it easier to use the results.
Remember that the __init__ function is called when a variable is created. There are a number of special class functions, each of which has two underscores before and after the name. This is another Python convention that is effectively a rule: functions surrounded by two underscores have special effects, and will be called by other Python functions internally. So now we can create a variable that represents a specific polynomial by storing its roots and the leading term:
End of explanation
"""
class Polynomial(object):
    """Representing a polynomial; printing is handled by __repr__."""

    explanation = "I am a polynomial"

    def __init__(self, roots, leading_term):
        self.roots = roots
        self.leading_term = leading_term
        self.order = len(roots)

    def __repr__(self):
        """Build the factored-form string shown when the object is printed."""
        factors = [str(self.leading_term)]
        for r in self.roots:
            if r == 0:
                factors.append("x")
            elif r > 0:
                factors.append("(x - {})".format(r))
            else:
                factors.append("(x + {})".format(-r))
        return "".join(factors)

    def explain_to(self, caller):
        """Print a short self-description addressed to caller."""
        print("Hello, {}. {}.".format(caller, self.explanation))
        print("My roots are {}.".format(self.roots))
# print() now uses __repr__ to show the factored form directly.
p = Polynomial(p_roots, p_leading_term)
print(p)
q = Polynomial((1,1,0,-2), -1)
print(q)
"""
Explanation: Another special function that is very useful is __repr__. This gives a representation of the class. In essence, if you ask Python to print a variable, it will print the string returned by the __repr__ function. This was the role played by our display method, so we can just change the name of the function, making the Polynomial class easier to use. We can use this to create a simple string representation of the polynomial:
End of explanation
"""
class Polynomial(object):
    """Representing a polynomial; supports printing and the * operator."""

    explanation = "I am a polynomial"

    def __init__(self, roots, leading_term):
        self.roots = roots
        self.leading_term = leading_term
        self.order = len(roots)

    def __repr__(self):
        """Build the factored-form string shown when the object is printed."""
        factors = [str(self.leading_term)]
        for r in self.roots:
            if r == 0:
                factors.append("x")
            elif r > 0:
                factors.append("(x - {})".format(r))
            else:
                factors.append("(x + {})".format(-r))
        return "".join(factors)

    def __mul__(self, other):
        """Multiply two polynomials via the * operator."""
        return Polynomial(self.roots + other.roots,
                          self.leading_term * other.leading_term)

    def explain_to(self, caller):
        """Print a short self-description addressed to caller."""
        print("Hello, {}. {}.".format(caller, self.explanation))
        print("My roots are {}.".format(self.roots))
# The * operator now dispatches to __mul__, producing a new Polynomial.
p = Polynomial(p_roots, p_leading_term)
q = Polynomial((1,1,0,-2), -1)
r = p*q
print(r)
"""
Explanation: The final special function we'll look at (although there are many more, many of which may be useful) is __mul__. This allows Python to multiply two variables together. We did this before using the multiply method, but by using the __mul__ method we can multiply together two polynomials using the standard * operator. With this we can take the product of two polynomials:
End of explanation
"""
class Monomial(Polynomial):
    """Representing a monomial, which is a polynomial with leading term 1."""

    def __init__(self, roots):
        """Store the roots; a monomial's leading coefficient is always 1."""
        self.leading_term = 1
        self.roots = roots
        self.order = len(roots)
"""
Explanation: We now have a simple class that can represent polynomials and multiply them together, whilst printing out a simple string form representing itself. This can obviously be extended to be much more useful.
Inheritance
As we can see above, building a complete class from scratch can be lengthy and tedious. If there is another class that does much of what we want, we can build on top of that. This is the idea behind inheritance.
In the case of the Polynomial we declared that it started from the object class in the first line defining the class: class Polynomial(object). But we can build on any class, by replacing object with something else. Here we will build on the Polynomial class that we've started with.
A monomial is a polynomial whose leading term is simply 1. A monomial is a polynomial, and could be represented as such. However, we could build a class that knows that the leading term is always 1: there may be cases where we can take advantage of this additional simplicity.
We build a new monomial class as follows:
End of explanation
"""
# A Monomial inherits explain_to and __repr__ from Polynomial.
m = Monomial((-1, 4, 9))
m.explain_to("Caroline")
print(m)
"""
Explanation: Variables of the Monomial class are also variables of the Polynomial class, so can use all the methods and functions from the Polynomial class automatically:
End of explanation
"""
class Monomial(Polynomial):
    """Representing a monomial, which is a polynomial with leading term 1."""

    explanation = "I am a monomial"

    def __init__(self, roots):
        """Store the roots; the leading coefficient is fixed at 1."""
        self.leading_term = 1
        self.roots = roots
        self.order = len(roots)

    def __repr__(self):
        """Factored form without the (redundant) leading coefficient of 1."""
        factors = []
        for r in self.roots:
            if r == 0:
                factors.append("x")
            elif r > 0:
                factors.append("(x - {})".format(r))
            else:
                factors.append("(x + {})".format(-r))
        return "".join(factors)
# Demo: the overridden __repr__ now omits the leading-term prefix,
# while explain_to is still inherited from Polynomial.
m = Monomial((-1, 4, 9))
m.explain_to("Caroline")
print(m)
"""
Explanation: We note that these functions, methods and variables may not be exactly right, as they are given for the general Polynomial class, not by the specific Monomial class. If we redefine these functions and variables inside the Monomial class, they will override those defined in the Polynomial class. We do not have to override all the functions and variables, just the parts we want to change:
End of explanation
"""
# Demo: the base Polynomial class is unaffected by the Monomial subclass
# and still takes an explicit leading term (here 4).
s = Polynomial((2, 3), 4)
s.explain_to("David")
print(s)
"""
Explanation: This has had no effect on the original Polynomial class and variables, which can be used as before:
End of explanation
"""
# Demo: multiplying a Monomial by a Polynomial via the * operator
# (i.e. __mul__) yields a Polynomial.
t = m*s
t.explain_to("Erik")
print(t)
"""
Explanation: And, as Monomial variables are Polynomials, we can multiply them together to get a Polynomial:
End of explanation
"""
class Monomial(Polynomial):
    """A monomial: a polynomial whose leading term is fixed at 1."""

    # Overrides the explanation text inherited from Polynomial.
    explanation = "I am a monomial"

    def __init__(self, roots):
        # Delegate to the base-class initialiser instead of duplicating
        # it; only the leading term (always 1) is pinned here.
        Polynomial.__init__(self, roots, 1)

    def __repr__(self):
        # One linear factor per root; no leading-term prefix since it is 1.
        pieces = []
        for root in self.roots:
            if root == 0:
                pieces.append("x")
            elif root > 0:
                pieces.append("(x - {})".format(root))
            else:
                pieces.append("(x + {})".format(-root))
        return "".join(pieces)
# Demo: the delegating __init__ behaves exactly like the previous version.
v = Monomial((2, -3))
v.explain_to("Fred")
print(v)
"""
Explanation: In fact, we can be a bit smarter than this. Note that the __init__ function of the Monomial class is identical to that of the Polynomial class, just with the leading_term set explicitly to 1. Rather than duplicating the code and modifying a single value, we can call the __init__ function of the Polynomial class directly. This is because the Monomial class is built on the Polynomial class, so knows about it. We regenerate the class, but only change the __init__ function:
End of explanation
"""
|
fnakashima/deep-learning | student-admissions-keras/StudentAdmissionsKeras.ipynb | mit | # Importing pandas and numpy
import pandas as pd
import numpy as np
# Reading the csv file into a pandas DataFrame. Expects student_data.csv
# in the working directory; later cells use the columns admit, gre, gpa
# and rank.
data = pd.read_csv('student_data.csv')
# Printing out the first 10 rows of our data (a bare trailing expression
# is rendered by the notebook)
data[:10]
"""
Explanation: Predicting Student Admissions with Neural Networks in Keras
In this notebook, we predict student admissions to graduate school at UCLA based on three pieces of data:
- GRE Scores (Test)
- GPA Scores (Grades)
- Class rank (1-4)
The dataset originally came from here: http://www.ats.ucla.edu/
Loading the data
To load the data and format it nicely, we will use two very useful packages called Pandas and Numpy. You can read on the documentation here:
- https://pandas.pydata.org/pandas-docs/stable/
- https://docs.scipy.org/
End of explanation
"""
# Importing matplotlib
import matplotlib.pyplot as plt
# Function to help us plot
def plot_points(data):
    """Scatter-plot GRE vs GPA, colouring admitted (cyan) vs rejected (red)."""
    features = np.array(data[["gre","gpa"]])
    labels = np.array(data["admit"])
    # Boolean masks split the feature rows by admission outcome.
    admitted = features[labels == 1]
    rejected = features[labels == 0]
    plt.scatter(rejected[:, 0], rejected[:, 1], s = 25, color = 'red', edgecolor = 'k')
    plt.scatter(admitted[:, 0], admitted[:, 1], s = 25, color = 'cyan', edgecolor = 'k')
    plt.xlabel('Test (GRE)')
    plt.ylabel('Grades (GPA)')
# Plotting the points for the whole dataset (rank is ignored here)
plot_points(data)
plt.show()
"""
Explanation: Plotting the data
First let's make a plot of our data to see how it looks. In order to have a 2D plot, let's ignore the rank.
End of explanation
"""
# Separating the ranks: one DataFrame per class-rank value (1-4)
data_rank1 = data[data["rank"]==1]
data_rank2 = data[data["rank"]==2]
data_rank3 = data[data["rank"]==3]
data_rank4 = data[data["rank"]==4]
# Plotting the graphs: one GRE/GPA scatter per rank, to see whether rank
# affects how separable admitted/rejected students are
plot_points(data_rank1)
plt.title("Rank 1")
plt.show()
plot_points(data_rank2)
plt.title("Rank 2")
plt.show()
plot_points(data_rank3)
plt.title("Rank 3")
plt.show()
plot_points(data_rank4)
plt.title("Rank 4")
plt.show()
"""
Explanation: Roughly, it looks like the students with high scores in the grades and test passed, while the ones with low scores didn't, but the data is not as nicely separable as we hoped it would be. Maybe it would help to take the rank into account? Let's make 4 plots, each one for each rank.
End of explanation
"""
# Make dummy variables for rank using pandas' get_dummies: produces one
# indicator column per rank value (rank_1 .. rank_4) alongside the
# original columns
one_hot_data = pd.concat([data, pd.get_dummies(data['rank'], prefix='rank')], axis=1)
# Drop the previous rank column, now redundant with the dummies
one_hot_data = one_hot_data.drop('rank', axis=1)
# Print the first 10 rows of our data
one_hot_data[:10]
"""
Explanation: This looks more promising, as it seems that the lower the rank, the higher the acceptance rate. Let's use the rank as one of our inputs. In order to do this, we should one-hot encode it.
One-hot encoding the rank
For this, we'll use the get_dummies function in pandas.
End of explanation
"""
# Copying our data. An explicit .copy() is used instead of
# one_hot_data[:]: slicing a DataFrame can return a view, and assigning
# columns on a view triggers pandas' SettingWithCopyWarning and may not
# leave the original untouched.
processed_data = one_hot_data.copy()
# Scaling the columns into 0-1: GRE scores max out at 800, GPA at 4.0
processed_data['gre'] = processed_data['gre']/800
processed_data['gpa'] = processed_data['gpa']/4.0
# Show the first 10 rows to sanity-check the scaling
processed_data[:10]
"""
Explanation: Scaling the data
The next step is to scale the data. We notice that the range for grades is 1.0-4.0, whereas the range for test scores is roughly 200-800, which is much larger. This means our data is skewed, and that makes it hard for a neural network to handle. Let's fit our two features into a range of 0-1, by dividing the grades by 4.0, and the test score by 800.
End of explanation
"""
# Draw 90% of the row labels at random (without replacement) for training.
# NOTE(review): no random seed is set, so the split differs between runs.
sample = np.random.choice(processed_data.index, size=int(len(processed_data)*0.9), replace=False)
# Sampled rows become the training set; dropping those labels leaves the
# remaining 10% as the test set.
# NOTE(review): .iloc is used with index *labels* here — this only works
# because the default RangeIndex makes positions and labels coincide.
train_data, test_data = processed_data.iloc[sample], processed_data.drop(sample)
print("Number of training samples is", len(train_data))
print("Number of testing samples is", len(test_data))
print(train_data[:10])
print(test_data[:10])
"""
Explanation: Splitting the data into Training and Testing
In order to test our algorithm, we'll split the data into a Training and a Testing set. The size of the testing set will be 10% of the total data.
End of explanation
"""
import keras
# Separate data and one-hot encode the output
# Note: We're also turning the data into numpy arrays, in order to train the model in Keras
# Features are every column except the 'admit' label.
features = np.array(train_data.drop('admit', axis=1))
# to_categorical turns the 0/1 admit label into a 2-column one-hot vector.
targets = np.array(keras.utils.to_categorical(train_data['admit'], 2))
features_test = np.array(test_data.drop('admit', axis=1))
targets_test = np.array(keras.utils.to_categorical(test_data['admit'], 2))
print(features[:10])
print(targets[:10])
"""
Explanation: Splitting the data into features and targets (labels)
Now, as a final step before the training, we'll split the data into features (X) and targets (y).
Also, in Keras, we need to one-hot encode the output. We'll do this with the to_categorical function.
End of explanation
"""
# Imports
import numpy as np
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
# Building the model: a small fully-connected network.
# input_shape=(6,) matches the six feature columns (gre, gpa and the four
# rank dummies). Dropout layers regularise between the dense layers, and
# the final 2-unit softmax matches the one-hot admit/reject targets.
model = Sequential()
model.add(Dense(128, activation='relu', input_shape=(6,)))
model.add(Dropout(.2))
model.add(Dense(64, activation='relu'))
model.add(Dropout(.1))
model.add(Dense(2, activation='softmax'))
# Compiling the model: categorical cross-entropy pairs with the softmax
# output; accuracy is tracked so evaluate() reports it as score[1].
model.compile(loss = 'categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
"""
Explanation: Defining the model architecture
Here's where we use Keras to build our neural network.
End of explanation
"""
# Training the model for 200 epochs in mini-batches of 100;
# verbose=0 suppresses the per-epoch progress output in the notebook.
model.fit(features, targets, epochs=200, batch_size=100, verbose=0)
"""
Explanation: Training the model
End of explanation
"""
# Evaluating the model on the training and testing set.
# evaluate() returns [loss, accuracy] (per the compile metrics), so
# score[1] is the accuracy.
score = model.evaluate(features, targets)
print("\n Training Accuracy:", score[1])
score = model.evaluate(features_test, targets_test)
print("\n Testing Accuracy:", score[1])
"""
Explanation: Scoring the model
End of explanation
"""
|
gte620v/graph_entity_resolution | 201610_EmoryDS/Talk.ipynb | apache-2.0 | df = pd.read_csv(
'../data/scraped_data.csv.gz',
converters={'name': lambda x: str(x).lower(),
'number': str,
'oid': str,
'post_id': str},
parse_dates=['postdate'])
df.head()
"""
Explanation: <div><img src="../images/title.png"></div>
501c3 Nonprofit started to connect Professional and Aspiring Data Scientists with problems involving Social Good.
Hackathons
Meetups
Anidata 1.x
Collection of hackathons and events focused on helping the Fulton County DA Office prosecute Human Trafficking cases.
Fulton County DA Human Trafficking Problem
Internet Ads are used to solicit for human trafficking
Police, investigators, and DAs often start a case with a snippet of data (a phone number or email address) about a perp
One challenge is that this information is often fleeting
Our goal was to help resolve this issue by performing entity resolution (ER) on the contact information contained in Internet ad posts.
<table><tr><td>
<img src="../images/hackathon.png" width=600px/></td><td><img src="../images/hackathon2.png" width=600px/></td></tr></table>
Data
Scraped several months of ad data from seedy websites
Columns:
name
phone number
oid (poster unique ID)
posterage
region
type
Sample data includes three flat files that pair a post_id with an email, user ID, or email address.
Data Sample
End of explanation
"""
# Summary statistics for every column (include='all' covers the
# non-numeric string columns too)
df.describe(include = 'all')
"""
Explanation: Data Description
End of explanation
"""
# Count how many posts each phone number appears on, then show the
# most frequently reused numbers.
posts_per_number = df.groupby('number').count()
posts_per_number.sort_values('post_id', ascending=False)[['post_id']].head()
"""
Explanation: Entity Resolution
After wrestling with the data a bit, we realized that we can conceptualize the data as a graph.
Entity Graph
Vertices: Backpage posts
Edged: Common attributes (email, phone number, poster ID)
Explore Subgraph Sizes
End of explanation
"""
# All posts that share one example phone number
ph_sample = df[df.number=='7865032020']
ph_sample.sort_values('name',ascending=False).head()
"""
Explanation: Example Sub-Graph
One challenge is to efficiently create the sub graphs.
Our first approach was to make fully connected graph out of the data subsets.
Here is an example of a phone number that is seen on 11 posts:
End of explanation
"""
def plot_graph_data(in_data, data_type, color, G=None):
    '''Add a fully connected clique over in_data to G and draw it.

    Parameters
    ----------
    in_data : iterable of node ids (post ids) to connect pairwise.
    data_type : label stored on each edge (e.g. 'phone', 'email').
    color : edge colour, stored on each edge and used when drawing.
    G : optional existing graph to extend; a fresh nx.Graph is created
        when omitted. (The previous default, G=nx.Graph(), was a single
        shared instance created once at definition time — the classic
        mutable-default-argument pitfall — so repeated calls without G
        would silently accumulate edges across calls.)

    Returns
    -------
    The graph with the new edges added.
    '''
    if G is None:
        G = nx.Graph()
    # product(in_data, in_data) yields every ordered pair, including
    # self-loops; nx.Graph collapses the duplicate (a, b)/(b, a) edges.
    edges = [(a, b, {'type': data_type, 'color': color})
             for a, b in itertools.product(in_data, in_data)]
    G.add_edges_from(edges)
    pos = nx.circular_layout(G)
    colors = [G[u][v]['color'] for u, v in G.edges()]
    nx.draw(G, pos, node_color='k', edge_color=colors, width=1, node_size=15)
    return G
# Fully connect the posts sharing the sample phone number (blue edges)
G_samp = plot_graph_data(ph_sample.post_id, 'phone', 'b', G=nx.Graph());
"""
Explanation: Fully Connected
Phone Numbers Only
End of explanation
"""
# Posts whose poster name contains 'tuc'; the second positional argument
# of str.contains is case, so False makes the match case-insensitive
em_sample = df[df['name'].str.contains('tuc',False)]
em_sample
# Fully connect these posts by email (red edges) in a separate graph
G_samp_em = plot_graph_data(em_sample.post_id, 'email', 'r', G=nx.Graph())
"""
Explanation: Email Addresses Only
End of explanation
"""
# Add the email edges (red) on top of the existing phone graph G_samp,
# again as a fully connected clique over the matching posts
out = []
for a, b in itertools.product(em_sample.post_id, em_sample.post_id):
    out.append((a, b, {'type': 'email', 'color': 'r'}))
G_samp.add_edges_from(out)
# Re-layout and redraw the combined graph; edge colour distinguishes
# phone (blue) from email (red) links
pos = nx.spring_layout(G_samp)
colors = [G_samp[u][v]['color'] for u, v in G_samp.edges()]
nx.draw(G_samp, pos, node_color='k', edge_color=colors, width=1,node_size=15)
"""
Explanation: Combined Graph with Email and Phone Numbers
End of explanation
"""
G_samp_loop = nx.Graph()
# Connect the phone-number posts in a single loop instead of a clique:
# rotate the node list by one so zip() pairs each node with its
# successor and the loop closes back to the first node.
# (The previous approach — v_right = v[1:]; v_right[-1] = v[0] —
# overwrote the final successor and silently dropped the LAST post from
# the graph; a single-element list still yields its self-loop edge.)
v = ph_sample.post_id.values.tolist()
v_right = v[1:] + v[:1]
out = [(a, b, {'type': 'phone', 'color': 'b'}) for a, b in zip(v, v_right)]
G_samp_loop.add_edges_from(out)
pos = nx.spectral_layout(G_samp_loop)
colors = [G_samp_loop[u][v]['color'] for u, v in G_samp_loop.edges()]
nx.draw(G_samp_loop, pos, node_color='k', edge_color=colors, width=2, node_size=15)

# Add the email posts as a second loop. These edges were previously
# mislabelled 'type': 'phone'; they are email links (red), matching the
# fully-connected version above.
v = em_sample.post_id.values.tolist()
v_right = v[1:] + v[:1]
out += [(a, b, {'type': 'email', 'color': 'r'}) for a, b in zip(v, v_right)]
G_samp_loop.add_edges_from(out)
pos = nx.spring_layout(G_samp_loop)
colors = [G_samp_loop[u][v]['color'] for u, v in G_samp_loop.edges()]
nx.draw(G_samp_loop, pos, node_color='k', edge_color=colors, width=2, node_size=15)
"""
Explanation: Simplifying The Graph
This works, but having a fully connected set of graphs ends up taking a bunch of Memory.
To simplify, we only need each network of posts to be connected--not fully connected.
Create a sub-graph that is a loosely connected loop instead.
End of explanation
"""
def make_graph(df, color, data_type):
    '''
    Build edge tuples that chain all posts sharing an attribute into a loop.

    Parameters
    ----------
    df : two-column DataFrame; the first column holds the node ids
        (post ids) and the last column holds the attribute to group on
        (phone number, email name, or oid).
    color : edge colour stored on each edge, used later for plotting.
    data_type : edge label stored on each edge (e.g. 'email', 'number').

    Returns
    -------
    A flat list of (node, node, attrs) tuples suitable for
    nx.Graph.add_edges_from.

    Rather than fully connecting each group (O(n^2) edges), the posts in
    a group are linked into a single cycle (O(n) edges), which is enough
    to keep them in one connected component.
    '''
    out = []
    for _, group in df.groupby(df.columns[-1]):
        nodes = [row[0] for row in group.values.tolist()]
        # Rotate the node list by one so zip() pairs every node with its
        # successor and the cycle closes back to the first node. The old
        # approach (v_right = v[1:]; v_right[-1] = v[0]) overwrote the
        # final successor and so silently dropped the LAST post of every
        # group from the graph (for a 2-post group it emitted only a
        # self-loop). A single-node group still yields its self-loop
        # edge, as before.
        rotated = nodes[1:] + nodes[:1]
        out.extend((a, b, {'type': data_type, 'color': color})
                   for a, b in zip(nodes, rotated))
    return out
"""
Explanation: Graph Clusters
When viewed this way, a set of connected posts (vertices) and poster attributes (edges) constitute an entity.
Approach
Make a graph out of the data using these ideas
Find all of the disjoint subgraphs and designate those as entities
End of explanation
"""
# Build one edge list covering all three attribute types; rows with an
# empty attribute are excluded so blanks don't link unrelated posts
out = make_graph(df[df.name!=''][['post_id','name']],'r','email')
out += make_graph(df[df.number!=''][['post_id','number']],'b','number')
out += make_graph(df[df.oid!=''][['post_id','oid']],'g','oid')
"""
Explanation: Add Graphs for Each Type of Connection
End of explanation
"""
# Build the full graph and label each disjoint connected component with
# an integer entity id
G = nx.Graph()
G.add_edges_from(out)
sub_graphs = []
# NOTE(review): nx.connected_component_subgraphs was removed in
# networkx >= 2.4; the modern equivalent is
# (G.subgraph(c) for c in nx.connected_components(G)) — confirm the
# networkx version this runs against.
for i, x in enumerate(nx.connected_component_subgraphs(G)):
    nodes = nx.nodes(x)
    # Pair every post in the component with the component's entity id i
    sub_graphs.append(list(zip([i] * len(nodes), nodes)))
# Flatten the list of per-component lists into one list of tuples
sub_graphs = [item for sublist in sub_graphs for item in sublist]
"""
Explanation: Use NetworkX to Find Disjoint SubGraphs
End of explanation
"""
# Turn the (entity_id, post_id) pairs into a DataFrame
df_out = pd.DataFrame(sub_graphs,
                      columns=['entity_id',
                               'post_id'])
df_out.head(10)
"""
Explanation: Check Entity Data
End of explanation
"""
# Join the entity ids back onto the original post data and index by
# entity plus the contact attributes for easy lookups below
df_out = df_out.merge(df,on='post_id')
df_out.set_index(['entity_id','number','name','oid'],inplace=True)
df_out.head(10)
"""
Explanation: Merge With Original Data
And we are done...
End of explanation
"""
# Cross-section lookup: all posts whose name matches this email address
df_out.xs('tucenicienta360@gmail.com',level='name')
# Adjacency of one post node in the graph (its directly linked posts)
G['104780']
"""
Explanation: Check Results
Check Email
End of explanation
"""
# All posts assigned to entity 560
df_out.loc[560].sort_index()
# Draw the subgraph for a range of entities to eyeball the clustering
G_check = G.subgraph(df_out.loc[560:610].post_id.values)
pos = nx.spring_layout(G_check)
colors = [G_check[u][v]['color'] for u,v in G_check.edges()]
nx.draw(G_check,pos,node_color='k',edge_color=colors,width=2,node_size=5)
"""
Explanation: Check Entity
End of explanation
"""
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.